| prompt | completion | api |
|---|---|---|
| stringlengths 19–879k | stringlengths 3–53.8k | stringlengths 8–59 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (abs, asarray, cos, exp, floor, pi, sign, sin, sqrt, sum,
size, tril, isnan, atleast_2d, repeat)
from numpy.testing import assert_almost_equal
from .go_benchmark import Benchmark
class CarromTable(Benchmark):
"""
CarromTable objective function.
The CarromTable [1]_ global optimization problem is a multimodal
minimization problem defined as follows:
.. math::
f_{\text{CarromTable}}(x) = - \frac{1}{30}\left(\cos(x_1)
\cos(x_2) e^{\left|1 - \frac{\sqrt{x_1^2 + x_2^2}}{\pi}\right|}\right)^2
with :math:`x_i \in [-10, 10]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -24.15681551650653` for :math:`x_i = \pm
9.646157266348881` for :math:`i = 1, 2`
.. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [(9.646157266348881, 9.646134286497169),
(-9.646157266348881, 9.646134286497169),
(9.646157266348881, -9.646134286497169),
(-9.646157266348881, -9.646134286497169)]
self.fglob = -24.15681551650653
def fun(self, x, *args):
self.nfev += 1
u = cos(x[0]) * cos(x[1])
v = sqrt(x[0] ** 2 + x[1] ** 2)
return -((u * exp(abs(1 - v / pi))) ** 2) / 30.
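def _carrom_table_sketch():
    # Illustrative sketch, not part of the original benchmark suite: evaluating
    # the CarromTable expression directly at the stored optimum reproduces the
    # stored fglob. Uses only the numpy names imported at the top of this file.
    x = [9.646157266348881, 9.646134286497169]
    u = cos(x[0]) * cos(x[1])
    v = sqrt(x[0] ** 2 + x[1] ** 2)
    f = -((u * exp(abs(1 - v / pi))) ** 2) / 30.
    assert np.isclose(f, -24.15681551650653)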
class Chichinadze(Benchmark):
"""
Chichinadze objective function.
This class defines the Chichinadze [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{Chichinadze}}(x) = x_{1}^{2} - 12 x_{1}
+ 8 \sin\left(\frac{5}{2} \pi x_{1}\right)
+ 10 \cos\left(\frac{1}{2} \pi x_{1}\right) + 11
- 0.2 \frac{\sqrt{5}}{e^{\frac{1}{2} \left(x_{2} -0.5 \right)^{2}}}
with :math:`x_i \in [-30, 30]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = -42.94438701899098` for :math:`x =
[6.189866586965680, 0.5]`
.. [1] <NAME>. Global Optimization Benchmarks and AMPGO retrieved 2015
TODO: Jamil#33 has a dividing factor of 2 in the sin term. However, f(x)
for the given solution does not give the global minimum. i.e. the equation
is at odds with the solution.
Only by removing the dividing factor of 2, i.e. `8 * sin(5 * pi * x[0])`
does the given solution result in the given global minimum.
Do we keep the result or equation?
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-30.0] * self.N, [30.0] * self.N))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [[6.189866586965680, 0.5]]
self.fglob = -42.94438701899098
def fun(self, x, *args):
self.nfev += 1
return (x[0] ** 2 - 12 * x[0] + 11 + 10 * cos(pi * x[0] / 2)
+ 8 * sin(5 * pi * x[0] / 2)
- 1.0 / sqrt(5) * exp(-((x[1] - 0.5) ** 2) / 2))
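def _chichinadze_sketch():
    # Illustrative sketch, not part of the original benchmark suite: evaluating
    # the expression implemented in Chichinadze.fun (i.e. with the /2 divisor in
    # both trigonometric terms) at the stored optimum appears to reproduce the
    # stored fglob, which is one way to check the TODO above numerically.
    x0, x1 = 6.189866586965680, 0.5
    f = (x0 ** 2 - 12 * x0 + 11 + 10 * cos(pi * x0 / 2)
         + 8 * sin(5 * pi * x0 / 2)
         - 1.0 / sqrt(5) * exp(-((x1 - 0.5) ** 2) / 2))
    assert np.isclose(f, -42.94438701899098)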
class Cigar(Benchmark):
"""
Cigar objective function.
This class defines the Cigar [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Cigar}}(x) = x_1^2 + 10^6\sum_{i=2}^{n} x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \in
[-100, 100]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
self.change_dimensionality = True
def fun(self, x, *args):
self.nfev += 1
return x[0] ** 2 + 1e6 * sum(x[1:] ** 2)
class Cola(Benchmark):
"""
Cola objective function.
This class defines the Cola global optimization problem. The 17-dimensional
function computes indirectly the formula :math:`f(n, u)` by setting
:math:`x_0 = y_0, x_1 = u_0, x_i = u_{2(i-2)}, y_i = u_{2(i-2)+1}`:
.. math::
f_{\text{Cola}}(x) = \sum_{i<j}^{n} \left (r_{i,j} - d_{i,j} \right )^2
Where :math:`r_{i, j}` is given by:
.. math::
r_{i, j} = \sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
And :math:`d` is a symmetric matrix given by:
.. math::
d = \left [ d_{ij} \right ] = \begin{pmatrix}
1.27 & & & & & & & & \\
1.69 & 1.43 & & & & & & & \\
2.04 & 2.35 & 2.43 & & & & & & \\
3.09 & 3.18 & 3.26 & 2.85 & & & & & \\
3.20 & 3.22 & 3.27 & 2.88 & 1.55 & & & & \\
2.86 & 2.56 & 2.58 & 2.59 & 3.12 & 3.06 & & & \\
3.17 & 3.18 & 3.18 & 3.12 & 1.31 & 1.64 & 3.00 & \\
3.21 & 3.18 & 3.18 & 3.17 & 1.70 & 1.36 & 2.95 & 1.32 & \\
2.38 & 2.31 & 2.42 & 1.94 & 2.85 & 2.81 & 2.56 & 2.91 & 2.97
\end{pmatrix}
This function has bounds :math:`x_0 \in [0, 4]` and :math:`x_i \in [-4, 4]`
for :math:`i = 1, ..., n-1`.
*Global optimum*: :math:`f(x) = 11.7464`.
.. [1] <NAME>. & <NAME>. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=17):
Benchmark.__init__(self, dimensions)
self._bounds = [[0.0, 4.0]] + list(zip([-4.0] * (self.N - 1),
[4.0] * (self.N - 1)))
self.global_optimum = [[0.651906, 1.30194, 0.099242, -0.883791,
-0.8796, 0.204651, -3.28414, 0.851188,
-3.46245, 2.53245, -0.895246, 1.40992,
-3.07367, 1.96257, -2.97872, -0.807849,
-1.68978]]
self.fglob = 11.7464
self.d = asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.27, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.69, 1.43, 0, 0, 0, 0, 0, 0, 0, 0],
[2.04, 2.35, 2.43, 0, 0, 0, 0, 0, 0, 0],
[3.09, 3.18, 3.26, 2.85, 0, 0, 0, 0, 0, 0],
[3.20, 3.22, 3.27, 2.88, 1.55, 0, 0, 0, 0, 0],
[2.86, 2.56, 2.58, 2.59, 3.12, 3.06, 0, 0, 0, 0],
[3.17, 3.18, 3.18, 3.12, 1.31, 1.64, 3.00, 0, 0, 0],
[3.21, 3.18, 3.18, 3.17, 1.70, 1.36, 2.95, 1.32, 0, 0],
[2.38, 2.31, 2.42, 1.94, 2.85, 2.81, 2.56, 2.91, 2.97, 0.]])
def fun(self, x, *args):
self.nfev += 1
xi = atleast_2d(asarray([0.0, x[0]] + list(x[1::2])))
xj = repeat(xi, size(xi, 1), axis=0)
xi = xi.T
yi = atleast_2d(asarray([0.0, 0.0] + list(x[2::2])))
yj = repeat(yi, size(yi, 1), axis=0)
yi = yi.T
inner = (sqrt(((xi - xj) ** 2 + (yi - yj) ** 2)) - self.d) ** 2
inner = tril(inner, -1)
return sum(sum(inner, axis=1))
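def _pairwise_distance_sketch():
    # Illustrative sketch, not part of the original benchmark suite: Cola.fun
    # builds the full pairwise-distance matrix by broadcasting row and column
    # copies of the coordinates, then keeps only the strict lower triangle so
    # each pair (i > j) is counted exactly once. Toy example with three points:
    px = asarray([0.0, 1.0, 4.0])
    py = asarray([0.0, 0.0, 3.0])
    r = sqrt((px[:, None] - px[None, :]) ** 2 + (py[:, None] - py[None, :]) ** 2)
    lower = tril(r, -1)
    # distances (1,0), (2,0) and (2,1) are 1, 5 and sqrt(18); everything else is 0
    assert np.isclose(sum(lower), 1.0 + 5.0 + np.sqrt(18.0))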
class Colville(Benchmark):
"""
Colville objective function.
This class defines the Colville global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Colville}}(x) = \left(x_{1} -1\right)^{2}
+ 100 \left(x_{1}^{2} - x_{2}\right)^{2}
+ 10.1 \left(x_{2} -1\right)^{2} + \left(x_{3} -1\right)^{2}
+ 90 \left(x_{3}^{2} - x_{4}\right)^{2}
+ 10.1 \left(x_{4} -1\right)^{2} + 19.8 \frac{x_{4} -1}{x_{2}}
with :math:`x_i \in [-10, 10]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 1` for
:math:`i = 1, ..., 4`
.. [1] <NAME>. & <NAME>.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
TODO docstring equation is wrong use Jamil#36
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.global_optimum = [[1 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return (100 * (x[0] - x[1] ** 2) ** 2
+ (1 - x[0]) ** 2 + (1 - x[2]) ** 2
+ 90 * (x[3] - x[2] ** 2) ** 2
+ 10.1 * ((x[1] - 1) ** 2 + (x[3] - 1) ** 2)
+ 19.8 * (x[1] - 1) * (x[3] - 1))
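def _colville_sketch():
    # Illustrative sketch, not part of the original benchmark suite: at the
    # stored optimum x = (1, 1, 1, 1) every term of the implemented Colville
    # expression vanishes, consistent with fglob = 0 (see the TODO above about
    # the docstring equation).
    x = [1.0, 1.0, 1.0, 1.0]
    f = (100 * (x[0] - x[1] ** 2) ** 2 + (1 - x[0]) ** 2 + (1 - x[2]) ** 2
         + 90 * (x[3] - x[2] ** 2) ** 2
         + 10.1 * ((x[1] - 1) ** 2 + (x[3] - 1) ** 2)
         + 19.8 * (x[1] - 1) * (x[3] - 1))
    assert f == 0.0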
class Corana(Benchmark):
"""
Corana objective function.
This class defines the Corana [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Corana}}(x) = \sum_{i=1}^n \begin{cases} 0.15 d_i
[z_i - 0.05\textrm{sgn}(z_i)]^2 & \textrm{if } |x_i - z_i| < 0.05 \\
d_i x_i^2 & \textrm{otherwise}\end{cases}
Where, in this exercise:
.. math::
z_i = 0.2 \lfloor |x_i/s_i|+0.49999\rfloor\textrm{sgn}(x_i),
d_i=(1,1000,10,100, ...)
with :math:`x_i \in [-5, 5]` for :math:`i = 1, ..., 4`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., 4`
.. [1] <NAME>. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N))
self.global_optimum = [[0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
d = [1., 1000., 10., 100.]
r = 0
for j in range(4):
zj = floor(abs(x[j] / 0.2) + 0.49999) * sign(x[j])  # api: numpy.sign
# -*- coding: utf-8 -*-
"""
Support functions for reading or calculating DISPATCH2 grid and geometry information.
"""
import numpy as np
class GeometricFactors(dict):
"""Calculate and store the geometric factors used by curvilinear grids."""
def __init__(self, patch):
"""Constructor."""
# Define geometric factors with the same notation as in `mesh_mod` ("c"
# for zone-centred and "f" for face-centred).
self['h2c'] = None
self['h2f'] = None
self['h31c'] = None
self['h31f'] = None
self['h32c'] = None
self['h32f'] = None
self['dx1c'] = None
self['dx1f'] = None
self['dx2c'] = None
self['dx2f'] = None
self['dx3c'] = None
self['dx3f'] = None
self['dvol1c'] = None
self['dvol1f'] = None
self['dvol2c'] = None
self['dvol2f'] = None
self['dvol3c'] = None
self['dvol3f'] = None
self['dar1c'] = None
self['dar1f'] = None
self['dar2c'] = None
self['dar2f'] = None
self['dar31c'] = None
self['dar31f'] = None
self['dar32c'] = None
self['dar32f'] = None
# initialize the grid
self.init_grid(patch)
def init_grid(self, p):
"""Initialise geometric factors based on coord. type."""
if p.mesh_type == 'Cartesian': self.init_Cartesian(p)
elif p.mesh_type == 'cylindrical': self.init_cylindrical(p)
elif p.mesh_type == 'spherical': self.init_spherical(p)
def init_Cartesian(self, p):
"""Initialise geometric factors for a Cartesian coord. system."""
n1, n2, n3 = p.ncell
# 1-direction
self['h2c'] = np.ones(n1)
self['h2f'] = np.ones(n1)
self['h31c'] = self['h2c'].view()
self['h31f'] = self['h2f'].view()
# 2-direction
self['h32c'] = np.ones(n2)
self['h32f'] = self['h32c'].view()
# linear size elements
self['dx1c'] = np.ones(n1) * p.ds[0]
self['dx1f'] = np.ones(n1) * p.ds[0]
self['dx2c'] = np.ones(n2) * p.ds[1]
self['dx2f'] = np.ones(n2)  # api: numpy.ones
# with input in the DataFrame format
# update metadata of the dataset
import os, sys
import typing
import scipy.io
import numpy as np
from sklearn import preprocessing
from collections import OrderedDict
from typing import cast, Any, Dict, List, Union, Sequence, Optional, Tuple
#from common_primitives import utils
from d3m import container
from d3m.metadata import base as metadata_base
from d3m.metadata import hyperparams
from d3m.metadata import params
from d3m.primitive_interfaces.supervised_learning import SupervisedLearnerPrimitiveBase
from d3m.primitive_interfaces import base
from d3m.primitive_interfaces.base import CallResult
from rpi_d3m_primitives.structuredClassifier.structured_Classify_model import Model
from rpi_d3m_primitives.featSelect.RelationSet import RelationSet
import rpi_d3m_primitives
import time
from sklearn.impute import SimpleImputer
Inputs = container.DataFrame
Outputs = container.DataFrame
__all__ = ('NaiveBayes_BayesianInf',)
class Params(params.Params):
pass
class Hyperparams(hyperparams.Hyperparams):
pass
class NaiveBayes_BayesianInf(SupervisedLearnerPrimitiveBase[Inputs, Outputs, Params, Hyperparams]):
"""
A primitive that performs naive Bayes classification. During training, the input should be a matrix of tabular numerical/categorical data and an array of labels. During testing, the input is a data matrix of numerical features only, and the output is the predicted labels with generated metadata.
"""
metadata = metadata_base.PrimitiveMetadata({
'id': 'd63942d1-7a0f-47e9-8fcc-5f1e58dffa9b',
'version': '2.1.5',
'name': 'Naive Bayes Classifier',
'keywords': ['Naive Bayes','Classification'],
'description': 'This algorithm is the Bayesian Inference with Naive Bayes classification',
'source': {
'name': rpi_d3m_primitives.__author__,
'contact': 'mailto:<EMAIL>',
'uris': [
'https://github.com/zijun-rpi/d3m-primitives/blob/master/NaiveBayes_BayesianInf.py',
'https://github.com/zijun-rpi/d3m-primitives.git'
]
},
'installation':[
{
'type': metadata_base.PrimitiveInstallationType.PIP,
'package': 'rpi_d3m_primitives',
'version': rpi_d3m_primitives.__version__
}
],
'python_path': 'd3m.primitives.classification.naive_bayes.BayesianInfRPI',
'algorithm_types': [
metadata_base.PrimitiveAlgorithmType.NAIVE_BAYES_CLASSIFIER],
'primitive_family': metadata_base.PrimitiveFamily.CLASSIFICATION
})
def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0, docker_containers: typing.Union[typing.Dict[str, base.DockerContainer]] = None) -> None:
super().__init__(hyperparams=hyperparams, random_seed=random_seed, docker_containers=docker_containers)
self._index = None
self._training_inputs = None
self._training_outputs = None
self._origin_inputs = None #for label encoder
self._fitted = False
self._cate_flag = None
self._clf = Model('nb', bayesInf=1, PointInf=0) #classifier
self._LEoutput = preprocessing.LabelEncoder() #label encoder
self._Imputer = SimpleImputer(missing_values=np.nan, strategy='most_frequent') #imputer
self._nbins = 10
self._Kbins = preprocessing.KBinsDiscretizer(n_bins=self._nbins, encode='ordinal', strategy='uniform') #KbinsDiscretizer
self._discTrainset = None
def _store_target_columns_metadata(self, outputs: Outputs) -> None:
outputs_length = outputs.metadata.query((metadata_base.ALL_ELEMENTS,))['dimension']['length']
target_columns_metadata: List[Dict] = []
for column_index in range(outputs_length):
column_metadata = OrderedDict(outputs.metadata.query_column(column_index))
# Update semantic types and prepare it for predicted targets.
semantic_types = list(column_metadata.get('semantic_types', []))
if 'https://metadata.datadrivendiscovery.org/types/PredictedTarget' not in semantic_types:
semantic_types.append('https://metadata.datadrivendiscovery.org/types/PredictedTarget')
semantic_types = [semantic_type for semantic_type in semantic_types if semantic_type != 'https://metadata.datadrivendiscovery.org/types/TrueTarget']
column_metadata['semantic_types'] = semantic_types
target_columns_metadata.append(column_metadata)
self._target_columns_metadata = target_columns_metadata
##TO DO:
#select columns via semantic types
#remove preprocessing
def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:
## Update semantic types and prepare it for predicted targets
self._store_target_columns_metadata(outputs)
## memory original training inputs
self._origin_inputs = inputs
## set training labels
metadata = outputs.metadata
column_metadata = metadata.query((metadata_base.ALL_ELEMENTS, 0))
semantic_types = column_metadata.get('semantic_types', [])
if 'https://metadata.datadrivendiscovery.org/types/CategoricalData' in semantic_types:
self._LEoutput.fit(outputs)
self._training_outputs = self._LEoutput.transform(outputs) #starting from zero
## convert categorical values to numerical values in training data
metadata = inputs.metadata
[m,n] = inputs.shape
self._training_inputs = np.zeros((m,n))
self._cate_flag = np.zeros((n,))
for column_index in metadata.get_elements((metadata_base.ALL_ELEMENTS,)):
if column_index is metadata_base.ALL_ELEMENTS:
continue
column_metadata = metadata.query((metadata_base.ALL_ELEMENTS, column_index))
semantic_types = column_metadata.get('semantic_types', [])
if 'https://metadata.datadrivendiscovery.org/types/CategoricalData' in semantic_types:
LE = preprocessing.LabelEncoder()
LE = LE.fit(inputs.iloc[:,column_index])
self._training_inputs[:,column_index] = LE.transform(inputs.iloc[:,column_index])
self._cate_flag[column_index] = 1
elif 'http://schema.org/Text' in semantic_types:
pass
else:
temp = list(inputs.iloc[:, column_index].values)
for i in np.arange(len(temp)):
if bool(temp[i]):
self._training_inputs[i,column_index] = float(temp[i])
else:
self._training_inputs[i,column_index] = float('nan')
self._fitted = False
def fit(self, *, timeout: float = None, iterations: int = None) -> None:
if self._fitted:
return CallResult(None)
if self._training_inputs is None or self._training_outputs is None:
raise ValueError('Missing training data, or missing values exist.')
## impute missing values
self._Imputer.fit(self._training_inputs)
self._training_inputs = self._Imputer.transform(self._training_inputs)
## discretize non-categorical values
disc_training_inputs = self._training_inputs
if not len(np.where(self._cate_flag == 0)[0]) == 0:
self._Kbins.fit(self._training_inputs[:, np.where(self._cate_flag == 0)[0]]) #find non-categorical values
temp = self._Kbins.transform(self._training_inputs[:, np.where(self._cate_flag == 0)[0]])
disc_training_inputs[:, np.where(self._cate_flag == 0)  # api: numpy.where
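# Standalone sketch with made-up toy data (not part of the primitive above): the
# preprocessing chain used by set_training_data/fit label-encodes categorical
# columns, imputes missing entries with the per-column most frequent value, and
# bins continuous columns into ordinal, uniform-width bins. It relies only on the
# numpy / sklearn imports already present at the top of this file.
_toy_cat = preprocessing.LabelEncoder().fit_transform(["red", "blue", "red", "green"])
_toy_cont = np.array([[1.0], [np.nan], [2.5], [4.0]])
_toy_cont = SimpleImputer(missing_values=np.nan, strategy='most_frequent').fit_transform(_toy_cont)
_toy_cont = preprocessing.KBinsDiscretizer(n_bins=3, encode='ordinal',
                                           strategy='uniform').fit_transform(_toy_cont)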
import numpy as np
def Resample(input_signal, src_fs, tar_fs):
'''
:param input_signal: input signal
:param src_fs: sampling rate of the input signal
:param tar_fs: sampling rate of the output signal
:return: output signal
'''
if src_fs != tar_fs:
dtype = input_signal.dtype
audio_len = input_signal.shape[1]
audio_time_max = 1.0 * (audio_len) / src_fs
src_time = 1.0 * np.linspace(0, audio_len, audio_len) / src_fs
tar_time = 1.0 * np.linspace(0, int(audio_time_max * tar_fs), int(audio_time_max * tar_fs)) / tar_fs
for i in range(input_signal.shape[0]):
if i == 0:
output_signal = np.interp(tar_time, src_time, input_signal[i, :]).astype(dtype)
output_signal = output_signal.reshape(1, len(output_signal))
else:
tmp = np.interp(tar_time, src_time, input_signal[i, :]).astype(dtype)
tmp = tmp.reshape(1, len(tmp))
output_signal = np.vstack((output_signal, tmp))  # api: numpy.vstack
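# Standalone sketch (illustration only, separate from the truncated Resample
# above): the core of the resampling is np.interp of each channel onto a new,
# uniformly spaced time axis. Toy example resampling a 1-second, 4 Hz ramp to 8 Hz:
_src_fs, _tar_fs = 4, 8
_sig = np.arange(4, dtype=float)
_src_t = np.arange(4) / _src_fs
_tar_t = np.arange(8) / _tar_fs
_resampled = np.interp(_tar_t, _src_t, _sig)
assert _resampled.shape == (8,)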
"""Unittests"""
# pylint: disable=missing-function-docstring
import unittest
from unittest import mock
from typing import Any
import numpy
import ipyopt
import ipyopt.optimize
try:
import scipy
import scipy.optimize
have_scipy = True
except ImportError:
have_scipy = False
try:
from . import c_capsules
have_c_capsules = True
except ImportError:
have_c_capsules = False
def e_x(n):
"""x unit vector"""
out = numpy.zeros(n)
out[0] = 1.0
return out
def sparsity_g(n: int):
return (
numpy.zeros(n, dtype=int),
numpy.arange(n, dtype=int),
)
def sparsity_h(n: int):
return (numpy.arange(n, dtype=int), numpy.arange(n, dtype=int))
def x_L(n: int) -> numpy.ndarray:
return numpy.full((n,), -10.0)  # api: numpy.full
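# Added note (not part of the original test module): the helpers above encode
# sparsity patterns as (row_indices, col_indices) pairs, e.g.
#   sparsity_g(3) -> (array([0, 0, 0]), array([0, 1, 2]))   # one dense constraint row
#   sparsity_h(3) -> (array([0, 1, 2]), array([0, 1, 2]))   # diagonal Hessian entries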
from __future__ import absolute_import, division, print_function
# This notebook is for finding the segmentation threshold that most clearly finds worms in a recording.
# It is intended as an alternative method of validating the MultiWorm Tracker's results.
# third party
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from scipy import ndimage
from skimage import morphology
import matplotlib.patches as mpatches
def outline_to_outline_matrix(outline, bbox=None):
"""
returns a filled in binary image of a list of outline points
params
-----
outline: (list of tuples)
the list of points to be turned into a filled in image [(x1, y1), (x2, y2) ... etc.]
bbox: (tuple of four ints)
the bounding box for the image to be created in the form of (xmin, ymin, xmax, ymax)
if not specified, just takes smallest bounding box around outline points.
returns
------
outline_matrix: (np.array)
an np array containing boolean values denoting the filled in outline shape.
"""
if len(outline) == 4:
print('precaution, a len 4 outline is usually something else by accident')
print(outline)
# prepare blob outline and bounding box.
if isinstance(outline, np.ndarray):
x = outline[:, 0]
y = outline[:, 1]
else:
x, y = zip(*outline)
if bbox == None:
bbox = (min(x), min(y), max(x), max(y))
minx, miny, maxx, maxy = bbox
x = [i - minx for i in x]
y = [i - miny for i in y]
shape = (maxx - minx + 1, maxy - miny + 1)
outline_matrix = np.zeros(shape)  # api: numpy.zeros
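# Standalone sketch (the remainder of the original function is not shown above):
# one plausible way to finish the fill step, consistent with the `ndimage` import
# at the top of this file, is to rasterise the shifted outline points into the
# zero matrix and fill the enclosed region with scipy's binary_fill_holes.
_toy = np.zeros((5, 5), dtype=bool)
_toy[0, :] = _toy[-1, :] = _toy[:, 0] = _toy[:, -1] = True   # outline of a 5x5 square
_filled = ndimage.binary_fill_holes(_toy)
assert _filled.all()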
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 6 10:10:03 2019
@author: <NAME>
"""
import numpy as np
import pandas as pd
import glob as glob
from tg_set_globalplotting import tg_set_globalplotting
from tg_simulate_behaviour import tg_simulate_behaviour
from tg_suboptimal_goal_choice import tg_suboptimal_goal_choice
from tg_suboptimal_goal_choice_sim import tg_suboptimal_goal_choice_sim
from tg_performance_sim import tg_performance_sim
tg_set_globalplotting(style='frontiers')
dat = pd.read_csv('../Results/preprocessed_results.csv')
#%% S6-7 Figure
#Random agent
n_sample = 1000
param_sample = pd.DataFrame()
param_sample['beta_1'] = np.ones(n_sample)*10
param_sample['beta_2'] = np.ones(n_sample)*-100
param_sample['bias_1'] = np.ones(n_sample)  # api: numpy.ones
import numpy as np
from ..core.data import DNPData
def ndalign(data, dim="f2", reference=None, center=None, width=None, average=None):
"""Alignment of NMR spectra using FFT Cross Correlation
Args:
data (DNPData) : dnpdata object
dim (str) : dimension to align along
reference (numpy array or DNPData) : reference spectrum to align against
center (float) : center of the spectral range used for alignment
width (float) : width of the spectral range used for alignment
average (int) : window size of a moving average applied before cross correlation
returns:
dnpdata: Aligned data in container
"""
proc_parameters = {"dim": dim}
original_order = data.dims # Original order of dims
data.reorder([dim]) # Move dim to first dimension
all_values = data.values # Extract Data Values for alignment
if center != None and width != None:
start = center - 0.5 * width
stop = center + 0.5 * width
elif center == None and width == None:
start = data.coords[dim][-1]
stop = data.coords[dim][0]
else:
raise ValueError("selected range is not accpetable")
values = data[dim, (start, stop)].values
all_original_shape = all_values.shape
original_shape = values.shape # Preserve original shape
all_align_dim_length = all_original_shape[0]
align_dim_length = original_shape[0] # length of dimension to align down
all_values = all_values.reshape(all_align_dim_length, -1)
values = values.reshape(align_dim_length, -1) # Reshape to 2d
new_shape = np.shape(values)
dim2 = new_shape[1]
abs_values = np.abs(values)
if reference is None:
reference = np.abs(values[:, -1])
elif isinstance(reference, DNPData):
reference = np.abs(reference.values)
if average != None:
reference = np.convolve(reference, np.ones(average), "same") / average
ref_max_ix = np.argmax(reference)
all_aligned_values = np.zeros_like(all_values)
for ix in range(dim2):
if average != None:
abs_values[:, ix] = (
np.convolve(abs_values[:, ix], np.ones(average)  # api: numpy.ones
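# Standalone sketch (illustration only, not dnplab's actual implementation): the
# "FFT cross correlation" named in the ndalign docstring can be computed as the
# inverse FFT of one spectrum's FFT times the conjugate FFT of the reference; the
# argmax of that correlation is the circular shift to undo with np.roll.
_n = 128
_ref = np.exp(-0.5 * ((np.arange(_n) - 40.0) / 3.0) ** 2)   # toy peak at index 40
_spec = np.roll(_ref, 7)                                    # same peak shifted by 7
_corr = np.fft.ifft(np.fft.fft(_spec) * np.conj(np.fft.fft(_ref)))
_shift = int(np.argmax(np.abs(_corr)))                      # recovers the lag of 7
_aligned = np.roll(_spec, -_shift)
assert np.allclose(_aligned, _ref)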
import tdpy
from tdpy.util import summgene
import numpy as np
import h5py
import time as timemodu
#import pickle
import scipy.signal
from scipy import interpolate
import os, sys, datetime, fnmatch
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
#from astroquery.mast import Catalogs
#import astroquery
#
#import astropy
#from astropy.wcs import WCS
#from astropy import units as u
#from astropy.io import fits
#import astropy.time
#from astropy.coordinates import SkyCoord
import multiprocessing
def plot_anim(gdat, cntp, strgvarb, cmap='Greys_r', strgtitlbase='', boolresi=False, indxsideyposoffs=0, indxsidexposoffs=0):
vmin = np.amin(cntp)
vmax = np.amax(cntp)
if boolresi:
vmax = max(abs(vmax), abs(vmin))
vmin = -vmax
for t in gdat.indxtime:
strgtitl = strgtitlbase + ', JD = %d' % gdat.time[t]
path = gdat.pathdata + '%s_%s_%05d.pdf' % (strgvarb, gdat.strgcntp, t)
plot_imag(gdat, cntp[:, :, t], path=path, strgvarb=strgvarb, cmap=cmap, strgtitl=strgtitl, \
indxsideyposoffs=indxsideyposoffs, indxsidexposoffs=indxsidexposoffs, boolresi=boolresi, vmin=vmin, vmax=vmax)
os.system('convert -density 300 -delay 10 %s%s_%s_*.pdf %s%s_%s.gif' % (gdat.pathdata, strgvarb, gdat.strgcntp, gdat.pathdata, strgvarb, gdat.strgcntp))
### delete the frame plots
path = gdat.pathdata + '%s_%s_*.pdf' % (strgvarb, gdat.strgcntp)
#os.system('rm %s' % path)
def plot_imag(gdat, cntp, strgvarb, path=None, cmap=None, indxsideyposoffs=0, indxsidexposoffs=0, \
strgtitl='', boolresi=False, xposoffs=None, yposoffs=None, indxpixlcolr=None, vmin=None, vmax=None):
if cmap == None:
if boolresi:
cmap = 'RdBu'
else:
cmap = 'Greys_r'
if vmin is None or vmax is None:
vmax = np.amax(cntp)
vmin = np.amin(cntp)
if boolresi:
vmax = max(abs(vmax), abs(vmin))
vmin = -vmax
if gdat.cntpscaltype == 'asnh':
cntp = np.arcsinh(cntp)
vmin = np.arcsinh(vmin)
vmax = np.arcsinh(vmax)
figr, axis = plt.subplots(figsize=(8, 6))
objtimag = axis.imshow(cntp, origin='lower', interpolation='nearest', cmap=cmap, vmin=vmin, vmax=vmax)
if indxpixlcolr is not None:
temp = np.zeros_like(cntp).flatten()
temp[indxpixlcolr[-1]] = 1.
temp = temp.reshape((gdat.numbside, gdat.numbside))
alph = np.zeros_like(cntp).flatten()
alph[indxpixlcolr[-1]] = 1.
alph = alph.reshape((gdat.numbside, gdat.numbside))
alph = np.copy(temp)
axis.imshow(temp, origin='lower', interpolation='nearest', alpha=0.5)
# overplot catalog
plot_catl(gdat, axis, indxsideyposoffs=indxsideyposoffs, indxsidexposoffs=indxsidexposoffs)
# make color bar
cax = figr.add_axes([0.83, 0.1, 0.03, 0.8])
cbar = figr.colorbar(objtimag, cax=cax)
if gdat.cntpscaltype == 'asnh':
tick = cbar.get_ticks()
tick = np.sinh(tick)
labl = ['%d' % int(tick[k]) for k in range(len(tick))]
cbar.set_ticklabels(labl)
if path is None:
path = gdat.pathimag + '%s_%s.pdf' % (strgvarb, gdat.strgcntp)
print('Writing to %s...' % path)
#plt.tight_layout()
plt.savefig(path)
plt.close()
def init( \
listisec=None, \
listicam=None, \
listiccd=None, \
extrtype='qlop', \
targtype='slen', \
pathfile=None, \
datatype='obsd', \
rasctarg=None, \
decltarg=None, \
labltarg=None, \
strgtarg=None, \
numbside=None, \
**args \
):
# inputs:
# 1) TIC IDs
# 2) One sector, One Cam, One CCD
# preliminary setup
# construct the global object
gdat = tdpy.util.gdatstrt()
#for attr, valu in locals().iteritems():
# if '__' not in attr and attr != 'gdat':
# setattr(gdat, attr, valu)
# copy all provided inputs to the global object
#for strg, valu in args.iteritems():
# setattr(gdat, strg, valu)
gdat.datatype = datatype
# string for date and time
gdat.strgtimestmp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
print('LoSBOT initialized at %s...' % gdat.strgtimestmp)
#if ((listicam is not None or listiccd is not None) and listtici is not None):
# raise Exception('')
#star = eleanor.Source(tic=38846515, sector=1, tc=True)
gdat.strgcntp = gdat.datatype
# paths
## read PCAT path environment variable
gdat.pathbase = os.environ['LSBT_DATA_PATH'] + '/'
gdat.pathdata = gdat.pathbase + 'data/'
gdat.pathimag = gdat.pathbase + 'imag/'
## define paths
#gdat.pathdataorig = '/pdo/qlp-data/orbit-%d/ffi/cam%d/ccd%d/FITS/' % (isec, icam, iccd)
gdat.pathdataorig = gdat.pathdata + 'ffis/'
gdat.pathdatafilt = gdat.pathdata + 'filt/'
gdat.pathdatainit = gdat.pathdata + 'init/'
gdat.pathdatainitimag = gdat.pathdatainit + 'imag/'
gdat.pathdatainitanim = gdat.pathdatainit + 'anim/'
gdat.pathdatacomm = gdat.pathdata + 'comm/'
## make folders
os.system('mkdir -p %s' % gdat.pathdatafilt)
gdat.numbsidefilt = 21
gdat.numbside = 2048
gdat.numbpixl = gdat.numbside**2
gdat.numbtime = 100
gdat.indxtime = np.arange(gdat.numbtime)
gdat.numbdata = gdat.numbtime * gdat.numbpixl
gdat.factdown = 8
gdat.numbsidedown = gdat.numbside // gdat.factdown
if pathfile is None:
if numbside is None:
strgmode = 'full'
else:
strgmode = 'targ'
else:
strgmode = 'file'
random_state = 42
timeexpo = 1426.
if strgmode == 'targ':
from astroquery.mast import Tesscut
from astropy.coordinates import SkyCoord
cutout_coord = SkyCoord(rasctarg, decltarg, unit="deg")
listhdundata = Tesscut.get_cutouts(cutout_coord, gdat.numbside)
sector_table = Tesscut.get_sectors(SkyCoord(gdat.rasctarg, gdat.decltarg, unit="deg"))
listisec = sector_table['sector'].data
listicam = sector_table['camera'].data
listiccd = sector_table['ccd'].data
if len(listhdundata) == 0:
raise Exception('TESSCut could not find any data.')
arryseco = np.zeros((gdat.numbsidedown, gdat.numbsidedown, gdat.numbtime))
for t in gdat.indxtime:
# get FFI iimage
#cntpimag =
cntpimag = np.random.randn(gdat.numbpixl).reshape((gdat.numbside, gdat.numbside))
pathsave = ''
if not os.path.exists(pathsave):
# filter
arrysecotemp = scipy.signal.medfilt(cntpimag, (gdat.numbsidefilt, gdat.numbsidefilt))
# plot
# down-sample
arryseco[:, :, t] = np.mean(arrysecotemp.reshape((gdat.numbsidedown, gdat.factdown, gdat.numbsidedown, gdat.factdown)), (1, 3))
# save filtered FFI
else:
pass
# load filtered FFI
raise Exception('')
# plot
numbsect = len(listisec)
indxsect = np.arange(numbsect)
for o in indxsect:
# check inputs
print('Sector: %d' % listisec[o])
print('Camera: %d' % listicam[o])
print('CCD: %d' % listiccd[o])
isec = listisec[o]
verbtype = 1
np.random.seed(45)
# fix the seed
if gdat.datatype == 'mock':
gdat.numbsour = 1000
numbsupn = 10
gdat.numbtimerebn = None#30
# settings
## plotting
gdat.cntpscaltype = 'asnh'
if pathfile is not None and gdat.datatype == 'mock':
raise Exception('')
# grid of flux space
minmproj = 0.1
maxmproj = 2
limtproj = [minmproj, maxmproj]
arry = np.linspace(minmproj, maxmproj, 100)
xx, yy = np.meshgrid(arry, arry)
magtminm = 12.
magtmaxm = 19.
# get data
if gdat.datatype == 'obsd':
print('Reading files...')
path = gdat.pathdata + 'qlop/'
liststrgfile = fnmatch.filter(os.listdir(path), '*.h5')
liststrgfile = liststrgfile[:10000]
liststrgtici = []
for strgfile in liststrgfile:
liststrgtici.append(strgfile[:-3])
numbdata = len(liststrgfile)
fracdatanann = np.empty(numbdata)
listindxtimebadd = []
for k, strgfile in enumerate(liststrgfile):
with h5py.File(path + strgfile, 'r') as objtfile:
if k == 0:
gdat.time = objtfile['LightCurve/BJD'][()]
gdat.numbtime = gdat.time.size
lcur = np.empty((numbdata, gdat.numbtime, 2))
tmag = objtfile['LightCurve/AperturePhotometry/Aperture_002/RawMagnitude'][()]
if k == 0:
gdat.indxtime = np.arange(gdat.numbtime)
indxtimegood = np.where(np.isfinite(tmag))[0]
indxtimenann = np.setdiff1d(gdat.indxtime, indxtimegood)
lcur[k, :, 0] = 10**(-(tmag - np.median(tmag[indxtimegood])) / 2.5)
listindxtimebadd.append(indxtimenann)
fracdatanann[k] = indxtimenann.size / float(gdat.numbtime)
listindxtimebadd = np.concatenate(listindxtimebadd)
listindxtimebadd = np.unique(listindxtimebadd)
listindxtimebadd = np.concatenate((listindxtimebadd, np.arange(100)))
listindxtimebadd = np.concatenate((listindxtimebadd, gdat.numbtime // 2 + np.arange(100)))
listindxtimegood = np.setdiff1d(gdat.indxtime, listindxtimebadd)
#listhdundata = fits.open(pathfile)
print('Filtering the data...')
# filter the data
gdat.time = gdat.time[listindxtimegood]
lcur = lcur[:, listindxtimegood, :]
gdat.numbtime = gdat.time.size
if (~np.isfinite(lcur)).any():
raise Exception('')
# plot the data
figr, axis = plt.subplots(figsize=(6, 4))
axis.hist(fracdatanann)
axis.set_xlabel('$f_{nan}$')
axis.set_ylabel('$N(f_{nan})$')
path = gdat.pathimag + 'histfracdatanann_%s.pdf' % (gdat.strgcntp)
print('Writing to %s...' % path)
plt.savefig(path)
plt.close()
# inject signal
if gdat.datatype == 'mock':
gdat.numbtime = 50
gdat.numbdata = 100
gdat.time = np.arange(gdat.numbtime)
lcur = np.random.randn(gdat.numbdata * gdat.numbtime).reshape((gdat.numbdata, gdat.numbtime, 1))
gdat.numbtime = gdat.time.size
gdat.indxsupn = np.arange(numbsupn)
truecntpsour = np.empty((gdat.numbtime, gdat.numbsour))
truemagt = np.empty((gdat.numbtime, gdat.numbsour))
gdat.indxsour = np.arange(gdat.numbsour)
gdat.indxsoursupn = np.random.choice(gdat.indxsour, size=numbsupn, replace=False)
for n in gdat.indxsour:
if n in gdat.indxsoursupn:
timenorm = -0.5 + (gdat.time / np.amax(gdat.time)) + 2. * (np.random.random(1) - 0.5)
objtrand = scipy.stats.skewnorm(10.).pdf(timenorm)
objtrand /= np.amax(objtrand)
truemagt[:, n] = 8. + 6. * (2. - objtrand)
else:
truemagt[:, n] = np.random.rand() * 5 + 15.
truecntpsour[:, n] = 10**((20.424 - truemagt[:, n]) / 2.5)
gdat.truemagtmean = np.mean(truemagt, 0)
gdat.truemagtstdv = np.std(truemagt, 0)
numbdata = lcur.shape[0]
if gdat.datatype == 'mock':
listlabltrue = np.zeros(numbdata, dtype=int)
numbinli = numbdata - gdat.numbsour
numboutl = gdat.numbsour
# plot the data
figr, axis = plt.subplots(10, 4)
indxdata = np.arange(numbdata)
numbdataplot = min(40, numbdata)
indxdataplot = np.random.choice(indxdata, size=numbdataplot, replace=False)
for a in range(10):
for b in range(4):
p = a * 4 + b
if p >= numbdata:
continue
axis[a][b].plot(gdat.time, lcur[indxdataplot[p], :, 0], color='black', ls='', marker='o', markersize=1)
if a != 9:
axis[a][b].set_xticks([])
if b != 0:
axis[a][b].set_yticks([])
plt.subplots_adjust(hspace=0, wspace=0)
path = gdat.pathimag + 'lcurrand_%s.pdf' % (gdat.strgcntp)
print('Writing to %s...' % path)
plt.savefig(path)
plt.close()
# temporal median filter
numbtimefilt = min(9, gdat.numbtime)
if numbtimefilt % 2 == 0:
numbtimefilt -= 1
print('Performing the temporal median filter...')
# rebin in time
if gdat.numbtimerebn is not None and gdat.numbtime > gdat.numbtimerebn:
print('Rebinning in time...')
numbtimeoldd = gdat.numbtime
gdat.numbtime = gdat.numbtimerebn
numbtimebins = numbtimeoldd // gdat.numbtime
cntpmemoneww = np.zeros((numbsidememo, numbsidememo, gdat.numbtime)) - 1.
timeneww = np.zeros(gdat.numbtime)
for t in range(gdat.numbtime):
if t == gdat.numbtime - 1:
cntpmemoneww[:, :, t] = np.mean(cntpmemo[:, :, (gdat.numbtime-1)*numbtimebins:], axis=2)
timeneww[t] = np.mean(gdat.time[(gdat.numbtime-1)*numbtimebins:])
else:
cntpmemoneww[:, :, t] = np.mean(cntpmemo[:, :, t*numbtimebins:(t+1)*numbtimebins], axis=2)
timeneww[t] = np.mean(gdat.time[t*numbtimebins:(t+1)*numbtimebins])
gdat.indxtimegood = np.isfinite(timeneww)
gdat.time = timeneww[gdat.indxtimegood]
gdat.numbtime = gdat.indxtimegood.size
gdat.indxtime = np.arange(gdat.numbtime)
# calculate derived maps
## RMS image
strgtype = 'tsne'
lcuravgd = np.empty(gdat.numbtime)  # api: numpy.empty
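# Standalone sketch (illustration only, separate from the pipeline above): the
# block-averaging used for down-sampling reshapes each axis into (coarse, factor)
# blocks and averages over the factor axes, e.g. a 4x4 image down to 2x2:
_img = np.arange(16, dtype=float).reshape(4, 4)
_down = _img.reshape(2, 2, 2, 2).mean(axis=(1, 3))
assert np.allclose(_down, [[2.5, 4.5], [10.5, 12.5]])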
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: <NAME> <<EMAIL>>
# Minor fixes by <NAME>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME>
# <NAME> <<EMAIL>>
# (parts based on earlier work by <NAME>)
#
# License: BSD 3 clause
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.special import logsumexp
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import deprecated
from .utils.extmath import safe_sparse_dot
from .utils.multiclass import _check_partial_fit_first_call
from .utils.validation import check_is_fitted, check_non_negative
from .utils.validation import _check_sample_weight
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB', 'ComplementNB',
'CategoricalNB']
class _BaseNB(ClassifierMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape (n_classes, n_samples).
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
@abstractmethod
def _check_X(self, X):
"""To be overridden in subclasses with the actual checks.
Only used in predict* methods.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Predicted target values for X
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
check_is_fitted(self)
X = self._check_X(X)
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : array-like of shape (n_samples, n_classes)
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute :term:`classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(_BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via :meth:`partial_fit`.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by <NAME>, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Parameters
----------
priors : array-like of shape (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
var_smoothing : float, default=1e-9
Portion of the largest variance of all features that is added to
variances for calculation stability.
.. versionadded:: 0.20
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
number of training samples observed in each class.
class_prior_ : ndarray of shape (n_classes,)
probability of each class.
classes_ : ndarray of shape (n_classes,)
class labels known to the classifier.
epsilon_ : float
absolute additive value to variances.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
sigma_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. deprecated:: 1.0
`sigma_` is deprecated in 1.0 and will be removed in 1.2.
Use `var_` instead.
var_ : ndarray of shape (n_classes, n_features)
Variance of each feature per class.
.. versionadded:: 1.0
theta_ : ndarray of shape (n_classes, n_features)
mean of each feature per class.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, *, priors=None, var_smoothing=1e-9):
self.priors = priors
self.var_smoothing = var_smoothing
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
"""
X, y = self._validate_data(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, reset=False)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by <NAME>, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like of shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like of shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like of shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like of shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_new * n_past / n_total) * (mu - new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
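# Worked example added as a comment (not part of scikit-learn): with n_past=2,
# mu=1.0, var=0.25 (e.g. old points {0.5, 1.5}) and a new batch X=[2, 4]
# (n_new=2, new_mu=3.0, new_var=1.0):
#   total_mu  = (2*3.0 + 2*1.0) / 4                 = 2.0
#   total_ssd = 2*0.25 + 2*1.0 + (2*2/4)*(1.0-3.0)**2 = 6.5
#   total_var = 6.5 / 4                             = 1.625
# which matches np.mean / np.var over the pooled data {0.5, 1.5, 2, 4}.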
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit : bool, default=False
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
if _refit:
self.classes_ = None
first_call = _check_partial_fit_first_call(self, classes)
X, y = self._validate_data(X, y, reset=first_call)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
self.epsilon_ = self.var_smoothing * np.var(X, axis=0).max()
if first_call:
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.var_ = np.zeros((n_classes, n_features))
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
# Initialise the class prior
# Take into account the priors
if self.priors is not None:
priors = np.asarray(self.priors)
# Check that the provide prior match the number of classes
if len(priors) != n_classes:
raise ValueError('Number of priors must match number of'
' classes.')
# Check that the sum is 1
if not np.isclose(priors.sum(), 1.0):
raise ValueError('The sum of the priors should be 1.')
# Check that the prior are non-negative
if (priors < 0).any():
raise ValueError('Priors must be non-negative.')
self.class_prior_ = priors
else:
# Initialize the priors to zeros for each class
self.class_prior_ = np.zeros(len(self.classes_),
dtype=np.float64)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.var_[:, :] -= self.epsilon_
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = np.in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(unique_y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.var_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.var_[i, :] = new_sigma
self.class_count_[i] += N_i
self.var_[:, :] += self.epsilon_
# Update if only no priors is provided
if self.priors is None:
# Empirical prior, with sample_weight taken into account
self.class_prior_ = self.class_count_ / self.class_count_.sum()
return self
def _joint_log_likelihood(self, X):
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.var_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.var_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
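# Added note (not part of scikit-learn): the loop above evaluates, per class c,
#   log P(c) + sum_j [ -0.5*log(2*pi*var_[c, j]) - (X[:, j] - theta_[c, j])**2 / (2*var_[c, j]) ]
# i.e. the log prior plus the log density of a diagonal (independent-feature) Gaussian.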
@deprecated( # type: ignore
"Attribute sigma_ was deprecated in 1.0 and will be removed in"
"1.2. Use var_ instead."
)
@property
def sigma_(self):
return self.var_
_ALPHA_MIN = 1e-10
class _BaseDiscreteNB(_BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per _BaseNB
"""
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
return self._validate_data(X, accept_sparse='csr', reset=False)
def _check_X_y(self, X, y, reset=True):
"""Validate X and y in fit methods."""
return self._validate_data(X, y, accept_sparse='csr', reset=reset)
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
with warnings.catch_warnings():
# silence the warning when count is 0 because class was not yet
# observed
warnings.simplefilter("ignore", RuntimeWarning)
log_class_count = np.log(self.class_count_)
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (log_class_count -
np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.full(n_classes, -np.log(n_classes))
def _check_alpha(self):
if np.min(self.alpha) < 0:
raise ValueError('Smoothing parameter alpha = %.1e. '
'alpha should be > 0.' % np.min(self.alpha))
if isinstance(self.alpha, np.ndarray):
if not self.alpha.shape[0] == self.n_features_in_:
raise ValueError("alpha should be a scalar or a numpy array "
"with shape [n_features]")
if np.min(self.alpha) < _ALPHA_MIN:
warnings.warn('alpha too small will result in numeric errors, '
'setting alpha = %.1e' % _ALPHA_MIN)
return np.maximum(self.alpha, _ALPHA_MIN)
return self.alpha
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
first_call = not hasattr(self, "classes_")
X, y = self._check_X_y(X, y, reset=first_call)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_classes = len(classes)
self._init_counters(n_classes, n_features)
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64, copy=False)
if sample_weight is not None:
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
X, y = self._check_X_y(X, y)
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
if len(self.classes_) == 2:
Y = np.concatenate((1 - Y, Y), axis=1)
else: # degenerate case: just one class
Y = np.ones_like(Y)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
if sample_weight is not None:
Y = Y.astype(np.float64, copy=False)
sample_weight = _check_sample_weight(sample_weight, X)
sample_weight = np.atleast_2d(sample_weight)
Y *= sample_weight.T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_classes = Y.shape[1]
self._init_counters(n_classes, n_features)
self._count(X, Y)
alpha = self._check_alpha()
self._update_feature_log_prob(alpha)
self._update_class_log_prior(class_prior=class_prior)
return self
def _init_counters(self, n_classes, n_features):
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_classes, n_features),
dtype=np.float64)
# mypy error: Decorated property not supported
@deprecated("Attribute coef_ was deprecated in " # type: ignore
"version 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def coef_(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
# mypy error: Decorated property not supported
@deprecated("Attribute intercept_ was deprecated in " # type: ignore
"version 0.24 and will be removed in 1.1 (renaming of 0.26).")
@property
def intercept_(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
def _more_tags(self):
return {'poor_score': True}
# TODO: Remove in 1.2
# mypy error: Decorated property not supported
@deprecated( # type: ignore
"Attribute n_features_ was deprecated in version 1.0 and will be "
"removed in 1.2. Use 'n_features_in_' instead."
)
@property
def n_features_(self):
return self.n_features_in_
class MultinomialNB(_BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `MultinomialNB`
as a linear model.
.. deprecated:: 0.24
``coef_`` is deprecated in 0.24 and will be removed in 1.1
(renaming of 0.26).
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `MultinomialNB`
as a linear model.
.. deprecated:: 0.24
``intercept_`` is deprecated in 0.24 and will be removed in 1.1
(renaming of 0.26).
n_features_ : int
Number of features of each sample.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB()
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
<NAME>, <NAME> and <NAME> (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _more_tags(self):
return {'requires_positive_X': True}
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
check_non_negative(X, "MultinomialNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
return (safe_sparse_dot(X, self.feature_log_prob_.T) +
self.class_log_prior_)
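# Illustrative sketch, not part of scikit-learn: a standalone demo of how the
# Lidstone smoothing in `_update_feature_log_prob` turns raw counts into
# ``feature_log_prob_``. The toy counts below are made up for illustration.
def _demo_multinomial_smoothing(alpha=1.0):
    feature_count = np.array([[2., 1., 0.],
                              [0., 3., 1.]])  # two classes, three features
    smoothed_fc = feature_count + alpha
    smoothed_cc = smoothed_fc.sum(axis=1)
    # Each row of exp(result) sums to 1, i.e. P(x_i | y) per class.
    return np.log(smoothed_fc) - np.log(smoothed_cc.reshape(-1, 1))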
class ComplementNB(_BaseDiscreteNB):
"""The Complement Naive Bayes classifier described in Rennie et al. (2003).
The Complement Naive Bayes classifier was designed to correct the "severe
assumptions" made by the standard Multinomial Naive Bayes classifier. It is
particularly suited for imbalanced data sets.
Read more in the :ref:`User Guide <complement_naive_bayes>`.
.. versionadded:: 0.20
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter (0 for no smoothing).
fit_prior : bool, default=True
Only used in edge case with a single class in the training set.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. Not used.
norm : bool, default=False
Whether or not a second normalization of the weights is performed. The
default behavior mirrors the implementations found in Mahout and Weka,
which do not follow the full algorithm described in Table 9 of the
paper.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class. Only used in edge
case with a single class in the training set.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `ComplementNB`
as a linear model.
.. deprecated:: 0.24
``coef_`` is deprecated in 0.24 and will be removed in 1.1
(renaming of 0.26).
feature_all_ : ndarray of shape (n_features,)
Number of samples encountered for each feature during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature) during fitting.
This value is weighted by the sample weight when provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical weights for class complements.
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `ComplementNB`
as a linear model.
.. deprecated:: 0.24
            ``intercept_`` is deprecated in 0.24 and will be removed in 1.1
(renaming of 0.26).
n_features_ : int
Number of features of each sample.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import ComplementNB
>>> clf = ComplementNB()
>>> clf.fit(X, y)
ComplementNB()
>>> print(clf.predict(X[2:3]))
[3]
References
----------
<NAME>., <NAME>., <NAME>., & <NAME>. (2003).
Tackling the poor assumptions of naive bayes text classifiers. In ICML
(Vol. 3, pp. 616-623).
https://people.csail.mit.edu/jrennie/papers/icml03-nb.pdf
"""
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
norm=False):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.norm = norm
def _more_tags(self):
return {'requires_positive_X': True}
def _count(self, X, Y):
"""Count feature occurrences."""
check_non_negative(X, "ComplementNB (input X)")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
self.feature_all_ = self.feature_count_.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and compute the weights."""
comp_count = self.feature_all_ + alpha - self.feature_count_
logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
# _BaseNB.predict uses argmax, but ComplementNB operates with argmin.
if self.norm:
summed = logged.sum(axis=1, keepdims=True)
feature_log_prob = logged / summed
else:
feature_log_prob = -logged
self.feature_log_prob_ = feature_log_prob
def _joint_log_likelihood(self, X):
"""Calculate the class scores for the samples in X."""
jll = safe_sparse_dot(X, self.feature_log_prob_.T)
if len(self.classes_) == 1:
jll += self.class_log_prior_
return jll
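# Illustrative sketch, not part of scikit-learn: mirrors the complement-count
# weighting in `_update_feature_log_prob` above on made-up numbers. Negating
# the logged complement probabilities lets the argmax used by `_BaseNB.predict`
# behave like the argmin over complement fit described in Rennie et al. (2003).
def _demo_complement_weights(alpha=1.0):
    feature_count = np.array([[4., 0., 1.],
                              [1., 3., 2.]])  # two classes, three features
    feature_all = feature_count.sum(axis=0)
    comp_count = feature_all + alpha - feature_count
    logged = np.log(comp_count / comp_count.sum(axis=1, keepdims=True))
    return -logged  # corresponds to feature_log_prob_ when norm=False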
class BernoulliNB(_BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, default=0.0
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Log probability of each class (smoothed).
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
coef_ : ndarray of shape (n_classes, n_features)
Mirrors ``feature_log_prob_`` for interpreting `BernoulliNB`
as a linear model.
feature_count_ : ndarray of shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
feature_log_prob_ : ndarray of shape (n_classes, n_features)
Empirical log probability of features given a class, P(x_i|y).
intercept_ : ndarray of shape (n_classes,)
Mirrors ``class_log_prior_`` for interpreting `BernoulliNB`
as a linear model.
n_features_ : int
Number of features of each sample.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB()
>>> print(clf.predict(X[2:3]))
[3]
References
----------
<NAME>, <NAME> and <NAME> (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
<NAME> and <NAME> (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
<NAME>, <NAME> and <NAME> (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, *, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
X = super()._check_X(X)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X
def _check_X_y(self, X, y, reset=True):
X, y = super()._check_X_y(X, y, reset=reset)
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
return X, y
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self, alpha):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + alpha
smoothed_cc = self.class_count_ + alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc) -
np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
n_features = self.feature_log_prob_.shape[1]
n_features_X = X.shape[1]
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
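# Illustrative sketch, not part of scikit-learn: verifies the algebraic shortcut
# used in `BernoulliNB._joint_log_likelihood` on dense toy data, namely
# X.log(p) + (1 - X).log(1 - p) == X.(log(p) - log(1 - p)) + sum(log(1 - p)).
def _demo_bernoulli_jll_identity():
    rng = np.random.RandomState(0)
    X = rng.randint(2, size=(4, 3)).astype(float)
    log_p = np.log(rng.uniform(0.1, 0.9, size=(2, 3)))  # stand-in feature_log_prob_
    neg_prob = np.log(1 - np.exp(log_p))
    direct = X.dot(log_p.T) + (1 - X).dot(neg_prob.T)
    shortcut = X.dot((log_p - neg_prob).T) + neg_prob.sum(axis=1)
    return np.allclose(direct, shortcut)  # True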
class CategoricalNB(_BaseDiscreteNB):
"""Naive Bayes classifier for categorical features
The categorical Naive Bayes classifier is suitable for classification with
discrete features that are categorically distributed. The categories of
each feature are drawn from a categorical distribution.
Read more in the :ref:`User Guide <categorical_naive_bayes>`.
Parameters
----------
alpha : float, default=1.0
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : bool, default=True
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like of shape (n_classes,), default=None
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
min_categories : int or array-like of shape (n_features,), default=None
Minimum number of categories per feature.
- integer: Sets the minimum number of categories per feature to
          `n_categories` for each feature.
- array-like: shape (n_features,) where `n_categories[i]` holds the
minimum number of categories for the ith column of the input.
- None (default): Determines the number of categories automatically
from the training data.
.. versionadded:: 0.24
Attributes
----------
category_count_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the number of samples
encountered for each class and category of the specific feature.
class_count_ : ndarray of shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
class_log_prior_ : ndarray of shape (n_classes,)
Smoothed empirical log probability for each class.
classes_ : ndarray of shape (n_classes,)
Class labels known to the classifier
feature_log_prob_ : list of arrays of shape (n_features,)
Holds arrays of shape (n_classes, n_categories of respective feature)
for each feature. Each array provides the empirical log probability
of categories given the respective feature and class, ``P(x_i|y)``.
n_features_ : int
Number of features of each sample.
.. deprecated:: 1.0
Attribute `n_features_` was deprecated in version 1.0 and will be
removed in 1.2. Use `n_features_in_` instead.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
n_categories_ : ndarray of shape (n_features,), dtype=np.int64
Number of categories for each feature. This value is
inferred from the data or set by the minimum number of categories.
.. versionadded:: 0.24
Examples
--------
>>> import numpy as np
>>> rng = np.random.RandomState(1)
>>> X = rng.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import CategoricalNB
>>> clf = CategoricalNB()
>>> clf.fit(X, y)
CategoricalNB()
>>> print(clf.predict(X[2:3]))
[3]
"""
def __init__(self, *, alpha=1.0, fit_prior=True, class_prior=None,
min_categories=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
self.min_categories = min_categories
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
return super().fit(X, y, sample_weight=sample_weight)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features. Here, each feature of X is
assumed to be from a different categorical distribution.
It is further assumed that all categories of each feature are
represented by the numbers 0, ..., n - 1, where n refers to the
total number of categories for the given feature. This can, for
instance, be achieved with the help of OrdinalEncoder.
y : array-like of shape (n_samples,)
Target values.
classes : array-like of shape (n_classes,), default=None
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like of shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
"""
return super().partial_fit(X, y, classes,
sample_weight=sample_weight)
def _more_tags(self):
return {'requires_positive_X': True}
def _check_X(self, X):
"""Validate X, used only in predict* methods."""
X = self._validate_data(X, dtype='int', accept_sparse=False,
force_all_finite=True, reset=False)
check_non_negative(X, "CategoricalNB (input X)")
return X
def _check_X_y(self, X, y, reset=True):
X, y = self._validate_data(X, y, dtype='int', accept_sparse=False,
force_all_finite=True, reset=reset)
check_non_negative(X, "CategoricalNB (input X)")
return X, y
def _init_counters(self, n_classes, n_features):
self.class_count_ = np.zeros(n_classes, dtype=np.float64)
self.category_count_ = [np.zeros((n_classes, 0))
for _ in range(n_features)]
@staticmethod
def _validate_n_categories(X, min_categories):
        # rely on max to infer n_categories, since categories are encoded as 0...n-1
n_categories_X = X.max(axis=0) + 1
        min_categories_ = np.array(min_categories)
import os
import glob
import pickle as pkl
import torch
import numpy as np
import nibabel as nib
import scipy.stats
from tqdm import tqdm
import eval
import layers_list
from custom_transforms import brats_label_to_raw
from monai.losses import DiceLoss
import skimage.morphology as morph
class VisNotFoundError(Exception):
pass
def get_file(pattern):
files = list(glob.glob(pattern))
if len(files) != 1:
raise VisNotFoundError(f'not found {pattern}')
return files[0]
#_cache = {}
def load_vis(vis_key, layer_key, ex_id, args):
key = (vis_key, layer_key, ex_id)
#if key in _cache:
# return _cache[key]
vis_dir = os.path.join(args.model_dir,
f'an_vis_niftis_{vis_key}_{layer_key}',
'result_dataset',
ex_id)
vis_name = eval.vis_key_name_mapping[vis_key]
layer_name = dict(layers_list.unet_layers)[layer_key]
vis_fname = get_file(os.path.join(vis_dir,
f'*_{vis_name.replace(" ", "-")}_{layer_name.replace(" ", "-")}.nii.gz'))
vis = nib.load(vis_fname).get_fdata().transpose((3, 0, 1, 2))
# BCHWD
vis = torch.tensor(vis[None])
#_cache[key] = vis
return vis
def find_border(seg):
assert len(seg.shape) == 3
result = morph.dilation(seg, morph.cube(3))
return np.maximum(0, result - seg)
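# Illustrative sketch (not part of the original evaluation script): find_border
# on a small synthetic cube keeps only the one-voxel dilation shell, i.e. voxels
# adjacent to, but outside of, the segmentation.
def _demo_find_border():
    toy_seg = np.zeros((8, 8, 8))
    toy_seg[3:5, 3:5, 3:5] = 1
    border = find_border(toy_seg)
    return border.sum() > 0 and (border * toy_seg).sum() == 0  # True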
# overlap with background
with open('background_mask.pkl', 'rb') as f:
background_mask = pkl.load(f)
background_mask = torch.tensor(background_mask[None, None])
_bkg_cache = {}
def get_background_like(map1):
key = map1.shape, map1.dtype, map1.device
if key in _bkg_cache:
return _bkg_cache[key]
result = background_mask.expand(*map1.shape).to(map1)
_bkg_cache[key] = result
return result
def _single_rank_compare(img1, img2):
assert len(img1.shape) == 5
assert len(img2.shape) == 5
assert img1.shape[0] == 1
assert img1.shape[1] == img2.shape[1]
result = []
for c in range(img1.shape[1]):
corr = scipy.stats.spearmanr(img1[0, c].numpy().flatten(),
img2[0, c].numpy().flatten())
result.append(corr)
result = [result]
return torch.tensor(result).to(img1)
def rank_compare(img1, img2):
result = _single_rank_compare(img1, img2)
result = result.to(img1)
result_back = _single_rank_compare(get_background_like(img1), img2)
result_back = result_back.to(img1)
assert len(result.shape) == 3
return torch.stack([result, result_back], dim=3)
_full_cache = {}
def _full_like(map1, val):
key = map1.shape, map1.dtype, map1.device, val
if key in _full_cache:
return _full_cache[key]
result = torch.full_like(map1, val)
_full_cache[key] = result
return result
# use the loss function instead of the metric because it's soft so I can ignore thresholding
iou_loss = DiceLoss(jaccard=True, reduction='none')
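# Illustrative sketch (assumption, not from the original script): the "soft" IoU
# referred to above is just 1 - DiceLoss(jaccard=True) evaluated on the raw
# predicted probabilities, with no thresholding step.
def _demo_soft_iou():
    pred = torch.full((1, 1, 4, 4, 4), 0.5)
    target = torch.zeros((1, 1, 4, 4, 4))
    target[..., :2] = 1.0
    return 1 - iou_loss(pred, target)  # soft IoU per (batch, channel)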
def iou_compare(map1, map2):
result = 1 - iou_loss(map1, map2)
result0_0 = 1 - iou_loss(_full_like(map1, 0.0), map2)
result0_5 = 1 - iou_loss(_full_like(map1, 0.5), map2)
result1_0 = 1 - iou_loss(_full_like(map1, 1.0), map2)
result_bkg = 1 - iou_loss(get_background_like(map1), map2)
assert len(result.shape) == 2
return torch.stack([result, result0_0, result0_5, result1_0, result_bkg], dim=2)
def metric_one_volume(ex_dir):
examples = [] # one per vis
ex_id = os.path.basename(ex_dir)
# load gt and add l1,l2
gt_fname = get_file(os.path.join(ex_dir, '*_gt.nii.gz'))
gt = nib.load(gt_fname).get_fdata().transpose((3, 0, 1, 2))
extra_gt = brats_label_to_raw(gt, onehot=True)
# just labels 1 and 2 since label 4 is ET (channel 0 of gt)
extra_gt = extra_gt[:2]
# channels = ET, TC, WT, l1, l2
    gt = np.concatenate([gt, extra_gt], axis=0)
# -*- coding: utf-8 -*-
u"""
Created on 2017-1-25
@author: cheng.li
"""
import unittest
import copy
import pickle
import tempfile
import os
import numpy as np
import pandas as pd
from PyFin.Analysis.SeriesValues import SeriesValues
class TestSecurityValues(unittest.TestCase):
def testSecurityValuesInit(self):
data = np.array([1, 2, 3])
index = ['c', 'b', 'a']
test = SeriesValues(data, dict(zip(index, range(len(index)))))
expected = dict(zip(index, data))
for name in test.index():
self.assertEqual(test[name], expected[name])
def testSecurityValuesRank(self):
data = np.array([3, 2, np.nan, np.nan, 4, 5])
index = [1, 2, 3, 4, 5, 6]
data = SeriesValues(data, index)
test = data.rank()
expected = SeriesValues(np.array([2, 1, np.nan, np.nan, 3, 4]), dict(zip(index, range(len(index)))))
for name in test.index():
if np.isnan(test[name]):
self.assertTrue(np.isnan(expected[name]))
else:
self.assertEqual(test[name], expected[name])
def testSecurityValuesRankWithGroup(self):
data = np.random.randn(3000)
groups = np.random.randint(0, 30, 3000)
index = list(range(3000))
data = SeriesValues(data, index)
groups = SeriesValues(groups, index)
test = data.rank(groups)
pd_series = pd.Series(data.values)
expected = pd_series.groupby(groups.values).rank()
np.testing.assert_array_almost_equal(test.values, expected.values)
def testSecurityValuesUnit(self):
data = np.array([3, -2, np.nan, np.nan, 4, 5])
index = [1, 2, 3, 4, 5, 6]
test = SeriesValues(data, index)
test = test.unit()
expected = SeriesValues(data / np.nansum(np.abs(data)), dict(zip(index, range(len(index)))))
for name in test.index():
if np.isnan(test[name]):
self.assertTrue(np.isnan(expected[name]))
else:
self.assertEqual(test[name], expected[name])
def testSecurityValuesDeepCopy(self):
data = np.array([3, 2, 2., 1., 4., 5.])
index = [1, 2, 3, 4, 5, 6]
test = SeriesValues(data, index)
copied = copy.deepcopy(test)
np.testing.assert_array_equal(test.values, copied.values)
self.assertEqual(test.name_mapping, copied.name_mapping)
def testSecurityValuesAdd(self):
        data1 = np.array([3, 2, 2., 1., 4., 5.])
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
# Then we translate that to a shear field via FFT.
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
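# Illustrative sketch (not part of the original test suite): shapes returned by
# generate_shear_field. gamma is returned split into real/imaginary parts, and
# all outputs share the length of the sampled positions.
def _demo_generate_shear_field():
    rng = np.random.RandomState(0)
    x, y, g1, g2, k = generate_shear_field(100, 10, rng)
    assert x.shape == y.shape == g1.shape == g2.shape == k.shape == (100,)
    return g1, g2, k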
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
kkkp.process(catp, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.4*tol_factor)
# Patch on 2,3
print('with patches on 2,3:')
kkkp.process(cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Patch on 1,3
print('with patches on 1,3:')
kkkp.process(catp, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Finally a set (with all patches) using the KKKCrossCorrelation class.
kkkc = treecorr.KKKCrossCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
print('CrossCorrelation:')
kkkc.process(catp, catp, catp)
for k1 in kkkc._all:
print(k1.ntri.ravel())
print(k1.zeta.ravel())
print(k1.varzeta.ravel())
np.testing.assert_allclose(k1.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(k1.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(k1.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkc.estimate_cov('jackknife')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkc.estimate_cov('sample')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkc.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkc.estimate_cov('bootstrap')
print(np.diagonal(cov))
for i in range(6):
v = np.diagonal(cov)[i*6:(i+1)*6]
print('max log(ratio) = ',np.max(np.abs(np.log(v)-np.log(var_kkk))))
np.testing.assert_allclose(np.log(v), np.log(var_kkk), atol=0.5*tol_factor)
# All catalogs need to have the same number of patches
catq = treecorr.Catalog(x=x, y=y, k=k, npatch=2*npatch)
with assert_raises(RuntimeError):
kkkp.process(catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catp, catq, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catp, catq)
with assert_raises(RuntimeError):
kkkp.process(catq, catq, catp)
@timer
def test_ggg_jk():
# Test jackknife and other covariance estimates for ggg correlations.
if __name__ == '__main__':
# This setup takes about 590 sec to run.
nhalo = 5000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 160 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 50 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 13 sec to run.
nhalo = 500
nsource = 500
npatch = 8
tol_factor = 3
# I couldn't figure out a way to get reasonable S/N in the shear field. I thought doing
# discrete halos would give some significant 3pt shear pattern, at least for equilateral
# triangles, but the signal here is still consistent with zero. :(
    # The point is the variance, which is still calculated ok, but I would rather
    # have had something with S/N > 0.
# For these tests, I set up the binning to just accumulate all roughly equilateral triangles
# in a small separation range. The binning always uses two bins for each to get + and - v
# bins. So this function averages these two values to produce 1 value for each gamma.
f = lambda g: np.array([np.mean(g.gam0), np.mean(g.gam1), np.mean(g.gam2), np.mean(g.gam3)])
file_name = 'data/test_ggg_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_gggs = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng1)
# For some reason std(g2) is coming out about 1.5x larger than std(g1).
# Probably a sign of some error in the generate function, but I don't see it.
# For this purpose I think it doesn't really matter, but it's a bit odd.
print(run,': ',np.mean(g1),np.std(g1),np.mean(g2),np.std(g2))
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1)
ggg.process(cat)
print(ggg.ntri.ravel())
print(f(ggg))
all_gggs.append(ggg)
all_ggg = np.array([f(ggg) for ggg in all_gggs])
mean_ggg = np.mean(all_ggg, axis=0)
var_ggg = np.var(all_ggg, axis=0)
np.savez(file_name, mean_ggg=mean_ggg, var_ggg=var_ggg)
data = np.load(file_name)
mean_ggg = data['mean_ggg']
var_ggg = data['var_ggg']
print('mean = ',mean_ggg)
print('var = ',var_ggg)
rng = np.random.RandomState(12345)
x, y, g1, g2, _ = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2)
ggg = treecorr.GGGCorrelation(nbins=1, min_sep=20., max_sep=40.,
min_u=0.6, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.6, nvbins=1, rng=rng)
ggg.process(cat)
print(ggg.ntri.ravel())
print(ggg.gam0.ravel())
print(ggg.gam1.ravel())
print(ggg.gam2.ravel())
print(ggg.gam3.ravel())
gggp = ggg.copy()
catp = treecorr.Catalog(x=x, y=y, g1=g1, g2=g2, npatch=npatch)
# Do the same thing with patches.
gggp.process(catp)
print('with patches:')
print(gggp.ntri.ravel())
print(gggp.vargam0.ravel())
print(gggp.vargam1.ravel())
print(gggp.vargam2.ravel())
print(gggp.vargam3.ravel())
print(gggp.gam0.ravel())
print(gggp.gam1.ravel())
print(gggp.gam2.ravel())
print(gggp.gam3.ravel())
np.testing.assert_allclose(gggp.ntri, ggg.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.vargam0, ggg.vargam0, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam1, ggg.vargam1, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam2, ggg.vargam2, rtol=0.1 * tol_factor)
np.testing.assert_allclose(gggp.vargam3, ggg.vargam3, rtol=0.1 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.9*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
gggp.process(catp, catp, catp)
print(gggp.gam0.ravel())
np.testing.assert_allclose(gggp.gam0, ggg.gam0, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam1, ggg.gam1, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam2, ggg.gam2, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
np.testing.assert_allclose(gggp.gam3, ggg.gam3, rtol=0.3 * tol_factor, atol=0.3 * tol_factor)
print('jackknife:')
cov = gggp.estimate_cov('jackknife', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.4*tol_factor)
print('sample:')
cov = gggp.estimate_cov('sample', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.6*tol_factor)
print('marked:')
cov = gggp.estimate_cov('marked_bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.8*tol_factor)
print('bootstrap:')
cov = gggp.estimate_cov('bootstrap', func=f)
print(np.diagonal(cov).real)
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_ggg))))
    np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_ggg), atol=0.3*tol_factor)
import os, sys
import glob
import shutil
import argparse
import time
import wave
import numpy as np
class Input():
def __init__(self, filename, n_input):
_, ext = os.path.splitext(filename)
if ext == '.wav':
if os.path.isfile(filename):
self.input = _InputMultiWav(filename, n_input)
else:
self.input = _InputMonoWav(filename, n_input)
elif ext == '.npy':
if os.path.isfile(filename):
self.input = _InputMultiNpy(filename, n_input)
else:
self.input = _InputMonoNpy(filename, n_input)
else:
message = '{} file is not supported.'.format(ext)
print(message)
sys.exit()
self.nframes = self.input.nframes
return
def close(self):
self.input.close()
return
def readframes(self, n):
return self.input.readframes(n)
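# Illustrative usage sketch (the file name below is hypothetical, not part of
# the original script). Input dispatches on the extension and on whether the
# path exists as one multichannel file or as per-channel files like 'in_{i}.wav'.
def _demo_input_usage(path='recording.wav', n_input=8):
    stream = Input(path, n_input)
    block = stream.readframes(1024)  # shape (n_input, <=1024), floats in [-1, 1)
    stream.close()
    return block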
class _InputMultiWav():
def __init__(self, filename, n_input):
self.wr = wave.open(filename, 'r')
params = self.wr.getparams()
self.nchannels = params[0]
self.ws = params[1]
self.fs = params[2]
self.nframes = params[3]
if self.nchannels != n_input:
print('number of input channels does not match.')
print('%s contains %d ch signals. != %d'\
% (filename, self.nchannels, n_input))
sys.exit()
self.pointer = 0
return
def close(self):
self.wr.close()
return
def readframes(self, n):
s = self.pointer
e = s + n
if e > self.nframes:
e = self.nframes
N = e - s
frames = self.wr.readframes(N)
if self.ws == 3:
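            # 24-bit samples: pack each little-endian 3-byte frame into the upper
            # 3 bytes of a zeroed 4-byte slot, reinterpret as int32 (which keeps
            # the sign bit and scales by 256), then divide by 2**31 to normalize
            # to roughly [-1, 1). Assumes a little-endian platform.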
d = np.zeros((N * self.nchannels, 4), dtype=np.uint8)
d[:, 1:] = np.frombuffer(frames, dtype=np.uint8).reshape(-1, 3)
data = d.view(np.int32)[:, 0] / 2147483648
elif self.ws == 2:
data = np.frombuffer(frames, dtype=np.int16) / 32768
elif self.ws == 4:
data = np.frombuffer(frames, dtype=np.int32) / 2147483648
data = data.reshape((self.nchannels, -1), order='F')
self.pointer += e - s
return data
class _InputMultiNpy():
def __init__(self, filename, n_input):
self.wr = np.load(filename, 'r')
self.nchannels = self.wr.shape[0]
if self.nchannels != n_input:
print('number of input channels does not match.')
print('%s contains %d ch signals. != %d'\
% (filename, self.nchannels, n_input))
sys.exit()
self.nframes = self.wr.shape[1]
self.pointer = 0
return
def close(self):
pass
return
def readframes(self, n):
s = self.pointer
e = s + n
if e > self.nframes:
e = self.nframes
data = np.copy(self.wr[:, s:e])
self.pointer += e - s
return data
class _InputMonoWav():
def __init__(self, filename, n_input):
self.nchannels = n_input
for ch in range(1, self.nchannels + 1):
if not os.path.isfile(filename.format(i=ch)):
                print('input file does not exist.')
print('->', filename.format(i=ch))
sys.exit()
self.wr = []
for ch in range(1, self.nchannels + 1):
w = wave.open(filename.format(i=ch))
if w.getparams()[0] != 1:
message = '%s is not monaural file.' % filename.format(i=ch)
print(message)
sys.exit()
else:
self.wr.append(w)
params_ref = self.wr[0].getparams()
self.ws = params_ref[1]
self.fs = params_ref[2]
self.nframes = params_ref[3]
for i in range(1, self.nchannels):
params = self.wr[i].getparams()
if params[1] != self.ws:
print('input sampwidth is not unified.')
sys.exit()
if params[2] != self.fs:
print('input sample rate is not unified.')
sys.exit()
if params[3] != self.nframes:
print('input length is not aligned.')
sys.exit()
self.pointer = 0
return
def close(self):
for i in range(self.nchannels):
self.wr[i].close()
return
def readframes(self, n):
s = self.pointer
e = s + n
if e > self.nframes:
e = self.nframes
N = e - s
        data = np.empty([self.nchannels, N])
import pandas as pd
import numpy as np
from scipy import stats
UIM = pd.DataFrame(np.zeros((5000, 1000)))
IIM = pd.DataFrame(np.zeros((1000, 1000)))
UUM = pd.DataFrame(np.zeros((5000, 5000)))
top_n = 20
labels = pd.DataFrame(np.zeros(1000))
def initI(uid):
#Returns a list of item indices in UIM st. UIM(uid, i) = 0
cond = UIM[uid] == 0
return UIM.index[cond].tolist()
def initI_noIID(uid, iid):
#Returns a list of item indices in UIM st. UIM(uid, i) = 0 and i != iid
    return [i for i in initI(uid) if i != iid]
def cbscore(uid, iid):
    I = initI_noIID(uid, iid)
#Storing the IIM values of this iid column restricted to indices in I
M = IIM.iloc[I,iid]
#Sort M and return the top n indices
    return np.argsort(M)[:top_n]
def ucfscore(uid):
I = initI(uid)
#Storing list of user indices having the same label as this uid
cond = labels == labels[uid]
U = labels.index[cond].tolist()
return slicingUIM(U, I)
def icfscore(uid, iid):
    I = initI_noIID(uid, iid)
U = genU(iid)
#Storing top user indices
    U1 = np.argsort(UUM.loc[U, uid])[:top_n].tolist()
return slicingUIM(U1, I)
def slicingUIM(U, I):
#Storing UIM values for indices in I and U for respective axes
M = UIM.iloc[I,U]
#Square and sum across users
M2 = (M * M).sum(0)
#Sort M and return the top n indices
    return np.argsort(M2)[:top_n]
# Generate basic scoring_list to score the top 'n'
def scoring_list_gen(n):
scoring_list = np.zeros(n)
    for i in range(n):
scoring_list[i] = n - i
return scoring_list
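# e.g. scoring_list_gen(3) gives array([3., 2., 1.]): rank 0 receives the largest weight.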
# Score the top 'n' chosen through a particular method
def score(sorted_list, scoring_list, top_n):
    scored_list = np.zeros(5000)
from PIL import Image, ImageDraw
import matplotlib.path as mplPath
import numpy as np
import webbrowser
import os.path
import copy
import sys
import random
import argparse
POPULATION = 500
DIMENSION = 30
class DNA(object):
def __init__(self, _img_size, _polygons):
self.polygons = _polygons
self.size_img = _img_size
def draw(self, alpha):
size = self.size_img
img2 = Image.new('RGBA', size)
draw = Image.new('RGBA', size)
pdraw = ImageDraw.Draw(draw)
for polygon in self.polygons:
color = polygon.color
array_point = []
for point in polygon.points:
array_point.append(tuple([point[0], point[1]]))
pdraw.polygon(array_point, fill=(color[0], color[1], color[2], alpha))
img2 = Image.alpha_composite(img2, draw)
return img2
def random_polygon(self):
finish = []
for polygon in self.polygons:
if polygon.changeable is False:
finish.append(polygon)
if (len(finish) == len(self.polygons)):
print("\n\nFinished\n\n")
sys.exit()
rand = random.randrange(0, len(self.polygons))
while self.polygons[rand].changeable is False:
rand = random.randrange(0, len(self.polygons))
random_poly = self.polygons[rand]
return random_poly, rand
class Polygons(object):
def __init__(self, _points, _color, _fitness, _changeable):
self.points = _points
self.color = _color
self.fitness = _fitness
self.changeable = _changeable
def mutate_polygon(self, size):
rand = random.random()
if rand <= 0.5:
if (self.fitness[0] > self.fitness[1] and self.fitness[0] > self.fitness[2]):
idx = 0
elif (self.fitness[1] > self.fitness[0] and self.fitness[1] > self.fitness[2]):
idx = 1
else:
idx = 2
value = random.randrange(0, 256)
            color = np.array(self.color)
import os
import shutil
import time
import numpy as np
from baselines import logger
from collections import deque
import tensorflow as tf
from baselines.common import explained_variance, set_global_seeds
from ppo_iter.policies import build_policy
from baselines.common.tf_util import get_session
from baselines.common.mpi_util import sync_from_root
from ppo_iter.utils import get_docs, get_file_id, save_file_from_db, constfn, get_alpha
from ppo_iter.utils import scheduling, get_lr_fn, save_model, switch_training_model, get_all_burnin_data_dict
from ppo_iter.utils import safemean, save_data, load_batch
from ppo_iter.utils import db_uri, db_name
from ppo_iter.model import Model
try:
from mpi4py import MPI
except ImportError:
MPI = None
from ppo_iter.runner import Runner
def learn(*, network, env, total_timesteps, iter_loss, arch, _run,
seed=None, nsteps=2048, ent_coef=0.0, learning_rate=3e-4, lr_schedule=None,
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
load_path=None, mpi_rank_weight=1, comm=None,
eval=None, **network_kwargs):
'''
Learn policy using PPO algorithm (https://arxiv.org/abs/1707.06347)
Parameters:
----------
network:
The network model. Will only work with the one in this repo because of IBAC
env: baselines.common.vec_env.VecEnv
total_timesteps: int
number of timesteps (i.e. number of actions taken in the environment)
iter_loss: dict
the config dict as specified in default.yaml and/or overwritting by command line arguments
see sacred for further documentation
arch: dict
config dict similar to iter_loss
eval: dict
config dict similar to iter_loss
_run:
sacred Experiment._run object. Used for logging
ent_coef: float
policy entropy coefficient in the optimization objective
seed: float
random seed
nsteps: int
number of steps of the vectorized environment per update (i.e. batch size is nsteps * nenv where
nenv is number of environment copies simulated in parallel)
learning_rate: float
learning rate
lr_schedule: None or str
If None, use a const. learning rate. If string, only "linear" is implemented at the moment
vf_coef: float
Coefficient for vf optimisation
    max_grad_norm: float
Max gradient norm before it's clipped
gamma: float
Discount factor
lam: float
For GAE
log_interval: int
        number of training updates between logging events
nminibatches: int
number of training minibatches per update. For recurrent policies,
        should be less than or equal to the number of environments run in parallel.
noptepochs: int
number of training epochs per update
cliprange: float or function
clipping range, constant or schedule function [0,1] -> R+ where 1 is beginning of the training
and 0 is the end of the training
save_interval: int
number of timesteps between saving events
load_path: str
path to load the model from
**network_kwargs:
keyword arguments to the policy / network builder.
See baselines.common/policies.py/build_policy and arguments to a particular type of network
For instance, 'mlp' network architecture has arguments num_hidden and num_layers.
'''
# Set learning rate schedule
lr = get_lr_fn(lr_schedule, start_learning_rate=learning_rate)
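    # Note (assumption about get_lr_fn's contract): `lr` maps the remaining-progress
    # fraction `frac` in (0, 1] to a step size; a "linear" schedule would behave
    # roughly like `lambda frac: learning_rate * frac`, starting at `learning_rate`
    # and decaying towards zero as training ends.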
set_global_seeds(seed)
session = get_session()
# if isinstance(lr, float): lr = constfn(lr)
# else: assert callable(lr)
if isinstance(cliprange, float): cliprange = constfn(cliprange)
else: assert callable(cliprange)
total_timesteps = int(total_timesteps)
# Get the nb of env
nenvs = env.num_envs
# Get state_space and action_space
ob_space = env.observation_space
ac_space = env.action_space
# Calculate the batch_size
nbatch = nenvs * nsteps
nbatch_train = nbatch // nminibatches
is_mpi_root = (MPI is None or MPI.COMM_WORLD.Get_rank() == 0)
model_fn = Model
policy = build_policy(env, network, arch, **network_kwargs)
# Instantiate the model object (that creates act_model and train_model)
def create_model(scope_name, **kwargs):
return model_fn(scope_name=scope_name, policy=policy, ob_space=ob_space, ac_space=ac_space,
nbatch_act=nenvs, nbatch_train=nbatch_train,
nsteps=nsteps, ent_coef=ent_coef, vf_coef=vf_coef,
max_grad_norm=max_grad_norm, comm=comm, mpi_rank_weight=mpi_rank_weight,
iter_loss=iter_loss, arch=arch, **kwargs)
# model_train is the teacher and always executed
# model_burnin is trained. If teacher and student are swapped, the parameters from burnin are
# copied into the teacher and burnin is re-initialized
model_train = create_model("ppo_iter_train")
model_burnin = create_model("ppo_iter_burnin",
target_vf=model_train.train_model.vf_run,
target_dist_param=model_train.train_model.pi_run)
get_session().run(tf.variables_initializer(tf.global_variables()))
global_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="")
if MPI is not None:
sync_from_root(session, global_variables, comm=comm) # pylint: disable=E1101
if load_path is not None:
print("Load model...")
if eval["load_id"]:
# Only works with mongodb as backend, not with tinydb
raise NotImplementedError("Requires MongoDB backend to work")
docs = get_docs(db_uri, db_name, "runs")
projection = {'config': True}
projection.update({'artifacts': True})
doc = docs.find_one({'_id': eval["load_id"]}, projection)
print("Loading model from db to disc")
file_id = get_file_id(doc, eval["file_name"])
load_path = os.path.join(logger.get_dir(), "loadmodel_{}".format(_run._id))
            save_file_from_db(file_id, load_path, db_uri, db_name)
model_train.load(load_path)
if eval["switch_after_load"]:
switch_training_model(0, is_mpi_root, model_train, _run, iter_loss, session, comm,
save=False)
# Instantiate the runner object
runner = Runner(env=env, model=model_train, model_burnin=model_burnin, nsteps=nsteps, gamma=gamma, lam=lam,
iter_loss=iter_loss, eval=eval)
epinfobuf = deque(maxlen=100)
burnin_data_idx = 0
all_burnin_data = None
assert iter_loss["timesteps_anneal"] > iter_loss["v2_buffer_size"] * env.num_envs * nsteps, \
"{}, {}".format(iter_loss["timesteps_anneal"], iter_loss["v2_buffer_size"] * env.num_envs * nsteps)
# Start total timer
tfirststart = time.perf_counter()
nupdates = total_timesteps//nbatch
current_cycle_count = 0
for update in range(1, nupdates+1):
assert nbatch % nminibatches == 0
num_timesteps = update * nbatch
# Start timer
frac = 1.0 - (update - 1.0) / nupdates
# Calculate the learning rate
lrnow = lr(frac)
# Calculate the cliprange
cliprangenow = cliprange(frac)
# 'Burnin_phase' tells us whether we need regularization
cycle_count, alpha_reg, burnin_phase = scheduling(num_timesteps, iter_loss, "alpha_reg")
if cycle_count != current_cycle_count:
current_cycle_count = cycle_count
if iter_loss["v2"]:
logger.info("Training student")
train_student(
teacher=model_train,
student=model_burnin,
data=all_burnin_data,
iter_loss=iter_loss,
lr=lrnow,
cliprange=cliprangenow,
nminibatches=nminibatches,
session=session,
max_idx=burnin_data_idx,
nenvs=env.num_envs,
nsteps=nsteps,
id=_run._id,
)
switch_training_model(update, is_mpi_root, model_train, _run, iter_loss, session, comm)
# Resetting
all_burnin_data = None
burnin_data_idx = 0
logger.info("Switched training model")
tstart = time.perf_counter()
if update % log_interval == 0 and is_mpi_root: logger.info('Stepping environment...')
# Get minibatch
        obs, returns, b_returns, masks, actions, values, b_values, neglogpacs, states, b_states, epinfos, burnin_data = \
runner.run(burnin_phase) #pylint: disable=E0632
if burnin_phase and (iter_loss["v2"] or eval["save_latent"]):
print("Saving data")
if iter_loss["v2_use_files"] or eval["save_latent"]:
# Burnin_data_idx is incremented by nsteps, which is nr. of files
save_data(burnin_data, burnin_data_idx, _run._id, nsteps)
else:
if all_burnin_data is None:
all_burnin_data = get_all_burnin_data_dict(
env, iter_loss, nsteps, comm)
for key, value in burnin_data.items():
all_burnin_data[key][burnin_data_idx:burnin_data_idx + nsteps] = value
burnin_data_idx += nsteps
if update % log_interval == 0 and is_mpi_root: logger.info('Done.')
epinfobuf.extend(epinfos)
        # For each minibatch, calculate the loss and append it.
mblossvals = []
mblossvals_burnin = []
if states is None: # nonrecurrent version
# Index of each element of batch_size
# Create the indices array
inds = np.arange(nbatch)
for _ in range(noptepochs):
# Randomize the indexes
np.random.shuffle(inds)
# 0 to batch_size with batch_train_size step
for start in range(0, nbatch, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
slices_train = (arr[mbinds] for arr in (obs, returns, actions, values, neglogpacs))
slices_burnin = (arr[mbinds] for arr in (obs, b_returns, actions, b_values, neglogpacs))
stats_train, train_op_train, feed = model_train.train(
lrnow, cliprangenow, *slices_train,
)
stats_burnin, train_op_burnin, feed_burnin = model_burnin.train(
lrnow, cliprangenow, *slices_burnin, alpha=alpha_reg,
)
feed.update(feed_burnin) # Needs both!
fetches = {}
if eval["eval_only"]:
pass
session_outputs = {}
elif not burnin_phase or iter_loss["v2"]:
# For v2, normal PPO training is only the old policy,
# The student policy is trained differently
fetches.update({"stats_train": stats_train,})
fetches.update({"train_op": train_op_train})
session_outputs = session.run(fetches, feed)
elif (iter_loss["update_old_policy"] or
(iter_loss["update_old_policy_in_initial"] and cycle_count==0)):
fetches.update({"stats_burnin": stats_burnin})
fetches.update({"train_op": train_op_burnin})
session_outputs_burnin = session.run(fetches, feed)
fetches.update({"stats_train": stats_train,})
fetches.update({"train_op": train_op_train})
session_outputs = session.run(fetches, feed)
session_outputs.update(session_outputs_burnin)
else:
fetches.update({"stats_burnin": stats_burnin})
fetches.update({"train_op": train_op_burnin})
session_outputs = session.run(fetches, feed)
if "stats_train" in session_outputs.keys():
mblossvals.append(session_outputs["stats_train"])
else:
mblossvals.append(
[0 for loss in model_train.loss_names]
)
if "stats_burnin" in session_outputs.keys():
mblossvals_burnin.append(session_outputs["stats_burnin"])
else:
mblossvals_burnin.append(
[0 for loss in model_burnin.loss_names]
)
else: # recurrent version
raise NotImplementedError("Recurrent version not implemented")
# Feedforward --> get losses --> update
lossvals = np.mean(mblossvals, axis=0)
lossvals_burnin = np.mean(mblossvals_burnin, axis=0)
# End timer
tnow = time.perf_counter()
# Calculate the fps (frame per second)
fps = int(nbatch / (tnow - tstart))
if update % log_interval == 0 or update == 1:
            # Calculates whether the value function is a good predictor of the returns (ev close to 1)
            # or whether it's just worse than predicting nothing (ev <= 0)
ev = explained_variance(values, returns)
logger.logkv("misc/serial_timesteps", update*nsteps)
logger.logkv("misc/nupdates", update)
logger.logkv("misc/total_timesteps", update*nbatch)
logger.logkv("fps", fps)
logger.logkv("misc/explained_variance", float(ev))
logger.logkv('eprewmean', safemean([epinfo['r'] for epinfo in epinfobuf]))
logger.logkv('eplenmean', safemean([epinfo['l'] for epinfo in epinfobuf]))
logger.logkv('misc/time_elapsed', tnow - tfirststart)
for (lossval, lossname) in zip(lossvals, model_train.loss_names):
logger.logkv('loss/' + lossname, lossval)
for (lossval, lossname) in zip(lossvals_burnin, model_burnin.loss_names):
logger.logkv('loss_burnin/' + lossname, lossval)
logger.logkv("schedule/alpha_reg", alpha_reg)
logger.logkv("schedule/current_cycle_count", current_cycle_count)
logger.logkv("schedule/burnin_phase", burnin_phase)
logger.dumpkvs()
if is_mpi_root:
save_model(model_train, "model", update, _run)
return model_train
def train_student(teacher, student, data, iter_loss, lr, cliprange,
nminibatches, session, max_idx, nenvs, nsteps, id):
"""Train student for sequential ITER (i.e. v2=True).
Args:
teacher: teacher model
student: student model
data: either a np array or None if we use files to store the data
iter_loss: config dict
lr: learning rate
cliprange: cliprange used for gradients
        nminibatches: number of minibatches used per PPO update
        session: TF session
        max_idx: number of frames that have been stored; needed to locate data stored in files
        nenvs: number of parallel environments being executed
        nsteps: number of steps executed per batch
        id: run id, needed to find the folder with the stored files
Doesn't return anything, but updates the student
"""
use_data = data is not None
# In unit of steps
num_processed_parallel = int(max(nsteps // nminibatches, 1))
num_batches = int(max_idx // num_processed_parallel)
max_idx = num_batches * num_processed_parallel
if use_data:
obs = data["obs"][:max_idx]
actions = data["actions"][:max_idx]
returns = data["returns"][:max_idx]
neglogpacs = data["neglogpacs"][:max_idx]
values = data["values"][:max_idx]
# Get example so I know dimensionality of pi
test_obs = obs[0:num_processed_parallel]
sa = test_obs.shape
v, pi = teacher.train_model.value_and_pi(
test_obs.reshape(-1, *sa[2:]))
        teacher_values = np.empty_like(values)
'''
Runs the exercises and regressions for the cAndCwithStickyE paper.
'''
import sys
import os
sys.path.insert(0, os.path.abspath('../'))
sys.path.insert(0, os.path.abspath('../ConsumptionSaving'))
import numpy as np
#from copy import copy, deepcopy
from StickyEmodel import StickyEconsumerSOEType, StickyEconsumerDSGEType
from ConsAggShockModel import SmallOpenEconomy, CobbDouglasEconomy
from HARKutilities import plotFuncs
import matplotlib.pyplot as plt
periods_to_sim = 1200
ignore_periods = 500
# Define parameters for the small open economy version of the model
init_SOE_consumer = { 'CRRA': 2.0,
'DiscFac': 0.969,
'LivPrb': [0.995],
'PermGroFac': [1.0],
'AgentCount': 10000,
'aXtraMin': 0.00001,
'aXtraMax': 40.0,
'aXtraNestFac': 3,
'aXtraCount': 48,
'aXtraExtra': [None],
'PermShkStd': [np.sqrt(0.004)],
'PermShkCount': 7,
'TranShkStd': [np.sqrt(0.12)],
'TranShkCount': 7,
'UnempPrb': 0.05,
'UnempPrbRet': 0.0,
'IncUnemp': 0.0,
'IncUnempRet': 0.0,
'BoroCnstArt':0.0,
'tax_rate':0.0,
'T_retire':0,
'MgridBase': np.array([0.5,1.5]),
'aNrmInitMean' : np.log(0.00001),
'aNrmInitStd' : 0.0,
'pLvlInitMean' : 0.0,
'pLvlInitStd' : 0.0,
'PermGroFacAgg' : 1.0,
'UpdatePrb' : 0.25,
'T_age' : None,
'T_cycle' : 1,
'cycles' : 0,
'T_sim' : periods_to_sim
}
init_DSGE_consumer = { 'CRRA': 2.0,
'DiscFac': 1.0/1.014189682528173,
'LivPrb': [1.0],
'PermGroFac': [1.0],
'AgentCount': 1,
'aXtraMin': 0.00001,
'aXtraMax': 40.0,
'aXtraNestFac': 3,
'aXtraCount': 48,
'aXtraExtra': [None],
'PermShkStd': [0.0],
'PermShkCount': 1,
'TranShkStd': [0.0],
'TranShkCount': 1,
'UnempPrb': 0.0,
'UnempPrbRet': 0.0,
'IncUnemp': 0.0,
'IncUnempRet': 0.0,
'BoroCnstArt':0.0,
'tax_rate':0.0,
'T_retire':0,
'MgridBase': np.array([0.1,0.3,0.6,0.8,0.9,0.98,1.0,1.02,1.1,1.2,1.6,2.0,3.0]),
'aNrmInitMean' : np.log(0.00001),
'aNrmInitStd' : 0.0,
'pLvlInitMean' : 0.0,
'pLvlInitStd' : 0.0,
'PermGroFacAgg' : 1.0,
'UpdatePrb' : 0.25,
'CapShare' : 0.36,
'T_age' : None,
'T_cycle' : 1,
'cycles' : 0,
'T_sim' : periods_to_sim
}
init_SOE_market = { 'PermShkAggCount': 3,
'TranShkAggCount': 3,
'PermShkAggStd': np.sqrt(0.00004),
'TranShkAggStd': np.sqrt(0.00001),
'DeprFac': 1.0 - 0.94**(0.25),
'CapShare': 0.36,
'Rfree': 1.014189682528173,
'wRte': 2.5895209258224536,
'act_T': periods_to_sim
}
init_DSGE_market = { 'PermShkAggCount': 7,
'TranShkAggCount': 7,
'PermShkAggStd': np.sqrt(0.00004),
'TranShkAggStd': np.sqrt(0.00001),
'DeprFac': 1.0 - 0.94**(0.25),
'CapShare': 0.36,
'CRRA': 2.0,
'DiscFac': 1.0/1.014189682528173,
'slope_prev': 1.0,
'intercept_prev': 0.0,
'kSS':12.0**(1.0/(1.0-0.36)),
'AggregateL': 1.0,
'ignore_periods':ignore_periods,
'tolerance':0.0001,
'act_T': periods_to_sim
}
# Make a small open economy and the consumers who live in it
StickySOEconsumers = StickyEconsumerSOEType(**init_SOE_consumer)
StickySOEconomy = SmallOpenEconomy(**init_SOE_market)
StickySOEconomy.agents = [StickySOEconsumers]
StickySOEconomy.makeAggShkHist()
StickySOEconsumers.getEconomyData(StickySOEconomy)
StickySOEconsumers.track_vars = ['aLvlNow','mNrmNow','cNrmNow','pLvlNow','pLvlErrNow']
# Solve the model and display some output
StickySOEconomy.solveAgents()
StickySOEconomy.makeHistory()
# Plot some of the results
cFunc = lambda m : StickySOEconsumers.solution[0].cFunc(m,np.ones_like(m))
plotFuncs(cFunc,0.0,20.0)
plt.plot(np.mean(StickySOEconsumers.aLvlNow_hist,axis=1))
plt.show()
plt.plot(np.mean(StickySOEconsumers.mNrmNow_hist*StickySOEconsumers.pLvlNow_hist,axis=1))
plt.show()
plt.plot(np.mean(StickySOEconsumers.cNrmNow_hist*StickySOEconsumers.pLvlNow_hist,axis=1))
plt.show()
plt.plot(np.mean(StickySOEconsumers.pLvlNow_hist,axis=1))
plt.plot(np.mean(StickySOEconsumers.pLvlErrNow_hist,axis=1))
plt.show()
print('Average aggregate assets = ' + str(np.mean(StickySOEconsumers.aLvlNow_hist[ignore_periods:,:])))
print('Average aggregate consumption = ' + str(np.mean(StickySOEconsumers.cNrmNow_hist[ignore_periods:,:]*StickySOEconsumers.pLvlNow_hist[ignore_periods:,:])))
print('Standard deviation of log aggregate assets = ' + str(np.std(np.log(np.mean(StickySOEconsumers.aLvlNow_hist[ignore_periods:,:],axis=1)))))
LogC = np.log(np.mean(StickySOEconsumers.cNrmNow_hist*StickySOEconsumers.pLvlNow_hist,axis=1))[ignore_periods:]
DeltaLogC = LogC[1:] - LogC[0:-1]
print('Standard deviation of change in log aggregate consumption = ' + str(np.std(DeltaLogC)))
print('Standard deviation of log individual assets = ' + str(np.mean(np.std(np.log(StickySOEconsumers.aLvlNow_hist[ignore_periods:,:]),axis=1))))
print('Standard deviation of log individual consumption = ' + str(np.mean(np.std(np.log(StickySOEconsumers.cNrmNow_hist[ignore_periods:,:]*StickySOEconsumers.pLvlNow_hist[ignore_periods:,:]),axis=1))))
print('Standard deviation of log individual productivity = ' + str(np.mean(np.std(np.log(StickySOEconsumers.pLvlNow_hist[ignore_periods:,:]),axis=1))))
Logc = np.log(StickySOEconsumers.cNrmNow_hist*StickySOEconsumers.pLvlNow_hist)
import estimators
import matplotlib.pyplot as plt
import numpy as np
import argparse
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from time import time
def annStructureTest():
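    """Fit y = x^2 with each candidate estimator from estimators.get() at
    several training-set sizes and plot the fits with their error statistics."""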
sample_list = [1e3, 1e4, 1e5]
estimator_list, descriptions = estimators.get()
figure, ax_list = plt.subplots(len(sample_list), len(
estimator_list), sharex=True, sharey=True)
figure.suptitle("ANN regression fit of $y=x^2$")
for sample_idx, num_samples in enumerate(sample_list):
        X = np.linspace(-10, 10, int(num_samples)).reshape(-1, 1)
y = np.ravel(np.square(X))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,
random_state=0)
ax_list[sample_idx, 0].set_ylabel(
str(len(X_train)) + " training samples")
for estimator_idx, est in enumerate(estimator_list):
print("Training with " + str(len(X_train)) +
" samples and " + descriptions[estimator_idx] + "...")
tic = time()
est.fit(X_train, y_train)
print("done in {:.3f}s".format(time() - tic))
y_est = est.predict(X_test)
err = np.absolute(y_est - y_test)
rel_err = np.absolute(np.divide(y_est - y_test, y_test))
print("Mean error: {:.2f}".format(np.mean(err)))
print("Max error: {:.2f}".format(np.max(err)))
print("Mean relative error: {:.2f}\n".format(np.mean(rel_err)))
ax_list[sample_idx, estimator_idx].scatter(
X_test, y_est, color='r')
ax_list[sample_idx, estimator_idx].plot(X, y)
ax_list[sample_idx, estimator_idx].set_title(
descriptions[estimator_idx])
ax_list[sample_idx, estimator_idx].set_xlabel(
"$\epsilon_\mu=${:.2f} $\epsilon_{{max}}=${:.2f}".format(np.mean(err), np.max(err)))
plt.tight_layout()
def extrapolationTest():
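    """Train an MLP on x in [-10, 10] and evaluate on [-100, 100] to examine
    how the fit of y = x^2 extrapolates outside the training range."""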
num_samples = 1e5
est = MLPRegressor(hidden_layer_sizes=(50, 50),
learning_rate_init=0.01,
early_stopping=True)
plt.figure()
plt.title(
"ANN regression fit of $y=x^2$, with extrapolation outside of [-10 10]")
    X_train = np.linspace(-10, 10, int(num_samples)).reshape(-1, 1)
    y_train = np.ravel(np.square(X_train))
    X_test = np.linspace(-100, 100, int(10e3)).reshape(-1, 1)
y_test = np.ravel(np.square(X_test))
plt.ylabel(str(len(X_train)) + " training samples")
print("Training with " + str(len(X_train)) +
" samples and 50 neurons, 2 layers...")
tic = time()
est.fit(X_train, y_train)
print("done in {:.3f}s".format(time() - tic))
y_est = est.predict(X_test)
err = np.absolute(y_est - y_test)
rel_err = np.absolute(np.divide(y_est - y_test, y_test))
print("Mean error: {:.2f}".format(np.mean(err)))
print("Max error: {:.2f}".format(np.max(err)))
print("Mean relative error: {:.2f}\n".format(np.mean(rel_err)))
plt.scatter(
X_test, y_est, color='r')
plt.plot(X_test, y_test)
plt.xlabel(
"$\epsilon_\mu=${:.2f} $\epsilon_{{max}}=${:.2f}".format(np.mean(err), np.max(err)))
plt.tight_layout()
def interpolationTest():
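    """Train an MLP on x in [-100, 100] with the interval [-10, 10] removed
    from the training data and inspect how the fit of y = x^2 fills the gap."""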
num_samples = 1e5
est = MLPRegressor(hidden_layer_sizes=(50, 50),
learning_rate_init=0.01,
early_stopping=True)
plt.figure()
plt.title(
"ANN regression fit of $y=x^2$, with void interpolation in [-10, 10]")
    X_train = np.linspace(-100, 100, int(num_samples))
    X_train = X_train[np.where(np.abs(X_train) > 10)].reshape(-1, 1)
    y_train = np.ravel(np.square(X_train))
    X_test = np.linspace(-100, 100, int(10e3)).reshape(-1, 1)
y_test = np.ravel(np.square(X_test))
plt.ylabel(str(len(X_train)) + " training samples")
print("Training with " + str(len(X_train)) +
" samples and 50 neurons, 2 layers...")
tic = time()
est.fit(X_train, y_train)
print("done in {:.3f}s".format(time() - tic))
y_est = est.predict(X_test)
err = np.absolute(y_est - y_test)
rel_err = np.absolute(np.divide(y_est - y_test, y_test))
print("Mean error: {:.2f}".format(np.mean(err)))
print("Max error: {:.2f}".format(np.max(err)))
print("Mean relative error: {:.2f}\n".format(np.mean(rel_err)))
plt.scatter(
X_test, y_est, color='r')
plt.plot(X_test, y_test)
plt.xlabel(
"$\epsilon_\mu=${:.2f} $\epsilon_{{max}}=${:.2f}".format(np.mean(err), np.max(err)))
plt.xlim((-20, 20))
plt.ylim((min(-5, np.min(y_est)-5), 405))
plt.tight_layout()
def dimensionTest():
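    """Fit y = ||x||^2 for inputs of increasing dimensionality and report the
    resulting error statistics."""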
num_samples = 1e5
est = MLPRegressor(hidden_layer_sizes=(50, 50),
learning_rate_init=0.01,
early_stopping=True)
num_dims = [1,3,5]
figure, ax_list = plt.subplots(len(num_dims), 1, sharex=True, sharey=True)
figure.suptitle(r'ANN regression fit of $y=\Vert x \Vert ^2, x \in \mathbb{{R}}^n$')
for idx, n in enumerate(num_dims):
X = np.random.rand(int(num_samples), int(n))*20 - 10
y = np.sum(np.square(X), 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,
random_state=0)
ax_list[idx].set_ylabel(
str(len(X_train)) + " training samples")
print("Training with " + str(len(X_train)) +
" samples and 50 neurons, 2 layers...")
tic = time()
est.fit(X_train, y_train)
print("done in {:.3f}s".format(time() - tic))
y_est = est.predict(X_test)
err = np.absolute(y_est - y_test)
rel_err = np.absolute(np.divide(y_est - y_test, y_test))
print("Mean error: {:.2f}".format(np.mean(err)))
print("Max error: {:.2f}".format(np.max(err)))
print("Mean relative error: {:.2f}\n".format(np.mean(rel_err)))
        X_test = np.linspace(-10, 10, int(1e3))
"""ageo - active geolocation library: core.
"""
__all__ = ('Location', 'Map', 'Observation')
import bisect
import functools
import itertools
import numpy as np
import pyproj
from scipy import sparse
from shapely.geometry import Point, MultiPoint, Polygon, box as Box
from shapely.ops import transform as sh_transform
import tables
import math
import sys
# scipy.sparse.find() materializes vectors which, in several cases
# below, can be enormous. This is slower, but more memory-efficient.
# Code from https://stackoverflow.com/a/31244368/388520 with minor
# modifications.
def iter_csr_nonzero(matrix):
irepeat = itertools.repeat
return zip(
# reconstruct the row indices
itertools.chain.from_iterable(
irepeat(i, r)
for (i,r) in enumerate(matrix.indptr[1:] - matrix.indptr[:-1])
),
# matrix.indices gives the column indices as-is
matrix.indices,
matrix.data
)
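# Example sketch: for a small matrix m = sparse.csr_matrix([[0., 2.], [3., 0.]]),
# list(iter_csr_nonzero(m)) yields [(0, 1, 2.0), (1, 0, 3.0)] without
# materializing the full index/value vectors that sparse.find(m) would allocate.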
def Disk(x, y, radius):
return Point(x, y).buffer(radius)
# Important note: pyproj consistently takes coordinates in lon/lat
# order and distances in meters. lon/lat order makes sense for
# probability matrices, because longitudes are horizontal = columns,
# latitudes are vertical = rows, and scipy matrices are column-major
# (blech). Therefore, this library also consistently uses lon/lat
# order and meters.
# Coordinate transformations used by Location.centroid()
wgs_proj = pyproj.Proj("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
gcen_proj = pyproj.Proj("+proj=geocent +datum=WGS84 +units=m +no_defs")
wgs_to_gcen = functools.partial(pyproj.transform, wgs_proj, gcen_proj)
gcen_to_wgs = functools.partial(pyproj.transform, gcen_proj, wgs_proj)
# ... and Location.area()
cea_proj = pyproj.Proj(proj='cea', ellps='WGS84', lon_0=0, lat_ts=0)
wgs_to_cea = functools.partial(pyproj.transform, wgs_proj, cea_proj)
# Smooth over warts in pyproj.Geod.inv(), which is vectorized
# internally, but does not support numpy-style broadcasting, and
# returns things we don't need. The prebound _Inv and _Bcast are
# strictly performance hacks.
_WGS84geod = pyproj.Geod(ellps='WGS84')
def WGS84dist(lon1, lat1, lon2, lat2, *,
_Inv = _WGS84geod.inv, _Bcast = np.broadcast_arrays):
_, _, dist = _Inv(*_Bcast(lon1, lat1, lon2, lat2))
return dist
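# Example (values approximate): WGS84dist(0, 0, 1, 0) is about 111319.5 m,
# i.e. one degree of longitude along the equator on the WGS84 ellipsoid.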
def cartesian2(a, b):
"""Cartesian product of two 1D vectors A and B."""
return np.tile(a, len(b)), np.repeat(b, len(a))
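# Example: cartesian2(np.array([1, 2]), np.array([10, 20]))
# returns (array([1, 2, 1, 2]), array([10, 10, 20, 20])).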
def mask_ij(bounds, longitudes, latitudes):
"""Given a rectangle-tuple BOUNDS (west, south, east, north; as
returned by shapely .bounds properties), and sorted grid index
vectors LONGITUDES, LATITUDES, return vectors I, J which give the
x- and y-indices of every grid point within the rectangle.
LATITUDES and LONGITUDES must be sorted.
"""
try:
(west, south, east, north) = bounds
except ValueError as e:
raise ValueError("invalid bounds argument {!r}".format(bounds)) from e
min_i = bisect.bisect_left(longitudes, west)
max_i = bisect.bisect_right(longitudes, east)
min_j = bisect.bisect_left(latitudes, south)
max_j = bisect.bisect_right(latitudes, north)
I = np.array(range(min_i, max_i))
J = np.array(range(min_j, max_j))
return cartesian2(I, J)
def mask_matrix(bounds, longitudes, latitudes):
"""Construct a sparse matrix which is 1 at all latitude+longitude
grid points inside the rectangle BOUNDS, 0 outside.
LATITUDES and LONGITUDES must be sorted.
"""
I, J = mask_ij(bounds, longitudes, latitudes)
return sparse.csr_matrix((np.ones_like(I), (I, J)),
shape=(len(longitudes), len(latitudes)))
class LocationRowOnDisk(tables.IsDescription):
"""The row format of the pytables table used to save Location objects
on disk. See Location.save and Location.load."""
grid_x = tables.UInt32Col()
grid_y = tables.UInt32Col()
longitude = tables.Float64Col()
latitude = tables.Float64Col()
prob_mass = tables.Float32Col()
class Location:
"""An estimated location for a host. This is represented by a
probability mass function over the surface of the Earth, quantized
to a cell grid, and stored as a sparse matrix.
Properties:
resolution - Grid resolution, in meters at the equator
lon_spacing - East-west (longitude) grid resolution, in decimal degrees
lat_spacing - North-south (latitude) grid resolution, in decimal degrees
fuzz - Coastline uncertainty factor, in meters at the equator
north - Northernmost latitude covered by the grid
south - Southernmost latitude ditto
east - Easternmost longitude ditto
west - Westernmost longitude ditto
latitudes - Vector of latitude values corresponding to grid points
longitudes - Vector of longitude values ditto
probability - Probability mass matrix (may be lazily computed)
bounds - Bounding region of the nonzero portion of the
probability mass matrix (may be lazily computed)
centroid - Centroid of the nonzero &c
area - Weighted area of the nonzero &c
covariance - Covariance matrix of the nonzero &c
(relative to the centroid)
rep_pt - "Representative point" of the nonzero &c; see docstring
for exactly what this means
annotations - Dictionary of arbitrary additional metadata; saved and
loaded but not otherwise inspected by this code
You will normally not construct bare Location objects directly, only
Map and Observation objects (these are subclasses). However, any two
Locations can be _intersected_ to produce a new one.
A Location is _vacuous_ if it has no nonzero entries in its
probability matrix.
"""
def __init__(self, *,
resolution, fuzz, lon_spacing, lat_spacing,
north, south, east, west,
longitudes, latitudes,
probability=None, vacuity=None, bounds=None,
centroid=None, covariance=None, rep_pt=None,
loaded_from=None, annotations=None
):
self.resolution = resolution
self.fuzz = fuzz
self.north = north
self.south = south
self.east = east
self.west = west
self.lon_spacing = lon_spacing
self.lat_spacing = lat_spacing
self.longitudes = longitudes
self.latitudes = latitudes
self._probability = probability
self._vacuous = vacuity
self._bounds = bounds
self._centroid = centroid
self._covariance = covariance
self._rep_pt = rep_pt
self._loaded_from = loaded_from
self._area = None
self.annotations = annotations if annotations is not None else {}
@property
def probability(self):
if self._probability is None:
self.compute_probability_matrix_now()
return self._probability
@property
def vacuous(self):
if self._vacuous is None:
self.compute_probability_matrix_now()
return self._vacuous
@property
def centroid(self):
if self._centroid is None:
self.compute_centroid_now()
return self._centroid
@property
def covariance(self):
if self._covariance is None:
self.compute_centroid_now()
return self._covariance
@property
def area(self):
"""Weighted area of the nonzero region of the probability matrix."""
if self._area is None:
# Notionally, each grid point should be treated as a
# rectangle of parallels and meridians _centered_ on the
# point. The area of such a rectangle, however, only
# depends on its latitude and its breadth; the actual
# longitude values don't matter. Since the grid is
# equally spaced, we can use [0],[1] always, and then we
# do not have to worry about crossing the discontinuity at ±180.
west = self.longitudes[0]
east = self.longitudes[1]
# For latitude, the actual values do matter, but the map
# never goes all the way to the poles, and the grid is
# equally spaced, so we can precompute the north-south
# delta from any pair of latitudes and not have to worry
# about running off the ends of the array.
d_lat = (self.latitudes[1] - self.latitudes[0]) / 2
# We don't need X, so throw it away immediately. (We
# don't use iter_csr_nonzero here because we need to
# modify V.)
X, Y, V = sparse.find(self.probability); X = None
# The value vector is supposed to be normalized, but make
# sure it is, and then adjust from 1-overall to 1-per-cell
# normalization.
assert len(V.shape) == 1
S = V.sum()
if S == 0:
return 0
if S != 1:
V /= S
V *= V.shape[0]
area = 0
for y, v in zip(Y, V):
north = self.latitudes[y] + d_lat
south = self.latitudes[y] - d_lat
if not (-90 <= south < north <= 90):
raise AssertionError("expected -90 <= {} < {} <= 90"
.format(south, north))
tile = sh_transform(wgs_to_cea, Box(west, south, east, north))
area += v * tile.area
self._area = area
return self._area
@property
def rep_pt(self, epsilon=1e-8):
"""Representative point of the nonzero region of the probability
matrix. This is, of all points with the greatest probability,
the one closest to the centroid.
"""
if self._rep_pt is None:
lons = self.longitudes
lats = self.latitudes
cen = self.centroid
aeqd_cen = pyproj.Proj(proj='aeqd', ellps='WGS84', datum='WGS84',
lon_0=cen[0], lat_0=cen[1])
wgs_to_aeqd = functools.partial(pyproj.transform,
wgs_proj, aeqd_cen)
# mathematically, wgs_to_aeqd(Point(lon, lat)) == Point(0, 0);
# the latter is faster and more precise
cen_pt = Point(0,0)
# It is unacceptably costly to construct a shapely MultiPoint
# out of some locations with large regions (can require more than
# 32GB of scratch memory). Instead, iterate over the points
# one at a time.
max_prob = 0
min_dist = math.inf
rep_pt = None
for x, y, v in iter_csr_nonzero(self.probability):
lon = lons[x]
lat = lats[y]
if rep_pt is None or v > max_prob - epsilon:
dist = WGS84dist(cen[0], cen[1], lon, lat)
# v < max_prob has already been excluded
if (rep_pt is None or v > max_prob or
(v > max_prob - epsilon and dist < min_dist)):
rep_pt = [lon, lat]
max_prob = max(max_prob, v)
min_dist = dist
if rep_pt is None:
rep_pt = cen
else:
rep_pt = np.array(rep_pt)
self._rep_pt = rep_pt
return self._rep_pt
def distance_to_point(self, lon, lat):
"""Find the shortest geodesic distance from (lon, lat) to a nonzero
cell of the probability matrix."""
aeqd_pt = pyproj.Proj(proj='aeqd', ellps='WGS84', datum='WGS84',
lon_0=lon, lat_0=lat)
wgs_to_aeqd = functools.partial(pyproj.transform, wgs_proj, aeqd_pt)
# mathematically, wgs_to_aeqd(Point(lon, lat)) == Point(0, 0);
# the latter is faster and more precise
pt = Point(0, 0)
# It is unacceptably costly to construct a shapely MultiPoint
# out of some locations with large regions (requires more than
# 32GB of scratch memory). Instead, iterate over the points
# one at a time.
min_distance = math.inf
for x, y, v in iter_csr_nonzero(self.probability):
cell = sh_transform(wgs_to_aeqd, Point(self.longitudes[x],
self.latitudes[y]))
if pt.distance(cell) - self.resolution*2 < min_distance:
cell = cell.buffer(self.resolution * 3/2)
min_distance = min(min_distance, pt.distance(cell))
if min_distance < self.resolution * 3/2:
return 0
return min_distance
def contains_point(self, lon, lat):
"""True if a grid cell with a nonzero probability contains
or adjoins (lon, lat)."""
i = bisect.bisect_left(self.longitudes, lon)
j = bisect.bisect_left(self.latitudes, lat)
return (self.probability[i-1, j-1] > 0 or
self.probability[i, j-1] > 0 or
self.probability[i+1, j-1] > 0 or
self.probability[i-1, j ] > 0 or
self.probability[i, j ] > 0 or
self.probability[i+1, j ] > 0 or
self.probability[i-1, j+1] > 0 or
self.probability[i, j+1] > 0 or
self.probability[i+1, j+1] > 0)
def compute_probability_matrix_now(self):
"""Compute and set self._probability and self._vacuous.
"""
if self._probability is not None:
return
if self._loaded_from:
self._lazy_load_pmatrix()
else:
M, vac = self.compute_probability_matrix_within(self.bounds)
self._probability = M
self._vacuous = vac
def compute_probability_matrix_within(self, bounds):
"""Subclasses must override if _probability is lazily computed.
Returns a tuple (matrix, vacuous).
"""
assert self._probability is not None
assert self._vacuous is not None
if self._vacuous:
return self._probability, True # 0 everywhere, so 0 within bounds
if bounds.is_empty or bounds.bounds == ():
return (
sparse.csr_matrix((len(self.longitudes),
len(self.latitudes))),
True
)
M = (mask_matrix(bounds.bounds, self.longitudes, self.latitudes)
.multiply(self._probability))
s = M.sum()
if s:
M /= s
return M, False
else:
return M, True
@property
def bounds(self):
if self._bounds is None:
self.compute_bounding_region_now()
return self._bounds
def compute_bounding_region_now(self):
"""Subclasses must implement if necessary:
compute and set self._bounds.
"""
if self._bounds is None and self._loaded_from:
self._lazy_load_pmatrix()
assert self._bounds is not None
def intersection(self, other, bounds=None):
"""Compute the intersection of this object's probability matrix with
OTHER's. If BOUNDS is specified, we don't care about
anything outside that area, and it will become the bounding
region of the result; otherwise this object and OTHER's
bounding regions are intersected first and the computation
is restricted to that region.
"""
if (self.resolution != other.resolution or
self.fuzz != other.fuzz or
self.north != other.north or
self.south != other.south or
self.east != other.east or
self.west != other.west or
self.lon_spacing != other.lon_spacing or
self.lat_spacing != other.lat_spacing):
raise ValueError("can't intersect locations with "
"inconsistent grids")
if bounds is None:
bounds = self.bounds.intersection(other.bounds)
# Compute P(self AND other), but only consider points inside
# BOUNDS. For simplicity we actually look at the quantized
# bounding rectangle of BOUNDS.
M1, V1 = self.compute_probability_matrix_within(bounds)
M2, V2 = other.compute_probability_matrix_within(bounds)
if V1:
M = M1
V = True
elif V2:
M = M2
V = True
else:
M = None
V = False
# Optimization: if M1 and M2 have the same set of nonzero
# entries, and all the nonzero values in one matrix are equal
# or nearly so, then just use the other matrix as the result,
# because the multiply-and-then-normalize operation will be a
# nop.
if (np.array_equal(M1.indptr, M2.indptr) and
np.array_equal(M1.indices, M2.indices)):
if np.allclose(M1.data, M1.data[0]):
M = M2
elif np.allclose(M2.data, M2.data[0]):
M = M1
if M is None:
M = M1.multiply(M2)
s = M.sum()
if s:
M /= s
else:
V = True
M.eliminate_zeros()
return Location(
resolution = self.resolution,
fuzz = self.fuzz,
north = self.north,
south = self.south,
east = self.east,
west = self.west,
lon_spacing = self.lon_spacing,
lat_spacing = self.lat_spacing,
longitudes = self.longitudes,
latitudes = self.latitudes,
probability = M,
vacuity = V,
bounds = bounds
)
def compute_centroid_now(self):
"""Compute the weighted centroid and covariance matrix
of the probability mass function.
"""
if self._centroid is not None: return
# The centroid of a cloud of points is just the average of
# their coordinates, but this only works correctly in
# geocentric Cartesian space, not in lat/long space.
X = []
Y = []
Z = []
for i, j, v in iter_csr_nonzero(self.probability):
lon = self.longitudes[i]
lat = self.latitudes[j]
# PROJ.4 requires a dummy third argument when converting
# to geocentric (this appears to be interpreted as meters
# above/below the datum).
x, y, z = wgs_to_gcen(lon, lat, 0)
if math.isinf(x) or math.isinf(y) or math.isinf(z):
sys.stderr.write("wgs_to_gcen({}, {}, 0) = {}, {}, {}\n"
.format(lon, lat, x, y, z))
else:
X.append(x*v)
Y.append(y*v)
Z.append(z*v)
# We leave the covariance matrix in geocentric terms, since
# I'm not sure how to transform it back to lat/long space, or
# if that even makes sense.
M = np.vstack((X, Y, Z))
self._covariance = np.cov(M)
# Since the probability matrix is normalized, it is not
# necessary to divide the weighted sums by anything to get
# the means.
lon, lat, _ = gcen_to_wgs(*np.sum(M, 1))
if math.isinf(lat) or math.isinf(lon):
raise ValueError("bogus centroid {}/{} - X={} Y={} Z={}"
.format(lat, lon, X, Y, Z))
self._centroid = np.array((lon, lat))
def save(self, fname):
"""Write out this location to an HDF file.
For compactness, we write only the nonzero entries in a
pytables record form, and we _don't_ write out the full
longitude/latitude grid (it can be reconstructed from
the other metadata).
"""
self.compute_centroid_now()
with tables.open_file(fname, mode="w", title="location") as f:
t = f.create_table(f.root, "location",
LocationRowOnDisk, "location",
expectedrows=self.probability.getnnz())
t.attrs.resolution = self.resolution
t.attrs.fuzz = self.fuzz
t.attrs.north = self.north
t.attrs.south = self.south
t.attrs.east = self.east
t.attrs.west = self.west
t.attrs.lon_spacing = self.lon_spacing
t.attrs.lat_spacing = self.lat_spacing
t.attrs.lon_count = len(self.longitudes)
t.attrs.lat_count = len(self.latitudes)
t.attrs.centroid = self.centroid
t.attrs.covariance = self.covariance
if self.annotations:
t.attrs.annotations = self.annotations
cur = t.row
for i, j, pmass in iter_csr_nonzero(self.probability):
lon = self.longitudes[i]
lat = self.latitudes[j]
cur['grid_x'] = i
cur['grid_y'] = j
cur['longitude'] = lon
cur['latitude'] = lat
cur['prob_mass'] = pmass
cur.append()
t.flush()
def _lazy_load_pmatrix(self):
assert self._loaded_from is not None
with tables.open_file(self._loaded_from, "r") as f:
t = f.root.location
M = sparse.dok_matrix((t.attrs.lon_count, t.attrs.lat_count),
dtype=np.float32)
vacuous = True
negative_warning = False
for row in t.iterrows():
pmass = row['prob_mass']
# The occasional zero is normal, but negative numbers
# should never occur.
if pmass > 0:
M[row['grid_x'], row['grid_y']] = pmass
vacuous = False
elif pmass < 0:
if not negative_warning:
                        sys.stderr.write(self._loaded_from + ": warning: negative pmass\n")
negative_warning = True
M = M.tocsr()
if vacuous:
wb = 0
eb = 0
sb = 0
nb = 0
else:
i, j = M.nonzero()
wb = self.longitudes[i.min()]
eb = self.longitudes[i.max()]
sb = self.latitudes[j.min()]
nb = self.latitudes[j.max()]
self._probability = M
self._vacuous = vacuous
self._bounds = Box(wb, sb, eb, nb)
@classmethod
def load(cls, fname):
"""Read an HDF file containing a location (the result of save())
and instantiate a Location object from it. The probability
matrix is lazily loaded.
"""
with tables.open_file(fname, "r") as f:
t = f.root.location
longs = np.linspace(t.attrs.west, t.attrs.east,
t.attrs.lon_count)
lats = np.linspace(t.attrs.south, t.attrs.north,
t.attrs.lat_count)
return cls(
resolution = t.attrs.resolution,
fuzz = t.attrs.fuzz,
north = t.attrs.north,
south = t.attrs.south,
east = t.attrs.east,
west = t.attrs.west,
lon_spacing = t.attrs.lon_spacing,
lat_spacing = t.attrs.lat_spacing,
longitudes = longs,
latitudes = lats,
centroid = getattr(t.attrs, 'centroid', None),
covariance = getattr(t.attrs, 'covariance', None),
annotations = getattr(t.attrs, 'annotations', None),
loaded_from = fname
)
class Map(Location):
"""The map on which to locate a host.
Maps are defined by HDF5 files (see maps/ for the program that
generates these from shapefiles) that define a grid over the
surface of the Earth and a "baseline matrix" which specifies
the Bayesian prior probability of locating a host at any point
on that grid. (For instance, nobody puts servers in the middle
of the ocean.)
"""
def __init__(self, mapfile):
with tables.open_file(mapfile, 'r') as f:
M = f.root.baseline
if M.shape[0] == len(M.attrs.longitudes):
baseline = sparse.csr_matrix(M)
elif M.shape[1] == len(M.attrs.longitudes):
baseline = sparse.csr_matrix(M).T
else:
raise RuntimeError(
"mapfile matrix shape {!r} is inconsistent with "
"lon/lat vectors ({},{})"
.format(M.shape,
len(M.attrs.longitudes),
len(M.attrs.latitudes)))
# The probabilities stored in the file are not normalized.
s = baseline.sum()
assert s > 0
baseline /= s
# Note: this bound may not be tight, but it should be
# good enough. It's not obvious to me how to extract
# a tight bounding rectangle from a scipy sparse matrix.
bounds = Box(M.attrs.west, M.attrs.south,
M.attrs.east, M.attrs.north)
if not bounds.is_valid:
bounds = bounds.buffer(0)
assert bounds.is_valid
Location.__init__(
self,
resolution = M.attrs.resolution,
fuzz = M.attrs.fuzz,
north = M.attrs.north,
south = M.attrs.south,
east = M.attrs.east,
west = M.attrs.west,
lon_spacing = M.attrs.lon_spacing,
lat_spacing = M.attrs.lat_spacing,
longitudes = M.attrs.longitudes,
latitudes = M.attrs.latitudes,
probability = baseline,
vacuity = False,
bounds = bounds
)
class Observation(Location):
"""A single observation of the distance to a host.
An observation is defined by a map (used only for its grid;
if you want to intersect the observation with the map, do that
explicitly), the longitude and latitude of a reference point, a
_ranging function_ (see ageo.ranging) that computes probability
as a function of distance, calibration data for the ranging
function (see ageo.calibration) and finally a set of observed
round-trip times.
Both the bounds and the probability matrix are computed lazily.
"""
def __init__(self, *,
basemap, ref_lon, ref_lat,
range_fn, calibration, rtts):
Location.__init__(
self,
resolution = basemap.resolution,
fuzz = basemap.fuzz,
north = basemap.north,
south = basemap.south,
east = basemap.east,
west = basemap.west,
lon_spacing = basemap.lon_spacing,
lat_spacing = basemap.lat_spacing,
longitudes = basemap.longitudes,
latitudes = basemap.latitudes
)
self.ref_lon = ref_lon
self.ref_lat = ref_lat
self.calibration = calibration
self.rtts = rtts
self.range_fn = range_fn(calibration, rtts, basemap.fuzz)
def compute_bounding_region_now(self):
if self._bounds is not None: return
distance_bound = self.range_fn.distance_bound()
# If the distance bound is too close to half the circumference
# of the Earth, the projection operation below will produce an
# invalid polygon. We don't get much use out of a bounding
# region that includes the whole planet but for a tiny disk
# (which will probably be somewhere in the ocean anyway) so
# just give up and say that the bound is the entire planet.
# Similarly, if the distance bound is zero, give up.
if distance_bound > 19975000 or distance_bound == 0:
self._bounds = Box(self.west, self.south, self.east, self.north)
return
# To find all points on the Earth within a certain distance of
# a reference latitude and longitude, back-project onto the
# Earth from an azimuthal-equidistant map with its zero point
# at the reference latitude and longitude.
aeqd = pyproj.Proj(proj='aeqd', ellps='WGS84', datum='WGS84',
lat_0=self.ref_lat, lon_0=self.ref_lon)
try:
disk = sh_transform(
functools.partial(pyproj.transform, aeqd, wgs_proj),
Disk(0, 0, distance_bound))
# Two special cases must be manually dealt with. First, if
# any side of the "circle" (really a many-sided polygon)
# crosses the coordinate singularity at longitude ±180, we
# must replace it with a diversion to either the north or
# south pole (whichever is closer) to ensure that it still
# encloses all of the area it should.
            boundary = np.array(disk.boundary)
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 14 18:25:54 2018
@author: paul
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
fs = 14
def plots(model):
# =============================================================================
    # hysteresis loop
# =============================================================================
fig, ax = plt.subplots()
time = model.time
x = model.x
dot_x = model.dot_x
Delta_x = model.Delta_x
distance = model.distance
car = 0
start = 0
end = model.iters
iters = end - start
    jump = 1  # only plot every jump-th iteration to save time
    fs = 14
c = np.linspace(model.time[start],model.time[end-1],iters)
#ax.set_title("velocity vs. headway, car=" + str(car))
ax_scatter = ax.scatter(Delta_x[car,start:end:jump],dot_x[car,start:end:jump],marker="x",s=10,c=c[::jump])
ax.set_xlabel('headway [m]', fontsize = fs)
ax.set_ylabel('velocity [s]',fontsize = fs)
#ax.set_ylim(0,10)
#ax.set_xlim(0,15)
ax.tick_params(direction="in")
ax.set_title(r'ANN, $L=$'+str(model.L))
cb=fig.colorbar(ax_scatter, ax=ax)
cb.set_label(label="time [s]",size=fs)
# =============================================================================
# trajectories
# =============================================================================
fig, ax = plt.subplots()
for j in np.arange(0,model.N,jump):
diffx = np.roll(x[j,:],-1)-x[j,:]
masked_x = np.ma.array(x[j,:])
masked_x[diffx<-100] = np.ma.masked
ax.plot(time,masked_x,lw=0.8,c="red")
#ax.set_title("car positions", fontsize = fs)
ax.set_ylabel("position [m]", fontsize = fs)
ax.set_xlabel("time [s]", fontsize = fs)
ax.tick_params(direction="in")
# =============================================================================
    # Hovmöller velocities
# =============================================================================
fig, ax = plt.subplots()
    jump = int(model.tmax/20)  # only keep every jump-th time step for the interpolation to save time
x_data = x[:,::jump]
dot_x_data = dot_x[:,::jump]
t_data = time[::jump]
lent = len(t_data)
    grid_x, grid_t = np.meshgrid(distance, time)
"""Test EKF implementation with CartPole"""
import numpy as np
import matplotlib.pyplot as plt
import random
from systems.cart_pole import (
CartPole, PendulumTipPosition, PendulumTipVelocity)
from estimation.extended_kf import ExtendedKF
from utils import add_gaussian_noise, simulate_system, wrap_angles
# Simulate system to extract simulated measurements and ground truth
cp = CartPole()
t_final = 15
s0 = np.zeros(cp.n_state)
class BangBang:
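    """Open-loop bang-bang policy: apply +u_max for the first half of the
    horizon and -u_max for the second half (the state argument is ignored)."""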
def __init__(self, u_max: float, t_final: float):
self._u_max = u_max
self._switch_pt = t_final/2
def __call__(self, t, state):
if t < self._switch_pt:
return self._u_max
else:
return -self._u_max
policy = BangBang(1, t_final)
simulated_sys = simulate_system(cp, s0, policy, t_final, n_steps = 150)
# define x0 and P0
x0 = np.array([.2, 0, 0, 0])
P0 = np.diag([1, .01, .5, .01])
Q = .1 * np.eye(cp.n_state)
R = np.eye(2)
# Copyright 2020 FiveAI Ltd.
# All rights reserved.
#
# This file is part of the PaRoT toolbox, and is released under the
# "MIT License Agreement". Please see the LICENSE file that should
# have been included as part of this package.
"""
Experimental setup used for the PaRoT DiffAI comparison
"""
import warnings
import os
import json
import time
import argparse
import tensorflow as tf
import numpy as np
from tqdm import tqdm
from parot.domains import Box, HZ
from parot.utils.testing import PGD
from parot.properties import Ball, BallDemoted, BallPromoted, Fourier
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
warnings.filterwarnings("ignore")
model_ids = ["FFNN", "ConvSmall", "ConvMed", "ConvBig", "ConvSuper", "Skip"]
domain_ids = ['box', 'hz']
property_ids = ['ball', 'ball_demoted', 'ball_promoted', 'fourier']
dataset_ids = ['MNIST', 'CIFAR10']
def DiffAIModels(id, input_shape, num_classes):
"""
    Factory for different models identified by `id`, taking inputs of
    `input_shape` and producing an output over `num_classes` classes.
Args:
id (str): string description of the model
input_shape (np.ndarray): shape of the input
num_classes (int): number of output classes
Returns:
tf.keras.model.Sequential: desired model
"""
if id == 'FFNN':
return tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=input_shape),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(100, activation='relu'),
tf.keras.layers.Dense(
num_classes, activation='softmax', name='y_pred')
])
elif id == 'ConvSmall':
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
16, kernel_size=(4, 4), strides=2,
activation='relu',
input_shape=input_shape),
tf.keras.layers.Conv2D(32, (4, 4), strides=2, activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(100),
tf.keras.layers.Dense(
num_classes, activation='softmax', name='y_pred')
], name='ConvSmall')
elif id == 'ConvMed':
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
16, kernel_size=(4, 4), strides=2,
activation='relu', padding='same',
input_shape=input_shape),
tf.keras.layers.Conv2D(32, (4, 4), strides=2, padding='same',
activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(100),
tf.keras.layers.Dense(
num_classes, activation='softmax', name='y_pred')
], name='ConvMed')
elif id == 'ConvBig':
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
32, kernel_size=(3, 3), strides=1,
activation='relu', padding='same',
input_shape=input_shape),
tf.keras.layers.Conv2D(32, (4, 4), strides=2, padding='same',
activation='relu'),
tf.keras.layers.Conv2D(64, (3, 3), strides=1, padding='same',
activation='relu'),
tf.keras.layers.Conv2D(64, (4, 4), strides=2, padding='same',
activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(512),
tf.keras.layers.Dense(
num_classes, activation='softmax', name='y_pred')
], name='ConvBig')
elif id == 'ConvSuper':
return tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
32, kernel_size=(3, 3), strides=1,
activation='relu', padding='valid',
input_shape=input_shape),
tf.keras.layers.Conv2D(32, (4, 4), strides=1, padding='valid',
activation='relu'),
tf.keras.layers.Conv2D(64, (3, 3), strides=1, padding='valid',
activation='relu'),
tf.keras.layers.Conv2D(64, (4, 4), strides=1, padding='valid',
activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(512),
tf.keras.layers.Dense(
num_classes, activation='softmax', name='y_pred')
], name='ConvSuper')
elif id == 'Skip':
input_ = tf.keras.layers.Input(shape=input_shape)
m1 = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
16, kernel_size=(3, 3), strides=1,
activation='relu', padding='valid',
input_shape=input_shape),
tf.keras.layers.Conv2D(16, (3, 3), strides=1, padding='valid',
activation='relu'),
tf.keras.layers.Conv2D(32, (3, 3), strides=1, padding='valid',
activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(200)
])
m2 = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(
32, kernel_size=(4, 4), strides=1,
activation='relu', padding='valid',
input_shape=input_shape),
tf.keras.layers.Conv2D(32, (4, 4), strides=1, padding='valid',
activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(200)
])
o1 = m1(input_)
o2 = m2(input_)
merged = tf.keras.layers.concatenate([o1, o2])
output = tf.keras.layers.ReLU()(merged)
output = tf.keras.layers.Dense(200, activation='relu')(output)
output = tf.keras.layers.Dense(
num_classes, activation='softmax')(output)
return tf.keras.models.Model(inputs=[input_], outputs=output,
name='y_pred')
raise ValueError('model id "%s" not available' % id)
def loss_fn(y_true, y_pred):
return tf.reduce_mean(
tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred))
def setup_graph(model_id, training_iterator, input_shape, num_classes,
learning_rate, pgd_step_count, pgd_learning_rate,
batch_size, domain_max, domain_min,
prop=Ball, domain=Box
):
"""
Setups up the tensorflow graph for a given `model_id` with an
input of `input_shape` and an output of `num_classes`,
assuming a training dataset represented by a `training_iterator`
Args:
model_id (str): string identifier of the model to be used
training_iterator (Iterator): iterator for the training
dataset
input_shape (ndarray): shape of the input of the model
num_classes (int): number of output classes in the model
learning_rate (float): Optimiser learning rate.
pgd_step_count: number of steps to run the PGD optimiser for
pgd_learning_rate: learning rate for PGD optimiser.
batch_size (int): size of the batch
domain_max (float): maximum value that the input tensor is allowed to
take
domain_min (float): minimum value that the input tensor is allowed to
take
prop (function(x,eps) -> domain): the type of property that it
should be trained against.
Returns:
training_ops (dict): dictionary containg the training
operations to be used in the training of the model
testing_ops (dict): dictionary containg the testing
operations to be used in the testing of the model
placeholder_vars (dict): dictionary containing the
placeholder variables that must be substituted
before running the model
"""
# eps and lam are scalar placeholder vars
eps = tf.compat.v1.placeholder(tf.float32, shape=(), name='eps')
adversary_eps = tf.compat.v1.placeholder(tf.float32, shape=(),
name="adversary_eps")
lam = tf.compat.v1.placeholder(tf.float32, shape=(), name='lam')
# get the model
model = DiffAIModels(model_id, input_shape, num_classes)
optim = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
# training operations
x, y_true = training_iterator.get_next()
x = tf.reshape(x, shape=[batch_size, *input_shape])
y_pred = model(x)
regular_loss = loss_fn(y_true, y_pred)
x_box = prop(eps).of(domain, x)
[y_pred_box] = x_box.transform(outputs=[y_pred], input=x)
y_adversary = y_pred_box.get_center() - y_pred_box.get_errors() * (
tf.cast(tf.one_hot(y_true, depth=num_classes), tf.float32) * 2 - 1)
adversary_loss = loss_fn(y_true, y_adversary)
trainable_vars = tf.compat.v1.trainable_variables()
regularisation_loss = tf.add_n(
[tf.compat.v1.nn.l2_loss(v) for v in trainable_vars],
name='regularization_loss')
combined_loss = lam * adversary_loss + regular_loss +\
0.01 * regularisation_loss
# training operation
train_op = optim.minimize(
combined_loss, var_list=model.variables,
global_step=tf.compat.v1.train.get_global_step())
training_ops = {
'train': train_op
}
# testing operations
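    # NOTE: x_test / y_test below are assumed to be defined at module scope
    # (e.g. loaded from the dataset before this function is called); they are
    # only used here for their shapes.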
x_t = tf.compat.v1.placeholder(
tf.float32, shape=[None, *x_test.shape[1:]], name='x_t')
y_t = tf.compat.v1.placeholder(
tf.float32, shape=[None, *y_test.shape[1:]], name='y_t')
# compute accuracy as the number of correctly predicted classes
y_pred_ = model(x_t)
y_pred_int = tf.cast(tf.argmax(y_pred_, axis=1), tf.uint8)
y_test_int = tf.cast(y_t, tf.uint8)
compare_op_test = tf.cast(tf.equal(y_pred_int, y_test_int), tf.float32)
test_op = tf.stop_gradient(tf.reduce_mean(compare_op_test), name='test_op')
x_t_elem = tf.compat.v1.placeholder(
tf.float32, shape=(1, *x_test.shape[1:]), name='x_t_elem')
y_t_elem = tf.compat.v1.placeholder(
tf.float32, shape=(1, *y_test.shape[1:]), name='y_t_elem')
y_t_elem_int = tf.cast(y_t_elem, tf.uint8)
y_pred_elem = model(x_t_elem)
# Compute the transformer on x_t_elem
x_box_ = prop(eps).of(domain, x_t_elem)
[y_pred_box_] = x_box_.transform(
outputs=[y_pred_elem],
input=x_t_elem
)
y_test_one_hot = tf.cast(tf.one_hot(y_t_elem_int, num_classes), tf.float32)
(y_pred_box_center, y_pred_box_error) = y_pred_box_.get_center_errors()
y_verify_adversary = y_pred_box_center - y_pred_box_error * \
(y_test_one_hot * 2 - 1)
y_verify_adversary_int = tf.cast(
tf.argmax(y_verify_adversary, axis=1), tf.uint8)
verify_adversary_test_op = 1 - \
tf.reduce_mean(tf.cast(tf.equal(y_verify_adversary_int, y_t_elem_int),
tf.float32), name='verify_adversary_test_op')
# compute an adversarial example.
pgd = PGD(
property=prop,
domain=domain,
epsilon=adversary_eps,
learning_rate=pgd_learning_rate,
step_count=pgd_step_count,
domain_max=domain_max,
domain_min=domain_min,
)
# `x_adversary` is a perturbed input, `y_adversary` is the perturbed output
x_pgd_adversary, [y_pgd_adversary_], _ = pgd(
x_t_elem, [y_pred_elem], loss_fn(y_t_elem, y_pred_elem))
y_pgd_adversary_int = tf.cast(
tf.argmax(y_pgd_adversary_, axis=1), tf.uint8)
pgd_adversary_test_op =\
1 - tf.reduce_mean(
tf.cast(tf.equal(y_pgd_adversary_int, y_t_elem_int), tf.float32),
name='pgd_adversary_test_op')
testing_ops = {
'test': test_op,
'verify_adversary_test': verify_adversary_test_op,
'pgd_adversary_test': pgd_adversary_test_op,
'y_verify_adversary_int': y_verify_adversary_int,
'y_pgd_adversary_int': y_pgd_adversary_int,
'y_t_elem_int': y_t_elem_int
}
# placeholder variables
placeholder_vars = {
'eps': eps,
'adversary_eps': adversary_eps,
'lam': lam,
'x_t': x_t,
'y_t': y_t,
'x_t_elem': x_t_elem,
'y_t_elem': y_t_elem
}
return training_ops, testing_ops, placeholder_vars
def train(ops, placeholders, iterator, name, max_epochs, n_runs, eps, lam,
parent_folder='checkpoints', testing_ops={}, testing_data={}
):
"""
Run the training multiple times and save each checkpoint
(0, ..., `n_runs`-1) to a folder named `name` inside `parent_folder`.
Args:
ops (dict): dictionary of instances of training ops to be run
placeholders (dict): dictionary with the placeholder variables
to be replaced in the session
iterator (Iterator): training dataset iterator
name (str): folder inside `parent_folder` where the training
configuration and checkpoints will be stored
max_epochs (int): maximum number of epochs to run training for
n_runs (int): number of runs to perform at this training stage
eps (float): epsilon value for the box training
lam (float): lambda value in the combined loss function
parent_folder (str): path to the folder where this training
session will be stored
testing_ops (dict, optional): if passed, testing accuracy will be
computed for debugging purposes
testing_data (dict, optional): if testing_ops is passed,
required for computing accuracy
"""
# create the folder with the name of the training op
os.system(
'mkdir -p %s > /dev/null 2>&1' % os.path.join(parent_folder, name))
# write the config
config = {
'eps': eps,
'lam': lam
}
with open(os.path.join(parent_folder, name, 'config.json'), 'w') as f:
json.dump(config, f)
# substitute the placeholder operations
sub_placeholders = {
placeholders['eps']: eps,
placeholders['lam']: lam
}
for r in range(n_runs):
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
i = 0
print('Run %d of %d for training "%s"...' % (r + 1, n_runs, name))
t_cum = 0
for epoch in range(max_epochs):
t_start_epoch = time.time()
sess.run(iterator.initializer)
while True:
i += 1
try:
sess.run(ops['train'], feed_dict=sub_placeholders)
except tf.errors.OutOfRangeError:
break
delta_t = time.time() - t_start_epoch
t_cum += delta_t
test_acc = 'N/A'
if len(testing_ops) != 0 and len(testing_data) != 0:
test_acc = sess.run(
testing_ops['test'],
feed_dict={
placeholders['eps']: eps,
placeholders['lam']: lam,
placeholders['x_t']: testing_data['x_t'],
placeholders['y_t']: testing_data['y_t']})
print(
("Epoch: %d, accuracy: %s, elapsed time (s): %.3f," +
" cumulative time (s): %.3f") %
(epoch, test_acc, delta_t, t_cum))
# add checkpoint and return
saver = tf.compat.v1.train.Saver()
saver.save(
sess, os.path.join(parent_folder, name, str(r) + '.ckpt'))
def test_folder(checkpoint_folder, ops, placeholders, x_test, y_test, dataset,
test_epsilon, pgd_sample_size):
"""
Given a `checkpoint_folder` with at least one checkpoint inside, run
the testing operations in `ops` using the placeholder variables in
`placeholders`
Args:
checkpoint_folder (str): path to a folder with checkpoints for a
certain model; it should also contain a 'config.json' with the
parameters used in the training stage
ops (dict): dictionary of testing ops to be run
placeholders (dict): dictionary with the placeholder variables
to be replaced in the session
x_test (ndarray): input testing data
y_test (ndarray): output testing data
dataset (str): dataset identifier used to select the matching model folders
test_epsilon (float): epsilon value used for testing and for the PGD/verify adversaries
pgd_sample_size (int): number of samples to be used in PGD; must be
at most the number of points in the testing dataset
Returns:
ops_results (list): list of results of the testing operations
"""
models = [
f
for f in os.listdir(checkpoint_folder)
if os.path.isdir(os.path.join(checkpoint_folder, f)) and dataset in f]
sub_adv_vars = {
placeholders['x_t']: x_test,
placeholders['y_t']: y_test
}
np.random.seed(0)
adversary_sample_indices = np.random.choice(
x_test.shape[0], pgd_sample_size, replace=False)
# test only on these indices
sub_test_vars = {
placeholders['x_t']: x_test[adversary_sample_indices],
placeholders['y_t']: y_test[adversary_sample_indices]
}
results = {}
for m in models:
model_folder = os.path.join(checkpoint_folder, m)
ckpts = [
f[:-10]
for f in os.listdir(model_folder)
if (os.path.isfile(os.path.join(model_folder, f)) and
'.ckpt.meta' in f)]
# read the config of this model (all checkpoints will share it)
with open(os.path.join(model_folder, 'config.json'), 'r') as f:
config = json.load(f)
sub_test_vars[placeholders['eps']] = test_epsilon
sub_test_vars[placeholders['lam']] = config['lam']
sub_adv_vars[placeholders['eps']] = test_epsilon
sub_adv_vars[placeholders['lam']] = config['lam']
sub_adv_vars[placeholders['adversary_eps']] = test_epsilon
acc = []
verify = []
pgd = []
for ckpt in ckpts:
with tf.compat.v1.Session() as sess:
saver = tf.compat.v1.train.Saver()
saver.restore(sess, os.path.join(model_folder, ckpt + '.ckpt'))
acc.append(
float(sess.run(ops['test'], feed_dict=sub_test_vars)))
pgd_result = 0
verify_result = 0
for idx in tqdm(adversary_sample_indices):
sub_adv_vars[placeholders['x_t_elem']] = [x_test[idx]]
sub_adv_vars[placeholders['y_t_elem']] = [y_test[idx]]
output = sess.run(
[ops['verify_adversary_test'],
ops['pgd_adversary_test'],
ops['y_verify_adversary_int'],
ops['y_pgd_adversary_int'],
ops['y_t_elem_int']], feed_dict=sub_adv_vars)
[verify_ad_test, pgd_ad_test, y_verify_adversary_int,
y_pgd_adversary_int, y_t_elem_int] = output
verify_result += verify_ad_test
pgd_result += pgd_ad_test
verify.append(verify_result / pgd_sample_size)
pgd.append(pgd_result / pgd_sample_size)
print('%s: test error %.2f, PGD %.2f, verify %.2f' %
(m,
100 * (1 - acc[-1]),
100 * (pgd[-1]),
100 * (verify[-1])))
results[m] = {
'name': m,
'epsilon': float(config['eps']),
'lam': float(config['lam']),
'test_epsilon': float(test_epsilon),
'acc': acc,
'verify': verify,
'pgd': pgd
}
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='DiffAI comparison experiments',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--model', type=str, required=True, choices=model_ids,
help='Define the model to use')
parser.add_argument(
'--domain', choices=domain_ids, help="specify the domain type",
required=True)
parser.add_argument(
'--property', choices=property_ids, help="specify the property type",
required=True)
parser.add_argument(
'--dataset', '-D', choices=dataset_ids,
help="specify the dataset to be used", required=True)
parser.add_argument(
'--buffer-size', type=int, default=5000, help='dataset buffer size')
parser.add_argument(
'--batch-size', type=int, default=64, help='training batch size')
parser.add_argument(
'--test-only', dest='test', action="store_true",
help="simply test the models found in checkpoints only")
parser.set_defaults(test=False)
parser.add_argument(
'-e', '--epsilon', nargs="+", type=float, default=[0.1],
help="the width of the property")
parser.add_argument(
'-l', '--lam', nargs="+", type=float, default=[0.0, 0.1])
parser.add_argument(
'--epochs', type=int, default=200,
help="number of epochs to train for")
parser.add_argument(
'--learning-rate', type=float, default=0.0001,
help="learning rate of training optimiser")
parser.add_argument(
'--runs', type=int, default=1,
help="number of times to train the model. ")
parser.add_argument(
'--pgd-step-count', type=int, default=100,
help="number of steps to run pgd optimiser for")
parser.add_argument(
'--pgd-learning-rate', type=float, default=1.0,
help="learning rate of pgd optimiser")
parser.add_argument(
'--test-verify-sample-size', type=int, default=500,
help="number of samples to get PGD and verify bounds on test data.")
parser.add_argument(
'--test-epsilon', type=float, default=0.1,
help="epsilon to test against")
args = parser.parse_args()
# Data configuration
if args.dataset == 'MNIST':
(x_train, y_train), (x_test, y_test) =\
tf.keras.datasets.mnist.load_data()
x_train = x_train[..., tf.newaxis]
x_test = x_test[..., tf.newaxis]
elif args.dataset == 'CIFAR10':
(x_train, y_train), (x_test, y_test) =\
tf.keras.datasets.cifar10.load_data()
# Prepare datasets
x_train = (x_train / 255.0).astype(np.float32)
x_test = (x_test / 255.0).astype(np.float32)
# depending on the dataset, a data reshape might be required
if len(y_train.shape) > 1:
# reshape the vector
y_train = y_train.reshape(y_train.shape[0])
y_test = y_test.reshape(y_test.shape[0])
# normalize based on the dataset
if args.dataset == 'MNIST':
def norm(x):
return (x - 0.1307) / 0.3081
x_train = norm(x_train)
x_test = norm(x_test)
domain_min = norm(0.0)
domain_max = norm(1.0)
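# Added note: with these constants the normalised MNIST input domain becomes approximately
# domain_min = (0 - 0.1307) / 0.3081 ≈ -0.4242 and domain_max = (1 - 0.1307) / 0.3081 ≈ 2.8215.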
elif args.dataset == 'CIFAR10':
def norm(x):
x[..., 0] = (x[..., 0] - 0.4914) / 0.2023
x[..., 1] = (x[..., 1] - 0.4822) / 0.1994
x[..., 2] = (x[..., 2] - 0.4465) / 0.2010
return x
x_train = norm(x_train)
x_test = norm(x_test)
domain_min = norm( | np.zeros([3]) | numpy.zeros |
# Copyright 2021 Division of Medical Image Computing, German Cancer Research Center (DKFZ)
# and Applied Computer Vision Lab, Helmholtz Imaging Platform
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from batchgenerators.augmentations.crop_and_pad_augmentations import random_crop, center_crop, pad_nd_image_and_seg, \
crop
class TestCrop(unittest.TestCase):
def setUp(self):
np.random.seed(1234)
def test_random_crop_3D(self):
data = np.random.random((32, 4, 64, 56, 48))
seg = np.ones(data.shape)
d, s = random_crop(data, seg, 32, 0)
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32, 32), d.shape)), "data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32, 32), s.shape)), "seg has unexpected return shape")
self.assertEqual(np.sum(s == 0), 0, "Zeros encountered in seg meaning that we did padding which should not have"
" happened here!")
def test_random_crop_2D(self):
data = np.random.random((32, 4, 64, 56))
seg = np.ones(data.shape)
d, s = random_crop(data, seg, 32, 0)
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32), d.shape)), "data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32), s.shape)), "seg has unexpected return shape")
self.assertEqual(np.sum(s == 0), 0, "Zeros encountered in seg meaning that we did padding which should not have"
" happened here!")
def test_random_crop_3D_from_List(self):
data = [np.random.random((4, 64+i, 56+i, 48+i)) for i in range(32)]
seg = [np.random.random((4, 64+i, 56+i, 48+i)) for i in range(32)]
d, s = random_crop(data, seg, 32, 0)
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32), d.shape)), "data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32), s.shape)), "seg has unexpected return shape")
self.assertEqual(np.sum(s == 0), 0, "Zeros encountered in seg meaning that we did padding which should not have"
" happened here!")
def test_random_crop_2D_from_List(self):
data = [np.random.random((4, 64+i, 56+i)) for i in range(32)]
seg = [np.random.random((4, 64+i, 56+i)) for i in range(32)]
d, s = random_crop(data, seg, 32, 0)
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32), d.shape)), "data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32), s.shape)), "seg has unexpected return shape")
self.assertEqual(np.sum(s == 0), 0, "Zeros encountered in seg meaning that we did padding which should not have"
" happened here!")
def test_random_crop_with_cropsize_larger_image(self):
'''
should fall back to center crop
:return:
'''
data = [np.random.random((4, 64+i, 56+i)) for i in range(32)]
seg = [np.random.random((4, 64+i, 56+i)) for i in range(32)]
d, s = random_crop(data, seg, 32, 32)
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32), d.shape)), "data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((32, 4, 32, 32), s.shape)), "seg has unexpected return shape")
self.assertEqual(np.sum(s == 0), 0, "Zeros encountered in seg meaning that we did padding which should not have"
" happened here!")
def test_crop_size_larger_than_image(self):
data = np.random.random((8, 4, 64, 56))
seg = np.ones(data.shape)
d, s = random_crop(data, seg, 96, 0)
self.assertTrue(all(i == j for i, j in zip((8, 4, 96, 96), d.shape)), "data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((8, 4, 96, 96), s.shape)), "seg has unexpected return shape")
self.assertNotEqual(np.sum(s == 0), 0, "seg was not padded properly")
def test_center_crop_3D(self):
data = np.random.random((8, 4, 30, 30, 30))
seg = np.random.random(data.shape)
crop_size = 10
d, s = center_crop(data, crop_size=crop_size, seg=seg)
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size, crop_size, crop_size), d.shape)),
"data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size, crop_size, crop_size), s.shape)),
"seg has unexpected return shape")
np.testing.assert_array_equal(data[:, :, 10:20, 10:20, 10:20], d, err_msg="crop not equal image center")
np.testing.assert_array_equal(seg[:, :, 10:20, 10:20, 10:20], s, err_msg="crop not equal image center")
def test_center_crop_2D(self):
data = np.random.random((8, 4, 30, 30))
seg = np.random.random(data.shape)
crop_size = 10
d, s = center_crop(data, crop_size=crop_size, seg=seg)
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size, crop_size), d.shape)),
"data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size, crop_size), s.shape)),
"seg has unexpected return shape")
np.testing.assert_array_equal(data[:, :, 10:20, 10:20], d, err_msg="crop not equal image center")
np.testing.assert_array_equal(seg[:, :, 10:20, 10:20], s, err_msg="crop not equal image center")
def test_center_crop_3D_padding(self):
data = np.random.random((8, 4, 30, 30, 30))
seg = np.random.random(data.shape)
crop_size = 50
d, s = center_crop(data, crop_size=crop_size, seg=seg)
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size, crop_size, crop_size), d.shape)),
"data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size, crop_size, crop_size), s.shape)),
"seg has unexpected return shape")
tmp_d = d[:, :, 10:40, 10:40, 10:40]
tmp_s = s[:, :, 10:40, 10:40, 10:40]
np.testing.assert_array_equal(tmp_d, data, err_msg="Original data is not included in padded image")
self.assertAlmostEqual(np.sum(d.flatten()), np.sum(data.flatten()), msg="Padding of data is not zero")
np.testing.assert_array_equal(tmp_s, seg, err_msg="Original segmentation is not included in padded image")
self.assertAlmostEqual(np.sum(d.flatten()), np.sum(data.flatten()), msg="Padding of segmentation is not zero")
def test_center_crop_2D_padding(self):
data = np.random.random((8, 4, 30, 30))
seg = np.random.random(data.shape)
crop_size = 50
d, s = center_crop(data, crop_size=crop_size, seg=seg)
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size, crop_size), d.shape)),
"data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size, crop_size), s.shape)),
"seg has unexpected return shape")
tmp_d = d[:, :, 10:40, 10:40]
tmp_s = s[:, :, 10:40, 10:40]
np.testing.assert_array_equal(tmp_d, data, err_msg="Original data is not included in padded image")
self.assertAlmostEqual(np.sum(d.flatten()), np.sum(data.flatten()), msg="Padding of data is not zero")
np.testing.assert_array_equal(tmp_s, seg, err_msg="Original segmentation is not included in padded image")
self.assertAlmostEqual(np.sum(d.flatten()), np.sum(data.flatten()), msg="Padding of segmentation is not zero")
def test_center_crop_2D_list(self):
data = np.random.random((8, 4, 30, 30))
seg = np.random.random(data.shape)
crop_size = [10, 20]
d, s = center_crop(data, crop_size=crop_size, seg=seg)
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size[0], crop_size[1]), d.shape)),
"data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size[0], crop_size[1]), s.shape)),
"seg has unexpected return shape")
np.testing.assert_array_equal(data[:, :, 10:20, 5:25], d, err_msg="crop not equal image center")
np.testing.assert_array_equal(seg[:, :, 10:20, 5:25], s, err_msg="crop not equal image center")
def test_center_crop_3D_list(self):
data = np.random.random((8, 4, 30, 30, 30))
seg = np.random.random(data.shape)
crop_size = [10, 20, 29]
d, s = center_crop(data, crop_size=crop_size, seg=seg)
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size[0], crop_size[1], crop_size[2]), d.shape)),
"data has unexpected return shape")
self.assertTrue(all(i == j for i, j in zip((8, 4, crop_size[0], crop_size[1], crop_size[2]), s.shape)),
"seg has unexpected return shape")
np.testing.assert_array_equal(data[:, :, 10:20, 5:25, 0:29], d, err_msg="crop not equal image center")
np.testing.assert_array_equal(seg[:, :, 10:20, 5:25, 0:29], s, err_msg="crop not equal image center")
def test_pad_nd_image_and_seg_2D(self):
print('Test test_pad_nd_image_and_seg_2D. [START]')
input_shape = (5, 5, 30, 30)
data = np.random.random(input_shape)
seg = np.random.random(data.shape)
new_shape = (15, 15, 50, 50)
new_shape2 = (4, 2, 10, 10)
new_shape3 = (4, 2, 50, 50)
new_shape4 = (5, 5, 51, 51)
new_shape5 = (50, 50)
new_shape6 = (2, 5, 5, 50, 50)
new_shape7 = (5, 5, 30, 50)
expected_shape3 = (5, 5, 50, 50)
expected_shape5 = (5, 5, 50, 50)
number_of_padded_ones7 = np.prod(new_shape7) - np.prod(input_shape)
data_padded, seg_padded = pad_nd_image_and_seg(data, seg, new_shape=new_shape)
data_padded2, seg_padded2 = pad_nd_image_and_seg(data, seg, new_shape=new_shape2)
data_padded3, seg_padded3 = pad_nd_image_and_seg(data, seg, new_shape=new_shape3)
data_padded4, seg_padded4 = pad_nd_image_and_seg(data, seg, new_shape=new_shape4)
data_padded5, seg_padded5 = pad_nd_image_and_seg(data, seg, new_shape=new_shape5)
data_padded7, seg_padded7 = pad_nd_image_and_seg(data, seg, new_shape=new_shape7, np_pad_kwargs_seg={'constant_values': 1})
print('Zero padding to bigger output shape in all dimensions. [START]')
self.assertTrue(all(i == j for i, j in zip(new_shape, data_padded.shape)), "padded data has unexpected shape")
self.assertTrue(all(i == j for i, j in zip(new_shape, seg_padded.shape)), "padded seg has unexpected shape")
np.testing.assert_array_equal(data_padded[5:10, 5:10, 10:40, 10:40], data, err_msg="data wrongly padded")
np.testing.assert_array_equal(seg_padded[5:10, 5:10, 10:40, 10:40], seg, err_msg="seg wrongly padded")
self.assertAlmostEqual(np.sum(data_padded.flatten()), np.sum(data.flatten()), msg="Padding of data is not zero")
self.assertAlmostEqual(np.sum(seg_padded.flatten()), np.sum(seg.flatten()), msg="Padding of data is not zero")
print('Zero padding to bigger output shape in all dimensions. [DONE]')
print('Zero padding to smaller output shape in all dimensions. [START]')
self.assertTrue(all(i == j for i, j in zip(input_shape, data_padded2.shape)), "padded data has unexpected shape")
self.assertTrue(all(i == j for i, j in zip(input_shape, seg_padded2.shape)), "padded seg has unexpected shape")
np.testing.assert_array_equal(data_padded2, data, err_msg="data wrongly padded for smaller output shape than input shape")
np.testing.assert_array_equal(seg_padded2, seg, err_msg="seg wrongly padded for smaller output shape than input shape")
print('Zero padding to smaller output shape in all dimensions. [DONE]')
print('Zero padding to smaller output shape in first two dimensions and bigger output shape in last two dimensions. [START]')
self.assertTrue(all(i == j for i, j in zip(expected_shape3, data_padded3.shape)), "padded data has unexpected shape")
self.assertTrue(all(i == j for i, j in zip(expected_shape3, seg_padded3.shape)), "padded seg has unexpected shape")
np.testing.assert_array_equal(data_padded3[:, :, 10:40, 10:40], data, err_msg="data wrongly padded")
np.testing.assert_array_equal(seg_padded3[:, :, 10:40, 10:40], seg, err_msg="seg wrongly padded")
self.assertAlmostEqual(np.sum(data_padded3.flatten()), np.sum(data.flatten()), msg="Padding of data is not zero")
self.assertAlmostEqual(np.sum(seg_padded3.flatten()), np.sum(seg.flatten()), msg="Padding of data is not zero")
print('Zero padding to smaller output shape in first two dimensions and bigger output shape in last two dimensions. [DONE]')
print('Zero padding to odd padding dimensions. [START]')
self.assertTrue(all(i == j for i, j in zip(new_shape4, data_padded4.shape)), "padded data has unexpected shape")
self.assertTrue(all(i == j for i, j in zip(new_shape4, seg_padded4.shape)), "padded seg has unexpected shape")
np.testing.assert_array_equal(data_padded4[:, :, 10:40, 10:40], data, err_msg="data wrongly padded")
np.testing.assert_array_equal(seg_padded4[:, :, 10:40, 10:40], seg, err_msg="seg wrongly padded")
self.assertAlmostEqual(np.sum(data_padded4.flatten()), np.sum(data.flatten()), msg="Padding of data is not zero")
self.assertAlmostEqual(np.sum(seg_padded4.flatten()), np.sum(seg.flatten()), msg="Padding of data is not zero")
print('Zero padding to odd padding dimensions. [DONE]')
print('Zero padding with new_shape.shape smaller than data.shape. [START]')
self.assertTrue(all(i == j for i, j in zip(expected_shape5, data_padded5.shape)), "data has unexpected shape")
self.assertTrue(all(i == j for i, j in zip(expected_shape5, seg_padded5.shape)), "seg has unexpected shape")
np.testing.assert_array_equal(data_padded5[:, :, 10:40, 10:40], data, err_msg="data wrongly padded")
np.testing.assert_array_equal(seg_padded5[:, :, 10:40, 10:40], seg, err_msg="seg wrongly padded")
self.assertAlmostEqual(np.sum(data_padded5.flatten()), np.sum(data.flatten()), msg="Padding of data is not zero")
self.assertAlmostEqual(np.sum(seg_padded5.flatten()), np.sum(seg.flatten()), msg="Padding of data is not zero")
print('Zero padding with new_shape.shape smaller than data.shape. [DONE]')
print('Zero padding with new_shape.shape bigger than data.shape. [START]')
self.assertRaises(IndexError, pad_nd_image_and_seg, data, seg, new_shape=new_shape6)
print('Zero padding with new_shape.shape bigger than data.shape. [DONE]')
print('Padding to bigger output shape in all dimensions with constant_value=1 for segmentation padding . [START]')
self.assertTrue(all(i == j for i, j in zip(new_shape, data_padded.shape)), "padded data has unexpected shape")
self.assertTrue(all(i == j for i, j in zip(new_shape, seg_padded.shape)), "padded seg has unexpected shape")
np.testing.assert_array_equal(data_padded7[:, :, :, 10:40], data, err_msg="data wrongly padded")
np.testing.assert_array_equal(seg_padded7[:, :, :, 10:40], seg, err_msg="seg wrongly padded")
self.assertAlmostEqual(np.sum(data_padded7.flatten()), np.sum(data.flatten()), msg="Padding of data is not zero")
self.assertAlmostEqual(np.sum(seg_padded7.flatten()), np.sum(seg.flatten()) + number_of_padded_ones7, msg="Padding of data is not one")
print('Padding to bigger output shape in all dimensions with constant_value=1 for segmentation padding . [DONE]')
print('Test test_pad_nd_image_and_seg_2D. [DONE]')
def test_pad_nd_image_and_seg_3D(self):
print('Test test_pad_nd_image_and_seg_3D. [START]')
input_shape = (5, 5, 30, 30, 30)
data = np.random.random(input_shape)
seg = np.random.random(data.shape)
new_shape = (15, 15, 50, 50, 50)
new_shape2 = (4, 2, 10, 10, 10)
new_shape3 = (4, 2, 50, 50, 50)
new_shape4 = (5, 5, 51, 51, 49)
new_shape5 = (50, 50)
new_shape6 = (2, 5, 5, 50, 50, 50)
new_shape7 = (5, 5, 30, 30, 50)
expected_shape3 = (5, 5, 50, 50, 50)
expected_shape5 = (5, 5, 30, 50, 50)
number_of_padded_ones7 = np.prod(new_shape7) - np.prod(input_shape)
data_padded, seg_padded = pad_nd_image_and_seg(data, seg, new_shape=new_shape)
data_padded2, seg_padded2 = pad_nd_image_and_seg(data, seg, new_shape=new_shape2)
data_padded3, seg_padded3 = pad_nd_image_and_seg(data, seg, new_shape=new_shape3)
data_padded4, seg_padded4 = pad_nd_image_and_seg(data, seg, new_shape=new_shape4)
data_padded5, seg_padded5 = pad_nd_image_and_seg(data, seg, new_shape=new_shape5)
data_padded7, seg_padded7 = pad_nd_image_and_seg(data, seg, new_shape=new_shape7, np_pad_kwargs_data={'constant_values': 1} , np_pad_kwargs_seg={'constant_values': 1})
print('Zero padding to bigger output shape in all dimensions. [START]')
self.assertTrue(all(i == j for i, j in zip(new_shape, data_padded.shape)), "padded data has unexpected shape")
self.assertTrue(all(i == j for i, j in zip(new_shape, seg_padded.shape)), "padded seg has unexpected shape")
np.testing.assert_array_equal(data_padded[5:10, 5:10, 10:40, 10:40, 10:40], data, err_msg="data wrongly padded")
np.testing.assert_array_equal(seg_padded[5:10, 5:10, 10:40, 10:40, 10:40], seg, err_msg="seg wrongly padded")
self.assertAlmostEqual(np.sum(data_padded.flatten()), np.sum(data.flatten()), msg="Padding of data is not zero")
self.assertAlmostEqual(np.sum(seg_padded.flatten()), np.sum(seg.flatten()), msg="Padding of data is not zero")
print('Zero padding to bigger output shape in all dimensions. [DONE]')
print('Zero padding to smaller output shape in all dimensions. [START]')
self.assertTrue(all(i == j for i, j in zip(input_shape, data_padded2.shape)), "padded data has unexpected shape")
self.assertTrue(all(i == j for i, j in zip(input_shape, seg_padded2.shape)), "padded seg has unexpected shape")
np.testing.assert_array_equal(data_padded2, data, err_msg="data wrongly padded for smaller output shape than input shape")
| np.testing.assert_array_equal(seg_padded2, seg, err_msg="seg wrongly padded for smaller output shape than input shape") | numpy.testing.assert_array_equal |
import os
import argparse
import math
import numpy as np
import pandas as pd
import json
from tqdm import tqdm
import scipy.sparse as sp
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.nn.functional as F
from sklearn.metrics import f1_score
import networkx as nx
import dgl
import dgl.function as fn
import dgl.nn.pytorch as dglnn
from dgl.nn.pytorch import edge_softmax
from models import HAMP as Model
# note: there may still be some variability
torch.manual_seed(8)
np.random.seed(8)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import warnings
warnings.filterwarnings('ignore')
parser = argparse.ArgumentParser()
parser.add_argument('--task', default='screen_genre_class', type=str, help='selected task: screen_genre_class or element_comp_class')
parser.add_argument('--n_epochs', default=3000, type=int, help='number of epochs')
args = parser.parse_args()
# paths
home_dir = Path(os.getcwd())
version_dir = 'rico_n'
main_data_dir = home_dir/'data'
data_dir = home_dir/'data'/version_dir
# load data - features
app2ui_edgelist = pd.read_hdf(data_dir/'app2ui_edgelist.h5', key='edgelist')
ui2class_edgelist = pd.read_hdf(data_dir/'ui2class_edgelist.h5', key='edgelist')
class2element_edgelist = pd.read_hdf(data_dir/'class2element_edgelist.h5', key='edgelist')
element2element_edgelist = pd.read_hdf(data_dir/'element2element_edgelist.h5', key='edgelist')
app_description_features_df = pd.read_hdf(data_dir/'app_description_features.h5', key='features')
ui_image_features = pd.read_hdf(data_dir/'ui_image_features.h5', key='features')
ui_pos_features_df = pd.read_hdf(data_dir/'ui_position_features.h5', key='features')
class_name_features = pd.read_hdf(data_dir/'charngram_features.h5', key='features')
element_spatial_features_df = pd.read_hdf(data_dir/'spatial_features.h5', key='features')
element_image_features_df = pd.read_hdf(data_dir/'element_image_features.h5', key='features')
# load labels
comp_labels = pd.read_hdf(data_dir/'comp_labels.h5', key='labels')
genre_labels = pd.read_hdf(data_dir/'genre_labels.h5', key='labels')
# process edgelists
e2e_num_edges = len(element2element_edgelist)
e2e_adj_row = list(element2element_edgelist.target_element_encoded)
e2e_adj_col = list(element2element_edgelist.source_element_encoded)
e2e_num = len(element2element_edgelist.target_element_encoded.unique())
e2e_adj = sp.csc_matrix((np.ones(e2e_num_edges), (e2e_adj_row, e2e_adj_col)), shape=(e2e_num, e2e_num))
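# Added note: sp.csc_matrix((data, (row, col)), shape=...) builds a sparse matrix from COO-style
# triplets; for example (illustrative values only):
#   sp.csc_matrix((np.ones(3), ([0, 1, 2], [1, 2, 0])), shape=(3, 3)).toarray()
#   # -> [[0., 1., 0.],
#   #     [0., 0., 1.],
#   #     [1., 0., 0.]]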
e2c_num_edges = len(class2element_edgelist)
e2c_adj_row = list(class2element_edgelist.target_element_encoded)
e2c_adj_col = list(class2element_edgelist.class_name_encoded)
e2c_num_row = len(class2element_edgelist.target_element_encoded.unique())
e2c_num_col = len(class2element_edgelist.class_name_encoded.unique())
e2c_adj = sp.csc_matrix(( | np.ones(e2c_num_edges) | numpy.ones |
# -*- coding: utf-8 -*-
from functools import reduce
from itertools import zip_longest
from math import ceil
from math import floor
from math import log
from scipy import ndimage
import numpy as np
def morton_array(shape):
"""
Return array with Morton numbers.
Inspired by:
https://graphics.stanford.edu/%7Eseander/bithacks.html#InterleaveBMN
"""
# determine the number of dimensions
ndims = len(shape)
# 1d compatibility
if ndims == 1:
return np.arange(shape[0])
def bitcount(number):
""" Return amount of bits used for in number """
return int(ceil(log(number + 1, 2)))
# feasibility check
for i, j in enumerate(shape):
# bit number assessment
count = bitcount(j) # in the number
count += (ndims - 1) * (count - 1) # after spacing
count += (ndims - 1) - i # after shifting
# numpy does not go higher than 64 bits currently
if count > 64:
raise ValueError('Too many bits needed for the computation')
# generate list of zeros and masks
ones = 1
masks = []
shifts = []
pos = range(63, -1, -1)
bmax = max(map(bitcount, shape))
while ones < bmax:
zeros = (ndims - 1) * ones
shifts.append(zeros)
period = ones + zeros
masks.append(
int(''.join('1' if i % period < ones else '0' for i in pos), 2),
)
ones *= 2
# make indices and space them
indices = [np.uint64(k) for k in np.ogrid[tuple(map(slice, shape))]]
for i, (j, k) in enumerate(zip(shape, indices)):
if j < 2:
continue
if j > 2:
start = int(floor(log(bitcount(j) - 1, 2)))
else:
start = 0
for n in range(start, -1, -1):
k[:] = (k | k << shifts[n]) & masks[n]
k <<= (ndims - 1) - i
return reduce(np.bitwise_or, indices)
def get_morton_lut(array, no_data_value):
"""
Return lookup table to rearrange an array of ints in morton order.
:param array: 2D int array with a range of integers from 0 to no_data_value
:param no_data_value: no data value that is excluded from rearrangement.
The no_data_value does not have to be present in the array, but if it is,
it does not get reordered by the lookup table (lut):
lut[no_data_value] == no_data_value
"""
# morton variables have underscores
_array = morton_array(array.shape)
_no_data_value = _array.max().item() + 1
# make lookup from node to morton number
index = np.arange(no_data_value + 1)
lut1 = ndimage.minimum(_array, labels=array, index=index)
lut1[no_data_value] = _no_data_value
# make lookup from morton number back to node numbers
lut2 = np.empty(_no_data_value + 1, dtype='i8')
lut2[np.sort(lut1)] = index
lut2[_no_data_value] = no_data_value
# return the combined lookup table
return lut2[lut1]
def group(array):
"""
Return generator of arrays of indices to equal values.
"""
order = array.argsort()
_, index = np.unique(array[order], return_index=True)
for start, stop in zip_longest(index, index[1:]):
yield order[start:stop]
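# Added note: each yielded item is an index array selecting one distinct value, e.g.
#   list(group(np.array([3, 1, 3, 2]))) -> [array([1]), array([3]), array([0, 2])]
# i.e. the indices of value 1, of value 2 and of value 3 respectively.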
def analyze(x, y):
""" Return (x_step, y_step) tuple.
Return the smallest separation between points in the x-direction for points
with the same y-coordinates and vice versa. That reveals the highest
refinement level of the quadtree structure.
"""
assert x.dtype == float
assert y.dtype == float
init = {'initial': np.inf}
xs = min(np.diff(np.sort(x[i])).min(**init) for i in group(y))
ys = min(np.diff(np.sort(y[i])).min(**init) for i in group(x))
return None if | np.isinf(xs) | numpy.isinf |
# Test the denoising autoencoder on noise-corrupted images
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.utils import save_image
from torchvision.datasets import MNIST
import glob
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import os
import cv2
import pySQI
import pyGTemplate
import testAAE
import time
import region
import scipy.io as scio
# import Imgprocessing
class AEGenerator(nn.Module):
def __init__(self):
super(AEGenerator, self).__init__()
self.encoder = nn.Sequential(
nn.Conv2d(1,32, 5, stride=2, padding=2),
nn.ReLU(True),# 32*128*128
nn.Conv2d(32,32, 5, stride=2, padding=2),
nn.ReLU(True),# 32*64*64
nn.Conv2d(32,64, 5, stride=2, padding=2),
nn.ReLU(True),# 64*32*32
nn.Conv2d(64,64, 5, stride=2, padding=2),
nn.ReLU(True),# 64*16*16
nn.Conv2d(64,128, 5, stride=2, padding=2),
nn.ReLU(True)# 128*8*8
)
self.fc1 = nn.Sequential(
nn.Linear(128*8*8, 128),
nn.ReLU(True)
)
self.fc2 = nn.Sequential(
nn.Linear(128, 128 * 8 * 8),
nn.ReLU(True)
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(128, 64, 4, stride=2, padding=1),
nn.ReLU(True), # 64 * 16 * 16
nn.ConvTranspose2d(64, 64, 4, stride=2, padding=1),
nn.ReLU(True), # 64 * 32 * 32
nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1),
nn.ReLU(True), # 32 * 64 * 64
nn.ConvTranspose2d(32, 32, 4, stride=2, padding=1),
nn.ReLU(True), # 32 * 128 * 128
nn.ConvTranspose2d(32, 1, 4, stride=2, padding=1),
nn.Sigmoid() # 1 * 256 * 256
)
def forward(self, x):
x = self.encoder(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.fc2(x)
x = x.view(x.size(0), 128, 8, 8)
x = self.decoder(x)
return x
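# Added note on tensor shapes (for a 1 x 256 x 256 input): the five stride-2 convolutions reduce the
# input to 128 x 8 x 8, which is flattened to 8192, bottlenecked to 128 by fc1, expanded back by fc2
# and reshaped before the five ConvTranspose2d layers restore a 1 x 256 x 256 output.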
def preprocessing(total_num, sample_id, threshold, exposure, write_flag):
sobel_mask_vect = []
src_vect = []
sobel_x =np.array([[-1, 0, 1],[-1, 0, 1],[-1, 0, 1]], dtype=np.float32)
sobel_y =np.array([[1, 1, 1],[0, 0, 0],[-1, -1, -1]], dtype=np.float32)
new_img = np.zeros((256,256), np.uint8)
for pic_num in range(1, total_num):
if write_flag:
src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.jpg'
output_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
IN_src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'SQI' + '/' + '{:02d}'.format(pic_num) + '.png'
# output_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'TT' + '/' + '{:02d}'.format(pic_num) + '.png'
# region_file = './roi/region_' + str(pic_num) + '.png'
# print(src_file)
img = cv2.imread(src_file)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
m,n = img.shape
img = img[0:n]
new_img[3:253,3:253] = img
cv2.imwrite(output_file, new_img)
new_img_copy = new_img.copy()
# IN_img = cv2.imread(IN_src_file)
# IN_img = cv2.cvtColor(IN_img, cv2.COLOR_BGR2GRAY)
# src_vect.append(IN_img)
else:
src_file = '../Dataset/defect_img/{:02}.png'.format(pic_num)
# src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '/' + str(pic_num) + '.png'
# IN_src_file = '../data/sample_' + str(sample_id) + '/{:03d}'.format(exposure) + '_IN/' + 'SQI' + '/' + '{:02d}'.format(pic_num) + '.png'
new_img = cv2.imread(src_file)
new_img = cv2.cvtColor(new_img,cv2.COLOR_BGR2GRAY)
# IN_img = cv2.imread(IN_src_file)
# IN_img = cv2.cvtColor(IN_img, cv2.COLOR_BGR2GRAY)
# src_vect.append(IN_img)
sobel_mag = np.zeros(new_img.shape, np.float)
# sobel_angle = np.zeros(new_img.shape, np.float)
# quantized_angle = np.zeros(new_img.shape, np.uint8)
sobel_mask = np.zeros(new_img.shape, np.uint8)
# img_Guassian = cv2.GaussianBlur(new_img,(5,5),0)
# img_Guassian.astype(np.uint8)
# m,n = img_Guassian.shape
# m,n = new_img.shape
# for i in range(2,m-1):
# for j in range(2,n-1):
# Gx = np.sum(new_img[i-1:i+2, j-1:j+2] * sobel_x)
# Gy = np.sum(new_img[i-1:i+2, j-1:j+2] * sobel_y)
# sobel_mag[i,j] = math.sqrt(math.pow(Gx,2) + math.pow(Gy,2))
# sobel_angle[i,j] = math.atan2(Gy, Gx) * 180 / math.pi
# # quantized_angle[i,j] = quantizeAngle(sobel_angle[i,j])
# if sobel_mag[i,j] >= threshold:
# sobel_mask[i,j] = 1
# contour = angleFilter(sobel_mask, quantized_angle)
# contour = cv2.blur(contour, (3,3))
# sobelx = cv2.Sobel(new_img,cv2.CV_32F,1,0) #默认ksize=3
# sobely = cv2.Sobel(new_img,cv2.CV_32F,0,1)
sobelx = cv2.filter2D(new_img, cv2.CV_32F, sobel_x)
sobely = cv2.filter2D(new_img, cv2.CV_32F, sobel_y)
sobel_mag = np.sqrt(pow(sobelx,2) + pow(sobely,2))
# sobel_angle = np.arctan2(sobely,sobelx) * 180 /math.pi
sobel_mag = cv2.convertScaleAbs(sobel_mag)
_, sobel_mask = cv2.threshold(sobel_mag, threshold, 255, 0)
# contour = angleFilter(sobel_mask, sobel_angle)
# contour = cv2.blur(contour, (3,3))
# sobel_mask = cv2.blur(sobel_mask, (3,3))
# contour_vect.append(contour)
# cv2.imshow('sobel', sobel_mask)
# cv2.waitKey(0)
sobel_mask_vect.append(sobel_mask)
return sobel_mask_vect
def Contour_extraction(img_files, model):
width = 256
height = 256
x_truth = np.reshape(img_files, (len(img_files), width, height, 1)) # adapt this if using `channels_first` image data format
# first add an extra dimension
# user_emb_dims = np.expand_dims(self.user_emb, axis=0)
# user_emb_dims.shape
x_test = x_truth
x_truth = np.array(x_truth)
x_truth = x_truth.astype('float32') / 255.
x_test = np.array(x_test)
x_test = x_test.astype('float32') / 255.
x_truth = np.reshape(x_truth, (len(x_truth),1, width, height)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test),1, width, height)) # adapt this if using `channels_first` image data format
batch_test=torch.Tensor(x_test)
img = Variable(batch_test).cuda()
# ===================forward=====================
output = model(img)
output_imgs = output.cpu().data.numpy()
noise_imgs = img.cpu().data.numpy()
output_imgs = output_imgs * 255
output_imgs = output_imgs.transpose(0,2,3,1)
noise_imgs = noise_imgs * 255
noise_imgs = noise_imgs.transpose(0,2,3,1)
contours = []
for i,singleimg in enumerate(output_imgs):
_,singleimg = cv2.threshold(singleimg, 170, 255, 0)
contours.append(singleimg)
return contours
def Template_method():
total_num = 28
foldernum = 11
# sample_id = 0
threshold = 160
# exposure = 6
# write_flag = False
evaluate_flag = False
extract_CF = False
# W = 30
# H = 20
Wh = 0.3
Wl = 0.5
Wh_vect = np.array([Wh])
Wl_vect = np.array([Wl])
if(evaluate_flag):
Wh_vect = | np.linspace(0.1,0.9, 85, endpoint= True) | numpy.linspace |
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
from model import CycleGAN
from imageio import imread, imsave
import glob
import os
# intermediate output file
image_file = 'face.jpg'
W = 256
# 4 rows by 5 columns
result = np.zeros((4 * W, 5 * W, 3))
for gender in ['male', 'female']:
if gender == 'male':
images = glob.glob('../data/male/*.jpg')
model = '../pretrained/male2female.pb'
r = 0 # first two rows: one row of original male images, one row of converted (male-to-female) results
else:
images = glob.glob('../data/female/*.jpg')
model = '../pretrained/female2male.pb'
r = 2 # last two rows: one row of original female images, one row of converted (female-to-male) results
graph = tf.Graph()
with graph.as_default():
graph_def = tf.GraphDef()
with tf.gfile.FastGFile(model, 'rb') as model_file:
graph_def.ParseFromString(model_file.read())
tf.import_graph_def(graph_def, name='')
with tf.Session(graph=graph) as sess:
input_tensor = graph.get_tensor_by_name('input_image:0')
output_tensor = graph.get_tensor_by_name('output_image:0')
for i, image in enumerate(images):
image = imread(image)
# at this point output is a byte string (an encoded image)
output = sess.run(output_tensor, feed_dict={input_tensor: image})
with open(image_file, 'wb') as f:
f.write(output)
# at this point output is an ndarray
output = imread(image_file)
maxv = np.max(output)
minv = | np.min(output) | numpy.min |
# -*- coding: utf-8 -*-
import numpy as np
"""
HMM
问题1:概率计算方法:给定λ=(A,B,π)和观测序列O,求P(O|λ)
直接计算法:
按照概率公式,列举所有可能的长度为T的状态序列,求各状态序列与观测序列的联合概率,
对所有可能的状态序列的联合概率求和。这是一棵深度为T,各节点的子节点为所有隐藏状态的完整树。
可写成DFS的遍历或递归。
"""
class HMM:
def __init__(self,A=None,B=None,π=None):
self.A = A
self.B = B
self.π = π
self.N = len(π) ## number of hidden states
def forward(self,O,record = False):
α = self.π*self.B[:,O[0]]
α_T = [α.tolist()]
for t in range(1,len(O)):
α = α.dot(self.A)*self.B[:,O[t]]
if record: α_T.append(α.tolist())
return np.array(α_T) if record else α.sum()
def backward(self,O,record = False):
β = np.ones_like(self.π,dtype=float)
β_T = [β.tolist()]
for t in range(len(O)-2,-1,-1):
β = np.dot(self.A*self.B[:,O[t+1]],β)
if record: β_T.append(β.tolist())
return np.array(β_T[::-1]) if record else np.dot(self.π*self.B[:,O[0]],β)
def em_fit(self,O,N,maxiter=50): ## O: observation sequence, N: number of hidden states
V = np.unique(O)
self.A = np.ones([N,N])/N
self.B = np.ones([N,len(V)])/len(V)
self.π = np.random.sample(N)
self.π /= self.π.sum()
self.p = [0]
T_V = (O[:,None]==V).astype(int) ## one-hot matrix with T rows and |V| columns
while len(self.p)<=maxiter:
## e_step: under the current parameters, compute the quantities needed where the derivative of the Q function is zero
T_α = self.forward(O, record = True)
T_β = self.backward(O, record = True)
## m_step: update the Q-function parameters with their closed-form solutions, using the e-step quantities
T_αβ = T_α*T_β
self.A *= T_α[:-1].T.dot(T_β[1:]*self.B[:,O[1:]].T)/T_αβ[:-1].sum(0)[:,None]
self.B = T_αβ.T.dot(T_V) / T_αβ.sum(0)[:,None]
self.π = T_αβ[0] / T_αβ[0].sum(0)
## record the probability of O under the current λ
self.p.append(T_αβ[0].sum())
return 'train done!'
def dp_pred(self,O):
'''dp array definition: dp[t, i] is the maximum probability over state sequences of steps 1..t that end in state i at time t.
Recurrence: dp[t, i] = max(dp[t-1, :] * A[:, i]) * B[i, O[t]]
'''
dp = np.zeros((len(O),self.N))
dp[0] = self.π*self.B[:,O[0]]
for i in range(1,len(O)):
tmp = dp[i-1,:,None]*self.A
dp[i-1] = np.argmax(tmp,axis=0) ## record Ψ (the backpointers)
dp[i] = | np.max(tmp,axis=0) | numpy.max |
# -*- coding: utf-8 -*-
import numpy as np
import cvxpy as cp
class PenaltyConvexConcaveProcedure():
def __init__(self, model, Q0, Q1, q, c, A0_i, A1_i, b_i, r_i, mad=None):
self.model = model
self.mad = mad
self.Q0 = Q0
self.Q1 = Q1
self.q = q
self.c = c
self.A0s = A0_i
self.A1s = A1_i
self.bs = b_i
self.rs = r_i
self.dim = None
if not(len(self.A0s) == len(self.A1s) and len(self.A0s) == len(self.bs) and len(self.rs) == len(self.bs)):
raise ValueError("Inconsistent number of constraint parameters")
def _solve(self, prob):
prob.solve(solver=cp.SCS, verbose=False)
def solve_aux(self, xcf, tao, x_orig):
try:
self.dim = x_orig.shape[0]
# Variables
x = cp.Variable(self.dim)
beta = cp.Variable(self.dim)
s = cp.Variable(len(self.A0s))
# Constants
s_z = np.zeros(len(self.A0s))
s_c = np.ones(len(self.A0s))
z = np.zeros(self.dim)
c = np.ones(self.dim)
I = np.eye(self.dim)
# Build constraints
constraints = []
for i in range(len(self.A0s)):
A = cp.quad_form(x, self.A0s[i])
q = x.T @ self.bs[i]
c = self.rs[i] + np.dot(xcf, | np.dot(xcf, self.A1s[i]) | numpy.dot |
#coding=utf8
import os
import numpy as np
import pickle
# from lib.net.point_rcnn import PointRCNN
# from lib.datasets.mada_rcnn_dataset import MadaRCNNDataset
# import tools.train_utils.train_utils as train_utils
# from lib.utils.bbox_transform import decode_bbox_target
# from tools.kitti_object_eval_python.visualize_common import VisualizePcd, quaternion_from_euler
# from lib.config import cfg, cfg_from_file, save_config_to_file, cfg_from_list
# import lib.utils.kitti_utils as kitti_utils
# import lib.utils.iou3d.iou3d_utils as iou3d_utils
import logging
import math
import re
import glob
import time
import rospy
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2 as pc2
from std_msgs.msg import Header
from jsk_recognition_msgs.msg import BoundingBox, BoundingBoxArray
from jsk_rviz_plugins.msg import Pictogram,PictogramArray
import sys
from pynput.keyboard import Controller, Key, Listener
from pynput import keyboard
import json
# import struct
FIXED_FRAME = 'pandar'
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# code from /opt/ros/kinetic/lib/python2.7/dist-packages/tf/transformations.py
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> np.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
# print("ak : {}".format(type(ak)))
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
quaternion = np.empty((4, ), dtype=np.float64)
if repetition:
quaternion[i] = cj*(cs + sc)
quaternion[j] = sj*(cc + ss)
quaternion[k] = sj*(cs - sc)
quaternion[3] = cj*(cc - ss)
else:
quaternion[i] = cj*sc - sj*cs
quaternion[j] = cj*ss + sj*cc
quaternion[k] = cj*cs - sj*sc
quaternion[3] = cj*cc + sj*ss
if parity:
quaternion[j] *= -1
return quaternion
# /velodyne_points topic's subscriber callback function
# publishing function for DEBUG
def publish_test(np_p_ranged, frame_id):
header = Header()
header.stamp = rospy.Time()
header.frame_id = frame_id
x = np_p_ranged[:, 0].reshape(-1)
y = np_p_ranged[:, 1].reshape(-1)
z = np_p_ranged[:, 2].reshape(-1)
# if intensity field exists
if np_p_ranged.shape[1] == 4:
i = np_p_ranged[:, 3].reshape(-1)
else:
i = np.zeros((np_p_ranged.shape[0], 1)).reshape(-1)
cloud = np.stack((x, y, z, i))
# point cloud segments
# 4 PointFields as channel description
msg_segment = pc2.create_cloud(header=header,
fields=_make_point_field(4),
points=cloud.T)
# publish to /velodyne_points_modified
point_pub.publish(msg_segment) # DEBUG
# code from SqueezeSeg (inspired from Durant35)
def hv_in_range(x, y, z, fov, fov_type='h'):
"""
Extract filtered in-range velodyne coordinates based on azimuth & elevation angle limit
Args:
`x`:velodyne points x array
`y`:velodyne points y array
`z`:velodyne points z array
`fov`:a two element list, e.g.[-45,45]
`fov_type`:the fov type, could be `h` or `v`, default is `h`
Return:
`cond`:condition of points within fov or not
Raise:
`NameError`:"fov type must be set between 'h' and 'v' "
"""
d = np.sqrt(x ** 2 + y ** 2 + z ** 2)
if fov_type == 'h':
return np.logical_and(np.arctan2(y, x) > (-fov[1] * np.pi/180), np.arctan2(y, x) < (-fov[0] * np.pi/180))
elif fov_type == 'v':
return np.logical_and(np.arctan2(z, d) < (fov[1] * np.pi / 180), np.arctan2(z, d) > (fov[0] * np.pi / 180))
else:
raise NameError("fov type must be set between 'h' and 'v' ")
def _make_point_field(num_field):
msg_pf1 = pc2.PointField()
msg_pf1.name = np.str('x')
msg_pf1.offset = np.uint32(0)
msg_pf1.datatype = np.uint8(7)
msg_pf1.count = np.uint32(1)
msg_pf2 = pc2.PointField()
msg_pf2.name = np.str('y')
msg_pf2.offset = np.uint32(4)
msg_pf2.datatype = np.uint8(7)
msg_pf2.count = np.uint32(1)
msg_pf3 = pc2.PointField()
msg_pf3.name = np.str('z')
msg_pf3.offset = np.uint32(8)
msg_pf3.datatype = np.uint8(7)
msg_pf3.count = np.uint32(1)
msg_pf4 = pc2.PointField()
msg_pf4.name = | np.str('intensity') | numpy.str |
"""CM3 algorithm for Checkers environment.
Same algorithm as alg_credit.py, except that Checkers global state is defined
by three parts (s_env, s^n, s^{-n}) instead of just (s^n, s^{-n})
"""
import numpy as np
import tensorflow.compat.v1 as tf
import sys
import networks
class Alg(object):
def __init__(self, experiment, dimensions, stage=1, n_agents=1,
tau=0.01, lr_V=0.001, lr_Q=0.001,
lr_actor=0.0001, gamma=0.99, use_Q_credit=1,
use_V=0, nn={}):
"""
Same as alg_credit. Checkers global state has two parts
Inputs:
experiment - string
dimensions - dictionary containing tensor dimensions
(h,w,c) for tensor
l for 1D vector
stage - 1: Q_global and actor, does not use Q_credit
2: Q_global, actor and Q_credit
tau - target variable update rate
lr_V, lr_Q, lr_actor - learning rates for optimizer
gamma - discount factor
use_Q_credit - if 1, activates Q_credit network for use in policy gradient
use_V - if 1, uses V_n(s) as the baseline in the policy gradient (this is an ablation)
nn : neural net architecture parameters
"""
self.experiment = experiment
if self.experiment == "checkers":
# Global state
self.rows_state = dimensions['rows_state']
self.columns_state = dimensions['columns_state']
self.channels_state = dimensions['channels_state']
self.l_state = n_agents * dimensions['l_state_one']
self.l_state_one_agent = dimensions['l_state_one']
self.l_state_other_agents = (n_agents-1) * dimensions['l_state_one']
# Agent observations
self.l_obs_others = dimensions['l_obs_others']
self.l_obs_self = dimensions['l_obs_self']
# Dimensions for image input
self.rows_obs = dimensions['rows_obs']
self.columns_obs = dimensions['columns_obs']
self.channels_obs = dimensions['channels_obs']
# Dimension of agent's observation of itself
self.l_action = dimensions['l_action']
self.l_goal = dimensions['l_goal']
self.n_agents = n_agents
self.tau = tau
self.lr_V = lr_V
self.lr_Q = lr_Q
self.lr_actor = lr_actor
self.gamma = gamma
self.use_V = use_V
self.use_Q_credit = use_Q_credit
self.nn = nn
self.agent_labels = np.eye(self.n_agents)
self.actions = | np.eye(self.l_action) | numpy.eye |
import numpy as np
import tensorflow as tf
from model.Sample_MIL import InstanceModels, RaggedModels
from model.KerasLayers import Losses, Metrics
from model import DatasetsUtils
from sklearn.model_selection import StratifiedShuffleSplit, StratifiedKFold
import pickle
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[-1], True)
tf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')
import pathlib
path = pathlib.Path.cwd()
if path.stem == 'ATGC2':
cwd = path
else:
cwd = list(path.parents)[::-1][path.parts.index('ATGC2')]
import sys
sys.path.append(str(cwd))
D, samples, maf, sample_df = pickle.load(open(cwd / 'figures' / 'tmb' / 'pcawg' / 'DFCI_ONCO' / 'data' / 'data_all.pkl', 'rb'))
panels = pickle.load(open(cwd / 'files' / 'pcawg_panel_table.pkl', 'rb'))
strand_emb_mat = np.concatenate([np.zeros(2)[np.newaxis, :], np.diag(np.ones(2))], axis=0)
D['strand_emb'] = strand_emb_mat[D['strand'].astype(int)]
chr_emb_mat = np.concatenate([np.zeros(24)[np.newaxis, :], np.diag(np.ones(24))], axis=0)
D['chr_emb'] = chr_emb_mat[D['chr']]
frame_emb_mat = np.concatenate([np.zeros(3)[np.newaxis, :], np.diag(np.ones(3))], axis=0)
D['cds_emb'] = frame_emb_mat[D['cds'].astype(int)]
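# Added note: each *_emb_mat prepends an all-zero row to an identity matrix, so index 0 maps to a zero
# vector ("missing") and index k > 0 maps to the one-hot vector of category k, e.g.
#   frame_emb_mat -> [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]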
##bin position
def pos_one_hot(pos):
one_pos = int(pos * 100)
return one_pos, (pos * 100) - one_pos
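# Added note: pos_one_hot splits a fractional position into an integer bin plus a remainder, e.g.
#   pos_one_hot(0.123) -> (12, ~0.30)   (floating-point rounding makes the remainder approximate)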
result = | np.apply_along_axis(pos_one_hot, -1, D['pos_float'][:, np.newaxis]) | numpy.apply_along_axis |
from unittest import TestCase
import numpy as np
from cate.util.im.geoextent import GeoExtent
# noinspection PyMethodMayBeStatic
class GeoRectTest(TestCase):
def test_init(self):
rect = GeoExtent()
self.assertEqual(rect.west, -180.)
self.assertEqual(rect.south, -90.)
self.assertEqual(rect.east, 180.)
self.assertEqual(rect.north, 90.)
self.assertEqual(rect.inv_y, False)
self.assertEqual(rect.eps, 1e-04)
self.assertEqual(str(rect), '-180.0, -90.0, 180.0, 90.0')
self.assertEqual(repr(rect), 'GeoExtend()')
self.assertEqual(rect.crosses_antimeridian, False)
with self.assertRaises(ValueError):
GeoExtent(west=-180.1)
with self.assertRaises(ValueError):
GeoExtent(south=-90.1)
with self.assertRaises(ValueError):
GeoExtent(east=180.1)
with self.assertRaises(ValueError):
GeoExtent(north=90.1)
with self.assertRaises(ValueError):
GeoExtent(west=20, east=20 + 0.9e-6)
with self.assertRaises(ValueError):
GeoExtent(south=20, north=20 + 0.9e-6)
with self.assertRaises(ValueError):
GeoExtent(south=21, north=20)
def test_repr(self):
self.assertEqual(repr(GeoExtent()), 'GeoExtend()')
self.assertEqual(repr(GeoExtent(west=43.2)), 'GeoExtend(west=43.2)')
self.assertEqual(repr(GeoExtent(south=43.2)), 'GeoExtend(south=43.2)')
self.assertEqual(repr(GeoExtent(east=43.2)), 'GeoExtend(east=43.2)')
self.assertEqual(repr(GeoExtent(north=43.2)), 'GeoExtend(north=43.2)')
self.assertEqual(repr(GeoExtent(inv_y=True)), 'GeoExtend(inv_y=True)')
self.assertEqual(repr(GeoExtent(inv_y=False, eps=0.001)), 'GeoExtend(eps=0.001)')
self.assertEqual(repr(GeoExtent(12.5, 43.2, 180.0, 64.1, inv_y=True)),
'GeoExtend(west=12.5, south=43.2, north=64.1, inv_y=True)')
def test_crosses_antimeridian(self):
self.assertEqual(GeoExtent(west=170., east=-160.).crosses_antimeridian, True)
self.assertEqual(GeoExtent(west=-170., east=160.).crosses_antimeridian, False)
def test_from_coord_arrays(self):
rect = GeoExtent.from_coord_arrays(np.array([1, 2, 3, 4, 5, 6]), np.array([1, 2, 3]))
np.testing.assert_almost_equal(np.array(rect.coords), np.array((0.5, 0.5, 6.5, 3.5)))
self.assertEqual(rect.inv_y, True)
rect = GeoExtent.from_coord_arrays(np.array([1, 2, 3, 4, 5, 6]), np.array([3, 2, 1]))
np.testing.assert_almost_equal(np.array(rect.coords), np.array((0.5, 0.5, 6.5, 3.5)))
self.assertEqual(rect.inv_y, False)
rect = GeoExtent.from_coord_arrays(np.array([-3, -2, -1, 0, 1, 2]), np.array([3, 2, 1]))
np.testing.assert_almost_equal(np.array(rect.coords), np.array((-3.5, 0.5, 2.5, 3.5)))
self.assertEqual(rect.inv_y, False)
rect = GeoExtent.from_coord_arrays(np.array([177, 178, 179, -180, -179, -178]), np.array([3, 2, 1]))
np.testing.assert_almost_equal(np.array(rect.coords), np.array((176.5, 0.5, -177.5, 3.5)))
self.assertEqual(rect.inv_y, False)
rect = GeoExtent.from_coord_arrays(np.array([-150., -90., -30., 30., 90., 150.]), np.array([-60., 0., 60.]))
np.testing.assert_almost_equal(np.array(rect.coords), np.array((-180.0, -90.0, 180.0, 90.0)))
self.assertEqual(rect.inv_y, True)
rect = GeoExtent.from_coord_arrays( | np.array([-150., -90., -30., 30., 90., 150.]) | numpy.array |
'''<NAME> 11/10/2019 <NAME>'''
# coding=utf-8
import numpy as np
import scipy.sparse as sparse
from math import cos, sin
class powerflow:
'''
'''
def __init__(self, filename=''):
with open(filename) as cdf:
# Read the file until the BUS DATA section is reached
words = ['', ]
while words[0] != 'BUS':
line = cdf.readline()
words = line.split(' ')
words = [item for item in words if item] # drop the empty elements
# Read the number of buses from the 4th column
self.n = int(words[3])
n = self.n
# Create the n x n complex admittance matrix Ybus, initialised to zero
self.Ybus = np.zeros((n, n), dtype=np.complex128)
# Create the vectors holding the variables at each bus:
self.load = np.zeros(n, dtype=np.complex128) # P + jQ
self.generation = np.zeros(n, dtype=np.complex128) # P + jQ
self.voltage = np.zeros(n, dtype=np.float) # V (per unit)
self.angle = np.zeros(n, dtype=np.float) # Angle in degrees
self.PV_buses = np.array((0,2), dtype=int) # Holds the index of each PV bus and its voltage setpoint
self.Q_inj = np.zeros(n, dtype=np.float64)
self.P_inj = np.zeros(n, dtype=np.float64)
# Initialise the swing bus index to a bus that does not exist
self.swing_bus = n+1
# Read the next n lines holding the data for each bus
for i in range(n):
line = cdf.readline()
words = line.split(' ')
words = [item for item in words if item] # drop the empty elements
self.voltage[i] = float(words[7])
self.angle[i] = np.deg2rad(float(words[8]))
self.load[i] = complex(float(words[9]), float(words[10]))
self.generation[i] = complex(float(words[11]), float(words[12]))
self.Q_inj[i] = self.generation[i].imag - self.load[i].imag
self.P_inj[i] = self.generation[i].real - self.load[i].real
# Assign the swing bus
if (int(words[6]) == 3):
self.swing_bus = i
self.swing_bus_angle = self.angle[i]
self.swing_bus_voltage = self.voltage[i]
# Neither P nor Q is specified at the swing bus, so assign zero values
self.P_inj[i] = 0
self.Q_inj[i] = 0
# PV buses
if (int(words[6]) == 2):
self.PV_buses = np.vstack((self.PV_buses, [i,float(words[14])])) # the bus index and its voltage setpoint
self.Q_inj[i] = 0 # Q is unknown at PV buses, so assign a zero value
# Read the file until the BRANCH DATA section is reached
while words[0] != 'BRANCH':
line = cdf.readline()
words = line.split(' ')
words = [item for item in words if item] # drop the empty elements
# Read the lines of the BRANCH section
while True: # exit via the break in the if below
line = cdf.readline()
words = line.split(' ')
words = [item for item in words if item] # drop the empty elements
# Leave the loop when the end of the section, marked by a -999 line, is reached
if words[0] == '-999\n':
break
i = int(words[0]) - 1
j = int(words[1]) - 1 # the branch connects bus i and bus j
self.Ybus[i, j] = self.Ybus[j, i] = -1 / complex(float(words[6]), float(
words[7])) # Assign the branch impedance R + jX
self.Ybus[i, i] = self.Ybus[j, j] = complex(0, float(
words[8])) # On the diagonal add the charging B (the shunt branch of the pi equivalent)
# Sweep the admittance matrix and add to each diagonal entry the negated sum of its row's off-diagonal terms
for i in range(0, n):
for j in range(0, n):
if j != i:
self.Ybus[i, i] += -self.Ybus[i, j]
self.init_v_theta()
#np.savetxt('Ybus.txt', self.Ybus, fmt='%+9.4f', delimiter=' ')
return
def init_v_theta(self, init_voltage=1, init_angle=0):
self.v = np.empty(self.n, dtype=np.float64)
self.theta = np.empty(self.n, dtype=np.float64)
for i in range(self.n):
self.v[i] = init_voltage
self.theta[i] = init_angle
if np.any(self.PV_buses[:,0]==i):
l = np.argwhere(self.PV_buses[:,0]==i)
self.v[i] = self.PV_buses[l[0],1]
if i == self.swing_bus:
self.theta[i] = self.swing_bus_angle
self.v[i] = self.swing_bus_voltage
def reducir(self, x):
'''Remove the rows (and columns, if x is a matrix) that correspond to the swing bus and to the Q/V entries of the PV buses'''
# Reduce a vector
if x.ndim == 1:
PV_buses_Q = self.PV_buses[:, 0] + self.n - 1
filas_a_eliminar = np.append([self.swing_bus], [self.swing_bus + self.n - 1], )
filas_a_eliminar = np.append(filas_a_eliminar, np.int32(PV_buses_Q))
return np.delete(x, filas_a_eliminar, 0)
# Reduce a matrix
else:
PV_buses_Q = self.PV_buses[:, 0] + self.n - 1
filas_a_eliminar = np.append([self.swing_bus], [self.swing_bus+self.n-1], )
filas_a_eliminar = np.append(filas_a_eliminar, np.int32(PV_buses_Q))
columnas_a_eliminar = filas_a_eliminar
x = np.delete(x, filas_a_eliminar, 0)
return np.delete(x, columnas_a_eliminar, 1)
def J(self):
'''Computa el jacobiano para un valor de tensión y ángulo dado
:parameter x: un vactor de 2*(n-1) donde n es la cantidad de nodos del sistema
:returns jacobiano: una matriz de 2(n-1) x 2(n-1)
'''
#Cuatro matrices cuadradadas que despues se unen para formar el jacobiano
J11 = np.zeros((self.n, self.n), dtype=np.float64)
J12 = np.zeros((self.n, self.n), dtype=np.float64)
J21 = np.zeros((self.n, self.n), dtype=np.float64)
J22 = np.zeros((self.n, self.n), dtype=np.float64)
for i in range(self.n):
for j in range(self.n):
# Skip the swing bus
if (i == self.swing_bus or j == self.swing_bus):
continue
# Off-diagonal elements
# ---------------------------------------------------------------------------------------------
if (i != j):
v_i = self.v[i]
v_j = self.v[j]
theta_i = self.theta[i]
theta_j = self.theta[j]
delta_theta = theta_i - theta_j
G_ij = self.Ybus[i,j].real
B_ij = self.Ybus[i,j].imag
cos_theta = cos(delta_theta)
sin_theta = sin(delta_theta)
a = v_i * v_j
b = a * G_ij
c = a * B_ij
# dP/dtheta
J11[i, j] = b * sin_theta - c * cos_theta
                    # dQ/dtheta
                    J21[i, j] = -b * cos_theta - c * sin_theta
d = v_i * G_ij
e = v_i * B_ij
# dP/dV
J12[i, j] = d * cos(delta_theta) + e * sin(delta_theta)
# dQ/dV
J22[i, j] = d * sin(delta_theta) - e * cos(delta_theta)
                # Diagonal elements
# ---------------------------------------------------------------------------------------------
else:
v_i = self.v[i]
G_ii = self.Ybus[i,i].real
B_ii = self.Ybus[i,i].imag
P_i = self.last_P[i]
Q_i = self.last_Q[i]
# dP/dtheta
J11[i, j] = - Q_i - B_ii * (v_i ** 2)
                    # dP/dV
                    J12[i, j] = P_i / v_i + G_ii * v_i
                    # dQ/dtheta
                    J21[i, j] = P_i - G_ii * (v_i ** 2)
# dQ/dV
J22[i, j] = Q_i / v_i - B_ii * v_i
# --------------------------------------------------------------------------------
        np.savetxt('jacobiano12.txt', J12, fmt='%+7.2f', delimiter=' ')
J1 = np.hstack([J11, J12])
        J2 = np.hstack([J21, J22])
        # The original is truncated here; stacking the two block rows and returning
        # the full Jacobian is the natural completion.
        return np.vstack([J1, J2])
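# Illustrative sketch (not part of the original solver): a single Newton-Raphson
# update step, J * delta = mismatch, on a tiny hypothetical 2x2 reduced system.
# All numerical values below are made up.
def _demo_newton_step():
    import numpy as np
    J = np.array([[10.0, -2.0], [-2.0, 8.0]])   # reduced Jacobian (made up)
    mismatch = np.array([0.05, -0.02])          # [dP, dQ] power mismatches (made up)
    delta = np.linalg.solve(J, mismatch)        # corrections to [theta, V]
    return delta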
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module provides core classes needed to define electronic structure,
such as Spin, Orbital, etc.
"""
from monty.json import MSONable
from enum import Enum, unique
import numpy as np
__author__ = "<NAME>"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "Sep 23, 2011"
@unique
class Spin(Enum):
"""
Enum type for Spin. Only up and down.
Usage: Spin.up, Spin.down.
"""
up, down = (1, -1)
def __int__(self):
return self.value
def __float__(self):
return float(self.value)
def __str__(self):
return str(self.value)
@unique
class OrbitalType(Enum):
"""
Enum type for orbital type. Indices are basically the azimuthal quantum
number, l.
"""
s = 0
p = 1
d = 2
f = 3
def __str__(self):
return self.name
@unique
class Orbital(Enum):
"""
Enum type for specific orbitals. The indices are basically the order in
which the orbitals are reported in VASP and has no special meaning.
"""
s = 0
py = 1
pz = 2
px = 3
dxy = 4
dyz = 5
dz2 = 6
dxz = 7
dx2 = 8
f_3 = 9
f_2 = 10
f_1 = 11
f0 = 12
f1 = 13
f2 = 14
f3 = 15
def __int__(self):
return self.value
def __str__(self):
return self.name
@property
def orbital_type(self):
"""
Returns OrbitalType of an orbital.
"""
return OrbitalType[self.name[0]]
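# Illustrative sketch (not part of the original module): basic usage of the Spin,
# OrbitalType and Orbital enums defined above.
def _demo_spin_orbital():
    assert int(Spin.up) == 1 and int(Spin.down) == -1
    assert Orbital.dxy.orbital_type == OrbitalType.d
    assert str(OrbitalType.p) == "p"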
class Magmom(MSONable):
"""
New class in active development. Use with caution, feedback is
appreciated.
Class to handle magnetic moments. Defines the magnetic moment of a
site or species relative to a spin quantization axis. Designed for
use in electronic structure calculations.
* For the general case, Magmom can be specified by a vector,
e.g. m = Magmom([1.0, 1.0, 2.0]), and subscripts will work as
expected, e.g. m[0] gives 1.0
    * For collinear calculations, Magmom can be assumed to be scalar-like,
e.g. m = Magmom(5.0) will work as expected, e.g. float(m) gives 5.0
Both of these cases should be safe and shouldn't give any surprises,
but more advanced functionality is available if required.
There also exist useful static methods for lists of magmoms:
* Magmom.are_collinear(magmoms) - if true, a collinear electronic
structure calculation can be safely initialized, with float(Magmom)
giving the expected scalar magnetic moment value
* Magmom.get_consistent_set_and_saxis(magmoms) - for non-collinear
electronic structure calculations, a global, consistent spin axis
has to be used. This method returns a list of Magmoms which all
share a common spin axis, along with the global spin axis.
All methods that take lists of magmoms will accept magmoms either as
Magmom objects or as scalars/lists and will automatically convert to
a Magmom representation internally.
The following methods are also particularly useful in the context of
VASP calculations:
* Magmom.get_xyz_magmom_with_001_saxis()
* Magmom.get_00t_magmom_with_xyz_saxis()
See VASP documentation for more information:
https://cms.mpi.univie.ac.at/wiki/index.php/SAXIS
"""
def __init__(self, moment, saxis=(0, 0, 1)):
"""
:param moment: magnetic moment, supplied as float or list/np.ndarray
:param saxis: spin axis, supplied as list/np.ndarray, parameter will
be converted to unit vector (default is [0, 0, 1])
:return: Magmom object
"""
# to init from another Magmom instance
if isinstance(moment, Magmom):
saxis = moment.saxis
moment = moment.moment
moment = np.array(moment, dtype='d')
if moment.ndim == 0:
moment = moment * [0, 0, 1]
self.moment = moment
saxis = np.array(saxis, dtype='d')
self.saxis = saxis / np.linalg.norm(saxis)
@classmethod
def from_global_moment_and_saxis(cls, global_moment, saxis):
"""
Convenience method to initialize Magmom from a given global
magnetic moment, i.e. magnetic moment with saxis=(0,0,1), and
provided saxis.
Method is useful if you do not know the components of your
magnetic moment in frame of your desired saxis.
:param global_moment:
:param saxis: desired saxis
:return:
"""
magmom = Magmom(global_moment)
return cls(magmom.get_moment(saxis=saxis), saxis=saxis)
def _get_transformation_matrix(self, saxis):
saxis = saxis / np.linalg.norm(saxis)
alpha = np.arctan2(saxis[1], saxis[0])
beta = np.arctan2(np.sqrt(saxis[0] ** 2 + saxis[1] ** 2), saxis[2])
cos_a = np.cos(alpha)
cos_b = np.cos(beta)
sin_a = np.sin(alpha)
sin_b = np.sin(beta)
m = [[cos_b * cos_a, -sin_a, sin_b * cos_a],
[cos_b * sin_a, cos_a, sin_b * sin_a],
[-sin_b, 0, cos_b]]
return m
def _get_transformation_matrix_inv(self, saxis):
saxis = saxis / np.linalg.norm(saxis)
alpha = np.arctan2(saxis[1], saxis[0])
beta = np.arctan2(np.sqrt(saxis[0] ** 2 + saxis[1] ** 2), saxis[2])
cos_a = np.cos(alpha)
cos_b = np.cos(beta)
sin_a = np.sin(alpha)
sin_b = np.sin(beta)
m = [[cos_b * cos_a, cos_b * sin_a, -sin_b],
[-sin_a, cos_a, 0],
[sin_b * cos_a, sin_b * sin_a, cos_b]]
return m
def get_moment(self, saxis=(0, 0, 1)):
"""
Get magnetic moment relative to a given spin quantization axis.
If no axis is provided, moment will be given relative to the
Magmom's internal spin quantization axis, i.e. equivalent to
Magmom.moment
        :param saxis: (list/numpy array) spin quantization axis
:return: np.ndarray of length 3
"""
# transform back to moment with spin axis [0, 0, 1]
m_inv = self._get_transformation_matrix_inv(self.saxis)
        moment = np.matmul(self.moment, m_inv)
        # The original is truncated here; transforming onto the requested saxis and
        # returning the result is the natural completion (mirrors the matrices above).
        m = self._get_transformation_matrix(saxis)
        moment = np.matmul(moment, m)
        # round small values to zero
        moment = np.array([np.round(x, 8) for x in moment])
        return moment
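# Illustrative sketch (not part of the original module): constructing a Magmom and
# re-expressing it along a different spin quantization axis.
def _demo_magmom():
    m = Magmom([0.0, 0.0, 2.0])
    global_moment = m.get_moment()            # moment with saxis = (0, 0, 1)
    along_x = m.get_moment(saxis=(1, 0, 0))   # same moment expressed along x
    return global_moment, along_x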
"""
=============================================================
Angle-based Joint and Individual Variation Explained (AJIVE)
=============================================================
Adopted from the code at https://github.com/idc9/py_jive and tutorial
written by:
Author: <NAME>
License: MIT License
[1] Lock, <NAME>., et al. “Joint and Individual Variation Explained (JIVE)
for Integrated Analysis of Multiple Data Types.” The Annals of Applied
Statistics, vol. 7, no. 1, 2013, pp. 523–542., doi:10.1214/12-aoas597.
AJIVE is a useful algorithm that decomposes multiple views of data into two
main pieces
- Joint Variation
- Individual Variation
whose sum is the original data minus noise. This notebook will demonstrate the
functionality of AJIVE and show some examples of the algorithm's usefulness.
"""
import numpy as np
from mvlearn.decomposition import AJIVE
import seaborn as sns
import matplotlib.pyplot as plt
##############################################################################
# ## Data Creation
#
# Here we create data in the same way detailed in the initial JIVE paper:
#
# The two views are created with shared joint variation, unique individual
# variation, and independent noise.
#
np.random.seed(12)
# First View
X1_joint = np.vstack([-1 * np.ones((10, 20)), np.ones((10, 20))])
X1_joint = np.hstack([np.zeros((20, 80)), X1_joint])
X1_indiv_t = np.vstack([
np.ones((4, 50)),
-1 * np.ones((4, 50)),
np.zeros((4, 50)),
np.ones((4, 50)),
-1 * np.ones((4, 50)),
])
X1_indiv_b = np.vstack(
[np.ones((5, 50)), -1 * np.ones((10, 50)), np.ones((5, 50))]
)
X1_indiv_tot = np.hstack([X1_indiv_t, X1_indiv_b])
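# Illustrative sketch (not part of the original example): once both noisy views
# X1 and X2 have been assembled, AJIVE would typically be fit along these lines.
# The parameter and method names below (init_signal_ranks, fit_transform) are
# assumptions about the mvlearn API rather than taken from this script.
# ajive = AJIVE(init_signal_ranks=[2, 2])
# Js = ajive.fit_transform(Xs=[X1, X2])   # per-view joint-variation estimates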
from typing import Any, Union
from typing import Dict, Hashable
import numpy as np
from cumm import tensorview as tv
import json
from collections import abc
from functools import reduce
JSON_INDEX_KEY = "__cumm_io_json_index"
NPDTYPE_TO_JSONARRAY_MAP = {
np.dtype(np.uint64): tv.uint64,
np.dtype(np.uint32): tv.uint32,
np.dtype(np.uint16): tv.uint16,
np.dtype(np.uint8): tv.uint8,
np.dtype(np.int64): tv.int64,
np.dtype(np.int32): tv.int32,
np.dtype(np.int16): tv.int16,
np.dtype(np.int8): tv.int8,
np.dtype(np.float64): tv.float64,
np.dtype(np.float32): tv.float32,
np.dtype(np.float16): tv.float16,
np.dtype(np.bool_): tv.bool_,
}
def _inv_map(dict_map: Dict[Hashable, Hashable]) -> Dict[Hashable, Hashable]:
return {v: k for k, v in dict_map.items()}
INV_NPDTYPE_TO_JSONARRAY_MAP = _inv_map(NPDTYPE_TO_JSONARRAY_MAP)
class Placeholder(object):
def __init__(self, index: int, nbytes: int):
self.index = index
self.nbytes = nbytes
def __add__(self, other):
assert self.index == other.index
return Placeholder(self.index, self.nbytes + other.nbytes)
def __repr__(self):
return "Placeholder[{},{}]".format(self.index, self.nbytes)
def __eq__(self, other):
return self.index == other.index and self.nbytes == other.nbytes
def is_json_index(data):
return isinstance(data, dict) and JSON_INDEX_KEY in data
def byte_size(obj: Union[np.ndarray, tv.Tensor]) -> int:
if isinstance(obj, np.ndarray):
return obj.nbytes
if isinstance(obj, tv.Tensor):
return obj.size * obj.itemsize
else:
raise NotImplementedError
def _extract_arrays_from_data(arrays,
data,
object_classes=(np.ndarray,),
json_index=False):
# can't use abc.Sequence because string is sequence too.
if isinstance(data, (list, tuple)):
data_skeleton = [None] * len(data)
for i in range(len(data)):
e = data[i]
if isinstance(e, object_classes):
data_skeleton[i] = {JSON_INDEX_KEY: len(arrays)}
arrays.append(e)
else:
data_skeleton[i] = _extract_arrays_from_data(
arrays, e, object_classes, json_index)
if isinstance(data, tuple):
data_skeleton = tuple(data_skeleton)
return data_skeleton
elif isinstance(data, abc.Mapping):
data_skeleton = {}
for k, v in data.items():
if isinstance(v, object_classes):
data_skeleton[k] = {JSON_INDEX_KEY: len(arrays)}
arrays.append(v)
else:
data_skeleton[k] = _extract_arrays_from_data(
arrays, v, object_classes, json_index)
return data_skeleton
else:
data_skeleton = None
if isinstance(data, object_classes):
data_skeleton = {JSON_INDEX_KEY: len(arrays)}
arrays.append(data)
else:
data_skeleton = data
return data_skeleton
def extract_arrays_from_data(data,
object_classes=(np.ndarray,),
json_index=False):
arrays = []
data_skeleton = _extract_arrays_from_data(arrays,
data,
object_classes=object_classes,
json_index=json_index)
return arrays, data_skeleton
def align_offset(offset, n):
"""given a byte offset, align it and return an aligned offset
"""
if n <= 0:
return offset
return n * ((offset + n - 1) // n)
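# Illustrative sketch (not part of the original module): how align_offset pads byte
# offsets up to the next multiple of the alignment size. The values are made up.
def _demo_align_offset():
    assert align_offset(17, 32) == 32
    assert align_offset(64, 32) == 64
    assert align_offset(5, 0) == 5  # non-positive alignment leaves the offset unchanged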
def put_arrays_to_data(arrays, data_skeleton, json_index=False) -> Any:
if not arrays:
return data_skeleton
return _put_arrays_to_data(arrays, data_skeleton, json_index)
def _put_arrays_to_data(arrays, data_skeleton, json_index=False):
if isinstance(data_skeleton, (list, tuple)):
length = len(data_skeleton)
data = [None] * length
for i in range(length):
e = data_skeleton[i]
if is_json_index(e):
data[i] = arrays[e[JSON_INDEX_KEY]]
else:
data[i] = _put_arrays_to_data(arrays, e, json_index)
if isinstance(data_skeleton, tuple):
data = tuple(data)
return data
elif isinstance(data_skeleton, abc.Mapping):
data = {}
for k, v in data_skeleton.items():
if is_json_index(v):
data[k] = arrays[v[JSON_INDEX_KEY]]
else:
data[k] = _put_arrays_to_data(arrays, v, json_index)
return data
else:
if is_json_index(data_skeleton):
data = arrays[data_skeleton[JSON_INDEX_KEY]]
else:
data = data_skeleton
return data
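# Illustrative sketch (not part of the original module): extract_arrays_from_data
# pulls ndarrays out of a nested structure, leaving JSON-index placeholders, and
# put_arrays_to_data reverses the operation. The sample data below is made up.
def _demo_skeleton_roundtrip():
    data = {"a": np.arange(3), "b": [1, 2, {"c": np.ones(2)}]}
    arrays, skeleton = extract_arrays_from_data(data)
    restored = put_arrays_to_data(arrays, skeleton)
    assert np.array_equal(restored["a"], data["a"])
    return skeleton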
def dumps_jsonarray(obj, multi_thread=False, buffer=None, use_bytearray=False, align_size: int = 32):
"""
layout:
+--------------+------------+---------------------------------+--------------+
|meta_start_pos|meta_end_pos| array/bytes content | meta |
+--------------+------------+---------------------------------+--------------+
data without array/bytes will be saved as bytes in content.
meta format:
{
"array": [
{
"shape": [...]
"dtype": ...
"offset": ...
}
]
"data": skeleton
}
"""
arrays, data_skeleton = extract_arrays_from_data(obj, (np.ndarray, tv.Tensor), True)
array_meta = []
start = 16
for i in range(len(arrays)):
arr = arrays[i]
start_aligned = align_offset(start, align_size)
if isinstance(arr, tv.Tensor):
assert arr.device == -1
arr_np = arr.numpy_view()
else:
arr_np = arr
# ascontiguous will convert scalar to 1-D array. be careful.
arrays[i] = np.ascontiguousarray(arr_np)
array_meta.append({
"shape": arrays[i].shape,
"dtype": NPDTYPE_TO_JSONARRAY_MAP[arrays[i].dtype],
"offset": start_aligned,
"is_np": isinstance(arr, np.ndarray),
})
start = start_aligned + arrays[i].nbytes
meta = {
"array": array_meta,
"data": data_skeleton,
}
meta_json = json.dumps(meta).encode("utf8")
meta_length = len(meta_json)
array_buffers = []
for i in range(len(arrays)):
array_buffers.append((arrays[i].view(np.uint8),
array_meta[i]["offset"], arrays[i].nbytes))
total_length = start + meta_length
if buffer is None:
if not use_bytearray:
buffer = np.empty(total_length, dtype=np.uint8)
else:
buffer = bytearray(total_length)
else:
assert len(buffer) >= total_length
buffer_view = memoryview(buffer)
content_end_offset = start
meta_end_offset = content_end_offset + meta_length
buffer_view[:8] = np.array(content_end_offset, dtype=np.int64).tobytes()
buffer_view[8:16] = np.array(meta_end_offset, dtype=np.int64).tobytes()
    buffer_view[16:24] = np.array(align_size, dtype=np.int64).tobytes()  # mirrors the two header writes above
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression, LinearRegression
from math import log
from causallib.estimation import IPW
def check_input(A, Y, X):
if not isinstance(A, pd.Series):
if not np.max(A.shape) == A.size:
raise Exception(f'A must be one dimensional, got shape {A.shape}')
A = pd.Series(A.flatten())
if not isinstance(Y, pd.Series):
        if not np.max(Y.shape) == Y.size:
            raise Exception(f'Y must be one dimensional, got shape {Y.shape}')
Y = pd.Series(Y.flatten())
if not isinstance(X, pd.DataFrame):
X = pd.DataFrame(X)
if not len(A.index) == len(Y.index) == len(X.index):
raise Exception(f'A, Y, X must have same number of samples, '
f'got A: {len(A.index)} samples, Y: {len(Y.index)} samples, X: {len(X.index)} samples')
return A, Y, X
def calc_ate_vanilla_ipw(A, Y, X):
ipw = IPW(LogisticRegression(solver='liblinear', penalty='l1', C=1e2, max_iter=500), use_stabilized=True).fit(X, A)
weights = ipw.compute_weights(X, A)
outcomes = ipw.estimate_population_outcome(X, A, Y, w=weights)
effect = ipw.estimate_effect(outcomes[1], outcomes[0])
return effect[0]
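# Illustrative sketch (not part of the original module): estimating an ATE with the
# helper above on a small simulated dataset. All numbers below are made up.
def _demo_vanilla_ipw():
    rng = np.random.RandomState(0)
    X = pd.DataFrame(rng.normal(size=(200, 3)))
    A = pd.Series(rng.binomial(1, 0.5, size=200))
    Y = pd.Series(X[0] + 2 * A + rng.normal(size=200))
    return calc_ate_vanilla_ipw(A, Y, X)  # should be close to the true effect of 2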
def calc_group_diff(X, idx_trt, ipw, l_norm):
"""Utility function to calculate the difference in covariates between treatment and control groups"""
    # NOTE: the original ends mid-expression; applying l_norm as below is a
    # best-effort completion and may differ from the source.
    return (np.abs(np.average(X[idx_trt], weights=ipw[idx_trt], axis=0) -
                   np.average(X[~idx_trt], weights=ipw[~idx_trt], axis=0)) ** l_norm)
"""
Simple example of a generated time series with known coefficients that
is used to feed the pressure time series to the quaternion_mode function
to show how the coefficients are retrieved from the pressure series only
Expression for the azimuthal pressure mode is taken from [1]
References
----------
[1] <NAME> and <NAME>, "Quaternion structure of
azimuthal instabilities", Physical Review Fluids, 2018
"""
import numpy as np
import matplotlib.pyplot as plt
from quatazim import quaternion_mode, filter_signal
# Set sampling time, total runtime and create the time series
fs = 50e3
t_end = 10
time = np.arange(t_end * int(fs)) / fs
# Setting the oscillation frequency
freq = 1200
omega = 2 * np.pi * freq
# Set up the parameters
A = np.ones_like(time) + 0.3 * np.sin(3 * time)
import matplotlib.pyplot as plt
import time
from skimage.color import hsv2rgb
from flowbias.data_manipulation.extract_minor_flow import compute_secondary_flows
#from flowbias.utils.flow import compute_color
from flowbias.utils.meta_infrastructure import get_available_datasets, load_model_from_meta
from flowbias.utils.model_loading import sample_to_torch_batch, stack_torch_batch
from flowbias.utils.visualization.AGrid import AGrid
import numpy as np
from PIL import Image
from flowbias.config import Config
from flowbias.losses import _elementwise_epe
from flowbias.utils.flow import compute_color, make_color_wheel
from flowbias.utils.meta_infrastructure import get_available_datasets, load_model_from_meta
from flowbias.utils.model_loading import sample_to_torch_batch
from flowbias.utils.visualization.AGrid import AGrid
from skimage.color import hsv2rgb
def compute_color(x,y, norm_sat=None, report_norm_sat=False):
mask = np.isnan(x)
x[np.abs(x) > 1000] = np.NaN
y[np.abs(y) > 1000] = np.NaN
x = np.nan_to_num(x, nan=0)
y = np.nan_to_num(y, nan=0)
hue = np.arctan2(x, y)
hue = ((hue + 2 * np.pi) % (2 * np.pi)) / (2*np.pi)
sat = np.linalg.norm(np.dstack([x,y]), axis=2)
if norm_sat is None:
outlier_flow = np.ones_like(sat)
#max_sat = np.max(sat) + 1e-8
sat_sort = np.sort(sat.flat)
max_sat = sat_sort[int(sat_sort.size*0.99)] + 1e-8
else:
outlier_flow = 1.0 - (sat > norm_sat) * 0.25
max_sat = norm_sat
sat /= max_sat
sat = np.minimum(sat, 1.0)
#print(np.min(hue), np.max(hue), np.min(sat), np.max(sat), np.min(outlier_flow), np.max(outlier_flow))
#hsv = np.dstack([hue, sat, ~mask])
hsv = np.dstack([hue, sat, outlier_flow])
rgb = hsv2rgb(hsv)
#rgb = np.dstack([sat, sat, sat])
if report_norm_sat:
return rgb, max_sat
else:
return rgb
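# Illustrative sketch (not part of the original module): colorizing a tiny synthetic
# flow field with compute_color. The field below is made up.
def _demo_compute_color():
    yy, xx = np.mgrid[-1:1:64j, -1:1:64j]
    rgb, max_sat = compute_color(xx.copy(), yy.copy(), report_norm_sat=True)
    return rgb.shape, max_sat  # (64, 64, 3) image and the 99th-percentile magnitude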
def make_flow_wheel(size, ticks):
field = np.linspace([-1]*450, [+1]*450, num=450)
flow_wheel = compute_color(field, field.T, 1)
#flow_wheel[np.linalg.norm(np.dstack([field, field.T]), ord=2, axis=2)>0.99] = 1.0
return flow_wheel
def save_im(im, path):
int_im = (im * 255).astype(np.uint8)
Image.fromarray(int_im).save(path)
def flow_to_rgb(flow, max=None, get_max=False):
    if get_max:
        # compute_color expects the x and y flow components separately
        return compute_color(flow[:, :, 0].copy(), flow[:, :, 1].copy(),
                             norm_sat=max, report_norm_sat=True)
    else:
        return compute_color(flow[:, :, 0].copy(), flow[:, :, 1].copy(), norm_sat=max)
mask = np.isnan(flow)
flow = np.nan_to_num(flow, nan=0)
    hue = np.arctan2(flow[:, :, 0], flow[:, :, 1])
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 3 20:18:00 2019
@author: wcoll
Adapted by <NAME>
"""
# use pandas instead
import pandas as pd
import numpy.testing
from numpy.testing import assert_allclose
from ar6_ch6_rcmipfigs.utils.plot import get_chem_col
from ar6_ch6_rcmipfigs.constants import RESULTS_DIR
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from ar6_ch6_rcmipfigs.notebooks.GSAT_change_hist_attribution.utils_hist_att.co2_forcing_AR6 import co2_forcing_AR6
from ar6_ch6_rcmipfigs.notebooks.GSAT_change_hist_attribution.utils_hist_att.ch4_forcing_AR6 import ch4_forcing_AR6
from ar6_ch6_rcmipfigs.notebooks.GSAT_change_hist_attribution.utils_hist_att.n2o_forcing_AR6 import n2o_forcing_AR6
import seaborn as sns
from ar6_ch6_rcmipfigs.constants import INPUT_DATA_DIR
# All from table 7.8
co2_erf_AR6 = 2.16
ch4_erf_AR6 = 0.54
n2o_erf_AR6 = 0.21
hc_erf_AR6 = 0.41
o3_erf_AR6 = 0.47
ari_erf_AR6 = -0.22 # for 2019
aci_erf_AR6 = -0.84 # for 2019
co2_1850 = 286.7 # LLGHG_history_AR6_v8a
co2_2014 = 397.12
ch4_1750 = 729.2
ch4_1850 = 807.6 # LLGHG_history_AR6_v8a
ch4_2014 = 1822.88 #
ch4_2019 = 1866.3
n2o_1850 = 272.5 # LLGHG_history_AR6_v8a
n2o_2014 = 327.37
#Rapid adjustments to WMGHGs
co2_ra = 0.05 # FGD
ch4_ra = -0.14 # FGD
n2o_ra = 0.07 # FGD
tot_em_co2 = 582. # Cumulative C since 1850 - from MAGICC input files
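# Illustrative sketch (not part of the original script): the 1850-2014 CH4 ERF used
# in main() below, i.e. the AR6 stratospherically-adjusted RF expression scaled by
# (1 + ch4_ra) to account for the rapid adjustment.
def _demo_ch4_erf():
    return ch4_forcing_AR6(ch4_2014, ch4_1850, n2o_1850) * (1 + ch4_ra)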
# %%
def main(plot=False):
# %%
ch4_erf = ch4_forcing_AR6(ch4_2014, ch4_1850, n2o_1850)*(1+ch4_ra)
n2o_erf = n2o_forcing_AR6(n2o_2014, n2o_1850, co2_1850, ch4_1850)*(1+n2o_ra)
hc_erf = 0.40 # 1850-2014 Interpolated between 2011 (0.39) and 2019 (0.41)
erf_bc = 0.15 # Thornhill et al.
irf_ari = -0.3 # AR6 for 2014
erf_aci = -1.0 # AR6 for 2014
ncols = 5 # columns in csv file
nspec = 9 # number of species
dtype = 'U12'+', f8'*ncols
data = np.genfromtxt(INPUT_DATA_DIR/'input_from_bill_collins/attribution_input.csv', delimiter=',', filling_values=0,
names=True, dtype=(dtype))
data_sd = np.genfromtxt(INPUT_DATA_DIR/'input_from_bill_collins/attribution_input_sd.csv', delimiter=',', filling_values=0,
names=True, dtype=(dtype))
rfo3 = data['o3_rf']
rfo3_sd = data_sd['o3_rf_sd']
lifech4 = data['lifech4']
lifech4_sd = data_sd['lifech4_sd']
ari = data['ari']
ari_sd = data_sd['ari_sd']
ac = data['ac']
ac_sd = data_sd['ac_sd']
erf = data['erf']
erf_sd = data_sd['erf_sd']
i_ch4 = np.where(data['Experiment']=='CH4')[0][0]
i_nox = np.where(data['Experiment']=='NOx')[0][0]
i_voc = np.where(data['Experiment']=='VOC')[0][0]
i_n2o = np.where(data['Experiment']=='N2O')[0][0]
i_hc = np.where(data['Experiment']=='HC')[0][0]
i_gas = np.array([i_ch4, i_n2o, i_hc, i_nox, i_voc])
i_non_ch4 = np.array([i_n2o, i_hc, i_nox, i_voc])
total_o3 = np.sum(rfo3)
alpha = 1.30 # From chapter 6
# %%
#print(alpha)
ch4 = ch4_2014*(1+lifech4)**alpha
ch4_sd = (ch4-ch4_2014)*lifech4_sd/lifech4
ch4_sd = np.where(lifech4 == 0, 0., ch4_sd)
# %%
# Ozone primary mode
rfo3perch4 = rfo3[i_ch4]/(ch4_2014-ch4_1850) # Use CH4 expt
rfo3perch4_sd = rfo3_sd[i_ch4]/(ch4_2014-ch4_1850) # Use CH4 expt
rfo3_prime = rfo3perch4*(ch4-ch4_2014)
rfo3_prime_sd = np.sqrt(
(rfo3perch4_sd*(ch4-ch4_2014))**2+
# add 15% uncertainty in radiative transfer - from Ragnhild
(rfo3perch4*(ch4-ch4_2014)*0.15)**2)
# Set ch4 o3 prime to be minus sum of non-ch4 terms
# - ensures total sum of prime terms is zero
rfo3_prime[i_ch4] = -np.sum(rfo3_prime[i_non_ch4])
rfo3_prime_sd[i_ch4] = np.sqrt(np.sum(np.square(rfo3_prime_sd[[i_non_ch4]])))
# CH4 forcing
rfch4 = np.zeros(nspec)
rfch4_sd = np.zeros(nspec)
for ispec in np.arange(nspec):
rfch4[ispec] = \
ch4_forcing_AR6(ch4[ispec], ch4_2014, n2o_2014)*\
(1+ch4_ra)
rfch4_sd[ispec] = \
ch4_forcing_AR6(ch4[ispec]+ch4_sd[ispec], ch4_2014, n2o_2014)*\
(1+ch4_ra)-rfch4[ispec]
# rfch4 due to ch4 is minus sum of non-ch4 terms
# - ensures total sum of rfch4 changes is zero
rfch4[i_ch4] = -np.sum(rfch4[i_non_ch4])
rfch4_sd[i_ch4] = np.sqrt(np.sum(np.square(rfch4_sd[[i_non_ch4]])))
# Add in 14% spectral uncertainty
rfch4_sd=np.sqrt((rfch4*0.14)**2+(rfch4_sd)**2)
em_co2 = np.zeros(nspec)
em_co2[[i_ch4, i_hc, i_voc]] = [6.6, 0.02, 26.]
# From MAGICC input files
# CH4 HC VOC, CO CO2 scalings applied of 75%, 100%, 50%, 100%
# Assume 88% of CH4 emitted oxidised (12% remains as CH4)
# Assume can attributed present day CO2 change by scaling cumulative emissions
co2 = (em_co2/tot_em_co2)*(co2_2014-co2_1850)
# %%
rfco2=np.zeros(nspec)
for ispec in np.arange(nspec):
rfco2[ispec] = \
co2_forcing_AR6(co2_2014, co2_2014-co2[ispec], n2o_2014)*\
(1+co2_ra)
# co2 contribution from direct co2 emissions
rfco2_co2 = co2_forcing_AR6(co2_2014, co2_1850, n2o_2014)*(1+co2_ra) \
-np.sum(rfco2) # Subtract off non-co2 carbon contributions
#Set up WMGHG direct ERFs
rfghg = np.zeros(nspec)
rfghg[i_ch4] = ch4_erf
rfghg[i_n2o] = n2o_erf
rfghg[i_hc] = hc_erf
#Aerosols
#Set indicies
i_bc = np.where(data['Experiment']=='BC')[0][0]
i_oc = np.where(data['Experiment']=='OC')[0][0]
i_so2 = np.where(data['Experiment']=='SO2')[0][0]
i_nh3 = np.where(data['Experiment']=='NH3')[0][0]
i_aer = np.array([i_bc, i_oc, i_so2, i_nh3]) # all aerosols
i_scat = np.array([i_oc, i_so2, i_nh3]) # scattering aerosols
#Overwrite Ghan ari for aerosols. Ghan ari still used for gases
#Set aerosol ari to be erf-ac to ensure components add to erf
ari[i_aer] = erf[i_aer]-ac[i_aer]
ari_sd[i_aer] = np.sqrt(erf_sd[i_aer]**2 +ac_sd[i_aer]**2)
#Don't need 2014 scaling anymore since BC excluded from both, and gases included in both
#Product of two scalings is equal to a single scalng to 2019
#Scale BC separately from 2014 to 2019
# Use ratio of Ch 7 ari values for *both* ari and ac
# This assumes that for BC ac scales with the direct effect rather than indirect
ari_sd[i_bc] *= ari_erf_AR6/irf_ari # 2019/2014
ari[i_bc] *= ari_erf_AR6/irf_ari # 2019/2014
ac_sd[i_bc] *= ari_erf_AR6/irf_ari # 2019/2014
ac[i_bc] *= ari_erf_AR6/irf_ari # 2019/2014
#Now Scale everything to 2019 as in table 7.8
#Scale the denominator last otherwise you get 1.0 for subsequent scalings!
rfco2_co2 *= co2_erf_AR6/(rfco2_co2+np.sum(rfco2))
rfch4 *= ch4_erf_AR6/rfghg[i_ch4]
rfch4_sd *= ch4_erf_AR6/rfghg[i_ch4]
rfo3_prime *= ch4_erf_AR6/rfghg[i_ch4]
rfo3_prime_sd *= ch4_erf_AR6/rfghg[i_ch4]
rfghg[i_ch4] *= ch4_erf_AR6/rfghg[i_ch4]
rfghg[i_n2o] *= n2o_erf_AR6/rfghg[i_n2o]
rfghg[i_hc] *= hc_erf_AR6/rfghg[i_hc]
rfo3_sd *= o3_erf_AR6/np.sum(rfo3)
rfo3 *= o3_erf_AR6/np.sum(rfo3)
#Need to subtract off BC values from Ch 7 to get non-BC ari and aci
ari_erf_AR6_nonBC = ari_erf_AR6-ari[i_bc]
aci_erf_AR6_nonBC = aci_erf_AR6-ac[i_bc]
#Scale non-BC aerosols
#This includes ari from gas precursors
ari_nonBC = np.sum(ari[i_scat])+np.sum(ari[i_gas])
ari_sd[i_scat] *= ari_erf_AR6_nonBC/ari_nonBC # Scale scattering aerosols
ari[i_scat] *= ari_erf_AR6_nonBC/ari_nonBC
ari_sd[i_gas] *= ari_erf_AR6_nonBC/ari_nonBC # Scale gases
ari[i_gas] *= ari_erf_AR6_nonBC/ari_nonBC
#Aci scaling excludes ac from gas precursors
ac_nonBC = np.sum(ac[i_scat])
ac_sd[i_scat] *= aci_erf_AR6_nonBC/ac_nonBC # Scale scattering aerosols
ac[i_scat] *= aci_erf_AR6_nonBC/ac_nonBC
rfghg_sd = rfghg*0.14 # assume 14% for all WMGHGs
table = np.zeros(nspec+1,
dtype={'names':
['Species', 'CO2', 'GHG', 'CH4_lifetime', 'O3',
'O3_prime', 'Strat_H2O', 'Aerosol', 'Cloud', 'Total'],
'formats':
['U20', 'f8', 'f8', 'f8', 'f8',
'f8', 'f8', 'f8', 'f8', 'f8']})
table_sd = np.zeros(nspec+1,
dtype={'names':
['Species', 'CO2_sd', 'GHG_sd', 'CH4_lifetime_sd',
'O3_sd', 'O3_prime_sd', 'Strat_H2O_sd',
'Aerosol_sd', 'Cloud_sd', 'Total_sd'],
'formats':
['U20', 'f8', 'f8', 'f8', 'f8',
'f8', 'f8', 'f8', 'f8', 'f8']})
table['Species'][0] = 'CO2'
table['CO2'][0] = rfco2_co2
table['Total'][0] = rfco2_co2
table_sd['Species'][0] = 'CO2'
table_sd['CO2_sd'][0] = rfco2_co2*0.12 # 12% uncertainty
table_sd['Total_sd'][0] = rfco2_co2*0.12
for ispec in np.arange(nspec):
table['Species'][ispec+1] = data['Experiment'][ispec]
table['CO2'][ispec+1] = rfco2[ispec]
table['GHG'][ispec+1] = rfghg[ispec]
table['CH4_lifetime'][ispec+1] = rfch4[ispec]
table['O3'][ispec+1] = rfo3[ispec]
table['O3_prime'][ispec+1] = rfo3_prime[ispec]
table['Aerosol'][ispec+1] = ari[ispec]
table['Cloud'][ispec+1] = ac[ispec]
table['Total'][ispec+1] = np.sum([rfco2[ispec], rfghg[ispec], rfch4[ispec],
rfo3[ispec], rfo3_prime[ispec], ari[ispec], ac[ispec]])
table_sd['Species'][ispec+1] = data['Experiment'][ispec]
table_sd['CO2_sd'][ispec+1] = rfco2[ispec]*0.12
table_sd['GHG_sd'][ispec+1] = rfghg_sd[ispec]
table_sd['CH4_lifetime_sd'][ispec+1] = rfch4_sd[ispec]
table_sd['O3_sd'][ispec+1] = rfo3_sd[ispec]
table_sd['O3_prime_sd'][ispec+1] = rfo3_prime_sd[ispec]
table_sd['Aerosol_sd'][ispec+1] = ari_sd[ispec]
table_sd['Cloud_sd'][ispec+1] = ac_sd[ispec]
table_sd['Total_sd'][ispec+1] = np.sqrt(np.sum(np.square(
[rfco2[ispec]*0.12, rfghg_sd[ispec], rfch4_sd[ispec],
rfo3_sd[ispec]+rfo3_prime_sd[ispec], ari_sd[ispec], ac_sd[ispec]])))
table['Strat_H2O'][i_ch4+1] = 0.05
table['Total'][i_ch4+1] += 0.05
table_sd['Strat_H2O_sd'][i_ch4+1] = 0.05
table_sd['Total_sd'][i_ch4+1] = np.sqrt(np.sum(np.square(
[rfco2[i_ch4]*0.12, rfghg_sd[i_ch4]+rfch4_sd[i_ch4],
rfo3_sd[i_ch4]+rfo3_prime_sd[i_ch4], 0.05,
ari_sd[i_ch4], ac_sd[i_ch4]])))
#np.savetxt("attribution_output_1750_2019_newBC.csv", table, delimiter=',',
# fmt='%15s'+9*', %8.3f',
# header=','.join(table.dtype.names))
#np.savetxt("attribution_output_1750_2019.csv_sd_newBC.csv", table_sd, delimiter=',',
# fmt='%15s'+9*', %8.3f',
# header=','.join(table_sd.dtype.names))
# %%
df_tab = pd.DataFrame(table).set_index('Species')
#df_tab.loc[df_tab.index[::-1]].drop('Total', axis=1).plot.barh(stacked =True)
# %%
fn = 'attribution_1750_2019_newBC.csv'
fp = RESULTS_DIR /'tables_historic_attribution'/fn
fp.parent.mkdir(parents=True, exist_ok=True)
df_tab.to_csv(fp)
#df_tab.loc[df_tab.index[::-1]].drop('Total', axis=1).plot.barh(stacked =True)
# %%
df_tab_sd = pd.DataFrame(table_sd).set_index('Species')
df_tab
# %%
fn = 'attribution_1750_2019_newBC_standard_deviation.csv'
fp = RESULTS_DIR /'tables_historic_attribution'/fn
fp.parent.mkdir(parents=True, exist_ok=True)
df_tab_sd.to_csv(fp)
if not plot:
return df_tab, df_tab_sd
# %%
fig = plt.figure()
width = 0.7
species =[r'CO$_2$', r'CH$_4$', r'N$_2$O', 'Halocarbon', r'NO$_X$', 'VOC', r'SO$_2$',
'Organic Carbon', 'Black Carbon', 'Ammonia']
exp_list = \
np.array([i_ch4, i_n2o, i_hc, i_nox, i_voc, i_so2, i_oc, i_bc, i_nh3])
ybar = np.arange(nspec+1, 0, -1)
labels = [r'CO$_2$', 'WMGHG', r'CH$_4$ lifetime', r'O$_3$', 'Aerosol (ari)', 'Cloud']
    pos_ghg = np.zeros(nspec+1)
import pandas as pd
import sys
# sys.path.insert(1, '../CLEF_Datasets_ICD/processed_data/')
from process_data import *
import torch
import io
import re
import numpy as np
import os
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.preprocessing import MultiLabelBinarizer
import pickle
import logging
import random
import json
import argparse
from loss import *
import random
from utils import *
from RAkEL import *
from label_clusterer import *
from models import *
from ICDHierarchyParser import *
from hierarchical_evaluation import *
import scipy.stats as ss
from torch.utils.data import Dataset, RandomSampler, DataLoader, SequentialSampler
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss
from transformers.modeling_bert import BertConfig, BertModel, BertPreTrainedModel
from transformers.modeling_xlm_roberta import XLMRobertaModel
from transformers.modeling_xlm_roberta import XLMRobertaModel, XLMRobertaConfig
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
XLMRobertaConfig, XLMRobertaForSequenceClassification,
XLMRobertaTokenizer, AdamW, get_linear_schedule_with_warmup)
from collections import defaultdict
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = ['Bert', 'XLMRoberta']
def generate_output_dir(args, to_append=None):
encoder = 'mlbertB' if args.encoder_name_or_path == 'bert-base-multilingual-cased' else None
encoder = 'mlbertL' if args.encoder_name_or_path == 'bert-large-multilingual-cased' else encoder
encoder = 'xlmrB' if args.encoder_name_or_path == 'xlm-roberta-base' else encoder
max_cluster_size = None if 'mc2c' not in args.model else args.max_cluster_size
min_cluster_size = None if 'mc2c' not in args.model else args.min_cluster_size
max_cluster_threshold = None if 'mc2c' not in args.model else args.max_cluster_threshold
max_m = None if 'mc2c' not in args.model else args.max_m
label_msl = None if args.model != 'la_mc2c' and args.model != 'label_attn' else args.label_max_seq_length
with_none = 'Without_None_Label' if not args.train_with_none else 'With_None_Label'
mcc_loss = None if 'mc2c' not in args.model else args.mcc_loss
model = args.model + '_no_mlcc' if 'mc2c' in args.model and args.no_mlcc else args.model
lmbda = None if args.lmbda == 1.0 else args.lmbda
frz_bert = args.n_bert2freeze if args.n_bert2freeze else args.freeze_bert
output_dir = os.path.join('exps_dir', with_none, model, '_'.join([args.data_dir.split('/')[1],
str(args.doc_max_seq_length),
str(label_msl),
encoder,
str(args.learning_rate),
args.loss_fct,
str(max_cluster_size),
str(max_cluster_threshold),
str(min_cluster_size),
str(max_m),
str(args.n_gpu),
str(args.num_train_epochs),
str(args.per_gpu_train_batch_size),
str(mcc_loss),
str(lmbda),
str(frz_bert)]))
if to_append:
output_dir += to_append
return output_dir
class ICDDataloader(Dataset):
def __init__(self, data_path):
self.data = pickle_load(data_path)
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
return self.data[idx]
# return self.data.iloc[idx,]
def plackett_luce(some_list):
for i in range(1, len(some_list)):
some_list[i] /= np.sum(some_list[i:])
return np.sum(np.log(some_list))
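# Illustrative sketch (not part of the original script): plackett_luce takes a list
# of scores ordered by rank and returns the Plackett-Luce log-likelihood of that
# ranking. Note that it mutates its argument in place. The scores are made up.
def _demo_plackett_luce():
    return plackett_luce([0.5, 0.3, 0.2])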
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def sigmoid(x):
return 1. / (1. + np.exp(-x))
def acc_and_f1(preds, labels, metric_avg):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds, average=metric_avg)
prec = precision_score(y_true=labels, y_pred=preds, average=metric_avg)
recall = recall_score(y_true=labels, y_pred=preds, average=metric_avg)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
"precision": prec,
"recall": recall,
}
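# Illustrative sketch (not part of the original script): multi-label metrics on a
# tiny toy prediction matrix (values are made up).
def _demo_metrics():
    y_true = np.array([[1, 0, 1], [0, 1, 0]])
    y_pred = np.array([[1, 0, 0], [0, 1, 0]])
    return acc_and_f1(y_pred, y_true, metric_avg="micro")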
# MODEL_CLASSES = {
# "xlmroberta-label_attn": (XLMRobertaConfig, BertForMLSCWithLabelAttention, XLMRobertaTokenizer),
# "bert-label_attn": (BertConfig, BertForMLSCWithLabelAttention, BertTokenizer),
# "xlmroberta-stacked": (XLMRobertaConfig, StackedBertForMultiLabelSequenceClassification, XLMRobertaTokenizer),
# "bert-stacked": (BertConfig, StackedBertForMultiLabelSequenceClassification, BertTokenizer),
# "xlmroberta-label_attn-stacked": (XLMRobertaConfig, StackedBertForMLSCWithLabelAttention, XLMRobertaTokenizer),
# "bert-label_attn-stacked": (BertConfig, StackedBertForMLSCWithLabelAttention, BertTokenizer),
# "xlmroberta-baseline": (XLMRobertaConfig, BertForMultiLabelSequenceClassification, XLMRobertaTokenizer),
# "bert-baseline": (BertConfig, BertForMultiLabelSequenceClassification, BertTokenizer),
#
# }
MODEL_CLASSES = {
"bert-label_attn": (BertConfig, BertForMLSCWithLabelAttention, BertTokenizer),
"bert-baseline": (BertConfig, BertForMultiLabelSequenceClassification, BertTokenizer),
"bert-mc2c": (BertConfig, MC2C, BertTokenizer),
"bert-la_mc2c": (BertConfig, LabelAttentionMC2C, BertTokenizer),
"bert-mc2c-no_mlcc": (BertConfig, MC2C_noMLCC, BertTokenizer),
# "bert-la_mc2c-no_mlcc": (BertConfig, LabelAttentionMC2C_noMLCC, BertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def load_local_labels(self, seed):
return pickle.load(open(os.path.join(self.args.data_dir,
'MCC/{}_{}_{}/{}/train_doc_id2gold.p'.format(self.args.min_cluster_size, self.args.max_cluster_size,
self.args.max_cluster_threshold, seed)), 'rb'))
def train(args, train_dataset, label_dataset, model, tokenizer, class_weights, idx2id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
if args.doc_batching:
train_dataloader = DataLoader(train_dataset, sampler=None, batch_size=args.n_gpu, collate_fn=my_collate)
train_dataloader = list(train_dataloader)
else:
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
label_dataloader = DataLoader(label_dataset, sampler=None, batch_size=len(label_dataset))
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
p_count = 0
np_count = 0
for param in model.parameters():
if param.requires_grad:
p_count += 1
for name, param in model.named_parameters():
if param.requires_grad:
# print(name)
np_count += 1
# num_warmup_steps = int(len(train_dataloader) * args.warmup_proportion) * args.num_train_epochs
num_warmup_steps = int(len(train_dataloader) * args.warmup_proportion * args.num_train_epochs)
# if 'checkpoint-' in args.encoder_name_or_path:
# optimizer = torch.load(os.path.join(args.output_dir, 'optimizer.pt'))
# scheduler = torch.load(os.path.join(args.output_dir, 'scheduler.pt'))
# else:
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon, correct_bias=False)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.encoder_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.encoder_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.encoder_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.encoder_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
if args.model == 'label_attn' or args.model == 'la_mc2c':
model.initialize_label_data(next(iter(label_dataloader)))
if 'mc2c' in args.model:
model.get_idx2id(idx2id)
# multi-gpu training (should be after apex fp16 initialization)
n_clusters = model.n_clusters if 'mc2c' in args.model else 0
if args.n_gpu > 1:
if args.doc_batching:
model = MyDataParallel(model)
else:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num labels = %d", args.num_labels)
logger.info(" Num Epochs = %d", args.num_train_epochs)
if 'mc2c' in args.model:
logger.info(" Num Clusters = %d", n_clusters)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.encoder_name_or_path):
# set global_step to gobal_step of last saved checkpoint from model path
global_step = int(args.encoder_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss = 0.0, 0.0
tr_cluster_loss, logging_cluster_loss = 0.0, 0.0
tr_micro_loss, logging_micro_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0],
)
set_seed(args) # Added here for reproductibility
for ep, _ in enumerate(train_iterator):
if args.doc_batching:
random.shuffle(train_dataloader)
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# label_data = next(iter(label_dataloader))
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
if args.doc_batching:
batch = tuple(tuple(ti.to(args.device) for ti in t) for t in batch)
else:
batch = tuple(t.to(args.device) for t in batch)
inputs = {"doc_input_ids": batch[0], "doc_attention_mask": batch[1], "labels": batch[2], "ranks": batch[4],
"epoch": ep, 'doc_ids': batch[3], 'train': True, 't_total':t_total}
if args.encoder_type == 'bert':
inputs['token_type_ids'] = batch[-1]
# outputs = model(**inputs)
try:
outputs = model(**inputs)
except:
inputs = {"doc_input_ids": batch[0], "doc_attention_mask": batch[1], "labels": batch[2],
"ranks": batch[4], "epoch": ep, 'doc_ids': batch[3], 'train': True, 'debug':True}
outputs = model(**inputs)
#
if 'mc2c' in args.model and not args.no_mlcc:
cluster_loss, micro_loss = outputs[0], outputs[1]
micro_loss = args.lmbda * micro_loss
loss = cluster_loss + micro_loss
# cluster_loss, micro_loss, loss = outputs[0], outputs[1], outputs[2]
elif 'mc2c' in args.model and args.no_mlcc:
cluster_loss = torch.Tensor([0])
micro_loss = outputs[0] # model outputs are always tuple in transformers (see doc)
loss = micro_loss
else:
cluster_loss, micro_loss = torch.Tensor([0]), torch.Tensor([0])
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
cluster_loss = cluster_loss.mean()
micro_loss = micro_loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
cluster_loss = cluster_loss / args.gradient_accumulation_steps
micro_loss = micro_loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
tr_cluster_loss += cluster_loss.item()
tr_micro_loss += micro_loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
if 'mc2c' in args.model:
results = evaluate_mc2c(args, model, tokenizer)
else:
results = evaluate(args, model, tokenizer)
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
cluster_loss_scalar = (tr_cluster_loss - logging_cluster_loss) / args.logging_steps
micro_loss_scalar = (tr_micro_loss - logging_micro_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs["learning_rate"] = learning_rate_scalar
logs["loss"] = loss_scalar
logs['cluster loss'] = cluster_loss_scalar
logs['micro loss'] = micro_loss_scalar
logging_loss = tr_loss
logging_cluster_loss = tr_cluster_loss
logging_micro_loss = tr_micro_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{"step": global_step}}))
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
# print(torch.sum(model.cluster_classifier.weight))
return global_step, tr_loss / global_step
def evaluate_mc2c(args, model, tokenizer, prefix="", test=False):
eval_output_dir = args.output_dir
# print(torch.sum(model.cluster_classifier.weight))
results = {}
    if test:
        eval_dataset, label_dataset, idx2id = load_and_cache_examples(args, tokenizer, test=True, label_data=True)
    else:
        eval_dataset, label_dataset, idx2id = load_and_cache_examples(args, tokenizer, evaluate=True, label_data=True)
if 'mc2c' in args.model:
model.get_idx2id(idx2id)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
if args.doc_batching:
eval_dataloader = DataLoader(eval_dataset, sampler=None, batch_size=1, collate_fn=my_collate)
else:
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
label_dataloader = DataLoader(label_dataset, sampler=None, batch_size=len(label_dataset))
if args.model == 'label_attn' or args.model == 'la_mc2c':
model.initialize_label_data(next(iter(label_dataloader)))
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
ids = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
if args.doc_batching:
batch = tuple(tuple(ti.to(args.device) for ti in t) for t in batch)
else:
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
##############################
if args.doc_batching:
input_ids = batch[0][0]
attn_mask = batch[1][0]
labels = batch[2][0]
# ranks = batch[4][0]
else:
input_ids = batch[0] # may need to fix this!
attn_mask = batch[1] # may need to fix this!
labels = batch[2]
# ranks = batch[4]
inputs = {"doc_input_ids": input_ids, "doc_attention_mask": attn_mask, "labels": labels, "ranks": None, 'doc_ids': batch[3]}
if args.encoder_type == 'bert':
inputs['token_type_ids'] = batch[-1][0] # prolly gonna need to fix this
#############################
logits = model(**inputs)[0]
tmp_ids = []
for doc_id, logit in logits.items():
n_labels = logit.shape[0]
ids.append(doc_id)
tmp_ids.append(doc_id)
logits = torch.cat([logits[d] for d in tmp_ids])
logits.reshape((-1, n_labels))
eval_loss = 0
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
if args.doc_batching:
out_label_ids = batch[2][0].detach().cpu().numpy()
else:
out_label_ids = batch[2].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
if args.doc_batching:
out_label_ids = np.append(out_label_ids, batch[2][0].detach().cpu().numpy(), axis=0)
else:
out_label_ids = np.append(out_label_ids, batch[2].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = preds.reshape((len(eval_dataset), args.num_labels))
if args.train_with_none:
out_label_ids = out_label_ids.reshape((len(eval_dataset), args.num_labels-1))
else:
out_label_ids = out_label_ids.reshape((len(eval_dataset), args.num_labels))
if args.train_with_none:
preds = preds[:,:-1]
total_uniq = len(np.nonzero(np.sum(preds, axis=0))[0])
total_uniq_true = len(np.nonzero(np.sum(out_label_ids, axis=0))[0])
sorted_preds_idx = np.flip(np.argsort(preds), axis=1)
preds = (preds > args.prediction_threshold)
if not args.train_with_none:
assert preds.shape == out_label_ids.shape
result = acc_and_f1(preds, out_label_ids, args.metric_avg)
results.update(result)
n_labels = np.sum(preds, axis=1)
    avg_pred_n_labels = np.mean(n_labels)
import os
import sys
import numpy as np
def azi_ele_the_rho(top_dir):
pgm_filepath = [line for line in os.listdir(top_dir) if line.endswith('.pgm') and line.startswith('frame80')][0]
tmp = pgm_filepath.split('.pgm')[0].split('_')
azimuth_deg = float(tmp[2].split('azi')[1])
elevation_deg = float(tmp[3].split('ele')[1])
theta_deg = float(tmp[4].split('theta')[1])
rho = float(tmp[1].split('rho')[1])
print('azi %f ele %f the %f rho %f' % (azimuth_deg, elevation_deg, theta_deg, rho))
view_params = {}
view_params['azi'] = azimuth_deg
view_params['ele'] = elevation_deg
view_params['rho'] = rho
view_params['the'] = theta_deg
return view_params
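# Note (added): azi_ele_the_rho assumes pgm filenames of the form
#   frame80_rho<rho>_azi<azimuth>_ele<elevation>_theta<theta>.pgm
# e.g. 'frame80_rho1.5_azi30.0_ele45.0_theta10.0.pgm' yields
# {'azi': 30.0, 'ele': 45.0, 'the': 10.0, 'rho': 1.5}.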
def tran_rot(filepath):
rot = np.zeros((3,3))
    tran = np.zeros((3,))
import matplotlib.pyplot as plt
from collections.abc import Iterable
from h5py import Dataset
import numpy as np
def show_spectrum(spectrum):
naxes = 0
if 'power' in spectrum.fields:
naxes += 1
if 'phase' in spectrum.fields:
naxes += 1
fig, axs = plt.subplots(naxes, 1, sharex=True)
caxes = 0
    if not isinstance(axs, Iterable):
        ax = [axs]
    else:
        ax = axs
for ax_ in ax:
if 'power' in spectrum.fields:
ax_.semilogy(np.asarray(spectrum.frequencies), np.asarray(spectrum.power))
ax_.set_ylabel('Power')
caxes += 1
if 'phase' in spectrum.fields:
            ax_.plot(np.asarray(spectrum.frequencies), np.asarray(spectrum.phase))
# -*- coding: utf-8 -*-
"""
Package for dealing with alignment methods between two AmpObject meshes
Copyright: <NAME> 2020, <EMAIL>
"""
import numpy as np
import copy
import vtk
import math
from scipy import spatial
from scipy.optimize import minimize
from ampscan.core import AmpObject
from ampscan.vis import vtkRenWin
from ampscan.analyse import create_slices, est_volume, calc_csa
# For doc examples
import os
staticfh = os.path.join(os.getcwd(), "tests", "stl_file.stl")
movingfh = os.path.join(os.getcwd(), "tests", "stl_file_2.stl")
class align(object):
r"""
Automated alignment methods between two meshes
Parameters
----------
moving: AmpObject
The moving AmpObject that is to be aligned to the static object
static: AmpObject
The static AmpObject that the moving AmpObject that the moving object
will be aligned to
method: str, default 'linPoint2Plane'
A string of the method used for alignment
*args:
The arguments used for the alignment methods
**kwargs:
The keyword arguments used for the alignment methods
Returns
-------
m: AmpObject
The aligned AmpObject, it same number of vertices and face array as
the moving AmpObject
Access this using align.m
Examples
--------
>>> static = AmpObject(staticfh)
>>> moving = AmpObject(movingfh)
>>> al = align(moving, static).m
"""
def __init__(self, moving, static, method = 'linPoint2Plane',
inverse=False, *args, **kwargs):
mData = dict(zip(['vert', 'faces', 'values'],
[moving.vert, moving.faces, moving.values]))
alData = copy.deepcopy(mData)
self.m = AmpObject(alData, stype='reg')
self.s = static
if inverse:
self.inverse(method=method, *args, **kwargs)
else:
self.runICP(method=method, *args, **kwargs)
def runICP(self, method = 'linPoint2Plane', maxiter=20, inlier=1.0,
initTransform=None, *args, **kwargs):
r"""
The function to run the ICP algorithm, this function calls one of
multiple methods to calculate the affine transformation
Parameters
----------
method: str, default 'linPoint2Plane'
A string of the method used for alignment
maxiter: int, default 20
Maximum number of iterations to run the ICP algorithm
inlier: float, default 1.0
The proportion of closest points to use to calculate the
transformation, if < 1 then vertices with highest error are
discounted
*args:
The arguments used for the alignment methods
**kwargs:
The keyword arguments used for the alignment methods
"""
# Define the rotation, translation, error and quaterion arrays
Rs = np.zeros([3, 3, maxiter+1])
Ts = np.zeros([3, maxiter+1])
err = np.zeros([maxiter+1])
if initTransform is None:
initTransform = np.eye(4)
Rs[:, :, 0] = initTransform[:3, :3]
Ts[:, 0] = initTransform[3, :3]
fC = self.s.vert[self.s.faces].mean(axis=1)
kdTree = spatial.cKDTree(fC)
self.m.rigidTransform(Rs[:, :, 0], Ts[:, 0])
inlier = math.ceil(self.m.vert.shape[0]*inlier)
[dist, idx] = kdTree.query(self.m.vert, 1)
# Sort by distance
sort = np.argsort(dist)
# Keep only those within the inlier fraction
[dist, idx] = [dist[sort], idx[sort]]
[dist, idx, sort] = dist[:inlier], idx[:inlier], sort[:inlier]
err[0] = math.sqrt(dist.mean())
for i in range(maxiter):
if method == 'linPoint2Point':
[R, T] = getattr(self, method)(self.m.vert[sort, :],
fC[idx, :],
*args, **kwargs)
elif method == 'linPoint2Plane':
[R, T] = getattr(self, method)(self.m.vert[sort, :],
fC[idx, :],
self.s.norm[idx, :],
*args, **kwargs)
elif method == 'optPoint2Point':
[R, T] = getattr(self, method)(self.m.vert[sort, :],
fC[idx, :],
*args, **kwargs)
elif method == 'contPoints':
[R, T] = getattr(self, method)(*args, **kwargs)
self.m.rigidTransform(R, T)
[dist, idx] = kdTree.query(self.m.vert, 1)
sort = np.argsort(dist)
[dist, idx] = [dist[sort], idx[sort]]
[dist, idx, sort] = dist[:inlier], idx[:inlier], sort[:inlier]
self.tForm = np.r_[np.c_[R, np.zeros(3)], np.append(T, 1)[:, None].T]
self.R = R
self.T = T
self.rmse = math.sqrt(dist.mean())
return
elif method == 'idxPoints':
[R, T] = getattr(self, 'idxPoints')(*args, **kwargs)
self.m.rigidTransform(R, T)
[dist, idx] = kdTree.query(self.m.vert, 1)
sort = np.argsort(dist)
[dist, idx] = [dist[sort], idx[sort]]
[dist, idx, sort] = dist[:inlier], idx[:inlier], sort[:inlier]
self.tForm = np.r_[np.c_[R, np.zeros(3)], np.append(T, 1)[:, None].T]
self.R = R
self.T = T
self.rmse = math.sqrt(dist.mean())
return
elif method == 'optZVol':
self.optZVol(*args, **kwargs)
# print(self.T)
[dist, idx] = kdTree.query(self.m.vert, 1)
sort = np.argsort(dist)
[dist, idx] = [dist[sort], idx[sort]]
[dist, idx, sort] = dist[:inlier], idx[:inlier], sort[:inlier]
self.tForm = np.r_[np.c_[self.R, np.zeros(3)], np.append(self.T, 1)[:, None].T]
self.rmse = math.sqrt(dist.mean())
return
            else: raise KeyError('Not a supported alignment method')
Rs[:, :, i+1] = np.dot(R, Rs[:, :, i])
Ts[:, i+1] = np.dot(R, Ts[:, i]) + T
self.m.rigidTransform(R, T)
[dist, idx] = kdTree.query(self.m.vert, 1)
sort = np.argsort(dist)
[dist, idx] = [dist[sort], idx[sort]]
[dist, idx, sort] = dist[:inlier], idx[:inlier], sort[:inlier]
err[i+1] = math.sqrt(dist.mean())
# qs[:, i+1] = np.r_[self.rot2quat(R), T]
R = Rs[:, :, -1]
        # Project the accumulated rotation onto the nearest orthogonal matrix via SVD
[U, s, V] = np.linalg.svd(R)
R = np.dot(U, V)
self.tForm = np.r_[np.c_[R, np.zeros(3)], np.append(Ts[:, -1], 1)[:, None].T]
self.R = R
self.T = Ts[:, -1]
self.rmse = err[-1]
def inverse(self, method = 'linPoint2Plane', *args, **kwargs):
#inverting the objects
self.temp = self.s
self.s = self.m
self.m = self.temp
self.runICP(method=method, *args, **kwargs)
#resetting the objects
self.temp = self.s
self.s = self.m
self.m = self.temp
del self.temp
#inverting the transformation on both objects
self.R = self.R.transpose()
self.T = -self.T
self.tForm = np.r_[np.c_[self.R, np.zeros(3)], np.append(self.T, 1)[:, None].T]
self.s.rigidTransform(self.R, self.T)
self.m.rigidTransform(self.R, self.T)
@staticmethod
def linPoint2Plane(mv, sv, sn):
r"""
Iterative Closest Point algorithm which relies on using least squares
method from converting the minimisation problem into a set of linear
equations. This uses a
Parameters
----------
mv: ndarray
The array of vertices to be moved
sv: ndarray
The array of static vertices, these are the face centroids of the
static mesh
sn: ndarray
            The normals of the points in the static array, these are derived
from the normals of the faces for each centroid
Returns
-------
R: ndarray
The optimal rotation array
T: ndarray
The optimal translation array
References
----------
.. [1] <NAME>.; <NAME> (1992). "A Method for Registration of 3-D
Shapes". IEEE Trans. on Pattern Analysis and Machine Intelligence (Los
Alamitos, CA, USA: IEEE Computer Society) 14 (2): 239-256.
.. [2] <NAME>; <NAME> (1991). "Object modelling by registration of
multiple range images". Image Vision Comput. (Newton, MA, USA:
Butterworth-Heinemann): 145-155
Examples
--------
>>> static = AmpObject(staticfh)
>>> moving = AmpObject(movingfh)
>>> al = align(moving, static, method='linPoint2Plane').m
"""
cn = np.c_[np.cross(mv, sn), sn]
C = np.dot(cn.T, cn)
v = sv - mv
b = np.zeros([6])
for i, col in enumerate(cn.T):
b[i] = (v * np.repeat(col[:, None], 3, axis=1) * sn).sum()
X = np.linalg.lstsq(C, b, rcond=None)[0]
[cx, cy, cz] = np.cos(X[:3])
[sx, sy, sz] = np.sin(X[:3])
R = np.array([[cy*cz, sx*sy*cz-cx*sz, cx*sy*cz+sx*sz],
[cy*sz, cx*cz+sx*sy*sz, cx*sy*sz-sx*cz],
[-sy, sx*cy, cx*cy]])
T = X[3:]
return (R, T)
@staticmethod
def linPoint2Point(mv, sv):
r"""
Point-to-Point Iterative Closest Point algorithm which
relies on using singular value decomposition on the centered arrays.
Parameters
----------
mv: ndarray
The array of vertices to be moved
sv: ndarray
The array of static vertices, these are the face centroids of the
static mesh
Returns
-------
R: ndarray
The optimal rotation array
T: ndarray
The optimal translation array
References
----------
.. [1] <NAME>.; <NAME> (1992). "A Method for Registration of 3-D
Shapes". IEEE Trans. on Pattern Analysis and Machine Intelligence (Los
Alamitos, CA, USA: IEEE Computer Society) 14 (2): 239-256.
.. [2] <NAME>; <NAME> (1991). "Object modelling by registration of
multiple range images". Image Vision Comput. (Newton, MA, USA:
Butterworth-Heinemann): 145-155
Examples
--------
>>> static = AmpObject(staticfh)
>>> moving = AmpObject(movingfh)
>>> al = align(moving, static, method='linPoint2Point').m
"""
mCent = mv - mv.mean(axis=0)
sCent = sv - sv.mean(axis=0)
C = np.dot(mCent.T, sCent)
[U,_,V] = np.linalg.svd(C)
det = np.linalg.det(np.dot(U, V))
sign = np.eye(3)
sign[2,2] = np.sign(det)
R = np.dot(V.T, sign)
R = np.dot(R, U.T)
T = sv.mean(axis=0) - np.dot(R, mv.mean(axis=0))
return (R, T)
@staticmethod
def contPoints(mv=None, sv=None):
r"""
        Point-to-Point alignment of user-supplied control points, which
        relies on using singular value decomposition on the centered arrays.
Parameters
----------
mv: ndarray
The array of control points to be moved
sv: ndarray
The array of control points
Returns
-------
R: ndarray
The optimal rotation array
T: ndarray
The optimal translation array
References
----------
.. [1] <NAME>.; <NAME> (1992). "A Method for Registration of 3-D
Shapes". IEEE Trans. on Pattern Analysis and Machine Intelligence (Los
Alamitos, CA, USA: IEEE Computer Society) 14 (2): 239-256.
.. [2] <NAME>; <NAME> (1991). "Object modelling by registration of
multiple range images". Image Vision Comput. (Newton, MA, USA:
Butterworth-Heinemann): 145-155
Examples
--------
>>> static = AmpObject(staticfh)
>>> moving = AmpObject(movingfh)
        >>> al = align(moving, static, mv=m_ctrl, sv=s_ctrl, method='contPoints').m
"""
        if mv is None or sv is None:
            raise ValueError('To call the contPoints ICP method, ensure that '
                             'mv and sv have been defined as keyword arguments')
mv = np.asarray(mv)
sv = np.asarray(sv)
        if mv.shape != sv.shape:
            raise ValueError('Not the same number of static and moving control points')
mCent = mv - mv.mean(axis=0)
sCent = sv - sv.mean(axis=0)
C = np.dot(mCent.T, sCent)
[U,_,V] = np.linalg.svd(C)
det = np.linalg.det(np.dot(U, V))
sign = np.eye(3)
sign[2,2] = np.sign(det)
R = np.dot(V.T, sign)
R = np.dot(R, U.T)
T = sv.mean(axis=0) - np.dot(R, mv.mean(axis=0))
# print(R)
# print(T)
return (R, T)
def idxPoints(self, mv=None, sv=None):
r"""
        Point-to-Point alignment of vertices selected by index, which extracts
        the vertex coordinates and delegates to the contPoints SVD solver.
Parameters
----------
mv: ndarray
The index array of moving vertices to be aligned
sv: ndarray
The index array of static vertices to be aligned to
Returns
-------
R: ndarray
The optimal rotation array
T: ndarray
The optimal translation array
References
----------
.. [1] <NAME>.; <NAME> (1992). "A Method for Registration of 3-D
Shapes". IEEE Trans. on Pattern Analysis and Machine Intelligence (Los
Alamitos, CA, USA: IEEE Computer Society) 14 (2): 239-256.
.. [2] <NAME>; <NAME> (1991). "Object modelling by registration of
multiple range images". Image Vision Comput. (Newton, MA, USA:
Butterworth-Heinemann): 145-155
Examples
--------
>>> static = AmpObject(staticfh)
>>> moving = AmpObject(movingfh)
>>> al = align(moving, static, mv = [0, 1, 2], sv = [0, 1, 2], method='idxPoints').m
"""
        if mv is None or sv is None:
            raise ValueError('To call the idxPoints ICP method, ensure that '
                             'mv and sv have been defined as keyword arguments')
return self.contPoints(mv=self.m.vert[mv, :], sv=self.s.vert[sv, :])
@staticmethod
def optPoint2Point(mv, sv, opt='L-BFGS-B'):
r"""
Direct minimisation of the rmse between the points of the two meshes. This
method enables access to all of Scipy's minimisation algorithms
Parameters
----------
mv: ndarray
The array of vertices to be moved
sv: ndarray
The array of static vertices, these are the face centroids of the
static mesh
        opt: str, default 'L-BFGS-B'
The string of the scipy optimiser to use
Returns
-------
R: ndarray
The optimal rotation array
T: ndarray
The optimal translation array
Examples
--------
>>> static = AmpObject(staticfh)
>>> moving = AmpObject(movingfh)
>>> al = align(moving, static, method='optPoint2Point', opt='SLSQP').m
"""
X = np.zeros(6)
lim = [-np.pi/4, np.pi/4] * 3 + [-5, 5] * 3
lim = np.reshape(lim, [6, 2])
try:
X = minimize(align.optDistError, X,
args=(mv, sv),
bounds=lim, method=opt)
except:
X = minimize(align.optDistError, X,
args=(mv, sv),
method=opt)
[angx, angy, angz] = X.x[:3]
Rx = np.array([[1, 0, 0],
[0, np.cos(angx), -np.sin(angx)],
[0, np.sin(angx), np.cos(angx)]])
Ry = np.array([[np.cos(angy), 0, np.sin(angy)],
[0, 1, 0],
[-np.sin(angy), 0, np.cos(angy)]])
Rz = np.array([[np.cos(angz), -np.sin(angz), 0],
[np.sin(angz), np.cos(angz), 0],
[0, 0, 1]])
R = np.dot(np.dot(Rz, Ry), Rx)
T = X.x[3:]
return (R, T)
@staticmethod
def optDistError(X, mv, sv):
r"""
The function to minimise. It performs the affine transformation then returns
the rmse between the two vertex sets
Parameters
----------
X: ndarray
The affine transformation corresponding to [Rx, Ry, Rz, Tx, Ty, Tz]
mv: ndarray
The array of vertices to be moved
sv: ndarray
The array of static vertices, these are the face centroids of the
static mesh
Returns
-------
err: float
The RMSE between the two meshes
"""
[angx, angy, angz] = X[:3]
Rx = np.array([[1, 0, 0],
[0, np.cos(angx), -np.sin(angx)],
[0, np.sin(angx), np.cos(angx)]])
Ry = np.array([[np.cos(angy), 0, np.sin(angy)],
[0, 1, 0],
[-np.sin(angy), 0, np.cos(angy)]])
Rz = np.array([[np.cos(angz), -np.sin(angz), 0],
[np.sin(angz), np.cos(angz), 0],
[0, 0, 1]])
R = np.dot(np.dot(Rz, Ry), Rx)
moved = np.dot(mv, R.T)
moved += X[3:]
dist = (moved - sv)**2
dist = dist.sum(axis=1)
err = np.sqrt(dist.mean())
return err
def optZVol(self, z0 = 0):
r"""
Direct minimisation of the volume.
1) Translate moving object to match minZ of static
2) Calculate volume to static z0
3) Cumulative sum the slices and evaluate volume
4) Find slice nearest volume of static
Parameters
----------
        z0: float, default 0
            The slice height to evaluate the static volume from
        Returns
        -------
        R: ndarray
            The optimal rotation array (identity), stored as self.R
        T: ndarray
            The optimal translation array (z shift only), stored as self.T
"""
sMinZ = self.s.vert[:, 2].min()
mMinZ = self.m.vert[:, 2].min()
dZ = mMinZ - sMinZ
# Keep track of T
T = dZ
self.m.vert[:, 2] += dZ
mMaxZ = self.m.vert[:, 2].max()
# Create slices of static from 2 mm below dist to z0
# print([sMinZ + 1, z0])
sPolys = create_slices(self.s, [sMinZ + 1, z0], 0.5, typ='real_intervals', axis=2)
sVol = est_volume(sPolys)
# Create slices of static from 2 mm below dist to z0
mPolys = create_slices(self.m, [sMinZ + 1, mMaxZ - 1], 0.5, typ='real_intervals', axis=2)
# Iterate through mPolys
csa = calc_csa(mPolys)
# Get the distance between each slice
d = []
for p in mPolys:
d.append(p[:, 2].mean())
d = np.asarray(d)
# Get distance between each slice
dist = np.abs(d[1:]- d[:-1])
vol = np.c_[csa[1:], csa[:-1]]
vol = np.mean(vol, axis=1) * dist
# Add in 0 at start to ease indexing
vol = np.insert(vol, 0, 0)
# print(sVol)
# print(vol)
vol = np.cumsum(vol) - sVol
# print(vol)
for (i, v) in enumerate(vol):
if v >= 0:
break
# Linear interpolate z in between slices, different as (n-1) sections to slices
zl = d[i - 1]
zh = d[i]
vl = vol[i - 1]
vh = vol[i]
dz = zh - zl
dv = vh - vl
# Absolute value of z to reach
z = zl + ((0 - vl)/ dv) * dz;
# print(z)
# Translate by the calculated z value
# z -= d[0]
T -= z
# print(vl, sVol, vh)
self.m.vert[:, 2] -= z
self.R = np.eye(3)
self.T = [0, 0, T]
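    # A minimal usage sketch for the volume-matching alignment (hypothetical
    # AmpObject inputs; z0 is forwarded through runICP's keyword arguments):
    #
    #   al = align(moving, static, method='optZVol', z0=0)
    #   print(al.T)   # [0, 0, dz] translation that matches the sliced volumes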
@staticmethod
def rot2quat(R):
"""
Convert a rotation matrix to a quaternionic matrix
Parameters
----------
R: array_like
The 3x3 rotation array to be converted to a quaternionic matrix
Returns
-------
Q: ndarray
The quaternionic matrix
"""
[[Qxx, Qxy, Qxz],
[Qyx, Qyy, Qyz],
[Qzx, Qzy, Qzz]] = R
t = Qxx + Qyy + Qzz
if t >= 0:
r = math.sqrt(1+t)
s = 0.5/r
w = 0.5*r
x = (Qzy-Qyz)*s
y = (Qxz-Qzx)*s
z = (Qyx-Qxy)*s
else:
maxv = max([Qxx, Qyy, Qzz])
if maxv == Qxx:
r = math.sqrt(1+Qxx-Qyy-Qzz)
s = 0.5/r
w = (Qzy-Qyz)*s
x = 0.5*r
y = (Qyx+Qxy)*s
z = (Qxz+Qzx)*s
elif maxv == Qyy:
r = math.sqrt(1+Qyy-Qxx-Qzz)
s = 0.5/r
w = (Qxz-Qzx)*s
x = (Qyx+Qxy)*s
y = 0.5*r
z = (Qzy+Qyz)*s
else:
r = math.sqrt(1+Qzz-Qxx-Qyy)
s = 0.5/r
w = (Qyx-Qxy)*s
x = (Qxz+Qzx)*s
y = (Qzy+Qyz)*s
z = 0.5*r
return np.array([w, x, y, z])
def display(self):
r"""
        Display the static mesh and the aligned moving mesh within an
        interactive VTK window
"""
if not hasattr(self.s, 'actor'):
self.s.addActor()
if not hasattr(self.m, 'actor'):
self.m.addActor()
# Generate a renderer window
win = vtkRenWin()
# Set the number of viewports
win.setnumViewports(1)
# Set the background colour
win.setBackground([1,1,1])
# Set camera projection
renderWindowInteractor = vtk.vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(win)
renderWindowInteractor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
# Set camera projection
win.setView()
self.s.actor.setColor([1.0, 0.0, 0.0])
self.s.actor.setOpacity(0.5)
self.m.actor.setColor([0.0, 0.0, 1.0])
self.m.actor.setOpacity(0.5)
win.renderActors([self.s.actor, self.m.actor])
win.Render()
win.rens[0].GetActiveCamera().Azimuth(180)
win.rens[0].GetActiveCamera().SetParallelProjection(True)
win.Render()
return win
def genIm(self, crop=False):
r"""
        Render the static mesh and the aligned moving mesh off-screen with VTK
        and return the resulting image, optionally cropped to the geometry
"""
if not hasattr(self.s, 'actor'):
self.s.addActor()
if not hasattr(self.m, 'actor'):
self.m.addActor()
# Generate a renderer window
win = vtkRenWin()
# Set the number of viewports
win.setnumViewports(1)
# Set the background colour
win.setBackground([1,1,1])
# Set camera projection
# Set camera projection
win.setView([0, -1, 0], 0)
win.SetSize(512, 512)
win.Modified()
win.OffScreenRenderingOn()
self.s.actor.setColor([1.0, 0.0, 0.0])
self.s.actor.setOpacity(0.5)
self.m.actor.setColor([0.0, 0.0, 1.0])
self.m.actor.setOpacity(0.5)
win.renderActors([self.s.actor, self.m.actor])
win.Render()
win.rens[0].GetActiveCamera().Azimuth(0)
win.rens[0].GetActiveCamera().SetParallelProjection(True)
win.Render()
im = win.getImage()
if crop is True:
mask = np.all(im == 1, axis=2)
mask = ~np.all(mask, axis=1)
im = im[mask, :, :]
mask = np.all(im == 1, axis=2)
            mask = ~np.all(mask, axis=0)
            im = im[:, mask, :]
        return im
#importing the necessary modules
import pandas as pd
import numpy as np
from pandas import DataFrame as df
from pandas import Series as sr
#NxN tic-tac-toe board
N=3
#function to check the winner
def checkwin(board,n=3):
global N
N=n
flag1=True
flag2=True
temp3=board[0][0]
temp4=board[0][N-1]
for i in range(N):
flag=True
temp1=board[i][0]
temp2=board[0][i]
for j in range(N):
if temp1!=board[i][j] or temp1==-1:
flag=False
break
if flag:
return temp1
flag=True
for j in range(N):
if temp2!=board[j][i] or temp2==-1:
flag=False
break
if flag:
return temp2
if temp3!=board[i][i]:
flag1=False
if temp4!=board[i][N-1-i]:
flag2=False
if flag1:
return temp3
if flag2:
return temp4
return -1
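# A quick sanity check for checkwin (kept as a comment so the module has no
# import-time side effects); -1 marks an empty cell:
#
#   b = np.array([[ 1,  1,  1],
#                 [ 0, -1,  0],
#                 [-1, -1, -1]])
#   checkwin(b)   # -> 1 (player 1 holds the top row)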
#function to generate random tic-tac-toe boards
#used for testing purposes
def gen_rnd_board(steps=5):
global N
x=np.array([-1]*(N*N)).reshape((N,N))
move=1
for i in range(steps):
ind=np.argwhere(x==-1)
ch=np.random.choice(len(ind))
x[ind[ch][0]][ind[ch][1]]=move
move=(move+1)%2
return x
#Check if the move is valid, i.e. the position of the new move is on the board and empty
def checkinvalid(board,a,b):
global N
if a>=0 and a<N and b<N and b>=0:
if board[a][b]==-1:
return True
return False
return False
#function to print the board in tic-tac-toe grid format
def print_board(board,flag=False):
if flag:
return
txt=''
x=np.where(board==-1,' ',np.where(board==0,'O','X'))
for i in range(N):
t1=' | '.join(x[i])
print(t1)
if i!=N-1:
txt2=['_']*(3*N)
t2=''.join(txt2)
print(t2)
def print_(text,flag):
if flag==False:
print(text)
return
#AI algorithm 1 (SLOWER)
#plain minimax: it generally doesn't choose the move that ensures the fastest win, but it is still undefeatable
def find_best_move(t,depth=0):
global N
board=t.copy()
ind=np.argwhere(board<0)
move=(N-len(ind)+1)%2
if len(ind)==0 or checkwin(board)==1 or checkwin(board)==0:
winner=checkwin(board)
if winner==1:
return 1
elif winner==0:
return -1
else:
return 0
scores=[]
for i in range(len(ind)):
indices=ind[i]
board[indices[0]][indices[1]]=move
scores.append(find_best_move(board,depth+1))
board[indices[0]][indices[1]]=-1
if move==0:
if depth==0:
return ind[(np.array(scores)).argmin()]
return min(scores)
if move==1:
if depth==0:
return ind[(np.array(scores)).argmax()]
return max(scores)
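# Note on the convention used by the minimax routines here: player 1 ('X') is
# the maximiser and player 0 ('O') the minimiser; a leaf scores +1 for an X win,
# -1 for an O win and 0 for a draw (scaled by depth in find_best_move2).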
#AI algorithm 2 (A LITTLE FASTER)
#chooses moves that ensure a faster win by using an evaluation function for the board which also considers the number of moves ('depth')
#impossible to defeat
def find_best_move2(t,depth=0):
global N
board=t.copy()
ind=np.argwhere(board<0)
move=(N-len(ind)+1)%2
if len(ind)==0 or checkwin(board)==1 or checkwin(board)==0:
winner=checkwin(board)
if winner==1:
return max(1-depth/(N*N),0.1)
elif winner==0:
return min(-1+depth/(N*N),-0.1)
else:
return 0
scores=[]
for i in range(len(ind)):
indices=ind[i]
board[indices[0]][indices[1]]=move
scores.append(find_best_move2(board,depth+1))
board[indices[0]][indices[1]]=-1
if move==0:
if depth==0:
return ind[(np.array(scores)).argmin()]
return min(scores)
if move==1:
if depth==0:
return ind[(np.array(scores)).argmax()]
return max(scores)
#AI algorithm 3 (FASTEST)
#uses alpha-beta pruning to increase the efficiency in rejecting bad moves
#wins in the minimum number of moves
#CAN'T BEAT IT
Max=1000
Min=-1000
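# Max and Min act as the initial alpha-beta window passed to find_best_move3
# (effectively +/- infinity, since leaf scores are bounded well inside +/-1000).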
def find_best_move3(t,alpha=-1000,beta=1000,depth=0):
global N,Max,Min
board=t.copy()
    ind=np.argwhere(board<0)
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 7 19:51:59 2021
@author: <NAME>
"""
import sys
import numpy as np
import pandas as pd
from scipy.optimize import fsolve, least_squares
import matplotlib.pyplot as plt
from pickle import load
from gpModel import gp_model
import GPy
from copy import deepcopy
from pickle import load, dump
def plotResults(calcName):
standData = pd.read_csv("data/StandardTestData.csv")
# plt.figure()
# plt.fill_between(cum_times[6:], np.abs(avg_max[6:])+2*std_max[6:], np.abs(avg_max[6:])-2*std_max[6:], alpha=0.5)
# plt.plot(cum_times[6:], np.abs(avg_max[6:]))
# plt.ylim([0,10])
pass
def ThreeHumpCamel(x):
x = x*10 - 5
if x.shape[0] == 2:
output = 2*x[0]**2 - 1.05*x[0]**4 + (x[0]**6)/6 + x[0]*x[1] + x[1]**2
else:
output = 2*x[:,0]**2 - 1.05*x[:,0]**4 + (x[:,0]**6)/6 + x[:,0]*x[:,1] + x[:,1]**2
return -output
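# The benchmark functions above and below rescale the unit square onto
# [-5, 5]^2 (via x*10 - 5) and return the negated objective, so maximising them
# is equivalent to minimising the underlying three-hump camel variants.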
def ThreeHumpCamel_LO1(x):
x = x*10 - 5
if x.shape[0] == 2:
output = 1.05*(x[0]-0.5)**4 + (x[0]**6)/6 + x[0]*x[1] + x[1]**2
else:
output = 1.05*(x[:,0]-0.5)**4 + (x[:,0]**6)/6 + x[:,0]*x[:,1] + x[:,1]**2
return -output
def ThreeHumpCamel_LO2(x):
x = x*10 - 5
if x.shape[0] == 2:
output = 2*(x[0]+0.5)**2 + (x[0]**6)/6 + x[0]*x[1] + x[1]**2
else:
output = 2*(x[:,0]+0.5)**2 + (x[:,0]**6)/6 + x[:,0]*x[:,1] + x[:,1]**2
return -output
def ThreeHumpCamel_LO3(x):
x = x*10 - 5
if x.shape[0] == 2:
output = 2*(x[0]*0.5)**2 - 1.05*x[0]**4 + x[0]*x[1] + x[1]**2
else:
output = 2*(x[:,0]*0.5)**2 - 1.05*x[:,0]**4 + x[:,0]*x[:,1] + x[:,1]**2
return -output
class RVE_GP():
def __init__(self):
self.mean = 0
self.std = 0
self.gp = 0
self.setup()
def setup(self):
data = pd.read_excel('./data/rve_data.xlsx')
data.iloc[:,0] = (data.iloc[:,0]-650)/200
data.iloc[:,2] = data.iloc[:,2]/3
data.iloc[:,3] = data.iloc[:,3]/2
self.mean = np.mean(data.iloc[:,5])
self.std = np.std(data.iloc[:,5])
data.iloc[:,5] = (data.iloc[:,5]-self.mean)/self.std
self.gp = gp_model(data.iloc[:,0:4], data.iloc[:,5], np.array([0.12274117, 0.08612411, 0.65729583, 0.23342798]), 0.16578065, 0.1, 4, 'SE')
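        # Inputs are rescaled to roughly unit range (temperature via
        # (T - 650)/200, the composition columns by their nominal maxima) and
        # the target is z-scored; predict() maps the GP output back with
        # self.mean and self.std.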
def predict(self, x_predict):
if len(x_predict.shape) == 1:
x_predict = np.expand_dims(x_predict, axis=0)
# x = np.ones((x_predict.shape[0],4))
# x[:,0] = (x_predict[:,0]-650)/200 #Temperature
# x[:,1] = x_predict[:,1] #wt% C
# x[:,2] = x[:,2]/2 #wt% Si
# x[:,3] = x[:,3]/3 #wt% Mn
mean, var = self.gp.predict_var(x_predict)
return (mean*self.std + self.mean)
def test_fit(self):
data = pd.read_excel('../data/rve_data.xlsx')
data_1 = deepcopy(data)
data.iloc[:,0] = (data.iloc[:,0]-650)/200
data.iloc[:,2] = data.iloc[:,2]/3
data.iloc[:,3] = data.iloc[:,3]/2
test_data = [[],[],[],[],[],[],[],[],[],[]]
train_data = [[],[],[],[],[],[],[],[],[],[]]
count = 1
        while count <= 1500:
            new_num = np.random.randint(0,1522)
            # Place the index in the first test fold that does not already
            # contain it and is not yet full (150 indices per fold)
            for j in range(10):
                if (new_num not in test_data[j]) and (len(test_data[j])<150):
                    test_data[j].append(new_num)
                    count += 1
                    break
        for i in range(1522):
            # Each training fold holds every index absent from its test fold
            for j in range(10):
                if i not in test_data[j]:
                    train_data[j].append(i)
test_data = np.array(test_data)
train_data = np.array(train_data)
self.mean = np.mean(data.iloc[:,5])
self.std = np.std(data.iloc[:,5])
data.iloc[:,5] = (data.iloc[:,5]-self.mean)/self.std
results = np.zeros((1500,2))
for i in range(10):
self.gp = gp_model(data.iloc[train_data[i],[0,1,2,3]],
data.iloc[train_data[i],5],
[0.12274117, 0.08612411, 0.65729583, 0.23342798],
0.16578065, 0.1, 4, 'SE')
out = self.predict(np.array(data_1.iloc[test_data[i],[0,1,2,3]]))
results[i*150:(i+1)*150,0] = out
results[i*150:(i+1)*150,1] = data.iloc[test_data[i],5] * self.std + self.mean
self.setup()
results_all = np.zeros((1522,2))
results_all[:,1] = data.iloc[:,5] * self.std + self.mean
results_all[:,0] = self.predict(np.array(data_1.iloc[:,[0,1,2,3]]))
return results, results_all
class TC_GP():
def __init__(self):
self.y_mean = []
self.y_std = []
self.y_max = []
self.tc_gp = []
self.setup()
def setup(self):
data = pd.read_excel("./data/tc_data.xlsx")
x_train = np.array(data.iloc[:,1:5])
x_train[:,0] = (x_train[:,0]-650)/200
x_train[:,1] = 100*x_train[:,1]
x_train[:,2] = 100*x_train[:,2]/2
x_train[:,3] = 100*x_train[:,3]/3
l_param_list = [[np.sqrt(0.28368), np.sqrt(0.44255), np.sqrt(0.19912), np.sqrt(5.48465)],
[np.sqrt(2.86816), np.sqrt(2.57049), np.sqrt(0.64243), np.sqrt(94.43864)],
[np.sqrt(6.41552), np.sqrt(12.16391), np.sqrt(7.16226), np.sqrt(27.87327)],
[np.sqrt(34.57352), np.sqrt(12.83549), np.sqrt(4.73291), np.sqrt(275.83489)]]
sf_list = [4*1.57933, 4*5.5972, 4*78.32377, 4*14.79803]
for k in range(4):
self.y_mean.append(np.mean(np.array(data.iloc[:,k+5])))
self.y_max.append(np.max(np.array(data.iloc[:,k+5])))
self.y_std.append(np.std(np.array(data.iloc[:,k+5])))
y_train = (np.array(data.iloc[:,k+5])-self.y_mean[k])/self.y_std[k]
l_param = l_param_list[k]
sf = sf_list[k]
self.tc_gp.append(gp_model(x_train, y_train, np.array(l_param), sf, 0.05, 4, 'M52'))
def TC_GP_Predict(self, index, x_predict):
# x_predict = np.expand_dims(x_predict, 0)
y_out, y_out_var = self.tc_gp[index].predict_var(x_predict)
y_pred = y_out*self.y_std[index] + self.y_mean[index]
y_pred[np.where(y_pred<0)] = 0
y_pred[np.where(y_pred>self.y_max[index])] = self.y_max[index]
return y_pred
# if y_pred < 0:
# return 0
# elif y_pred > self.y_max[index]:
# return self.y_max[index]
# else:
# return y_pred
def predict(self, x_predict):
if len(x_predict.shape) == 1:
x_predict = np.expand_dims(x_predict, axis=0)
x = np.ones((x_predict.shape[0],4))
x[:,0] = (x_predict[:,0]-650)/200 #Temperature
x[:,1] = x_predict[:,1] #wt% C
x[:,2] = x[:,2]/2 #wt% Si
x[:,3] = x[:,3]/3 #wt% Mn
vf = self.TC_GP_Predict(0, x)
xC = self.TC_GP_Predict(1, x)
xSi = self.TC_GP_Predict(2, x)
xMn = self.TC_GP_Predict(3, x)
vf_ferr = 1-vf
xMn_ferr = np.zeros_like(vf_ferr)
xSi_ferr = np.zeros_like(vf_ferr)
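        # Lever-rule mass balance for the remaining (ferrite) fraction: overall
        # solute content minus what the predicted phase fraction vf carries,
        # divided by vf_ferr, guarded against a vanishing ferrite fraction.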
xMn_ferr[np.where(vf_ferr>1e-6)] = (x[np.where(vf_ferr>1e-6),3]/100-vf[np.where(vf_ferr>1e-6)]*xMn[np.where(vf_ferr>1e-6)])/vf_ferr[np.where(vf_ferr>1e-6)]
xSi_ferr[np.where(vf_ferr>1e-6)] = (x[np.where(vf_ferr>1e-6),2]/100-vf[np.where(vf_ferr>1e-6)]*xSi[np.where(vf_ferr>1e-6)])/vf_ferr[np.where(vf_ferr>1e-6)]
xMn_ferr[np.where(xMn_ferr<0)] = 0
xSi_ferr[np.where(xSi_ferr<0)] = 0
xMn_ferr[np.where(xMn_ferr>x[:,3]/100)] = x[np.where(xMn_ferr>x[:,3]/100),3]/100
xSi_ferr[np.where(xSi_ferr>x[:,2]/100)] = x[np.where(xSi_ferr>x[:,2]/100),2]/100
return np.array([vf,xC,xMn_ferr,xSi_ferr]).transpose()
def isostrain_IS(x,ep):
beta_Si = 732.7676
beta_Mn = 213.4494
# beta_C = 7507.582
single_calc = False
mm = x.shape[0]
if x.shape[0] == 4:
try:
a = x.shape[1]
x = x.transpose()
except IndexError:
x = np.array([[x[0],x[1],x[2],x[3]],[0,0,0,0]])
single_calc = True
mm = 1
f=x[:,0]
x_C = x[:,1]
x_Mn = x[:,2]
x_Si = x[:,3]
s0F = np.zeros((mm,1))
s0M = np.zeros((mm,1))
sF = np.zeros((mm,1))
sM = np.zeros((mm,1))
stress = np.zeros((mm,10001))
str_ = np.zeros((mm,1))
dsde = np.zeros((mm,1))
cc = np.zeros((mm,))
index = np.zeros((mm,90))
for ii in range(mm):
# yield strength of the phases
s0F[ii]=200 + beta_Mn*((x_Mn[ii])**0.5) + beta_Si*((x_Si[ii])**0.5)
s0M[ii]=400+1000*((100*x_C[ii])**(1/3))
kF=2200
kM=450
nF=0.5
nM=0.06
strain=np.linspace(0,1,10001,endpoint=True)
for i in range(10001):
sF[ii]=s0F[ii]+kF*strain[i]**nF
sM[ii]=s0M[ii]+kM*strain[i]**nM
stress[ii,i]=((1-f[ii])*sF[ii])+(f[ii]*sM[ii])
index[ii,:] = np.array(np.nonzero(strain <= ep))
str_[ii]=stress[ii,int(np.max(index[ii,:]))]
dsde[ii]=(stress[ii,int(np.max(index[ii,:]))+1]-stress[ii,int(np.max(index[ii,:]))-1])/(2*(strain[int(np.max(index[ii,:]))+1]-strain[int(np.max(index[ii,:]))]))
cc[ii]=dsde[ii]/str_[ii]
if single_calc:
return cc[0]
else:
return cc
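# In the micromechanics routines here, the returned cc = (dsigma/deps) / sigma
# is the normalised work-hardening rate of the composite evaluated at the
# requested plastic strain ep.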
def isostress_IS(x,ep):
beta_Si = 732.7676
beta_Mn = 213.4494
# beta_C = 7507.582
single_calc = False
mm = x.shape[0]
if x.shape[0] == 4:
try:
a = x.shape[1]
x = x.transpose()
except IndexError:
x = np.array([[x[0],x[1],x[2],x[3]],[0,0,0,0]])
single_calc = True
mm = 1
f=x[:,0]
x_C = x[:,1]
x_Mn = x[:,2]
x_Si = x[:,3]
s0F = np.zeros((mm,1))
s0M = np.zeros((mm,1))
# sF = np.zeros((mm,1))
# sM = np.zeros((mm,1))
# stress = np.zeros((mm,10001))
str_ = np.zeros((mm,1))
dsde = np.zeros((mm,1))
cc = np.zeros((mm,))
for ii in range(mm):
# yield strength of the phases
s0F[ii]=200 + beta_Mn*((x_Mn[ii])**0.5) + beta_Si*((x_Si[ii])**0.5)
s0M[ii]=400+1000*((100*x_C[ii])**(1/3))
vf=f[ii]
kF=2200
kM=450
nF=0.5
nM=0.06
# Overall Stress
stress=np.linspace(170,1900,173000,endpoint=True)
l=len(stress)
strain = np.zeros((l,1))
for i in range(l):
if (stress[i] < s0F[ii]):
epF=0;
else:
epF=((stress[i]-s0F[ii])/kF)**(1/nF)
if (stress[i] < s0M[ii]):
epM=0
else:
epM=((stress[i]-s0M[ii])/kM)**(1/nM);
strain[i]=((1-vf)*epF)+(vf*epM);
index = np.array(np.nonzero(strain <= ep))
str_=stress[np.max(index)];
dsde=(stress[np.max(index)+1]-stress[np.max(index)-1])/(2*(strain[np.max(index)+1]-strain[np.max(index)]))
cc[ii]=dsde/str_
if single_calc:
return cc[0]
else:
return cc
def isowork_IS(x,ep):
beta_Si = 732.7676
beta_Mn = 213.4494
# beta_C = 7507.582
single_calc = False
mm = x.shape[0]
if x.shape[0] == 4:
try:
a = x.shape[1]
x = x.transpose()
except IndexError:
x = np.array([[x[0],x[1],x[2],x[3]],[0,0,0,0]])
single_calc = True
mm = 1
f=x[:,0]
x_C = x[:,1]
x_Mn = x[:,2]
x_Si = x[:,3]
cc = np.zeros((mm,))
for ii in range(mm):
# yield strength of the phases
s0F=200 + beta_Mn*((x_Mn[ii])**0.5) + beta_Si*((x_Si[ii])**0.5)
s0M=400+1000*((100*x_C[ii])**(1/3))
vf=f[ii]
kF=2200
kM=450
nF=0.5
nM=0.06
# strain increment in ferrite
depF=0.0001
epF=np.zeros((10000,1))
epM=np.zeros((10000,1))
sF=np.ones((10000,1))*s0F
sM=np.ones((10000,1))*s0M
sT=np.zeros((10000,1))
epT=np.zeros((10000,1))
SS = np.zeros((10000,2))
for k in range(9999): #i=2:(1/depF)
i = k+1
epF[i]=epF[i-1]+depF
sF[i]=s0F+kF*epF[i]**nF
wF=sF[i]*depF
temp=epM[i-1]
isow = lambda wF,s0M,kM,nM,temp,depM : wF-((s0M+kM*(temp+depM)**nM)*depM)
# isow=@(wF,s0M,kM,nM,temp,depM) wF-((s0M+kM*(temp+depM)^nM)*depM)
fun = lambda depM : isow(wF,s0M,kM,nM,temp,depM)
# fun=@(depM) isow(wF,s0M,kM,nM,temp,depM)
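            # Solve for the martensite strain increment depM that dissipates the
            # same plastic work as the current ferrite increment (iso-work
            # assumption), using depF as the initial guess.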
depM=fsolve(fun,depF) # depF is initial guess
epM[i]=epM[i-1]+depM
sM[i]=s0M+kM*epM[i]**nM
sT[i]=((1-vf)*sF[i])+(vf*sM[i])
epT[i]=((1-vf)*epF[i])+(vf*epM[i])
SS[i,0]=epT[i]
SS[i,1]=sT[i]
strain=np.zeros((10000,1))
stress=np.zeros((10000,1))
for iii in range(10000):
strain[iii]=SS[iii,0]
stress[iii]=SS[iii,1]
index = np.array(np.nonzero(strain <= ep))
str_=stress[np.max(index)];
dsde=(stress[np.max(index)+1]-stress[np.max(index)-1])/(2*(strain[np.max(index)+1]-strain[np.max(index)]))
cc[ii]=dsde/str_
if single_calc:
return cc[0]
else:
return cc
def EC_Mart_IS(x,ep):
# 0 represents - Matrix
# 1 represents - inclusions
# Input Variables
beta_Si = 732.7676
beta_Mn = 213.4494
# beta_C = 7507.582;
single_calc = False
mm = x.shape[0]
if x.shape[0] == 4:
try:
a = x.shape[1]
x = x.transpose()
except IndexError:
x = np.array([[x[0],x[1],x[2],x[3]],[0,0,0,0]])
single_calc = True
mm = 1
f=x[:,0]
x_C = x[:,1]
x_Mn = x[:,2]
x_Si = x[:,3]
cc = np.zeros((mm))
for ii in range(mm):
vf=f[ii]
# Ferrite
E_1 = 200*10**3
PR_1 = 0.3
Mu_1 = E_1/(2*(1+PR_1))
sigy_1 = 200 + beta_Mn*((x_Mn[ii])**0.5) + beta_Si*((x_Si[ii])**0.5)
h_1=2200
n_1=0.5
# Martensite (Matrix) Matrix yields first
E_0 = 200*10**3
PR_0 = 0.3
Mu_0 = E_0/(2*(1+PR_0))
sigy_0 = 400+1000*((100*x_C[ii])**(1/3))
h_0=450
n_0=0.06
# Composition of Phases
c_0 = vf
c_1 = 1-c_0
# Alpha and Beta Values
# Ferrite
# alpha_0 = (1/3)*((1+PR_0)/(1-PR_0))
beta_0 = (2/15)*((4-5*PR_0)/(1-PR_0))
# Austenite
# alpha_1 = (1/3)*((1+PR_1)/(1-PR_1))
# beta_1 = (2/15)*((4-5*PR_1)/(1-PR_1))
#Plastic Strain in Matrix
strain_p_1 = np.linspace(0, 0.2, num=2000, endpoint=True)
# Elastic stage
Mu_0 = E_0/(2*(1+PR_0))
Mu_1 = E_1/(2*(1+PR_1))
# K_0 = E_0/(3*(1-2*PR_0))
# K_1 = E_1/(3*(1-2*PR_1))
# K = K_0*(1 + (c_1*(K_1-K_0))/(c_0*alpha_0*(K_1-K_0) + K_0))
# Mu = Mu_0*(1 + (c_1*(Mu_1-Mu_0))/(c_0*beta_0*(Mu_1-Mu_0) + Mu_0))
# E = 9*K*Mu/(3*K+Mu)
# a_0 = (alpha_0*(K_1-K_0) + K_0)/((c_1 + (1-c_1)*alpha_0)*(K_1-K_0)+ K_0)
b_0 = (beta_0*(Mu_1-Mu_0) + Mu_0)/((c_1 + (1-c_1)*beta_0)*(Mu_1-Mu_0)+ Mu_0)
# a_1 = K_1/((c_1 + (1-c_1)*alpha_0)*(K_1-K_0)+ K_0)
b_1 = Mu_1/((c_1 + (1-c_1)*beta_0)*(Mu_1-Mu_0)+ Mu_0)
strain_p_0 = np.zeros((len(strain_p_1)))
count=0
SS=np.zeros((len(strain_p_1),2))
strain_c = np.zeros((len(strain_p_1)))
stress_c = np.zeros((len(strain_p_1)))
for i in range(len(strain_p_1)):
strain_c[i] = c_1*b_1*strain_p_1[i]
stress_c[i] = (1/b_1)*(sigy_1 + h_1*(strain_p_1[i]**n_1) + 3*Mu_0*(1-beta_0)*(c_0*b_1*strain_p_1[i]))
temp = (1/b_0)*(sigy_0 - 3*Mu_0*(1-beta_0)*strain_c[i])
if (stress_c[i]>(temp+150)) or (c_1 == 0):
count=count+1
A = b_0
B = 3*Mu_0*(1-beta_0)*c_1*b_1
C = sigy_1 + h_1*(strain_p_1[i]**n_1)
D = b_1
G = 3*Mu_0*(1-beta_0)*(1-c_1)*b_1
x0=np.random.rand(2)
F = lambda y : [-A*y[0] + B*y[1] + sigy_0 + h_0*(y[1]**n_0) - B*strain_p_1[i], -D*y[0] - G*y[1] + C + G*strain_p_1[i]]
y = least_squares(F, x0, bounds=((0,0),(np.inf,np.inf))).x
stress_c[i] = y[0];
strain_p_0[i]= y[1];
strain_c[i] = c_0*b_0*strain_p_0[i] + c_1*b_1*strain_p_1[i]
SS[:,0] = strain_c
SS[:,1] = stress_c
strain=np.zeros((len(strain_p_1)))
stress=np.zeros((len(strain_p_1)))
for iii in range(SS.shape[0]):
strain[iii] = SS[iii,0]
stress[iii] = SS[iii,1]
index = np.where(strain <= ep)
strs=stress[np.max(index)]
dsde=(stress[np.max(index)+1]-stress[np.max(index)-1])/(2*(strain[np.max(index)+1]-strain[np.max(index)]))
cc[ii]=dsde/strs
if single_calc:
return cc[0]
else:
return cc
def secant1_IS(x,ep):
# Input Variables
beta_Si = 732.7676
beta_Mn = 213.4494
# beta_C = 7507.582
single_calc = False
mm = x.shape[0]
if x.shape[0] == 4:
try:
a = x.shape[1]
x = x.transpose()
except IndexError:
x = np.array([[x[0],x[1],x[2],x[3]],[0,0,0,0]])
single_calc = True
mm = 1
f=x[:,0]
x_C = x[:,1]
x_Mn = x[:,2]
x_Si = x[:,3]
cc = np.zeros((mm))
for ii in range(mm):
vf = f[ii]
# # Ferrite (Matrix)
E_a = 200*10**3
PR_a = 0.3
Mu_a = E_a/(2*(1+PR_a))
sigy_a = 200 + beta_Mn*((x_Mn[ii])**0.5) + beta_Si*((x_Si[ii])**0.5)
h_a=2200
n_a=0.5
# Martensite
E_f = 200*10**3
PR_f = 0.3
Mu_f = E_f/(2*(1+PR_f))
sigy_f = 400+1000*((100*x_C[ii])**(1/3))
h_f=450
n_f=0.06
# Composition of Phases
c_f = vf
c_a = 1-c_f
# Alpha and Beta Values
# Austenite
alpha_a = (1/3)*((1+PR_a)/(1-PR_a))
beta_a = (2/15)*((4-5*PR_a)/(1-PR_a))
#Ferrite
# alpha_f = (1/3)*((1+PR_f)/(1-PR_f))
# beta_f = (2/15)*((4-5*PR_f)/(1-PR_f))
#Plastic Strain in Matrix
strain_p_a = np.linspace(0,0.17,num=340,endpoint=True)
# Elastic stage
K_a = E_a/(3*(1-2*PR_a));
K_f = E_f/(3*(1-2*PR_f));
K = K_a*(1 + (c_f*(K_f-K_a))/(c_a*alpha_a*(K_f-K_a) + K_a));
Mu = Mu_a*(1 + (c_f*(Mu_f-Mu_a))/(c_a*beta_a*(Mu_f-Mu_a) + Mu_a));
E = 9*K*Mu/(3*K+Mu);
# a_a = (alpha_a*(K_f-K_a) + K_a)/((c_f + (1-c_f)*alpha_a)*(K_f-K_a)+ K_a);
# b_a = (beta_a*(Mu_f-Mu_a) + Mu_a)/((c_f + (1-c_f)*beta_a)*(Mu_f-Mu_a)+ Mu_a);
# a_f = K_f/((c_f + (1-c_f)*alpha_a)*(K_f-K_a)+ K_a);
# b_f = Mu_f/((c_f + (1-c_f)*beta_a)*(Mu_f-Mu_a)+ Mu_a);
count1=0
# Starting with a given plastic strain in the matrix and then
# increasing the value
strain_p_f=np.zeros(strain_p_a.shape[0])
strain_p_c=np.zeros(strain_p_a.shape[0])
E_s_a=np.zeros(strain_p_a.shape[0])
PR_s_a=np.zeros(strain_p_a.shape[0])
Mu_s_a=np.zeros(strain_p_a.shape[0])
alpha_s_a=np.zeros(strain_p_a.shape[0])
beta_s_a=np.zeros(strain_p_a.shape[0])
b_s_a=np.zeros(strain_p_a.shape[0])
b_s_f=np.zeros(strain_p_a.shape[0])
K_s=np.zeros(strain_p_a.shape[0])
Mu_s=np.zeros(strain_p_a.shape[0])
E_s=np.zeros(strain_p_a.shape[0])
stress_c=np.zeros(strain_p_a.shape[0])
SS=np.zeros((strain_p_a.shape[0],2))
for j in range(strain_p_a.shape[0]):
count1 = count1+1;
# Secant Modulus given by Eq 2.8
E_s_a[j] = 1/((1/E_a) + strain_p_a[j]/(sigy_a + h_a*(strain_p_a[j])**n_a))
PR_s_a[j] = 0.5 - ((0.5 - PR_a)*(E_s_a[j]/E_a))
Mu_s_a[j] = E_s_a[j]/(2*(1+PR_s_a[j]))
# Austenite
alpha_s_a[j] = (1/3)*((1+PR_s_a[j])/(1-PR_s_a[j]))
beta_s_a[j] = (2/15)*((4-5*PR_s_a[j])/(1-PR_s_a[j]))
b_s_a[j] = (beta_s_a[j]*(Mu_f-Mu_s_a[j]) + Mu_s_a[j])/((c_f + (1-c_f)*beta_s_a[j])*(Mu_f-Mu_s_a[j])+ Mu_s_a[j])
b_s_f[j] = Mu_f/((c_f + (1-c_f)*beta_s_a[j])*(Mu_f-Mu_s_a[j])+ Mu_s_a[j])
K_s[j] = K_a*(1+ ((c_f*(K_f-K_a))/((1-c_f)*alpha_s_a[j]*(K_f-K_a) + K_a)))
Mu_s[j] = Mu_s_a[j]*(1+ ((c_f*(Mu_f-Mu_s_a[j]))/((1-c_f)*beta_s_a[j]*(Mu_f-Mu_s_a[j]) + Mu_s_a[j])))
E_s[j] = (9*K_s[j]*Mu_s[j])/(3*K_s[j] + Mu_s[j])
# Total stress and plastic strain of composite
stress_c[j] = ((1/b_s_a[j])*(sigy_a + h_a*((strain_p_a[j])**n_a)))
if (stress_c[j]-(sigy_f/b_s_f[j])) > 110:
A = b_s_a[j]
B = 3*Mu_s_a[j]*(1-beta_s_a[j])*c_f*b_s_f[j]
C = sigy_a + h_a*(strain_p_a[j]**n_a)
D = b_s_f[j]
G = 3*Mu_s_a[j]*(1-beta_s_a[j])*(1-c_f)*b_s_f[j]
x0=np.random.rand(2)
F = lambda x : [A*x[0] + B*x[1] - C, D*x[0] - G*x[1] - sigy_f - h_f*((x[1])**n_f)];
x = least_squares(F,x0,bounds=((0,0),(np.inf,np.inf)),max_nfev=200000,ftol=1e-60,xtol=3e-9).x
stress_c[j] = x[0]
strain_p_f[j]= x[1]
#
strain_p_c[j] = c_f*b_s_f[j]*x[1] + (2/3)*(1/(2*Mu_s[j]) - 1/(2*Mu))*stress_c[j]
else:
strain_p_c[j] = ((1/E_s[j]) - (1/E))*stress_c[j]
SS[j,1] = stress_c[j]
SS[j,0] = strain_p_c[j]
strain=np.zeros((len(SS)))
stress=np.zeros((len(SS)))
for iii in range(len(SS)):
strain[iii]=SS[iii,0]
stress[iii]=SS[iii,1]
index = np.where(strain <= ep);
strs=stress[np.max(index)]
dsde=(stress[np.max(index)+1]-stress[np.max(index)-1])/(2*(strain[np.max(index)+1]-strain[np.max(index)]));
cc[ii]=dsde/strs
if single_calc:
return cc[0]
else:
return cc
def linear(x):
if len(x.shape) == 1:
x = np.expand_dims(x, axis=0)
x[:,0] = -1*x[:,0]
print(x)
return np.sum(x, axis=1)*20
pass
def quadratic(x):
if len(x.shape) == 1:
x = np.expand_dims(x, axis=0)
x = x**2
x[:,0] = -1*x[:,0]
print(x)
return np.sum(x, axis=1)*20
def calphad(x):
if len(x.shape) == 1:
x = np.expand_dims(x, axis=0)
factors = [[-2, -0.32, -1, -0.5],
[2, 0.32, -1, 0.5],
[1, 0.32, -1, 1],
[5, -0.32, 2, 3]]
for ii in range(4):
x[:,ii] = factors[ii][0]*x[:,ii] + factors[ii][1]*x[:,ii]*np.log(x[:,ii]) + factors[ii][2]*x[:,ii]**2 + factors[ii][3]*x[:,ii]**3
return np.sum(x, axis=1)
def linearT(x):
if len(x.shape) == 1:
x = np.expand_dims(x, axis=0)
x[:,0] = x[:,0]
x[:,1] = x[:,1]
x[:,2] = x[:,2]
x[:,3] = x[:,3]
return np.array(np.sum(x, axis=1))
def quadraticT(x):
if len(x.shape) == 1:
x = np.expand_dims(x, axis=0)
    out = np.zeros_like(x)
import numpy as np
import numba as nb
from dataclasses import dataclass
from numba import types
from numba.typed import Dict
from numba import njit
import pandas as pd
import time
import datetime
import csv
from openpyxl import load_workbook
from pyModbusTCP.client import ModbusClient
from pyModbusTCP import utils
@dataclass
class Data:
Ppv: np.array
Pbat: np.array
Pperi: np.array
soc: np.array
soc0: int
Pbs0: int
E: dict
class BatModDC(object):
"""Performance Simulation Class for DC-coupled PV-Battery systems
:param parameter: PV battery system parameters
:type parameter: dict
:param d: array containing parameters
:type d: numpy array
:param ppv: normalized DC power output of the PV generator
:type ppv: numpy array
:param pl: AC load power
:type pl: numpy array
    :param Pr: AC residual power
    :type Pr: numpy array
    :param Prpv: residual power of the PV generator available for battery charging
    :type Prpv: numpy array
:param Ppv: DC power output of the PV generator
:type Ppv: numpy array
:param ppv2ac: Normalized AC output power of the PV2AC conversion pathway to cover the AC power demand
:type ppv2ac: numpy array
:param Ppv2ac_out: Target AC output power of the PV2AC conversion pathway
:type Ppv2ac_out: numpy array
:param dt: time step width in seconds
:type dt: integer
"""
_version = 0.1
def __init__(self, parameter, d, ppv, pl, dt):
"""Constructor method
"""
self.parameter = parameter
self.d = d
self.ppv = ppv
self.pl = pl
self.dt = dt
self.th = False # Start threshold for the recharging of the battery
self.spi = float()
# Initialization and preallocation
self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.ppv2ac, self.Real.Ppv2ac_out = max_self_consumption(parameter, ppv, pl, pvmod=True)
self.Real.Ppv2ac_out0 = 0
self.Real.Ppv2bat_in0 = 0
self.Real.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
self.Real.soc = np.zeros_like(self.ppv) # State of charge of the battery
self.Real.soc0 = 0 # State of charge of the battery in the first time step
# Input power of the PV2BAT conversion pathway in W
self.Real.Ppv2bat_in = np.zeros_like(self.ppv)
# Output power of the BAT2AC conversion pathway in W
self.Real.Pbat2ac_out = np.zeros_like(self.ppv)
self.Real.Pbat2ac_out0 = 0
# AC power of the PV-battery system in W
self.Real.Ppvbs = np.zeros_like(self.ppv)
# Additional power consumption of other system components (e.g. AC power meter) in W
self.Real.Pperi = np.ones(self.ppv.size) * self.parameter['P_PERI_AC']
self.Ideal.Ppv = np.maximum(0, self.ppv) * self.parameter['P_PV'] * 1000
self.Ideal.Pr = self.Ideal.Ppv - self.pl
self.Ideal.Pbat = np.zeros_like(self.ppv)
self.Ideal.soc = np.zeros_like(self.ppv)
        self.Ideal.Ppv2bat_in = np.zeros_like(self.ppv)
self.Ideal.Pbat2ac_out = np.zeros_like(self.ppv)
self.Ideal.Ppvbs = np.zeros_like(self.ppv)
@dataclass
class Real(Data):
Pr : np.array
Prpv : np.array
ppv2ac : np.array
Ppv2ac_out : np.array
Ppv2ac_out0 : int
Ppv2bat_in : np.array
Pbat2ac_out : np.array
Ppvbs : np.array
@dataclass
class Ideal(Real):
def __init__(self):
super().__init__()
def simulation(self, pvmod=True):
"""Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
"""
self.Real.Ppv2ac_out, self.Real.Ppv2bat_in, self.Real.Ppv2bat_in0, self.Real.Pbat2ac_out, self.Real.Pbat2ac_out0, self.Real.Ppvbs, self.Real.Pbat, self.Real.soc, self.Real.soc0 = batmod_dc(
self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Prpv, self.Real.Ppv, self.Real.Ppv2bat_in0, self.Real.Ppv2bat_in,
self.Real.Pbat2ac_out0, self.Real.Pbat2ac_out, self.Real.Ppv2ac_out, self.Real.Ppvbs, self.Real.Pbat)
self.Ideal.Pbat, self.Ideal.soc, self.Ideal.soc0 = batmod_dc_ideal(self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat)
# Define missing parameters
self.Real.Ppv2ac = self.Real.Ppv2ac_out # AC output power of the PV2AC conversion pathway
self.Real.Ppv2bat = self.Real.Ppv2bat_in # DC input power of the PV2BAT conversion pathway
self.Ideal.Ppvbs = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) - (np.minimum(0, self.Ideal.Pbat)) # Realized AC power of the PV-battery system
self.Ideal.Ppv2ac = self.Ideal.Ppv - np.maximum(0, self.Ideal.Pbat) # AC output power of the PV2AC conversion pathway
self.Ideal.Ppv2bat = np.maximum(0, self.Ideal.Pbat) # DC input power of the PV2BAT conversion pathway
print()
def bat_mod_res(self):
"""Function to calculate the power flows and energy sums including curtailment of PV power
"""
self.Real.E = bat_res_mod(self.parameter, self.pl, self.Real.Ppv, self.Real.Pbat,
self.dt, self.Real.Ppv2ac, self.Real.Ppv2bat, self.Real.Ppvbs, self.Real.Pperi)
self.Ideal.E = bat_res_mod_ideal(self.parameter, self.pl, self.Ideal.Ppv, self.Ideal.Pbat,
self.dt, self.Ideal.Ppv2ac, self.Ideal.Ppv2bat, self.Ideal.Ppvbs, self.Ideal.Pperi)
def calculate_spi(self):
self.spi = calculate_spi(_E_real=self.Real.E, _E_ideal=self.Ideal.E)
def get_E(self):
"""Returns the energy sums of the simulation
:return: Energy sums of the simulation in MWh
:rtype: dict
"""
return self.Real.E, self.Ideal.E
def get_soc(self):
"""Returns the state of charge of the battery
:return: state of charge of the battery
:rtype: numpy array
"""
return self.soc
def get_Pbat(self):
"""Returns the DC power of the battery in W
:return: DC power of the battery in W
:rtype: numpy array
"""
return self.Pbat
def get_SPI(self):
return self.spi
class BatModAC(object):
"""Performance Simulation Class for AC-coupled PV-Battery systems
:param parameter: PV battery system parameters
:type parameter: dict
:param d: array containing parameters
:type d: numpy array
:param ppv: normalized DC power output of the PV generator
:type ppv: numpy array
:param pl: AC load power
:type pl: numpy array
:param Pr: AC residual power
:type Pr: numpy array
:param Ppv: DC power output of the PV generator
:type Ppv: numpy array
:param Ppvs: AC power output of the PV inverter taking into account the conversion losses and maximum output power of the PV inverter
:type Ppvs: numpy array
:param Pperi: Additional power consumption of other system components (e.g. AC power meter) in W
:type Pperi: numpy array
:param dt: time step width in seconds
:type dt: integer
"""
_version = '0.1'
def __init__(self, parameter, d, ppv, pl, dt):
"""Constructor method
"""
self.parameter = parameter
self.d = d
self.ppv = ppv
self.pl = pl
self.dt = dt
self.spi = float()
self.th = False # Start threshold for the recharging of the battery
# Initialization and preallocation
self.Real.Pr, self.Real.Ppv, self.Real.Ppvs, self.Real.Pperi = max_self_consumption(parameter, ppv, pl, pvmod=True)
self.Real.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
self.Real.Pbs = np.zeros_like(self.ppv) # AC power of the battery system in W
self.Real.soc = np.zeros_like(self.ppv) # State of charge of the battery
self.Real.soc0 = 0 # State of charge of the battery in the first time step
self.Real.Pbs0 = 0 # State of the battery storage in the previous time step
self.Ideal.Ppv = np.maximum(0, ppv) * parameter['P_PV'] * 1000
self.Ideal.Pr = self.Ideal.Ppv - pl
self.Ideal.Pbat = np.zeros_like(self.ppv)
self.Ideal.Pbs = np.zeros_like(self.ppv)
self.Ideal.Pbs0 = 0
self.Ideal.soc = np.zeros_like(self.ppv)
self.Ideal.soc0 = 0
self.Ideal.Ppvs = self.Ideal.Ppv
self.Ideal.Pperi = np.zeros_like(self.ppv)
@dataclass
class Real(Data):
Pr : np.array
Ppvs : np.array
Pbs : np.array
@dataclass
class Ideal(Real):
def __init__(self):
super().__init__()
def simulation(self):
"""Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
"""
self.Real.Pbat, self.Real.Pbs, self.Real.soc, self.Real.soc0, self.Real.Pbs0 = batmod_ac(
self.d, self.dt, self.Real.soc0, self.Real.soc, self.Real.Pr, self.Real.Pbs0, self.Real.Pbs, self.Real.Pbat)
self.Ideal.Pbs, self.Ideal.Pbat, self.Ideal.soc0, self.Ideal.soc = batmod_ac_ideal(
self.d, self.dt, self.Ideal.soc0, self.Ideal.soc, self.Ideal.Pr, self.Ideal.Pbat)
def bat_mod_res(self):
"""Function to calculate the power flows and energy sums including curtailment of PV power
"""
self.Real.E = bat_res_mod(
self.parameter, self.pl, self.Real.Ppv, self.Real.Pbat, self.dt, self.Real.Ppvs, self.Real.Pbs, self.Real.Pperi)
self.Ideal.E = bat_res_mod_ideal(
self.parameter, self.pl, self.Ideal.Ppv, self.Ideal.Pbat, self.dt, self.Ideal.Ppvs, self.Ideal.Pbs, self.Ideal.Pperi)
def calculate_spi(self):
self.spi = calculate_spi(_E_real=self.Real.E, _E_ideal=self.Ideal.E)
def get_E(self):
"""Returns the energy sums of the simulation
:return: Energy sums of the simulation in MWh
:rtype: dict
"""
return self.Real.E, self.Ideal.E
def get_soc(self):
"""Returns the state of charge of the battery
:return: state of charge of the battery
:rtype: numpy array
"""
return self.soc
def get_Pbat(self):
"""Returns the DC power of the battery in W
:return: DC power of the battery in W
:rtype: numpy array
"""
return self.Pbat
def get_Pbs(self):
"""Returns the AC power of the battery system in W
:return: AC power of the battery system in W
:rtype: numpy array
"""
return self.Pbs
def get_SPI(self):
return self.spi
class BatModPV(object):
"""Performance Simulation Class for PV-coupled PV-Battery systems
:param parameter: PV battery system parameters
:type parameter: dict
:param d: array containing parameters
:type d: numpy array
:param ppv: normalized DC power output of the PV generator
:type ppv: numpy array
:param pl: AC load power
:type pl: numpy array
:param Pac: Power demand on the AC side
:type Pac: numpy array
:param Ppv: DC power output of the PV generator
:type Ppv: numpy array
:param Pperi: Additional power consumption of other system components (e.g. AC power meter) in W
:type Pperi: numpy array
:param dt: time step width in seconds
:type dt: integer
"""
_version = '0.1'
def __init__(self, parameter, d, ppv, pl, Pac, Ppv, Pperi, dt):
"""Constructor method
"""
self.parameter = parameter
self.d = d
self.ppv = ppv
self.pl = pl
self.Pac = Pac
self.Ppv = Ppv
self.Pperi = Pperi
self.dt = dt
# Initialization and preallocation
self.Pbat = np.zeros_like(self.ppv) # DC power of the battery in W
self.soc = np.zeros_like(self.ppv) # State of charge of the battery
# Output power of the PV2AC conversion pathway in W
self.Ppv2ac_out = np.zeros_like(self.ppv)
# Input power of the PV2BAT conversion pathway in W
self.Ppv2bat_in = np.zeros_like(self.ppv)
self.Ppv2bat_in0 = 0
# Output power of the BAT2PV conversion pathway in W
self.Pbat2pv_out = np.zeros_like(self.ppv)
self.Pbat2pv_out0 = 0
# AC power of the PV-battery system in W
self.Ppvbs = np.zeros_like(self.ppv)
self.simulation()
self.bat_mod_res()
def simulation(self, pvmod=True):
"""Manages the Performance Simulation Model for AC-coupled PV-Battery Systems
"""
self.th = 0 # Start threshold for the recharging of the battery
self.soc0 = 0 # Initial state of charge of the battery in the first time step
# Simulation of the battery system
#start = time.process_time()
self.soc, self.soc0, self.Ppv, self.Ppvbs, self.Pbat, self.Ppv2ac_out, self.Pbat2pv_out, self.Ppv2bat_in = batmod_pv(self.d, self.dt, self.soc0, self.soc, self.Ppv, self.Pac, self.Ppv2bat_in0, self.Ppv2bat_in, self.Ppv2ac_out, self.Pbat2pv_out0, self.Pbat2pv_out, self.Ppvbs, self.Pbat)
#print(time.process_time()-start)
# Define missing parameters
self.Ppv2ac = self.Ppv2ac_out # AC output power of the PV2AC conversion pathway
self.Ppv2bat = self.Ppv2bat_in # DC input power of the PV2BAT conversion pathway
def bat_mod_res(self):
"""Function to calculate the power flows and energy sums including curtailment of PV power
"""
self.E = bat_res_mod(self.parameter, self.pl, self.Ppv, self.Pbat, self.dt, self.Ppv2ac, self.Ppv2bat, self.Ppvbs, self.Pperi)
def get_E(self):
"""Returns the energy sums of the simulation
:return: Energy sums of the simulation in MWh
:rtype: dict
"""
return self.E
def get_soc(self):
"""Returns the state of charge of the battery
:return: state of charge of the battery
:rtype: numpy array
"""
return self.soc
def get_Pbat(self):
"""Returns the DC power of the battery in W
:return: DC power of the battery in W
:rtype: numpy array
"""
return self.Pbat
class ModBus(object):
"""Establishes connection to a battery system via ModBus protocol
:param host: IP address of the host
:type host: string
:param port: Server port of the host
:type port: integer
:param unit_id: Unit-ID of the host
:type unit_id: integer
"""
def __init__(self, host, port, unit_id, input_vals, dt, fname):
"""Constructor method
"""
self.host = host
self.port = port
self.unit_id = unit_id
self.dt = dt
self.input_vals = input_vals
self.fname = fname
self.open_connection()
self.create_csv_file()
self.start_loop()
def open_connection(self):
"""Opens the connection to the host
"""
# Open ModBus connection
try:
self.c = ModbusClient(host=self.host, port=self.port,
unit_id=self.unit_id, auto_open=True, auto_close=True)
except ValueError:
print("Error with host: {}, port: {} or unit-ID: {} params".format(
self.host, self.port, self.unit_id))
def start_loop(self):
"""Starts the writing and reading process
"""
# Transform the array to fit the 1 minute time duration
#self.set_vals = np.repeat(self.input_vals, self.dt * 60)
i = 0
idx = pd.date_range(start=datetime.datetime.now(),
periods=(self.input_vals.size), freq='S')
while i < len(idx):
if datetime.datetime.now().second == idx[i].second:
# Set chrging value
self.set_val = int(self.input_vals[i])
if self.set_val < 0:
# Write negative value to battery charge power (AC) setpoint register
self.c.write_single_register(1024, self.set_val & 0xFFFF)
# Log writing time
self.set_time = datetime.datetime.now()
else:
# Write positive value to battery charge power (AC) setpoint to register
self.c.write_single_register(1024, self.set_val)
# Log writing time
self.set_time = datetime.datetime.now()
try:
# Read total AC power value from register
_P_ac = self.c.read_holding_registers(172, 2)
self.read_time_P_ac = datetime.datetime.now()
except:
print('Could not read register 172!')
try:
# Read actual battery charge/discharge power value from register
_P_bat = self.c.read_holding_registers(582, 1)
self.read_time_P_bat = datetime.datetime.now()
except:
print('Could not read register 582!')
# Load content of two registers into a single float value
zregs = utils.word_list_to_long(_P_ac, big_endian=False)
# Decode and store float value of the AC-power
self.P_ac = utils.decode_ieee(*zregs)
# Store the DC charging power
self.P_bat = np.int16(*_P_bat)
# Read actual soc
self.soc0 = self.read_soc(210)
try:
# Save the values to a csv file
self.save_to_csv()
except:
print('Could not save to csv!')
i += 1
def read_soc(self, reg):
"""Reads the state of charge of the battery
"""
# Load the actual state fo charge of the battery
regs = self.c.read_holding_registers(reg, 2)
# Load content of two registers into a single float value
zregs = utils.word_list_to_long(regs, big_endian=False)
return utils.decode_ieee(*zregs)
def create_csv_file(self):
"""Creates a csv file from set and read values
"""
# Create a new csv-file
with open(self.fname, 'w') as f:
writer = csv.writer(f, dialect='excel')
writer.writerow(['set_time',
'read_time_P_ac',
'read_time_P_bat',
'soc',
'set_value',
'P_ac',
'P_bat'])
def save_to_csv(self):
"""Saves the set and read values to s csv file
"""
# Save the read values to a csv file
with open(self.fname, "a") as f:
wr = csv.writer(f, dialect='excel')
wr.writerow([self.set_time, self.read_time_P_ac, self.read_time_P_bat,
self.soc0, self.set_val, self.P_ac, self.P_bat])
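    # A minimal usage sketch (hypothetical connection details and setpoints;
    # note the constructor opens the connection and immediately starts the
    # write/read loop):
    #
    #   setpoints = np.array([500, -500, 0])   # battery setpoints in W, 1 s each
    #   ModBus('192.168.0.10', 502, 1, setpoints, 1, 'modbus_log.csv')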
def max_self_consumption(parameter, ppv, pl, pvmod=True, ideal=False):
"""Function for maximizing self consumption
:param parameter: PV battery system parameters
:type parameter: dict
:param ppv: normalized DC power output of the PV generator
:type ppv: numpy array
:param pl: AC load power
:type pl: numpy array
"""
# Maximize self consumption for AC-coupled systems
if parameter['Top'] == 'AC':
# DC power output of the PV generator
if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp
if ideal:
Ppv = np.maximum(0, ppv ) * parameter['P_PV'] * 1000
else:
Ppv = np.minimum(ppv * parameter['P_PV'], parameter['P_PV2AC_in']) * 1000
else: # ppv: DC power output of the PV generator in W
if ideal:
Ppv = np.maximum(0, ppv)
else:
Ppv = np.minimum(ppv, parameter['P_PV2AC_in'] * 1000)
# Normalized input power of the PV inverter
ppvinvin = Ppv / parameter['P_PV2AC_in'] / 1000
# AC power output of the PV inverter taking into account the conversion losses and maximum
# output power of the PV inverter
Ppvs = np.minimum(np.maximum(0, Ppv-(parameter['PV2AC_a_in'] * ppvinvin * ppvinvin + parameter['PV2AC_b_in'] * ppvinvin + parameter['PV2AC_c_in'])), parameter['P_PV2AC_out'] * 1000)
# 3.2 Residual power
# Additional power consumption of other system components (e.g. AC power meter) in W
Pperi = np.ones_like(ppv) * parameter['P_PERI_AC']
# Adding the standby consumption of the PV inverter in times without any AC power output of the PV system
# to the additional power consumption
Pperi[Ppvs == 0] += parameter['P_PVINV_AC']
# Residual power
if ideal:
Pr = Ppv - pl
else:
Pr = Ppvs - pl - Pperi
return Pr, Ppv, Ppvs, Pperi
# Maximize self consumption for DC-coupled systems
elif parameter['Top'] == 'DC':
# Initialization and preallocation
Ppv2ac_in_ac = np.zeros_like(ppv)
Ppv = np.empty_like(ppv) # DC power output of the PV generator
if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp
Ppv = ppv * parameter['P_PV'] * 1000
else:
Ppv = ppv
# DC power output of the PV generator taking into account the maximum
# DC input power of the PV2AC conversion pathway
Ppv = np.minimum(Ppv, parameter['P_PV2AC_in'] * 1000)
# Residual power
# Power demand on the AC side
Pac = pl + parameter['P_PERI_AC']
# Normalized AC output power of the PV2AC conversion pathway to cover the AC
# power demand
ppv2ac = np.minimum(
Pac, parameter['P_PV2AC_out'] * 1000) / parameter['P_PV2AC_out'] / 1000
# Target DC input power of the PV2AC conversion pathway
Ppv2ac_in_ac = np.minimum(Pac, parameter['P_PV2AC_out'] * 1000) + (
parameter['PV2AC_a_out'] * ppv2ac**2 + parameter['PV2AC_b_out'] * ppv2ac + parameter['PV2AC_c_out'])
# Normalized DC input power of the PV2AC conversion pathway TODO 1
ppv2ac = Ppv / parameter['P_PV2AC_in'] / 1000
# Target AC output power of the PV2AC conversion pathway
Ppv2ac_out = np.maximum(
0, Ppv - (parameter['PV2AC_a_in'] * ppv2ac**2 + parameter['PV2AC_b_in'] * ppv2ac + parameter['PV2AC_c_in']))
# Residual power for battery charging
Prpv = Ppv - Ppv2ac_in_ac
# Residual power for battery discharging
Pr = Ppv2ac_out - Pac
return Pr, Prpv, Ppv, ppv2ac, Ppv2ac_out
# Maximize self consumption for PV-coupled systems
elif parameter['Top'] == 'PV':
# Preallocation
# Pbat = np.zeros_like(ppv) # DC power of the battery in W
# soc = np.zeros_like(ppv) # State of charge of the battery
# Ppv2ac_out = np.zeros_like(ppv) # Output power of the PV2AC conversion pathway in W
# Ppv2bat_in = np.zeros_like(ppv) # Input power of the PV2BAT conversion pathway in W
# Pbat2pv_out = np.zeros_like(ppv) # Output power of the BAT2PV conversion pathway in W
# Ppvbs = np.zeros_like(ppv) # AC power of the PV-battery system in W
Ppv = np.empty_like(ppv) # DC power output of the PV generator
# Additional power consumption of other system components (e.g. AC power meter) in W
Pperi = np.ones_like(ppv) * parameter['P_PERI_AC']
# dt = 1 # Time increment in s
# th = 0 # Start threshold for the recharging of the battery
# soc0 = 0 # State of charge of the battery in the first time step
# DC power output of the PV generator
if pvmod: # ppv: Normalized DC power output of the PV generator in kW/kWp
Ppv = ppv * parameter['P_PV'] * 1000
else: # ppv: DC power output of the PV generator in W
Ppv = ppv
# Power demand on the AC side
Pac = pl + Pperi
return Pac, Ppv, Pperi
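# A minimal usage sketch for the AC topology (hypothetical parameter dict
# `parameter`, model coefficients `d`, and 1-s profiles `ppv`/`pl`; kept as a
# comment so nothing runs on import):
#
#   Pr, Ppv, Ppvs, Pperi = max_self_consumption(parameter, ppv, pl, pvmod=True)
#   Pbat, Pbs, soc, soc0, Pbs0 = batmod_ac(d, 1, 0, np.zeros_like(ppv), Pr, 0,
#                                          np.zeros_like(ppv), np.zeros_like(ppv))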
@nb.jit(nopython=True)
def batmod_ac(d, _dt, _soc0, _soc, _Pr, _Pbs0, _Pbs, _Pbat):
"""Performance Simulation function for AC-coupled battery systems
:param d: array containing parameters
:type d: numpy array
:param dt: time step width
:type dt: integer
:param soc0: state of charge in the previous time step
:type soc0: float
:param Pr: residual power
:type Pr: numpy array
:param Pbs0: AC-power of the battery system in the previous time step
:type Pbs0: float
    :param Pbs: AC-power of the battery system
:type Pbs: numpy array
    :param Pbat: DC-power of the battery
:type Pbat: numpy array
"""
# Loading of particular variables
_E_BAT = d[0]
_eta_BAT = d[1]
_t_CONSTANT = d[2]
_P_SYS_SOC0_DC = d[3]
_P_SYS_SOC0_AC = d[4]
_P_SYS_SOC1_DC = d[5]
_P_SYS_SOC1_AC = d[6]
_AC2BAT_a_in = d[7]
_AC2BAT_b_in = d[8]
_AC2BAT_c_in = d[9]
_BAT2AC_a_out = d[10]
_BAT2AC_b_out = d[11]
_BAT2AC_c_out = d[12]
_P_AC2BAT_DEV = d[13]
_P_BAT2AC_DEV = d[14]
_P_BAT2AC_out = d[15]
_P_AC2BAT_in = d[16]
_t_DEAD = int(round(d[17]))
_SOC_h = d[18]
_P_AC2BAT_min = _AC2BAT_c_in
_P_BAT2AC_min = _BAT2AC_c_out
# Correction factor to avoid over charge and discharge the battery
corr = 0.1
# Initialization of particular variables
_tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
# Factor of the first-order time delay element
_ftde = 1 - np.exp(-_dt / _t_CONSTANT)
# First time step with regard to the dead time of the system control
_tstart = np.maximum(2, 1 + _t_DEAD)
_tend = int(_Pr.size)
_th = 0
# Capacity of the battery, conversion from kWh to Wh
_E_BAT *= 1000
# Effiency of the battery in percent
_eta_BAT /= 100
# Check if the dead or settling time can be ignored and set flags accordingly
if _dt >= (3 * _t_CONSTANT) or _tend == 1:
_tstart = 1
T_DEAD = False
else:
T_DEAD = True
if _dt >= _t_DEAD + 3 * _t_CONSTANT:
SETTLING = False
else:
SETTLING = True
for t in range(_tstart - 1, _tend):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT
# Calculate the AC power of the battery system from the residual power
# with regard to the dead time of the system control
if T_DEAD:
P_bs = _Pr[t - _t_DEAD]
else:
P_bs = _Pr[t]
# Check if the battery holds enough unused capacity for charging or discharging
# Estimated amount of energy in Wh that is supplied to or discharged from the storage unit.
E_bs_est = P_bs * _dt / 3600
# Reduce P_bs to avoid over charging of the battery
if E_bs_est > 0 and E_bs_est > (_E_BAT - E_b0):
P_bs = (_E_BAT - E_b0) * 3600 / _dt
# When discharging take the correction factor into account
elif E_bs_est < 0 and np.abs(E_bs_est) > (E_b0):
P_bs = (E_b0 * 3600 / _dt) * (1-corr)
# Adjust the AC power of the battery system due to the stationary
# deviations taking the minimum charging and discharging power into
# account
if P_bs > _P_AC2BAT_min:
P_bs = np.maximum(_P_AC2BAT_min, P_bs + _P_AC2BAT_DEV)
elif P_bs < -_P_BAT2AC_min:
P_bs = np.minimum(-_P_BAT2AC_min, P_bs - _P_BAT2AC_DEV)
else:
P_bs = 0
# Limit the AC power of the battery system to the rated power of the
# battery converter
P_bs = np.maximum(-_P_BAT2AC_out * 1000,
np.minimum(_P_AC2BAT_in * 1000, P_bs))
        # Adjust the AC power of the battery system due to the settling time
        # (modeled by a first-order time delay element). Does the previous step
        # start from zero here? Does the previous value need to be passed in as well?
if SETTLING:
if t > 0:
P_bs = _tde * _Pbs[t-1] + _tde * (P_bs - _Pbs[t-1]) * _ftde + P_bs * (not _tde)
else:
P_bs = _tde * _Pbs0 + _tde * (P_bs - _Pbs0) * _ftde + P_bs * (not _tde)
# Decision if the battery should be charged or discharged
if P_bs > 0 and _soc0 < 1 - _th * (1 - _SOC_h):
# The last term th*(1-SOC_h) avoids the alternation between
# charging and standby mode due to the DC power consumption of the
# battery converter when the battery is fully charged. The battery
# will not be recharged until the SOC falls below the SOC-threshold
# (SOC_h) for recharging from PV.
# Normalized AC power of the battery system
p_bs = P_bs / _P_AC2BAT_in / 1000
# DC power of the battery affected by the AC2BAT conversion losses
# of the battery converter
P_bat = np.maximum(
0, P_bs - (_AC2BAT_a_in * p_bs * p_bs + _AC2BAT_b_in * p_bs + _AC2BAT_c_in))
elif P_bs < 0 and _soc0 > 0:
# Normalized AC power of the battery system
p_bs = np.abs(P_bs / _P_BAT2AC_out / 1000)
# DC power of the battery affected by the BAT2AC conversion losses
# of the battery converter
P_bat = P_bs - (_BAT2AC_a_out * p_bs * p_bs +
_BAT2AC_b_out * p_bs + _BAT2AC_c_out)
else: # Neither charging nor discharging of the battery
# Set the DC power of the battery to zero
P_bat = 0
# Decision if the standby mode is active
if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state
# DC and AC power consumption of the battery converter
P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
P_bs = _P_SYS_SOC0_AC
elif P_bat == 0 and _soc0 > 0: # Standby mode in fully charged state
# DC and AC power consumption of the battery converter
P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
P_bs = _P_SYS_SOC1_AC
# Transfer the realized AC power of the battery system and
# the DC power of the battery
_Pbs0 = P_bs
_Pbs[t] = P_bs
_Pbat[t] = P_bat
        # Change the energy content of the battery (conversion from Ws to Wh)
if P_bat > 0:
E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
elif P_bat < 0:
E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
else:
E_b = E_b0
# Calculate the state of charge of the battery
_soc0 = E_b / (_E_BAT)
_soc[t] = _soc0
# Adjust the hysteresis threshold to avoid alternation
# between charging and standby mode due to the DC power
# consumption of the battery converter.
if _th and _soc[t] > _SOC_h or _soc[t] > 1:
_th = True
else:
_th = False
return _Pbat, _Pbs, _soc, _soc0, _Pbs0
@nb.jit(nopython=True)
def batmod_ac_ideal(d, _dt, _soc0, _soc, _Pr, _Pbat):
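    """Performance simulation function for an ideal AC-coupled battery system
    (conversion losses are neglected).

    :param d: array containing parameters (d[0]: usable battery capacity in kWh)
    :type d: numpy array
    :param dt: time step width in seconds
    :type dt: integer
    :param soc0: state of charge in the previous time step
    :type soc0: float
    :param Pr: residual power
    :type Pr: numpy array
    """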
_E_BAT = d[0]
for t in range(_Pr.size):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT * 1000
# Calculate the DC power of the battery from the residual power
P_bat = _Pr[t]
# Decision if the battery should be charged or discharged
if P_bat > 0 and _soc0 < 1: # Battery charging
E_b = E_b0 + P_bat * _dt / 3600 # Change the energy content of the battery
elif P_bat < 0 and _soc0 > 0: # Battery discharging
# Change the energy content of the battery
E_b = E_b0 + P_bat * _dt / 3600
else: # Neither charging nor discharging of the battery
# Set the DC power of the battery to zero
P_bat = 0
# No change in the energy content of the battery
E_b = E_b0
# Transfer the realized DC power of the battery
_Pbat[t] = P_bat
# Calculate the state of charge of the battery
_soc0 = E_b / (_E_BAT * 1000)
_soc[t] = _soc0
# Define missing parameters
_Pbs = _Pbat # Realized AC power of the battery system
return _Pbs, _Pbat, _soc0, _soc
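# ---------------------------------------------------------------------------
# Minimal usage sketch for batmod_ac_ideal (illustrative only: the capacity,
# time step and residual-power profile below are assumptions, not values from
# any reference case).
def _example_batmod_ac_ideal():
    d = np.array([10.0])                         # d[0]: usable battery capacity in kWh
    dt = 1                                       # time step width in s
    n = 24 * 3600                                # one day at 1 s resolution
    Pr = 2000.0 * np.sin(np.linspace(0.0, 2.0 * np.pi, n))  # synthetic residual power in W
    soc = np.zeros(n)
    Pbat = np.zeros(n)
    Pbs, Pbat, soc0, soc = batmod_ac_ideal(d, dt, 0.0, soc, Pr, Pbat)
    return Pbs, Pbat, soc0, soc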
@nb.jit(nopython=True)
def batmod_dc(d, _dt, _soc0, _soc, _Pr, _Prpv, _Ppv, _Ppv2bat_in0, _Ppv2bat_in, _Pbat2ac_out0, _Pbat2ac_out, _Ppv2ac_out, _Ppvbs, _Pbat):
"""Performance simulation function for DC-coupled battery systems
:param d: array containing parameters
:type d: numpy array
:param dt: time step width
:type dt: integer
:param soc0: state of charge in the previous time step
:type soc0: float
:param Pr: residual power
:type Pr: numpy array
:param Prpv: residual power of the PV-system
:type Prpv: numpy array
:param Ppv: PV-power
:type Ppv: numpy array
:param Ppv2bat_in0: AC input power of the battery system in the previous time step
:type Ppv2bat_in0: float
:param Ppv2bat_in: AC input power of the battery system
:type Ppv2bat_in: numpy array
:param Pbat2ac_out0: AC output power of the battery system in the previous time step
:type Pbat2ac_out0: float
:param Pbat2ac_out: AC output power of the battery system
:type Pbat2ac_out: numpy array
:param Ppv2ac_out0: AC output power of the PV inverter in the previous time step
:type Ppv2ac_out0: float
:param Ppv2ac_out: AC output power of the PV inverter
:type Ppv2ac_out: numpy array
:param Ppvbs: AC power from the PV system to the battery system
:type Ppvbs: numpy array
:param Pbat: DC power of the battery
    :type Pbat: numpy array
"""
_E_BAT = d[0]
_P_PV2AC_in = d[1]
_P_PV2AC_out = d[2]
_P_PV2BAT_in = d[3]
_P_BAT2AC_out = d[4]
_PV2AC_a_in = d[5]
_PV2AC_b_in = d[6]
_PV2AC_c_in = d[7]
_PV2BAT_a_in = d[8]
_PV2BAT_b_in = d[9]
_BAT2AC_a_out = d[10]
_BAT2AC_b_out = d[11]
_BAT2AC_c_out = d[12]
_eta_BAT = d[13]
_SOC_h = d[14]
_P_PV2BAT_DEV = d[15]
_P_BAT2AC_DEV = d[16]
_t_DEAD = int(round(d[17]))
_t_CONSTANT = d[18]
_P_SYS_SOC1_DC = d[19]
_P_SYS_SOC0_AC = d[20]
_P_SYS_SOC0_DC = d[21]
_P_PV2AC_min = _PV2AC_c_in
# Capacity of the battery, conversion from kWh to Wh
_E_BAT *= 1000
    # Efficiency of the battery, conversion from percent to a fraction
_eta_BAT /= 100
# Initialization of particular variables
# _P_PV2AC_min = _parameter['PV2AC_c_in'] # Minimum input power of the PV2AC conversion pathway
_tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
# Factor of the first-order time delay element
_ftde = 1 - np.exp(-_dt / _t_CONSTANT)
# First time step with regard to the dead time of the system control
_tstart = np.maximum(2, 1 + _t_DEAD)
_tend = int(_Pr.size)
_th = 0
corr = 0.1
# Check if the dead or settling time can be ignored and set flags accordingly
if _dt >= (3 * _t_CONSTANT) or _tend == 1:
_tstart = 1
T_DEAD = False
else:
T_DEAD = True
if _dt >= _t_DEAD + 3 * _t_CONSTANT:
SETTLING = False
else:
SETTLING = True
for t in range(_tstart - 1, _tend):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT
# Residual power with regard to the dead time of the system control
if T_DEAD:
P_rpv = _Prpv[t - _t_DEAD]
P_r = _Pr[t - _t_DEAD]
else:
P_rpv = _Prpv[t]
P_r = _Pr[t]
# Check if the battery holds enough unused capacity for charging or discharging
# Estimated amount of energy that is supplied to or discharged from the storage unit.
E_bs_rpv = P_rpv * _dt / 3600
E_bs_r = P_r * _dt / 3600
        # Reduce P_rpv to avoid overcharging of the battery
if E_bs_rpv > 0 and E_bs_rpv > (_E_BAT - E_b0):
P_rpv = (_E_BAT - E_b0) * 3600 / _dt
# When discharging take the correction factor into account
elif E_bs_r < 0 and np.abs(E_bs_r) > (E_b0):
P_r = ((E_b0) * 3600 / _dt) * (1-corr)
# Decision if the battery should be charged or discharged
if P_rpv > 0 and _soc0 < 1 - _th * (1 - _SOC_h):
'''
The last term th*(1-SOC_h) avoids the alternation between
charging and standby mode due to the DC power consumption of the
battery converter when the battery is fully charged. The battery
will not be recharged until the SOC falls below the SOC-threshold
(SOC_h) for recharging from PV.
'''
# Charging power
P_pv2bat_in = P_rpv
# Adjust the charging power due to the stationary deviations
P_pv2bat_in = np.maximum(0, P_pv2bat_in + _P_PV2BAT_DEV)
# Limit the charging power to the maximum charging power
P_pv2bat_in = np.minimum(P_pv2bat_in, _P_PV2BAT_in * 1000)
# Adjust the charging power due to the settling time
# (modeled by a first-order time delay element)
if SETTLING:
if t > 0:
P_pv2bat_in = _tde * _Ppv2bat_in[(t-1)] + _tde * (
P_pv2bat_in - _Ppv2bat_in[(t-1)]) * _ftde + P_pv2bat_in * (not _tde)
else:
P_pv2bat_in = _tde * _Ppv2bat_in0 + _tde * \
(P_pv2bat_in - _Ppv2bat_in0) * \
_ftde + P_pv2bat_in * (not _tde)
# Limit the charging power to the current power output of the PV generator
P_pv2bat_in = np.minimum(P_pv2bat_in, _Ppv[t])
# Normalized charging power
ppv2bat = P_pv2bat_in / _P_PV2BAT_in / 1000
# DC power of the battery affected by the PV2BAT conversion losses
# (the idle losses of the PV2BAT conversion pathway are not taken
# into account)
P_bat = np.maximum(
0, P_pv2bat_in - (_PV2BAT_a_in * ppv2bat**2 + _PV2BAT_b_in * ppv2bat))
# Realized DC input power of the PV2AC conversion pathway
P_pv2ac_in = _Ppv[t] - P_pv2bat_in
# Normalized DC input power of the PV2AC conversion pathway
_ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
# Realized AC power of the PV-battery system
P_pv2ac_out = np.maximum(
0, P_pv2ac_in - (_PV2AC_a_in * _ppv2ac**2 + _PV2AC_b_in * _ppv2ac + _PV2AC_c_in))
P_pvbs = P_pv2ac_out
# Transfer the final values
_Ppv2ac_out[t] = P_pv2ac_out
_Ppv2bat_in0 = P_pv2bat_in
_Ppv2bat_in[t] = P_pv2bat_in
elif P_rpv < 0 and _soc0 > 0:
# Discharging power
P_bat2ac_out = P_r * -1
# Adjust the discharging power due to the stationary deviations
P_bat2ac_out = np.maximum(0, P_bat2ac_out + _P_BAT2AC_DEV)
# Adjust the discharging power to the maximum discharging power
P_bat2ac_out = np.minimum(P_bat2ac_out, _P_BAT2AC_out * 1000)
# Adjust the discharging power due to the settling time
# (modeled by a first-order time delay element)
if SETTLING:
if t > 0:
P_bat2ac_out = _tde * _Pbat2ac_out[t-1] + _tde * (
P_bat2ac_out - _Pbat2ac_out[t-1]) * _ftde + P_bat2ac_out * (not _tde)
else:
P_bat2ac_out = _tde * _Pbat2ac_out0 + _tde * \
(P_bat2ac_out - _Pbat2ac_out0) * \
_ftde + P_bat2ac_out * (not _tde)
# Limit the discharging power to the maximum AC power output of the PV-battery system
P_bat2ac_out = np.minimum(
_P_PV2AC_out * 1000 - _Ppv2ac_out[t], P_bat2ac_out)
# Normalized discharging power
ppv2bat = P_bat2ac_out / _P_BAT2AC_out / 1000
# DC power of the battery affected by the BAT2AC conversion losses
# (if the idle losses of the PV2AC conversion pathway are covered by
# the PV generator, the idle losses of the BAT2AC conversion pathway
# are not taken into account)
if _Ppv[t] > _P_PV2AC_min:
P_bat = -1 * (P_bat2ac_out + (_BAT2AC_a_out *
ppv2bat**2 + _BAT2AC_b_out * ppv2bat))
else:
P_bat = -1 * (P_bat2ac_out + (_BAT2AC_a_out * ppv2bat **
2 + _BAT2AC_b_out * ppv2bat + _BAT2AC_c_out)) + _Ppv[t]
# Realized AC power of the PV-battery system
P_pvbs = _Ppv2ac_out[t] + P_bat2ac_out
# Transfer the final values
_Pbat2ac_out0 = P_bat2ac_out
_Pbat2ac_out[t] = P_bat2ac_out
else: # Neither charging nor discharging of the battery
# Set the DC power of the battery to zero
P_bat = 0
# Realized AC power of the PV-battery system
P_pvbs = _Ppv2ac_out[t]
# Decision if the standby mode is active
if P_bat == 0 and P_pvbs == 0 and _soc0 <= 0: # Standby mode in discharged state
# DC and AC power consumption of the PV-battery inverter
P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
P_pvbs = -_P_SYS_SOC0_AC
elif P_bat == 0 and P_pvbs > 0 and _soc0 > 0: # Standby mode in fully charged state
# DC power consumption of the PV-battery inverter
P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
# Transfer the realized AC power of the PV-battery system and the DC power of the battery
_Ppvbs[t] = P_pvbs
_Pbat[t] = P_bat
        # Change the energy content of the battery (conversion from Ws to Wh)
if P_bat > 0:
E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
elif P_bat < 0:
E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
else:
E_b = E_b0
# Calculate the state of charge of the battery
_soc0 = E_b / _E_BAT
_soc[t] = _soc0
# Adjust the hysteresis threshold to avoid alternation between charging
# and standby mode due to the DC power consumption of the
# PV-battery inverter
if _th and _soc[t] > _SOC_h or _soc[t] > 1:
_th = True
else:
_th = False
return _Ppv2ac_out, _Ppv2bat_in, _Ppv2bat_in0, _Pbat2ac_out, _Pbat2ac_out0, _Ppvbs, _Pbat, _soc, _soc0
@nb.jit(nopython=True)
def batmod_dc_ideal(d, _dt, _soc0, _soc, _Pr, _Pbat):
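    """Performance simulation function for an ideal DC-coupled battery system
    (conversion losses are neglected).

    :param d: array containing parameters (d[0]: usable battery capacity in kWh)
    :type d: numpy array
    :param dt: time step width in seconds
    :type dt: integer
    :param soc0: state of charge in the previous time step
    :type soc0: float
    :param Pr: residual power
    :type Pr: numpy array
    """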
_E_BAT = d[0]
for t in range(_Pr.size):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT * 1000
P_bat = _Pr[t]
if P_bat > 0 and _soc0 < 1: # Battery charging
# Change the energy content of the battery
E_b = E_b0 + P_bat * _dt / 3600
elif P_bat < 0 and _soc0 > 0: # Battery discharging
# Change the energy content of the battery
E_b = E_b0 + P_bat * _dt / 3600
else: # Neither charging nor discharging of the battery
P_bat = 0
E_b = E_b0
_Pbat[t] = P_bat
_soc0 = E_b / (_E_BAT * 1000)
_soc[t] = _soc0
return _Pbat, _soc, _soc0
@nb.jit(nopython=True)
def batmod_pv(d, _dt, _soc0, _soc, _Ppv, _Pac, _Ppv2bat_in0, _Ppv2bat_in, _Ppv2ac_out, _Pbat2pv_out0, _Pbat2pv_out, _Ppvbs, _Pbat):
"""Performance simulation function for PV-coupled battery systems
:param d: array containing parameters
:type d: numpy array
:param dt: time step width
:type dt: integer
:param soc0: state of charge of the battery in the previous time step
:type soc0: float
:param soc: state of charge of the battery
:type soc: numpy array
:param Pr: residual power
:type Pr: numpy array
:param Ppv: PV-power
:type Ppv: numpy array
:param Pac: AC output power of the PV inverter
:type Pac: numpy array
:param Ppv2bat_in: AC input power of the battery system
:type Ppv2bat_in: numpy array
:param Ppv2bat_in0: AC input power of the battery system in the previous time step
:type Ppv2bat_in0: float
:param Pbat2pv_out0: AC output power of the battery system in the previous time step
:type Pbat2pv_out0: float
:param Pbat2pv_out: AC output power of the battery system
:type Pbat2pv_out: numpy array
:param Ppvbs: AC power from the PV system to the battery system
:type Ppvbs: numpy array
:param Pbat: DC power of the battery
    :type Pbat: numpy array
"""
# Initialization of particular variables
_E_BAT = d[0]
_P_PV2AC_in = d[1]
_P_PV2AC_out = d[2]
_P_PV2BAT_in = d[3]
_P_BAT2PV_out = d[4]
_PV2AC_a_in = d[5]
_PV2AC_b_in = d[6]
_PV2AC_c_in = d[7]
_PV2BAT_a_in = d[8]
_PV2BAT_b_in = d[9]
_PV2BAT_c_in = d[10]
_PV2AC_a_out = d[11]
_PV2AC_b_out = d[12]
_PV2AC_c_out = d[13]
_BAT2PV_a_out = d[14]
_BAT2PV_b_out = d[15]
_BAT2PV_c_out = d[16]
_eta_BAT = d[17]
_SOC_h = d[18]
_P_PV2BAT_DEV = d[19]
_P_BAT2AC_DEV = d[20]
_P_SYS_SOC1_DC = d[21]
_P_SYS_SOC0_AC = d[22]
_P_SYS_SOC0_DC = d[23]
_t_DEAD = int(round(d[24]))
_t_CONSTANT = d[25]
    # Correction factor to avoid overcharging and over-discharging of the battery
corr = 0.1
_P_PV2BAT_min = _PV2BAT_c_in # Minimum DC charging power
_P_BAT2PV_min = _BAT2PV_c_out # Minimum DC discharging power
# Initialization of particular variables
_tde = _t_CONSTANT > 0 # Binary variable to activate the first-order time delay element
# Factor of the first-order time delay element
_ftde = 1 - np.exp(-_dt / _t_CONSTANT)
# First time step with regard to the dead time of the system control
_tstart = np.maximum(2, 1 + _t_DEAD)
_tend = int(_Ppv.size)
_th = 0
    _E_BAT *= 1000  # Capacity of the battery, conversion from kWh to Wh
    _eta_BAT /= 100  # Efficiency of the battery, conversion from percent to a fraction
# Check if the dead or settling time can be ignored and set flags accordingly
if _dt >= (3 * _t_CONSTANT) or _tend == 1:
_tstart = 1
T_DEAD = False
else:
T_DEAD = True
if _dt >= _t_DEAD + 3 * _t_CONSTANT:
SETTLING = False
else:
SETTLING = True
for t in range(_tstart - 1, _tend):
# Energy content of the battery in the previous time step
E_b0 = _soc0 * _E_BAT
# Target AC output power of the PV-battery system to cover the AC power demand
if T_DEAD:
P_pvbs = np.minimum(_Pac[t - _t_DEAD], _P_PV2AC_out * 1000)
else:
P_pvbs = np.minimum(_Pac[t], _P_PV2AC_out * 1000)
# Normalized AC output power of the PV2AC conversion pathway
ppv2ac = P_pvbs / _P_PV2AC_out / 1000
# Target DC input power of the PV2AC conversion pathway
P_pv2ac_in = P_pvbs + (_PV2AC_a_out * ppv2ac **
2 + _PV2AC_b_out * ppv2ac + _PV2AC_c_out)
# Residual power
if T_DEAD:
P_rpv = _Ppv[t - _t_DEAD] - P_pv2ac_in
else:
P_rpv = _Ppv[t] - P_pv2ac_in
# Check if the battery holds enough unused capacity for charging or discharging
# Estimated amount of energy that is supplied to or discharged from the storage unit.
E_bs_rpv = P_rpv * _dt / 3600
        # Reduce P_rpv to avoid overcharging of the battery
if E_bs_rpv > 0 and E_bs_rpv > (_E_BAT - E_b0):
P_rpv = ((_E_BAT - E_b0) * 3600) / _dt
        # When discharging take the correction factor into account
elif E_bs_rpv < 0 and np.abs(E_bs_rpv) > (E_b0):
P_rpv = ((E_b0) * 3600 / _dt) * (1-corr)
# Decision if the battery should be charged or discharged
if P_rpv > _P_PV2BAT_min and _soc0 < 1 - _th * (1 - _SOC_h):
'''
The last term th*(1-SOC_h) avoids the alternation between
charging and standby mode due to the DC power consumption of the
battery converter when the battery is fully charged. The battery
will not be recharged until the SOC falls below the SOC-threshold
(SOC_h) for recharging from PV.
'''
# Charging power
P_pv2bat_in = P_rpv
# Adjust the charging power due to stationary deviations
P_pv2bat_in = np.maximum(0, P_pv2bat_in + _P_PV2BAT_DEV)
# Limit the charging power to the maximum charging power
P_pv2bat_in = np.minimum(P_pv2bat_in, _P_PV2BAT_in * 1000)
# Adjust the charging power due to the settling time
# (modeled by a first-order time delay element)
if SETTLING:
if t > 0:
P_pv2bat_in = _tde * _Ppv2bat_in[t-1] + _tde * (
P_pv2bat_in - _Ppv2bat_in[t-1]) * _ftde + P_pv2bat_in * (not _tde)
else:
P_pv2bat_in = _tde * _Ppv2bat_in0 + _tde * \
(P_pv2bat_in - _Ppv2bat_in0) * \
_ftde + P_pv2bat_in * (not _tde)
# Limit the charging power to the current power output of the PV generator
P_pv2bat_in = np.minimum(P_pv2bat_in, _Ppv[t])
# Normalized charging power
ppv2bat = P_pv2bat_in / _P_PV2BAT_in / 1000
# DC power of the battery
P_bat = np.maximum(0, P_pv2bat_in - (_PV2BAT_a_in *
ppv2bat**2 + _PV2BAT_b_in * ppv2bat + _PV2BAT_c_in))
# Realized DC input power of the PV2AC conversion pathway
P_pv2ac_in = _Ppv[t] - P_pv2bat_in
# Limit the DC input power of the PV2AC conversion pathway
P_pv2ac_in = np.minimum(P_pv2ac_in, _P_PV2AC_in * 1000)
# Recalculate Ppv(t) with limited PV2AC input power
_Ppv[t] = P_pv2ac_in + P_pv2bat_in
# Normalized DC input power of the PV2AC conversion pathway
ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
# Realized AC power of the PV-battery system
P_pv2ac_out = np.maximum(
0, P_pv2ac_in - (_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
P_pvbs = P_pv2ac_out
# Transfer the final values
_Ppv2ac_out[t] = P_pv2ac_out
_Ppv2bat_in0 = P_pv2bat_in
_Ppv2bat_in[t] = P_pv2bat_in
elif P_rpv < -_P_BAT2PV_min and _soc0 > 0:
# Target discharging power of the battery
P_bat2pv_out = np.abs(P_rpv)
# Adjust the discharging power due to the stationary deviations
P_bat2pv_out = np.maximum(0, P_bat2pv_out + _P_BAT2AC_DEV)
# Adjust the discharging power to the maximum discharging power
P_bat2pv_out = np.minimum(P_bat2pv_out, _P_BAT2PV_out * 1000)
# Adjust the discharging power due to the settling time
# (modeled by a first-order time delay element)
if SETTLING:
if t > 0:
P_bat2pv_out = _tde * _Pbat2pv_out[t-1] + _tde * (P_bat2pv_out - _Pbat2pv_out[t-1]) * _ftde + P_bat2pv_out * (not _tde)
else:
P_bat2pv_out = _tde * _Pbat2pv_out0 + _tde * (P_bat2pv_out - _Pbat2pv_out0) * _ftde + P_bat2pv_out * (not _tde)
            # Limit Ppv(t) to the maximum DC input power of the PV2AC conversion pathway
_Ppv[t] = np.minimum(_P_PV2AC_in * 1000, _Ppv[t])
# Limit the discharging power to the maximum AC power output of the PV-battery system
P_bat2pv_out = np.minimum(_P_PV2AC_in * 1000 - _Ppv[t], P_bat2pv_out)
# Normalized discharging power
pbat2pv = P_bat2pv_out / _P_BAT2PV_out / 1000
# DC power of the battery affected by the BAT2PV conversion losses
P_bat = -1*(P_bat2pv_out+(_BAT2PV_a_out * pbat2pv**2 + _BAT2PV_b_out * pbat2pv + _BAT2PV_c_out))
# Realized DC input power of the PV2AC conversion pathway
P_pv2ac_in = _Ppv[t] + P_bat2pv_out
# Normalized DC input power of the PV2AC conversion pathway
ppv2ac = P_pv2ac_in / _P_PV2AC_in / 1000
# AC power of the PV-battery system
P_pvbs = np.maximum(0, P_pv2ac_in-(_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
P_pv2ac_out = P_pvbs
# Transfer the final values
_Ppv2ac_out[t] = P_pv2ac_out
_Pbat2pv_out0 = P_bat2pv_out
_Pbat2pv_out[t] = P_bat2pv_out
else: # Neither charging nor discharging of the battery
# Set the DC power of the battery to zero
P_bat = 0
# Limit the power output of the PV generator to the maximum input power
# of the PV inverter
_Ppv[t] = np.minimum(_Ppv[t], _P_PV2AC_in * 1000)
# Normalized DC input power of the PV2AC conversion pathway
ppv2ac = _Ppv[t] / _P_PV2AC_in / 1000
# Realized AC power of the PV-battery system
P_pvbs = np.maximum(0, _Ppv[t] - (_PV2AC_a_in * ppv2ac**2 + _PV2AC_b_in * ppv2ac + _PV2AC_c_in))
# Transfer the final values
_Ppv2ac_out[t] = P_pvbs
# Decision if the standby mode is active
if P_bat == 0 and _soc0 <= 0: # Standby mode in discharged state
# DC power consumption of the battery converter
P_bat = -np.maximum(0, _P_SYS_SOC0_DC)
if P_pvbs == 0:
P_pvbs = -_P_SYS_SOC0_AC
elif P_bat == 0 and P_pvbs > 0 and _soc0 > 0: # Standby mode in fully charged state
# DC power consumption of the battery converter
P_bat = -np.maximum(0, _P_SYS_SOC1_DC)
# Transfer the realized AC power of the battery system and
# the DC power of the battery
_Ppvbs[t] = P_pvbs
_Pbat[t] = P_bat
        # Change the energy content of the battery (conversion from Ws to Wh)
if P_bat > 0:
E_b = E_b0 + P_bat * np.sqrt(_eta_BAT) * _dt / 3600
elif P_bat < 0:
E_b = E_b0 + P_bat / np.sqrt(_eta_BAT) * _dt / 3600
else:
E_b = E_b0
# Calculate the state of charge of the battery
_soc0 = E_b / (_E_BAT)
_soc[t] = _soc0
# Adjust the hysteresis threshold to avoid alternation
# between charging and standby mode due to the DC power
# consumption of the battery converter.
if _th and _soc[t] > _SOC_h or _soc[t] > 1:
_th = True
else:
_th = False
return _soc, _soc0, _Ppv, _Ppvbs, _Pbat, _Ppv2ac_out, _Pbat2pv_out, _Ppv2bat_in
def bat_res_mod(_parameter, _Pl, _Ppv, _Pbat, _dt, *args):
"""Function for calculating energy sums
:param _parameter: parameter of the system
:type _parameter: dict
:param _Pl: load power
:type _Pl: numpy array
:param _Ppv: output power of the PV generator
:type _Ppv: numpy array
:param _Pbat: DC power of the battery
:type _Pbat: numpy array
:param _dt: time step width
:type _dt: integer
:return: energy sums
:rtype: dict
"""
_E = dict()
if _parameter['Top'] == 'AC': # AC-coupled systems
_Ppvs = args[0] # AC output power of the PV system
_Pbs = args[1] # AC power of the battery system
# Additional power consumption of the other system components
_Pperi = args[2]
elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
_Ppv2ac = args[0] # AC output power of the PV2AC conversion pathway
_Ppv2bat_in = args[1] # Input power of the PV2BAT conversion pathway
_Ppvbs = args[2] # AC power of the PV-battery system
# Additional power consumption of the other system components
_Pperi = args[3]
_Ppv2ac_in = _Ppv - _Ppv2bat_in # Input power of the PV2AC conversion pathway
# Total load including the power consumption of the other system components
_Plt = _Pl + _Pperi
# DC input power of the battery (charged)
_Pbatin = np.maximum(0, _Pbat)
# DC output power of the battery (discharged)
_Pbatout = np.minimum(0, _Pbat)
# Maximum PV feed-in power
_P_ac2g_max = _parameter['p_ac2g_max'] * _parameter['P_PV'] * 1000
if _parameter['Top'] == 'AC': # AC-coupled systems
# Residual power without curtailment
_Pr = _Ppvs - _Plt
# AC input power of the battery system
_Pac2bs = np.maximum(0, _Pbs)
# AC output power of the battery system
_Pbs2ac = np.minimum(0, _Pbs)
# Negative residual power (residual load demand)
_Prn = np.minimum(0, _Pr)
# Positive residual power (surplus PV power)
_Prp = np.maximum(0, _Pr)
# Direct use of PV power by the load
_Ppvs2l = np.minimum(_Ppvs, _Plt)
# PV charging power
_Ppvs2bs = np.minimum(_Prp, _Pac2bs)
# Grid charging power
_Pg2bs = np.maximum(_Pac2bs - _Prp, 0)
# Grid supply power of the load
_Pg2l = np.minimum(_Prn - _Pbs2ac, 0)
# Battery supply power of the load
_Pbs2l = np.maximum(_Prn, _Pbs2ac)
# Battery feed-in power
_Pbs2g = np.minimum(_Pbs2ac - _Prn, 0)
# PV feed-in power including curtailment
_Ppvs2g = np.minimum(np.maximum(_Prp - _Pac2bs, 0), _P_ac2g_max)
# Power demand from the grid
_Pg2ac = _Pg2l - _Pg2bs
# Feed-in power to the grid
_Pac2g = _Ppvs2g - _Pbs2g
# Grid power
_Pg = _Pac2g + _Pg2ac
# Curtailed PV power (AC output power)
_Pct = np.maximum(_Prp - _Pac2bs, 0) - _Ppvs2g
# AC output power of the PV system including curtailment
_Ppvs = _Ppvs - _Pct
# Residual power including curtailment
_Pr = _Ppvs - _Plt
# Index for PV curtailment
_idx = np.where(_Pct > 0)[0]
for i in range(len(_idx)):
_tct = _idx[i]
# Normalized output power of the PV inverter
_ppvinvout = _Ppvs[_tct] / _parameter['P_PV2AC_out'] / 1000
# DC output power of the PV generator taking into account the
# conversion and curtailment losses
_Ppv[_tct] = _Ppvs[_tct] + (_parameter['PV2AC_a_out'] * _ppvinvout **
2 + _parameter['PV2AC_b_out'] * _ppvinvout + _parameter['PV2AC_c_out'])
elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
# Grid power demand of the PV-battery system
_Pg2pvbs = np.minimum(0, _Ppvbs)
# AC input power of the PV-battery system
_Pac2pvbs = _Pg2pvbs
# AC output power of the PV-battery system
_Ppvbs2ac = np.maximum(0, _Ppvbs)
# Load supply power by the PV-battery system
_Ppvbs2l = np.minimum(_Plt, _Ppvbs2ac)
# Load supply power by the grid
_Pg2l = _Plt - _Ppvbs2l
# Direct use of PV power by the load
_Ppv2l = np.minimum(_Plt, _Ppv2ac)
# PV feed-in power including curtailment
_Ppv2g = np.minimum(_Ppv2ac - _Ppv2l, _P_ac2g_max)
# Curtailed PV power (AC output power)
_Pct = _Ppv2ac - _Ppv2l - _Ppv2g
if np.sum(_Pct) > 0:
# Power of the PV-battery system including curtailment
_Ppvbs = _Ppvbs - _Pct
# AC output power of the PV-battery system including curtailment
_Ppvbs2ac = np.maximum(0, _Ppvbs)
# AC output power of the PV2AC conversion pathway including curtailment
_Ppv2ac = _Ppv2ac - _Pct
# Index for PV curtailment
_idx = np.where(_Pct > 0)[0]
for i in range(len(_idx)):
_tct = _idx[i]
# Specific AC output power of the PV2AC conversion pathway
_ppv2ac = _Ppv2ac[_tct] / _parameter['P_PV2AC_out'] / 1000
# DC input power of the PV2AC conversion pathway including curtailment
_Ppv2ac_in[_tct] = _Ppv2ac[_tct] + (_parameter['PV2AC_a_out'] * _ppv2ac **
2 + _parameter['PV2AC_b_out'] * _ppv2ac + _parameter['PV2AC_c_out'])
# DC output power of the PV generator including curtailment
_Ppv = _Ppv2ac_in + _Ppv2bat_in
# Grid power including curtailment
_Pg = _Ppvbs-_Plt
# Feed-in power to the grid including curtailment
_Pac2g = np.maximum(0, _Pg)
# Power demand from the grid
_Pg2ac = np.minimum(0, _Pg)
    # Energy sums in MWh
# Electrical demand including the energy consumption of the other system components
_E['El'] = np.sum(np.abs(_Plt)) * _dt / 3.6e9
# DC output of the PV generator including curtailment
_E['Epv'] = np.sum(np.abs(_Ppv)) * _dt / 3.6e9
# DC input of the battery (charged)
_E['Ebatin'] = np.sum(np.abs(_Pbatin)) * _dt / 3.6e9
# DC output of the battery (discharged)
_E['Ebatout'] = np.sum(np.abs(_Pbatout)) * _dt / 3.6e9
# Grid feed-in
_E['Eac2g'] = np.sum(np.abs(_Pac2g)) * _dt / 3.6e9
# Grid demand
_E['Eg2ac'] = np.sum(np.abs(_Pg2ac)) * _dt / 3.6e9
# Load supply by the grid
_E['Eg2l'] = np.sum(np.abs(_Pg2l)) * _dt / 3.6e9
# Demand of the other system components
_E['Eperi'] = np.sum(np.abs(_Pperi)) * _dt / 3.6e9
# Curtailed PV energy
_E['Ect'] = np.sum(np.abs(_Pct)) * _dt / 3.6e9
if _parameter['Top'] == 'AC': # AC-coupled systems
# AC output of the PV system including curtailment
_E['Epvs'] = np.sum(np.abs(_Ppvs)) * _dt / 3.6e9
# AC input of the battery system
_E['Eac2bs'] = np.sum(np.abs(_Pac2bs)) * _dt / 3.6e9
# AC output of the battery system
_E['Ebs2ac'] = np.sum(np.abs(_Pbs2ac)) * _dt / 3.6e9
# Direct use of PV energy
_E['Epvs2l'] = np.sum(np.abs(_Ppvs2l)) * _dt / 3.6e9
# PV charging
_E['Epvs2bs'] = np.sum(np.abs(_Ppvs2bs)) * _dt / 3.6e9
# Grid charging
_E['Eg2bs'] = np.sum(np.abs(_Pg2bs)) * _dt / 3.6e9
# PV feed-in
_E['Epvs2g'] = np.sum(np.abs(_Ppvs2g)) * _dt / 3.6e9
# Load supply by the battery system
_E['Ebs2l'] = np.sum(np.abs(_Pbs2l)) * _dt / 3.6e9
# Battery feed-in
_E['Ebs2g'] = np.sum(np.abs(_Pbs2g)) * _dt / 3.6e9
elif _parameter['Top'] == 'DC' or _parameter['Top'] == 'PV': # DC- and PV-coupled systems
# Grid demand of the PV-battery system
_E['Eg2pvbs'] = np.sum(np.abs(_Pg2pvbs)) * _dt / 3.6e9
# AC input of the PV-battery system
_E['Eac2pvbs'] = np.sum(np.abs(_Pac2pvbs)) * _dt / 3.6e9
# AC output of the PV-battery system
_E['Epvbs2ac'] = np.sum(np.abs(_Ppvbs2ac)) * _dt / 3.6e9
# Load supply by the PV-battery system
_E['Epvbs2l'] = np.sum(np.abs(_Ppvbs2l)) * _dt / 3.6e9
return _E
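# ---------------------------------------------------------------------------
# Minimal usage sketch for bat_res_mod with an AC-coupled topology. All values
# below are hypothetical placeholders; in the real workflow the parameter dict
# comes from load_parameter()/eta2abc() and the power series from the
# simulation functions above.
def _example_bat_res_mod_ac():
    par = {'Top': 'AC', 'P_PV': 5.0, 'p_ac2g_max': 0.7,
           'P_PV2AC_out': 5.0, 'PV2AC_a_out': 5.0, 'PV2AC_b_out': 20.0, 'PV2AC_c_out': 10.0}
    n = 35040                                    # one year at 15 min resolution
    dt = 900                                     # time step width in s
    Pl = np.full(n, 500.0)                       # constant load in W
    Ppv = np.clip(4000.0 * np.sin(np.linspace(0.0, 365 * 2 * np.pi, n)), 0.0, None)
    Ppvs = 0.95 * Ppv                            # AC output power of the PV system
    Pbat = np.zeros(n)                           # DC power of the battery
    Pbs = np.zeros(n)                            # AC power of the battery system
    Pperi = np.zeros(n)                          # power of the other system components
    return bat_res_mod(par, Pl, Ppv, Pbat, dt, Ppvs, Pbs, Pperi)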
def bat_res_mod_ideal(_parameter, _Pl, _Ppv, _Pbat, _dt, *args):
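    """Function for calculating energy sums of the ideal (lossless) reference cases.

    Takes the same arguments as bat_res_mod and returns a dictionary of energy sums.
    """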
E = dict() # Dictionary to store energy sums
if _parameter['Top'] == 'AC':
Ppvs = args[0] # AC output power of the PV system
Pbs = args[1] # AC power of the battery system
Pperi = args[2] # Additional power consumption of the other system components
elif _parameter['Top'] == 'DC':
Ppv2ac = args[0]
Ppv2bat_in = args[1]
Ppvbs = args[2]
Pperi = args[3]
Ppv2ac_in = _Ppv - Ppv2bat_in
# Additional power consumption of the other system components
Pperi = np.zeros_like(_Ppv)
# Total load including the power consumption of the other system components
Plt = _Pl
# DC input power of the battery (charged)
Pbatin = np.maximum(0, _Pbat)
# DC output power of the battery (discharged)
Pbatout = np.minimum(0, _Pbat)
if _parameter['Top'] == 'AC':
# Grid power
Pg = Ppvs - _Pl - Pbs
# Residual power
Pr = Ppvs - Plt
# AC input power of the battery system
Pac2bs = np.maximum(0, Pbs)
# AC output power of the battery system
Pbs2ac = np.minimum(0, Pbs)
# Negative residual power (residual load demand)
Prn = np.minimum(0, Pr)
# Positive residual power (surplus PV power)
Prp = np.maximum(0, Pr)
# Direct use of PV power by the load
Ppvs2l = np.minimum(Ppvs, Plt)
# PV charging power
Ppvs2bs=np.minimum(Prp, Pac2bs)
# Grid charging power
Pg2bs=np.maximum(Pac2bs - Prp, 0)
# Grid supply power of the load
Pg2l=np.minimum(Prn - Pbs2ac, 0)
# Battery supply power of the load
Pbs2l=np.maximum(Prn, Pbs2ac)
# Battery feed-in power
Pbs2g=np.minimum(Pbs2ac - Prn, 0)
# PV feed-in power
Ppvs2g=np.maximum(Prp - Pac2bs, 0)
elif _parameter['Top'] == 'DC':
# Grid power
Pg = Ppvbs - _Pl
# Grid power demand of the PV-battery system
Pg2pvbs = np.minimum(0, Ppvbs)
# AC input power of the PV-battery system
Pac2pvbs = Pg2pvbs
# AC output power of the PV-battery system
Ppvbs2ac = np.maximum(0, Ppvbs)
# Load supply power by the PV-battery system
Ppvbs2l = np.minimum(_Pl, Ppvbs2ac)
# Load supply power by the grid
Pg2l = (Plt - Ppvbs2l)
# Curtailed PV power (AC output power)
Pct = np.zeros_like(_Ppv)
# Power demand from the grid
Pg2ac = np.minimum(0, Pg)
# Feed-in power to the grid
Pac2g=np.maximum(0, Pg)
# Energy sums
# Electrical demand including the energy consumption of the other system components
E['El'] = np.sum(np.abs(Plt)) / 3.6e9
# DC output of the PV generator including curtailment
E['Epv'] = np.sum(np.abs(_Ppv)) / 3.6e9
# DC input of the battery (charged)
E['Ebatin'] = np.sum(np.abs(Pbatin)) / 3.6e9
# DC output of the battery (discharged)
E['Ebatout'] = np.sum(np.abs(Pbatout)) / 3.6e9
# Grid feed-in
E['Eac2g'] = np.sum(np.abs(Pac2g)) / 3.6e9
# Grid demand
E['Eg2ac'] = np.sum(np.abs(Pg2ac)) / 3.6e9
# Load supply by the grid
E['Eg2l'] = np.sum(np.abs(Pg2l)) / 3.6e9
# Demand of the other system components
E['Eperi'] = np.sum(np.abs(Pperi)) / 3.6e9
# Curtailed PV energy
E['Ect'] = np.sum(np.abs(Pct)) / 3.6e9
if _parameter['Top'] == 'AC':
# AC output of the PV system including curtailment
E['Epvs']=np.sum(np.abs(Ppvs)) / 3.6e9
# AC input of the battery system
E['Eac2bs']=np.sum(np.abs(Pac2bs)) / 3.6e9
# AC output of the battery system
E['Ebs2ac']=np.sum(np.abs(Pbs2ac)) / 3.6e9
# Direct use of PV energy
E['Epvs2l']=np.sum(np.abs(Ppvs2l)) / 3.6e9
# PV charging
E['Epvs2bs']=np.sum(np.abs(Ppvs2bs)) / 3.6e9
# Grid charging
E['Eg2bs']=np.sum(np.abs(Pg2bs)) / 3.6e9
# PV feed-in
E['Epvs2g']=np.sum(np.abs(Ppvs2g)) / 3.6e9
# Load supply by the battery system
E['Ebs2l']=np.sum(np.abs(Pbs2l)) / 3.6e9
# Battery feed-in
E['Ebs2g']=np.sum(np.abs(Pbs2g)) / 3.6e9
elif _parameter['Top'] == 'DC':
# Grid demand of the PV-battery system
E['Eg2pvbs'] = np.sum(np.abs(Pg2pvbs)) / 3.6e9
# AC input of the PV-battery system
E['Eac2pvbs'] = np.sum(np.abs(Pac2pvbs)) / 3.6e9
# AC output of the PV-battery system
E['Epvbs2ac'] = np.sum(np.abs(Ppvbs2ac)) / 3.6e9
# Load supply by the PV-battery system
E['Epvbs2l'] = np.sum(np.abs(Ppvbs2l)) / 3.6e9
return E
def load_parameter(fname, col_name):
"""Loads system parameter from excel file
:param fname: Path to the excel file
:type fname: string
:param col_name: Column to read data from
:type col_name: string
:return: Dictionary holding parameters from the Excel sheet
:rtype: dict
"""
wb = load_workbook(fname, data_only=True)
ws = wb['Data'] # Load Data sheet of excel file
# read keys and values from Excel sheet
keys = (c.value for c in ws['E'][1:])
values = (c.value if c.value != 'ns' else None for c in ws[col_name][1:])
parameter = dict(zip(keys, values))
# deletes entries where key is None
del parameter[None]
# Assign specific parameters
parameter['P_PV2AC_out_PVINV'] = ws[col_name][15].value
parameter['P_PV2AC_out'] = ws[col_name][24].value
parameter['P_AC2BAT_in_DCC'] = ws[col_name][25].value
parameter['P_AC2BAT_in'] = ws[col_name][26].value
parameter['P_BAT2AC_out'] = ws[col_name][27].value
parameter['P_BAT2AC_out_DCC'] = ws[col_name][28].value
    # Set reference case values to boolean
if parameter['ref_1'] == 'yes':
parameter['ref_1'] = True
elif parameter['ref_1'] == 'no':
parameter['ref_1'] = False
if parameter['ref_2'] == 'yes':
parameter['ref_2'] = True
elif parameter['ref_2'] == 'no':
parameter['ref_2'] = False
# Specific parameters of DC-coupled systems
if parameter['Top'] == 'DC':
parameter['P_AC2BAT_in'] = parameter['P_AC2BAT_in_DCC'] # Nominal charging power (AC) in kW
parameter['P_BAT2AC_out'] = parameter['P_BAT2AC_out_DCC']
# Specific parameters of PV inverters and AC-coupled systems
if parameter['Top'] == 'PVINV' or parameter['Top'] == 'AC' and parameter['P_PV2AC_out_PVINV'] is not None:
parameter['P_PV2AC_out'] = parameter['P_PV2AC_out_PVINV']
# Specific parameters of PV-coupled systems
if parameter['Top'] == 'PV':
parameter['P_BAT2PV_in'] = parameter['P_BAT2AC_in']
parameter['P_BAT2AC_out'] = parameter['P_BAT2AC_out_DCC']
# replace 'ns', 'o' and 'c' entries to None
for key, value in parameter.items():
if value == 'ns' or value == 'o' or value == 'c' or value == ' ':
parameter[key] = None
# Convert to kW
convert_to_kw = ['P_PV2AC_in', 'P_PV2AC_out_PVINV','P_PV2AC_out','P_AC2BAT_in_DCC','P_AC2BAT_in','P_BAT2AC_out',
'P_BAT2AC_out_DCC','P_PV2BAT_in','P_BAT2PV_out','P_PV2BAT_out','P_BAT2AC_in']
for par in convert_to_kw:
if parameter[par]:
parameter[par] /= 1000
return parameter
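# Hypothetical parameter-preparation chain (the workbook name and column are
# placeholders, not an actual file shipped with this code):
# parameter = load_parameter('PerModPAR.xlsx', 'B')
# parameter = eta2abc(parameter)
# d = transform_dict_to_array(parameter)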
def eta2abc(parameter):
"""Function to calculate the parameters of the power loss functions (quadratic equations) from the path efficiencies
:param parameter: Holds parameters of the system
:type parameter: dict
    :return: Dictionary holding the system parameters extended with the power loss coefficients
:rtype: dict
"""
# PV2AC conversion pathway TODO
if parameter['Top'] == 'DC' or parameter['Top'] == 'PVINV' or parameter['Top'] == 'PV' and parameter['P_PV2AC_out'] is not None or parameter['Top'] == 'AC' and parameter['P_PV2AC_out'] is not None:
# Create variables for the sampling points and corresponding efficiencies TODO
p_pv2ac = np.fromiter((value for key, value in parameter.items() if 'p_PV2AC_' in key and value is not None), float)
eta_pv2ac = np.fromiter((value / 100 for key, value in parameter.items() if 'eta_PV2AC_' in key and value is not None), float)
# Absolute input and output power in W
p_pv2ac_out = parameter['P_PV2AC_out'] * p_pv2ac * 1000
p_pv2ac_in = p_pv2ac_out / eta_pv2ac
# Absolute power loss in W
P_l_pv2ac_in = (1 - eta_pv2ac) * p_pv2ac_in
P_l_pv2ac_out = (1 / eta_pv2ac - 1) * p_pv2ac_out
# Polynomial curve fitting parameters of the power loss functions in W
# Based on input power
p = np.polyfit(p_pv2ac_in / parameter['P_PV2AC_in'] / 1000, P_l_pv2ac_in, 2)
parameter['PV2AC_a_in'] = p[0]
parameter['PV2AC_b_in'] = p[1]
parameter['PV2AC_c_in'] = p[2]
# Based on output power
p = np.polyfit(p_pv2ac, P_l_pv2ac_out, 2)
parameter['PV2AC_a_out'] = p[0]
parameter['PV2AC_b_out'] = p[1]
parameter['PV2AC_c_out'] = p[2]
# PV2BAT conversion pathway
if parameter['Top'] == 'DC' or parameter['Top'] == 'PV':
# Create variables for the sampling points and corresponding efficiencies
p_pv2bat = np.array([value for key, value in parameter.items() if 'p_PV2BAT_' in key])
eta_pv2bat = np.array([value / 100 for key, value in parameter.items() if 'eta_PV2BAT_' in key])
# Create missing variables
# Nominal input power of the PV2BAT conversion pathway of DC-coupled systems
if parameter['P_PV2BAT_in'] is None:
parameter['P_PV2BAT_in'] = parameter['P_PV2BAT_out'] / (parameter['eta_PV2BAT_100'] / 100)
# Absolute input and output power in W
p_pv2bat_out = parameter['P_PV2BAT_out'] * p_pv2bat * 1000
p_pv2bat_in = p_pv2bat_out / eta_pv2bat
# Absolute power loss in W
P_l_pv2bat_in = (1 - eta_pv2bat) * p_pv2bat_in
P_l_pv2bat_out = (1 / eta_pv2bat - 1) * p_pv2bat_out
# Polynomial curve fitting parameters of the power loss functions in W
# Based on input power
p = np.polyfit(p_pv2bat_in / parameter['P_PV2BAT_in'] / 1000, P_l_pv2bat_in, 2)
parameter['PV2BAT_a_in'] = p[0]
parameter['PV2BAT_b_in'] = p[1]
parameter['PV2BAT_c_in'] = p[2]
# Based on output power
p = np.polyfit(p_pv2bat, P_l_pv2bat_out, 2)
parameter['PV2BAT_a_out'] = p[0]
parameter['PV2BAT_b_out'] = p[1]
parameter['PV2BAT_c_out'] = p[2]
# AC2BAT conversion pathway
if parameter['Top'] == 'AC' or parameter['Top'] == 'DC' and parameter['P_AC2BAT_in'] is not None:
# Create variables for the sampling points and corresponding efficiencies TODO
p_ac2bat = np.fromiter((value for key, value in parameter.items() if 'p_AC2BAT_' in key), float)
eta_ac2bat = np.fromiter((value / 100 for key, value in parameter.items() if 'eta_AC2BAT_' in key), float)
# Absolute input and output power in W
p_ac2bat_out = parameter['P_PV2BAT_out'] * p_ac2bat * 1000
p_ac2bat_in = p_ac2bat_out / eta_ac2bat
# Absolute power loss in W
P_l_ac2bat_in = (1 - eta_ac2bat) * p_ac2bat_in
P_l_ac2bat_out = (1 / eta_ac2bat - 1) * p_ac2bat_out
# Polynomial curve fitting parameters of the power loss functions in W
# Based on input power
p = np.polyfit(p_ac2bat_in / parameter['P_AC2BAT_in'] / 1000, P_l_ac2bat_in, 2)
parameter['AC2BAT_a_in'] = p[0]
parameter['AC2BAT_b_in'] = p[1]
parameter['AC2BAT_c_in'] = p[2]
# Based on output power
p = np.polyfit(p_ac2bat, P_l_ac2bat_out, 2)
parameter['AC2BAT_a_out'] = p[0]
parameter['AC2BAT_b_out'] = p[1]
parameter['AC2BAT_c_out'] = p[2]
# BAT2AC conversion pathway
if parameter['Top'] =='AC' or parameter['Top'] =='DC' or parameter['Top'] =='PV' and parameter['P_BAT2AC_out'] is not None:
# Create variables for the sampling points and corresponding efficiencies TODO
p_bat2ac = np.fromiter((value for key, value in parameter.items() if 'p_BAT2AC_' in key), float)
eta_bat2ac = np.fromiter((value / 100 for key, value in parameter.items() if 'eta_BAT2AC_' in key), float)
# Absolute input and output power in W
p_bat2ac_out = parameter['P_BAT2AC_out'] * p_bat2ac * 1000
p_bat2ac_in = p_bat2ac_out / eta_bat2ac
# Absolute power loss in W
P_l_bat2ac_in = (1 - eta_bat2ac) * p_bat2ac_in
P_l_bat2ac_out = (1 / eta_bat2ac - 1) * p_bat2ac_out
# Polynomial curve fitting parameters of the power loss functions in W
# Based on input power
p = np.polyfit(p_bat2ac_in / parameter['P_BAT2AC_in'] / 1000, P_l_bat2ac_in, 2)
parameter['BAT2AC_a_in'] = p[0]
parameter['BAT2AC_b_in'] = p[1]
parameter['BAT2AC_c_in'] = p[2]
# Based on output power
p = np.polyfit(p_bat2ac, P_l_bat2ac_out, 2)
parameter['BAT2AC_a_out'] = p[0]
parameter['BAT2AC_b_out'] = p[1]
parameter['BAT2AC_c_out'] = p[2]
# BAT2PV conversion pathway
if parameter['Top'] =='PV':
# Create variables for the sampling points and corresponding efficiencies TODO
p_bat2pv = np.fromiter((value for key, value in parameter.items() if 'p_BAT2PV_' in key), float)
eta_bat2pv = np.fromiter((value / 100 for key, value in parameter.items() if 'eta_BAT2PV_' in key), float)
# Absolute input and output power in W
p_bat2pv_out = parameter['P_BAT2PV_out'] * p_bat2pv * 1000
p_bat2pv_in = p_bat2pv_out / eta_bat2pv
# Absolute power loss in W
P_l_bat2pv_in = (1 - eta_bat2pv) * p_bat2pv_in
P_l_bat2pv_out = (1 / eta_bat2pv - 1) * p_bat2pv_out
# Polynomial curve fitting parameters of the power loss functions in W
# Based on input power TODO
p = np.polyfit(p_bat2pv_in / parameter['P_BAT2AC_in'] / 1000, P_l_bat2pv_in, 2)
parameter['BAT2PV_a_in'] = p[0]
parameter['BAT2PV_b_in'] = p[1]
parameter['BAT2PV_c_in'] = p[2]
# Based on output power
p = np.polyfit(p_bat2pv, P_l_bat2pv_out, 2)
parameter['BAT2PV_a_out'] = p[0]
parameter['BAT2PV_b_out'] = p[1]
parameter['BAT2PV_c_out'] = p[2]
# Additional parameters
# Mean battery capacity in kWh
try:
parameter['E_BAT'] = (parameter['E_BAT_usable'] / parameter['eta_BAT'] * 100 + parameter['E_BAT_usable']) / 2
except:
parameter['E_BAT'] = None
# Mean stationary deviation of the charging power in W
try:
parameter['P_PV2BAT_DEV'] = parameter['P_PV2BAT_DEV_IMPORT'] - parameter['P_PV2BAT_DEV_EXPORT']
except:
parameter['P_PV2BAT_DEV'] = None
if parameter['Top'] == 'AC':
parameter['P_AC2BAT_DEV'] = parameter['P_PV2BAT_DEV']
# Mean stationary deviation of the discharging power in W
try:
parameter['P_BAT2AC_DEV'] = parameter['P_BAT2AC_DEV_EXPORT'] - parameter['P_BAT2AC_DEV_IMPORT']
except:
parameter['P_BAT2AC_DEV'] = None
# Time constant for the first-order time delay element in s
try:
parameter['t_CONSTANT'] = (parameter['t_SETTLING'] - round(parameter['t_DEAD'])) / 3
except:
parameter['t_CONSTANT'] = None
# Hysteresis threshold for the recharging of the battery
parameter['SOC_h'] = 0.98
# Feed-in power limit in kW/kWp
parameter['p_ac2g_max'] = 0.7
return parameter
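# ---------------------------------------------------------------------------
# Worked sketch of the loss-curve fit performed above (synthetic numbers,
# purely illustrative): from sampled normalized output powers p and path
# efficiencies eta, the absolute power loss is fitted with a quadratic
# P_loss = a*p**2 + b*p + c.
def _example_loss_fit():
    P_nom_out = 5.0                                              # nominal output power in kW (assumption)
    p = np.array([0.05, 0.1, 0.2, 0.25, 0.3, 0.5, 0.75, 1.0])    # normalized sampling points
    eta = np.array([0.85, 0.90, 0.93, 0.94, 0.95, 0.96, 0.96, 0.95])
    p_out = P_nom_out * p * 1000                                 # absolute output power in W
    P_l_out = (1 / eta - 1) * p_out                              # absolute power loss in W
    a, b, c = np.polyfit(p, P_l_out, 2)                          # loss polynomial (output-power based)
    return a, b, c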
def load_ref_case(fname, name):
"""Loads PV power or Load from the reference cases
:param fname: Path to mat file
:type fname: string
:param name: Identifier for PV Power or Load
:type name: string
:return: Returns PV power or load from the reference case
:rtype: numpy array
"""
with open(fname, 'rb') as f:
a = np.load(f)
data = a[name]
return data
def resample_data_frame(df):
"""Function for resampling data frames
:param df: data frame
:type df: pandas data frame
:return: data frame
:rtype: pandas data frame
"""
df_rs = df.resample('15min').mean()
return df_rs
def transform_dict_to_array(parameter):
"""Function for transforming a dict to an numpy array
:param parameter: dict of system parameters
:type parameter: dict
:return: array of system parameters
:rtype: numpy array
"""
if parameter['Top'] == 'AC':
d = np.array(parameter['E_BAT']) # 0
d = np.append(d, parameter['eta_BAT']) # 1
d = np.append(d, parameter['t_CONSTANT']) # 2
d = np.append(d, parameter['P_SYS_SOC0_DC']) # 3
d = np.append(d, parameter['P_SYS_SOC0_AC']) # 4
d = np.append(d, parameter['P_SYS_SOC1_DC']) # 5
d = np.append(d, parameter['P_SYS_SOC1_AC']) # 6
d = np.append(d, parameter['AC2BAT_a_in']) # 7
d = np.append(d, parameter['AC2BAT_b_in']) # 8
d = np.append(d, parameter['AC2BAT_c_in']) # 9
d = np.append(d, parameter['BAT2AC_a_out']) # 10
d = np.append(d, parameter['BAT2AC_b_out']) # 11
d = np.append(d, parameter['BAT2AC_c_out']) # 12
d = np.append(d, parameter['P_AC2BAT_DEV']) # 13
d = np.append(d, parameter['P_BAT2AC_DEV']) # 14
d = np.append(d, parameter['P_BAT2AC_out']) # 15
d = np.append(d, parameter['P_AC2BAT_in']) # 16
d = np.append(d, parameter['t_DEAD']) # 17
d = np.append(d, parameter['SOC_h']) # 18
if parameter['Top'] == 'DC':
        d = np.array(parameter['E_BAT']) # 0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch import Tensor
import numpy as np
from collections import OrderedDict
# https://discuss.pytorch.org/t/torch-round-gradient/28628/6
class Round_fn(torch.autograd.function.InplaceFunction):
@staticmethod
def forward(ctx, input):
ctx.input = input
return torch.round(input)
@staticmethod
def backward(ctx, grad_output):
grad_input = grad_output.clone()
return grad_input
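# Minimal sketch: Round_fn rounds in the forward pass but lets the gradient
# pass through unchanged (straight-through estimator).
def _example_round_fn():
    x = torch.tensor([0.2, 1.7], requires_grad=True)
    Round_fn.apply(x).sum().backward()
    return x.grad          # tensor([1., 1.]): identity gradient through the rounding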
def softmax_init(bits):
degree = 4
    theta = (bits ** degree) / (bits ** degree).sum()
return theta
"""
@inproceedings{
esser2020learned,
title={LEARNED STEP SIZE QUANTIZATION},
author={<NAME> and <NAME> and <NAME> and <NAME> and <NAME>},
booktitle={International Conference on Learning Representations},
year={2020},
url={https://openreview.net/forum?id=rkgO66VKDS}
}
"""
def grad_scale(x, scale):
yOut = x
yGrad = x * scale
return (yOut-yGrad).detach() + yGrad
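# Minimal sketch: grad_scale leaves the forward value untouched while scaling
# the gradient flowing back into x by `scale` (the LSQ step-size trick).
def _example_grad_scale():
    x = torch.ones(4, requires_grad=True)
    grad_scale(x, 0.25).sum().backward()
    return x.grad          # tensor([0.2500, 0.2500, 0.2500, 0.2500])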
class Q_ReLU(nn.Module):
def __init__(self, act_func=True, inplace=False):
super(Q_ReLU, self).__init__()
self.bits = Parameter(Tensor([32]))
self.act_func = act_func
self.inplace = inplace
self.a = Parameter(Tensor(1))
self.c = Parameter(Tensor(1))
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def initialize_qonly(self, offset, diff):
self.a.data.fill_(np.log(np.exp(offset + diff)-1))
self.c.data.fill_(np.log(np.exp(offset + diff)-1))
def forward(self, x):
if self.act_func:
x = F.relu(x, self.inplace)
if len(self.bits)==1 and self.bits[0]==32:
return x
else:
a = F.softplus(self.a)
c = F.softplus(self.c)
nlvs = torch.pow(2, self.bits) # soft forward
#nlvs = torch.round(bits ** 2) # hard forward
x = F.hardtanh(x / a, 0, 1)
x_bar = Round_fn.apply(x.mul(nlvs-1)).div_(nlvs-1) * c
#x_bar = RoundQuant.apply(x, nlvs) * c
return x_bar
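# Minimal usage sketch (the bit-width, offset and diff values below are
# assumptions): a Q_ReLU soft-quantizing activations to 4-bit levels with a
# learnable clipping range.
def _example_q_relu():
    q = Q_ReLU()
    q.initialize(bits=[4], offset=0.0, diff=6.0)
    x = torch.randn(2, 8)
    return q(x)            # forward pass through the soft-quantized ReLU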
class Q_ReLU6(Q_ReLU):
def __init__(self, act_func=True, inplace=False):
super(Q_ReLU6, self).__init__(act_func, inplace)
def initialize(self, bits, offset, diff):
self.bits = Parameter(Tensor(bits), requires_grad=True)
self.n_lvs = 2 ** self.bits
self.a = Parameter(Tensor(len(self.bits)))
self.c = Parameter(Tensor(len(self.bits)))
if offset + diff > 6:
self.a.data.fill_(np.log(np.exp(6)-1))
self.c.data.fill_(np.log(np.exp(6)-1))
else:
            self.a.data.fill_(np.log(np.exp(offset + diff)-1))
            self.c.data.fill_(np.log(np.exp(offset + diff)-1))
import os
import glob
import pickle
import threading
import time
import numpy as np
import pandas as pd
import scipy as sp
from scipy import io
from scipy.spatial.distance import pdist, cdist, squareform
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from psutil import cpu_percent
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler, normalize
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.metrics import recall_score, f1_score, precision_score
import tsfresh
from tsfresh import select_features
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import (extract_features, feature_calculators,
                                        ComprehensiveFCParameters, EfficientFCParameters)
from tsfresh.feature_extraction.settings import from_columns
def Recovery (DataName): #Recovery function
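    """Recovers intermediate results saved by earlier pipeline steps from the
    Model/Kernel, Model/PCA_Analyses and Model/.Recovery folders.

    :param DataName: name of the data to recover; one of 'D_S_parameters',
        'ExtractedNames', 'SelectedFeatures', 'ReducedFeatures',
        'SODA_parameters_processing_parameters', 'ClassificationPar' or 'ModelPar'
    :type DataName: string
    """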
#Changing Work Folder
add_path1 = "/PCA_Analyses/"
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd ()
working_path = os.getcwd() + '/Model'
PCA_Analyses_path = working_path + add_path1
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
if DataName == 'D_S_parameters':
try:
# Now change to Kernel directory
os.chdir( Kernel_path )
Final_Target = np.genfromtxt('FinalTarget.csv', delimiter = ',')
# Now change to Recovery directory
os.chdir( Recovery_path )
P_N_groups = int(np.load('M_N_groups.npy'))
Output_Id = int(np.load('ID.npy'))
P_N_Ids = int(np.load('N_IDs.npy'))
# Now change to base directory
os.chdir( base_path )
Output = {'FinalTarget': Final_Target,
'M_N_groups': P_N_groups,
'ID': Output_Id,
'N_IDs': P_N_Ids}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("D_S_parameters Recovered!")
return Output
except:
print("D_S_parameters not recovered =(" + '\033[0m')
elif DataName == 'ExtractedNames':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
extracted_names = np.load('extracted_names.npy')
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("ExtractedNames recovered!")
return extracted_names
except:
print('\033[93m' + "ExtractedNames not recovered =(" + '\033[0m')
elif DataName == 'SelectedFeatures':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
Output_Id = int(np.load('ID.npy'))
# Now change to Kernel directory
os.chdir( Kernel_path )
features_filtered_1 = pd.read_csv('features_filtered_' + str(Output_Id) + '.csv')
# Now change to base directory
os.chdir( base_path )
Output = {'FeaturesFiltered': features_filtered_1,
'ID': Output_Id}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("SelectedFeatures recovered!")
return Output
except:
print('\033[93m' + "SelectedFeatures not recovered =(" + '\033[0m')
elif DataName == 'ReducedFeatures':
try:
# Now change to Recovery directory
os.chdir( Recovery_path )
Output_Id = int(np.load('ID.npy'))
# Now change to PCA Analyses directory
os.chdir( PCA_Analyses_path )
features_reduzidas = np.genfromtxt("features_reduzidas_" + str(Output_Id) + ".csv", delimiter=',')
# Now change to base directory
os.chdir( base_path )
Output = {'ReducedFeatures': features_reduzidas,
'ID': Output_Id}
#retval = os.getcwd()
#print ("Final working directory %s" % retval)
print("ReducedFeatures recovered!")
return Output
except:
print('\033[93m' + "ReducedFeatures not recovered =(" + '\033[0m')
elif DataName == 'SODA_parameters_processing_parameters':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
Output_Id = int(np.load('ID.npy'))
processing_parameters = np.load(('processing_parameters.npy'), allow_pickle=True)
processing_parameters = processing_parameters.tolist()
distances = np.load(('distances.npy'), allow_pickle=True)
distances = distances.tolist()
min_granularity = np.load('Min_g.npy')
max_granularity = np.load('Max_g.npy')
pace = np.load('Pace.npy')
Output = {'Distances': distances,
'Min_g': min_granularity,
'Max_g': max_granularity,
'Pace': pace,
'ID': Output_Id}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("SODA_parameters_processing_parameters recovered!")
return Output, processing_parameters
except:
print('\033[93m' + "SODA_parameters_processing_parameters not recovered =(" + '\033[0m')
elif DataName == 'ClassificationPar':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
Output_Id = int(np.load('ID.npy'))
pace = np.load("Pace.npy")
distances = np.load(('distances.npy'), allow_pickle=True)
distances = distances.tolist()
define_percent = np.load('define_percent.npy')
Output = {'Percent': define_percent,
'Distances': distances,
'Pace': pace,
'ID': Output_Id}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("ClassificationPar recovered!")
return Output
except:
print('\033[93m' + "ClassificationPar not recovered =(" + '\033[0m')
elif DataName == 'ModelPar':
try:
# Now change to base directory
os.chdir( Recovery_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
# load the model from disk
model = pickle.load(open("Model.sav", 'rb'))
X_test = np.load('X_test.npy')
y_test = np.load('y_test.npy')
Output = {'Model': model,
'X': X_test,
'Y': y_test}
# Now change to base directory
os.chdir( base_path )
#retval = os.getcwd()
#print ("Current working directory %s" % retval)
print("ModelPar recovered!")
return Output
except:
print('\033[93m' + "ModelPar not recovered =(" + '\033[0m')
else:
print('\033[93m' + "Wrong name lad/lass, please check de Recovery input" + '\033[0m')
def scale(X, x_min, x_max):  # Min-max normalization to the range [x_min, x_max]
    nom = (X - X.min(axis=0)) * (x_max - x_min)
    denom = X.max(axis=0) - X.min(axis=0)
    # Guard against a zero range (works for scalars and arrays alike)
    denom = np.where(denom == 0, 1, denom)
    return x_min + nom/denom
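# Example: scale(np.array([1.0, 2.0, 3.0]), 0, 1) -> array([0. , 0.5, 1. ])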
def format_func(value, tick_number):  # Plot formatter: label ticks as X1, X50, X100, ..., X850
    N = int(value)
    if N == 0:
        return "X1"
    elif 50 <= N <= 850 and N % 50 == 0:
        return "X" + str(N)
def DataSlicer (Output_Id, id_per_group=20, Choice='All'):
    ''' Function to slice a time series dataset into several smaller datasets
    to save RAM during model execution
Parameters:
------
Output_Id : int
identifier for the dataset
id_per_group: int, optional
number of time series per division (default is 20)
Choice : str, optional
option of data, can be ['Main Data', 'Eminence Data', 'All'] (default is 'All')
Returns:
-------
dictionary, with the following items
'FinalTarget': np.array
targets of the entire dataset
'M_N_groups': int
number of groups
'ID': int
identifier for the dataset
'N_IDs': int
number of time series
'''
print('Data Slicer Control Output')
print('----------------------------------')
#Changing Work Folder
add_path1 = "/Input/"
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd()
working_path = os.getcwd()
Input_path = working_path + add_path1
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
# Now change to Input directory
os.chdir( Input_path )
# Loading the required input
Full_data = np.genfromtxt('Output_' + str(int(Output_Id)) + '.csv', delimiter=',')
#E_data = np.genfromtxt('Eminence_Data_' + str(Output_Id) + '.csv', delimiter=',')
columns = Full_data.shape[1]
data = Full_data[:,2:columns-1]
info = Full_data[:,0:2]
    # Center the data and scale it to a standard deviation of 1
#scaler = MinMaxScaler(feature_range=(-1,1)).fit(data)
#data = scaler.transform(data)
P_data = np.concatenate((info,data), axis=1)
Target = Full_data[:,columns-1]
print('Full Matrix: ' + str(Full_data.shape))
print('Main Data: ' + str(P_data.shape))
print('Labels: ' + str(Target.shape))
#print('Eminence Data: ' + str(E_data.shape))
# Now change to Kernel directory
os.chdir( Kernel_path )
#pickle.dump(scaler, open('norm.sav', 'wb'))
###______________________________________________________________________###
### ProDiMes Slicing Parameters ###
P_N_Ids = int(np.amax(P_data,axis=0)[0])
P_N_voos = int(np.amax(P_data,axis=0)[1])
P_last_group = int(P_N_Ids % id_per_group)
if P_last_group != 0:
P_N_groups = int((P_N_Ids / id_per_group) + 1)
else:
P_N_groups = int (P_N_Ids / id_per_group)
### Formating Final Target ###
Final_Target = np.zeros((P_N_Ids))
p_n_good = 0
p_n_bad = 0
aquired_time = P_N_Ids*P_N_voos/1000
for i in range (P_N_Ids):
if Target [i*P_N_voos] == 0:
p_n_good += 1
else:
p_n_bad += 1
Final_Target[i] = Target [i*P_N_voos]
print ('Total Number of Ids: ' + str(P_N_Ids))
print ('Number of healthy Ids: ' + str(p_n_good))
    print ('Number of faulty Ids: ' + str(p_n_bad))
print ('Total lifetime: ' + str(aquired_time) + ' s')
    print ('Main data Number of measurements: ' + str(P_N_voos))
print ('Main data Number of groups: ' + str(P_N_groups ))
print ('Main data Last group: ' + str(P_last_group ))
print ('___________________________________________')
###______________________________________________________________________###
### Eminences Slicing Parameters ###
#E_N_Ids = int(np.amax(E_data,axis=0)[0] - np.amax(P_data,axis=0)[0])
#E_N_voos = int(np.amax(E_data,axis=0)[1]) + 1
#E_last_group = int(E_N_Ids % id_per_group)
#if (E_last_group != 0):
# E_N_groups = int((E_N_Ids / id_per_group) + 1)
#else:
# E_N_groups = int (E_N_Ids / id_per_group)
#print ('Eminences Number of Ids: ' + str(E_N_Ids ))
#print ('Eminences Number of flights: ' + str(E_N_voos ))
#print ('Eminences Number of groups: ' + str(E_N_groups ))
#print ('Eminences Last group: ' + str(E_last_group ))
#np.savetxt(('Target_' + str(int(Output_Id)) + '.csv'), Final_Target, delimiter = ',')
###______________________________________________________________________###
### Slicing Prodimes Data ###
if (Choice =='Main Data') or (Choice =='All'):
for i in range (P_N_groups):
Data = np.zeros(((id_per_group * P_N_voos),columns-1))
for j in range (id_per_group):
for k in range (P_N_voos):
if (i < (P_N_groups - 1)):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
elif (P_last_group == 0) and (i == (P_N_groups - 1)):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
if (P_last_group != 0) and (i == (P_N_groups - 1)):
Data = np.zeros(((P_last_group * P_N_voos),columns-1))
for j in range (P_last_group):
for k in range (P_N_voos):
Data[(j * P_N_voos) + k,:] = P_data [(((i * id_per_group + j) * P_N_voos) + k ) ,:]
np.savetxt(('Data_' + str(i) + '.csv'), Data, delimiter = ',')
###______________________________________________________________________###
### Slicing Eminences ###
'''
if (Choice == 'Eminence Data') or (Choice =='All'):
for i in range (E_N_groups):
Data = np.zeros(((id_per_group * E_N_voos),columns-3))
for j in range (id_per_group):
for k in range (E_N_voos):
if (i < (E_N_groups - 1)):
Data[(j * E_N_voos) + k,:] = E_data [(((i * id_per_group + j) * E_N_voos) + k ) ,:]
if (E_last_group != 0) and (i == (E_N_groups - 1)):
Data = np.zeros(((E_last_group * E_N_voos),columns-3))
for j in range (E_last_group):
for k in range (E_N_voos):
Data[(j * E_N_voos) + k,:] = E_data [(((i * id_per_group + j) * E_N_voos) + k ) ,:]
np.savetxt(('Eminence_' + str(i) + '.csv'), Data, delimiter = ',')
'''
np.savetxt(('FinalTarget.csv'), Final_Target, delimiter = ',')
# Now change to Recovery directory
os.chdir( Recovery_path )
np.save(('M_N_groups.npy'), P_N_groups)
np.save(('ID.npy'), Output_Id)
np.save(('N_IDs.npy'), P_N_Ids)
# Now change back to Base directory
os.chdir( base_path )
Output = {'FinalTarget': Final_Target,
'M_N_groups': P_N_groups,
'ID': Output_Id,
'N_IDs': P_N_Ids}
return Output
def TSFRESH_Extraction(D_S_parameters):
''' Function to extract features of the time series using
TSFRESH method
Parameters:
------
D_S_parameters : dictionary, with the following items
'FinalTarget': np.array
targets of the entire dataset
'M_N_groups': int
number of groups
'ID': int
identifier for the dataset
'N_IDs': int
number of time series
Returns:
-------
list
a list of strings with the names of the features extracted by TSFRESH
'''
print(' ')
print('TSFRESH Control Output')
print('----------------------------------')
#Changing Work Folder
add_path2 = "/Kernel/"
add_path3 = "/.Recovery/"
base_path = os.getcwd()
working_path = os.getcwd()
Kernel_path = working_path + add_path2
Recovery_path = working_path + add_path3
# Now change to Kernel directory
os.chdir( Kernel_path )
###______________________________________________________________________###
### Feature Extraction ###
#E_N_groups = np.load('E_N_groups.npy')
P_N_groups = D_S_parameters['M_N_groups']
for i in range(P_N_groups):
Data = np.genfromtxt('Data_' + str(i) + '.csv', delimiter=',')
data = pd.DataFrame(Data, columns= ['id','time'] + ['Sensor_' + str(x) for x in range(1,(Data.shape[1]-1))])
Data_extracted_features = extract_features(data,column_id = "id", column_sort="time",n_jobs=4,disable_progressbar=True)
extracted_names = list(Data_extracted_features.columns)
np.savetxt('Data_Features_' + str(i) + '.csv', Data_extracted_features.values, delimiter=',')
#for i in range(E_N_groups):
# data = pd.DataFrame(np.genfromtxt('Eminence_' + str(i) + '.csv', delimiter=','),
# columns= ['id','time','sensor_1','sensor_2','sensor_3','sensor_4',
# 'sensor_5','sensor_6','sensor_7'])
# extracted_features = extract_features(data, column_id = "id", column_sort="time")
# np.savetxt('Eminence_Features_' + str(i) + '.csv', extracted_features, delimiter=',')
# Now change to Recovery directory
os.chdir( Recovery_path )
np.save('extracted_names.npy',extracted_names)
# Now change back to base directory
os.chdir( base_path )
print("Number of Extracted Features: {}".format(len(extracted_names)))
return extracted_names
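# --- Hedged illustration (not part of the original pipeline) ----------------
# Minimal sketch of the long-format frame that tsfresh's extract_features
# expects, mirroring the 'id'/'time'/'Sensor_x' layout built above. The toy
# values and MinimalFCParameters are assumptions chosen only to keep the
# example small and fast.
if __name__ == "__main__":
    import pandas as pd
    from tsfresh import extract_features
    from tsfresh.feature_extraction import MinimalFCParameters

    toy = pd.DataFrame({
        'id':       [1, 1, 1, 2, 2, 2],
        'time':     [0, 1, 2, 0, 1, 2],
        'Sensor_1': [0.1, 0.3, 0.2, 1.0, 1.2, 0.9],
    })
    toy_features = extract_features(toy, column_id="id", column_sort="time",
                                    default_fc_parameters=MinimalFCParameters(),
                                    disable_progressbar=True)
    print(toy_features.shape)  # one row per time-series id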
def tsfresh_chucksize(full_data,output_id):
# Loading the required input
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
n_measures = int(max(info[:,1]))
target = full_data[::n_measures,-1]
u, idx = np.unique(info[:,0], return_index=True)
df = pd.DataFrame(np.concatenate((info,data), axis=1), columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
with open('Kernel/valid_features_dict.pkl', 'rb') as f:
kind_to_fc_parameters = pickle.load(f)
columns = []
for i,x in enumerate(kind_to_fc_parameters):
aux = pd.DataFrame(np.hstack((df.loc[:,:'time'].values,
df.loc[:,x].values.reshape((-1,1)))),
columns=['id','time',x])
aux2 = tsfresh.extract_features(aux, column_id="id", column_sort="time",
default_fc_parameters=kind_to_fc_parameters[x],
#chunksize=3*24000,
n_jobs=4,
disable_progressbar=False)
for j in range(len(aux2.columns.tolist())):columns.append(aux2.columns.tolist()[j])
if i == 0:
extracted_features = np.array(aux2.values)
else:
extracted_features = np.hstack((extracted_features,aux2.values))
final_features = pd.DataFrame(extracted_features,columns=columns)
filtered_features = select_features(final_features, target,n_jobs=4)
filtered_features.sort_index(inplace = True)
with open('Kernel/final_target_' + output_id + '.pkl', 'wb') as f:
pickle.dump(target, f)
# Extracting the selected features dictionary from pandas data frame
kind_to_fc_parameters = tsfresh.feature_extraction.settings.from_columns(filtered_features)
# Saving dictionary for the on-line phase
with open('Kernel/kind_to_fc_parameters.pkl', 'wb') as f:
pickle.dump(kind_to_fc_parameters, f)
with open('Kernel/columns.pkl', 'wb') as f:
pickle.dump(filtered_features.columns.to_list(), f)
Output = {'FeaturesFiltered': filtered_features,
'FinalTarget': target,
'ID': int(output_id)}
return Output
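# --- Hedged illustration (not part of the original pipeline) ----------------
# from_columns() rebuilds a kind_to_fc_parameters mapping from tsfresh column
# names; this is the structure pickled above in 'Kernel/kind_to_fc_parameters.pkl'.
# The column names below are made up for illustration.
if __name__ == "__main__":
    from tsfresh.feature_extraction.settings import from_columns
    cols = ['Sensor_1__maximum', 'Sensor_1__autocorrelation__lag_2']
    print(from_columns(cols))
    # roughly: {'Sensor_1': {'maximum': None, 'autocorrelation': [{'lag': 2}]}}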
def tsfresh_chucksize_test(output_id):
# Loading the required input
full_data = np.genfromtxt('Input/Output_' + output_id + '.csv',
delimiter=',')
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
n_measures = int(max(info[:,1]))
target = full_data[::n_measures,-1]
u, idx = np.unique(info[:,0], return_index=True)
df = pd.DataFrame(np.concatenate((info,data), axis=1), columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
extracted_features = tsfresh.extract_features(df, column_id="id", column_sort="time", n_jobs=4,default_fc_parameters=EfficientFCParameters())
return extracted_features
def tsfresh_NaN_filter(output_id,fft=False):
"""
Given an output_id, this function
removes all NaN features from the
TSFRESH extraction.
Inputs:
-output_id: str() -> the given id
-fft: True or False -> filter fft features
Outputs:
- Saves, via pickle, in ./Kernel/
an extraction dictionary without
the features that generate NaN
"""
df = tsfresh_chucksize_test(output_id)
features = df.columns
nan_columns = []
for col in features:
data = df.loc[:,col].values
nan_test = np.isnan(data)
aux = col.split('__')[1].split('_')[0]
if aux == 'fft' and fft == True:
nan_columns.append(col)
elif any(nan == True for nan in nan_test):
nan_columns.append(col)
print('Percentage of invalid features: ', len(nan_columns)*100/len(features))
valid_features = []
for i in range(len(features)):
if features[i] not in nan_columns:
valid_features.append(features[i])
print('Percentage of valid features: ', len(valid_features)*100/len(features))
valid_features_dict = from_columns(valid_features)
with open('Kernel/valid_features_dict.pkl', 'wb') as f:
pickle.dump(valid_features_dict, f)
return
def tsfresh_ensemble(output_id):
# Loading the required input
full_data = np.genfromtxt('Input/Output_{}.csv'.format(output_id),
delimiter=',')
L, W = full_data.shape
data = full_data[:,2:-1]
info = full_data[:,0:2]
n_measures = int(max(info[:,1]))
n_timeseries = int(max(info[:,0]))
label = full_data[::n_measures,-1]
scaler = MinMaxScaler(feature_range=(-1,1)).fit(data)
data = scaler.transform(data)
with open('Kernel/scaler.pkl', 'wb') as f:
pickle.dump(scaler, f)
full_data = np.concatenate((info,data), axis=1)
divisions = 1
idx = np.random.choice(range(n_timeseries),n_timeseries,replace=False)
idx_division = np.array_split(idx,divisions)
for i,div in enumerate(idx_division):
div.sort()
indices = [d2 for d1 in div for d2 in range(d1*n_measures,(d1+1)*n_measures)]
ensemble_data = full_data[indices,:]
ensemble_label = label[div]
df = pd.DataFrame(ensemble_data, columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,W-2)])
extracted_features = tsfresh.extract_features(df, column_id="id", column_sort="time", n_jobs=0)
features = extracted_features.columns
nan_columns = []
for col in features:
nan_test = np.isnan(extracted_features.loc[:,col].values)
if any(nan == True for nan in nan_test):
nan_columns.append(col)
print(' - Percentage of invalid features: ', len(nan_columns)*100/len(features))
cleaned_features = features.drop(nan_columns)
cleaned_df = extracted_features[cleaned_features]
filtered_df, relevance_table = selection.select_features(cleaned_df, ensemble_label, n_jobs=0)
relevance_table = relevance_table.fillna(value=100)
if i == 0:
relevance_table_final = relevance_table.copy()
extracted_features_final = extracted_features.copy()
else:
relevance_table_final.p_value = relevance_table_final.p_value + relevance_table.p_value
extracted_features_final = pd.concat([extracted_features_final,extracted_features], axis=0)
extracted_features_final = extracted_features_final.sort_index()
relevance_table_final.p_value = relevance_table_final.p_value/divisions
relevance_table_final.relevant = relevance_table_final.p_value < 0.0029
relevant_features = relevance_table_final[relevance_table_final.relevant].feature
extracted_features_final = extracted_features_final[relevant_features]
kind_to_fc_parameters = from_columns(relevant_features)
with open('Kernel/kind_to_fc_parameters.pkl', 'wb') as f:
pickle.dump(kind_to_fc_parameters, f)
with open('Kernel/columns.pkl', 'wb') as f:
pickle.dump(relevant_features.keys().tolist(), f)
with open('Kernel/final_target_{}.pkl'.format(output_id), 'wb') as f:
pickle.dump(label, f)
Output = {'FeaturesFiltered': extracted_features_final,
'FinalTarget': label,
'ID': int(output_id)}
return Output
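# --- Hedged illustration (not part of the original pipeline) ----------------
# How the ensemble split above maps shuffled time-series ids to row indices
# when every id contributes n_measures consecutive rows. The toy sizes are
# assumptions.
if __name__ == "__main__":
    import numpy as np
    n_timeseries, n_measures, divisions = 6, 3, 2
    idx = np.random.choice(range(n_timeseries), n_timeseries, replace=False)
    for div in np.array_split(idx, divisions):
        div.sort()
        rows = [r for d in div for r in range(d * n_measures, (d + 1) * n_measures)]
        print(div, '->', rows)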
def dynamic_tsfresh (total_data, mode='prototype'):
''' Function for ONLINE mode
This function reads the data from the acquisition module and executes a
dynamic and lighter version of TSFRESH.
Parameters:
------
total_data : np.array
raw acquisition matrix (id, time, sensor columns and target)
mode: str, optional
execution mode (default is 'prototype')
Returns:
-------
tuple
the imputed feature DataFrame (in the trained column order) and the raw extracted feature matrix
'''
data = total_data[:,2:-1]
info = total_data[:,0:2]
# Normalizing
scaler = MinMaxScaler()
data = scaler.fit_transform(data)
total_data = np.concatenate((info,data), axis=1)
# ----------------------------------------------------------------- #
df = pd.DataFrame(total_data, columns= ['id','time'] +
['Sensor_' + str(x) for x in range(1,(total_data.shape[1]-1))])
# Loading feature dictionary
with open('Kernel/kind_to_fc_parameters.pkl', 'rb') as f:
kind_to_fc_parameters = pickle.load(f)
# Loading column names
with open('Kernel/columns.pkl', 'rb') as f:
original_columns = pickle.load(f)
columns = []
for i,x in enumerate(kind_to_fc_parameters):
aux = pd.DataFrame(np.hstack((df.loc[:,:'time'].values,
df.loc[:,x].values.reshape((-1,1)))),
columns=['id','time',x])
aux2 = tsfresh.extract_features(aux, column_id="id", column_sort="time",
default_fc_parameters=kind_to_fc_parameters[x],#chunksize=24000,
n_jobs=0
#disable_progressbar=True
)
for j in range(len(aux2.columns.tolist())):columns.append(aux2.columns.tolist()[j])
if i == 0:
extracted_features = np.array(aux2.values)
else:
extracted_features = np.hstack((extracted_features,aux2.values))
final_features = pd.DataFrame(extracted_features,columns=columns)
final_features = final_features[original_columns]
return impute(final_features), extracted_features
def test_tsfresh (SelectedFeatures,extracted_features):
tsf_offline = SelectedFeatures['FeaturesFiltered'].values
tsf_online = extracted_features.values
equal = np.equal(tsf_offline,tsf_online)
n_errors = 0
error_size = []
for i in range(equal.shape[0]):
for j in range(equal.shape[1]):
if equal[i,j]== False:
n_errors += 1
error_size.append(100*(tsf_offline[i,j]-tsf_online[i,j])/tsf_online[i,j])
error_size = pd.DataFrame(error_size)
error_size = impute(error_size)
print('Percentage of mismatched samples (%): ',n_errors*100/(equal.shape[0]*equal.shape[1]))
print('Mean percentage error (%): ',np.mean(error_size[0]))
print('Standard deviation (%): ',np.std(error_size[0]))
def PCA_calc (SelectedFeatures,N_PCs,Chose = 'Analytics',it=0):
''' Function to project and execute a Principal Components Analysis
Parameters:
------
SelectedFeatures : dictionary, with the following items
'FeaturesFiltered': pd.DataFrame
contains the output data of TSFRESH, i.e., the dataset with the features selected by the hypothesis test
'FinalTarget': np.array
targets of the entire dataset
'ID': int
identifier for the dataset
N_PCs: int
number of Principal Components to maintain
Chose: str
type of analysis, can be ['Test', 'Calc', 'Specific', 'Analytics']
(default is 'Analytics')
Returns:
-------
dictionary, with the following items
'ReducedFeatures': np.array
contains the output data of the PCA, i.e., the dataset with the Principal Components projected by PCA
'ID': int
identifier for the dataset
'''
if (Chose == 'Test') or (Chose == 'Calc') or (Chose == 'Specific') or (Chose == 'Analytics'):
#Changing Work Folder
add_path1 = "/PCA_Analyses/"
add_path2 = "/Input/"
add_path3 = "/Kernel/"
add_path4 = "/PCA_Analyses/Figures/"
base_path = os.getcwd()
working_path = os.getcwd()
PCA_Analyses_path = working_path + add_path1
Input_path = working_path + add_path2
Kernel_path = working_path + add_path3
PCA_Figures_path = working_path + add_path4
# Now change to PCA Figures directory
os.chdir( Kernel_path )
print(' ')
print('PCA Control Output')
print('----------------------------------')
Output_Id = SelectedFeatures['ID']
features = SelectedFeatures['FeaturesFiltered']
Target = SelectedFeatures['FinalTarget']
selected_names = list(features.columns)
#Center the data and scale them to unit standard deviation
scaler = StandardScaler().fit(features)
features_padronizadas = scaler.transform(features)
#features_padronizadas = pd.DataFrame(features_padronizadas)
pickle.dump(scaler, open('pca_scaler.sav', 'wb'))
pca= PCA(n_components = N_PCs)
pca.fit(features_padronizadas)
# save the model to disk
pickle.dump(pca, open('pca.sav', 'wb'))
variacao_percentual_pca = np.round(pca.explained_variance_ratio_ * 100, decimals = 2)
# Now change to PCA Figures directory
fig = plt.figure(figsize=[16,8])
ax = fig.subplots(1,1)
ax.bar(x=['PC' + str(x) for x in range(1,(N_PCs+1))],height=variacao_percentual_pca[0:N_PCs])
ax.set_ylabel('Percentage of Variance Held',fontsize=27)
ax.set_xlabel('Principal Components',fontsize=20)
ax.tick_params(axis='x', labelsize=22)
ax.tick_params(axis='y', labelsize=22)
ax.grid()
#plt.show()
fig.savefig('Percentage_of_Variance_Held__{}__{}.png'.format(Output_Id,it), bbox_inches='tight')
print('Variation maintained: %.2f' % variacao_percentual_pca.sum())
print(' ')
if (Chose != 'Test'):
features_reduzidas = pca.transform(features_padronizadas)
print('Filtered Features')
print('-' * 20)
print(np.size(features_padronizadas,0))
print(np.size(features_padronizadas,1))
print('-' * 20)
print('Reduced Features')
print('-' * 20)
print(np.size(features_reduzidas,0))
print(np.size(features_reduzidas,1))
if (Chose != 'Test'):
### Feature analysis ###
eigen_matrix = np.array(pca.components_)
eigen_matrix = pow((pow(eigen_matrix,2)),0.5) #take absolute values (flip negative loadings)
for i in range (eigen_matrix.shape[0]):
LineSum = sum(eigen_matrix[i,:])
for j in range (eigen_matrix.shape[1]):
eigen_matrix[i,j] = ((eigen_matrix[i,j]*100)/LineSum)
if Chose == 'Specific':
### Specific analysis ###
fig = plt.figure(figsize=[16,int(8*N_PCs)])
fig.suptitle('Contribution percentage per PC', fontsize=16)
ax = fig.subplots(int(N_PCs),1)
for i in range (int(N_PCs)):
s = eigen_matrix[i,:]
ax[i].bar(x=range(0,(eigen_matrix.shape[1])),height=s)
ax[i].set(xlabel='Features', ylabel='Contribution Percentage', title = 'PC ' + str(i+1))
ax[i].grid()
# Hide x labels and tick labels for top plots and y ticks for right plots.
for axs in ax.flat:
axs.label_outer()
plt.show()
fig.savefig('Contribution_Percentage_Per_PC_{}.png'.format(Output_Id), bbox_inches='tight')
if (Chose == 'Analytics'):
### General analysis ###
weighted_contribution = np.zeros((2,eigen_matrix.shape[1]))
for i in range (eigen_matrix.shape[1]):
NumeratorSum = 0
for j in range (N_PCs):
NumeratorSum += eigen_matrix[j,i] * variacao_percentual_pca[j]
weighted_contribution[0,i] = NumeratorSum / sum(variacao_percentual_pca)
df_weighted_contribution = pd.DataFrame(weighted_contribution,columns=selected_names)
df_weighted_contribution = df_weighted_contribution.drop([1])
df_weighted_contribution = df_weighted_contribution.sort_values(by=0, axis=1, ascending=False)
#pd.set_option('display.max_rows', len(df_weighted_contribution))
#print(type(df_weighted_contribution))
#print(df_weighted_contribution.head())
#pd.reset_option('display.max_rows')
#Creating separate DataFrames for sensor and feature contributions
sensors_names = [None] * int(df_weighted_contribution.shape[1])
features_names = [None] * int(df_weighted_contribution.shape[1])
general_features = [None] * int(df_weighted_contribution.shape[1])
for i, names in zip(range (df_weighted_contribution.shape[1]), df_weighted_contribution.columns):
c = '__'
words = names.split(c)
sensors_names[i] = words[0]
general_features[i]= words[1]
features_names[i] = c.join(words[1:])
#print(names)
#print(words)
#print(sensors_names[i])
#print(features_names[i])
#print(50*'-')
unique_sensors_names = np.ndarray.tolist(np.unique(np.array(sensors_names)))
unique_general_feature = np.ndarray.tolist(np.unique(np.array(general_features)))
unique_features_names = np.ndarray.tolist(np.unique(np.array(features_names)))
sensors_contribution = pd.DataFrame (np.zeros((2,np.shape(unique_sensors_names)[0])), columns=unique_sensors_names)
general_features_contribution = pd.DataFrame (np.zeros((2,np.shape(unique_general_feature)[0])), columns=unique_general_feature)
features_contribution = pd.DataFrame (np.zeros((2,np.shape(unique_features_names)[0])), columns=unique_features_names)
sensors_contribution = sensors_contribution.drop([1])
general_features_contribution = general_features_contribution.drop([1])
features_contribution = features_contribution.drop([1])
# For the output formatting
"""
unique_sensors_names = np.ndarray.tolist(np.unique(np.array(sensors_names)))
unique_features_names = np.ndarray.tolist(np.unique(np.array(features_names)))
sensor_dt = np.transpose(np.vstack((unique_sensors_names,np.asarray(np.zeros(np.shape(unique_sensors_names)[0]),object))))
feature_dt = np.transpose(np.vstack((unique_features_names,np.asarray(np.zeros(np.shape(unique_features_names)[0]),object))))
sensors_contribution = pd.DataFrame(sensor_dt,columns = ['Sensor','Contribution'])
features_contribution = pd.DataFrame(feature_dt,columns = ['Feature','Contribution'])
"""
#print(sensors_contribution.head())
#print(features_contribution.head())
#Creating dictionaries from the DataFrame orientation
"""
Creates a mapping from kind names to fc_parameters objects
(which are itself mappings from feature calculators to settings)
to extract only the features contained in the columns.
To do so, for every feature name in columns this method
1. split the column name into col, feature, params part
2. decide which feature we are dealing with (aggregate with/without params or apply)
3. add it to the new name_to_function dict
4. set up the params
:param columns: containing the feature names
:type columns: list of str
:param columns_to_ignore: columns which do not contain tsfresh feature names
:type columns_to_ignore: list of str
:return: The kind_to_fc_parameters object ready to be used in the extract_features function.
:rtype: dict
"""
weighted_contribution_dic = {}
for col in df_weighted_contribution.columns:
# Split according to our separator into <col_name>, <feature_name>, <feature_params>
parts = col.split('__')
n_parts = len(parts)
if n_parts == 1:
raise ValueError("Splitting of columnname {} resulted in only one part.".format(col))
kind = parts[0]
feature = c.join(parts[1:])
feature_name = parts[1]
if kind not in weighted_contribution_dic:
weighted_contribution_dic[kind] = {}
if not hasattr(feature_calculators, feature_name):
raise ValueError("Unknown feature name {}".format(feature_name))
sensors_contribution.loc[0,kind] += df_weighted_contribution.loc[0,col]
general_features_contribution.loc[0,feature_name] += df_weighted_contribution.loc[0,col]
features_contribution.loc[0,feature] += df_weighted_contribution.loc[0,col]
weighted_contribution_dic[kind][feature] = df_weighted_contribution.loc[0,col]
# End of the tsfresh stolen function
"""
sensors_dic = {}
for i in range(len(unique_sensors_names)):
sensors_dic[unique_sensors_names[i]] = i
features_dic = {}
for i in range(len(unique_features_names)):
features_dic[unique_features_names[i]] = i
#Suming the contibution for Sensors and Features
for i in range(df_weighted_contribution.shape[0]):
names = df_weighted_contribution.loc[i,'tsfresh_info']
c = '__'
words = names.split(c)
S= words[0]
F= c.join(words[1:])
sensors_contribution.loc[sensors_dic[S],'Contribution'] += df_weighted_contribution.loc[i,'Contribution']
features_contribution.loc[features_dic[F],'Contribution'] += df_weighted_contribution.loc[i,'Contribution']
sensors_contribution = sensors_contribution.sort_values(by=['Contribution'], ascending=False)
features_contribution = features_contribution.sort_values(by=['Contribution'], ascending=False)
"""
features_contribution = features_contribution.sort_values(by=0, axis=1, ascending=False)
general_features_contribution = general_features_contribution.sort_values(by=0, axis=1, ascending=False)
features_indexes = [x for x in range(1,(features_contribution.shape[0])+1)]
general_features_indexes = [x for x in range(1,(general_features_contribution.shape[0])+1)]
features_contribution = features_contribution.set_index(pd.Index(features_indexes))
general_features_contribution = general_features_contribution.set_index(pd.Index(general_features_indexes))
sorted_sensors_contribution = sensors_contribution.values[0,:]
sorted_features_contribution = features_contribution.values[0,:]
sorted_general_features_contribution = general_features_contribution.values[0,:]
#Plotting sensor contribution results
fig = plt.figure(figsize=[16,8])
#fig.suptitle('Sensors Weighted Contribution Percentage', fontsize=16)
ax = fig.subplots(1,1)
s = sorted_sensors_contribution[:]
ax.bar(x=['Voltage','Current'],height=s)
plt.ylabel('Relevance Percentage',fontsize = 20)
plt.xlabel('Sensors',fontsize = 20)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=18)
ax.grid()
plt.show()
fig.savefig('Sensor_Weighted_Contribution_Percentage_{}.png'.format(Output_Id), bbox_inches='tight')
#Plotting feature contribution results
fig = plt.figure(figsize=[16,8])
#fig.suptitle('Features Weighted Contribution Percentage', fontsize=16)
ax = fig.subplots(1,1)
s = sorted_features_contribution[:]
ax.bar(x=range(0,(sorted_features_contribution.shape[0])),height=s)
plt.ylabel('Relevance Percentage',fontsize = 20)
plt.xlabel('Features',fontsize = 20)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=18)
ax.xaxis.set_major_locator(plt.MultipleLocator(50))
ax.xaxis.set_minor_locator(plt.MultipleLocator(50))
ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax.grid()
plt.show()
fig.savefig('Features_Weighted_Contribution_Percentage_{}.png'.format(Output_Id), bbox_inches='tight')
### General analysis for the 20 best complete features ###
fig = plt.figure(figsize=[16,8])
#fig.suptitle('Best Features Weighted Contribution Percentage', fontsize=16)
#print('Porcentagem de pertinência: ', np.sum(sorted_features_contribution[0:140]))
#print('Number of Selected Features: ', sorted_features_contribution.shape[0])
ax = fig.subplots(1,1)
s = sorted_features_contribution[0:20]
ax.bar(x=['X' + str(x) for x in range(1,(20+1))],height=s)
plt.ylabel('Relevance Percentage',fontsize = 20)
plt.xlabel('Features',fontsize = 20)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=18)
ax.grid()
plt.show()
fig.savefig('{}th_Best_Features_Weighted_Contribution_Percentage_{}.png'.format(20,Output_Id), bbox_inches='tight')
### General analysis for the 20 best general features ###
fig = plt.figure(figsize=[16,8])
#fig.suptitle('Best Features Weighted Contribution Percentage', fontsize=16)
#print('Porcentagem de pertinência: ', np.sum(sorted_features_contribution[0:140]))
#print('Number of Selected Features: ', sorted_features_contribution.shape[0])
ax = fig.subplots(1,1)
s = sorted_features_contribution[0:20]
ax.bar(x=['X' + str(x) for x in range(1,(20+1))],height=s)
plt.ylabel('Relevance Percentage',fontsize = 20)
plt.xlabel('Features',fontsize = 20)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=18)
ax.grid()
ax.set_ylim([s[-1]-0.05,s[0]+0.05])
plt.show()
fig.savefig('{}th_Best_Features_Weighted_Contribution_Percentage_{}_zoom.png'.format(20,Output_Id), bbox_inches='tight')
#Plotting the data of the most relevant sensor with the best features
sensors_contribution.values[:,0]
name_1 = df_weighted_contribution.columns[0]
name_2 = df_weighted_contribution.columns[1]
name_3 = df_weighted_contribution.columns[2]
#pd.set_option('display.max_columns', len(features))
#print(features)
#pd.reset_option('display.max_columns')
x = features.loc[:,name_1].values
y = features.loc[:,name_2].values
z = features.loc[:,name_3].values
data_saida = np.array([x, y, z]).T
np.savetxt('atributos.csv', data_saida, delimiter=',')
x = scale(x,-1,1)
y = scale(y,-1,1)
z = scale(z,-1,1)
x_bom=[]
x_ruim=[]
y_bom=[]
y_ruim=[]
z_bom=[]
z_ruim=[]
for i in range(len(Target)):
if Target[i] == 0:
x_bom.append(x[i])
y_bom.append(y[i])
z_bom.append(z[i])
if Target[i] == 1:
x_ruim.append(x[i])
y_ruim.append(y[i])
z_ruim.append(z[i])
os.chdir( base_path )
#np.savetxt('x_bom.csv', x_bom, delimiter=',')
#np.savetxt('x_ruim.csv', x_ruim, delimiter=',')
os.chdir( PCA_Figures_path )
fig = plt.figure(figsize=[14,10])
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_bom, y_bom, z_bom, c = 'blue' )
ax.scatter(x_ruim, y_ruim, z_ruim, c = 'red' )
plt.ylabel('X2',fontsize = 20,labelpad=18)
plt.xlabel('X1',fontsize = 20, labelpad=18)
ax.set_zlabel('X3', fontsize = 20, labelpad=12)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.tick_params(axis='z', labelsize=16)
ax.grid()
red_patch = mpatches.Patch(color='red', label='Non-Functional Tools')
blue_patch = mpatches.Patch(color='blue', label='Functional Tools')
plt.legend(handles=[red_patch,blue_patch],fontsize = 20)
#plt.show()
fig.savefig('ScatterPlot_PCA_{}.png'.format(Output_Id), bbox_inches='tight')
# -------------------------------------------
fig = plt.figure(figsize=[21,7])
ax = fig.subplots(1,3)
ax[0].scatter(x_bom, y_bom, c = 'blue' )
ax[0].scatter(x_ruim, y_ruim, c = 'red' )
ax[0].set_xlabel('X1',fontsize = 20)
ax[0].set_ylabel('X2',fontsize = 20)
ax[0].grid()
ax[1].scatter(x_bom, z_bom, c = 'blue' )
ax[1].scatter(x_ruim, z_ruim, c = 'red' )
ax[1].set_xlabel('X1',fontsize = 20)
ax[1].set_ylabel('X3',fontsize = 20)
ax[1].grid()
ax[2].scatter(y_bom, z_bom, c = 'blue' )
ax[2].scatter(y_ruim, z_ruim, c = 'red' )
ax[2].set_xlabel('X2',fontsize = 20,)
ax[2].set_ylabel('X3',fontsize = 20)
ax[2].grid()
#plt.show()
fig.savefig('X1X2X3_{}.png'.format(Output_Id), bbox_inches='tight')
# -------------------------------------------
# Now change to PCA Analyses directory
os.chdir( PCA_Analyses_path )
general_features_contribution.to_csv('unique_features_used_{}.csv'.format(Output_Id),index = False)
sensors_contribution.to_csv('sensors_weighted_contribution_{}.csv'.format(Output_Id), index=True)
features_contribution.to_csv('features_weighted_contribution_{}.csv'.format(Output_Id), index=True)
# Now change to PCA Analyses directory
# -------------------------------------------
x = features_reduzidas[:,0]
y = features_reduzidas[:,1]
z = features_reduzidas[:,2]
x_bom=[]
x_ruim=[]
y_bom=[]
y_ruim=[]
z_bom=[]
z_ruim=[]
for i in range(len(Target)):
if Target[i] == 0:
x_bom.append(x[i])
y_bom.append(y[i])
z_bom.append(z[i])
if Target[i] == 1:
x_ruim.append(x[i])
y_ruim.append(y[i])
z_ruim.append(z[i])
os.chdir( PCA_Figures_path )
fig = plt.figure(figsize=[14,10])
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x_bom, y_bom, z_bom, c = 'blue' )
ax.scatter(x_ruim, y_ruim, z_ruim, c = 'red' )
plt.ylabel('PC2',fontsize = 20,labelpad=18)
plt.xlabel('PC1',fontsize = 20, labelpad=18)
ax.set_zlabel('PC3', fontsize = 20, labelpad=12)
plt.tick_params(axis='x', labelsize=16)
plt.tick_params(axis='y', labelsize=16)
plt.tick_params(axis='z', labelsize=16)
ax.grid()
red_patch = mpatches.Patch(color='red', label='Non-Functional Tools')
blue_patch = mpatches.Patch(color='blue', label='Functional Tools')
plt.legend(handles=[red_patch,blue_patch],fontsize = 20)
#plt.show()
fig.savefig('ScatterPlot_features__{}__{}.png'.format(Output_Id,it), bbox_inches='tight')
# -------------------------------------------
fig = plt.figure(figsize=[21,7])
ax = fig.subplots(1,3)
ax[0].scatter(x_bom, y_bom, c = 'blue' )
ax[0].scatter(x_ruim, y_ruim, c = 'red' )
ax[0].set_xlabel('PC1',fontsize = 20)
ax[0].set_ylabel('PC2',fontsize = 20)
ax[0].grid()
ax[1].scatter(x_bom, z_bom, c = 'blue' )
ax[1].scatter(x_ruim, z_ruim, c = 'red' )
ax[1].set_xlabel('PC1',fontsize = 20)
ax[1].set_ylabel('PC3',fontsize = 20)
ax[1].grid()
ax[2].scatter(y_bom, z_bom, c = 'blue' )
ax[2].scatter(y_ruim, z_ruim, c = 'red' )
ax[2].set_xlabel('PC2',fontsize = 20,)
ax[2].set_ylabel('PC3',fontsize = 20)
ax[2].grid()
#plt.show()
fig.savefig('PC1PC2PC3__{}__{}.png'.format(Output_Id,it), bbox_inches='tight')
# -------------------------------------------
# -------------------------------------------
os.chdir( PCA_Analyses_path )
np.savetxt("features_reduzidas_" + str(Output_Id) + ".csv", features_reduzidas, delimiter=',')
Output = {'ReducedFeatures': features_reduzidas,
'ID': Output_Id}
elif (Chose == 'Test'):
Output = {'ID': Output_Id}
# Now change back to base directory
os.chdir( base_path )
return Output
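# --- Hedged illustration (not part of the original pipeline) ----------------
# The weighted-contribution idea used in PCA_calc on a toy problem: absolute
# loadings are row-normalised to percentages and weighted by each PC's
# explained variance. The toy data and sizes are assumptions.
if __name__ == "__main__":
    import numpy as np
    from sklearn.decomposition import PCA
    rng = np.random.default_rng(0)
    X = rng.normal(size=(100, 4))
    toy_pca = PCA(n_components=2).fit(X)
    load = np.abs(toy_pca.components_)                      # (n_PCs, n_features)
    load = 100.0 * load / load.sum(axis=1, keepdims=True)   # percent per PC
    var = 100.0 * toy_pca.explained_variance_ratio_
    weighted = (load * var[:, None]).sum(axis=0) / var.sum()
    print(weighted)  # one contribution percentage per original feature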
def PCA_projection (features):
''' Function for ONLINE mode
This function projects the data onto a trained PCA.
Parameters:
------
features: dataframe
the selected TSFRESH features to be projected
Returns:
-------
dataframe
contains the output data of the PCA, i.e., the dataset with the Principal Components projected by PCA
'''
loaded_scaler = pickle.load(open('Kernel/pca_scaler.sav', 'rb'))
features_padronizadas = loaded_scaler.transform(features)
#Center the data and scale them to unit standard deviation
#scaler = StandardScaler().fit(features)
#features_padronizadas = scaler.transform(features)
pca= PCA(n_components = 3)
pca.fit(features_padronizadas)
variacao_percentual_pca = np.round(pca.explained_variance_ratio_ * 100, decimals = 2)
print('Variation maintained: %.2f' % variacao_percentual_pca.sum())
print(' ')
features_reduzidas = pca.transform(features_padronizadas)
"""
# load the model from disk
loaded_pca = pickle.load(open('Kernel/pca.sav', 'rb'))
scaler = StandardScaler().fit(features)
features_padronizadas = scaler.transform(features)
features_padronizadas = scaler.transform(features)
features_reduzidas = loaded_pca.transform(features_padronizadas)
"""
return features_reduzidas
class cpu_usage(threading.Thread):### Thread to calculate duration and mean cpu percente usage in a SODA classifier
def __init__(self):
threading.Thread.__init__(self)
self.control = True
def run(self):
cpu = []
t_inicial = time.time()
while self.control:
cpu.append(cpu_percent(interval=1, percpu=True))
t_final = time.time()
self.deltatime = t_final - t_inicial
self.mean_cpu = np.mean(cpu)
def stop(self):
self.control = False
def join(self):
threading.Thread.join(self)
return self.deltatime, self.mean_cpu
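# --- Hedged usage sketch (not part of the original pipeline) ----------------
# How the cpu_usage thread above is meant to be driven; assumes psutil's
# cpu_percent and the time module are imported at module level, as the class
# already requires.
if __name__ == "__main__":
    monitor = cpu_usage()
    monitor.start()
    # ... run the SODA classifier (or any workload) here ...
    monitor.stop()
    elapsed_s, mean_cpu_pct = monitor.join()
    print('elapsed (s):', elapsed_s, 'mean CPU (%):', mean_cpu_pct)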
def grid_set(data, N): #SODA process
_ , W = data.shape
AvD1 = data.mean(0)
X1 = np.mean(np.sum(np.power(data,2),axis=1))
grid_trad = np.sqrt(2*(X1 - AvD1*AvD1.T))/N
Xnorm = np.sqrt(np.sum(np.power(data,2),axis=1))
aux = Xnorm
for _ in range(W-1):
aux = np.insert(aux,0,Xnorm.T,axis=1)
data = data / aux
seq = np.argwhere(np.isnan(data))
if tuple(seq[::]): data[tuple(seq[::])] = 1
AvD2 = data.mean(0)
grid_angl = np.sqrt(1-AvD2*AvD2.T)/N
return X1, AvD1, AvD2, grid_trad, grid_angl
def pi_calculator(Uniquesample, mode):#SODA process
UN, W = Uniquesample.shape
if mode == 'euclidean' or mode == 'mahalanobis' or mode == 'cityblock' or mode == 'chebyshev' or mode == 'canberra':
AA1 = Uniquesample.mean(0)
X1 = sum(sum(np.power(Uniquesample,2)))/UN
DT1 = X1 - sum(np.power(AA1,2))
aux = []
for i in range(UN): aux.append(AA1)
aux2 = [Uniquesample[i]-aux[i] for i in range(UN)]
uspi = np.sum(np.power(aux2,2),axis=1)+DT1
if mode == 'minkowski':
AA1 = Uniquesample.mean(0)
X1 = sum(sum(np.power(Uniquesample,2)))/UN
DT1 = X1 - sum(np.power(AA1,2))
aux = np.matrix(AA1)
for i in range(UN-1): aux = np.insert(aux,0,AA1,axis=0)
aux = np.array(aux)
uspi = np.sum(np.power(cdist(Uniquesample, aux, mode, p=1.5),2),1)+DT1
if mode == 'cosine':
Xnorm = np.matrix(np.sqrt(np.sum(np.power(Uniquesample,2),axis=1))).T
aux2 = Xnorm
for i in range(W-1):
aux2 = np.insert(aux2,0,Xnorm.T,axis=1)
Uniquesample1 = Uniquesample / aux2
AA2 = np.mean(Uniquesample1,0)
'''
Do not run this script directly; execute the unit test
by launching "run_test.sh" instead.
'''
import libqpsolver
import os
import time
import progressbar
import numpy as np
from random import random
from cvxopt import matrix, solvers
#show detailed unit test message
verbose = False
#unit test run time and test item numbers
test_suite_exec_times = 1000
test_suite_items = 12
#global variables
sol_diff_cnt = 0
curr_test_num = 0
progress_cnt_max = test_suite_exec_times * test_suite_items
progress = \
progressbar.ProgressBar(maxval=progress_cnt_max, \
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def quadprog_cvxopt(P, q, A=None, b=None, A_eq=None, b_eq=None, options=None):
"""
qp solver provided by cvxopt package:
Minimize (1/2)(x.' * P * x) + (q.' * x)
Subject to A * x < b
and A_eq * x = b_eq
"""
#verbose option
solvers.options['show_progress'] = verbose
#objective function
P, q = matrix(P), matrix(q)
#inequality constraint
if (A is not None) and (b is not None):
A, b = matrix(A), matrix(b)
else:
A, b = None, None
#equality constraint
if (A_eq is not None) and (b_eq is not None):
A_eq, b_eq = matrix(A_eq), matrix(b_eq)
else:
A_eq, b_eq = None, None
#solve qp
sol = solvers.qp(P, q, A, b, A_eq, b_eq)
return np.array(sol['x'])
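# --- Hedged illustration (not part of the original test suite) --------------
# A tiny QP solved directly with cvxopt, the same backend the wrapper above
# uses: minimize (1/2)x'Px + q'x subject to x >= 0. The numbers are made up;
# the unconstrained optimum (1.0, 2.5) is already feasible, so that is the
# expected answer.
if __name__ == "__main__":
    P_toy = matrix([[2.0, 0.0], [0.0, 2.0]])
    q_toy = matrix([-2.0, -5.0])
    G_toy = matrix([[-1.0, 0.0], [0.0, -1.0]])   # -x <= 0  <=>  x >= 0
    h_toy = matrix([0.0, 0.0])
    solvers.options['show_progress'] = False
    sol_toy = solvers.qp(P_toy, q_toy, G_toy, h_toy)
    print(np.array(sol_toy['x']).ravel())        # approx [1.0, 2.5]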
"""DEM coregistration classes and functions."""
from __future__ import annotations
import copy
import concurrent.futures
import json
import os
import subprocess
import tempfile
import warnings
from enum import Enum
from typing import Any, Callable, Optional, overload, Union, Sequence, TypeVar
try:
import cv2
_has_cv2 = True
except ImportError:
_has_cv2 = False
import fiona
import geoutils as gu
from geoutils.georaster import RasterType
import numpy as np
import rasterio as rio
import rasterio.warp # pylint: disable=unused-import
import rasterio.windows # pylint: disable=unused-import
import scipy
import scipy.interpolate
import scipy.ndimage
import scipy.optimize
import skimage.transform
from rasterio import Affine
from tqdm import trange, tqdm
import pandas as pd
import xdem
try:
import richdem as rd
_has_rd = True
except ImportError:
_has_rd = False
try:
from pytransform3d.transform_manager import TransformManager
import pytransform3d.transformations
_HAS_P3D = True
except ImportError:
_HAS_P3D = False
def filter_by_range(ds: rio.DatasetReader, rangelim: tuple[float, float]):
"""
Function to filter values using a range.
"""
print('Excluding values outside of range: {0:f} to {1:f}'.format(*rangelim))
out = np.ma.masked_outside(ds, *rangelim)
out.set_fill_value(ds.fill_value)
return out
def filtered_slope(ds_slope, slope_lim=(0.1, 40)):
print("Slope filter: %0.2f - %0.2f" % slope_lim)
print("Initial count: %i" % ds_slope.count())
flt_slope = filter_by_range(ds_slope, slope_lim)
print(flt_slope.count())
return flt_slope
def apply_xy_shift(ds: rio.DatasetReader, dx: float, dy: float) -> np.ndarray:
"""
Apply horizontal shift to rio dataset using Transform affine matrix
:param ds: DEM
:param dx: dx shift value
:param dy: dy shift value
Returns:
Rio Dataset with updated transform
"""
print("X shift: ", dx)
print("Y shift: ", dy)
# Update geotransform
ds_meta = ds.meta
gt_orig = ds.transform
gt_align = Affine(gt_orig.a, gt_orig.b, gt_orig.c+dx,
gt_orig.d, gt_orig.e, gt_orig.f+dy)
print("Original transform:", gt_orig)
print("Updated transform:", gt_align)
# Update ds Geotransform
ds_align = ds
meta_update = ds.meta.copy()
meta_update.update({"driver": "GTiff", "height": ds.shape[1],
"width": ds.shape[2], "transform": gt_align, "crs": ds.crs})
# to split this part in two?
with rasterio.open(ds_align, "w", **meta_update) as dest:
dest.write(ds_align)
return ds_align
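# --- Hedged illustration (not part of xdem) ----------------------------------
# Shifting a rasterio/affine transform by (dx, dy) only touches the c (x
# offset) and f (y offset) terms, exactly as apply_xy_shift does above. The
# transform values here are made up.
if __name__ == "__main__":
    gt = Affine(10.0, 0.0, 1000.0, 0.0, -10.0, 2000.0)
    gt_shifted = Affine(gt.a, gt.b, gt.c + 25.0, gt.d, gt.e, gt.f - 5.0)
    print(gt_shifted.c, gt_shifted.f)  # 1025.0 1995.0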
def apply_z_shift(ds: rio.DatasetReader, dz: float):
"""
Apply vertical shift to rio dataset using Transform affine matrix
:param ds: DEM
:param dx: dz shift value
"""
src_dem = rio.open(ds)
a = src_dem.read(1)
ds_shift = a + dz
return ds_shift
def rio_to_rda(ds: rio.DatasetReader) -> rd.rdarray:
"""
Get georeferenced richDEM array from rasterio dataset
:param ds: DEM
:return: DEM
"""
arr = ds.read(1)
rda = rd.rdarray(arr, no_data=ds.get_nodatavals()[0])
rda.geotransform = ds.get_transform()
rda.projection = ds.get_gcps()
return rda
def get_terrainattr(ds: rio.DatasetReader, attrib='slope_degrees') -> rd.rdarray:
"""
Derive terrain attribute for DEM opened with rasterio. One of "slope_degrees", "slope_percentage", "aspect",
"profile_curvature", "planform_curvature", "curvature" and others (see richDEM documentation)
:param ds: DEM
:param attrib: terrain attribute
:return:
"""
rda = rio_to_rda(ds)
terrattr = rd.TerrainAttribute(rda, attrib=attrib)
return terrattr
def get_horizontal_shift(elevation_difference: np.ndarray, slope: np.ndarray, aspect: np.ndarray,
min_count: int = 20) -> tuple[float, float, float]:
"""
Calculate the horizontal shift between two DEMs using the method presented in Nuth and Kääb (2011).
:param elevation_difference: The elevation difference (reference_dem - aligned_dem).
:param slope: A slope map with the same shape as elevation_difference (units = pixels?).
:param aspect: An aspect map with the same shape as elevation_difference (units = radians).
:param min_count: The minimum allowed bin size to consider valid.
:raises ValueError: If very few finite values exist to analyse.
:returns: The pixel offsets in easting, northing, and the c_parameter (altitude?).
"""
input_x_values = aspect
with np.errstate(divide="ignore", invalid="ignore"):
input_y_values = elevation_difference / slope
# Remove non-finite values
x_values = input_x_values[np.isfinite(input_x_values) & np.isfinite(input_y_values)]
y_values = input_y_values[np.isfinite(input_x_values) & np.isfinite(input_y_values)]
assert y_values.shape[0] > 0
# Remove outliers
lower_percentile = np.percentile(y_values, 1)
upper_percentile = np.percentile(y_values, 99)
valids = np.where((y_values > lower_percentile) & (y_values < upper_percentile) & (np.abs(y_values) < 200))
x_values = x_values[valids]
y_values = y_values[valids]
# Slice the dataset into appropriate aspect bins
step = np.pi / 36
slice_bounds = np.arange(start=0, stop=2 * np.pi, step=step)
y_medians = np.zeros([len(slice_bounds)])
count = y_medians.copy()
for i, bound in enumerate(slice_bounds):
y_slice = y_values[(bound < x_values) & (x_values < (bound + step))]
if y_slice.shape[0] > 0:
y_medians[i] = np.median(y_slice)
count[i] = y_slice.shape[0]
# Filter out bins with counts below threshold
y_medians = y_medians[count > min_count]
slice_bounds = slice_bounds[count > min_count]
if slice_bounds.shape[0] < 10:
raise ValueError("Less than 10 different cells exist.")
# Make an initial guess of the a, b, and c parameters
initial_guess: tuple[float, float, float] = (3 * np.std(y_medians) / (2 ** 0.5), 0.0, np.mean(y_medians))
def estimate_ys(x_values: np.ndarray, parameters: tuple[float, float, float]) -> np.ndarray:
"""
Estimate y-values from x-values and the current parameters.
y(x) = a * cos(b - x) + c
:param x_values: The x-values to feed the above function.
:param parameters: The a, b, and c parameters to feed the above function
:returns: Estimated y-values with the same shape as the given x-values
"""
return parameters[0] * np.cos(parameters[1] - x_values) + parameters[2]
def residuals(parameters: tuple[float, float, float], y_values: np.ndarray, x_values: np.ndarray):
"""
Get the residuals between the estimated and measured values using the given parameters.
err(x, y) = est_y(x) - y
:param parameters: The a, b, and c parameters to use for the estimation.
:param y_values: The measured y-values.
:param x_values: The measured x-values
:returns: An array of residuals with the same shape as the input arrays.
"""
err = estimate_ys(x_values, parameters) - y_values
return err
# Estimate the a, b, and c parameters with least square minimisation
plsq = scipy.optimize.leastsq(func=residuals, x0=initial_guess, args=(y_medians, slice_bounds), full_output=1)
a_parameter, b_parameter, c_parameter = plsq[0]
# Calculate the easting and northing offsets from the above parameters
east_offset = a_parameter * np.sin(b_parameter)
north_offset = a_parameter * np.cos(b_parameter)
return east_offset, north_offset, c_parameter
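# --- Hedged illustration (not part of xdem) ----------------------------------
# The cosine model behind get_horizontal_shift, fitted on synthetic data with
# the same scipy least-squares routine: y(x) = a*cos(b - x) + c. The true
# parameters below are made up; the fit recovers them up to the usual
# (a, b) -> (-a, b + pi) symmetry.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    aspect_toy = np.linspace(0, 2 * np.pi, 200)
    a_true, b_true, c_true = 3.0, 0.8, -1.0
    y_toy = a_true * np.cos(b_true - aspect_toy) + c_true \
        + rng.normal(0, 0.1, aspect_toy.size)

    def _toy_residuals(params, y_values, x_values):
        return params[0] * np.cos(params[1] - x_values) + params[2] - y_values

    est, _ = scipy.optimize.leastsq(_toy_residuals, x0=(1.0, 0.0, 0.0),
                                    args=(y_toy, aspect_toy))
    print(est)  # approximately [3.0, 0.8, -1.0]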
def calculate_slope_and_aspect(dem: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""
Calculate the slope and aspect of a DEM.
:param dem: A numpy array of elevation values.
:returns: The slope (in pixels??) and aspect (in radians) of the DEM.
"""
# TODO: Figure out why slope is called slope_px. What unit is it in?
# TODO: Change accordingly in the get_horizontal_shift docstring.
# Calculate the gradient of the slope
gradient_y, gradient_x = np.gradient(dem)
slope_px = np.sqrt(gradient_x ** 2 + gradient_y ** 2)
aspect = np.arctan2(-gradient_x, gradient_y)
aspect += np.pi
return slope_px, aspect
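# --- Hedged illustration (not part of xdem) ----------------------------------
# Slope/aspect of a synthetic plane rising 2 units per pixel in +x: the
# gradient magnitude is 2 everywhere and, with the sign convention above,
# arctan2(-2, 0) + pi gives an aspect of pi/2.
if __name__ == "__main__":
    yy, xx = np.meshgrid(np.arange(5.0), np.arange(5.0), indexing="ij")
    toy_dem = 2.0 * xx
    toy_slope, toy_aspect = calculate_slope_and_aspect(toy_dem)
    print(toy_slope[2, 2], toy_aspect[2, 2])  # 2.0  ~1.5708 (pi/2)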
def deramping(elevation_difference, x_coordinates: np.ndarray, y_coordinates: np.ndarray,
degree: int, verbose: bool = False,
metadata: Optional[dict[str, Any]] = None) -> Callable[[np.ndarray, np.ndarray], np.ndarray]:
"""
Calculate a deramping function to account for rotational and non-rigid components of the elevation difference.
:param elevation_difference: The elevation difference array to analyse.
:param x_coordinates: x-coordinates of the above array (must have the same shape as elevation_difference)
:param y_coordinates: y-coordinates of the above array (must have the same shape as elevation_difference)
:param degree: The polynomial degree to estimate the ramp.
:param verbose: Print the least squares optimization progress.
:param metadata: Optional. A metadata dictionary that will be updated with the key "deramp".
:returns: A callable function to estimate the ramp.
"""
#warnings.warn("This function is deprecated in favour of the new Coreg class.", DeprecationWarning)
# Extract only the finite values of the elevation difference and corresponding coordinates.
valid_diffs = elevation_difference[np.isfinite(elevation_difference)]
valid_x_coords = x_coordinates[np.isfinite(elevation_difference)]
valid_y_coords = y_coordinates[np.isfinite(elevation_difference)]
# Randomly subsample the values if there are more than 500,000 of them.
if valid_x_coords.shape[0] > 500_000:
random_indices = np.random.randint(0, valid_x_coords.shape[0] - 1, 500_000)
valid_diffs = valid_diffs[random_indices]
valid_x_coords = valid_x_coords[random_indices]
valid_y_coords = valid_y_coords[random_indices]
# Create a function whose residuals will be attempted to minimise
def estimate_values(x_coordinates: np.ndarray, y_coordinates: np.ndarray,
coefficients: np.ndarray, degree: int) -> np.ndarray:
"""
Estimate values from a 2D-polynomial.
:param x_coordinates: x-coordinates of the difference array (must have the same shape as elevation_difference).
:param y_coordinates: y-coordinates of the difference array (must have the same shape as elevation_difference).
:param coefficients: The coefficients (a, b, c, etc.) of the polynomial.
:param degree: The degree of the polynomial.
:raises ValueError: If the length of the coefficients list is not compatible with the degree.
:returns: The values estimated by the polynomial.
"""
# Check that the coefficient size is correct.
coefficient_size = (degree + 1) * (degree + 2) / 2
if len(coefficients) != coefficient_size:
raise ValueError(f"Wrong number of coefficients: got {len(coefficients)}, expected {int(coefficient_size)} for degree {degree}.")
# Do Amaury's black magic to estimate the values.
estimated_values = np.sum([coefficients[k * (k + 1) // 2 + j] * x_coordinates ** (k - j) *
y_coordinates ** j for k in range(degree + 1) for j in range(k + 1)], axis=0)
return estimated_values # type: ignore
# Create the error function
def residuals(coefficients: np.ndarray, values: np.ndarray, x_coordinates: np.ndarray,
y_coordinates: np.ndarray, degree: int) -> np.ndarray:
"""
Calculate the difference between the estimated and measured values.
:param coefficients: Coefficients for the estimation.
:param values: The measured values.
:param x_coordinates: The x-coordinates of the values.
:param y_coordinates: The y-coordinates of the values.
:param degree: The degree of the polynomial to estimate.
:returns: An array of residuals.
"""
error = estimate_values(x_coordinates, y_coordinates, coefficients, degree) - values
error = error[np.isfinite(error)]
return error
# Run a least-squares minimisation to estimate the correct coefficients.
# TODO: Maybe remove the full_output?
initial_guess = np.zeros(shape=((degree + 1) * (degree + 2) // 2))
if verbose:
print("Deramping...")
coefficients = scipy.optimize.least_squares(
fun=residuals,
x0=initial_guess,
args=(valid_diffs, valid_x_coords, valid_y_coords, degree),
verbose=2 if verbose and degree > 1 else 0
).x
# Generate the return-function which can correctly estimate the ramp
def ramp(x_coordinates: np.ndarray, y_coordinates: np.ndarray) -> np.ndarray:
"""
Get the values of the ramp that corresponds to given coordinates.
:param x_coordinates: x-coordinates of interest.
:param y_coordinates: y-coordinates of interest.
:returns: The estimated ramp offsets.
"""
return estimate_values(x_coordinates, y_coordinates, coefficients, degree)
if metadata is not None:
metadata["deramp"] = {
"coefficients": coefficients,
"nmad": xdem.spatialstats.nmad(residuals(coefficients, valid_diffs, valid_x_coords, valid_y_coords, degree))
}
# Return the function which can be used later.
return ramp
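# --- Hedged illustration (not part of xdem) ----------------------------------
# deramping() recovering a purely planar bias (degree=1). The coefficients
# 0.01, -0.02 and 5.0 are arbitrary; the returned ramp should reproduce the
# synthetic difference almost exactly.
if __name__ == "__main__":
    xg, yg = np.meshgrid(np.arange(50, dtype=float), np.arange(40, dtype=float))
    dh = 0.01 * xg - 0.02 * yg + 5.0
    toy_ramp = deramping(dh, xg, yg, degree=1)
    print(np.nanmax(np.abs(toy_ramp(xg, yg) - dh)))  # close to 0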
def mask_as_array(reference_raster: gu.georaster.Raster, mask: Union[str, gu.geovector.Vector, gu.georaster.Raster]) -> np.ndarray:
"""
Convert a given mask into an array.
:param reference_raster: The raster to use for rasterizing the mask if the mask is a vector.
:param mask: A valid Vector, Raster or a respective filepath to a mask.
:raises: ValueError: If the mask path is invalid.
:raises: TypeError: If the wrong mask type was given.
:returns: The mask as a squeezed array.
"""
# Try to load the mask file if it's a filepath
if isinstance(mask, str):
# First try to load it as a Vector
try:
mask = gu.geovector.Vector(mask)
# If the format is unsupported, try loading it as a Raster
except fiona.errors.DriverError:
try:
mask = gu.georaster.Raster(mask)
# If that fails, raise an error
except rio.errors.RasterioIOError:
raise ValueError(f"Mask path not in a supported Raster or Vector format: {mask}")
# At this point, the mask variable is either a Raster or a Vector
# Now, convert the mask into an array by either rasterizing a Vector or by fetching a Raster's data
if isinstance(mask, gu.geovector.Vector):
mask_array = mask.create_mask(reference_raster)
elif isinstance(mask, gu.georaster.Raster):
# The true value is the maximum value in the raster, unless the maximum value is 0 or False
true_value = np.nanmax(mask.data) if not np.nanmax(mask.data) in [0, False] else True
mask_array = (mask.data == true_value).squeeze()
else:
raise TypeError(
f"Mask has invalid type: {type(mask)}. Expected one of: "
f"{[gu.georaster.Raster, gu.geovector.Vector, str, type(None)]}"
)
return mask_array
def _transform_to_bounds_and_res(shape: tuple[int, ...],
transform: rio.transform.Affine) -> tuple[rio.coords.BoundingBox, float]:
"""Get the bounding box and (horizontal) resolution from a transform and the shape of a DEM."""
bounds = rio.coords.BoundingBox(
*rio.transform.array_bounds(shape[0], shape[1], transform=transform))
resolution = (bounds.right - bounds.left) / shape[1]
return bounds, resolution
def _get_x_and_y_coords(shape: tuple[int, ...], transform: rio.transform.Affine):
"""Generate center coordinates from a transform and the shape of a DEM."""
bounds, resolution = _transform_to_bounds_and_res(shape, transform)
x_coords, y_coords = np.meshgrid(
np.linspace(bounds.left + resolution / 2, bounds.right - resolution / 2, num=shape[1]),
np.linspace(bounds.bottom + resolution / 2, bounds.top - resolution / 2, num=shape[0])[::-1]
)
return x_coords, y_coords
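# --- Hedged illustration (not part of xdem) ----------------------------------
# Pixel-centre coordinates for a tiny 2 x 3 grid with 10 m pixels whose upper
# left corner sits at (1000, 2000); the grid size and origin are made up.
if __name__ == "__main__":
    toy_transform = rio.transform.from_origin(1000.0, 2000.0, 10.0, 10.0)
    xs, ys = _get_x_and_y_coords((2, 3), toy_transform)
    print(xs[0])     # [1005. 1015. 1025.]
    print(ys[:, 0])  # [1995. 1985.]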
CoregType = TypeVar("CoregType", bound="Coreg")
class Coreg:
"""
Generic Coreg class.
Made to be subclassed.
"""
_fit_called: bool = False # Flag to check if the .fit() method has been called.
_is_affine: Optional[bool] = None
def __init__(self, meta: Optional[dict[str, Any]] = None, matrix: Optional[np.ndarray] = None):
"""Instantiate a generic Coreg method."""
self._meta: dict[str, Any] = meta or {} # All __init__ functions should instantiate an empty dict.
if matrix is not None:
with warnings.catch_warnings():
# This error is fixed in the upcoming 1.8
warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`")
valid_matrix = pytransform3d.transformations.check_transform(matrix)
self._meta["matrix"] = valid_matrix
def fit(self: CoregType, reference_dem: np.ndarray | np.ma.masked_array | RasterType,
dem_to_be_aligned: np.ndarray | np.ma.masked_array | RasterType,
inlier_mask: Optional[np.ndarray] = None,
transform: Optional[rio.transform.Affine] = None,
weights: Optional[np.ndarray] = None,
subsample: Union[float, int] = 1.0,
verbose: bool = False) -> CoregType:
"""
Estimate the coregistration transform on the given DEMs.
:param reference_dem: 2D array of elevation values acting reference.
:param dem_to_be_aligned: 2D array of elevation values to be aligned.
:param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True).
:param transform: Optional. Transform of the reference_dem. Mandatory in some cases.
:param weights: Optional. Per-pixel weights for the coregistration.
:param subsample: Subsample the input to increase performance. <1 is parsed as a fraction. >1 is a pixel count.
:param verbose: Print progress messages to stdout.
"""
if weights is not None:
raise NotImplementedError("Weights have not yet been implemented")
# Validate that both inputs are valid array-like (or Raster) types.
if not all(hasattr(dem, "__array_interface__") for dem in (reference_dem, dem_to_be_aligned)):
raise ValueError(
"Both DEMs need to be array-like (implement a numpy array interface)."
f"'reference_dem': {reference_dem}, 'dem_to_be_aligned': {dem_to_be_aligned}"
)
# If both DEMs are Rasters, validate that 'dem_to_be_aligned' is in the right grid. Then extract its data.
if isinstance(dem_to_be_aligned, gu.Raster) and isinstance(reference_dem, gu.Raster):
dem_to_be_aligned = dem_to_be_aligned.reproject(reference_dem, silent=True).data
# If any input is a Raster, use its transform if 'transform is None'.
# If 'transform' was given and any input is a Raster, trigger a warning.
# Finally, extract only the data of the raster.
for name, dem in [("reference_dem", reference_dem), ("dem_to_be_aligned", dem_to_be_aligned)]:
if hasattr(dem, "transform"):
if transform is None:
transform = getattr(dem, "transform")
elif transform is not None:
warnings.warn(f"'{name}' of type {type(dem)} overrides the given 'transform'")
"""
if name == "reference_dem":
reference_dem = dem.data
else:
dem_to_be_aligned = dem.data
"""
if transform is None:
raise ValueError("'transform' must be given if both DEMs are array-like.")
ref_dem, ref_mask = xdem.spatial_tools.get_array_and_mask(reference_dem)
tba_dem, tba_mask = xdem.spatial_tools.get_array_and_mask(dem_to_be_aligned)
# Make sure that the mask has an expected format.
if inlier_mask is not None:
inlier_mask = np.asarray(inlier_mask).squeeze()
assert inlier_mask.dtype == bool, f"Invalid mask dtype: '{inlier_mask.dtype}'. Expected 'bool'"
if np.all(~inlier_mask):
raise ValueError("'inlier_mask' had no inliers.")
ref_dem[~inlier_mask] = np.nan
tba_dem[~inlier_mask] = np.nan
if np.all(ref_mask):
raise ValueError("'reference_dem' had only NaNs")
if np.all(tba_mask):
raise ValueError("'dem_to_be_aligned' had only NaNs")
# If subsample is not equal to one, subsampling should be performed.
if subsample != 1.0:
# The full mask (inliers=True) is the inverse of the above masks and the provided mask.
full_mask = (~ref_mask & ~tba_mask & (np.asarray(inlier_mask) if inlier_mask is not None else True)).squeeze()
# If subsample is less than one, it is parsed as a fraction (e.g. 0.8 => retain 80% of the values)
if subsample < 1.0:
subsample = int(np.count_nonzero(full_mask) * (1 - subsample))
# Randomly pick N inliers in the full_mask where N=subsample
random_falses = np.random.choice(np.argwhere(full_mask.flatten()).squeeze(), int(subsample), replace=False)
# Convert the 1D indices to 2D indices
cols = (random_falses // full_mask.shape[0]).astype(int)
rows = random_falses % full_mask.shape[0]
# Set the N random inliers to be parsed as outliers instead.
full_mask[rows, cols] = False
# Run the associated fitting function
self._fit_func(ref_dem=ref_dem, tba_dem=tba_dem, transform=transform, weights=weights, verbose=verbose)
# Flag that the fitting function has been called.
self._fit_called = True
return self
@overload
def apply(self, dem: RasterType, transform: rio.transform.Affine | None) -> RasterType: ...
@overload
def apply(self, dem: np.ndarray, transform: rio.transform.Affine | None) -> np.ndarray: ...
@overload
def apply(self, dem: np.ma.masked_array, transform: rio.transform.Affine | None) -> np.ma.masked_array: ...
def apply(self, dem: np.ndarray | np.ma.masked_array | RasterType,
transform: rio.transform.Affine | None = None) -> RasterType | np.ndarray | np.ma.masked_array:
"""
Apply the estimated transform to a DEM.
:param dem: A DEM array or Raster to apply the transform on.
:param transform: The transform object of the DEM. Required if 'dem' is an array and not a Raster.
:returns: The transformed DEM.
"""
if not self._fit_called and self._meta.get("matrix") is None:
raise AssertionError(".fit() does not seem to have been called yet")
if isinstance(dem, gu.Raster):
if transform is None:
transform = dem.transform
else:
warnings.warn(f"DEM of type {type(dem)} overrides the given 'transform'")
else:
if transform is None:
raise ValueError("'transform' must be given if DEM is array-like.")
# The array to provide the functions will be an ndarray with NaNs for masked out areas.
dem_array, dem_mask = xdem.spatial_tools.get_array_and_mask(dem)
if np.all(dem_mask):
raise ValueError("'dem' had only NaNs")
# See if a _apply_func exists
try:
# Run the associated apply function
applied_dem = self._apply_func(dem_array, transform) # pylint: disable=assignment-from-no-return
# If it doesn't exist, use apply_matrix()
except NotImplementedError:
if self.is_affine: # This only works if it's affine, however.
# Apply the matrix around the centroid (if defined, otherwise just from the center).
applied_dem = apply_matrix(
dem_array,
transform=transform,
matrix=self.to_matrix(),
centroid=self._meta.get("centroid"),
dilate_mask=True
)
else:
raise ValueError("Coreg method is non-rigid but has no implemented _apply_func")
# If the DEM was a masked_array, copy the mask to the new DEM
if hasattr(dem, "mask"):
applied_dem = np.ma.masked_array(applied_dem, mask=dem.mask) # type: ignore
# If the DEM was a Raster with a mask, copy the mask to the new DEM
elif hasattr(dem, "data") and hasattr(dem.data, "mask"):
applied_dem = np.ma.masked_array(applied_dem, mask=dem.data.mask) # type: ignore
# If the input was a Raster, return a Raster as well.
if isinstance(dem, gu.Raster):
return dem.from_array(applied_dem, transform, dem.crs, nodata=dem.nodata)
return applied_dem
def apply_pts(self, coords: np.ndarray) -> np.ndarray:
"""
Apply the estimated transform to a set of 3D points.
:param coords: A (N, 3) array of X/Y/Z coordinates or one coordinate of shape (3,).
:returns: The transformed coordinates.
"""
if not self._fit_called and self._meta.get("matrix") is None:
raise AssertionError(".fit() does not seem to have been called yet")
# If the coordinates represent just one coordinate
if np.shape(coords) == (3,):
coords = np.reshape(coords, (1, 3))
assert len(np.shape(coords)) == 2 and np.shape(coords)[1] == 3, f"'coords' shape must be (N, 3). Given shape: {np.shape(coords)}"
coords_c = coords.copy()
# See if an _apply_pts_func exists
try:
transformed_points = self._apply_pts_func(coords)
# If it doesn't exist, use opencv's perspectiveTransform
except NotImplementedError:
if self.is_affine:  # This only works if the transform is rigid (affine), however.
# Transform the points (around the centroid if it exists).
if self._meta.get("centroid") is not None:
coords_c -= self._meta["centroid"]
transformed_points = cv2.perspectiveTransform(coords_c.reshape(1, -1, 3), self.to_matrix()).squeeze()
if self._meta.get("centroid") is not None:
transformed_points += self._meta["centroid"]
else:
raise ValueError("Coreg method is non-rigid but has not implemented _apply_pts_func")
return transformed_points
@property
def is_affine(self) -> bool:
"""Check if the transform be explained by a 3D affine transform."""
# _is_affine is found by seeing if to_matrix() raises an error.
# If this hasn't been done yet, it will be None
if self._is_affine is None:
try: # See if to_matrix() raises an error.
self.to_matrix()
self._is_affine = True
except (ValueError, NotImplementedError):
self._is_affine = False
return self._is_affine
def to_matrix(self) -> np.ndarray:
"""Convert the transform to a 4x4 transformation matrix."""
return self._to_matrix_func()
def centroid(self) -> Optional[tuple[float, float, float]]:
"""Get the centroid of the coregistration, if defined."""
meta_centroid = self._meta.get("centroid")
if meta_centroid is None:
return None
# Unpack the centroid in case it is in an unexpected format (an array, list or something else).
return (meta_centroid[0], meta_centroid[1], meta_centroid[2])
def residuals(self, reference_dem: Union[np.ndarray, np.ma.masked_array],
dem_to_be_aligned: Union[np.ndarray, np.ma.masked_array],
inlier_mask: Optional[np.ndarray] = None,
transform: Optional[rio.transform.Affine] = None) -> np.ndarray:
"""
Calculate the residual offsets (the difference) between two DEMs after applying the transformation.
:param reference_dem: 2D array of elevation values acting as reference.
:param dem_to_be_aligned: 2D array of elevation values to be aligned.
:param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True).
:param transform: Optional. Transform of the reference_dem. Mandatory in some cases.
:returns: A 1D array of finite residuals.
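Example (usage sketch; 'coreg', 'ref_dem', 'tba_dem' and 'transform' are assumed to exist):
>>> res = coreg.residuals(ref_dem, tba_dem, transform=transform)
>>> float(np.median(res))  # should be close to zero after a good fit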
"""
# Use the transform to correct the DEM to be aligned.
aligned_dem = self.apply(dem_to_be_aligned, transform=transform)
# Format the reference DEM
ref_arr, ref_mask = xdem.spatial_tools.get_array_and_mask(reference_dem)
if inlier_mask is None:
inlier_mask = np.ones(ref_arr.shape, dtype=bool)
# Create the full inlier mask (manual inliers plus non-nans)
full_mask = (~ref_mask) & np.isfinite(aligned_dem) & inlier_mask
# Calculate the DEM difference
diff = ref_arr - aligned_dem
# Sometimes, the float minimum (for float32 = -3.4028235e+38) is returned. This and inf should be excluded.
if "float" in str(diff.dtype):
full_mask[(diff == np.finfo(diff.dtype).min) | np.isinf(diff)] = False
# Return the difference values within the full inlier mask
return diff[full_mask]
def error(self, reference_dem: Union[np.ndarray, np.ma.masked_array],
dem_to_be_aligned: Union[np.ndarray, np.ma.masked_array],
error_type: str | list[str] = "nmad",
inlier_mask: Optional[np.ndarray] = None,
transform: Optional[rio.transform.Affine] = None) -> float | list[float]:
"""
Calculate the error of a coregistration approach.
Choices:
- "nmad": Default. The Normalized Median Absolute Deviation of the residuals.
- "median": The median of the residuals.
- "mean": The mean/average of the residuals
- "std": The standard deviation of the residuals.
- "rms": The root mean square of the residuals.
- "mae": The mean absolute error of the residuals.
- "count": The residual count.
:param reference_dem: 2D array of elevation values acting as reference.
:param dem_to_be_aligned: 2D array of elevation values to be aligned.
:param error_type: The type of error measure to calculate. May be a list of error types.
:param inlier_mask: Optional. 2D boolean array of areas to include in the analysis (inliers=True).
:param transform: Optional. Transform of the reference_dem. Mandatory in some cases.
:returns: The error measure of choice for the residuals.
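Example (usage sketch with already-loaded inputs):
>>> coreg.error(ref_dem, tba_dem, error_type=["median", "nmad"], transform=transform)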
"""
if isinstance(error_type, str):
error_type = [error_type]
residuals = self.residuals(reference_dem=reference_dem, dem_to_be_aligned=dem_to_be_aligned,
inlier_mask=inlier_mask, transform=transform)
error_functions = {
"nmad": xdem.spatialstats.nmad,
"median": np.median,
"mean": np.mean,
"std": np.std,
"rms": lambda residuals: np.sqrt(np.mean(np.square(residuals))),
"mae": lambda residuals: np.mean(np.abs(residuals)),
"count": lambda residuals: residuals.size
}
try:
errors = [error_functions[err_type](residuals) for err_type in error_type]
except KeyError as exception:
raise ValueError(
f"Invalid 'error_type'{'s' if len(error_type) > 1 else ''}: "
f"'{error_type}'. Choices: {list(error_functions.keys())}"
) from exception
return errors if len(errors) > 1 else errors[0]
@classmethod
def from_matrix(cls, matrix: np.ndarray):
"""
Instantiate a generic Coreg class from a transformation matrix.
:param matrix: A 4x4 transformation matrix. Shape must be (4,4).
:raises ValueError: If the matrix is incorrectly formatted.
:returns: The instantiated generic Coreg class.
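Example: a pure 1 m vertical shift (variable names are illustrative):
>>> matrix = np.diag(np.ones(4, dtype=float))
>>> matrix[2, 3] = 1.0
>>> vshift = Coreg.from_matrix(matrix)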
"""
if np.any(~np.isfinite(matrix)):
raise ValueError(f"Matrix has non-finite values:\n{matrix}")
with warnings.catch_warnings():
# Deprecation warning from pytransform3d; fixed in the upcoming 1.8 release
warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`")
valid_matrix = pytransform3d.transformations.check_transform(matrix)
return cls(matrix=valid_matrix)
@classmethod
def from_translation(cls, x_off: float = 0.0, y_off: float = 0.0, z_off: float = 0.0):
"""
Instantiate a generic Coreg class from a X/Y/Z translation.
:param x_off: The offset to apply in the X (west-east) direction.
:param y_off: The offset to apply in the Y (south-north) direction.
:param z_off: The offset to apply in the Z (vertical) direction.
:raises ValueError: If the given translation contained invalid values.
:returns: An instantiated generic Coreg class.
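Example: a 2 m eastward and 1 m upward shift (illustrative values):
>>> shift = Coreg.from_translation(x_off=2.0, z_off=1.0)
>>> shift.to_matrix()[[0, 2], 3]
array([2., 1.])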
"""
matrix = np.diag(np.ones(4, dtype=float))
matrix[0, 3] = x_off
matrix[1, 3] = y_off
matrix[2, 3] = z_off
return cls.from_matrix(matrix)
def copy(self: CoregType) -> CoregType:
"""Return an identical copy of the class."""
new_coreg = self.__new__(type(self))
new_coreg.__dict__ = {key: copy.copy(value) for key, value in self.__dict__.items()}
return new_coreg
def __add__(self, other: Coreg) -> CoregPipeline:
"""Return a pipeline consisting of self and the other coreg function."""
if not isinstance(other, Coreg):
raise ValueError(f"Incompatible add type: {type(other)}. Expected 'Coreg' subclass")
return CoregPipeline([self, other])
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
# FOR DEVELOPERS: This function needs to be implemented.
raise NotImplementedError("This should have been implemented by subclassing")
def _to_matrix_func(self) -> np.ndarray:
# FOR DEVELOPERS: This function needs to be implemented if the `self._meta['matrix']` keyword is not None.
# Try to see if a matrix exists.
meta_matrix = self._meta.get("matrix")
if meta_matrix is not None:
assert meta_matrix.shape == (4, 4), f"Invalid _meta matrix shape. Expected: (4, 4), got {meta_matrix.shape}"
return meta_matrix
raise NotImplementedError("This should be implemented by subclassing")
def _apply_func(self, dem: np.ndarray, transform: rio.transform.Affine) -> np.ndarray:
# FOR DEVELOPERS: This function is only needed for non-rigid transforms.
raise NotImplementedError("This should have been implemented by subclassing")
def _apply_pts_func(self, coords: np.ndarray) -> np.ndarray:
# FOR DEVELOPERS: This function is only needed for non-rigid transforms.
raise NotImplementedError("This should have been implemented by subclassing")
class BiasCorr(Coreg):
"""
DEM bias correction.
Estimates the mean (or median, weighted avg., etc.) offset between two DEMs.
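Example (usage sketch; 'ref_dem', 'tba_dem' and 'transform' are assumed to be loaded already):
>>> bias_corr = BiasCorr(bias_func=np.median)
>>> bias_corr.fit(ref_dem, tba_dem, transform=transform)
>>> corrected = bias_corr.apply(tba_dem, transform=transform)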
"""
def __init__(self, bias_func=np.average): # pylint: disable=super-init-not-called
"""
Instantiate a bias correction object.
:param bias_func: The function to use for calculating the bias. Default: (weighted) average.
"""
super().__init__(meta={"bias_func": bias_func})
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Estimate the bias using the bias_func."""
if verbose:
print("Estimating bias...")
diff = ref_dem - tba_dem
diff = diff[np.isfinite(diff)]
if np.count_nonzero(np.isfinite(diff)) == 0:
raise ValueError("No finite values in bias comparison.")
# Use weights if those were provided.
bias = self._meta["bias_func"](diff) if weights is None \
else self._meta["bias_func"](diff, weights=weights)
if verbose:
print("Bias estimated")
self._meta["bias"] = bias
def _to_matrix_func(self) -> np.ndarray:
"""Convert the bias to a transform matrix."""
empty_matrix = np.diag(np.ones(4, dtype=float))
empty_matrix[2, 3] += self._meta["bias"]
return empty_matrix
class ICP(Coreg):
"""
Iterative Closest Point DEM coregistration.
Estimates a rigid transform (rotation + translation) between two DEMs.
Requires 'opencv'
See opencv docs for more info: https://docs.opencv.org/master/dc/d9b/classcv_1_1ppf__match__3d_1_1ICP.html
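Example (usage sketch; requires the optional 'opencv' dependency and loaded inputs):
>>> icp = ICP(max_iterations=50, tolerance=0.01)
>>> icp.fit(ref_dem, tba_dem, transform=transform)
>>> matrix = icp.to_matrix()  # 4x4 rigid transformation matrix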
"""
def __init__(self, max_iterations=100, tolerance=0.05, rejection_scale=2.5, num_levels=6):
"""
Instantiate an ICP coregistration object.
:param max_iterations: The maximum allowed iterations before stopping.
:param tolerance: The residual change threshold after which to stop the iterations.
:param rejection_scale: The threshold (std * rejection_scale) to consider points as outliers.
:param num_levels: Number of octree levels to consider. A higher number is faster but may be more inaccurate.
"""
if not _has_cv2:
raise ValueError("Optional dependency needed. Install 'opencv'")
self.max_iterations = max_iterations
self.tolerance = tolerance
self.rejection_scale = rejection_scale
self.num_levels = num_levels
super().__init__()
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Estimate the rigid transform from tba_dem to ref_dem."""
if weights is not None:
warnings.warn("ICP was given weights, but does not support it.")
bounds, resolution = _transform_to_bounds_and_res(ref_dem.shape, transform)
points: dict[str, np.ndarray] = {}
# Generate the x and y coordinates for the reference_dem
x_coords, y_coords = _get_x_and_y_coords(ref_dem.shape, transform)
centroid = np.array([np.mean([bounds.left, bounds.right]), np.mean([bounds.bottom, bounds.top]), 0.0])
# Subtract by the bounding coordinates to avoid float32 rounding errors.
x_coords -= centroid[0]
y_coords -= centroid[1]
for key, dem in zip(["ref", "tba"], [ref_dem, tba_dem]):
gradient_x, gradient_y = np.gradient(dem)
normal_east = np.sin(np.arctan(gradient_y / resolution)) * -1
normal_north = np.sin(np.arctan(gradient_x / resolution))
normal_up = 1 - np.linalg.norm([normal_east, normal_north], axis=0)
valid_mask = ~np.isnan(dem) & ~np.isnan(normal_east) & ~np.isnan(normal_north)
point_cloud = np.dstack([
x_coords[valid_mask],
y_coords[valid_mask],
dem[valid_mask],
normal_east[valid_mask],
normal_north[valid_mask],
normal_up[valid_mask]
]).squeeze()
points[key] = point_cloud[~np.any(np.isnan(point_cloud), axis=1)].astype("float32")
icp = cv2.ppf_match_3d_ICP(self.max_iterations, self.tolerance, self.rejection_scale, self.num_levels)
if verbose:
print("Running ICP...")
try:
_, residual, matrix = icp.registerModelToScene(points["tba"], points["ref"])
except cv2.error as exception:
if "(expected: 'n > 0'), where" not in str(exception):
raise exception
raise ValueError(
"Not enough valid points in input data."
f"'reference_dem' had {points['ref'].size} valid points."
f"'dem_to_be_aligned' had {points['tba'].size} valid points."
)
if verbose:
print("ICP finished")
assert residual < 1000, f"ICP coregistration failed: residual={residual}, threshold: 1000"
self._meta["centroid"] = centroid
self._meta["matrix"] = matrix
class Deramp(Coreg):
"""
Polynomial DEM deramping.
Estimates an n-D polynomial between the difference of two DEMs.
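Example (usage sketch; degree=2 fits a quadratic surface to the elevation difference):
>>> deramp = Deramp(degree=2)
>>> deramp.fit(ref_dem, tba_dem, transform=transform)
>>> corrected = deramp.apply(tba_dem, transform=transform)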
"""
def __init__(self, degree: int = 1, subsample: Union[int, float] = 5e5):
"""
Instantiate a deramping correction object.
:param degree: The polynomial degree to estimate. degree=0 is a simple bias correction.
:param subsample: Factor for subsampling the input raster for speed-up.
If <= 1, will be considered a fraction of valid pixels to extract.
If > 1 will be considered the number of pixels to extract.
"""
self.degree = degree
self.subsample = subsample
super().__init__()
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Fit the dDEM between the DEMs to a least squares polynomial equation."""
x_coords, y_coords = _get_x_and_y_coords(ref_dem.shape, transform)
ddem = ref_dem - tba_dem
valid_mask = np.isfinite(ddem)
ddem = ddem[valid_mask]
x_coords = x_coords[valid_mask]
y_coords = y_coords[valid_mask]
# Formulate the 2D polynomial whose coefficients will be solved for.
def poly2d(x_coordinates: np.ndarray, y_coordinates: np.ndarray,
coefficients: np.ndarray) -> np.ndarray:
"""
Estimate values from a 2D-polynomial.
:param x_coordinates: x-coordinates of the difference array.
:param y_coordinates: y-coordinates of the difference array (must have the same shape as x_coordinates).
:param coefficients: The coefficients (a, b, c, etc.) of the polynomial.
:raises ValueError: If the number of coefficients is not compatible with the degree.
:returns: The values estimated by the polynomial.
"""
# Check that the coefficient size is correct.
coefficient_size = (self.degree + 1) * (self.degree + 2) / 2
if len(coefficients) != coefficient_size:
raise ValueError(f"Expected {int(coefficient_size)} coefficients for degree {self.degree}, got {len(coefficients)}")
# Build and evaluate the polynomial: sum of all terms x^(k-j) * y^j for total degree k <= self.degree.
estimated_values = np.sum([coefficients[k * (k + 1) // 2 + j] * x_coordinates ** (k - j) *
y_coordinates ** j for k in range(self.degree + 1) for j in range(k + 1)], axis=0)
return estimated_values # type: ignore
def residuals(coefs: np.ndarray, x_coords: np.ndarray, y_coords: np.ndarray, targets: np.ndarray):
res = targets - poly2d(x_coords, y_coords, coefs)
return res[np.isfinite(res)]
if verbose:
print("Estimating deramp function...")
# reduce number of elements for speed
# Get number of points to extract
max_points = np.size(x_coords)
if 0 <= self.subsample <= 1:
npoints = int(self.subsample * max_points)
elif self.subsample > 1:
npoints = int(self.subsample)
else:
raise ValueError("`subsample` must be >= 0")
if max_points > npoints:
indices = np.random.choice(max_points, npoints, replace=False)
x_coords = x_coords[indices]
y_coords = y_coords[indices]
ddem = ddem[indices]
# Optimize polynomial parameters
coefs = scipy.optimize.leastsq(
func=residuals,
x0=np.zeros(shape=((self.degree + 1) * (self.degree + 2) // 2)),
args=(x_coords, y_coords, ddem)
)
self._meta["coefficients"] = coefs[0]
self._meta["func"] = lambda x, y: poly2d(x, y, coefs[0])
def _apply_func(self, dem: np.ndarray, transform: rio.transform.Affine) -> np.ndarray:
"""Apply the deramp function to a DEM."""
x_coords, y_coords = _get_x_and_y_coords(dem.shape, transform)
ramp = self._meta["func"](x_coords, y_coords)
return dem + ramp
def _apply_pts_func(self, coords: np.ndarray) -> np.ndarray:
"""Apply the deramp function to a set of points."""
new_coords = coords.copy()
new_coords[:, 2] += self._meta["func"](new_coords[:, 0], new_coords[:, 1])
return new_coords
def _to_matrix_func(self) -> np.ndarray:
"""Return a transform matrix if possible."""
if self.degree > 1:
raise ValueError(
"Nonlinear deramping degrees cannot be represented as transformation matrices."
f" (max 1, given: {self.degree})")
if self.degree == 1:
raise NotImplementedError("Vertical shift, rotation and horizontal scaling has to be implemented.")
# If degree==0, it's just a bias correction
empty_matrix = np.diag(np.ones(4, dtype=float))
empty_matrix[2, 3] += self._meta["coefficients"][0]
return empty_matrix
class CoregPipeline(Coreg):
"""
A sequential set of coregistration steps.
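Example (usage sketch): a bias correction followed by ICP, built directly or with '+':
>>> pipeline = CoregPipeline([BiasCorr(), ICP()])
>>> pipeline = BiasCorr() + ICP()  # equivalent, via Coreg.__add__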
"""
def __init__(self, pipeline: list[Coreg]):
"""
Instantiate a new coregistration pipeline.
:param pipeline: Coregistration steps to run in the sequence they are given.
"""
self.pipeline = pipeline
super().__init__()
def __repr__(self):
return f"CoregPipeline: {self.pipeline}"
def copy(self: CoregType) -> CoregType:
"""Return an identical copy of the class."""
new_coreg = self.__new__(type(self))
new_coreg.__dict__ = {key: copy.copy(value) for key, value in self.__dict__.items() if key != "pipeline"}
new_coreg.pipeline = [step.copy() for step in self.pipeline]
return new_coreg
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Fit each coregistration step with the previously transformed DEM."""
tba_dem_mod = tba_dem.copy()
for i, coreg in enumerate(self.pipeline):
if verbose:
print(f"Running pipeline step: {i + 1} / {len(self.pipeline)}")
coreg._fit_func(ref_dem, tba_dem_mod, transform=transform, weights=weights, verbose=verbose)
coreg._fit_called = True
tba_dem_mod = coreg.apply(tba_dem_mod, transform)
def _apply_func(self, dem: np.ndarray, transform: rio.transform.Affine) -> np.ndarray:
"""Apply the coregistration steps sequentially to a DEM."""
dem_mod = dem.copy()
for coreg in self.pipeline:
dem_mod = coreg.apply(dem_mod, transform)
return dem_mod
def _apply_pts_func(self, coords: np.ndarray) -> np.ndarray:
"""Apply the coregistration steps sequentially to a set of points."""
coords_mod = coords.copy()
for coreg in self.pipeline:
coords_mod = coreg.apply_pts(coords_mod).reshape(coords_mod.shape)
return coords_mod
def _to_matrix_func(self) -> np.ndarray:
"""Try to join the coregistration steps to a single transformation matrix."""
if not _HAS_P3D:
raise ValueError("Optional dependency needed. Install 'pytransform3d'")
transform_mgr = TransformManager()
with warnings.catch_warnings():
# Deprecation warning from pytransform3d. Let's hope that is fixed in the near future.
warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`")
for i, coreg in enumerate(self.pipeline):
new_matrix = coreg.to_matrix()
transform_mgr.add_transform(i, i + 1, new_matrix)
return transform_mgr.get_transform(0, len(self.pipeline))
def __iter__(self):
"""Iterate over the pipeline steps."""
for coreg in self.pipeline:
yield coreg
def __add__(self, other: Union[list[Coreg], Coreg, CoregPipeline]) -> CoregPipeline:
"""Append Coreg(s) or a CoregPipeline to the pipeline."""
if not isinstance(other, Coreg):
other = list(other)
else:
other = [other]
pipelines = self.pipeline + other
return CoregPipeline(pipelines)
class NuthKaab(Coreg):
"""
Nuth and Kääb (2011) DEM coregistration.
Implemented after the paper:
https://doi.org/10.5194/tc-5-271-2011
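Example (usage sketch; 'ref_dem', 'tba_dem' and 'transform' are assumed to be loaded):
>>> nuth_kaab = NuthKaab(max_iterations=20)
>>> nuth_kaab.fit(ref_dem, tba_dem, transform=transform, verbose=True)
>>> aligned = nuth_kaab.apply(tba_dem, transform=transform)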
"""
def __init__(self, max_iterations: int = 10, offset_threshold: float = 0.05):
"""
Instantiate a new Nuth and Kääb (2011) coregistration object.
:param max_iterations: The maximum allowed iterations before stopping.
:param offset_threshold: The residual offset threshold after which to stop the iterations.
"""
self.max_iterations = max_iterations
self.offset_threshold = offset_threshold
super().__init__()
def _fit_func(self, ref_dem: np.ndarray, tba_dem: np.ndarray, transform: Optional[rio.transform.Affine],
weights: Optional[np.ndarray], verbose: bool = False):
"""Estimate the x/y/z offset between two DEMs."""
if verbose:
print("Running Nuth and Kääb (2011) coregistration")
bounds, resolution = _transform_to_bounds_and_res(ref_dem.shape, transform)
# Make a new DEM which will be modified inplace
aligned_dem = tba_dem.copy()
# Calculate slope and aspect maps from the reference DEM
if verbose:
print(" Calculate slope and aspect")
slope, aspect = calculate_slope_and_aspect(ref_dem)
# Make index grids for the east and north dimensions
east_grid = np.arange(ref_dem.shape[1])
north_grid = np.arange(ref_dem.shape[0])
# Make a function to estimate the aligned DEM (used to construct an offset DEM)
elevation_function = scipy.interpolate.RectBivariateSpline(
x=north_grid, y=east_grid, z=np.where(np.isnan(aligned_dem), -9999, aligned_dem), kx=1, ky=1
)
# Make a function to estimate nodata gaps in the aligned DEM (used to fix the estimated offset DEM)
# Use spline degree 1, as higher degrees will create instabilities around 1 and mess up the nodata mask
nodata_function = scipy.interpolate.RectBivariateSpline(
x=north_grid, y=east_grid, z=np.isnan(aligned_dem), kx=1, ky=1
)
# Initialise east and north pixel offset variables (these will be incremented up and down)
offset_east, offset_north, bias = 0.0, 0.0, 0.0
# Calculate initial dDEM statistics
elevation_difference = ref_dem - aligned_dem
bias = np.nanmedian(elevation_difference)
nmad_old = xdem.spatialstats.nmad(elevation_difference)
if verbose:
print(" Statistics on initial dh:")
print(" Median = {:.2f} - NMAD = {:.2f}".format(bias, nmad_old))
# Iteratively run the analysis until the maximum iterations or until the error gets low enough
if verbose:
print(" Iteratively estimating horizontal shit:")
# If verbose is True, will use progressbar and print additional statements
pbar = trange(self.max_iterations, disable=not verbose, desc=" Progress")
for i in pbar:
# Calculate the elevation difference and the residual (NMAD) between them.
elevation_difference = ref_dem - aligned_dem
bias = np.nanmedian(elevation_difference)
# Correct potential biases
elevation_difference -= bias
# Estimate the horizontal shift from the implementation by Nuth and Kääb (2011)
east_diff, north_diff, _ = get_horizontal_shift( # type: ignore
elevation_difference=elevation_difference,
slope=slope,
aspect=aspect
)
if verbose:
pbar.write(" #{:d} - Offset in pixels : ({:.2f}, {:.2f})".format(i + 1, east_diff, north_diff))
# Increment the offsets with the overall offset
offset_east += east_diff
offset_north += north_diff
# Calculate new elevations from the offset x- and y-coordinates
new_elevation = elevation_function(y=east_grid + offset_east, x=north_grid - offset_north)
# Set NaNs where NaNs were in the original data
new_nans = nodata_function(y=east_grid + offset_east, x=north_grid - offset_north)
new_elevation[new_nans >= 1] = np.nan
# Assign the newly calculated elevations to the aligned_dem
aligned_dem = new_elevation
# Update statistics
elevation_difference = ref_dem - aligned_dem
bias = np.nanmedian(elevation_difference)
nmad_new = xdem.spatialstats.nmad(elevation_difference)
nmad_gain = (nmad_new - nmad_old) / nmad_old*100
if verbose:
pbar.write(" Median = {:.2f} - NMAD = {:.2f} ==> Gain = {:.2f}%".format(bias, nmad_new, nmad_gain))
# Stop if the NMAD is low and a few iterations have been made
assert ~np.isnan(nmad_new), (offset_east, offset_north)
offset = np.sqrt(east_diff**2 + north_diff**2)
if i > 1 and offset < self.offset_threshold:
if verbose:
pbar.write(f" Last offset was below the residual offset threshold of {self.offset_threshold} -> stopping")
break
nmad_old = nmad_new
# Print final results
if verbose:
print("\n Final offset in pixels (east, north) : ({:f}, {:f})".format(offset_east, offset_north))
print(" Statistics on coregistered dh:")
print(" Median = {:.2f} - NMAD = {:.2f}".format(bias, nmad_new))
self._meta["offset_east_px"] = offset_east
self._meta["offset_north_px"] = offset_north
self._meta["bias"] = bias
self._meta["resolution"] = resolution
def _to_matrix_func(self) -> np.ndarray:
"""Return a transformation matrix from the estimated offsets."""
offset_east = self._meta["offset_east_px"] * self._meta["resolution"]
offset_north = self._meta["offset_north_px"] * self._meta["resolution"]
matrix = np.diag(np.ones(4, dtype=float))
matrix[0, 3] += offset_east
matrix[1, 3] += offset_north
matrix[2, 3] += self._meta["bias"]
return matrix
def invert_matrix(matrix: np.ndarray) -> np.ndarray:
"""Invert a transformation matrix."""
with warnings.catch_warnings():
# Deprecation warning from pytransform3d. Let's hope that is fixed in the near future.
warnings.filterwarnings("ignore", message="`np.float` is a deprecated alias for the builtin `float`")
checked_matrix = pytransform3d.transformations.check_matrix(matrix)
# Invert the transform if wanted.
return pytransform3d.transformations.invert_transform(checked_matrix)
def apply_matrix(dem: np.ndarray, transform: rio.transform.Affine, matrix: np.ndarray, invert: bool = False,
centroid: Optional[tuple[float, float, float]] = None,
resampling: Union[int, str] = "bilinear",
dilate_mask: bool = False) -> np.ndarray:
"""
Apply a 3D transformation matrix to a 2.5D DEM.
The transformation is applied as a value correction using linear deramping, and 2D image warping.
1. Convert the DEM into a point cloud (not for gridding; for estimating the DEM shifts).
2. Transform the point cloud in 3D using the 4x4 matrix.
3. Measure the difference in elevation between the original and transformed points.
4. Estimate a linear deramp from the elevation difference, and apply the correction to the DEM values.
5. Convert the horizontal coordinates of the transformed points to pixel index coordinates.
6. Apply the pixel-wise displacement in 2D using the new pixel coordinates.
7. Apply the same displacement to a nodata-mask to exclude previous and/or new nans.
:param dem: The DEM to transform.
:param transform: The Affine transform object (georeferencing) of the DEM.
:param matrix: A 4x4 transformation matrix to apply to the DEM.
:param invert: Invert the transformation matrix.
:param centroid: The X/Y/Z transformation centroid. Irrelevant for pure translations. Defaults to the midpoint (Z=0)
:param resampling: The resampling method to use. Can be `nearest`, `bilinear`, `cubic` or an integer from 0-5.
:param dilate_mask: Dilate the nan mask to exclude edge pixels that could be wrong.
:returns: The transformed DEM with NaNs as nodata values (replaces a potential mask of the input `dem`).
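Example (usage sketch; 'dem', 'transform' and a 4x4 'matrix' are assumed to be defined):
>>> shifted_dem = apply_matrix(dem, transform=transform, matrix=matrix, resampling="bilinear")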
"""
# Parse the resampling argument given.
if isinstance(resampling, int):
resampling_order = resampling
elif resampling == "cubic":
resampling_order = 3
elif resampling == "bilinear":
resampling_order = 1
elif resampling == "nearest":
resampling_order = 0
else:
raise ValueError(
f"`{resampling}` is not a valid resampling mode."
" Choices: [`nearest`, `bilinear`, `cubic`] or an integer."
)
# Copy the DEM to make sure the original is not modified, and convert it into an ndarray
demc = np.array(dem)
# Check if the matrix only contains a Z correction. In that case, only shift the DEM values by the bias.
empty_matrix = np.diag(np.ones(4, float))
empty_matrix[2, 3] = matrix[2, 3]
if np.mean(np.abs(empty_matrix - matrix)) == 0.0:
return demc + matrix[2, 3]
# Opencv is required down from here
if not _has_cv2:
raise ValueError("Optional dependency needed. Install 'opencv'")
nan_mask = xdem.spatial_tools.get_mask(dem)
assert np.count_nonzero(~nan_mask) > 0, "Given DEM had all nans."
# Create a filled version of the DEM. (skimage doesn't like nans)
filled_dem = np.where(~nan_mask, demc, np.nan)
# Get the centre coordinates of the DEM pixels.
x_coords, y_coords = _get_x_and_y_coords(demc.shape, transform)
bounds, resolution = _transform_to_bounds_and_res(dem.shape, transform)
# If a centroid was not given, default to the center of the DEM (at Z=0).
if centroid is None:
centroid = (np.mean([bounds.left, bounds.right]), np.mean([bounds.bottom, bounds.top]), 0.0)
else:
assert len(centroid) == 3, f"Expected centroid to be 3D X/Y/Z coordinate. Got shape of {len(centroid)}"
# Shift the coordinates to centre around the centroid.
x_coords -= centroid[0]
y_coords -= centroid[1]
# Create a point cloud of X/Y/Z coordinates
point_cloud = np.dstack((x_coords, y_coords, filled_dem))
# Shift the Z components by the centroid.
point_cloud[:, 2] -= centroid[2]
if invert:
matrix = invert_matrix(matrix)
# Transform the point cloud using the matrix.
transformed_points = cv2.perspectiveTransform(
point_cloud.reshape((1, -1, 3)),
matrix,
).reshape(point_cloud.shape)
# Estimate the vertical difference of old and new point cloud elevations.
deramp = deramping(
(point_cloud[:, :, 2] - transformed_points[:, :, 2])[~nan_mask].flatten(),
point_cloud[:, :, 0][~nan_mask].flatten(),
point_cloud[:, :, 1][~nan_mask].flatten(),
degree=1
)
# Shift the elevation values of the soon-to-be-warped DEM.
filled_dem -= deramp(x_coords, y_coords)
# Create gap-free arrays of x and y coordinates to be converted into index coordinates.
x_inds = rio.fill.fillnodata(transformed_points[:, :, 0].copy(), mask=(~nan_mask).astype("uint8"))
y_inds = rio.fill.fillnodata(transformed_points[:, :, 1].copy(), mask=(~nan_mask).astype("uint8"))
# Divide the coordinates by the resolution to create index coordinates.
x_inds /= resolution
y_inds /= resolution
# Shift the x coords so that bounds.left is equivalent to xindex -0.5
x_inds -= x_coords.min() / resolution
# Shift the y coords so that bounds.top is equivalent to yindex -0.5
y_inds = (y_coords.max() / resolution) - y_inds
# Create a skimage-compatible array of the new index coordinates that the pixels shall have after warping.
inds = np.vstack((y_inds.reshape((1,) + y_inds.shape), x_inds.reshape((1,) + x_inds.shape)))
with warnings.catch_warnings():
# A skimage warning that will hopefully be fixed soon. (2021-07-30)
warnings.filterwarnings("ignore", message="Passing `np.nan` to mean no clipping in np.clip")
# Warp the DEM
transformed_dem = skimage.transform.warp(
filled_dem,
inds,
order=resampling_order,
mode="constant",
cval=np.nan,
preserve_range=True
)
# Warp the NaN mask, setting true to all values outside the new frame.
tr_nan_mask = skimage.transform.warp(
nan_mask.astype("uint8"),
inds,
order=resampling_order,
mode="constant",
cval=1,
preserve_range=True
) > 0.1 # Due to different interpolation approaches, everything above 0.1 is assumed to be 1 (True)
if dilate_mask:
tr_nan_mask = scipy.ndimage.morphology.binary_dilation(tr_nan_mask, iterations=resampling_order)
# Apply the transformed nan_mask
transformed_dem[tr_nan_mask] = np.nan
assert np.count_nonzero(~np.isnan(transformed_dem)) > 0, "Transformed DEM has all nans."
return transformed_dem
"""
Group-wise function alignment using SRSF framework and Dynamic Programming
moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import matplotlib.pyplot as plt
import fdasrsf.utility_functions as uf
import fdasrsf.bayesian_functions as bf
import fdasrsf.fPCA as fpca
import fdasrsf.geometry as geo
from scipy.integrate import trapz, cumtrapz
from scipy.interpolate import interp1d
from scipy.linalg import svd, cholesky
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform, pdist
import GPy
from numpy.linalg import norm, inv
from numpy.random import rand, normal
from joblib import Parallel, delayed
from fdasrsf.fPLS import pls_svd
from tqdm import tqdm
import fdasrsf.plot_style as plot
import fpls_warp as fpls
import collections
class fdawarp:
"""
This class provides alignment methods for functional data using the SRVF framework
Usage: obj = fdawarp(f,t)
:param f: (M,N): matrix defining N functions of M samples
:param time: time vector of length M
:param fn: aligned functions
:param qn: aligned srvfs
:param q0: initial srvfs
:param fmean: function mean
:param mqn: mean srvf
:param gam: warping functions
:param psi: srvf of warping functions
:param stats: alignment statistics
:param qun: cost function
:param lambda: elasticity parameter used in the alignment
:param method: optimization method
:param gamI: inverse warping function
:param rsamps: random samples
:param fs: random aligned functions
:param gams: random warping functions
:param ft: random warped functions
:param qs: random aligned srvfs
:param type: alignment type
:param mcmc: mcmc output if bayesian
Author : <NAME> (JDT) <jdtuck AT sandia.gov>
Date : 15-Mar-2018
"""
def __init__(self, f, time):
"""
Construct an instance of the fdawarp class
:param f: numpy ndarray of shape (M,N) of N functions with M samples
:param time: vector of size M describing the sample points
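Example: build an fdawarp object from 20 synthetic functions on a common grid:
>>> import numpy as np
>>> time = np.linspace(0, 1, 101)
>>> f = np.column_stack([np.sin(2 * np.pi * (time - d)) for d in np.random.rand(20) * 0.1])
>>> obj = fdawarp(f, time)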
"""
a = time.shape[0]
if f.shape[0] != a:
raise Exception('Number of rows of f must equal the length of time')
self.f = f
self.time = time
self.rsamps = False
def srsf_align(self, method="mean", omethod="DP2", center=True,
smoothdata=False, MaxItr=20, parallel=False, lam=0.0,
cores=-1, grid_dim=7):
"""
This function aligns a collection of functions using the elastic
square-root slope (srsf) framework.
:param method: (string) warp calculate Karcher Mean or Median
(options = "mean" or "median") (default="mean")
:param omethod: optimization method (DP, DP2, RBFGS) (default = DP2)
:param center: center warping functions (default = T)
:param smoothdata: Smooth the data using a box filter (default = F)
:param MaxItr: Maximum number of iterations (default = 20)
:param parallel: run in parallel (default = F)
:param lam: controls the elasticity (default = 0)
:param cores: number of cores for parallel (default = -1 (all))
:param grid_dim: size of the grid, for the DP2 method only (default = 7)
:type lam: double
:type smoothdata: bool
Examples
>>> import tables
>>> fun=tables.open_file("../Data/simu_data.h5")
>>> f = fun.root.f[:]
>>> f = f.transpose()
>>> time = fun.root.time[:]
>>> obj = fs.fdawarp(f,time)
>>> obj.srsf_align()
"""
M = self.f.shape[0]
N = self.f.shape[1]
self.lam = lam
if M > 500:
parallel = True
elif N > 100:
parallel = True
eps = np.finfo(np.double).eps
f0 = self.f
self.method = omethod
methods = ["mean", "median"]
self.type = method
# 0 mean, 1-median
method = [i for i, x in enumerate(methods) if x == method]
if len(method) == 0:
method = 0
else:
method = method[0]
# Compute SRSF function from data
f, g, g2 = uf.gradient_spline(self.time, self.f, smoothdata)
q = g / np.sqrt(abs(g) + eps)
print("Initializing...")
mnq = q.mean(axis=1)
a = mnq.repeat(N)
d1 = a.reshape(M, N)
d = (q - d1) ** 2
dqq = np.sqrt(d.sum(axis=0))
min_ind = dqq.argmin()
mq = q[:, min_ind]
mf = f[:, min_ind]
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq, self.time,
q[:, n], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
gam = np.zeros((M,N))
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq,self.time,q[:,k],omethod,lam,grid_dim)
gamI = uf.SqrtMeanInverse(gam)
mf = np.interp((self.time[-1] - self.time[0]) * gamI + self.time[0], self.time, mf)
mq = uf.f_to_srsf(mf, self.time)
# Compute Karcher Mean
if method == 0:
print("Compute Karcher Mean of %d function in SRSF space..." % N)
if method == 1:
print("Compute Karcher Median of %d function in SRSF space..." % N)
ds = np.repeat(0.0, MaxItr + 2)
ds[0] = np.inf
qun = np.repeat(0.0, MaxItr + 1)
tmp = np.zeros((M, MaxItr + 2))
tmp[:, 0] = mq
mq = tmp
tmp = np.zeros((M, MaxItr+2))
tmp[:,0] = mf
mf = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = self.f
f = tmp
tmp = np.zeros((M, N, MaxItr + 2))
tmp[:, :, 0] = q
q = tmp
for r in range(0, MaxItr):
print("updating step: r=%d" % (r + 1))
if r == (MaxItr - 1):
print("maximal number of iterations is reached")
# Matching Step
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r],
self.time, q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0],
omethod, lam, grid_dim)
gam_dev = np.zeros((M, N))
vtil = np.zeros((M,N))
dtil = np.zeros(N)
for k in range(0, N):
f[:, k, r + 1] = np.interp((self.time[-1] - self.time[0]) * gam[:, k]
+ self.time[0], self.time, f[:, k, 0])
q[:, k, r + 1] = uf.f_to_srsf(f[:, k, r + 1], self.time)
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
v = q[:, k, r + 1] - mq[:,r]
d = np.sqrt(trapz(v*v, self.time))
vtil[:,k] = v/d
dtil[k] = 1.0/d
mqt = mq[:, r]
a = mqt.repeat(N)
d1 = a.reshape(M, N)
d = (q[:, :, r + 1] - d1) ** 2
if method == 0:
d1 = sum(trapz(d, self.time, axis=0))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = qtemp.mean(axis=1)
mf[:, r + 1] = ftemp.mean(axis=1)
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if method == 1:
d1 = np.sqrt(sum(trapz(d, self.time, axis=0)))
d2 = sum(trapz((1 - np.sqrt(gam_dev)) ** 2, self.time, axis=0))
ds_tmp = d1 + lam * d2
ds[r + 1] = ds_tmp
# Minimization Step
# compute the mean of the matched function
stp = .3
vbar = vtil.sum(axis=1)*(1/dtil.sum())
qtemp = q[:, :, r + 1]
ftemp = f[:, :, r + 1]
mq[:, r + 1] = mq[:,r] + stp*vbar
tmp = np.zeros(M)
tmp[1:] = cumtrapz(mq[:, r + 1] * np.abs(mq[:, r + 1]), self.time)
mf[:, r + 1] = np.median(f0[1, :])+tmp
qun[r] = norm(mq[:, r + 1] - mq[:, r]) / norm(mq[:, r])
if qun[r] < 1e-2 or r >= MaxItr:
break
# Last Step with centering of gam
if center:
r += 1
if parallel:
out = Parallel(n_jobs=cores)(delayed(uf.optimum_reparam)(mq[:, r], self.time,
q[:, n, 0], omethod, lam, grid_dim) for n in range(N))
gam = np.array(out)
gam = gam.transpose()
else:
for k in range(0,N):
gam[:,k] = uf.optimum_reparam(mq[:, r], self.time, q[:, k, 0], omethod,
lam, grid_dim)
gam_dev = np.zeros((M, N))
for k in range(0, N):
gam_dev[:, k] = np.gradient(gam[:, k], 1 / float(M - 1))
gamI = uf.SqrtMeanInverse(gam)
gamI_dev = np.gradient(gamI, 1 / float(M - 1))
time0 = (self.time[-1] - self.time[0]) * gamI + self.time[0]
mq[:, r + 1] = np.interp(time0, self.time, mq[:, r]) * np.sqrt(gamI_dev)
for k in range(0, N):
q[:, k, r + 1] = np.interp(time0, self.time, q[:, k, r]) * np.sqrt(gamI_dev)
f[:, k, r + 1] = np.interp(time0, self.time, f[:, k, r])
import numpy as np
from sklearn.linear_model import LinearRegression
import os
import pandas as pd
import statsmodels.api as sm
# Define working folder
out_folder = 'results'
if not os.path.exists(out_folder):
os.makedirs(out_folder)
os.chdir(out_folder)
# Define countries and states to loop over
file_list = ['Average_Temperature_Country_Level.csv','Average_Temperature_State_Level.csv','Average_Temperature_Global_Level.csv']
#Define years for linear regression
years = np.arange(1950,2021)
years_1970 = np.arange(1970,2021)
for file_name in file_list:
country_df = pd.read_csv(file_name)
if 'Global' in file_name:
country_df['Region'] = 'Global'
#Separate attribute columns and data
attribute_columns = [x for x in list(country_df) if x not in map(str, years)]
attribute_columns = [x for x in attribute_columns if x not in ['system:index','.geo']]
out_df = country_df.copy()[attribute_columns]
#Get temperature columns and data
temperature_columns = [x for x in list(country_df) if x in map(str, years)]
temperature_columns.sort()
temperature_data = country_df[temperature_columns]
#Reorder columns and resave CSV
select_columns = attribute_columns+temperature_columns
select_columns = [x for x in select_columns if x not in ['system:index','.geo']]
country_df = country_df[select_columns]
country_df.to_csv(file_name,index=False)
for i,row in temperature_data.iterrows():
#Check if data is available for all years
if ~row.isnull().any():
#Calculate change in average temperature over different time period lengths
change = np.mean(row[[str(x) for x in np.arange(2009, 2021)]])
#!/usr/bin/env python -u
"""staticbyte.py - encode a stream of bytes as static noise
Usage:
python -u staticbyte.py (-e | -d) <FILE> [-s <N>]
python -u staticbyte.py -h
Options:
-s Set the chip size (default: 4096)
-h Print this help
"""
from __future__ import print_function
import numpy
import os
import sys
import wave
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'ISC'
__status__ = 'Development'
__version__ = '0.1'
PCM_DTYPE = numpy.int16
PCM_MAX = 2.0 ** (16 - 1) - 1.0
def readbytes(f):
while True:
char = f.read(1)
if char == '':
break
byte = ord(char)
yield byte
def xcor(x, y):
x = numpy.copy(x)
y = numpy.copy(y[::-1])
n = x.size + y.size
x.resize(n)
y.resize(n)
X = numpy.fft.rfft(x)
Y = numpy.fft.rfft(y)
return numpy.fft.irfft(X * Y)  # product in the frequency domain gives the circular cross-correlation
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
License:
Copyright (c) 2019 <NAME>, <NAME>, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Usage:
null_list = find_nulls(nx,ny,nz, xv,yv,zv, B_x1,B_y1,B_z1, tolerance=None)
This function checks the existence of null points and localize them in the grid
Parameters: nx, ny, nz, xv, yv, zv, B_x1, B_y1, B_z1, tolerence(=None)
nx, ny, nz represent the resolution of the grid
xv, yv, zv are the coordinate in the x, y and z directions
B_x1, B_y1, B_z1 are the components of the B field
tolerance sets the exit condition for Newton's method (root finding)
Returns: null_list
null_list is a concatenated list of coordinates for all detected nulls
"""
import numpy as np
import math
from numpy.linalg import inv
def find_nulls(nx,ny,nz,xv,yv,zv,B_x1,B_y1,B_z1, tolerance=None):
null_list = []
num_nulls = 0
# calculating the sign change of the field components at the corners of the cells of the grid
bx_sc = field_sign_change(B_x1)
by_sc = field_sign_change(B_y1)
bz_sc = field_sign_change(B_z1)
# REDUCTION STAGE: keeping the indices of those cells for which the field components change sign at one of the vertices of the cells
ind_list = np.array(np.where(bx_sc & by_sc & bz_sc)).T
if not tolerance: tolerance = 10**-5
# looping over the cells that pass the reduction stage
for ind in ind_list:
# retrieving the indices that satisfy the reduction stage
i = ind[0]
j = ind[1]
k = ind[2]
# trilinear interpolation
tri_x = trilinear_coeffs(xv,yv,zv,i,j,k,B_x1)
tri_y = trilinear_coeffs(xv,yv,zv,i,j,k,B_y1)
tri_z = trilinear_coeffs(xv,yv,zv,i,j,k,B_z1)
trilinear = np.array([tri_x,tri_y,tri_z])
# BILINEAR STAGE
# creating three lists that store the sign of each field component on the faces of the cube
# the sign is appended only if the location given by the bilinear interpolation is on the face that is being considered
bxx = []
byy = []
bzz = []
# FACE 1
# f is the parameter that tells the code if we're on an x/y/z face
f = 0
face1 = xv[i]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = x_face(face1,trilinear[0],trilinear[1])
z_sign1 = bilinear(bxby,yv,zv,j,k,face1,tri_z,f)
# append the sublist to the main list only if it is not empty
if z_sign1:
bzz.append(z_sign1)
# by = 0 and bz = 0
bybz = x_face(face1,trilinear[1],trilinear[2])
x_sign1 = bilinear(bybz,yv,zv,j,k,face1,tri_x,f)
if x_sign1:
bxx.append(x_sign1)
# bx = 0 and bz = 0
bxbz = x_face(face1,trilinear[0],trilinear[2])
y_sign1 = bilinear(bxbz,yv,zv,j,k,face1,tri_y,f)
if y_sign1:
byy.append(y_sign1)
# FACE 2
f = 0
face2 = xv[i+1]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = x_face(face2,trilinear[0],trilinear[1])
z_sign2 = bilinear(bxby,yv,zv,j,k,face2,tri_z,f)
if z_sign2:
bzz.append(z_sign2)
# by = 0 and bz = 0
bybz = x_face(face2,trilinear[1],trilinear[2])
x_sign2 = bilinear(bybz,yv,zv,j,k,face2,tri_x,f)
if x_sign2:
bxx.append(x_sign2)
# bx = 0 and bz = 0
bxbz = x_face(face2,trilinear[0],trilinear[2])
y_sign2 = bilinear(bxbz,yv,zv,j,k,face2,tri_y,f)
if y_sign2:
byy.append(y_sign2)
# FACE 3
f = 1
face3 = yv[j]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = y_face(face3,trilinear[0],trilinear[1])
z_sign3 = bilinear(bxby,xv,zv,i,k,face3,tri_z,f)
if z_sign3:
bzz.append(z_sign3)
# by = 0 and bz = 0
bybz = y_face(face3,trilinear[1],trilinear[2])
x_sign3 = bilinear(bybz,xv,zv,i,k,face3,tri_x,f)
if x_sign3:
bxx.append(x_sign3)
# bx = 0 and bz = 0
bxbz = y_face(face3,trilinear[0],trilinear[2])
y_sign3 = bilinear(bxbz,xv,zv,i,k,face3,tri_y,f)
if y_sign3:
byy.append(y_sign3)
# FACE 4
f = 1
face4 = yv[j+1]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = y_face(face4,trilinear[0],trilinear[1])
z_sign4 = bilinear(bxby,xv,zv,i,k,face4,tri_z,f)
if z_sign4:
bzz.append(z_sign4)
# by = 0 and bz = 0
bybz = y_face(face4,trilinear[1],trilinear[2])
x_sign4 = bilinear(bybz,xv,zv,i,k,face4,tri_x,f)
if x_sign4:
bxx.append(x_sign4)
# bx = 0 and bz = 0
bxbz = y_face(face4,trilinear[0],trilinear[2])
y_sign4 = bilinear(bxbz,xv,zv,i,k,face4,tri_y,f)
if y_sign4:
byy.append(y_sign4)
# FACE 5
f = 2
face5 = zv[k]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = z_face(face5,trilinear[0],trilinear[1])
z_sign5 = bilinear(bxby,xv,yv,i,j,face5,tri_z,f)
if z_sign5:
bzz.append(z_sign5)
# by = 0 and bz = 0
bybz = z_face(face5,trilinear[1],trilinear[2])
x_sign5 = bilinear(bybz,xv,yv,i,j,face5,tri_x,f)
if x_sign5:
bxx.append(x_sign5)
# bx = 0 and bz = 0
bxbz = z_face(face5,trilinear[0],trilinear[2])
y_sign5 = bilinear(bxbz,xv,yv,i,j,face5,tri_y,f)
if y_sign5:
byy.append(y_sign5)
# FACE 6
f = 2
face6 = zv[k+1]
# bx = 0 and by = 0
# get bilinear coefficients
bxby = z_face(face6,trilinear[0],trilinear[1])
z_sign6 = bilinear(bxby,xv,yv,i,j,face6,tri_z,f)
if z_sign6:
bzz.append(z_sign6)
# by = 0 and bz = 0
bybz = z_face(face6,trilinear[1],trilinear[2])
x_sign6 = bilinear(bybz,xv,yv,i,j,face6,tri_x,f)
if x_sign6:
bxx.append(x_sign6)
# bx = 0 and bz = 0
bxbz = z_face(face6,trilinear[0],trilinear[2])
y_sign6 = bilinear(bxbz,xv,yv,i,j,face6,tri_y,f)
if y_sign6:
byy.append(y_sign6)
# making flat lists
bxx = [item for sublist in bxx for item in sublist]
byy = [item for sublist in byy for item in sublist]
bzz = [item for sublist in bzz for item in sublist]
# if the function check_sign detects a change in sign in at least one of the three field components, then a single null point must exist in the cell
# hence, apply Newton-Raphson method to find its location
if (not check_sign(bxx)) or (not check_sign(byy)) or (not check_sign(bzz)):
# if not (check_sign(bxx) and check_sign(byy) and check_sign(bzz)):
# NEWTON RAPHSON METHOD
# first guess: centre of the cube
xg = 0.5
yg = 0.5
zg = 0.5
xs = xv[i]+(xv[i+1]-xv[i])*xg
ys = yv[j]+(yv[j+1]-yv[j])*yg
zs = zv[k]+(zv[k+1]-zv[k])*zg
# grid size
delta_x = xv[i+1]-xv[i]
delta_y = yv[j+1]-yv[j]
delta_z = zv[k+1]-zv[k]
# values of solution
x = [0]
y = [0]
z = [0]
# step size
step_x = []
step_y = []
step_z = []
# error relative to the local grid size
err_rel_grid = []
# error relative to the solution
err_rel_sol = []
converged = False
# set a counter to limit the number of iterations
n_steps = 0
while (not converged) and (n_steps < 11):
n_steps += 1
# calculating B field magnitude and components at the guessed location
B = B_field(xs,ys,zs,trilinear)
jac = jacobian(xs,ys,zs,trilinear)
if np.linalg.det(jac)==0:
print('The matrix is singular')
break
else:
jac_inv = inv(jacobian(xs,ys,zs,trilinear))
xs_prev = xs
ys_prev = ys
zs_prev = zs
xs = xs_prev-(jac_inv[0,0]*B[1]+jac_inv[0,1]*B[2]+jac_inv[0,2]*B[3])
ys = ys_prev-(jac_inv[1,0]*B[1]+jac_inv[1,1]*B[2]+jac_inv[1,2]*B[3])
zs = zs_prev-(jac_inv[2,0]*B[1]+jac_inv[2,1]*B[2]+jac_inv[2,2]*B[3])
new_B = B_field(xs,ys,zs,trilinear)
step_x.append(xs-xs_prev)
step_y.append(ys-ys_prev)
step_z.append(zs-zs_prev)
x.append(xs_prev+step_x[-1])
y.append(ys_prev+step_y[-1])
z.append(zs_prev+step_z[-1])
err_rel_grid.append(math.sqrt((step_x[-1]/delta_x)**2+(step_y[-1]/delta_y)**2+(step_z[-1]/delta_z)**2))
err_rel_sol.append(math.sqrt((step_x[-1]/x[-1])**2+(step_y[-1]/y[-1])**2+(step_z[-1]/z[-1])**2))
if np.max([err_rel_grid[-1], err_rel_sol[-1]]) < tolerance:
converged = True
B1 = math.sqrt(B_x1[i,j,k]**2 + B_y1[i,j,k]**2 + B_z1[i,j,k]**2)
B2 = math.sqrt(B_x1[i+1,j,k]**2 + B_y1[i+1,j,k]**2 + B_z1[i+1,j,k]**2)
B3 = math.sqrt(B_x1[i,j+1,k]**2 + B_y1[i,j+1,k]**2 + B_z1[i,j+1,k]**2)
B4 = math.sqrt(B_x1[i+1,j+1,k]**2 + B_y1[i+1,j+1,k]**2 + B_z1[i+1,j+1,k]**2)
B5 = math.sqrt(B_x1[i,j,k+1]**2 + B_y1[i,j,k+1]**2 + B_z1[i,j,k+1]**2)
B6 = math.sqrt(B_x1[i+1,j,k+1]**2 + B_y1[i+1,j,k+1]**2 + B_z1[i+1,j,k+1]**2)
B7 = math.sqrt(B_x1[i,j+1,k+1]**2 + B_y1[i,j+1,k+1]**2 + B_z1[i,j+1,k+1]**2)
B8 = math.sqrt(B_x1[i+1,j+1,k+1]**2 + B_y1[i+1,j+1,k+1]**2 + B_z1[i+1,j+1,k+1]**2)
if n_steps>100:
print('Maximum number of steps exceeded -- exiting')
if converged:
if ((xv[i] <= xs <= xv[i+1]) and (yv[j] <= ys <= yv[j+1]) and (zv[k] <= zs <= zv[k+1])):
if new_B[0] < tolerance*np.mean([B1,B2,B3,B4,B5,B6,B7,B8]):
num_nulls+=1
# here if we want, we can also get the eigenvectors/eigenvalues
# use your previous function to get jacobian of magnetic field
# use numpy.linalg.eig to find eigen-stuff of jacobian
if zs <= zv[-2]: # this excludes the null points located on the null line that goes around the two outermost shells
this_null = {'i':i, 'j':j, 'k':k, 'n': num_nulls, 'x': xs, 'y': ys, 'z': zs, 'B': new_B[0], 'Error' : np.array([err_rel_grid[-1], err_rel_sol[-1]]).max(), 'iter' : n_steps }
null_list.append(this_null)
return(null_list)
# function that checks if Bx/By/Bz changes sign:
# it compares the length of the list with the occurrence of each sign
# if '1' (positive) appears 8 times, then B has the same sign at all 8 corners
# similarly for -1 (negative) and 0 (field component = 0)
def check_sign(vertices):
if len(vertices) < 1:
return True
return len(vertices) == vertices.count(vertices[0])
def field_sign_change (f):
# returns a mask of dim (nx-1, ny-1, nz-1).
# true implies that the component changes signs at one of the vertices of the rhs cell.
p000 = (np.roll(f, (-0, -0, -0), axis=(0, 1, 2)) > 0)
# coding: utf-8
import warnings
import numpy as np
import pandas as pd
from packaging import version
from sklearn.metrics import pairwise_distances_chunked
from sklearn.utils import check_X_y,check_random_state
from sklearn.preprocessing import LabelEncoder
import functools
from pyclustering.cluster.clarans import clarans
from pyclustering.utils import timedcall
from pyclustering.utils import (draw_clusters,
average_inter_cluster_distance,
average_intra_cluster_distance,
average_neighbor_distance)
import sklearn
from sklearn.metrics import (davies_bouldin_score,
silhouette_score,
pairwise_distances,
calinski_harabasz_score
)
# They changed the name of calinski_harabaz_score in later version of sklearn:
# https://github.com/scikit-learn/scikit-learn/blob/c4733f4895c1becdf587b38970f6f7066656e3f9/doc/whats_new/v0.20.rst#id2012
sklearn_version = version.parse(sklearn.__version__)
nm_chg_ver = version.parse("0.23")
if sklearn_version >= nm_chg_ver:
from sklearn.metrics import calinski_harabasz_score as _cal_score
else:
from sklearn.metrics import calinski_harabaz_score as _cal_score
def _get_clust_pairs(clusters):
return [(i, j) for i in clusters for j in clusters if i > j]
def _dunn(data=None, dist=None, labels=None):
clusters = set(labels)
inter_dists = [
dist[np.ix_(labels == i, labels == j)].min()
for i, j in _get_clust_pairs(clusters)
]
intra_dists = [
dist[np.ix_(labels == i, labels == i)].max()
for i in clusters
]
return min(inter_dists) / max(intra_dists)
def dunn(dist, labels):
return _dunn(data=None, dist=dist, labels=labels)
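# Example (usage sketch): Dunn index from a precomputed distance matrix, assuming
# 'X' is an (n_samples, n_features) array and 'labels' its cluster assignment:
#     dist = pairwise_distances(X)
#     score = dunn(dist, labels)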
def cop(data, dist, labels):
clusters = set(labels)
cpairs = _get_clust_pairs(clusters)
prox_lst = [
dist[np.ix_(labels == i[0], labels == i[1])].max()
for i in cpairs
]
out_l = []
for c in clusters:
c_data = data[labels == c]
c_center = c_data.mean(axis=0, keepdims=True)
c_intra = pairwise_distances(c_data, c_center).mean()
c_prox = [prox for pair, prox in zip(cpairs, prox_lst) if c in pair]
c_inter = min(c_prox)
to_add = len(c_data) * c_intra / c_inter
out_l.append(to_add)
return sum(out_l) / len(labels)
def _silhouette_score2(data=None, dist=None, labels=None):
return silhouette_score(dist, labels, metric='precomputed')
def _davies_bouldin_score2(data=None, dist=None, labels=None):
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'divide by zero')
return davies_bouldin_score(data, labels)
def _calinski_harabaz_score2(data=None, dist=None, labels=None):
return _cal_score(data, labels)
def check_number_of_labels(n_labels, n_samples):
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
def cluster_dist_reduce(D_chunk, start, labels, label_freqs):
# accumulate distances from each sample to each cluster
clust_dists = np.zeros((len(D_chunk), len(label_freqs)),
dtype=D_chunk.dtype)
for i in range(len(D_chunk)):
clust_dists[i] += np.bincount(labels, weights=D_chunk[i],
minlength=len(label_freqs))
# intra_index selects intra-cluster distances within clust_dists
intra_index = (np.arange(len(D_chunk)), labels[start:start + len(D_chunk)])
# intra_clust_dists are averaged over cluster size outside this function
intra_clust_dists = clust_dists[intra_index]
# of the remaining distances we normalise and extract the minimum
clust_dists[intra_index] = np.inf
clust_dists /= label_freqs
inter_clust_dists = clust_dists.min(axis=1)
return intra_clust_dists, inter_clust_dists
def inter_cluster_dist(data=None, dist=None, labels=None):
_, inter_dist = cluster_distances(dist, labels, metric='precomputed')
return inter_dist
def intra_cluster_dist(data=None, dist=None, labels=None):
intra_dist, _ = cluster_distances(dist, labels, metric='precomputed')
return intra_dist
def cluster_distances(X, labels, *, metric='precomputed', random_state=None, **kwds):
return intra_inter_distances(X, labels, metric=metric, **kwds)
def intra_inter_distances(X, labels, metric='precomputed'):
# Check for non-zero diagonal entries in precomputed distance matrix
atol = np.finfo(X.dtype).eps * 100
"""
Napari-time_series_plotter test module.
"""
from napari_time_series_plotter._dock_widget import *
import pytest
import numpy as np
# fixture for LayerSelector class tests
@pytest.fixture
def selector(make_napari_viewer):
_sync = None
viewer = make_napari_viewer(show=False)
viewer.add_image(np.random.rand(10, 10, 10), name='3D')
yield LayerSelector(viewer)
# fixture for VoxelPlotter class tests
@pytest.fixture
def plotter(make_napari_viewer):
_sync = None
viewer = make_napari_viewer(show=False)
viewer.add_image(np.random.rand(10, 10, 10), name='3D')
yield VoxelPlotter(viewer)
# -*- coding: utf-8 -*-
"""Tests for BARD interaction module"""
from time import sleep
import math
import pytest
import numpy as np
import sksurgerybard.interaction.interaction as inter
class WritePointerEvent(Exception):#pylint: disable=missing-class-docstring
pass
class CycleAnatomyEvent(Exception):#pylint: disable=missing-class-docstring
pass
class NextTargetEvent(Exception):#pylint: disable=missing-class-docstring
pass
class TurnOnAllEvent(Exception):#pylint: disable=missing-class-docstring
pass
class VisibilityToggleEvent(Exception):#pylint: disable=missing-class-docstring
pass
class ChangeOpacityEvent(Exception):#pylint: disable=missing-class-docstring
pass
class PositionModelEvent(Exception):#pylint: disable=missing-class-docstring
def __init__(self, increment):
super().__init__()
self.increment = increment
class StopTrackingEvent(Exception):#pylint: disable=missing-class-docstring
pass
class StartTrackingEvent(Exception):#pylint: disable=missing-class-docstring
pass
class _FakePointerWriter:
def write_pointer_tip(self): # pylint: disable=no-self-use
"""Raises an exception so we know when it's run"""
raise WritePointerEvent
class _FakeKBEvent:
def __init__(self, keycode):
self._key = keycode
def GetKeySym(self):# pylint: disable=invalid-name
"""return a key symbol"""
return self._key
class _FakeMouseEvent:
def __init__(self, size, position):
self._size = size
self._position = position
def GetEventPosition(self):# pylint: disable=invalid-name
"""return mouse position"""
return self._position
def GetSize(self):# pylint: disable=invalid-name
"""return mouse position"""
return self._size
class _FakeVisualisationControl:
def cycle_visible_anatomy_vis(self): # pylint: disable=no-self-use
"""Raises an error so we know when it's run"""
raise CycleAnatomyEvent
def next_target(self): # pylint: disable=no-self-use
"""Raises an error so we know when it's run"""
raise NextTargetEvent
def turn_on_all_targets(self): # pylint: disable=no-self-use
"""Raises an error so we know when it's run"""
raise TurnOnAllEvent
def visibility_toggle(self, _): # pylint: disable=no-self-use
"""Raises an error so we know when it's run"""
raise VisibilityToggleEvent
def change_opacity(self, _): # pylint: disable=no-self-use
"""Raises an error so we know when it's run"""
raise ChangeOpacityEvent
class _FakeBardWidget:
def position_model_actors(self, increment): # pylint: disable=no-self-use
"""Raises and error so we know it's run"""
raise PositionModelEvent(increment)
class transform_manager: #pylint: disable=invalid-name
"""A fake transform manager"""
def get(transform_name):# pylint: disable=no-self-argument
"""A fake get function"""
return transform_name
class tracker: #pylint: disable=invalid-name
"""A fake tracker"""
def stop_tracking():# pylint: disable=no-method-argument
"""A fake stop tracking function"""
raise StopTrackingEvent
def start_tracking():# pylint: disable=no-method-argument
"""A fake start tracking function"""
raise StartTrackingEvent
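# Each fake above raises a distinct sentinel exception (or returns a canned value),
# so the tests below can assert exactly which controller method a key press or
# mouse event was dispatched to, without needing a real tracker or renderer.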
def test_keyboard_event():
"""
KB event check
"""
event = _FakeKBEvent('d')
kb_event = inter.BardKBEvent(_FakePointerWriter(),
_FakeVisualisationControl(),
_FakeBardWidget())
with pytest.raises(WritePointerEvent):
kb_event(event, None)
event = _FakeKBEvent('e')
kb_event(event, None)
event = _FakeKBEvent('b')
with pytest.raises(CycleAnatomyEvent):
kb_event(event, None)
event = _FakeKBEvent('n')
with pytest.raises(NextTargetEvent):
kb_event(event, None)
event = _FakeKBEvent('m')
with pytest.raises(TurnOnAllEvent):
kb_event(event, None)
event = _FakeKBEvent('Down')
with pytest.raises(StartTrackingEvent):
kb_event(event, None)
event = _FakeKBEvent('Up')
with pytest.raises(StopTrackingEvent):
kb_event(event, None)
def test_keyboard_translations():
"""
Check that the translation events work
"""
kb_event = inter.BardKBEvent(_FakePointerWriter(),
_FakeVisualisationControl(),
_FakeBardWidget())
event = _FakeKBEvent('5')
expected_increment = np.array([[1., 0., 0., 1.],
[0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('t')
expected_increment = np.array([[1., 0., 0., -1.],
[0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('6')
expected_increment = np.array([[1., 0., 0., 0.],
[0., 1., 0., 1.], [0., 0., 1., 0.], [0., 0., 0., 1.]])
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('y')
expected_increment = np.array([[1., 0., 0., 0.],
[0., 1., 0., -1.], [0., 0., 1., 0.], [0., 0., 0., 1.]])
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('7')
expected_increment = np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.], [0., 0., 1., 1.], [0., 0., 0., 1.]])
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('u')
expected_increment = np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.], [0., 0., 1., -1.], [0., 0., 0., 1.]])
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('u')
with pytest.raises(ValueError):
kb_event._translate_model('r') #pylint:disable = protected-access
def test_keyboard_rotations():
"""
Check that the rotations work
"""
kb_event = inter.BardKBEvent(_FakePointerWriter(),
_FakeVisualisationControl(),
_FakeBardWidget())
event = _FakeKBEvent('8')
expected_increment = np.eye(4)
expected_increment[1][1]=np.cos(math.pi/180.)
expected_increment[1][2]=-np.sin(math.pi/180.)
expected_increment[2][1]=np.sin(math.pi/180.)
expected_increment[2][2]=np.cos(math.pi/180.)
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('i')
expected_increment = np.eye(4)
expected_increment[1][1]=np.cos(-math.pi/180.)
expected_increment[1][2]=-np.sin(-math.pi/180.)
expected_increment[2][1]=np.sin(-math.pi/180.)
expected_increment[2][2]=np.cos(-math.pi/180.)
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('9')
expected_increment = np.eye(4)
expected_increment[0][0]=np.cos(math.pi/180.)
expected_increment[0][2]=np.sin(math.pi/180.)
expected_increment[2][0]=-np.sin(math.pi/180.)
expected_increment[2][2]=np.cos(math.pi/180.)
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('o')
expected_increment = np.eye(4)
expected_increment[0][0]=np.cos(-math.pi/180.)
expected_increment[0][2]=np.sin(-math.pi/180.)
expected_increment[2][0]=-np.sin(-math.pi/180.)
expected_increment[2][2]=np.cos(-math.pi/180.)
try:
kb_event(event, None)
except PositionModelEvent as pos_model:
assert np.array_equal(pos_model.increment, expected_increment)
event = _FakeKBEvent('0')
expected_increment = np.eye(4)
expected_increment[0][0]=np.cos(math.pi/180.)
expected_increment[0][1]=-np.sin(math.pi/180.)
expected_increment[1][0]=np.sin(math.pi/180.)
expected_increment[1][1]=np.cos(math.pi/180.)  # api: numpy.cos
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
from numba import njit
import vectorbt as vbt
from tests.utils import record_arrays_close
from vectorbt.generic.enums import range_dt, drawdown_dt
from vectorbt.portfolio.enums import order_dt, trade_dt, log_dt
day_dt = np.timedelta64(86400000000000)
example_dt = np.dtype([
('id', np.int64),
('col', np.int64),
('idx', np.int64),
('some_field1', np.float64),
('some_field2', np.float64)
], align=True)
records_arr = np.asarray([
(0, 0, 0, 10, 21),
(1, 0, 1, 11, 20),
(2, 0, 2, 12, 19),
(3, 1, 0, 13, 18),
(4, 1, 1, 14, 17),
(5, 1, 2, 13, 18),
(6, 2, 0, 12, 19),
(7, 2, 1, 11, 20),
(8, 2, 2, 10, 21)
], dtype=example_dt)
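# Nine records spread across columns 0-2 (three per column, at rows x/y/z); column 3
# ('d') deliberately receives no records so the empty-column code paths are covered.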
records_nosort_arr = np.concatenate((
records_arr[0::3],
records_arr[1::3],
records_arr[2::3]
))
group_by = pd.Index(['g1', 'g1', 'g2', 'g2'])
wrapper = vbt.ArrayWrapper(
index=['x', 'y', 'z'],
columns=['a', 'b', 'c', 'd'],
ndim=2,
freq='1 days'
)
wrapper_grouped = wrapper.replace(group_by=group_by)
records = vbt.records.Records(wrapper, records_arr)
records_grouped = vbt.records.Records(wrapper_grouped, records_arr)
records_nosort = records.replace(records_arr=records_nosort_arr)
records_nosort_grouped = vbt.records.Records(wrapper_grouped, records_nosort_arr)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# col_mapper.py ############# #
class TestColumnMapper:
def test_col_arr(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
records.col_mapper.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_get_col_arr(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_arr(),
records.col_mapper.col_arr
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_arr(),
np.array([0, 0, 0, 0, 0, 0, 1, 1, 1])
)
def test_col_range(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_range,
np.array([
[0, 3]
])
)
np.testing.assert_array_equal(
records.col_mapper.col_range,
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
def test_get_col_range(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_range(),
np.array([
[0, 3],
[3, 6],
[6, 9],
[-1, -1]
])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_range(),
np.array([[0, 6]])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_range(),
np.array([[0, 6], [6, 9]])
)
def test_col_map(self):
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[0],
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
records['a'].col_mapper.col_map[1],
np.array([3])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records.col_mapper.col_map[1],
np.array([3, 3, 3, 0])
)
def test_get_col_map(self):
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[0],
records.col_mapper.col_map[0]
)
np.testing.assert_array_equal(
records.col_mapper.get_col_map()[1],
records.col_mapper.col_map[1]
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
records_grouped['g1'].col_mapper.get_col_map()[1],
np.array([6])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[0],
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
np.testing.assert_array_equal(
records_grouped.col_mapper.get_col_map()[1],
np.array([6, 3])
)
def test_is_sorted(self):
assert records.col_mapper.is_sorted()
assert not records_nosort.col_mapper.is_sorted()
# ############# mapped_array.py ############# #
mapped_array = records.map_field('some_field1')
mapped_array_grouped = records_grouped.map_field('some_field1')
mapped_array_nosort = records_nosort.map_field('some_field1')
mapped_array_nosort_grouped = records_nosort_grouped.map_field('some_field1')
mapping = {x: 'test_' + str(x) for x in pd.unique(mapped_array.values)}
mp_mapped_array = mapped_array.replace(mapping=mapping)
mp_mapped_array_grouped = mapped_array_grouped.replace(mapping=mapping)
class TestMappedArray:
def test_config(self, tmp_path):
assert vbt.MappedArray.loads(mapped_array.dumps()) == mapped_array
mapped_array.save(tmp_path / 'mapped_array')
assert vbt.MappedArray.load(tmp_path / 'mapped_array') == mapped_array
def test_mapped_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
mapped_array.values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
def test_id_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.id_arr,
np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
)
def test_col_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].col_arr,
np.array([0, 0, 0])
)
np.testing.assert_array_equal(
mapped_array.col_arr,
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
)
def test_idx_arr(self):
np.testing.assert_array_equal(
mapped_array['a'].idx_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
mapped_array.idx_arr,
np.array([0, 1, 2, 0, 1, 2, 0, 1, 2])
)
def test_is_sorted(self):
assert mapped_array.is_sorted()
assert mapped_array.is_sorted(incl_id=True)
assert not mapped_array_nosort.is_sorted()
assert not mapped_array_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert mapped_array.sort().is_sorted()
assert mapped_array.sort().is_sorted(incl_id=True)
assert mapped_array.sort(incl_id=True).is_sorted(incl_id=True)
assert mapped_array_nosort.sort().is_sorted()
assert mapped_array_nosort.sort().is_sorted(incl_id=True)
assert mapped_array_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = mapped_array['a'].values >= mapped_array['a'].values.mean()
np.testing.assert_array_equal(
mapped_array['a'].apply_mask(mask_a).id_arr,
np.array([1, 2])
)
mask = mapped_array.values >= mapped_array.values.mean()
filtered = mapped_array.apply_mask(mask)
np.testing.assert_array_equal(
filtered.id_arr,
np.array([2, 3, 4, 5, 6])
)
np.testing.assert_array_equal(filtered.col_arr, mapped_array.col_arr[mask])
np.testing.assert_array_equal(filtered.idx_arr, mapped_array.idx_arr[mask])
assert mapped_array_grouped.apply_mask(mask).wrapper == mapped_array_grouped.wrapper
assert mapped_array_grouped.apply_mask(mask, group_by=False).wrapper.grouper.group_by is None
def test_map_to_mask(self):
@njit
def every_2_nb(inout, idxs, col, mapped_arr):
inout[idxs[::2]] = True
np.testing.assert_array_equal(
mapped_array.map_to_mask(every_2_nb),
np.array([True, False, True, True, False, True, True, False, True])
)
def test_top_n_mask(self):
np.testing.assert_array_equal(
mapped_array.top_n_mask(1),
np.array([False, False, True, False, True, False, True, False, False])
)
def test_bottom_n_mask(self):
np.testing.assert_array_equal(
mapped_array.bottom_n_mask(1),
np.array([True, False, False, True, False, False, False, False, True])
)
def test_top_n(self):
np.testing.assert_array_equal(
mapped_array.top_n(1).id_arr,
np.array([2, 4, 6])
)
def test_bottom_n(self):
np.testing.assert_array_equal(
mapped_array.bottom_n(1).id_arr,
np.array([0, 3, 8])
)
def test_to_pd(self):
target = pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
index=wrapper.index,
columns=wrapper.columns
)
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(),
target['a']
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(),
target
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0.),
target.fillna(0.)
)
mapped_array2 = vbt.MappedArray(
wrapper,
records_arr['some_field1'].tolist() + [1],
records_arr['col'].tolist() + [2],
idx_arr=records_arr['idx'].tolist() + [2]
)
with pytest.raises(Exception):
_ = mapped_array2.to_pd()
pd.testing.assert_series_equal(
mapped_array['a'].to_pd(ignore_index=True),
pd.Series(np.array([10., 11., 12.]), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., np.nan],
[11., 14., 11., np.nan],
[12., 13., 10., np.nan]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.to_pd(fill_value=0, ignore_index=True),
pd.DataFrame(
np.array([
[10., 13., 12., 0.],
[11., 14., 11., 0.],
[12., 13., 10., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.to_pd(ignore_index=True),
pd.DataFrame(
np.array([
[10., 12.],
[11., 11.],
[12., 10.],
[13., np.nan],
[14., np.nan],
[13., np.nan],
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_apply(self):
@njit
def cumsum_apply_nb(idxs, col, a):
return np.cumsum(a)
np.testing.assert_array_equal(
mapped_array['a'].apply(cumsum_apply_nb).values,
np.array([10., 21., 33.])
)
np.testing.assert_array_equal(
mapped_array.apply(cumsum_apply_nb).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=False).values,
np.array([10., 21., 33., 13., 27., 40., 12., 23., 33.])
)
np.testing.assert_array_equal(
mapped_array_grouped.apply(cumsum_apply_nb, apply_per_group=True).values,
np.array([10., 21., 33., 46., 60., 73., 12., 23., 33.])
)
assert mapped_array_grouped.apply(cumsum_apply_nb).wrapper == \
mapped_array.apply(cumsum_apply_nb, group_by=group_by).wrapper
assert mapped_array.apply(cumsum_apply_nb, group_by=False).wrapper.grouper.group_by is None
def test_reduce(self):
@njit
def mean_reduce_nb(col, a):
return np.mean(a)
assert mapped_array['a'].reduce(mean_reduce_nb) == 11.
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0.),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, fill_value=0., wrap_kwargs=dict(dtype=np.int_)),
pd.Series(np.array([11., 13.333333333333334, 11., 0.]), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, wrap_kwargs=dict(to_timedelta=True)),
pd.Series(np.array([11., 13.333333333333334, 11., np.nan]), index=wrapper.columns).rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(mean_reduce_nb),
pd.Series([12.166666666666666, 11.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
assert mapped_array_grouped['g1'].reduce(mean_reduce_nb) == 12.166666666666666
pd.testing.assert_series_equal(
mapped_array_grouped[['g1']].reduce(mean_reduce_nb),
pd.Series([12.166666666666666], index=pd.Index(['g1'], dtype='object')).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb),
mapped_array_grouped.reduce(mean_reduce_nb, group_by=False)
)
pd.testing.assert_series_equal(
mapped_array.reduce(mean_reduce_nb, group_by=group_by),
mapped_array_grouped.reduce(mean_reduce_nb)
)
def test_reduce_to_idx(self):
@njit
def argmin_reduce_nb(col, a):
return np.argmin(a)
assert mapped_array['a'].reduce(argmin_reduce_nb, returns_idx=True) == 'x'
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True),
pd.Series(np.array(['x', 'x', 'z', np.nan], dtype=object), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 0, 2, -1], dtype=int), index=wrapper.columns).rename('reduce')
)
pd.testing.assert_series_equal(
mapped_array_grouped.reduce(argmin_reduce_nb, returns_idx=True, to_index=False),
pd.Series(np.array([0, 2], dtype=int), index=pd.Index(['g1', 'g2'], dtype='object')).rename('reduce')
)
def test_reduce_to_array(self):
@njit
def min_max_reduce_nb(col, a):
return np.array([np.min(a), np.max(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(min_max_reduce_nb, returns_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([10., 12.], index=pd.Index(['min', 'max'], dtype='object'), name='a')
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
index=pd.Index(['min', 'max'], dtype='object'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, fill_value=0.),
pd.DataFrame(
np.array([
[10., 13., 10., 0.],
[12., 14., 12., 0.]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, wrap_kwargs=dict(to_timedelta=True)),
pd.DataFrame(
np.array([
[10., 13., 10., np.nan],
[12., 14., 12., np.nan]
]),
columns=wrapper.columns
) * day_dt
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame(
np.array([
[10., 10.],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True, group_by=False)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(min_max_reduce_nb, returns_array=True, group_by=group_by),
mapped_array_grouped.reduce(min_max_reduce_nb, returns_array=True)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g1'].reduce(min_max_reduce_nb, returns_array=True),
pd.Series([10., 14.], name='g1')
)
pd.testing.assert_frame_equal(
mapped_array_grouped[['g1']].reduce(min_max_reduce_nb, returns_array=True),
pd.DataFrame([[10.], [14.]], columns=pd.Index(['g1'], dtype='object'))
)
def test_reduce_to_idx_array(self):
@njit
def idxmin_idxmax_reduce_nb(col, a):
return np.array([np.argmin(a), np.argmax(a)])
pd.testing.assert_series_equal(
mapped_array['a'].reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['min', 'max'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])
),
pd.DataFrame(
{
'a': ['x', 'z'],
'b': ['x', 'y'],
'c': ['z', 'x'],
'd': [np.nan, np.nan]
},
index=pd.Index(['min', 'max'], dtype='object')
)
)
pd.testing.assert_frame_equal(
mapped_array.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 0, 2, -1],
[2, 1, 0, -1]
]),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.reduce(
idxmin_idxmax_reduce_nb,
returns_array=True,
returns_idx=True,
to_index=False
),
pd.DataFrame(
np.array([
[0, 2],
[1, 0]
]),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
def test_nth(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth(0),
pd.Series(np.array([10., 13., 12., np.nan]), index=wrapper.columns).rename('nth')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth(-1),
pd.Series(np.array([12., 13., 10., np.nan]), index=wrapper.columns).rename('nth')
)
with pytest.raises(Exception):
_ = mapped_array.nth(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth(0),
pd.Series(np.array([10., 12.]), index=pd.Index(['g1', 'g2'], dtype='object')).rename('nth')
)
def test_nth_index(self):
assert mapped_array['a'].nth(0) == 10.
pd.testing.assert_series_equal(
mapped_array.nth_index(0),
pd.Series(
np.array(['x', 'x', 'x', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
assert mapped_array['a'].nth(-1) == 12.
pd.testing.assert_series_equal(
mapped_array.nth_index(-1),
pd.Series(
np.array(['z', 'z', 'z', np.nan], dtype='object'),
index=wrapper.columns
).rename('nth_index')
)
with pytest.raises(Exception):
_ = mapped_array.nth_index(10)
pd.testing.assert_series_equal(
mapped_array_grouped.nth_index(0),
pd.Series(
np.array(['x', 'x'], dtype='object'),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('nth_index')
)
def test_min(self):
assert mapped_array['a'].min() == mapped_array['a'].to_pd().min()
pd.testing.assert_series_equal(
mapped_array.min(),
mapped_array.to_pd().min().rename('min')
)
pd.testing.assert_series_equal(
mapped_array_grouped.min(),
pd.Series([10., 10.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('min')
)
def test_max(self):
assert mapped_array['a'].max() == mapped_array['a'].to_pd().max()
pd.testing.assert_series_equal(
mapped_array.max(),
mapped_array.to_pd().max().rename('max')
)
pd.testing.assert_series_equal(
mapped_array_grouped.max(),
pd.Series([14., 12.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('max')
)
def test_mean(self):
assert mapped_array['a'].mean() == mapped_array['a'].to_pd().mean()
pd.testing.assert_series_equal(
mapped_array.mean(),
mapped_array.to_pd().mean().rename('mean')
)
pd.testing.assert_series_equal(
mapped_array_grouped.mean(),
pd.Series([12.166667, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('mean')
)
def test_median(self):
assert mapped_array['a'].median() == mapped_array['a'].to_pd().median()
pd.testing.assert_series_equal(
mapped_array.median(),
mapped_array.to_pd().median().rename('median')
)
pd.testing.assert_series_equal(
mapped_array_grouped.median(),
pd.Series([12.5, 11.], index=pd.Index(['g1', 'g2'], dtype='object')).rename('median')
)
def test_std(self):
assert mapped_array['a'].std() == mapped_array['a'].to_pd().std()
pd.testing.assert_series_equal(
mapped_array.std(),
mapped_array.to_pd().std().rename('std')
)
pd.testing.assert_series_equal(
mapped_array.std(ddof=0),
mapped_array.to_pd().std(ddof=0).rename('std')
)
pd.testing.assert_series_equal(
mapped_array_grouped.std(),
pd.Series([1.4719601443879746, 1.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('std')
)
def test_sum(self):
assert mapped_array['a'].sum() == mapped_array['a'].to_pd().sum()
pd.testing.assert_series_equal(
mapped_array.sum(),
mapped_array.to_pd().sum().rename('sum')
)
pd.testing.assert_series_equal(
mapped_array_grouped.sum(),
pd.Series([73.0, 33.0], index=pd.Index(['g1', 'g2'], dtype='object')).rename('sum')
)
def test_count(self):
assert mapped_array['a'].count() == mapped_array['a'].to_pd().count()
pd.testing.assert_series_equal(
mapped_array.count(),
mapped_array.to_pd().count().rename('count')
)
pd.testing.assert_series_equal(
mapped_array_grouped.count(),
pd.Series([6, 3], index=pd.Index(['g1', 'g2'], dtype='object')).rename('count')
)
def test_idxmin(self):
assert mapped_array['a'].idxmin() == mapped_array['a'].to_pd().idxmin()
pd.testing.assert_series_equal(
mapped_array.idxmin(),
mapped_array.to_pd().idxmin().rename('idxmin')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmin(),
pd.Series(
np.array(['x', 'z'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmin')
)
def test_idxmax(self):
assert mapped_array['a'].idxmax() == mapped_array['a'].to_pd().idxmax()
pd.testing.assert_series_equal(
mapped_array.idxmax(),
mapped_array.to_pd().idxmax().rename('idxmax')
)
pd.testing.assert_series_equal(
mapped_array_grouped.idxmax(),
pd.Series(
np.array(['y', 'x'], dtype=object),
index=pd.Index(['g1', 'g2'], dtype='object')
).rename('idxmax')
)
def test_describe(self):
pd.testing.assert_series_equal(
mapped_array['a'].describe(),
mapped_array['a'].to_pd().describe()
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=None),
mapped_array.to_pd().describe(percentiles=None)
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=[]),
mapped_array.to_pd().describe(percentiles=[])
)
pd.testing.assert_frame_equal(
mapped_array.describe(percentiles=np.arange(0, 1, 0.1)),
mapped_array.to_pd().describe(percentiles=np.arange(0, 1, 0.1))
)
pd.testing.assert_frame_equal(
mapped_array_grouped.describe(),
pd.DataFrame(
np.array([
[6., 3.],
[12.16666667, 11.],
[1.47196014, 1.],
[10., 10.],
[11.25, 10.5],
[12.5, 11.],
[13., 11.5],
[14., 12.]
]),
columns=pd.Index(['g1', 'g2'], dtype='object'),
index=mapped_array.describe().index
)
)
def test_value_counts(self):
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(),
pd.Series(
np.array([1, 1, 1]),
index=pd.Float64Index([10.0, 11.0, 12.0], dtype='float64'),
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array['a'].value_counts(mapping=mapping),
pd.Series(
np.array([1, 1, 1]),
index=pd.Index(['test_10.0', 'test_11.0', 'test_12.0'], dtype='object'),
name='a'
)
)
pd.testing.assert_frame_equal(
mapped_array.value_counts(),
pd.DataFrame(
np.array([
[1, 0, 1, 0],
[1, 0, 1, 0],
[1, 0, 1, 0],
[0, 2, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array_grouped.value_counts(),
pd.DataFrame(
np.array([
[1, 1],
[1, 1],
[1, 1],
[2, 0],
[1, 0]
]),
index=pd.Float64Index([10.0, 11.0, 12.0, 13.0, 14.0], dtype='float64'),
columns=pd.Index(['g1', 'g2'], dtype='object')
)
)
mapped_array2 = mapped_array.replace(mapped_arr=[4, 4, 3, 2, np.nan, 4, 3, 2, 1])
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=False),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 3.0, 2.0, 1.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort_uniques=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([1.0, 2.0, 3.0, 4.0, None], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True),
pd.DataFrame(
np.array([
[2, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[0, 0, 1, 0],
[0, 1, 0, 0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, ascending=True),
pd.DataFrame(
np.array([
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 1, 1, 0],
[1, 0, 1, 0],
[2, 1, 0, 0]
]),
index=pd.Float64Index([1.0, np.nan, 2.0, 3.0, 4.0], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True),
pd.DataFrame(
np.array([
[0.2222222222222222, 0.1111111111111111, 0.0, 0.0],
[0.0, 0.1111111111111111, 0.1111111111111111, 0.0],
[0.1111111111111111, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.0, 0.1111111111111111, 0.0],
[0.0, 0.1111111111111111, 0.0, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0, np.nan], dtype='float64'),
columns=wrapper.columns
)
)
pd.testing.assert_frame_equal(
mapped_array2.value_counts(sort=True, normalize=True, dropna=True),
pd.DataFrame(
np.array([
[0.25, 0.125, 0.0, 0.0],
[0.0, 0.125, 0.125, 0.0],
[0.125, 0.0, 0.125, 0.0],
[0.0, 0.0, 0.125, 0.0]
]),
index=pd.Float64Index([4.0, 2.0, 3.0, 1.0], dtype='float64'),
columns=wrapper.columns
)
)
@pytest.mark.parametrize(
"test_nosort",
[False, True],
)
def test_indexing(self, test_nosort):
if test_nosort:
ma = mapped_array_nosort
ma_grouped = mapped_array_nosort_grouped
else:
ma = mapped_array
ma_grouped = mapped_array_grouped
np.testing.assert_array_equal(
ma['a'].id_arr,
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
ma['a'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['a'].wrapper.columns,
pd.Index(['a'], dtype='object')
)
np.testing.assert_array_equal(
ma['b'].id_arr,
np.array([3, 4, 5])
)
np.testing.assert_array_equal(
ma['b'].col_arr,
np.array([0, 0, 0])
)
pd.testing.assert_index_equal(
ma['b'].wrapper.columns,
pd.Index(['b'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'a']].id_arr,
np.array([0, 1, 2, 0, 1, 2])
)
np.testing.assert_array_equal(
ma[['a', 'a']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'a']].wrapper.columns,
pd.Index(['a', 'a'], dtype='object')
)
np.testing.assert_array_equal(
ma[['a', 'b']].id_arr,
np.array([0, 1, 2, 3, 4, 5])
)
np.testing.assert_array_equal(
ma[['a', 'b']].col_arr,
np.array([0, 0, 0, 1, 1, 1])
)
pd.testing.assert_index_equal(
ma[['a', 'b']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
with pytest.raises(Exception):
_ = ma.iloc[::2, :] # changing time not supported
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped['g1'].wrapper.ndim == 2
assert ma_grouped['g1'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g1'].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.columns,
pd.Index(['c', 'd'], dtype='object')
)
assert ma_grouped['g2'].wrapper.ndim == 2
assert ma_grouped['g2'].wrapper.grouped_ndim == 1
pd.testing.assert_index_equal(
ma_grouped['g2'].wrapper.grouper.group_by,
pd.Index(['g2', 'g2'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.columns,
pd.Index(['a', 'b'], dtype='object')
)
assert ma_grouped[['g1']].wrapper.ndim == 2
assert ma_grouped[['g1']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1'], dtype='object')
)
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.columns,
pd.Index(['a', 'b', 'c', 'd'], dtype='object')
)
assert ma_grouped[['g1', 'g2']].wrapper.ndim == 2
assert ma_grouped[['g1', 'g2']].wrapper.grouped_ndim == 2
pd.testing.assert_index_equal(
ma_grouped[['g1', 'g2']].wrapper.grouper.group_by,
pd.Index(['g1', 'g1', 'g2', 'g2'], dtype='object')
)
def test_magic(self):
a = vbt.MappedArray(
wrapper,
records_arr['some_field1'],
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
a_inv = vbt.MappedArray(
wrapper,
records_arr['some_field1'][::-1],
records_arr['col'][::-1],
id_arr=records_arr['id'][::-1],
idx_arr=records_arr['idx'][::-1]
)
b = records_arr['some_field2']
a_bool = vbt.MappedArray(
wrapper,
records_arr['some_field1'] > np.mean(records_arr['some_field1']),
records_arr['col'],
id_arr=records_arr['id'],
idx_arr=records_arr['idx']
)
b_bool = records_arr['some_field2'] > np.mean(records_arr['some_field2'])
assert a ** a == a ** 2
with pytest.raises(Exception):
_ = a * a_inv
# binary ops
# comparison ops
np.testing.assert_array_equal((a == b).values, a.values == b)
np.testing.assert_array_equal((a != b).values, a.values != b)
np.testing.assert_array_equal((a < b).values, a.values < b)
np.testing.assert_array_equal((a > b).values, a.values > b)
np.testing.assert_array_equal((a <= b).values, a.values <= b)
np.testing.assert_array_equal((a >= b).values, a.values >= b)
# arithmetic ops
np.testing.assert_array_equal((a + b).values, a.values + b)
np.testing.assert_array_equal((a - b).values, a.values - b)
np.testing.assert_array_equal((a * b).values, a.values * b)
np.testing.assert_array_equal((a ** b).values, a.values ** b)
np.testing.assert_array_equal((a % b).values, a.values % b)
np.testing.assert_array_equal((a // b).values, a.values // b)
np.testing.assert_array_equal((a / b).values, a.values / b)
# __r*__ is only called if the left object does not have an __*__ method
np.testing.assert_array_equal((10 + a).values, 10 + a.values)
np.testing.assert_array_equal((10 - a).values, 10 - a.values)
np.testing.assert_array_equal((10 * a).values, 10 * a.values)
np.testing.assert_array_equal((10 ** a).values, 10 ** a.values)
np.testing.assert_array_equal((10 % a).values, 10 % a.values)
np.testing.assert_array_equal((10 // a).values, 10 // a.values)
np.testing.assert_array_equal((10 / a).values, 10 / a.values)
# mask ops
np.testing.assert_array_equal((a_bool & b_bool).values, a_bool.values & b_bool)
np.testing.assert_array_equal((a_bool | b_bool).values, a_bool.values | b_bool)
np.testing.assert_array_equal((a_bool ^ b_bool).values, a_bool.values ^ b_bool)
np.testing.assert_array_equal((True & a_bool).values, True & a_bool.values)
np.testing.assert_array_equal((True | a_bool).values, True | a_bool.values)
np.testing.assert_array_equal((True ^ a_bool).values, True ^ a_bool.values)
# unary ops
np.testing.assert_array_equal((-a).values, -a.values)
np.testing.assert_array_equal((+a).values, +a.values)
np.testing.assert_array_equal((abs(-a)).values, abs((-a.values)))
def test_stats(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Mean', 'Std', 'Min', 'Median', 'Max', 'Min Index', 'Max Index'
], dtype='object')
pd.testing.assert_series_equal(
mapped_array.stats(),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
2.25, 11.777777777777779, 0.859116756396542, 11.0, 11.666666666666666, 12.666666666666666
],
index=stats_index[:-2],
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='a'),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
3, 11.0, 1.0, 10.0, 11.0, 12.0, 'x', 'z'
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x', 'z', pd.Timedelta('3 days 00:00:00'),
6, 12.166666666666666, 1.4719601443879746, 10.0, 12.5, 14.0, 'x', 'y'
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mapped_array['c'].stats(),
mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mapped_array_grouped['g2'].stats(),
mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 11)
pd.testing.assert_index_equal(stats_df.index, mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
def test_stats_mapping(self):
stats_index = pd.Index([
'Start', 'End', 'Period', 'Count', 'Value Counts: test_10.0',
'Value Counts: test_11.0', 'Value Counts: test_12.0',
'Value Counts: test_13.0', 'Value Counts: test_14.0'
], dtype='object')
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
2.25, 0.5, 0.5, 0.5, 0.5, 0.25
],
index=stats_index,
name='agg_func_mean'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='a'),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
3, 1, 1, 1, 0, 0
],
index=stats_index,
name='a'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(column='g1', group_by=group_by),
pd.Series([
'x',
'z',
pd.Timedelta('3 days 00:00:00'),
6, 1, 1, 1, 2, 1
],
index=stats_index,
name='g1'
)
)
pd.testing.assert_series_equal(
mp_mapped_array.stats(),
mapped_array.stats(settings=dict(mapping=mapping))
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c')
)
pd.testing.assert_series_equal(
mp_mapped_array['c'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='c', group_by=False)
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array_grouped.stats(column='g2')
)
pd.testing.assert_series_equal(
mp_mapped_array_grouped['g2'].stats(settings=dict(incl_all_keys=True)),
mp_mapped_array.stats(column='g2', group_by=group_by)
)
stats_df = mp_mapped_array.stats(agg_func=None)
assert stats_df.shape == (4, 9)
pd.testing.assert_index_equal(stats_df.index, mp_mapped_array.wrapper.columns)
pd.testing.assert_index_equal(stats_df.columns, stats_index)
# ############# base.py ############# #
class TestRecords:
def test_config(self, tmp_path):
assert vbt.Records.loads(records['a'].dumps()) == records['a']
assert vbt.Records.loads(records.dumps()) == records
records.save(tmp_path / 'records')
assert vbt.Records.load(tmp_path / 'records') == records
def test_records(self):
pd.testing.assert_frame_equal(
records.records,
pd.DataFrame.from_records(records_arr)
)
def test_recarray(self):
np.testing.assert_array_equal(records['a'].recarray.some_field1, records['a'].values['some_field1'])
np.testing.assert_array_equal(records.recarray.some_field1, records.values['some_field1'])
def test_records_readable(self):
pd.testing.assert_frame_equal(
records.records_readable,
pd.DataFrame([
[0, 'a', 'x', 10.0, 21.0], [1, 'a', 'y', 11.0, 20.0], [2, 'a', 'z', 12.0, 19.0],
[3, 'b', 'x', 13.0, 18.0], [4, 'b', 'y', 14.0, 17.0], [5, 'b', 'z', 13.0, 18.0],
[6, 'c', 'x', 12.0, 19.0], [7, 'c', 'y', 11.0, 20.0], [8, 'c', 'z', 10.0, 21.0]
], columns=pd.Index(['Id', 'Column', 'Timestamp', 'some_field1', 'some_field2'], dtype='object'))
)
def test_is_sorted(self):
assert records.is_sorted()
assert records.is_sorted(incl_id=True)
assert not records_nosort.is_sorted()
assert not records_nosort.is_sorted(incl_id=True)
def test_sort(self):
assert records.sort().is_sorted()
assert records.sort().is_sorted(incl_id=True)
assert records.sort(incl_id=True).is_sorted(incl_id=True)
assert records_nosort.sort().is_sorted()
assert records_nosort.sort().is_sorted(incl_id=True)
assert records_nosort.sort(incl_id=True).is_sorted(incl_id=True)
def test_apply_mask(self):
mask_a = records['a'].values['some_field1'] >= records['a'].values['some_field1'].mean()
record_arrays_close(
records['a'].apply_mask(mask_a).values,
np.array([
(1, 0, 1, 11., 20.), (2, 0, 2, 12., 19.)
], dtype=example_dt)
)
mask = records.values['some_field1'] >= records.values['some_field1'].mean()
filtered = records.apply_mask(mask)
record_arrays_close(
filtered.values,
np.array([
(2, 0, 2, 12., 19.), (3, 1, 0, 13., 18.), (4, 1, 1, 14., 17.),
(5, 1, 2, 13., 18.), (6, 2, 0, 12., 19.)
], dtype=example_dt)
)
assert records_grouped.apply_mask(mask).wrapper == records_grouped.wrapper
def test_map_field(self):
np.testing.assert_array_equal(
records['a'].map_field('some_field1').values,
np.array([10., 11., 12.])
)
np.testing.assert_array_equal(
records.map_field('some_field1').values,
np.array([10., 11., 12., 13., 14., 13., 12., 11., 10.])
)
assert records_grouped.map_field('some_field1').wrapper == \
records.map_field('some_field1', group_by=group_by).wrapper
assert records_grouped.map_field('some_field1', group_by=False).wrapper.grouper.group_by is None
def test_map(self):
@njit
def map_func_nb(record):
return record['some_field1'] + record['some_field2']
np.testing.assert_array_equal(
records['a'].map(map_func_nb).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map(map_func_nb).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])
)
assert records_grouped.map(map_func_nb).wrapper == \
records.map(map_func_nb, group_by=group_by).wrapper
assert records_grouped.map(map_func_nb, group_by=False).wrapper.grouper.group_by is None
def test_map_array(self):
arr = records_arr['some_field1'] + records_arr['some_field2']
np.testing.assert_array_equal(
records['a'].map_array(arr[:3]).values,
np.array([31., 31., 31.])
)
np.testing.assert_array_equal(
records.map_array(arr).values,
np.array([31., 31., 31., 31., 31., 31., 31., 31., 31.])  # api: numpy.array
)
import numpy as np
import math
#def recognit(forCounter,x_in,y_in,heading):
#data = np.genfromtxt('data'+str(forCounter)+'.csv',delimiter=',')
x_in = 1000
y_in = -800
heading = math.pi*0.1355
data = np.genfromtxt('T1/data36.csv',delimiter=',')  # the scan must be loaded before the rotation below
data = np.concatenate ((data[211:400], data[0:211]),axis=0)
pos_flag = 0
heading-=math.pi/2
global min_ind,max_ind
theta = []
for i in range(400):
theta.append(i*math.pi/200)
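# Zero readings are invalid/dropped returns; replace them with a large placeholder
# range (500, assumed out of range) so they cannot win the nearest-object argmin below.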
for i in range(400):
if (data[i]==0):
data[i] = 500
rmin=min(data)
rmin_ind=np.argmin(data)
if(rmin_ind>370)|(rmin_ind<30):
pos_flag = 1
data = np.concatenate ((data[100:400], data[0:100]),axis=0)
rmin=min(data)
rmin_ind=np.argmin(data)
for i in range(30):
if(data[(rmin_ind+i)] < 240):
max_ind = rmin_ind+i+1
if(data[(rmin_ind-i)] < 240):
min_ind = rmin_ind-i
sel_r = data[min_ind:(max_ind+1)]
sel_th = theta[min_ind:(max_ind+1)]
rm_ind=np.argmin(sel_r)
sel_x = np.multiply(sel_r,np.cos(sel_th))
sel_y = np.multiply(sel_r, np.sin(sel_th))  # api: numpy.sin
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from pandas import DataFrame, Series
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.utils import pandas_sort, pandas_concat
from trackpy.tests.common import StrictTestCase
def random_walk(N):
return np.cumsum(np.random.randn(N))  # api: numpy.random.randn
import tensorflow as tf
from tensorflow.keras import layers, regularizers
from tensorflow.keras.models import Sequential
import pandas as pd
import numpy as np
class Random_Agent:
"""
This agent takes actions corresponding to the actual demand in (1-eps) percent of cases, and random actions in (eps) percent of cases.
The agent need not learn in the training loop, and does not need action-selection networks, because the action-selection policy is deterministic.
"""
def __init__(self, env, eps = 0.1):
self.env = env
self.eps = eps
self.env.eps = self.eps
self.agent_type = 'random'
self.env.agent_type = 'random'
@staticmethod
def create(env,eps):
obj = Random_Agent(env, eps)
return obj, obj.env
def policy(self, noise_object, env):
"""
`policy()` returns the demand as a target action in (1-eps) percent of cases, and random actions in (eps) percent of cases.
"""
upper_bound = env.action_space.high[0]
lower_bound = env.action_space.low[0]
sampled_actions = tf.squeeze(env.D[env.t])
draw = np.random.uniform(0,1)
if draw < self.eps:
noise = noise_object()
sampled_actions = sampled_actions.numpy() + noise
else:
noise = 0.
sampled_actions = sampled_actions.numpy()
# We make sure action is within bounds
legal_action = np.clip(sampled_actions, lower_bound, upper_bound)
return [np.squeeze(legal_action)], noise
class Planner:
"""
This agent has access to information about the future at different time scales, so it can plan releases around expected inflows.
"""
def __init__(self, env, weights_dir = None, TD3 = True, warmup = True, eps = None, epi_start = 0, epi_steps = 5000, max_epi = 100):
self.agent_type = 'planner'
self.weights_dir = weights_dir
self.TD3 = TD3
self.warmup = warmup
self.env = env
self.epi_start = epi_start
self.epi_steps = epi_steps
self.max_epi = max_epi
self.epi_count = 0
self.epi_reward_list = []
self.avg_reward_list = []
self.avg_action_list = []
self.epi_avg_reward_list = []
self.upper_bound = env.action_space.high[0]
self.lower_bound = env.action_space.low[0]
self.eps = eps
self.models = self.env.models
self.ensembles = self.env.ensembles
self.ens = self.env.ens # init to environment init data, can be stepped forward later
self.model = self.env.model # init to environment init data, can be stepped forward later
self.flows_format()
self.Q_num_inputs = len(self.env.Q_future.columns)
self.reset()
@staticmethod
def create(env, weights_dir = None, TD3 = True, warmup = True, eps = None, epi_start = 0, epi_steps = 5000, max_epi = 100):
obj = Planner(env, weights_dir = weights_dir, TD3 = TD3, warmup = warmup, eps = eps, epi_start = epi_start, epi_steps = epi_steps, max_epi = max_epi)
return obj, obj.env
def flows_format(self):
flows = self.env.Q_df.copy(deep=False)
flows['inf_t+1'] = np.append(flows.inflow.values[1:],np.NaN)
for i in range(2,6):
flows['inf_t+{}'.format(i)] = np.append(flows['inf_t+{}'.format(i-1)].values[1:],np.NaN)
for _,(i,j) in enumerate(zip(['5d','1m','3m','6m','1y','2y','3y','4y','5y'],[5,30,90,180,365,730,1095,1460,1825])):
flows['inf_{}_mean'.format(i)] = flows['inflow'].iloc[::-1].rolling(window='{}D'.format(j)).mean().iloc[::-1]
# flows['inf_{}_sum'.format(i)] = flows['inflow'].iloc[::-1].rolling(window='{}D'.format(j)).sum().iloc[::-1]
self.env.Q_future = flows.ffill()
self.env.Q_future_numpy = self.env.Q_future.values
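# After this call, env.Q_future holds (per day) the next five daily inflows plus
# forward-filled rolling-mean inflows over 5d/1m/3m/6m/1y-5y windows; these
# look-ahead features are what makes this agent a "planner".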
def get_actor(self,env):
# initializers:
initializer = tf.random_uniform_initializer(minval=-0.03, maxval=0.03)
last_init = tf.random_uniform_initializer(minval=-0.03, maxval=0.03)
# state_input = layers.Input(shape=env.observation_space.shape)
# state_out = layers.experimental.preprocessing.Rescaling(1./255)(state_input)
# state_out = layers.Conv3D(8,5,padding='same',activation='relu',kernel_initializer=initializer,kernel_regularizer='l2')(state_out)
# state_out = layers.Conv3D(8,4,padding='same',activation='relu',kernel_initializer=initializer,kernel_regularizer='l2')(state_out)
# state_out = layers.AveragePooling3D()(state_out)
# state_out = layers.Conv3D(8,3,padding='same',activation='relu',kernel_initializer=initializer,kernel_regularizer='l2')(state_out)
# state_out = layers.AveragePooling3D()(state_out)
# state_out = layers.Flatten()(state_out)
if self.agent_type == 'planner':
res_input = layers.Input(shape=env.reservoir_space.shape)
res_out = layers.experimental.preprocessing.Rescaling(scale=[1./env.K,1./365]+list(1./env.Q_future.max().values))(res_input)
# elif self.agent_type == 'scalar_climate':
# do something
# elif self.agent_type == 'hybrid_climate':
# do something
elif self.agent_type == 'baseline' or self.agent_type == 'spatial_climate':
res_input = layers.Input(shape=env.reservoir_space.shape)
res_out = layers.experimental.preprocessing.Rescaling(scale=[1./env.K,1./365])(res_input)
res_out = layers.Dense(32,activation='selu',kernel_initializer=initializer)(res_out)
res_out = tf.keras.layers.BatchNormalization()(res_out)
res_out = layers.Dense(32,activation='selu',kernel_initializer=initializer)(res_out)
res_out = tf.keras.layers.BatchNormalization()(res_out)
out = layers.Dense(64,activation='selu',kernel_initializer=initializer)(res_out)
out = tf.keras.layers.BatchNormalization()(out)
out = layers.Dense(64,activation='selu',kernel_initializer=initializer)(out)
out = tf.keras.layers.BatchNormalization()(out)
out = layers.Dense(1,activation='sigmoid',kernel_initializer=last_init)(out)
out = env.action_space.low[0] + out*(env.action_space.high[0] - env.action_space.low[0])
model = tf.keras.Model(res_input,out)
return model
def get_critic(self,env):
initializer = tf.random_uniform_initializer(minval=-0.03, maxval=0.03)
last_init = tf.random_uniform_initializer(minval=-0.03, maxval=0.03)
# state input - climate images
# state_input = layers.Input(shape=env.observation_space.shape)
# state_out = layers.experimental.preprocessing.Rescaling(1./255)(state_input)
# state_out = layers.Conv3D(8,5,padding='same',activation='selu',kernel_initializer=initializer,kernel_regularizer='l2')(state_out)
# state_out = layers.Conv3D(8,4,padding='same',activation='selu',kernel_initializer=initializer,kernel_regularizer='l2')(state_out)
# state_out = layers.AveragePooling3D()(state_out)
# state_out = layers.Conv3D(8,3,padding='same',activation='selu',kernel_initializer=initializer,kernel_regularizer='l2')(state_out)
# state_out = layers.AveragePooling3D()(state_out)
# state_out = layers.Flatten()(state_out)
# reservoir input
if self.agent_type == 'planner':
res_input = layers.Input(shape=env.reservoir_space.shape)
res_out = layers.experimental.preprocessing.Rescaling(scale=[1./env.K,1./365]+list(1./env.Q_future.max().values))(res_input)
# elif self.agent_type == 'scalar_climate':
# do something
# elif self.agent_type == 'hybrid_climate':
# do something
elif self.agent_type == 'baseline' or self.agent_type == 'spatial_climate':
res_input = layers.Input(shape=env.reservoir_space.shape)
res_out = layers.experimental.preprocessing.Rescaling(scale=[1./env.K,1./365])(res_input)
res_out = layers.Dense(32,activation='selu',kernel_initializer=initializer,kernel_regularizer='l2')(res_out)
res_out = tf.keras.layers.BatchNormalization()(res_out)
res_out = layers.Dense(32,activation='selu',kernel_initializer=initializer,kernel_regularizer='l2')(res_out)
res_out = tf.keras.layers.BatchNormalization()(res_out)
# action input
act_input = layers.Input(shape=env.action_space.shape)
act_out = layers.experimental.preprocessing.Rescaling(scale=1./env.action_space.high[0])(act_input)
concat = layers.Concatenate()([act_out, res_out])
out = layers.Dense(64,activation="selu",kernel_initializer=initializer,kernel_regularizer='l2')(concat)
out = tf.keras.layers.BatchNormalization()(out)
out = layers.Dense(64,activation="selu",kernel_initializer=initializer,kernel_regularizer='l2')(out)
out = tf.keras.layers.BatchNormalization()(out)
out = layers.Dense(1,activation="tanh",kernel_initializer=last_init,kernel_regularizer='l2')(out)
# outputs single value for give state-action
model = tf.keras.Model([res_input,act_input], out)
return model
def policy(self, res_state):
"""
`policy()` returns an action sampled from our Actor network plus some noise for
exploration.
"""
sampled_actions = tf.squeeze(self.actor(res_state))
draw = np.random.uniform(0,1)
if self.eps is None:
if draw > self.epi_count/self.max_epi:
noise = self.noise_object()
sampled_actions = sampled_actions.numpy() + noise
else:
noise = 0.
sampled_actions = sampled_actions.numpy()
else:
if draw < self.eps:
# noise = self.noise_object()
noise = np.random.uniform(-5.,5.)  # api: numpy.random.uniform
import os
import torch
import random
import copy
import csv
from glob import glob
from PIL import Image
import numpy as np
from scipy import ndimage
import SimpleITK as sitk
from skimage import measure
from skimage.transform import resize
from torch.utils.data import Dataset
import torchvision.transforms as transforms
NORMALIZATION_STATISTICS = {"luna16": [[0.2563873675129015, 0.2451283333368983]],
"self_learning_cubes_32": [[0.11303308354465243, 0.12595135887180803]],
"self_learning_cubes_64": [[0.11317437834743148, 0.12611378817031038]],
"lidc": [[0.23151727, 0.2168428080133056]],
"luna_fpr": [[0.18109835972793722, 0.1853707675313153]],
"lits_seg": [[0.46046468844492944, 0.17490586272419967]],
"pe": [[0.26125720740546626, 0.20363551346695796]],
"pe16": [[0.2887357771623902, 0.24429971299033243]],
# [[0.29407377554678416, 0.24441741466975556]], ->256x256x128
"brats": [[0.28239742604241436, 0.22023889204407615]],
"luna16_lung": [[0.1968134997129321, 0.20734707135528743]]}
# ---------------------------------------------2D Data augmentation---------------------------------------------
class Augmentation():
def __init__(self, normalize):
if normalize.lower() == "imagenet":
self.normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
elif normalize.lower() == "chestx-ray":
self.normalize = transforms.Normalize([0.5056, 0.5056, 0.5056], [0.252, 0.252, 0.252])
elif normalize.lower() == "none":
self.normalize = None
else:
print("mean and std for [{}] dataset do not exist!".format(normalize))
exit(-1)
def get_augmentation(self, augment_name, mode, *args):
try:
aug = getattr(Augmentation, augment_name)
return aug(self, mode, *args)
except:
print("Augmentation [{}] does not exist!".format(augment_name))
exit(-1)
def basic(self, mode):
transformList = []
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def _basic_crop(self, transCrop, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomCrop(transCrop))
else:
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_crop_224(self, mode):
transCrop = 224
return self._basic_crop(transCrop, mode)
def _basic_resize(self, size, mode="train"):
transformList = []
transformList.append(transforms.Resize(size))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_resize_224(self, mode):
size = 224
return self._basic_resize(size, mode)
def _basic_crop_rot(self, transCrop, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomCrop(transCrop))
transformList.append(transforms.RandomRotation(7))
else:
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_crop_rot_224(self, mode):
transCrop = 224
return self._basic_crop_rot(transCrop, mode)
def _basic_crop_flip(self, transCrop, transResize, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomCrop(transCrop))
transformList.append(transforms.RandomHorizontalFlip())
else:
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_crop_flip_224(self, mode):
transCrop = 224
transResize = 256
return self._basic_crop_flip(transCrop, transResize, mode)
def _basic_rdcrop_flip(self, transCrop, transResize, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomResizedCrop(transCrop))
transformList.append(transforms.RandomHorizontalFlip())
else:
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_rdcrop_flip_224(self, mode):
transCrop = 224
transResize = 256
return self._basic_rdcrop_flip(transCrop, transResize, mode)
def _full(self, transCrop, transResize, mode="train", test_augment=True):
transformList = []
if mode == "train":
transformList.append(transforms.RandomResizedCrop(transCrop))
transformList.append(transforms.RandomHorizontalFlip())
transformList.append(transforms.RandomRotation(7))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
elif mode == "valid":
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
elif mode == "test":
if test_augment:
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.TenCrop(transCrop))
transformList.append(
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
if self.normalize is not None:
transformList.append(transforms.Lambda(lambda crops: torch.stack([self.normalize(crop) for crop in crops])))
else:
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def full_224(self, mode, test_augment=True):
transCrop = 224
transResize = 256
return self._full(transCrop, transResize, mode, test_augment=test_augment)
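# --- Illustrative usage (added sketch): the enclosing augmentation-builder object
# --- is assumed to be available as `aug`, with `aug.normalize` set beforehand ---
# import torchvision.transforms as transforms
# aug.normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# train_transform = aug.full_224(mode="train")
# test_transform = aug.full_224(mode="test", test_augment=True)  # TenCrop -> stacked tensors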
def full_448(self, mode):
transCrop = 448
transResize = 512
return self._full(transCrop, transResize, mode)
def _full_colorjitter(self, transCrop, transResize, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomResizedCrop(transCrop))
transformList.append(transforms.RandomHorizontalFlip())
transformList.append(transforms.RandomRotation(7))
transformList.append(transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
elif mode == "valid":
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
elif mode == "test":
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.TenCrop(transCrop))
transformList.append(
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
if self.normalize is not None:
transformList.append(transforms.Lambda(lambda crops: torch.stack([self.normalize(crop) for crop in crops])))
transformSequence = transforms.Compose(transformList)
return transformSequence
def full_colorjitter_224(self, mode):
transCrop = 224
transResize = 256
return self._full_colorjitter(transCrop, transResize, mode)
# ---------------------------------------------3D Data Normalization--------------------------------------------
def channel_wise_normalize_3d(data, mean_std):
num_data = data.shape[0]
num_channel = data.shape[1]
if len(mean_std) == 1:
mean_std = [mean_std[0]] * num_channel
normalized_data = []
for i in range(num_data):
img = data[i, ...]
normalized_img = []
for j in range(num_channel):
img_per_channel = img[j, ...]
mean, std = mean_std[j][0], mean_std[j][1]
_img = (img_per_channel - mean) / std
normalized_img.append(_img)
normalized_data.append(normalized_img)
return np.array(normalized_data)
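# --- Illustrative sketch (added): `data` is expected as (num_volumes, num_channels, ...)
# --- and `mean_std` as a list of (mean, std) pairs, one per channel (a single pair is
# --- broadcast to all channels). The shapes and statistics below are assumptions. ---
# import numpy as np
# volumes = np.random.rand(4, 1, 64, 64, 32).astype("float32")
# normalized = channel_wise_normalize_3d(volumes, mean_std=[(0.5, 0.25)])
# assert normalized.shape == volumes.shape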
# ---------------------------------------------Downstream ChestX-ray14------------------------------------------
class ChestX_ray14(Dataset):
def __init__(self, pathImageDirectory, pathDatasetFile, augment, num_class=14, anno_percent=100):
self.img_list = []
self.img_label = []
self.augment = augment
with open(pathDatasetFile, "r") as fileDescriptor:
line = True
while line:
line = fileDescriptor.readline()
if line:
lineItems = line.split()
imagePath = os.path.join(pathImageDirectory, lineItems[0])
imageLabel = lineItems[1:num_class + 1]
imageLabel = [int(i) for i in imageLabel]
self.img_list.append(imagePath)
self.img_label.append(imageLabel)
indexes = np.arange(len(self.img_list))
if anno_percent < 100:
random.Random(99).shuffle(indexes)
num_data = int(indexes.shape[0] * anno_percent / 100.0)
indexes = indexes[:num_data]
_img_list, _img_label = copy.deepcopy(self.img_list), copy.deepcopy(self.img_label)
self.img_list = []
self.img_label = []
for i in indexes:
self.img_list.append(_img_list[i])
self.img_label.append(_img_label[i])
def __getitem__(self, index):
imagePath = self.img_list[index]
imageData = Image.open(imagePath).convert('RGB')
imageLabel = torch.FloatTensor(self.img_label[index])
if self.augment is not None: imageData = self.augment(imageData)
return imageData, imageLabel
def __len__(self):
return len(self.img_list)
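# --- Illustrative usage (added sketch; paths and the transform object are assumptions) ---
# from torch.utils.data import DataLoader
# dataset = ChestX_ray14("/data/nih/images", "/data/nih/train_list.txt",
#                        augment=train_transform, num_class=14, anno_percent=100)
# loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=4)
# images, labels = next(iter(loader))  # labels: (32, 14) multi-hot targets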
# ---------------------------------------------Downstream CheXpert------------------------------------------
class CheXpert(Dataset):
def __init__(self, pathImageDirectory, pathDatasetFile, augment, num_class=14,
uncertain_label="LSR-Ones", unknown_label=0, anno_percent=100):
self.img_list = []
self.img_label = []
self.augment = augment
assert uncertain_label in ["Ones", "Zeros", "LSR-Ones", "LSR-Zeros"]
self.uncertain_label = uncertain_label
with open(pathDatasetFile, "r") as fileDescriptor:
csvReader = csv.reader(fileDescriptor)
next(csvReader, None)
for line in csvReader:
imagePath = os.path.join(pathImageDirectory, line[0])
label = line[5:]
for i in range(num_class):
if label[i]:
a = float(label[i])
if a == 1:
label[i] = 1
elif a == 0:
label[i] = 0
elif a == -1: # uncertain label
label[i] = -1
else:
label[i] = unknown_label # unknown label
self.img_list.append(imagePath)
imageLabel = [int(i) for i in label]
self.img_label.append(imageLabel)
indexes = np.arange(len(self.img_list))
if anno_percent < 100:
random.Random(99).shuffle(indexes)
num_data = int(indexes.shape[0] * anno_percent / 100.0)
indexes = indexes[:num_data]
_img_list, _img_label = copy.deepcopy(self.img_list), copy.deepcopy(self.img_label)
self.img_list = []
self.img_label = []
for i in indexes:
self.img_list.append(_img_list[i])
self.img_label.append(_img_label[i])
def __getitem__(self, index):
imagePath = self.img_list[index]
imageData = Image.open(imagePath).convert('RGB')
label = []
for l in self.img_label[index]:
if l == -1:
if self.uncertain_label == "Ones":
label.append(1)
elif self.uncertain_label == "Zeros":
label.append(0)
elif self.uncertain_label == "LSR-Ones":
label.append(random.uniform(0.55, 0.85))
elif self.uncertain_label == "LSR-Zeros":
label.append(random.uniform(0, 0.3))
else:
label.append(l)
imageLabel = torch.FloatTensor(label)
if self.augment is not None: imageData = self.augment(imageData)
return imageData, imageLabel
def __len__(self):
return len(self.img_list)
# ---------------------------------------------------NPY DataSet------------------------------------------------
class NPYDataLoader(Dataset):
def __init__(self, data):
self.data_x, self.data_y = data
def __len__(self):
return self.data_x.shape[0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return self.data_x[idx, ...], self.data_y[idx, ...]
# --------------------------------------------Downstream LUNA FPR 3D--------------------------------------------
def LUNA_FPR_3D(data_dir, fold, input_size, hu_range, crop=True, normalization=None, set="data", anno_percent=100,
shuffle=True):
input_rows, input_cols, input_deps = input_size[0], input_size[1], input_size[2]
hu_min, hu_max = hu_range[0], hu_range[1]
def load_image(data_dir, fold, input_rows, input_cols, hu_min, hu_max, crop=True):
positives, negatives = [], []
for subset in fold:
LUNA16_PROCESSED_DIR_POS = os.path.join(data_dir, "subset" + str(subset), "positives")
LUNA16_PROCESSED_DIR_NEG = os.path.join(data_dir, "subset" + str(subset), "negatives")
positive_file_list = glob(os.path.join(LUNA16_PROCESSED_DIR_POS, "*.npy"))
negative_file_list = glob(os.path.join(LUNA16_PROCESSED_DIR_NEG, "*.npy"))
positive_index = [x for x in range(len(positive_file_list))]
negative_index = [x for x in range(len(negative_file_list))]
if shuffle:
random.shuffle(positive_index)
random.shuffle(negative_index)
for i in range(min(len(positive_file_list), len(negative_file_list))):
im_pos_ = np.load(positive_file_list[positive_index[i]])
im_neg_ = np.load(negative_file_list[negative_index[i]])
if crop:
im_pos = np.zeros((input_rows, input_cols, im_pos_.shape[-1]), dtype="float")
im_neg = np.zeros((input_rows, input_cols, im_pos_.shape[-1]), dtype="float")
for z in range(im_pos_.shape[-1]):
im_pos[:, :, z] = resize(im_pos_[:, :, z], (input_rows, input_cols), preserve_range=True)
im_neg[:, :, z] = resize(im_neg_[:, :, z], (input_rows, input_cols), preserve_range=True)
else:
im_pos, im_neg = im_pos_, im_neg_
im_pos[im_pos < hu_min] = hu_min
im_pos[im_pos > hu_max] = hu_max
im_neg[im_neg < hu_min] = hu_min
im_neg[im_neg > hu_max] = hu_max
im_pos = (im_pos - hu_min) / (hu_max - hu_min)
im_neg = (im_neg - hu_min) / (hu_max - hu_min)
positives.append(im_pos)
negatives.append(im_neg)
positives, negatives = np.array(positives), np.array(negatives)
positives, negatives = np.expand_dims(positives, axis=-1), np.expand_dims(negatives, axis=-1)
return positives, negatives
x_pos, x_neg = load_image(data_dir, fold, input_rows, input_cols, hu_min, hu_max, crop=crop)
x_data = np.concatenate((x_pos, x_neg), axis=0)
y_data = np.concatenate((np.ones((x_pos.shape[0],)),
np.zeros((x_neg.shape[0],)),
), axis=0)
x_data = np.expand_dims(np.squeeze(x_data), axis=1)
if normalization is not None and normalization.lower() != "none":
mean_std = NORMALIZATION_STATISTICS[normalization.lower()]
x_data = channel_wise_normalize_3d(x_data, mean_std=mean_std)
if anno_percent < 100:
ind_list = [i for i in range(x_data.shape[0])]
random.Random(99).shuffle(ind_list)
num_data = int(x_data.shape[0] * anno_percent / 100.0)
x_data = x_data[ind_list[:num_data], ...]
y_data = y_data[ind_list[:num_data], ...]
print("x_{}: {} | {:.2f} ~ {:.2f}".format(set, x_data.shape, np.min(x_data), np.max(x_data)))
print("y_{}: {} | {:.2f} ~ {:.2f}".format(set, y_data.shape, np.min(y_data), np.max(y_data)))
return x_data, y_data
# ----------------------------------------------Downstream LIDC 3D----------------------------------------------
def LIDC_3D(data_dir, set, normalization=None, anno_percent=100):
x_data = np.squeeze(np.load(os.path.join(data_dir, 'x_' + set + '_64x64x32.npy')))
y_data = np.squeeze(np.load(os.path.join(data_dir, 'm_' + set + '_64x64x32.npy')))
x_data = np.expand_dims(x_data, axis=1)
y_data = np.expand_dims(y_data, axis=1)
if normalization is not None and normalization.lower() != "none":
mean_std = NORMALIZATION_STATISTICS[normalization.lower()]
x_data = channel_wise_normalize_3d(x_data, mean_std=mean_std)
if anno_percent < 100:
ind_list = [i for i in range(x_data.shape[0])]
random.Random(99).shuffle(ind_list)
num_data = int(x_data.shape[0] * anno_percent / 100.0)
x_data = x_data[ind_list[:num_data], ...]
y_data = y_data[ind_list[:num_data], ...]
print("x_{}: {} | {:.2f} ~ {:.2f}".format(set, x_data.shape, np.min(x_data), np.max(x_data)))
print("y_{}: {} | {:.2f} ~ {:.2f}".format(set, y_data.shape, np.min(y_data), np.max(y_data)))
return x_data, y_data
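# --- Illustrative chaining (added sketch; the data directory is a placeholder) ---
# x_train, y_train = LIDC_3D("/data/lidc", "train", normalization=None)
# train_loader = torch.utils.data.DataLoader(NPYDataLoader((x_train, y_train)),
#                                            batch_size=8, shuffle=True)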
# ----------------------------------------------Downstream LiTS 3D----------------------------------------------
def LiTS_3D(data_path, id_list, obj="liver", normalization=None, anno_percent=100,
input_size=(64, 64, 32), hu_range=(-1000.0, 1000.0), status=None):
def load_data_npy(data_path, id_list, obj="liver", input_size=(64, 64, 32), hu_range=(-1000.0, 1000.0), status=None):
x_data, y_data = [], []
input_rows, input_cols, input_deps = input_size[0], input_size[1], input_size[2]
hu_min, hu_max = hu_range[0], hu_range[1]
for patient_id in id_list:
Vol = np.load(os.path.join(data_path, "volume-" + str(patient_id) + ".npy"))
Vol[Vol > hu_max] = hu_max
Vol[Vol < hu_min] = hu_min
Vol = (Vol - hu_min) / (hu_max - hu_min)
Vol = np.expand_dims(Vol, axis=0)
Mask = np.load(os.path.join(data_path, "segmentation-" + str(patient_id) + ".npy"))
liver_mask, lesion_mask = copy.deepcopy(Mask), copy.deepcopy(Mask)
liver_mask[Mask > 0.5] = 1
liver_mask[Mask <= 0.5] = 0
lesion_mask[Mask > 1] = 1
lesion_mask[Mask <= 1] = 0
Mask = np.concatenate((np.expand_dims(liver_mask, axis=0), np.expand_dims(lesion_mask, axis=0)), axis=0)
if obj == "liver":
for i in range(input_rows - 1, Vol.shape[1] - input_rows + 1, input_rows):
for j in range(input_cols - 1, Vol.shape[2] - input_cols + 1, input_cols):
for k in range(input_deps - 1, Vol.shape[3] - input_deps + 1, input_deps):
if np.sum(Mask[0, i:i + input_rows, j:j + input_cols,
k:k + input_deps]) > 0 or random.random() < 0.01:
x_data.append(Vol[:, i:i + input_rows, j:j + input_cols, k:k + input_deps])
y_data.append(Mask[:, i:i + input_rows, j:j + input_cols, k:k + input_deps])
if np.sum(Mask[0]) > 1000:
cx, cy, cz = ndimage.measurements.center_of_mass(np.squeeze(Mask[0]))
# print(cx, cy, cz)
cx, cy, cz = int(cx), int(cy), int(cz)
for delta_x in range(-10, 20, 20):
for delta_y in range(-10, 20, 20):
for delta_z in range(-5, 10, 10):
if cx + delta_x - int(input_rows / 2) < 0 or cx + delta_x + int(input_rows / 2) > Vol.shape[1] - 1 or \
cy + delta_y - int(input_cols / 2) < 0 or cy + delta_y + int(input_cols / 2) > Vol.shape[2] - 1 or \
cz + delta_z - int(input_deps / 2) < 0 or cz + delta_z + int(input_deps / 2) > Vol.shape[3] - 1:
pass
else:
x_data.append(Vol[:, cx + delta_x - int(input_rows / 2):cx + delta_x + int(input_rows / 2), \
cy + delta_y - int(input_cols / 2):cy + delta_y + int(input_cols / 2), \
cz + delta_z - int(input_deps / 2):cz + delta_z + int(input_deps / 2)])
y_data.append(Mask[:, cx + delta_x - int(input_rows / 2):cx + delta_x + int(input_rows / 2), \
cy + delta_y - int(input_cols / 2):cy + delta_y + int(input_cols / 2), \
cz + delta_z - int(input_deps / 2):cz + delta_z + int(input_deps / 2)])
elif obj == "lesion":
if np.sum(Mask[1]) > 0:
labels = measure.label(Mask[1], neighbors=8, background=0)
for label in np.unique(labels):
if label == 0:
continue
labelMask = np.zeros(Mask[1].shape, dtype="int")
labelMask[labels == label] = 1
cx, cy, cz = ndimage.measurements.center_of_mass(np.squeeze(labelMask))
cx, cy, cz = int(cx), int(cy), int(cz)
if labelMask[cx, cy, cz] == 1:
for delta_x in range(-5, 5, 5):
for delta_y in range(-5, 5, 5):
for delta_z in range(-3, 3, 3):
if cx + delta_x - int(input_rows / 2) < 0 or cx + delta_x + int(input_rows / 2) > Vol.shape[1] - 1 \
or \
cy + delta_y - int(input_cols / 2) < 0 or cy + delta_y + int(input_cols / 2) > Vol.shape[2] - 1 \
or \
cz + delta_z - int(input_deps / 2) < 0 or cz + delta_z + int(input_deps / 2) > Vol.shape[3] - 1:
pass
else:
x_data.append(
Vol[:, cx + delta_x - int(input_rows / 2):cx + delta_x + int(input_rows / 2), \
cy + delta_y - int(input_cols / 2):cy + delta_y + int(input_cols / 2), \
cz + delta_z - int(input_deps / 2):cz + delta_z + int(input_deps / 2)])
y_data.append(
Mask[:, cx + delta_x - int(input_rows / 2):cx + delta_x + int(input_rows / 2), \
cy + delta_y - int(input_cols / 2):cy + delta_y + int(input_cols / 2), \
cz + delta_z - int(input_deps / 2):cz + delta_z + int(input_deps / 2)])
else:
print("Objetc [{}] does not exist!".format(obj))
return np.array(x_data), np.array(y_data)
x_data, y_data = load_data_npy(data_path, id_list, obj=obj, input_size=input_size, hu_range=hu_range, status=status)
# print(x_data.shape, y_data.shape)
if obj == "liver":
y_data = y_data[:, 0:1, :, :, :]
elif obj == "lesion":
y_data = y_data[:, 1:2, :, :, :]
if normalization is not None and normalization.lower() != "none":
mean_std = NORMALIZATION_STATISTICS[normalization.lower()]
x_data = channel_wise_normalize_3d(x_data, mean_std=mean_std)
if anno_percent < 100:
ind_list = [i for i in range(x_data.shape[0])]
random.Random(99).shuffle(ind_list)
num_data = int(x_data.shape[0] * anno_percent / 100.0)
x_data = x_data[ind_list[:num_data], ...]
y_data = y_data[ind_list[:num_data], ...]
print("x_{}: {} | {:.2f} ~ {:.2f}".format(status, x_data.shape, np.min(x_data), np.max(x_data)))
print("y_{}: {} | {:.2f} ~ {:.2f}".format(status, y_data.shape, np.min(y_data), np.max(y_data)))
return x_data, y_data
# ----------------------------------------------Downstream PE 3D----------------------------------------------
def PE_3D(data_dir, normalization=None, hu_range=(-1000.0, 1000.0), status="train", anno_percent=100, seed=None):
hu_min, hu_max = hu_range[0], hu_range[1]
if status == "train":
x_data = np.load(os.path.join(data_dir, "pe-gt-voxels-features-tr-hu.npy"))
y_data = np.load(os.path.join(data_dir, "pe-gt-voxels-labels-tr.npy"))
validation_rate = 0.2
idx_list = [i for i in range(x_data.shape[0])]
random.Random(seed).shuffle(idx_list)
x_train = x_data[idx_list[int(round(x_data.shape[0] * validation_rate)):]]
y_train = y_data[idx_list[int(round(y_data.shape[0] * validation_rate)):]]
x_train = np.expand_dims(x_train, axis=1)
x_train[x_train > hu_max] = hu_max
x_train[x_train < hu_min] = hu_min
x_train = 1.0 * (x_train - hu_min) / (hu_max - hu_min)
x_valid = x_data[idx_list[:int(round(x_data.shape[0] * validation_rate))]]
y_valid = y_data[idx_list[:int(round(y_data.shape[0] * validation_rate))]]
x_valid = np.expand_dims(x_valid, axis=1)
x_valid[x_valid > hu_max] = hu_max
x_valid[x_valid < hu_min] = hu_min
x_valid = 1.0 * (x_valid - hu_min) / (hu_max - hu_min)
# augmentation
x, y = [], []
for i in range(x_train.shape[0]):
if y_train[i] == 1:
for b in range(13, 19):
degree = random.choice([0, 1, 2, 3])
if degree == 0:
x.append(x_train[i, :, :, :, b:b + 32])
else:
x.append(np.flip(x_train[i, :, :, :, b:b + 32], axis=degree))
y.append(y_train[i])
else:
x.append(x_train[i, :, :, :, 16:48])
y.append(y_train[i])
x_train, y_train = copy.deepcopy(np.array(x)), copy.deepcopy(np.array(y))
x, y = [], []
for i in range(x_valid.shape[0]):
if y_valid[i] == 1:
for b in range(13, 19):
degree = random.choice([0, 1, 2, 3])
if degree == 0:
x.append(x_valid[i, :, :, :, b:b + 32])
else:
x.append(np.flip(x_valid[i, :, :, :, b:b + 32], axis=degree))
y.append(y_valid[i])
else:
x.append(x_valid[i, :, :, :, 16:48])
y.append(y_valid[i])
x_valid, y_valid = copy.deepcopy(np.array(x)), copy.deepcopy(np.array(y))
if normalization is not None and normalization.lower() != "none":
mean_std = NORMALIZATION_STATISTICS[normalization.lower()]
x_train = channel_wise_normalize_3d(x_train, mean_std=mean_std)
x_valid = channel_wise_normalize_3d(x_valid, mean_std=mean_std)
if anno_percent < 100:
ind_list = [i for i in range(x_train.shape[0])]
random.Random(99).shuffle(ind_list)
num_data = int(x_train.shape[0] * anno_percent / 100.0)
x_train = x_train[ind_list[:num_data], ...]
y_train = y_train[ind_list[:num_data], ...]
print("x_train: {} | {:.2f} ~ {:.2f}".format(x_train.shape, np.min(x_train), np.max(x_train)))
print("y_train: {} | {:.2f} ~ {:.2f}".format(y_train.shape, np.min(y_train), np.max(y_train)))
print("x_valid: {} | {:.2f} ~ {:.2f}".format(x_valid.shape, np.min(x_valid), np.max(x_valid)))
print("y_valid: {} | {:.2f} ~ {:.2f}".format(y_valid.shape, np.min(y_valid), np.max(y_valid)))
return x_train, y_train, x_valid, y_valid
else:
x_test = np.load(os.path.join(data_dir, "pe-gt-voxels-features-te-hu.npy"))
y_test = np.load(os.path.join(data_dir, "pe-gt-voxels-labels-te.npy"))
x_test = np.expand_dims(x_test, axis=1)
x_test[x_test > hu_max] = hu_max
x_test[x_test < hu_min] = hu_min
x_test = 1.0 * (x_test - hu_min) / (hu_max - hu_min)
x_test = x_test[:, :, :, :, 16:48]
if normalization is not None and normalization.lower() != "none":
mean_std = NORMALIZATION_STATISTICS[normalization.lower()]
x_test = channel_wise_normalize_3d(x_test, mean_std=mean_std)
print("x_test: {} | {:.2f} ~ {:.2f}".format(x_test.shape, np.min(x_test), np.max(x_test)))
print("y_test: {} | {:.2f} ~ {:.2f}".format(y_test.shape, np.min(y_test), np.max(y_test)))
return x_test, y_test
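# --- Illustrative call pattern (added sketch; the path is a placeholder) ---
# x_tr, y_tr, x_va, y_va = PE_3D("/data/pe", status="train", anno_percent=100, seed=0)
# x_te, y_te = PE_3D("/data/pe", status="test")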
# ----------------------------------------------Downstream BraTS 3D----------------------------------------------
class BraTS_Seg_3D(Dataset):
def __init__(self, data_dir, file, mode="train", modality="flair", input_size=(64, 64, 32), normalization=None,
positives=[1, 2, 4], crop_size=(100, 100, 50), delta=30, anno_percent=100, seed=0):
self.patient_list = []
with open(file, "r") as fileDescriptor:
line = True
while line:
line = fileDescriptor.readline()
if line:
lineItems = line.split()
self.patient_list.append(lineItems[0])
indexes = np.arange(len(self.patient_list))
if anno_percent < 100:
random.Random(99).shuffle(indexes)
num_data = int(indexes.shape[0] * anno_percent / 100.0)
indexes = indexes[:num_data]
_patient_list = copy.deepcopy(self.patient_list)
self.patient_list = []
for i in indexes:
self.patient_list.append(_patient_list[i])
self.indexes = np.arange(len(self.patient_list))
import BraTS
self.brats = BraTS.DataSet(brats_root=data_dir, year=2018)
self.modality = modality
self.positives = positives
self.input_size = input_size
self.crop_size = crop_size
self.delta = delta
self.normalization = normalization
self.mode = mode
if seed is None:
self.seed = random.randint(0, 10000)
else:
self.seed = seed
self.batch_generator = random.Random()
self.batch_generator.seed(self.seed)
self.patch_generator = random.Random()
self.patch_generator.seed(self.seed)
def load_patient(self, patient_id):
patient = self.brats.train.patient(patient_id)
# load images
if self.modality == "flair":
img = patient.flair
img = (img - np.min(img)) * 1.0 / (np.max(img) - np.min(img))
elif self.modality == "t1":
img = patient.t1
img = (img - np.min(img)) * 1.0 / (np.max(img) - np.min(img))
#
# Copyright © 2018 United States Government as represented by the Administrator of the
# National Aeronautics and Space Administration. All Rights Reserved.
#
import time
import numpy as np
import sunpy.map
import sunpy.io
import json
import astropy.units as u
import pandas as pd
from scipy import stats
from scipy.spatial import ConvexHull
from astropy.utils.data import download_file
import urllib
from datetime import datetime, timedelta
import requests
from pyquaternion import Quaternion
# quantity of polygons
n = 21
# domain definition
theta = np.linspace(0, np.pi / 2, n)
phi = np.linspace(0, 2 * np.pi, n)
theta, phi = np.meshgrid(theta, phi)
# Constant for aspect ratio of lemniscate silhouette
AU_REFERENCE_CUBE = 1
# constant for domain and grid inits
GRID_HALF_WIDTH = 800
# function takes care of updating all of the points for the different plots
def plot_update(radial, angular, long, lat):
# data calculation section for width and distance interaction with figure
# scalars of the lemniscate
# c3 is not stored because it is always 1
lem_distance_c_straight_pixel = (radial * u.solRad).to(u.km) * (
GRID_HALF_WIDTH / (AU_REFERENCE_CUBE * u.AU).to(u.km))
c_one = lem_distance_c_straight_pixel
c_two = c_one * np.tan(((angular / 2) * u.deg))
x_mod = c_one * np.cos(theta)
y_mod = c_two * np.cos(theta) * np.sin(theta) * np.cos(phi)
z_mod = c_two * np.cos(theta)
import torch
from torch.optim import lr_scheduler
from tqdm import tqdm
from torchsummary import summary
from torch.utils.tensorboard import SummaryWriter
from apex import amp
from loss import dice
from pathlib import Path
from data import CaseDataset, load_case, save_pred, \
orient_crop_case, regions_crop_case, resample_normalize_case
import nibabel as nib
import numpy as np
import scipy.special as spe
from transform import pad, crop_pad, to_numpy, to_tensor, resize
def predict_per_patch(input,
model,
num_classes=3,
patch_size=(96, 96, 96),
step_per_patch=4,
verbose=True,
one_hot=False):
device = next(model.parameters()).device
# add padding if patch is larger than input shape
origial_shape = input.shape[:3]
input = pad(input, patch_size)
padding_shape = input.shape[:3]
coord_start = np.array([i // 2 for i in patch_size])
coord_end = np.array([padding_shape[i] - patch_size[i] // 2
for i in range(len(patch_size))])
num_steps = np.ceil([(coord_end[i] - coord_start[i]) / (patch_size[i] / step_per_patch)
for i in range(3)])
step_size = np.array([(coord_end[i] - coord_start[i]) / (num_steps[i] + 1e-8)
for i in range(3)])
step_size[step_size == 0] = 9999999
x_steps = np.arange(coord_start[0], coord_end[0] + 1e-8, step_size[0], dtype=np.int)
y_steps = np.arange(coord_start[1], coord_end[1] + 1e-8, step_size[1], dtype=np.int)
z_steps = np.arange(coord_start[2], coord_end[2] + 1e-8, step_size[2], dtype=np.int)
# Imports: standard library
import os
import sys
import argparse
import datetime
import tempfile
import multiprocessing as mp
from time import time
from typing import Dict, List, Type, Tuple, Union, Callable
from itertools import product
# Imports: third party
import h5py
import numpy as np
import pytest
import neurokit2 as nk
# Imports: first party
import tensormap
from definitions.edw import EDW_FILES
from ml4c3.arguments import parse_args
from ingest.icu.writers import Writer
from definitions.globals import TENSOR_EXT
from tensormap.TensorMap import TensorMap, Interpretation, get_local_timestamps
from tensormap.icu_signals import get_tmap as GET_SIGNAL_TMAP
from ingest.icu.data_objects import (
Event,
Procedure,
Medication,
StaticData,
Measurement,
ICUDataObject,
BedmasterAlarm,
BedmasterSignal,
)
from tensormap.icu_list_signals import get_tmap as GET_LIST_TMAP
# pylint: disable=redefined-outer-name, unused-argument, missing-class-docstring
def pytest_configure():
def tff(tm, hd5):
return hd5[f"/{tm.name}"][:]
pytest.TFF = tff
pytest.N_TENSORS = 50
pytest.CONTINUOUS_TMAPS = [
TensorMap(
f"{n}d_cont",
shape=tuple(range(2, n + 2)),
interpretation=Interpretation.CONTINUOUS,
tensor_from_file=tff,
)
for n in range(1, 6)
]
pytest.CATEGORICAL_TMAPS = [
TensorMap(
f"{n}d_cat",
shape=tuple(range(2, n + 2)),
interpretation=Interpretation.CATEGORICAL,
channel_map={f"c_{i}": i for i in range(n + 1)},
tensor_from_file=tff,
)
for n in range(1, 6)
]
pytest.TMAPS_UP_TO_4D = pytest.CONTINUOUS_TMAPS[:-1] + pytest.CATEGORICAL_TMAPS[:-1]
pytest.TMAPS_5D = pytest.CONTINUOUS_TMAPS[-1:] + pytest.CATEGORICAL_TMAPS[-1:]
pytest.MULTIMODAL_UP_TO_4D = [
list(x)
for x in product(pytest.CONTINUOUS_TMAPS[:-1], pytest.CATEGORICAL_TMAPS[:-1])
]
pytest.SEGMENT_IN = TensorMap(
"2d_for_segment_in",
shape=(32, 32, 1),
interpretation=Interpretation.CONTINUOUS,
metrics=["mse"],
tensor_from_file=tff,
)
pytest.SEGMENT_OUT = TensorMap(
"2d_for_segment_out",
shape=(32, 32, 2),
interpretation=Interpretation.CATEGORICAL,
channel_map={"yes": 0, "no": 1},
tensor_from_file=tff,
)
pytest.MOCK_TMAPS = {
tmap.name: tmap for tmap in pytest.CONTINUOUS_TMAPS + pytest.CATEGORICAL_TMAPS
}
pytest.example_mrn = "123"
pytest.example_visit_id = "345"
pytest.run_id = "1234567"
pytest.run_id_par = "12345678"
pytest.datadir = os.path.join(os.path.dirname(__file__), "icu_ingest", "data")
# CrossRef
pytest.cross_ref_file = os.path.join(pytest.datadir, "xref_file.csv")
pytest.cross_ref_file_tens = os.path.join(pytest.datadir, "xref_file_tensorize.csv")
# BedMaster
pytest.bedmaster_dir = os.path.join(pytest.datadir, "bedmaster")
pytest.mat_file = os.path.join(pytest.bedmaster_dir, "bedmaster_file-123_5_v4.mat")
pytest.bedmaster_matching = os.path.join(pytest.datadir, "bedmaster_matching_files")
# EDW
pytest.edw_dir = os.path.join(pytest.datadir, "edw")
pytest.edw_patient_dir = os.path.join(
pytest.edw_dir,
pytest.example_mrn,
pytest.example_visit_id,
)
pytest.adt_path = os.path.join(pytest.edw_dir, "adt.csv")
# Alarms
pytest.alarms_dir = os.path.join(pytest.datadir, "bedmaster_alarms")
pytest_configure()
class Utils:
@staticmethod
def build_hd5s(
path: str,
tensor_maps: List[TensorMap],
n=5,
) -> Dict[Tuple[str, TensorMap], np.ndarray]:
"""
Builds hd5s at path given TensorMaps. Only works for Continuous and
Categorical TensorMaps.
"""
out = {}
for i in range(n):
hd5_path = os.path.join(path, f"{i}{TENSOR_EXT}")
with h5py.File(hd5_path, "w") as hd5:
for tm in tensor_maps:
if tm.is_continuous:
value = np.full(tm.shape, fill_value=i, dtype=np.float32)
elif tm.is_categorical:
value = np.zeros(tm.shape, dtype=np.float32)
value[..., i % tm.shape[-1]] = 1
else:
raise NotImplementedError(
"Cannot automatically build hd5 from interpretation"
f' "{tm.interpretation}"',
)
hd5.create_dataset(f"/{tm.name}", data=value)
out[(hd5_path, tm)] = value
return out
@pytest.fixture(scope="session")
def utils() -> Type[Utils]:
return Utils
# The purpose of this fixture is to always use the fake testing TMaps.
# The function which retrieves tmaps is update_tmaps from TensorMap.py;
# However, that function is usually imported directly, i.e.
#
# from ml4c3.TensorMap import update_tmaps
#
# This import creates a new object with the same name in the importing file,
# and now needs to be mocked too, e.g.
#
# mock ml4c3.arguments.update_tmaps --> mock_update_tmaps
#
# https://stackoverflow.com/a/45466846
@pytest.fixture(autouse=True)
def use_testing_tmaps(monkeypatch):
def mock_update_tmaps(tmap_name: str, tmaps: Dict[str, TensorMap]):
return pytest.MOCK_TMAPS
monkeypatch.setattr(tensormap.TensorMap, "update_tmaps", mock_update_tmaps)
baseline_default_arguments = [
"--input_tensors",
"3d_cont",
"--output_tensors",
"1d_cat",
"--conv_x",
"3",
"--conv_y",
"3",
"--conv_z",
"3",
"--pool_x",
"1",
"--pool_y",
"1",
"--pool_z",
"1",
"--num_workers",
"1",
"--epochs",
"2",
"--batch_size",
"2",
"--dense_layers",
"4",
"--conv_blocks",
"4",
"--conv_block_size",
"3",
"--optimizer",
"adam",
"--activation_layer",
"relu",
"--learning_rate",
"0.001",
]
@pytest.fixture(scope="function")
def default_arguments(tmpdir_factory, utils: Utils) -> argparse.Namespace:
temp_dir = tmpdir_factory.mktemp("data")
utils.build_hd5s(temp_dir, pytest.MOCK_TMAPS.values(), n=pytest.N_TENSORS)
hd5_dir = str(temp_dir)
sys.argv = [
".",
"train",
"--tensors",
hd5_dir,
"--output_folder",
hd5_dir,
]
sys.argv.extend(baseline_default_arguments)
args = parse_args()
return args
@pytest.fixture(scope="function")
def default_arguments_infer(tmpdir_factory, utils: Utils) -> argparse.Namespace:
temp_dir = tmpdir_factory.mktemp("data")
utils.build_hd5s(temp_dir, pytest.MOCK_TMAPS.values(), n=pytest.N_TENSORS)
hd5_dir = str(temp_dir)
sys.argv = [
".",
"infer",
"--tensors",
hd5_dir,
"--output_folder",
hd5_dir,
]
sys.argv.extend(baseline_default_arguments)
args = parse_args()
return args
@pytest.fixture(scope="function")
def default_arguments_explore(tmpdir_factory, utils: Utils) -> argparse.Namespace:
temp_dir = tmpdir_factory.mktemp("data")
utils.build_hd5s(temp_dir, pytest.MOCK_TMAPS.values(), n=pytest.N_TENSORS)
hd5_dir = str(temp_dir)
sys.argv = [
".",
"explore",
"--tensors",
hd5_dir,
"--output_folder",
hd5_dir,
]
args = parse_args()
return args
def pytest_exception_interact(node, call, report):
for child in mp.active_children():
child.terminate()
@pytest.fixture(scope="function")
def matfile() -> h5py.File:
with h5py.File(pytest.mat_file, "r") as mat_file:
yield mat_file
@pytest.fixture(scope="function")
def empty_matfile() -> h5py.File:
with tempfile.NamedTemporaryFile(delete=False) as _file:
with h5py.File(_file.name, "w") as mat_file:
mat_file.create_group("vs")
mat_file.create_group("wv")
yield _file
try:
os.remove(_file.name)
except OSError:
pass
@pytest.fixture(scope="module")
def temp_file():
with tempfile.NamedTemporaryFile(delete=False) as _file:
yield _file
@pytest.fixture(scope="function")
def temp_dir():
with tempfile.TemporaryDirectory() as _tmp_dir:
yield _tmp_dir
@pytest.fixture(scope="session")
def test_scale_units() -> Dict[str, Dict[str, Union[int, float, str]]]:
# fmt: off
return {
"CUFF": {"scaling_factor": 1, "units": "mmHg"},
"HR": {"scaling_factor": 0.5, "units": "Bpm"},
"I": {"scaling_factor": 0.0243, "units": "mV"},
"II": {"scaling_factor": 0.0243, "units": "mV"},
"V": {"scaling_factor": 0.0243, "units": "mV"},
"SPO2": {"scaling_factor": 0.039, "units": "%"},
"RR": {"scaling_factor": 0.078, "units": "UNKNOWN"},
"VNT_PRES": {"scaling_factor": 1, "units": "UNKNOWN"},
"VNT_FLOW": {"scaling_factor": 1, "units": "UNKNOWN"},
"CO2": {"scaling_factor": 1, "units": "UNKNOWN"},
}
# fmt: on
class FakeSignal:
"""
Mock signal objects for use in testing.
"""
def __init__(self):
self.today = datetime.date.today()
@staticmethod
def get_bedmaster_signal() -> BedmasterSignal:
starting_time = int(time())
sample_freq = 60
duration_sec = 10
n_points = duration_sec * sample_freq
m_signal = BedmasterSignal(
name="Some_signal",
source="waveform",
channel="ch10",
value=np.array(np.random.randint(40, 100, n_points)),
time=np.arange(starting_time, starting_time + duration_sec, 0.25),
units="mmHg",
sample_freq=np.array(
[(sample_freq, 0), (120, n_points / 10)],
dtype="float,int",
),
scale_factor=np.random.randint(0, 5),
time_corr_arr=np.packbits(np.random.randint(0, 2, 100).astype(np.bool)),
samples_per_ts=np.array([15] * int(duration_sec / 0.25)),
)
return m_signal
@staticmethod
def get_static_data() -> StaticData:
static_data = StaticData(
department_id=np.array([1234, 12341]),
department_nm=np.array(["BLAKE1", "BLAKE2"]).astype("S"),
room_bed=np.array(["123 - 222", "456 - 333"]).astype("S"),
move_time=np.array(["2021-05-15 06:47:00", "2021-05-25 06:47:00"]).astype(
"S",
),
weight=np.random.randint(50, 100),
height=np.random.randint(150, 210) / 100,
admin_type="testing",
admin_date="1995-08-06 00:00:00.0000000",
birth_date="1920-05-06 00:00:00.0000000",
race=str(np.random.choice(["Asian", "Native American", "Black"])),
sex=str(np.random.choice(["male", "female"])),
end_date="2020-07-10 12:00:00.0000000",
end_stay_type=str(np.random.choice(["discharge", "death"])),
local_time=["UTC-4:00"],
medical_hist=np.array(
["ID: 245324; NAME: Diabetes; COMMENTS: typeI; DATE: UNKNOWN"],
).astype("S"),
surgical_hist=np.array(
["ID: 241324; NAME: VASECTOMY; COMMENTS: Sucessfully; DATE: UNKNOWN"],
).astype("S"),
tobacco_hist="STATUS: Yes - Quit; COMMENT: 10 years ago",
alcohol_hist="STATUS: Yes; COMMENT: a little",
admin_diag="aortic valve repair.",
)
return static_data
@staticmethod
def get_measurement() -> Measurement:
starting_time = int(time())
measurement = Measurement(
name="Some_Measurment",
source=str(
np.random.choice(
[
EDW_FILES["lab_file"]["source"],
EDW_FILES["vitals_file"]["source"],
],
),
),
value=np.array(np.random.randint(40, 100, 100)),
time=np.array(list(range(starting_time, starting_time + 100))),
units=str(np.random.choice(["mmHg", "bpm", "%"])),
data_type=str(np.random.choice(["categorical", "numerical"])),
metadata={"Some_Metadata": np.array(np.random.randint(0, 1, 100))},
)
return measurement
@staticmethod
def get_medication() -> Medication:
starting_time = int(time())
medication = Medication(
name="Some_medication_in_g/ml",
dose=np.array(np.random.randint(0, 2, 10)),
units=str(np.random.choice(["g/ml", "mg", "pills"])),
start_date=np.array(list(range(starting_time, starting_time + 100, 10))),
action=np.random.choice(["Given", "New bag", "Rate Change"], 10).astype(
"S",
),
route=str(np.random.choice(["Oral", "Nasal", "Otic"])),
wt_based_dose=bool(np.random.randint(0, 2)),
)
return medication
@staticmethod
def get_procedure() -> Procedure:
starting_time = int(time())
procedure = Procedure(
name="Some_procedure",
source=EDW_FILES["other_procedures_file"]["source"],
start_date=np.array(list(range(starting_time, starting_time + 100, 5))),
end_date=np.array(
list(range(starting_time + 10000, starting_time + 10100, 5)),
),
)
return procedure
@staticmethod
def get_demo() -> StaticData:
return FakeSignal.get_static_data()
@staticmethod
def get_measurements() -> Dict[str, Measurement]:
starting_time = int(time())
measurements_dic = {
"creatinine": EDW_FILES["lab_file"]["source"],
"ph_arterial": EDW_FILES["lab_file"]["source"],
"pulse": EDW_FILES["vitals_file"]["source"],
"r_phs_ob_bp_systolic_outgoing": EDW_FILES["vitals_file"]["source"],
}
measurements = {
measurement_name: Measurement(
name=measurement_name,
source=f"{measurements_dic[measurement_name]}",
value=np.array(np.random.randint(40, 100, 100)),
time=np.array(list(range(starting_time, starting_time + 100))),
units=str(np.random.choice(["mmHg", "bpm", "%"])),
data_type=str(np.random.choice(["categorical", "numerical"])),
)
for measurement_name in measurements_dic
}
sys = np.random.randint(40, 100, 250)
dias = np.random.randint(80, 160, 250)
measurements["blood_pressure"] = Measurement(
name="blood_pressure",
source=EDW_FILES["vitals_file"]["source"],
value=np.array(
[f"{sys[i]}/{dias[i]}" for i in range(0, len(sys))],
dtype="S",
),
time=np.array(list(range(starting_time - 50000, starting_time, 200))),
units="",
data_type="categorical",
)
return measurements
@staticmethod
def get_procedures() -> Dict[str, Procedure]:
starting_time = int(time())
start_times = np.array(list(range(starting_time, starting_time + 1000, 100)))
end_times = np.array(
list(range(starting_time + 100, starting_time + 1100, 100)),
)
procedures_dic = {
"colonoscopy": EDW_FILES["surgery_file"]["source"],
"hemodialysis": EDW_FILES["other_procedures_file"]["source"],
"transfuse_red_blood_cells": EDW_FILES["transfusions_file"]["source"],
}
procedures = {
procedure_name: Procedure(
name=procedure_name,
source=f"{procedures_dic[procedure_name]}",
start_date=start_times,
end_date=end_times,
)
for procedure_name in procedures_dic
}
return procedures
@staticmethod
def get_medications() -> Dict[str, Medication]:
starting_time = int(time())
meds_list = [
"aspirin_325_mg_tablet",
"cefazolin_2_gram|50_ml_in_dextrose_iso-osmotic_intravenous_piggyback",
"lactated_ringers_iv_bolus",
"norepinephrine_infusion_syringe_in_swfi_80_mcg|ml_cmpd_central_mgh",
"sodium_chloride_0.9_%_intravenous_solution",
"aspirin_500_mg_tablet",
]
medications = {
med: Medication(
name=med,
dose=np.array(np.random.randint(0, 2, 10)),
units=str(np.random.choice(["g/ml", "mg", "pills"])),
start_date=np.array(
list(range(starting_time, starting_time + 100, 10)),
),
action=np.random.choice(["Given", "New bag", "Rate Change"], 10).astype(
"S",
),
route=str(np.random.choice(["Oral", "Nasal", "Otic"])),
wt_based_dose=bool(np.random.randint(0, 2)),
)
for med in meds_list
}
return medications
@staticmethod
def get_events() -> Dict[str, Event]:
starting_time = int(time())
start_times = np.array(list(range(starting_time, starting_time + 1000, 100)))
events_names = ["code_start", "rapid_response_start"]
events = {
event_name: Event(name=event_name, start_date=start_times)
for event_name in events_names
}
return events
@staticmethod
def get_alarms() -> Dict[str, BedmasterAlarm]:
starting_time = int(time())
start_times = np.array(list(range(starting_time, starting_time + 1000, 100)))
alarms_names = ["cpp_low", "v_tach", "apnea"]
alarms = {
alarm_name: BedmasterAlarm(
name=alarm_name,
start_date=start_times,
duration=np.random.randint(0, 21, size=len(start_times)),
level=np.random.randint(1, 6),
)
for alarm_name in alarms_names
}
return alarms
@staticmethod
def get_bedmaster_waveforms() -> Dict[str, BedmasterSignal]:
starting_time = int(time())
duration = 8
sample_freq_1 = 240
sample_freq_2 = 120
times = np.arange(starting_time, starting_time + duration, 0.25)
values1 = nk.ecg_simulate(
duration=int(duration / 2),
sampling_rate=sample_freq_1,
)
values2 = nk.ecg_simulate(
duration=int(duration / 2),
sampling_rate=sample_freq_2,
)
values = np.concatenate([values1, values2])
n_samples = np.array(
[sample_freq_1 * 0.25] * int(duration * 4 / 2)
+ [sample_freq_2 * 0.25] * int(duration * 4 / 2),
)
# Remove some samples
values = np.delete(values, [10, 11, 250])
n_samples[0] = 58
n_samples[5] = 59
leads = {
lead: BedmasterSignal(
name=lead,
source="waveform",
channel=f"ch{idx}",
value=values,
time=times,
units="mV",
sample_freq=np.array(
[(sample_freq_1, 0), (sample_freq_2, 16)],
dtype="float,int",
),
scale_factor=np.random.uniform() * 5,
time_corr_arr=np.packbits(np.random.randint(0, 2, 100).astype(np.bool)),
samples_per_ts=n_samples,
)
for idx, lead in enumerate(["i", "ii", "iii", "v", "spo2"])
}
return leads
@staticmethod
def get_bedmaster_vitals() -> Dict[str, BedmasterSignal]:
starting_time = int(time())
times = np.array(list(range(starting_time, starting_time + 100)))
signals = {
signal: BedmasterSignal(
name=signal,
source="vitals",
channel=signal,
value=np.array(np.random.randint(40, 100, 100)),
"""
Functions for performing EDA on the LendingClub and economy data. It is useful to observe
statistics about the data like max, min, and mean to understand distribution, as well as
look for outliers and missing values.
"""
import numpy as np
import pandas as pd
def outlier(arr):
    """
    Placeholder for outlier detection on the inputted data (not yet implemented).
    :param arr: array of data
    :return: None
    """
    return
def print_statistics(arr, population=False):
"""
Computes and prints statistics/parameters from the inputted data.
:param arr: array of data
:param population: population or sample data
"""
print("Max: ", max(arr))
print("Min: ", min(arr))
print("Mean: ", | np.mean(arr) | numpy.mean |
from math import isclose
import numpy as np
from . import gel_max_sat
from . import linprog
import time
EPSILON = 1e-7
TRACE = False
def is_satisfiable(kb):
return solve(kb)['satisfiable']
def solve(kb):
C = initialize_C(kb)
c = initialize_c(kb)
d = initialize_d(kb)
signs = initialize_signs(kb)
trace(f'C:\n {C}')
trace(f'c: {c}')
trace(f'd: {d}')
trace(f'signs: {signs}')
lp = linprog.solve(c, C, d, signs)
trace(str_lp(lp))
i = 0
iteration_times = []
while not is_min_cost_zero(lp):
start = time.time()
trace(f'\n\niteration: {i}')
result = generate_column(kb, lp)
if not result['success']:
return {'satisfiable': False, 'iterations': i,
'iteration_times': iteration_times}
trace(f'column {result["column"]}')
column = result['column']
C = np.column_stack((C, column))
c = np.append(c, 0)
lp = linprog.solve(c, C, d, signs)
trace(str_lp(lp))
i += 1
end = time.time()
iteration_times += [end - start]
assert_result(C @ lp.x, signs, d)
return {'satisfiable': True, 'lp': lp, 'iterations': i,
'iteration_times': iteration_times}
def initialize_C(kb):
C_left = np.identity(kb.n + kb.k + 1)
C_right = np.vstack((
- np.identity(kb.n),
kb.A,
np.zeros(kb.n)
))
return np.hstack((C_left, C_right))
def initialize_c(kb):
c_left = np.ones(kb.n + kb.k + 1)
c_right = np.zeros(kb.n)
return np.hstack((c_left, c_right))
def initialize_d(kb):
return np.hstack((np.zeros(kb.n), kb.b, 1))
def initialize_signs(kb):
return ['==']*kb.n + kb.signs + ['==']
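# --- Worked shape check (added sketch; the `kb` fields are inferred from the helpers above) ---
# from types import SimpleNamespace
# kb = SimpleNamespace(n=2, k=1, A=np.array([[1.0, 1.0]]), b=np.array([0.5]), signs=['<='])
# initialize_C(kb).shape   # (n + k + 1, (n + k + 1) + n) = (4, 6)
# initialize_d(kb)         # array([0. , 0. , 0.5, 1. ])
# initialize_signs(kb)     # ['==', '==', '<=', '==']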
def is_min_cost_zero(lp):
return isclose(lp.cost, 0, abs_tol=EPSILON)
def get_weights(lp):
return np.array(lp.y)
def generate_column(kb, lp):
weights = get_weights(lp)
trace(f'weights {weights}')
result = gel_max_sat.solve(kb, weights[:kb.n])
if not result['success']:
return {'success': False}
column = extract_column(kb, result)
if weights @ column < 0:
return {'success': False}
return {'success': True, 'column': column}
def extract_column(kb, result):
m_column = np.ones(kb.n)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 18 05:26:23 2019
@author: mudit
"""
import numpy as np
from numpy.matlib import repmat
lr = 0.1
weightcost = 0.0002
momentum = 0.001
def rbm(numcases : int, numdims : int, numhids : int, numbatches : int, n_epochs : int, batchdata : np.array):
vishid = 0.1 * np.random.randn(numdims, numhids)
hidbiases = np.zeros((1, numhids))
visbiases = np.zeros((1, numdims))
poshidprobs = np.zeros((numcases, numhids))
neghidprobs = np.zeros((numcases,numhids))
posprods = np.zeros((numdims,numhids))
negprods = np.zeros((numdims,numhids))
vishidinc = np.zeros((numdims,numhids))
hidbiasinc = np.zeros((1,numhids))
visbiasinc = np.zeros((1,numdims))
batchposhidprobs= np.zeros((numcases,numhids,numbatches))
for i in range(n_epochs):
print("Epoch {}".format(i))
error_sum = 0
for batch in range(numbatches):
data = batchdata[:,:,batch]
poshidprobs = 1 / (1 + np.exp(-data@vishid - repmat(hidbiases, numcases, 1)))
batchposhidprobs[:,:,batch] = poshidprobs
posprods = np.transpose(data) @ poshidprobs
poshidact = sum(poshidprobs)  # print(poshidact)
posvisact = sum(data)
poshidstates = poshidprobs > np.random.rand(numcases, numhids)
import copy
import numpy as np
from collections import defaultdict
import utils
class DAS3HStudent:
def __init__(self, time_weight, n_items, n_skills, seed):
np.random.seed(seed)
self.alpha = np.random.normal(loc=-1.5, scale=0.3, size=1)
# ==============================================================================
# Copyright 2021 SciANN -- <NAME>.
# All Rights Reserved.
#
# Licensed under the MIT License.
#
# A guide for generating collocation points for PINN solvers.
#
# Includes:
# - DataGeneratorX:
# Generate 1D collocation grid.
# - DataGeneratorXY:
# Generate 2D collocation grid for a rectangular domain.
# - DataGeneratorXT:
# Generate 1D time-dependent collocation grid.
# - DataGeneratorXYT:
# Generate 2D time-dependent collocation grid for a rectangular domain.
# ==============================================================================
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
cycol = cycle('bgrcmk')
class DataGeneratorX:
""" Generates 1D collocation grid for training PINNs
# Arguments:
X: [X0, X1]
targets: list and type of targets you wish to impose on PINNs.
('domain', 'bc-left', 'bc-right', 'all')
num_sample: total number of collocation points.
# Examples:
>> dg = DataGeneratorX([0., 1.], ["domain", "bc-left", "bc-right"], 10000)
>> input_data, target_data = dg.get_data()
"""
def __init__(self,
X=[0., 1.],
targets=['domain', 'bc-left', 'bc-right'],
num_sample=10000):
'Initialization'
self.Xdomain = X
self.targets = targets
self.num_sample = num_sample
self.input_data = None
self.target_data = None
self.set_data()
def __len__(self):
return self.input_data[0].shape[0]
def set_data(self):
self.input_data, self.target_data = self.generate_data()
def get_data(self):
return self.input_data, self.target_data
def generate_data(self):
# distribute half inside domain half on the boundary
num_sample = int(self.num_sample/2)
counter = 0
# domain points
x_dom = np.random.uniform(self.Xdomain[0], self.Xdomain[1], num_sample)
ids_dom = np.arange(x_dom.shape[0])
counter += ids_dom.size
# left bc points
x_bc_left = np.full(int(num_sample/2), self.Xdomain[0])
ids_bc_left = np.arange(x_bc_left.shape[0]) + counter
counter += ids_bc_left.size
# right bc points
x_bc_right = np.full(num_sample-int(num_sample/2), self.Xdomain[1])
ids_bc_right = np.arange(x_bc_right.shape[0]) + counter
counter += ids_bc_right.size
ids_bc = np.concatenate([ids_bc_left, ids_bc_right])
ids_all = np.concatenate([ids_dom, ids_bc])
ids = {
'domain': ids_dom,
'bc-left': ids_bc_left,
'bc-right': ids_bc_right,
'bc': ids_bc,
'all': ids_all
}
assert all([t in ids.keys() for t in self.targets]), \
'accepted target types: {}'.format(ids.keys())
input_data = [
np.concatenate([x_dom, x_bc_left, x_bc_right])
'''
Date: 9/28/20
Commit: <PASSWORD>
'''
import numpy as np
# import torch
# from torch.autograd import Variable
import sys
import os
import matplotlib.pyplot as plt
np.set_printoptions(threshold=sys.maxsize)
RGB_COLORS = {
"yellow": [255, 255, 50],
"cyan": [100, 255, 255],
"purple": [128, 0, 255],
"red": [255, 0, 0],
"green": [128, 255, 0],
} # color of each agent in order
# AGENT_SIZES = [[7,7], [7,7], [7,7], [7,7], [7,7]]
AGENT_SIZES = [[2,4], [4,2], [2,4], [2,4], [4,2]]
# AGENT_SIZES = [[1,1], [1,1], [1,1], [1,1], [1,1]]
# AGENT_SIZES = [[3,3], [3,3], [3,3], [3,3], [3,3]]
def to_rgb_np(inputs): # converts 16x16xn input to RGB
"""
:param inputs: batch_size x height x width x n_agents
:return: batch_size x height*scale_factor x width*scale_factor x 3
"""
n_channels = 3
n_agents = inputs.shape[3]
batch_size = inputs.shape[0]
if n_agents > 5:
raise NotImplementedError("rgb input not implemented for more than 5 agents")
rgb_im = np.zeros((batch_size, *inputs.shape[1:3], n_channels))
colors = list(RGB_COLORS.values())[:n_agents]
for i in range(n_agents):
cur_agent = inputs[:, :, :, i, None]
cur_im = np.tile(cur_agent, (1, 1, 1, n_channels))
for c_idx, c in enumerate(colors[i]):
cur_im[:, :, :, c_idx] += cur_im[:, :, :, c_idx] * c
rgb_im += cur_im
rgb_im /= 255 # normalize
# save_image(rgb_im[:16]/10, "results/grid/08-19-20-exp/sample-im-3.png", padding=5, pad_value=10)
return rgb_im
def get_max_agent_positions(n_agents, grid_n):
max_positions = []
for agent_idx in range(n_agents):
max_x = grid_n-AGENT_SIZES[agent_idx][0]
max_y = grid_n-AGENT_SIZES[agent_idx][1]
max_positions.append(np.array([max_x, max_y]))
return np.concatenate(max_positions)
def sample_single(n):
'''
:param n: grid size
:return: np array of anchors, np array of positives
step size is 1, agent is 1 pixel
'''
o_samples = np.mgrid[0:n:1, 0:n:1].reshape(2, -1).T
actions = [[0, 1], [0, -1], [-1, 0], [1, 0]]
o_next = []
os = []
for o in o_samples:
for action in actions:
next_pos = (o + action) % n
os.append(o)
o_next.append(next_pos)
os = np.array(os)
o_next_samples = np.array(o_next)
return os, o_next_samples
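# Quick sanity check (added sketch): every one of the n*n cells is paired with its
# 4 neighbours (wrapped modulo n), so both returned arrays have shape (4*n*n, 2).
# anchors, positives = sample_single(4)
# assert anchors.shape == positives.shape == (64, 2)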
def log_sum_exp(arr):
max_arr = torch.max(arr, dim=1, keepdim=True)[0]
return max_arr + torch.log(torch.sum(torch.exp(arr - max_arr), dim=1))
def sample_double(n):
'''
:param n: grid size
:return: np array of anchors, np array of positives
step size is 1, agents are each 1 pixel
'''
xy = np.mgrid[0:n:1, 0:n:1, 0:n:1, 0:n:1].reshape(4, -1).T
actions = [[0, 1, 0, 0], [0, -1, 0, 0], [-1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, -1, 0], [0, 0, 0, 1], [0, 0, 0, -1]]
o_samples = xy
o_next = []
os = []
for o in o_samples:
for action in actions:
next_pos = o.copy()
next_pos = (o + action) % n
os.append(o)
o_next.append(next_pos)
os = np.array(os)
o_next_samples = np.array(o_next)
return os, o_next_samples
def visualize_single(model, dataset, encoder, n, epoch=0):
data, _ = dataset # only look at anchors
data = data[::4]
batch_size = n # process by row
i = 0
map = []
while i < len(data):
batch = from_numpy_to_var(np.transpose(data[i:i + batch_size], (0, 3, 1, 2)).astype('float32'))
zs = model.encode(batch, vis=True).squeeze(1).detach().cpu().numpy()
map.append(zs)
i += batch_size
map = np.array(map)
return map
def from_numpy_to_var(npx, dtype='float32'):
var = Variable(torch.from_numpy(npx.astype(dtype)))
if torch.cuda.is_available():
return var.cuda()
else:
return var
def reset_grad(params):
for p in params:
if p.grad is not None:
data = p.grad.data
p.grad = Variable(data.new().resize_as_(data).zero_())
def setup_savepath(kwargs):
savepath = kwargs["savepath"]
included = ["n_agents",
"num_onehots",
"rgb",
"grid_n",
"step_size",
"allow_agent_overlap",
"encoder",
"batch_size",
"fast",
"n_negs",
"num_filters",
"num_layers",
"z_dim",
"W",
"separate_W",
"temp",
"scale_weights",
"h_distance",
"seed",
"lr"
]
if kwargs['encoder'].startswith("attention") or kwargs['encoder'].startswith("cswm"):
for key in ["activation", "normalization", "scope", "reg"]:
if key in kwargs.keys():
included.append(key)
if kwargs['encoder'].startswith("cswm-key"):
included.extend(["env", "n_agents", "n_keys", "n_traj", "len_traj", "key_alpha", 'ce_temp'])
final_savepath = savepath
for key in included:
if key in kwargs.keys():
final_savepath = os.path.join(final_savepath, key + "-%s" % str(kwargs[key]))
return final_savepath
def save_python_cmd(savepath, kwargs, script):
excluded = ['hamming_reg', 'hamming_alpha', 'vis_freq', 'label_online']
cmd = 'python ' + script + " "
for key, val in kwargs.items():
if key not in excluded:
cmd += " --%s %s" % (key, str(val))
with open(os.path.join(savepath, "cmd.txt"), "w") as text_file:
text_file.write(cmd)
def process_data_single(dataset, n):
'''
:param dataset: tuple of anchor, positive np arrays
:param n: grid size
:return: n x n x 1 onehot encodings of agent position
'''
o_pos, o_next_pos = dataset
o = []
o_next = []
for pos, pos_next in zip(o_pos, o_next_pos):
im_cur = np.zeros((n, n, 1))
im_next = np.zeros((n, n, 1))
x_cur, y_cur = pos
x_next, y_next = pos_next
im_cur[x_cur, y_cur ,0] = 10
im_next[x_next, y_next, 0] = 10
o.append(im_cur)
o_next.append(im_next)
return np.array(o), np.array(o_next)
def process_data_double(dataset, n):
'''
:param dataset: tuple of anchor, positive np arrays
:param n: grid size
:return: n x n x 2 onehot encodings of agent position
'''
o_pos, o_next_pos = dataset
o = []
o_next = []
i = 0
for pos, pos_next in zip(o_pos, o_next_pos):
im_cur = np.zeros((n, n, 2))
im_next = np.zeros((n, n, 2))
x_cur0, y_cur0, x_cur1, y_cur1,= pos
x_next0, y_next0, x_next1, y_next1 = pos_next
im_cur[x_cur0, y_cur0, 0], im_cur[x_cur1, y_cur1, 1] = 10, 10
im_next[x_next0, y_next0, 0], im_next[x_next1, y_next1, 1] = 10, 10
o.append(im_cur)
o_next.append(im_next)
i+=1
return np.array(o), np.array(o_next)
def process_single_pos(n_agents, pos, grid_n):
'''
:param n_agents: # agents
:param pos: position
:param n: grid size
:return: single [n x n x n_agents] onehot image corresponding to input position
'''
im = np.zeros((grid_n, grid_n, n_agents))
for i in range(n_agents):
agent_dim = AGENT_SIZES[i]
x_cur, y_cur = pos[2 * i], pos[2 * i + 1]
im[x_cur, y_cur, i] = 10
for x in range(agent_dim[0]):
for y in range(agent_dim[1]):
im[(x_cur + x) % grid_n, (y_cur + y) % grid_n, i] = 10
return im
def sample_single_agent_pos(agent_idx, n_agents, grid_n, circular=False):
max_positions = get_max_agent_positions(n_agents, grid_n)
if circular:
sample_pos = np.random.randint(grid_n, size=2)
else:
max_position_agent = max_positions[2*agent_idx:2*agent_idx+2]
sample_pos_x = np.random.randint(max_position_agent[0])
from __future__ import division
# Provides some universal statistical utilities and stats comparison tools
from past.utils import old_div
from builtins import object
from math import sqrt
import numpy as np
import pandas as pd
import scipy.interpolate
import scipy.stats
import warnings
from scipy.special import erfinv
from threeML.io.rich_display import display
def aic(log_like, n_parameters, n_data_points):
"""
The Aikake information criterion.
A model comparison tool based of infomormation theory. It assumes that N is large i.e.,
that the model is approaching the CLT.
"""
val = -2. * log_like + 2 * n_parameters
val += 2 * n_parameters * (n_parameters + 1) / float(n_data_points - n_parameters - 1)
if not np.isfinite(val):
val = 0
warnings.warn('AIC was NAN. Recording zero, but you should examine your fit.')
return val
def bic(log_like, n_parameters, n_data_points):
"""
The Bayesian information criterion.
"""
val = -2. * log_like + n_parameters * np.log(n_data_points)
if not np.isfinite(val):
val = 0
warnings.warn('BIC was NAN. Recording zero, but you should examine your fit.')
return val
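# Worked example (added; the numbers are purely illustrative):
# aic(log_like=-100.0, n_parameters=3, n_data_points=100)  # 200 + 6 + 24/96 = 206.25
# bic(log_like=-100.0, n_parameters=3, n_data_points=100)  # 200 + 3*ln(100) ~ 213.82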
def waic(bayesian_trace):
raise NotImplementedError("Coming soon to a theater near you.")
def dic(bayes_analysis):
"""
elpd_DIC = log p(y|mean(parameters)) - p_DIC
the first term is the deviance at the mean of the posterior
and p_DIC is the effective number of free parameters:
p_DIC = 2(log p(y|mean(parameters)) - 1/N sum(log p(y|parameters_s), 1,N) )
DIC = -2*elpd_DIC
the effective number of free parameters can be negative if the mean is far from the mode
:param bayes_analysis: a bayesian analysis object
:return dic, effective number of free parameters:
"""
mean_of_free_parameters = np.mean(bayes_analysis.raw_samples, axis=0)
deviance_at_mean = bayes_analysis.get_posterior(mean_of_free_parameters)
mean_deviance = np.mean(bayes_analysis.log_probability_values)
pdic = 2 * (deviance_at_mean - mean_deviance)
elpd_dic = deviance_at_mean - pdic
if not np.isfinite(pdic) or not np.isfinite(elpd_dic):
elpd_dic = 0
pdic = 0
warnings.warn('DIC was NAN. Recording zero, but you should examine your fit.')
return -2 * elpd_dic, pdic
def sqrt_sum_of_squares(arg):
"""
:param arg: and array of number to be squared and summed
:return: the sqrt of the sum of the squares
"""
return np.sqrt( np.square(arg).sum() )
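# Quick check (added): sqrt_sum_of_squares([3.0, 4.0]) returns 5.0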
class PoissonResiduals(object):
"""
This class implements a way to compute residuals for a Poisson distribution mapping them to residuals of a standard
normal distribution. The probability of obtaining the observed counts given the expected one is computed, and then
transformed "in unit of sigma", i.e., the sigma value corresponding to that probability is computed.
The algorithm implemented here uses different branches so that it is fairly accurate between -36 and +36 sigma.
NOTE: if the expected number of counts is not very high, then the Poisson distribution is skewed and so the
probability of obtaining a downward fluctuation at a given sigma level is not the same as obtaining the same
fluctuation in the upward direction. Therefore, the distribution of residuals is *not* expected to be symmetric
in that case. The sigma level at which this effect is visible depends strongly on the expected number of counts.
Under normal circumstances residuals are expected to be a few sigma at most, in which case the effect becomes
important for expected number of counts <~ 15-20.
"""
# Putting these here make them part of the *class*, not the instance, i.e., they are created
# only once when the module is imported, and then are referred to by any instance of the class
# These are lookup tables for the significance from a Poisson distribution when the
# probability is very low so that the normal computation is not possible due to
# the finite numerical precision of the computer
_x = np.logspace(np.log10(5), np.log10(36), 1000)
_logy = np.log10(scipy.stats.norm.sf(_x))
# Make the interpolator here so we do it only once. Also use ext=3 so that the interpolation
# will return the maximum value instead of extrapolating
_interpolator = scipy.interpolate.InterpolatedUnivariateSpline(_logy[::-1], _x[::-1], k=1, ext=3)
def __init__(self, Non, Noff, alpha=1.0):
assert alpha > 0 and alpha <= 1, 'alpha was %f' %alpha
self.Non = np.array(Non, dtype=float, ndmin=1)
self.Noff = np.array(Noff, dtype=float, ndmin=1)
self.alpha = float(alpha)
self.expected = self.alpha * self.Noff
self.net = self.Non - self.expected
# This is the minimum difference between 1 and the next representable floating point number
self._epsilon = np.finfo(float).eps
def significance_one_side(self):
# For the points where Non > expected, we need to use the survival function
# sf(x) = 1 - cdf, which can go to very low numbers
# Instead, for points where Non < expected, we need to use the cdf, which allows
# us to go to very low numbers in that direction
idx = self.Non >= self.expected
out = np.zeros_like(self.Non)
if np.sum(idx) > 0:
out[idx] = self._using_sf(self.Non[idx], self.expected[idx])
if np.sum(~idx) > 0:
out[~idx] = self._using_cdf(self.Non[~idx], self.expected[~idx])
return out
def _using_sf(self, x, exp):
sf = scipy.stats.poisson.sf(x, exp)
# print(sf)
# return erfinv(2 * sf) * sqrt(2)
return scipy.stats.norm.isf(sf)
def _using_cdf(self, x, exp):
# Get the value of the cumulative probability function, instead of the survival function (1 - cdf),
# because for extreme values sf(x) = 1 - cdf(x) = 1 due to numerical precision problems
cdf = scipy.stats.poisson.cdf(x, exp)
# print(cdf)
out = np.zeros_like(x)
idx = (cdf >= 2 * self._epsilon)
# We can do a direct computation, because the numerical precision is sufficient
# for this computation, as -sf = cdf - 1 is a representable number
out[idx] = erfinv(2 * cdf[idx] - 1) * sqrt(2)
# We use a lookup table with interpolation because the numerical precision would not
# be sufficient to make the computation
out[~idx] = -1 * self._interpolator(np.log10(cdf[~idx]))
return out
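# --- Usage sketch (editor's addition, not part of the original module) --------
# Illustrative counts only: three on-source bins with matching background bins
# and equal exposure (alpha=1). The result is one residual per bin, in units of
# sigma, positive where Non fluctuates above the expected background.
def _example_poisson_residuals():
    pr = PoissonResiduals(Non=[12, 55, 3], Noff=[10, 50, 9], alpha=1.0)
    return pr.significance_one_side()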
class Significance(object):
"""
Implements equations in Li&Ma 1983
"""
def __init__(self, Non, Noff, alpha=1):
assert alpha > 0 and alpha <= 1, 'alpha was %f' %alpha
self.Non = np.array(Non, dtype=float, ndmin=1)
from Instances import *
from Initial_str_KEG import *
# control time
from time import time
from copy import deepcopy
import numpy as np
import gurobipy as grb
from Maximal_strategy import *
# under unix: to limit time
# import signal
#
# def signal_handler(signum, frame):
# raise Exception("Timed out!")
###########################################################
# SGM
def IterativeSG_NOT_DFS(G,max_iter,opt_solver=1, S=[]):
r"""Create instances in a standard format.
Parameters:
----------
G: Game class (see Instances file)
max_iter: maximum number of sampled games to be solved
opt_solver: 0 if cplex is used and 1 otherwise (use gurobi); in the paper it is always 1.
S: initial set of strategies (optional)
Returns:
-------
ce: array of probabilities indicating the correlated equilibrium for the strategies in the last sampled game S
Profits: List of profits for each player under ce
S: final set of strategies
count: number of iterations, i.e., sampled games solved
cpu_time: computational time
"""
# STEP 0 - INITIALIZATION
# initialize set of strategies
if S == []:
S, U_p, Best_m = InitialStrategies(G,opt_solver)
#S, U_p, Best_m = InitialStrategiesII(G,opt_solver)
else:
U_p, S = IndUtilities(G.m(), G.c(), G.Q(), [[] for _ in range(G.m())], [[] for _ in range(G.m())], S)
Best_m = CreateModels(G.m(), G.n_I(), G.n_C(), G.n_constr(), G.c(), G.Q(), G.A(), G.b())
S_new = [[] for p in range(G.m())]
if [[]] in S:
print("ERROR: There is a player without feasible strategies")
return [],[],S,0,0,False
Numb_stra = [len(S[p]) for p in range(G.m())]
U_depend = [[{} for k in range(G.m())] for p in range(G.m())]
# set mandatory strategy in the support
Numb_stra_S = [0]*G.m()
# STEP 2 - COMPUTE Correlated EQUILIBRIA OF RESTRICTED GAME
count = 1
U_depend = Utilities_Poymatrix(G.m(),G.Q(),U_depend,S_new,S,Numb_stra_S)
list_best = list(range(G.m()))
time_aux = time()
ce = []
ce_previous = ce[:]
while True and count <= max_iter and time()-time_aux<=3600:
print("\n\n Processing node ... ", count)
print("Computing correlated equilibrium.... \n")
ce_previous = ce[:]
#signal.signal(signal.SIGALRM, signal_handler)
#signal.alarm(3600-int(time()-time_aux)) # seconds
try:
ce, Profits, Profits_pure_strategy = ComputeCE(U_depend,U_p,G.m(),G.n_I(),G.n_C(),Numb_stra,opt_solver)
### modify ###
#return ne,Profits,S,count,time()-time_aux
#except Exception, msg: python2.7
except Exception:
print("Time limit exceeded")
return ce_previous, [], S,count,time()-time_aux, False
print("Correlated equilibrium computed sucessfully")
aux = True # no player has incentive to deviate
S_new = [[] for p in range(G.m())] # set of new strategies to be considered
# artificial profile to avoid changing BestReactionGurobi
#Profile = [np.identity(G.n_I()[p]+G.n_C()[p]) for p in range(G.m())] # this will change nothing as we are multiplying by an identity matrix
aux_p = 0
it = np.nditer(np.ones(tuple(Numb_stra)),flags=['multi_index'])
aux_it = [it.multi_index for _ in it]
# we just find one violated inequality at a time: this might not be the most efficient approach
while aux and aux_p<G.m(): # FEED BEST RESPONSES WITH CE solution
p = list_best[aux_p]
# for each strategy of player p verify if the correlated equilibria constraint is violated
for bar_xp in range(Numb_stra[p]):
# determine temporary c[p] which depends on ce and bar_xp
coef_p = sum(ce[s] for s in aux_it if s[p]==bar_xp)
c_tmp = G.c()[p]*coef_p
# determine temporary Q[p][p] which depends on ce and bar_xp
#Q_tmp = deepcopy(G.Q()[p])
Q_tmp = [[] for _ in range(G.m())]
Q_tmp[p] = G.Q()[p][p]*coef_p
#Q_tmp[p] = Q_tmp[p]*coef_p
# determine temporary Q[p][k] which depends on ce and bar_xp
for k in range(G.m()):
if k!=p:
Q_tmp[k] = sum(np.dot(S[k][s[k]],G.Q()[p][k])*ce[s] for s in aux_it if s[p]==bar_xp)
#try:
s_p, u_max, _ = BestReactionGurobi(G.m(),G.n_I()[p],G.n_C()[p],G.n_constr()[p],c_tmp,Q_tmp,G.A()[p],G.b()[p],[],p,False,Best_m[p],True)
s_p = Convert_to_maximal(s_p,G,p)
#except Exception:
# print("Timed out while checking best response")
# return ce, [], S, count, time()-time_aux, False
if Profits_pure_strategy[p][bar_xp]+10**-6<= u_max: # ce constraint is violated
aux = False
S_new[p].append(s_p)
Numb_stra_S = deepcopy(Numb_stra)
Numb_stra[p] = Numb_stra[p]+1
U_depend = Utilities_Poymatrix(G.m(),G.Q(),U_depend,S,S_new,Numb_stra_S)
U_p, S = IndUtilities(G.m(), G.c(), G.Q(), S, U_p, S_new)
S_new = [[] for _ in range(G.m())]
list_best.append(p)
list_best = list_best[:aux_p]+list_best[aux_p+1:]
break
aux_p = aux_p+1
if aux:
final_time = time()-time_aux
# verify it is a NE
CE_is_NE = Verify_CE_NE(G.m(),ce,Profits,U_depend,U_p,Numb_stra)
return ce, Profits, S,count,final_time, CE_is_NE
count = count +1
if time()-time_aux>3600:
print("Time Limit Exceeded")
else:
print(" Maximum number of iterations was attained")
return ce_previous, [], S,count,time()-time_aux,False
## verify if CE is NE (or can be transformed in one)
# we can do this by solving the feasibility problem associated with ce
# This might not be enough as there can be multiple NE for the same support
def Verify_CE_NE(m,ce,Profits_ce,U_depend,U_p,Numb_stra):
it = np.nditer(np.ones(tuple(Numb_stra)),flags=['multi_index'])
aux_it = [it.multi_index for _ in it]
A_supp = [set([]) for _ in range(m)]
for s in aux_it:
if ce[s]>10**-4:
for p in range(m):
A_supp[p] = A_supp[p].union(set([s[p]]))
A_supp = [tuple(a) for a in A_supp]
ne, Profits = FeasibilityProblem_Gurobi(m,A_supp, U_depend,U_p,Numb_stra,None,Profits_ce)
if ne == []:
return False
else:
return True
# dist = 0
# for s in aux_it:
# ne_to_ce = 1
# for p in range(m):
# ne_to_ce = ne_to_ce *ne[p][s[p]]
# dist = dist + abs(ne_to_ce-ce[s])
# if dist<=10**-4:
# return True
# else:
# return False
# def Verify_CE_NE(m,ce,Profits_ce,U_depend,U_p,Numb_stra):
# it = np.nditer(np.ones(tuple(Numb_stra)),flags=['multi_index'])
# aux_it = [it.multi_index for _ in it]
# # each player must be indifferent among her strategies
# for p in range(m):
# List_profits_p = []
# Max_profit_p = Profits_ce[p]
# for bar_s1 in range(Numb_stra[p]):
# Profit_bar_s1 = sum(ce[s]*(U_p[bar_s1]+sum(U_depend[p][k][bar_s1,s[k]] for k in range(m) if k!=p)) for s in aux_it if s[p] == bar_s1)
# List_profits_p.append(Profit_bar_s1)
# if Profit_bar_s1 >=10**-3 and Profit_bar_s1>= Profits_ce[p]-10**-4 and Profit_bar_s1<= Profits_ce[p]+10**-4
#
# Max_profit_p=max(List_profits_p)
#######################################################################################################################
#######################################################
## COMPUTE INDIVIDUAL PROFITS ##
#######################################################
# INPUT
# m = number of players
# c = linear objective function coefficients for each player (list of vectors)
# S = list of strategies for each player
# U_p = list of individual profits for each player
# S_new = new strategies to be added to S and to compute individual profit
# OUTPUT
# U_p = list of players individual profits
# S = new set of strategies
def IndUtilities(m, c, Q, S, U_p, S_new):
for p in range(m):
for s in S_new[p]:
U_p[p].append(float(np.dot(c[p],s)-0.5*np.dot(s,np.dot(Q[p][p],s))))
S[p].append(s)
return U_p,S
#######################################################################################################################
#######################################################
## POLYMATRIX PART OF THE PROFITS ##
#######################################################
# INPUT
# m = number of players
# Q = bilinear coefficients in the objective function for each player (list of matrices)
# p = player for which we are fixing the strategy
# U_p = list of individual profits for each player
# U_depend = list of the players' profit
# S = strategies of each player (list)
# s = profile of strategies being fixed
# numb = last strategy fixed
# Numb_stra_S = number of strategies in S[p]
# OUTPUT
# U_depend = matrix of utilities (in fact it is a dictionary)
def Utilities_Poymatrix(m,Q,U_depend,S,S_new,Numb_stra_S):
for p in range(m):
for k in range(p+1,m):
for sp in enumerate(S_new[p]):
for sk in enumerate(S[k]+S_new[k]):
U_depend[p][k][(Numb_stra_S[p]+sp[0],sk[0])] = float(np.dot(sk[1],np.dot(Q[p][k],sp[1])))
U_depend[k][p][(sk[0],Numb_stra_S[p]+sp[0])] = float(np.dot(sp[1],np.dot(Q[k][p],sk[1])))
for k in range(p):
for sp in enumerate(S_new[p]):
for sk in enumerate(S[k]):
U_depend[p][k][(Numb_stra_S[p]+sp[0],sk[0])] = float(np.dot(sk[1],np.dot(Q[p][k],sp[1])))
U_depend[k][p][(sk[0],Numb_stra_S[p]+sp[0])] = float(np.dot(sp[1],np.dot(Q[k][p],sk[1])))
return U_depend
#######################################################################################################################
#######################################################
## COMPUTE Nash Equilibrium ##
#######################################################
# INPUT
# S = set of strategies for each player (list)
# M = (p, numb, sigma)
# Back = computation continues from previous computed equilibrium sigma (if Back = True)
# U_depend = polymatrix
# U_p = individual profits
# m = number of players
# n_I = number of binary variables for each player (list)
# n_C = number of continuous variables for each player (list)
# Numb_stra = size of S; number of strategies available for each player (list)
# opt_solver = 0 then use CPLEX, = 1 then use Gurobi
# Supp_Stra = M_pos[2] strategies to consider in the support(new strategies should not be considered = S_new of M_pos)
# OUTPUT
# ne = a Nash equilibrium with strategy S[p][numb] of player p in the support
from itertools import combinations_with_replacement, combinations, product,chain
def ComputeCE(U_depend,U_p,m,n_I,n_C,Numb_stra,opt_solver,m_ce=None):
m_ce = grb.Model("Correlated Equilibrium")
m_ce.setParam("Threads",2)
m_ce.setParam("OutputFlag",False)
m_ce.ModelSense = -1 # maximize
m_ce.update()
# we use the mapping for sigma(player 1 strategy, player 2 strategy, ...
it = np.nditer(np.ones(tuple(Numb_stra)),flags=['multi_index'])
aux_it = [it.multi_index for _ in it]
sigma = {s: m_ce.addVar(lb=0,vtype="C",obj=sum(U_p[p][s[p]]+sum(U_depend[p][k][s[p],s[k]] for k in range(m) if k!=p) for p in range(m))) for s in aux_it}
m_ce.update()
# create constraints
# sigma is a probability distribution
m_ce.addConstr(sum(sigma.values())==1)
m_ce.update()
# correlated equilibria constraints
for p in range(m):
for s1 in range(Numb_stra[p]):
for s2 in range(Numb_stra[p]):
if s1!=s2:
m_ce.addConstr(sum(sigma[s]*(U_p[p][s1] - U_p[p][s2]+sum(U_depend[p][k][s1,s[k]]-U_depend[p][k][s2,s[k]] for k in range(m) if k!=p)) for s in aux_it if s1==s[p]) >=0)
m_ce.update()
m_ce.optimize()
ce = np.zeros(tuple(Numb_stra))
Profits =[0 for p in range(m)]
Profits_pure_strategy = [[0 for _ in range(Numb_stra[p])] for p in range(m)]
if m_ce.status not in [3,4]:
for s in aux_it:
ce[s] = sigma[s].x
for p in range(m):
Profits[p] = Profits[p]+ce[s]*(U_p[p][s[p]]+sum(U_depend[p][k][s[p],s[k]] for k in range(m) if k!=p))
Profits_pure_strategy[p][s[p]] = Profits_pure_strategy[p][s[p]] + ce[s]*(U_p[p][s[p]]+sum(U_depend[p][k][s[p],s[k]] for k in range(m) if k!=p))
return ce, Profits,Profits_pure_strategy
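# --- Usage sketch (editor's addition, not part of the original module) --------
# A 2x2 game of chicken (strategy 0 = swerve, 1 = straight) written in the
# containers ComputeCE expects: U_p holds the individual payoffs, while
# U_depend[p][k][(sp, sk)] holds the bilinear part. The payoff numbers are
# illustrative; n_I, n_C and opt_solver are not used inside ComputeCE and are
# passed as placeholders. Requires a working gurobipy installation, like the
# rest of this module.
def _example_compute_ce():
    chicken = {(0, 0): 6.0, (0, 1): 2.0, (1, 0): 7.0, (1, 1): 0.0}
    U_p = [[0.0, 0.0], [0.0, 0.0]]
    U_depend = [[{}, dict(chicken)], [dict(chicken), {}]]
    ce, Profits, Profits_pure = ComputeCE(U_depend, U_p, 2, [1, 1], [0, 0], [2, 2], 1)
    return ce, Profits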
def FeasibilityProblem_Gurobi(m,A_supp, U_depend,U_p,Numb_stra,m_p = None,Profits_ce=[]):
#print "\n\n Solving Problem with Supports: ", A_supp
if m_p == None:
# initiate model
m_p = grb.Model("FeasibilityProblem")
m_p.setParam("Threads", 2)
# no printing of the output
m_p.setParam( 'OutputFlag', False )
# set objective function direction
m_p.ModelSense = -1 # maximize
m_p.update()
# probability variables
sigma = [{sp:m_p.addVar(lb=0,vtype="C",name="sigma_"+str(p)+"_"+str(sp)) for sp in A_supp[p]} for p in range(m)]
m_p.update()
########################################################################################################
############# WHEN FEASIBILITY PROBLEM HAS MORE THAN ONE SOLUTION ######################################
###### MAXIMIZE THE NUMBER OF VARIABLES WITH POSITIVE PROBABILITY ######################################
# aux = [m_p.addVar(obj = 1, lb=0,vtype="C",name="aux_"+str(p)) for p in range(m)] # aux <= sigma_p_sp
# m_p.update()
# for p, sp in enumerate(A_supp):
# for s in sp:
# m_p.addConstr(aux[p] <= sigma[p][s])
# m_p.update()
########################################################################################################
########################################################################################################
# profit variables
v = [m_p.addVar(lb=-1*grb.GRB.INFINITY,vtype="C",name="v_"+str(p)) for p in range(m)]
m_p.update()
for p in range(m):
m_p.addConstr(grb.quicksum(sigma[p].values())==1)
m_p.update()
for p, S_p in enumerate(Numb_stra):
for sp in range(S_p):
if sp in A_supp[p]:
m_p.addConstr(U_p[p][sp]+grb.quicksum(sigma[k][sk]*U_depend[p][k][(sp,sk)] for k in range(m) if k != p for sk in A_supp[k]) == v[p])
m_p.update()
# vp must be close to Profit of ce
m_p.addConstr(v[p]>=Profits_ce[p]-10**-4)
m_p.update()
m_p.addConstr(v[p]<=Profits_ce[p]+10**-4)
m_p.update()
else:
m_p.addConstr(U_p[p][sp]+grb.quicksum(sigma[k][sk]*U_depend[p][k][(sp,sk)] for k in range(m) if k != p for sk in A_supp[k]) <= v[p])
m_p.update()
#m_p.write("apagar.lp")
m_p.optimize()
ne = []
Profits = []
#print "Solution status for Feasibility Problem: ", m_p.status
if m_p.status not in [3,4]:
ne = [[0 for _ in range(Numb_stra[p])] for p in range(m)]
for p, sp in enumerate(Numb_stra):
for j in range(sp):
if j in A_supp[p]:
ne[p][j]= sigma[p][j].x
Profits.append(v[p].x)
return ne, Profits
if __name__ == "__main__":
# # create normal form game
# #2 players
m = 2
n_I = [2,2]
n_C = [0,0]
n_constr = [2,2]
c = [np.array([0,0]),np.array([0,0])]
Q = [[np.zeros((2,2)), np.array([[5,6],[2,1]])],[np.array([[5,6],[2,1]]),np.zeros((2,2))]]
A = [np.array([[1,1],[-1,-1]]),np.array([[1,1],[-1,-1]])]
b = [np.array([1,-1]), np.array([1,-1])]
from __future__ import division
import h5py
import numpy as np
import tempfile
import os
import struct
import re
from .Dataset import Dataset
from .SpeechDataset import SpeechDatasetIterator, LabeledSpeechDatasetIterator, SpeechDatasetFileIterator
def read_filelist(fname):
with open(fname) as fl:
return [l.rstrip('\n') for l in fl]
def all_files_in_directory(directory, file_extension=""):
files = []
def aux(directory):
basedir = os.path.abspath(directory)
subdirlist = []
for item in os.listdir(directory):
if os.path.isfile(os.path.join(basedir, item)):
if item.endswith(file_extension):
files.append(os.path.join(basedir, item))
else:
subdirlist.append(os.path.join(basedir, item))
for subdir in subdirlist:
aux(subdir)
aux(os.path.abspath(directory))
files = [os.path.splitext(os.path.relpath(f, directory))[0] for f in files]
files.sort() # Modifies itself, doesn't return another list or itself :(
return files
def read_HTK_feature_file(fname):
""" Read an HTK file and return the data as a matrix, and a dictionary of file attributes. """
# Read the HTK file to memory
with open(fname, "rb") as htkf:
nSamples, sampPeriod, sampSize, parmKind = struct.unpack(">IIhh", htkf.read(12))
# Is the file compressed?
compressed = parmKind & 0x400 == 0x400
if compressed:
type_to_read = ">i2"
components = sampSize // 2
if parmKind & 0x3f == 5:
A = 32767.0
B = 0.0
else:
A = np.fromfile(htkf, dtype=">f", count=components)
import matplotlib.pyplot as plt
plt.rcParams['keymap.pan'].remove('p')
import numpy as np
from keras.models import load_model
import vpnn
class DigitDrawer:
def __init__(self, ax_p):
self.ax = ax_p
self.digit = np.zeros((28, 28))
#!/usr/bin/env python
import saloon
import saloon.utils as utils
import numpy as np
import nose
### UTILS ###
def approx_equals(v1, v2):
''' Check equality of floats within +/- 5% '''
rel = v1 / v2
if rel > 1.05 or rel < 0.95:
return False
return True
### TESTS ###
def test_normalize_weights_no_baseline():
''' Test that normalized weights sum to 1 '''
w = utils.normalize_weights(np.random.rand(5), 0)
assert approx_equals(np.sum(w), 1.0)
def test_normalize_weights_with_baseline():
''' Test that normalized weights sum to 1, all weights are nonzero '''
w = utils.normalize_weights(np.random.rand(5), 0.3)
assert approx_equals(np.sum(w), 1.0)
assert min(w) > 0.0
def test_get_weights():
''' Test that weight vector is of the correct dimensions '''
w = utils.get_weights(np.random.rand(5, 1000))
assert w.shape == (5,)
def test_get_weights_relative():
''' Test that arms with better payoffs get more weight '''
s = np.ones((5, 10))
# Atom Tracing Code for International Workshop and Short Course on the FRONTIERS OF ELECTRON TOMOGRAPHY
# https://www.electron-tomo.com/
import numpy as np
import scipy as sp
import scipy.io as sio
import os
import warnings
def tripleRoll(vol, vec):
return np.roll(np.roll(np.roll(vol, vec[0], axis=0), vec[1], axis=1), vec[2], axis=2)
def peakFind3D(vol, thresh3D):
"""
Find peaks in a 3D volume
vol: an ndarray of values with peaks to find
thresh3D: [0,1] value to set a threshold for size of peak vs. max intensity in image
"""
pLarge = ((vol > tripleRoll(vol, [-1, -1, -1]))
& (vol > tripleRoll(vol, [0, -1, -1]))
& (vol > tripleRoll(vol, [1, -1, -1]))
& (vol > tripleRoll(vol, [-1, 0, -1]))
& (vol > tripleRoll(vol, [1, 0, -1]))
& (vol > tripleRoll(vol, [-1, 1, -1]))
& (vol > tripleRoll(vol, [0, 1, -1]))
& (vol > tripleRoll(vol, [1, 1, -1]))
& (vol > tripleRoll(vol, [0, 0, -1]))
& (vol > tripleRoll(vol, [-1, -1, 0]))
& (vol > tripleRoll(vol, [0, -1, 0]))
& (vol > tripleRoll(vol, [1, -1, 0]))
& (vol > tripleRoll(vol, [-1, 0, 0]))
& (vol > tripleRoll(vol, [1, 0, 0]))
& (vol > tripleRoll(vol, [-1, 1, 0]))
& (vol > tripleRoll(vol, [0, 1, 0]))
& (vol > tripleRoll(vol, [1, 1, 0]))
& (vol > tripleRoll(vol, [-1, -1, 1]))
& (vol > tripleRoll(vol, [0, -1, 1]))
& (vol > tripleRoll(vol, [1, -1, 1]))
& (vol > tripleRoll(vol, [-1, 0, 1]))
& (vol > tripleRoll(vol, [1, 0, 1]))
& (vol > tripleRoll(vol, [-1, 1, 1]))
& (vol > tripleRoll(vol, [0, 1, 1]))
& (vol > tripleRoll(vol, [1, 1, 1]))
& (vol > tripleRoll(vol, [0, 0, 1]))
& (vol > thresh3D * np.max(vol)))
[xp, yp, zp] = np.where(pLarge * vol)
ip = vol[xp, yp, zp]
return {'xp': xp, 'yp': yp, 'zp': zp, 'ip': ip}
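# --- Usage sketch (editor's addition, not part of the original module) --------
# Two bright voxels planted in a weak random background; with thresh3D=0.5 only
# voxels brighter than half the global maximum (and brighter than all 26
# neighbours) survive. Positions and intensities are illustrative.
def _example_peak_find():
    vol = 0.1 * np.random.rand(32, 32, 32)
    vol[10, 12, 14] = 1.0
    vol[20, 5, 30] = 0.9
    peaks = peakFind3D(vol, 0.5)
    return sorted(zip(peaks['xp'], peaks['yp'], peaks['zp'], peaks['ip']))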
def MatrixQuaternionRot(vector, theta):
"""
MatrixQuaternionRot(vector,theta)
Returns a 3x3 rotation matrix [SO(3)] in numpy array (not numpy matrix!)
for rotating "theta" angle around the given "vector" axis.
vector - A non-zero 3-element numpy array representing rotation axis
theta - A real number for rotation angle in "DEGREES"
Author: <NAME>, Dept. of Physics and Astronomy, UCLA
<EMAIL>
"""
theta = theta * np.pi / 180
vector = vector / np.sqrt(np.dot(vector, vector))
import numpy as nm
blosum62 = {
'-':{'-':1,'A':-4,'C':-4,'B':-4,'E':-4,'D':-4,'G':-4,'F':-4,'I':-4,'H':-4,'K':-4,'M':-4,'L':-4,'N':-4,'Q':-4,'P':-4,'S':-4,'R':-4,'T':-4,'W':-4,'V':-4,'Y':-4,'X':-4,'Z':-4},
'A':{'-':-4,'A':4,'C':0,'B':-2,'E':-1,'D':-2,'G':0,'F':-2,'I':-1,'H':-2,'K':-1,'M':-1,'L':-1,'N':-2,'Q':-1,'P':-1,'S':1,'R':-1,'T':0,'W':-3,'V':0,'Y':-2,'X':-1,'Z':-1},
'C':{'-':-4,'A':0,'C':9,'B':-3,'E':-4,'D':-3,'G':-3,'F':-2,'I':-1,'H':-3,'K':-3,'M':-1,'L':-1,'N':-3,'Q':-3,'P':-3,'S':-1,'R':-3,'T':-1,'W':-2,'V':-1,'Y':-2,'X':-1,'Z':-3},
'B':{'-':-4,'A':-2,'C':-3,'B':4,'E':1,'D':4,'G':-1,'F':-3,'I':-3,'H':0,'K':0,'M':-3,'L':-4,'N':3,'Q':0,'P':-2,'S':0,'R':-1,'T':-1,'W':-4,'V':-3,'Y':-3,'X':-1,'Z':1},
'E':{'-':-4,'A':-1,'C':-4,'B':1,'E':5,'D':2,'G':-2,'F':-3,'I':-3,'H':0,'K':1,'M':-2,'L':-3,'N':0,'Q':2,'P':-1,'S':0,'R':0,'T':-1,'W':-3,'V':-2,'Y':-2,'X':-1,'Z':4},
'D':{'-':-4,'A':-2,'C':-3,'B':4,'E':2,'D':6,'G':-1,'F':-3,'I':-3,'H':-1,'K':-1,'M':-3,'L':-4,'N':1,'Q':0,'P':-1,'S':0,'R':-2,'T':-1,'W':-4,'V':-3,'Y':-3,'X':-1,'Z':1},
'G':{'-':-4,'A':0,'C':-3,'B':-1,'E':-2,'D':-1,'G':6,'F':-3,'I':-4,'H':-2,'K':-2,'M':-3,'L':-4,'N':0,'Q':-2,'P':-2,'S':0,'R':-2,'T':-2,'W':-2,'V':-3,'Y':-3,'X':-1,'Z':-2},
'F':{'-':-4,'A':-2,'C':-2,'B':-3,'E':-3,'D':-3,'G':-3,'F':6,'I':0,'H':-1,'K':-3,'M':0,'L':0,'N':-3,'Q':-3,'P':-4,'S':-2,'R':-3,'T':-2,'W':1,'V':-1,'Y':3,'X':-1,'Z':-3},
'I':{'-':-4,'A':-1,'C':-1,'B':-3,'E':-3,'D':-3,'G':-4,'F':0,'I':4,'H':-3,'K':-3,'M':1,'L':2,'N':-3,'Q':-3,'P':-3,'S':-2,'R':-3,'T':-1,'W':-3,'V':3,'Y':-1,'X':-1,'Z':-3},
'H':{'-':-4,'A':-2,'C':-3,'B':0,'E':0,'D':-1,'G':-2,'F':-1,'I':-3,'H':8,'K':-1,'M':-2,'L':-3,'N':1,'Q':0,'P':-2,'S':-1,'R':0,'T':-2,'W':-2,'V':-3,'Y':2,'X':-1,'Z':0},
'K':{'-':-4,'A':-1,'C':-3,'B':0,'E':1,'D':-1,'G':-2,'F':-3,'I':-3,'H':-1,'K':5,'M':-1,'L':-2,'N':0,'Q':1,'P':-1,'S':0,'R':2,'T':-1,'W':-3,'V':-2,'Y':-2,'X':-1,'Z':1},
'M':{'-':-4,'A':-1,'C':-1,'B':-3,'E':-2,'D':-3,'G':-3,'F':0,'I':1,'H':-2,'K':-1,'M':5,'L':2,'N':-2,'Q':0,'P':-2,'S':-1,'R':-1,'T':-1,'W':-1,'V':1,'Y':-1,'X':-1,'Z':-1},
'L':{'-':-4,'A':-1,'C':-1,'B':-4,'E':-3,'D':-4,'G':-4,'F':0,'I':2,'H':-3,'K':-2,'M':2,'L':4,'N':-3,'Q':-2,'P':-3,'S':-2,'R':-2,'T':-1,'W':-2,'V':1,'Y':-1,'X':-1,'Z':-3},
'N':{'-':-4,'A':-2,'C':-3,'B':3,'E':0,'D':1,'G':0,'F':-3,'I':-3,'H':1,'K':0,'M':-2,'L':-3,'N':6,'Q':0,'P':-2,'S':1,'R':0,'T':0,'W':-4,'V':-3,'Y':-2,'X':-1,'Z':0},
'Q':{'-':-4,'A':-1,'C':-3,'B':0,'E':2,'D':0,'G':-2,'F':-3,'I':-3,'H':0,'K':1,'M':0,'L':-2,'N':0,'Q':5,'P':-1,'S':0,'R':1,'T':-1,'W':-2,'V':-2,'Y':-1,'X':-1,'Z':3},
'P':{'-':-4,'A':-1,'C':-3,'B':-2,'E':-1,'D':-1,'G':-2,'F':-4,'I':-3,'H':-2,'K':-1,'M':-2,'L':-3,'N':-2,'Q':-1,'P':7,'S':-1,'R':-2,'T':-1,'W':-4,'V':-2,'Y':-3,'X':-1,'Z':-1},
'S':{'-':-4,'A':1,'C':-1,'B':0,'E':0,'D':0,'G':0,'F':-2,'I':-2,'H':-1,'K':0,'M':-1,'L':-2,'N':1,'Q':0,'P':-1,'S':4,'R':-1,'T':1,'W':-3,'V':-2,'Y':-2,'X':-1,'Z':0},
'R':{'-':-4,'A':-1,'C':-3,'B':-1,'E':0,'D':-2,'G':-2,'F':-3,'I':-3,'H':0,'K':2,'M':-1,'L':-2,'N':0,'Q':1,'P':-2,'S':-1,'R':5,'T':-1,'W':-3,'V':-3,'Y':-2,'X':-1,'Z':0},
'T':{'-':-4,'A':0,'C':-1,'B':-1,'E':-1,'D':-1,'G':-2,'F':-2,'I':-1,'H':-2,'K':-1,'M':-1,'L':-1,'N':0,'Q':-1,'P':-1,'S':1,'R':-1,'T':5,'W':-2,'V':0,'Y':-2,'X':-1,'Z':-1},
'W':{'-':-4,'A':-3,'C':-2,'B':-4,'E':-3,'D':-4,'G':-2,'F':1,'I':-3,'H':-2,'K':-3,'M':-1,'L':-2,'N':-4,'Q':-2,'P':-4,'S':-3,'R':-3,'T':-2,'W':11,'V':-3,'Y':2,'X':-1,'Z':-3},
'V':{'-':-4,'A':0,'C':-1,'B':-3,'E':-2,'D':-3,'G':-3,'F':-1,'I':3,'H':-3,'K':-2,'M':1,'L':1,'N':-3,'Q':-2,'P':-2,'S':-2,'R':-3,'T':0,'W':-3,'V':4,'Y':-1,'X':-1,'Z':-2},
'Y':{'-':-4,'A':-2,'C':-2,'B':-3,'E':-2,'D':-3,'G':-3,'F':3,'I':-1,'H':2,'K':-2,'M':-1,'L':-1,'N':-2,'Q':-1,'P':-3,'S':-2,'R':-2,'T':-2,'W':2,'V':-1,'Y':7,'X':-1,'Z':-2},
'X':{'-':-4,'A':-1,'C':-1,'B':-1,'E':-1,'D':-1,'G':-1,'F':-1,'I':-1,'H':-1,'K':-1,'M':-1,'L':-1,'N':-1,'Q':-1,'P':-1,'S':-1,'R':-1,'T':-1,'W':-1,'V':-1,'Y':-1,'X':-1,'Z':-1},
'Z':{'-':-4,'A':-1,'C':-3,'B':1,'E':4,'D':1,'G':-2,'F':-3,'I':-3,'H':0,'K':1,'M':-1,'L':-3,'N':0,'Q':3,'P':-1,'S':0,'R':0,'T':-1,'W':-3,'V':-2,'Y':-2,'X':-1,'Z':4}}
def Unite_length_of_sequences(first_seq, secod_seq, third_seq, fourth_seq):
if len(first_seq) < len(secod_seq):
for i in range(len(first_seq) + 1, len(secod_seq) + 1):
first_seq = first_seq + "-"
if len(secod_seq) < len(first_seq):
for i in range(len(secod_seq) + 1, len(first_seq) + 1):
secod_seq = secod_seq + "-"
if len(third_seq) < len(fourth_seq):
for i in range(len(third_seq) + 1, len(fourth_seq) + 1):
third_seq = third_seq + "-"
if len(fourth_seq) < len(third_seq):
for i in range(len(fourth_seq) + 1, len(third_seq) + 1):
fourth_seq = fourth_seq + "-"
return first_seq, secod_seq, third_seq, fourth_seq
match=1
missmatch=-1
gap=-1
def getmsascore(firstseq,secondseq,thirdseq,fourthseq):
listofscores=[]
score=0
sequence1 =firstseq.upper()
sequence2 =secondseq.upper()
sequence3 =thirdseq.upper()
sequence4 =fourthseq.upper()
for i in range(0,len(sequence1)):
for j in range(0,len(sequence3)):
if sequence1[i]==sequence2[i]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score+=match
if sequence1[i]==sequence3[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
elif sequence1[i]=="-" or sequence3[j]=="-":
score+=gap
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
else:
score+=missmatch
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
elif sequence1[i]=="-" or sequence2[i]=="-":
score+=gap
if sequence1[i]==sequence3[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
elif sequence1[i]=="-" or sequence3[j]=="-":
score+=gap
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
else:
score+=missmatch
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
else:
score+=missmatch
if sequence1[i]==sequence3[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
elif sequence1[i]=="-" or sequence3[j]=="-":
score+=gap
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
else:
score+=missmatch
if sequence1[i]==sequence4[j]:
if sequence1[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence1[i]=="-" or sequence4[j]=="-":
score+=gap
else :
score+=missmatch
#second compare
if sequence2[i]==sequence3[j]:
if sequence2[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
if sequence2[i]==sequence4[j]:
if sequence2[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence2[i]=="-" or sequence4[j]:
score+=gap
else:
score+=missmatch
elif sequence2[i]=="-" or sequence3[j]=="-":
score+=gap
if sequence2[i]==sequence4[j]:
if sequence2[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence2[i]=="-" or sequence4[j]:
score+=gap
else:
score+=missmatch
else:
score+=missmatch
if sequence2[i]==sequence4[j]:
if sequence2[i] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence2[i]=="-" or sequence4[j]:
score+=gap
else:
score+=missmatch
# third compare
if sequence3[j]==sequence4[j]:
if sequence3[j] == "-":
# handle gap gap score=0
score += 0
else:
score += match
elif sequence3[j]=="-" or sequence4[j]=="-" :
score+=gap
else:
score+=missmatch
listofscores.append(score)
score=0
return listofscores
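# --- Usage sketch (editor's addition, not part of the original module) --------
# The list of column-pair scores that MSA_DNA consumes while filling its matrix,
# computed here for four equal-length toy sequences with the match/missmatch/gap
# settings defined above. The sequences are illustrative only.
def _example_msa_score():
    return getmsascore("ACT-", "AGT-", "AC-A", "ACTA")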
# firstseq is the original sequence used to call the score function
# sequence1 is " " + the original sequence used to build the matrix
def MSA_DNA(fisrtseq,secondseq,thirdseq,fourthseq,GAP):
fisrtseq,secondseq,thirdseq,fourthseq=Unite_length_of_sequences(fisrtseq,secondseq,thirdseq,fourthseq)
sequence1 = " " + fisrtseq.upper()
sequence2 = " " + secondseq.upper()
sequence3 = " " + thirdseq.upper()
sequence4 = " " + fourthseq.upper()
FirstAlign = []
SecondAlign = []
ThirdAlign = []
FourthAlign = []
NextStep = ""
# k and n are the row and column lengths
# used to trace back the alignment
# k for sequences 1 and 2
# n for sequences 3 and 4
k =len(sequence1)-1
n =len(sequence3)-1
Gappenality=int(GAP)
# initialize first row and column
Matrix = nm.zeros((len(sequence1), len(sequence3)))
for i in range(len(sequence1)):
Matrix[i][0] = i * Gappenality
for j in range(len(sequence3)):
Matrix[0][j] = j * Gappenality
# this list carries the raw column scores of the matrix, computed before the
# left/up gap scores and before adding the previous (i-1, j-1) entry of the matrix
list_of_score=getmsascore(fisrtseq,secondseq,thirdseq,fourthseq)
index_of_list=0
for i in range(1, len(sequence1)):
for j in range(1, len(sequence3)):
D = list_of_score[index_of_list] + Matrix[i - 1][j - 1]
L = Gappenality + Matrix[i][j - 1]
U = Gappenality + Matrix[i - 1][j]
MaxScoreIndex = nm.argmax([D, L, U]) # Return First Max index
if (MaxScoreIndex == 0):
NextStep += "D"
elif (MaxScoreIndex == 1):
NextStep += "L"
else:
NextStep += "U"
index_of_list += 1
Matrix[i][j] = nm.max([D, L, U])
# Matrix Traceback
Backward_Directions = nm.reshape(list(NextStep), (len(sequence1)-1 , len(sequence3)-1 ))
Backward_Directions = nm.vstack([["*"] * Backward_Directions.shape[1], Backward_Directions])
#!/usr/bin/env python
# coding: utf-8
import glob
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
import yaml
from IPython.display import display
from tqdm.notebook import tqdm
from transformers import BertConfig
import BERT_per_label
import BERT_per_lvl
# Data analysis
def data_analysis(data_set):
"""
Analysis of occurrences and histograms of the training and test sets for the given dataset.
:param data_set: dataset to analyze
:return: display of the head of the dataset followed by a textual description of the different category levels, then the same as plots, once for training and once for test
"""
print("Dataset :", data_set)
data = pd.read_csv(data_set + "/train.csv") # Load dataset
data = data.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"}) # For DBpedia rename columns
data = data[['Text', "Cat1", "Cat2", "Cat3"]]
display(data.head())
# Function inside function is not ideal, but there were problems with global variables when converting from Jupyter Lab
def plot_histo(column):
"""
Plots a histogram of the frequency of the length for the parameter column, for the training dataset defined in the upper function
:param column: the category to analyse
:return: plot figure
"""
text_len = data[column].str.len()
plt.hist(text_len, bins=text_len.max())
plt.xlabel("Token length")
plt.ylabel("Amount")
plt.title("Token lenght for {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(column, text_len.min(), text_len.max(), text_len.mean()))
def get_info(column):
"""
Label appearance analysis per categories
:param column: the category to analyse
:return: information about how often each label appears
"""
name, count = np.unique(data[column], return_index=False, return_inverse=False, return_counts=True, axis=None)
print("Amount of appearances for {}: \n * unique values {} \n * Minimal: {} appears {} times \n * Maximal: {} appears {} times \n * in average {:.2f} times. \n ".format(
column, len(count), name[count.argmin()], count.min(), name[count.argmax()], count.max(), count.mean()))
print("Training data \nContains {} examples".format(data.shape[0]))
get_info("Cat1")
get_info("Cat2")
get_info("Cat3")
plt.figure(figsize=(20, 5))
plt.subplot(1, 4, 1)
plot_histo("Text")
plt.subplot(1, 4, 2)
plot_histo("Cat1")
plt.subplot(1, 4, 3)
plot_histo("Cat2")
plt.subplot(1, 4, 4)
plot_histo("Cat3")
plt.savefig("./visualizations/" + data_set + "/Data_analysis.svg", dpi=200, format="svg", facecolor="white")
plt.show()
# Same as above but on the test dataset
test = pd.read_csv(data_set + "/test.csv")
test = test.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})
def plot_histo(column):
"""
Plots a histogram of the frequency of the length for the parameter column, for the test dataset defined in the upper function
:param column: the category to analyse
:return: plot figure
"""
text_len = test[column].str.len()
plt.hist(text_len, bins=text_len.max())
plt.xlabel("Token length")
plt.ylabel("Amount")
plt.title("Token lenght for {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(column, text_len.min(),
text_len.max(),
text_len.mean()))
def get_info(column):
"""
Label appearance analysis per categories
:param column: the category to analyse
:return: information about how often each label appears
"""
name, count = np.unique(test[column], return_index=False, return_inverse=False, return_counts=True, axis=None)
print(
"Amount of appearances for {}: \n * unique values {} \n * Minimal: {} appears {} times \n * Maximal: {} appears {} times \n * in average {:.2f} times. \n ".format(
column, len(count), name[count.argmin()], count.min(), name[count.argmax()], count.max(), count.mean()))
print("Test data \nContains {} examples".format(test.shape[0]))
get_info("Cat1")
get_info("Cat2")
get_info("Cat3")
plt.figure(figsize=(20, 5))
plt.subplot(1, 4, 1)
plot_histo("Text")
plt.subplot(1, 4, 2)
plot_histo("Cat1")
plt.subplot(1, 4, 3)
plot_histo("Cat2")
plt.subplot(1, 4, 4)
plot_histo("Cat3")
plt.savefig("./visualizations/" + data_set + "/Data_analysis_test.svg", dpi=200, format="svg", facecolor="white")
plt.show()
def plot_sub_cat(dataset, columns, spacer=2):
"""
Plot the amount of appearances of the category labels of a level, grouped by the given columns
:param dataset: dataset to plot
:param columns: list of the form [["Cats to", "group by"], "Cat to plot"]
:param spacer: separation between subclases
:return: plot figure
"""
# auxiliary dataframe
df_empty = pd.DataFrame({'A': []})
# Add columns to group by
df_empty['Text'] = dataset[columns[0][0]]
if len(columns[0]) == 2:
df_empty['Text'] = dataset[columns[0][1]].str.cat(df_empty['Text'], sep=". ")
# Generate upper groups
name, count = np.unique(df_empty['Text'], return_index=False, return_inverse=False, return_counts=True, axis=None)
names_undercat_vec = []
count_undercat_vec = []
entries = 0
# Create groups to plot
for overcat in name:
aux = dataset.loc[df_empty['Text'] == overcat]
names_undercat, count_undercat = np.unique(aux[columns[1]], return_index=False, return_inverse=False, return_counts=True, axis=None)
names_undercat_vec.append(names_undercat)
names_undercat_vec.append(np.repeat(" ", spacer))
count_undercat_vec.append(count_undercat)
entries += len(names_undercat)
# Get label names
plot_labels = [item for sublist in names_undercat_vec for item in sublist][:-2]
indv_len = np.array([len(x) for x in count_undercat_vec])
plot_pos = np.array([len(x) for x in names_undercat_vec][:-1])
plot_pos = np.append(0, np.cumsum(plot_pos))
y_pos = np.arange(len(plot_labels))
# Plot groups
ranges = [range(plot_pos[i], plot_pos[i + 1]) for i in range(0, len(plot_pos) - 1, 2)]
for i, coun in enumerate(count_undercat_vec):
bar_plot = plt.barh(ranges[i], coun, align='center', label=name[i])
plt.title("Amount of appearances for under {} grouped by over categories {}:".format(columns[1], columns[0]))
plt.ylabel("Label")
plt.xscale("log")
plt.xlabel("Amount of appearances")
plt.yticks(y_pos, plot_labels)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
def plot_histo_lim(dataset, column, max_len):
"""
Plot histogram of token length of used data when constraining to a maximal token length
:param dataset: dataset to plot
:param column: column of the dataset to plot
:param max_len: maximal allowed token length, i.e. the constraint
:return: histogram of appearances plot
"""
text_len = np.array([x if x <= max_len else max_len for x in dataset[column].str.len()])
plt.hist(text_len, bins=text_len.max())
plt.xlabel("Token length")
plt.ylabel("Amount")
plt.yscale("log")
plt.title("Used token lenght for {} constrained to {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(column,
max_len,
text_len.min(),
text_len.max(),
text_len.mean()))
def plot_histo_label_lim(dataset, column, cats, max_len):
"""
Plot histogram of token length of the used data depending on category levels when constraining to a maximal token length
:param dataset: dataset to plot
:param column: column of the dataset to plot
:param cats: categories to analyse
:param max_len: maximal allowed token length, i.e. the constraint
:return: histogram of appearances plot
"""
df_empty = pd.DataFrame({'A': []})
df_empty['Text'] = dataset['Text']
for cat in cats:
df_empty['Text'] = dataset[cat].str.cat(df_empty['Text'], sep=". ")
text_len = np.array([x if x <= max_len else max_len for x in df_empty['Text'].str.len()])
plt.hist(text_len, bins=text_len.max())
plt.xlabel("Token length")
plt.ylabel("Amount")
plt.yscale("log")
plt.title(
"Used token lenght for {}, {} as input constrained to {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(
cats, column, max_len, text_len.min(), text_len.max(), text_len.mean()))
def plot_histo(dataset, column, max_len):
"""
Plot histogram of token length of data with an indication where it would be cut if constraining to a maximal token length
:param dataset: dataset to plot
:param column: column of the dataset to plot
:param max_len: maximal allowed token length, i.e. the constraint
:return: histogram of appearances plot
"""
text_len = dataset[column].str.len()
n, _, _ = plt.hist(text_len, bins=text_len.max())
plt.vlines(max_len, 0, n.max(), color='r')
plt.xlabel("Token length")
plt.ylabel("Amount")
plt.yscale("log")
plt.title(
"Token lenght for {}, indicating {} as max len: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(column,
max_len,
text_len.min(),
text_len.max(),
text_len.mean()))
def plot_histo_label(dataset, column, cats, max_len):
"""
Plot histogram of token length of data with an indication where it would be cut if constraining to a maximal token length
:param dataset: dataset to plot
:param column: column of the dataset to plot
:param cats: categories to analyse
:param max_len: maximal allowed token length, i.e. the constraint
:return: histogram of appearances plot
"""
df_empty = pd.DataFrame({'A': []})
df_empty['Text'] = dataset['Text']
for cat in cats:
df_empty['Text'] = dataset[cat].str.cat(df_empty['Text'], sep=". ")
text_len = df_empty['Text'].str.len()
n, _, _ = plt.hist(text_len, bins=text_len.max())
plt.vlines(max_len, 0, n.max(), color='r')
plt.xlabel("Token length")
plt.ylabel("Amount")
plt.yscale("log")
plt.title(
"Token lenght for {}, {} as input, indicating {} as max len: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(
cats, column, max_len, text_len.min(), text_len.max(), text_len.mean()))
def plot_histo_targets(dataset, column):
"""
Histogram of appearances for each string
:param dataset: dataset to analyse
:param column: column from which to extract the strings
:return: horizontal amount histogram
"""
plt.ylabel("Label")
plt.xscale("log")
plt.xlabel("Amount of appearances")
name, count = np.unique(dataset[column], return_index=False, return_inverse=False, return_counts=True, axis=None)
plt.title("Amount of appearances for {}: \n Minimal: {} appears {} times \n Maximal: {} appears {} times".format(column,
name[
count.argmin()],
count.min(),
name[
count.argmax()],
count.max()))
y_pos = np.arange(len(name))
bar_plot = plt.barh(y_pos, count, align='center')
plt.yticks(y_pos, name)
def get_lengths(data_set):
"""
Gets the lengths of the texts for both training and test for a dataset
:param data_set: dataset to analyze
:return: length of each text in dataset
"""
data = pd.read_csv(data_set + "/train.csv")
data = data.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})
test = pd.read_csv(data_set + "/test.csv")
test = test.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})
all_lengths_ama = pd.concat([data["Text"].str.len(), test["Text"].str.len()])
return all_lengths_ama
def comparative_text_len():
"""
Compare textual token length for both datasets
:return: plot histogram
"""
ama = get_lengths("amazon")
dbp = get_lengths("dbpedia")
plt.figure(figsize=(10, 10))
plt.hist(dbp, bins=int(dbp.max() / 2), label="DBPedia", alpha=1)
plt.hist(ama, bins=int(ama.max() / 2), label="Amazon", alpha=1)
plt.xlim(0, 5000)
plt.yscale("log")
plt.legend()
plt.xlabel("Number of characters per 'Text' input")
plt.ylabel("Amount of ocurances")
def plot_histo_targets_len(dataset, column):
"""
Histogram of length frequency for each string
:param dataset: dataset to analyse
:param column: column from which to extract the strings
:return: horizontal amount histogram
"""
plt.ylabel("Label")
plt.xlabel("Token lenght")
name, count = np.unique(dataset[column], return_index=False, return_inverse=False, return_counts=True, axis=None)
lengths = np.array([len(x) for x in name])
plt.title("Token length for {}: \n Minimal: {} is {} tokens long \n Maximal: {} is {} tokens long".format(column, name[lengths.argmin()], lengths.min(), name[lengths.argmax()], lengths.max()))
y_pos = np.arange(len(name))
bar_plot = plt.barh(y_pos, lengths, align='center')
plt.yticks(y_pos, name)
def plot_histo_lost(dataset, column, cats, max_len):
"""
Plot histogram of token length of the lost data depending on category levels when constraining to a maximal token length
:param dataset: dataset to plot
:param column: column of the dataset to plot
:param cats: categories to analyse
:param max_len: maximal allowed token length, i.e. the constraint
:return: histogram of appearances plot
"""
df_empty = pd.DataFrame({'A': []})
df_empty['Text'] = dataset['Text']
if cats != []:
for cat in cats:
df_empty['Text'] = dataset[cat].str.cat(df_empty['Text'], sep=". ")
text_len = np.array([x - max_len for x in df_empty['Text'].str.len() if x > max_len])
plt.hist(text_len, bins=text_len.max())
plt.xlabel("Token length")
plt.ylabel("Amount")
plt.yscale("log")
plt.title(
"Token lenght of lost information for {}, {} as input constrained to {}: \n Minimal: {} \n Maximal: {} \n Average: {:.2f}".format(
cats, column, max_len, text_len.min(), text_len.max(), text_len.mean()))
def data_analysis_fixed_len(data_set, max_len=100):
"""
Plot the results of the data analysis for a fixed maximal token length
:param data_set: dataset to analyse
:param max_len: maximal token length, i.e. the constraint
:return: Plot with multiple subplots and textual description of the dataset to analyse
"""
print("Dataset :", data_set)
data = pd.read_csv(data_set + "/train.csv")
data = data.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})
data = data[['Text', "Cat1", "Cat2", "Cat3"]]
display(data.head())
print("Training data \nContains {} examples".format(data.shape[0]))
spec = gridspec.GridSpec(7, 3, wspace=0.5, hspace=1)
fig = plt.figure(figsize=(40, 30))
fig.add_subplot(spec[0, 0])
plot_histo_targets(data, "Cat1")
fig.add_subplot(spec[0, 1])
plot_histo_targets(data, "Cat2")
fig.add_subplot(spec[0, 2])
plot_histo_targets(data, "Cat3")
fig.add_subplot(spec[1, 0])
plot_histo_targets_len(data, "Cat1")
fig.add_subplot(spec[1, 1])
plot_histo_targets_len(data, "Cat2")
fig.add_subplot(spec[1, 2])
plot_histo_targets_len(data, "Cat3")
fig.add_subplot(spec[2, 0])
plot_histo(data, "Text", max_len)
fig.add_subplot(spec[2, 1])
plot_histo_label(data, "Text", ["Cat1"], max_len)
fig.add_subplot(spec[2, 2])
plot_histo_label(data, "Text", ["Cat2", "Cat1"], max_len)
fig.add_subplot(spec[3, 0])
plot_histo_lim(data, "Text", max_len)
fig.add_subplot(spec[3, 1])
plot_histo_label_lim(data, "Text", ["Cat1"], max_len)
fig.add_subplot(spec[3, 2])
plot_histo_label_lim(data, "Text", ["Cat2", "Cat1"], max_len)
fig.add_subplot(spec[4, 0])
plot_histo_lost(data, "Text", [], max_len)
fig.add_subplot(spec[4, 1])
plot_histo_lost(data, "Text", ["Cat1"], max_len)
fig.add_subplot(spec[4, 2])
plot_histo_lost(data, "Text", ["Cat2", "Cat1"], max_len)
fig.add_subplot(spec[5:, 0])
plot_sub_cat(data, [["Cat1"], "Cat2"])
fig.add_subplot(spec[5:, 2])
plot_sub_cat(data, [["Cat2", "Cat1"], "Cat3"])
plt.savefig("./visualizations/" + data_set + "/Data_analysis_complete_training.png", dpi=200, format="png",
facecolor="white")
plt.show()
test = pd.read_csv(data_set + "/test.csv")
test = test.rename(columns={"text": "Text", "l1": "Cat1", "l2": "Cat2", "l3": "Cat3"})
test = test[['Text', "Cat1", "Cat2", "Cat3"]]
print("Test data \nContains {} examples".format(test.shape[0]))
fig = plt.figure(figsize=(40, 25))
fig.add_subplot(spec[0, 0])
plot_histo_targets(test, "Cat1")
fig.add_subplot(spec[0, 1])
plot_histo_targets(test, "Cat2")
fig.add_subplot(spec[0, 2])
plot_histo_targets(test, "Cat3")
fig.add_subplot(spec[1, 0])
plot_histo_targets_len(test, "Cat1")
fig.add_subplot(spec[1, 1])
plot_histo_targets_len(test, "Cat2")
fig.add_subplot(spec[1, 2])
plot_histo_targets_len(test, "Cat3")
fig.add_subplot(spec[2, 0])
plot_histo(test, "Text", max_len)
fig.add_subplot(spec[2, 1])
plot_histo_label(test, "Text", ["Cat1"], max_len)
fig.add_subplot(spec[2, 2])
plot_histo_label(test, "Text", ["Cat2", "Cat1"], max_len)
fig.add_subplot(spec[3, 0])
plot_histo_lim(test, "Text", max_len)
fig.add_subplot(spec[3, 1])
plot_histo_label_lim(test, "Text", ["Cat1"], max_len)
fig.add_subplot(spec[3, 2])
plot_histo_label_lim(test, "Text", ["Cat2", "Cat1"], max_len)
fig.add_subplot(spec[4, 0])
plot_histo_lost(test, "Text", [], max_len)
fig.add_subplot(spec[4, 1])
plot_histo_lost(test, "Text", ["Cat2"], max_len)
fig.add_subplot(spec[4, 2])
plot_histo_lost(test, "Text", ["Cat2", "Cat1"], max_len)
fig.add_subplot(spec[5:, 0])
plot_sub_cat(test, [["Cat1"], "Cat2"])
fig.add_subplot(spec[5:, 2])
plot_sub_cat(test, [["Cat2", "Cat1"], "Cat3"])
plt.savefig("./visualizations/" + data_set + "/Data_analysis_complete_test.png", dpi=200, format="png",
facecolor="white")
plt.show()
######################################################################
# Result table generator
def pad(list_to_pad):
"""
Pad list for runs of different epoch length.
:param list_to_pad:
:return: padded list
"""
lens = [len(a) for a in list_to_pad]
aux = [np.pad(elem, (0, np.max(lens) - len(elem)), 'edge') for elem in list_to_pad]
return aux
def get_plot_values(list_of_values):
"""
Get 95% confidence interval for plotting https://www.wikiwand.com/en/Confidence_interval
:param list_of_values:
:return: mean, maxim and minim lines for plotting
"""
list_of_values = pad(list_of_values)
std = np.std(list_of_values, axis=0)
mean = np.mean(list_of_values, axis=0)
maxim = mean + 1.96 * (std / np.sqrt(len(mean) + 1)) # np.max(f1_score_list, axis=0)
minim = mean - 1.96 * (std / np.sqrt(len(mean) + 1))
return mean, maxim, minim
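# --- Usage sketch (editor's addition, not part of the original module) --------
# Three validation-accuracy histories of unequal length (numbers are
# illustrative); `pad` edge-extends the shorter run before the mean and the
# 95% confidence band are computed.
def _example_plot_values():
    runs = [np.array([0.60, 0.70, 0.80]),
            np.array([0.55, 0.72]),
            np.array([0.58, 0.69, 0.79])]
    mean, upper, lower = get_plot_values(runs)
    return mean, upper, lower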
def get_model_plot(model):
"""
Load history from file and prepare to plot
:param model: path of model to plot
:return:
"""
# Load histories for all runs
histories = [filename for filename in glob.iglob(model + "/**/rep_and_histo.npz", recursive=True)]
histo_list_acc = []
histo_list_f1 = []
# For each history get the accuracy and f1 score to plot
for hist in histories:
arr = np.load(hist, allow_pickle=True)
histo = arr['hist'].item(0)
try:
histo_list_acc.append(np.array(histo['val_accuracy']))
histo_list_f1.append(np.array(histo['val_f1_score']))
except: # Old DBpedia runs used a custom F1 macro score output before I found out about tensorflow addons. It would take 2 to 3 weeks to re-run the experiments
histo_list_acc.append(np.array(arr['accu_list']))
import numpy as np
import scipy.cluster.hierarchy as hi
def sample_multinomial(prob, shape, dim_limit):
assert isinstance(shape, int)
prob = prob / np.sum(prob)
ret = -np.ones(shape, dtype=int)
for i in range(shape):
cnt = 0
while cnt < 100:
assign = np.random.choice(len(prob), p=prob)
if np.sum(ret == assign) < dim_limit:
ret[i] = assign
break
cnt += 1
if cnt >= 100:
raise ValueError('Not able to sample multinomial with dim limit within 100 rounds.')
return ret
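# --- Usage sketch (editor's addition, not part of the original module) --------
# Draw 6 category assignments over 3 categories while capping each category at
# 3 uses; the probabilities are illustrative and are normalised inside the
# function. The draw is stochastic, so results vary between calls.
def _example_sample_multinomial():
    return sample_multinomial(np.array([0.2, 0.3, 0.5]), shape=6, dim_limit=3)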
def sample_categorical(prob):
prob = prob / np.sum(prob)
return np.random.choice(len(prob), p=prob)
def find(pred):
return np.where(pred)[0]
def gumbel():
return -np.log(-np.log(np.random.random()))
def mean_z(z_all, dim_limit):
# use correlation clustering to average group assignments
lz = hi.linkage(z_all.T, 'single', 'hamming')
# not sure why cluster id starts from 1
z = hi.fcluster(lz, 0) - 1
all_cat = np.unique(z)
for a in all_cat:
a_size = np.sum(a == z)
import unittest
from datetime import date
import numpy as np
import pandas as pd
import pint_pandas
from dateutil import relativedelta
from os import path
from scipy import stats
import pint
from table_data_reader import ParameterRepository, growth_coefficients
from table_data_reader.table_handlers import TableParameterLoader
def get_static_path(filename):
"""
Direct copy of the function in eam-core-provenance/directory_test_controller.py
Get the current script directory- which should point to /tests- and join it with the desired filename, then return
"""
directory = path.dirname(path.realpath(__file__))
return path.join(directory, filename)
class CSVParameterLoaderTestCase(unittest.TestCase):
def test_parameter_getvalue_exp(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_v2.csv'), table_handler='csv').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
settings = {'sample_size': 3, 'times': pd.date_range('2016-01-01', '2017-01-01', freq='MS'),
'sample_mean_value': False, 'use_time_series': True}
val = p(settings)
assert abs(stats.shapiro(val)[0] - 0.9) < 0.1
def test_parameter_getvalue_exp_units(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_v2.csv'), table_handler='csv').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
settings = {'sample_size': 3, 'times': pd.date_range('2016-01-01', '2017-01-01', freq='MS'),
'sample_mean_value': False, 'use_time_series': True, 'with_pint_units': True}
val = p(settings).values.data
assert abs(stats.shapiro(val)[0] - 0.9) < 0.1
class PandasCSVParameterLoaderTestCase(unittest.TestCase):
def test_parameter_getvalue_exp(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_v2.csv'), table_handler='pandas').load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
settings = {'sample_size': 3, 'times': pd.date_range('2016-01-01', '2017-01-01', freq='MS'),
'sample_mean_value': False, 'use_time_series': True, 'with_pint_units': True}
val = p(settings).values.data
assert abs(stats.shapiro(val)[0] - 0.9) < 0.1
@unittest.skip('sheets are outdated, updating is effort')
class ExcelParameterLoaderTestCase(unittest.TestCase):
def test_parameter_getvalue_exp(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_v2.xlsx'), table_handler='openpyxl').load_into_repo(
sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
settings = {'sample_size': 3, 'times': pd.date_range('2016-01-01', '2017-01-01', freq='MS'),
'sample_mean_value': False, 'use_time_series': True, 'with_pint_units': True}
val = p(settings).values.data
assert abs(stats.shapiro(val)[0] - 0.9) < 0.1
def test_parameter_getvalue_linear(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_v2.xlsx'), table_handler='openpyxl').load_into_repo(
sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('b')
settings = {'sample_size': 3, 'times': pd.date_range('2010-01-01', '2010-12-01', freq='MS'),
'sample_mean_value': False, 'use_time_series': True, 'with_pint_units': True}
val = p(settings).pint.m
n = val.mean(level='time').mean()
assert n > 0.7
def test_parameter_getvalue_mean(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_v2.xlsx'), table_handler='openpyxl').load_into_repo(
sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
settings = {'sample_size': 3, 'times': pd.date_range('2010-01-01', '2010-12-01', freq='MS'),
'sample_mean_value': True, 'use_time_series': True, 'with_pint_units': True}
val = p(settings).values.data
assert abs(stats.shapiro(val)[0] - 0.9) < 0.1
def test_column_order(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(
sheet_name='shuffle_col_order',
repository=repository)
p = repository.get_parameter('z')
assert p.name == 'z'
assert p.tags == 'x'
def test_choice_single_param(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('choice_var')
assert p() == .9
def test_choice_two_params(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('a')
assert p() in [1, 2]
def test_multiple_choice(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('multiple_choice')
assert p() in [1, 2, 3]
def test_choice_time(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx'),
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10
).load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('choice_var')
val = p()
assert (val == .9).all()
def test_choice_two_params_with_time(self):
loader = TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx'),
times=pd.date_range('2009-01-01', '2009-03-01', freq='MS'), size=10)
repository = ParameterRepository()
loader.load_into_repo(sheet_name='Sheet1', repository=repository)
tag_param_dict = repository.find_by_tag('user')
keys = tag_param_dict.keys()
assert 'a' in keys
repository['a']()
def test_uniform(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('b')
val = p()
assert (val >= 2) & (val <= 4)
def test_uniform_time(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx'),
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10
).load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('b')
val = p()
assert (val >= 2).all() & (val <= 4).all()
def test_uniform_mean(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('b')
val = p({'sample_mean_value': True, 'sample_size': 5})
assert (val == 3).all()
def test_uniform_mean_growth(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('uniform_dist_growth')
val = p({'sample_mean_value': True, 'sample_size': 5, 'use_time_series': True,
'times': pd.date_range('2009-01-01', '2010-01-01', freq='MS'), 'with_pint_units': True})
val = val.pint.m
assert (val >= 3).all()
def test_parameter_getvalue_with_settings_mean(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('uniform_dist_growth')
settings = {'sample_size': 1, 'sample_mean_value': True, 'use_time_series': True,
'times': pd.date_range('2009-01-01', '2010-01-01', freq='MS'), 'with_pint_units': True}
val = p(settings)
val = val.pint.m
assert abs(stats.shapiro(val)[0] - 0.9) < 0.1
    def test_triangular(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx')).load_into_repo(sheet_name='Sheet1',
repository=repository)
p = repository.get_parameter('c')
res = p()
assert (res < 10.) & (res > 3.)
    def test_triangular_time(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test_excelparameterloader.xlsx'),
times=pd.date_range('2009-01-01', '2015-05-01', freq='MS'), size=10
).load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('c')
res = p()
assert (res < 10.).all() & (res > 3.).all()
def test_triangular_timeseries(self):
repository = ParameterRepository()
TableParameterLoader(filename=get_static_path('test.xlsx')).load_into_repo(sheet_name='Sheet1', repository=repository)
p = repository.get_parameter('c')
settings = {
'use_time_series': True,
'times': pd.date_range('2009-01-01', '2015-05-01',
freq='MS'),
'sample_size': 10, 'with_pint_units': True
# 'cagr': 0,
# 'sample_mean_value': True
}
res = p(settings)
res = res.pint.m
assert (res < 10.).all() & (res > 3.).all()
if __name__ == '__main__':
unittest.main()
class TestCAGRCalculation(unittest.TestCase):
    def test_identical_month(self):
        """
        If start and end are identical, we expect an array of ones with shape
        (samples, 1), i.e. one column of length sample size
:return:
"""
samples = 3
alpha = 1 # 100 percent p.a.
ref_date = date(2009, 1, 1)
start_date = date(2009, 1, 1)
end_date = date(2009, 1, 1)
a = growth_coefficients(start_date, end_date, ref_date, alpha, samples)
        assert np.all(a == np.ones((samples, 1)))
"""Hill's method. This method is based on the method given by Deconinck,
Kiyak, Carter, and Kutz (University of Washington, Seattle University) in
their software package SpectrUW (pronounced spectrum).
This is a numerical method that determines the spectra of a linear operator.
For more information, see:
https://www.sciencedirect.com/science/article/pii/S0378475406002709
Authors: <NAME>, <NAME>
NOTE: This
"""
import stablab
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import scipy.integrate
from itertools import cycle
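# Typical usage (illustrative sketch, not part of the original module): given a
# periodic wave profile `sol` and a parameter dict `p` in the form expected by
# hill_coef below, the spectrum can be sampled over a grid of Floquet exponents:
#     kappa = np.linspace(-1, 1, 200)
#     lamda = hill_method(N=20, kappa=kappa, p=p, sol=sol)
#     plt.plot(lamda.real.ravel(), lamda.imag.ravel(), '.')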
def get_chebyshev_coefficients(f,a,b,kind=1):
"""Obtain the chebyshev coefficients for the chebyshev polynomial
which interpolates the data f on the chebyshev nodes in the interval
[a,b].
Parameters:
f (ndarray): a 1-dimensional ndarray with the y-values of each
chebyshev node on the interval [a,b]
a (int): the left endpoint of the interval for the interpolant
b (int): the right endpoint of the interval for the interpolant
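
    Example (illustrative sketch; f holds samples of a target function at the
    N first-kind Chebyshev nodes on [a, b]):
        N = 16
        a, b = -1.0, 1.0
        theta = (np.arange(N) + 0.5) * np.pi / N
        nodes = 0.5 * (a + b) + 0.5 * (b - a) * np.cos(theta)
        cf, fun = get_chebyshev_coefficients(np.exp(nodes), a, b)
        values, derivatives = fun(np.linspace(a, b, 5))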
"""
if len(np.shape(f)) > 1:
raise ValueError('input f must be a 1-dimensional ndarray')
if kind == 1:
N = np.shape(f)[0]
Id2 = (2/N)*np.eye(N)
Id2[0,0] = Id2[0,0]/2
theta = (np.arange(N)+0.5)*np.pi/N
Tcf = Id2 @ np.cos(np.outer(theta, np.arange(N))).T
cf = Tcf @ f
T_x = (2/(b-a)) * (np.tile(np.arange(N),(N,1))
* np.sin(np.outer(theta, np.arange(N)))
/ np.sin(np.outer(theta, np.ones(N))))
fx = T_x @ cf
cf_x = Tcf @ fx
fun = lambda x: eval_cf(x,cf,cf_x,a,b)
else:
raise NotImplementedError("get_chebyshev_coefficients currently only "
"works with chebyshev polynomials of the 1st kind (kind == 1).")
return cf, fun
def eval_cf(x,cf,cf_x,a_x,b_x):
"""Transformation to get Chebyshev coefficients"""
N = np.shape(cf)[0]
xtilde = (x-0.5*(a_x+b_x))/(0.5*(b_x-a_x))
theta = np.arccos(xtilde)
T = np.cos(np.outer(theta, np.arange(N)))
out1 = T @ cf
out2 = (T @ cf_x).T
return out1, out2
def hill_method(N,kappa,p,sol):
"""Executes Hill's method. Five steps are followed to solve the eigenvalue
problem:
1 - Determine the Fourier coefficients
2 - Represent the eigenfunctions using Floquet theory
3 - Construct the bi-infinite Floquet-Fourier difference equation
4 - Truncate the difference equation
5 - Determine the eigenvalues
"""
M = hill_coef(p,sol,N)
X = sol['x'][0,0][0,0]
sx,sy = np.shape(M)
lamda = np.zeros((2*N+1,len(kappa)),dtype=np.complex)
L = np.zeros((2*N+1,2*N+1),dtype=np.complex)
for j in range(len(kappa)):
for n in range(-N,N+1):
for m in range(-N,N+1):
if (n-m) % 2 == 0:
temp = 0
for k in range(sx):
temp += M[k,(n-m)//2+N]*(1j*(kappa[j]+np.pi*m/X))**k
L[n+N,m+N] = temp
lamda[:,j] = np.linalg.eigvals(L)
return lamda
def complex_quad(fun,a,b,**kwargs):
"""A wrapper to scipy.integrate.quad which separately integrates the
real and imaginary parts of the function fun, then puts them together.
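
    Example (illustrative sketch):
        val, re_err, im_err = complex_quad(lambda x: np.exp(1j * x), 0, np.pi)
        # val is approximately 0 + 2j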
"""
    real_integral = scipy.integrate.quad(lambda x: np.real(fun(x)), a, b,
                                         **kwargs)
    imag_integral = scipy.integrate.quad(lambda x: np.imag(fun(x)), a, b,
                                         **kwargs)
# Returns the complex value and the error bounds for both calculations
return (real_integral[0] + 1j*imag_integral[0],
real_integral[1:],
imag_integral[1:] )
def quadv(fun,a,b,**kwargs):
"""Uses scipy.integrate.quad on a function fun which returns a vector.
Normally, quad can only be used on a scalar function. Essentially,
quadv vectorizes quad. It also assumes that fun returns complex values.
"""
    # Build one scalar integrand per component of fun's output: func_inpts[i]
    # is a function of x that returns fun(x)[i], so scipy's scalar quad can be
    # mapped over the components of the vector-valued integrand below.
func_inpts = [(lambda y: (lambda x: fun(x)[y]))(i) for i in
range(np.shape(fun([1]))[0])]
integrals = map(complex_quad, func_inpts, cycle([a]), cycle([b]))
out = np.array([x[0] for x in integrals])
return out
def hill_coef(p,sol,N):
"""
"""
# index
    out = np.zeros((5,2*N+1),dtype=np.complex)
import sys
import numpy as np
from models.evaluation import compute_proportions_from_predicted_labels
class ACC:
"""
Secondary correction model to correct for label shift (ACC)
"""
def __init__(self):
self._p_pred_given_true = None
self._model = None
def fit(self, model, X, label_vector, weights):
"""
compute a confusion matrix of p(y_hat=i|y=j) values
For binary labels, the matrix contains the true positive rate and true negative rate
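        Illustrative layout for the binary case:
            M = [[TNR, FNR],
                 [FPR, TPR]]
        where column j is the predicted-label distribution among examples whose
        true label is j.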
:return: a matrix such that M[i,j] = p(y_hat = i | y = j)
"""
self._model = model
n_classes = model.get_n_classes()
self._p_pred_given_true = np.zeros([n_classes, n_classes])
predictions = model.predict(X)
if weights is None:
weights = np.ones_like(label_vector)
for cl in range(n_classes):
sel = np.array(label_vector == cl, dtype=bool)
true_class_sum = np.sum(weights[sel])
            pred_class_sums = np.bincount(predictions[sel], weights=weights[sel], minlength=n_classes)
#%%
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
from itertools import product
import pandas as pd
import numpy as np
import pickle
from math import log2, ceil
import sys
sys.path.append("../../src/")
from lifelong_dnn import LifeLongDNN
from joblib import Parallel, delayed
import tensorflow as tf
import warnings
warnings.filterwarnings(action='once')
#%%
############################
### Main hyperparameters ###
############################
ntrees = 50
hybrid_comp_trees = 25
estimation_set = 0.63
validation_set= 1-estimation_set
num_points_per_task = 5000
num_points_per_forest = 500
reps = 30
task_10_sample = 10*np.array([10, 50, 100, 200, 350, 500])
#%%
def sort_data(data_x, data_y, num_points_per_task, total_task=10, shift=1):
x = data_x.copy()
y = data_y.copy()
idx = [np.where(data_y == u)[0] for u in np.unique(data_y)]
train_x_across_task = []
train_y_across_task = []
test_x_across_task = []
test_y_across_task = []
batch_per_task=5000//num_points_per_task
sample_per_class = num_points_per_task//total_task
test_data_slot=100//batch_per_task
for task in range(total_task):
for batch in range(batch_per_task):
for class_no in range(task*10,(task+1)*10,1):
indx = np.roll(idx[class_no],(shift-1)*100)
if batch==0 and class_no==task*10:
train_x = x[indx[batch*sample_per_class:(batch+1)*sample_per_class],:]
train_y = y[indx[batch*sample_per_class:(batch+1)*sample_per_class]]
test_x = x[indx[batch*test_data_slot+500:(batch+1)*test_data_slot+500],:]
test_y = y[indx[batch*test_data_slot+500:(batch+1)*test_data_slot+500]]
else:
train_x = np.concatenate((train_x, x[indx[batch*sample_per_class:(batch+1)*sample_per_class],:]), axis=0)
train_y = np.concatenate((train_y, y[indx[batch*sample_per_class:(batch+1)*sample_per_class]]), axis=0)
test_x = np.concatenate((test_x, x[indx[batch*test_data_slot+500:(batch+1)*test_data_slot+500],:]), axis=0)
test_y = np.concatenate((test_y, y[indx[batch*test_data_slot+500:(batch+1)*test_data_slot+500]]), axis=0)
train_x_across_task.append(train_x)
train_y_across_task.append(train_y)
test_x_across_task.append(test_x)
test_y_across_task.append(test_y)
return train_x_across_task, train_y_across_task, test_x_across_task, test_y_across_task
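# sort_data returns four lists indexed by task: for each of the ten tasks it
# gathers num_points_per_task training samples drawn from that task's ten
# classes (optionally shifted by `shift`) plus the matching held-out test slice.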
# %%
def voter_predict_proba(voter, nodes_across_trees):
def worker(tree_idx):
#get the node_ids_to_posterior_map for this tree
node_ids_to_posterior_map = voter.tree_idx_to_node_ids_to_posterior_map[tree_idx]
#get the nodes of X
nodes = nodes_across_trees[tree_idx]
posteriors = []
node_ids = node_ids_to_posterior_map.keys()
#loop over nodes of X
for node in nodes:
#if we've seen this node before, simply get the posterior
if node in node_ids:
posteriors.append(node_ids_to_posterior_map[node])
#if we haven't seen this node before, simply use the uniform posterior
else:
posteriors.append(np.ones((len(np.unique(voter.classes_)))) / len(voter.classes_))
return posteriors
if voter.parallel:
return Parallel(n_jobs=-1)(
delayed(worker)(tree_idx) for tree_idx in range(voter.n_estimators)
)
else:
return [worker(tree_idx) for tree_idx in range(voter.n_estimators)]
#%%
def estimate_posteriors(l2f, X, representation = 0, decider = 0):
l2f.check_task_idx_(decider)
if representation == "all":
representation = range(l2f.n_tasks)
elif isinstance(representation, int):
representation = np.array([representation])
def worker(transformer_task_idx):
transformer = l2f.transformers_across_tasks[transformer_task_idx]
voter = l2f.voters_across_tasks_matrix[decider][transformer_task_idx]
return voter_predict_proba(voter,transformer(X))
'''if l2f.parallel:
posteriors_across_tasks = np.array(
Parallel(n_jobs=-1)(
delayed(worker)(transformer_task_idx) for transformer_task_idx in representation
)
)
else:'''
posteriors_across_tasks = np.array([worker(transformer_task_idx) for transformer_task_idx in representation])
return posteriors_across_tasks
# %%
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar100.load_data()
data_x = np.concatenate([X_train, X_test])
data_x = data_x.reshape((data_x.shape[0], data_x.shape[1] * data_x.shape[2] * data_x.shape[3]))
data_y = np.concatenate([y_train, y_test])
data_y = data_y[:, 0]
train_x_across_task, train_y_across_task, test_x_across_task, test_y_across_task = sort_data(
data_x,data_y,num_points_per_task
)
# %%
hybrid = np.zeros(reps,dtype=float)
building = np.zeros(reps,dtype=float)
recruiting= np.zeros(reps,dtype=float)
uf = np.zeros(reps,dtype=float)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
@date: 2019-05-06
@author: Shell.Xu
@copyright: 2019, Shell.Xu <<EMAIL>>
@license: BSD-3-clause
'''
import time
import pprint
import random
import argparse
import itertools
import collections
import numpy as np
def seta(a, s, t):
a[a==s] = t
def mark_xy(m, x, y):
seta(m[x, :], 0, -1)
seta(m[:, y], 0, -1)
i, j = x-x%3, y-y%3
seta(m[i:i+3, j:j+3], 0, -1)
def check(m, f):
for i in range(0, 9):
if f(m[i, :]):
return i, -1
if f(m[:, i]):
return -1, i
for i in range(0, 3):
for j in range(0, 3):
a = m[i*3:i*3+3, j*3:j*3+3]
if f(a.flatten()):
return i, j
class Sudoku(object):
def __init__(self, quiz):
self.quiz = np.array(quiz)
self.cur = self.quiz.copy()
self.flagmaps = np.array([self.cur,]*9)
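        # flagmaps[n-1] tracks where digit n can still go: 0 means the cell is
        # still open for n, -1 means it is ruled out by its row/column/box, and
        # any positive value marks a cell that is already filled.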
self.moves = []
for n in range(1, 10):
m = self.flagmaps[n-1, :, :]
for x, y in np.array(np.where(m == n)).transpose():
mark_xy(m, x, y)
def fill(self, x, y, n, c):
if self.cur[x, y] > 0:
raise Exception('number existed in this place: %d, %d' % (x, y))
self.cur[x, y] = n
self.moves.append((x, y, n, c))
self.flagmaps[:, x, y] = n
mark_xy(self.flagmaps[n-1, :, :], x, y)
def is_balanced(self):
ele = list(range(1, 10))
return check(self.cur,
lambda a: sorted(a) != ele) == None
def is_full(self):
return not collections.Counter(self.cur.flatten()).get(0)
def find_pos(self, n):
m = self.flagmaps[n-1, :, :]
r = check(m, lambda a: collections.Counter(a).get(0) == 1)
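        # check() returns the row (i, -1), column (-1, j) or 3x3 box (i, j) that
        # has exactly one cell still open for digit n, or None if no such unit
        # exists; the branches below recover that cell's coordinates.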
if r is None:
return
x, y = r
if x == -1:
x = np.where(m[:, y]==0)[0][0]
elif y == -1:
y = np.where(m[x, :]==0)[0][0]
else:
i, j = np.where(m[3*x:3*x+3, 3*y:3*y+3]==0)
x, y = 3*x+i[0], 3*y+j[0]
return x, y
def fill_n(self, n):
r = self.find_pos(n)
while r:
x, y = r
self.fill(x, y, n, 'n')
r = self.find_pos(n)
def fill_one(self):
ele = [-1, -1, -1, -1, -1, -1, -1, -1, 0]
for x in range(0, 9):
for y in range(0, 9):
if self.cur[x, y] != 0:
continue
a = self.flagmaps[:, x, y]
if sorted(a) == ele:
n = np.where(a==0)[0][0]
self.fill(x, y, n+1, 'one')
def resolve(self):
l = -1
while len(self.moves) != l:
l = len(self.moves)
for n in range(1, 10):
self.fill_n(n)
self.fill_one()
def generate_fill(self, n):
m = self.flagmaps[n-1, :, :]
        zeros = np.array(np.where(m == 0))
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Spin circuit solver tests"""
import numpy as np
from qiskit import QuantumCircuit
from qiskit.test import QiskitTestCase
from qiskit_nature.operators.second_quantization import FermionicOp, SpinOp
from qiskit_cold_atom.spins.spin_circuit_solver import SpinCircuitSolver
from qiskit_cold_atom.exceptions import QiskitColdAtomError
class TestSpinCircuitSolver(QiskitTestCase):
"""class to test the SpinCircuitSolver class."""
def setUp(self):
super().setUp()
# Setup the simulator
self.solver = SpinCircuitSolver(spin=3 / 2)
def test_spin_solver_initialization(self):
"""test constructor of SpinCircuitSolver"""
with self.assertRaises(QiskitColdAtomError):
SpinCircuitSolver(spin=2 / 3)
def test_get_initial_state(self):
"""test initialization of the state for the simulation"""
circ = QuantumCircuit(1)
init_state = self.solver.get_initial_state(circ)
target = np.array([1, 0, 0, 0])
self.assertTrue(np.alltrue(init_state.toarray().T == target))
def test_embed_operator(self):
"""test embedding of an operator"""
fer_op = FermionicOp("+-")
spin_op = SpinOp("+-")
num_wires = 4
qargs = [1, 3]
qargs_wrong = [0, 1, 3]
with self.subTest("check operator type"):
with self.assertRaises(QiskitColdAtomError):
self.solver._embed_operator(fer_op, num_wires, qargs)
with self.subTest("check operator wiring"):
with self.assertRaises(QiskitColdAtomError):
self.solver._embed_operator(spin_op, num_wires, qargs_wrong)
with self.subTest("operator embedding"):
embedded_op = self.solver._embed_operator(spin_op, num_wires, qargs)
target_op = SpinOp("+_1 -_3", spin=3 / 2, register_length=4)
self.assertTrue(
set(embedded_op.reduce().to_list()) == set(target_op.reduce().to_list())
)
def test_preprocess_circuit(self):
"""test whether preprocessing of the circuit correctly sets the dimension"""
circ = QuantumCircuit(2)
self.solver.preprocess_circuit(circ)
self.assertEqual(self.solver.dim, 4 ** 2)
def test_draw_shots(self):
"""test drawing of the shots from a measurement distribution"""
circ = QuantumCircuit(2)
self.solver.preprocess_circuit(circ)
with self.subTest("check missing shot number"):
# error because the number of shots is not specified
with self.assertRaises(QiskitColdAtomError):
self.solver.draw_shots(np.ones(16) / 16)
self.solver.shots = 5
with self.subTest("check match of dimensions"):
# error because there is a mismatch in the dimension
with self.assertRaises(QiskitColdAtomError):
self.solver.draw_shots(np.ones(15) / 15)
with self.subTest("formatting of measurement outcomes"):
self.solver.seed = 45
outcomes = self.solver.draw_shots(np.ones(16) / 16)
self.assertEqual(outcomes, ["3 3", "0 2", "0 1", "1 0", "3 1"])
def test_to_operators(self):
"""test the to_operators method inherited form BaseCircuitSolver"""
test_circ = QuantumCircuit(2)
test_circ.lx(0.5, [0, 1])
test_circ.lz2(0.25, 1)
test_circ.measure_all()
with self.subTest("test ignore barriers"):
self.solver.ignore_barriers = False
with self.assertRaises(NotImplementedError):
self.solver.to_operators(test_circ)
self.solver.ignore_barriers = True
with self.subTest("check for gate generators"):
qubit_circ = QuantumCircuit(1)
qubit_circ.h(0)
with self.assertRaises(QiskitColdAtomError):
self.solver.to_operators(qubit_circ)
with self.subTest("gate after previous measurement instruction"):
meas_circ = QuantumCircuit(2)
meas_circ.measure_all()
meas_circ.lx(0.5, 0)
with self.assertRaises(QiskitColdAtomError):
self.solver.to_operators(meas_circ)
with self.subTest("check returned operators"):
operators = self.solver.to_operators(test_circ)
target = [
SpinOp([("X_0", (0.5 + 0j))], spin=3 / 2, register_length=2),
SpinOp([("X_1", (0.5 + 0j))], spin=3 / 2, register_length=2),
SpinOp([("Z_1^2", (0.25 + 0j))], spin=3 / 2, register_length=2),
]
for i, op in enumerate(operators):
self.assertEqual(
set(op.reduce().to_list()), set(target[i].reduce().to_list())
)
def test_call_method(self):
"""test the call method inherited form BaseCircuitSolver that simulates a circuit"""
test_circ = QuantumCircuit(1)
test_circ.lx(np.pi / 2, 0)
test_circ.measure_all()
with self.subTest("running the circuit"):
self.solver.shots = 5
self.solver.seed = 45
simulation = self.solver(test_circ)
self.assertEqual(simulation["memory"], ["3", "2", "1", "0", "1"])
self.assertEqual(simulation["counts"], {"0": 1, "1": 2, "2": 1, "3": 1})
self.assertTrue(
np.allclose(
simulation["statevector"],
np.array(
[
np.sqrt(1 / 8),
-1j * np.sqrt(3 / 8),
-np.sqrt(3 / 8),
1j * np.sqrt(1 / 8),
]
),
)
)
self.assertTrue(
np.allclose(
simulation["unitary"],
np.array(
[
[
np.sqrt(1 / 8),
-1j * np.sqrt(3 / 8),
-np.sqrt(3 / 8),
1j * np.sqrt(1 / 8),
],
[
-1j * np.sqrt(3 / 8),
-np.sqrt(1 / 8),
-1j * np.sqrt(1 / 8),
                                -np.sqrt(3 / 8),
from __future__ import print_function, division
from time import time # timing package
import sys,os
quspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,quspin_path)
# return line number
import inspect
def lineno():
"""Returns the current line number in our program."""
return inspect.currentframe().f_back.f_lineno
from quspin.basis import spin_basis_1d,photon_basis # Hilbert space bases
from quspin.operators import hamiltonian # Hamiltonian and observables
from quspin.tools.Floquet import Floquet, Floquet_t_vec
import numpy as np
from numpy.random import uniform,seed,shuffle,randint # pseudo random numbers
seed()
"""
This script tests the Floquet class.
"""
# scipy's matrix logm does not support complex256 and float128
dtypes={"float32":np.float32,"float64":np.float64,"complex64":np.complex64,"complex128":np.complex128}
atols={"float32":1E-4,"float64":1E-13,"complex64":1E-4,"complex128":1E-13}
def drive(t,Omega,np):
return np.sign(np.cos(Omega*t))
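# The drive is a symmetric square wave of period T = 2*pi/Omega: +1 on the first
# and last quarter of each period and -1 on the middle half, which is exactly
# the H1, H2, H1 step sequence (durations T/4, T/2, T/4) used in test() below.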
def test():
for _r in range(10): # 10 random realisations
##### define model parameters #####
L=4 # system size
J=1.0 # spin interaction
g=uniform(0.2,1.5) # transverse field
h=uniform(0.2,1.5) # parallel field
Omega=uniform(8.0,10.0) # drive frequency
#
##### set up alternating Hamiltonians #####
# define time-reversal symmetric periodic step drive
drive_args=[Omega,np]
# compute basis in the 0-total momentum and +1-parity sector
basis=spin_basis_1d(L=L,a=1,kblock=0,pblock=1)
# define PBC site-coupling lists for operators
x_field_pos=[[+g,i] for i in range(L)]
x_field_neg=[[-g,i] for i in range(L)]
z_field=[[h,i] for i in range(L)]
J_nn=[[J,i,(i+1)%L] for i in range(L)] # PBC
# static and dynamic lists for time-dep H
static=[["zz",J_nn],["z",z_field],["x",x_field_pos]]
dynamic=[["zz",J_nn,drive,drive_args],
["z",z_field,drive,drive_args],["x",x_field_neg,drive,drive_args]]
# static and dynamic lists for step drive
static1=[["zz",J_nn],["z",z_field]]
static2=[["x",x_field_pos]]
# loop over dtypes
for _i in dtypes.keys():
dtype = dtypes[_i]
atol = atols[_i]
# compute Hamiltonians
H=0.5*hamiltonian(static,dynamic,dtype=dtype,basis=basis)
H1=hamiltonian(static1,[],dtype=dtype,basis=basis)
H2=hamiltonian(static2,[],dtype=dtype,basis=basis)
#
##### define time vector of stroboscopic times with 100 cycles #####
t=Floquet_t_vec(Omega,20,len_T=1) # t.vals=times, t.i=init. time, t.T=drive period
#
##### calculate exact Floquet eigensystem #####
t_list=np.array([0.0,t.T/4.0,3.0*t.T/4.0])+np.finfo(float).eps # times to evaluate H
dt_list=np.array([t.T/4.0,t.T/2.0,t.T/4.0]) # time step durations to apply H for
###
# call Floquet class for evodict a coutinous H from a Hamiltonian object
Floq_Hevolve=Floquet({'H':H,'T':t.T,'atol':1E-16,'rtol':1E-16},n_jobs=2)
EF_Hevolve=Floq_Hevolve.EF # read off quasienergies
# call Floquet class for evodict a step H from a Hamiltonian object
Floq_H=Floquet({'H':H,'t_list':t_list,'dt_list':dt_list},n_jobs=2)
EF_H=Floq_H.EF # read off quasienergies
# call Floquet class for evodict a step H from a list of Hamiltonians
Floq_Hlist=Floquet({'H_list':[H1,H2,H1],'dt_list':dt_list},n_jobs=2) # call Floquet class
EF_Hlist=Floq_Hlist.EF
try:
np.testing.assert_allclose(EF_H,EF_Hlist,atol=atol,err_msg='Failed Floquet object comparison!')
np.testing.assert_allclose(EF_H,EF_Hevolve,atol=atol,err_msg='Failed Floquet object comparison!')
except AssertionError:
print('dtype, (g,h,Omega) =', dtype, (g,h,Omega))
print('exiting in line', lineno()+1)
exit()
###
# call Floquet class for evodict a coutinous H from a Hamiltonian object
            Floq_Hevolve=Floquet({'H':H,'T':t.T,'atol':1E-16,'rtol':1E-16},n_jobs=randint(2))
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 17 18:42:25 2018
@author: <NAME>
"""
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Importing dataset
dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
# Implementing Thompson Sampling
m,n = dataset.shape
ad_selected = np.zeros(m) # ad selected at each round
N1 = np.zeros(n,dtype=np.float16) # number of rounds in which ad i earned a reward
N0 = np.zeros(n,dtype=np.float16) # number of rounds in which ad i earned no reward
total_reward = 0
# Implementation in vectorized form
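# Thompson Sampling step per round: draw one sample from each ad's Beta
# posterior Beta(N1[j] + 1, N0[j] + 1), show the ad with the largest draw, then
# update that ad's N1 or N0 according to whether it earned a reward.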
for i in range(0,m):
max_index = 0
theta = np.random.beta(N1+1,N0+1)
    max_index = np.argmax(theta)
# -*- coding: utf-8 -*-
# Copyright (c) 2019 the HERA Project
# Licensed under the MIT License
import pytest
import os
import shutil
import hera_qm.xrfi as xrfi
import numpy as np
import pyuvdata.tests as uvtest
from pyuvdata import UVData
from pyuvdata import UVCal
import hera_qm.utils as utils
from hera_qm.data import DATA_PATH
from pyuvdata import UVFlag
import glob
test_d_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA')
test_uvfits_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.uvfits')
test_uvh5_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvh5')
test_c_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.omni.calfits')
test_f_file = test_d_file + '.testuvflag.h5'
test_f_file_flags = test_d_file + '.testuvflag.flags.h5' # version in 'flag' mode
test_outfile = os.path.join(DATA_PATH, 'test_output', 'uvflag_testout.h5')
xrfi_path = os.path.join(DATA_PATH, 'test_output')
test_flag_integrations= os.path.join(DATA_PATH, 'a_priori_flags_integrations.yaml')
test_flag_jds= os.path.join(DATA_PATH, 'a_priori_flags_jds.yaml')
test_flag_lsts= os.path.join(DATA_PATH, 'a_priori_flags_lsts.yaml')
test_uvh5_files = ['zen.2457698.40355191.xx.HH.uvh5',
'zen.2457698.40367619.xx.HH.uvh5',
'zen.2457698.40380046.xx.HH.uvh5']
test_c_files = ['zen.2457698.40355191.xx.HH.uvcAA.omni.calfits',
'zen.2457698.40367619.xx.HH.uvcAA.omni.calfits',
'zen.2457698.40380046.xx.HH.uvcAA.omni.calfits']
for cnum, cf, uvf in zip(range(3), test_c_files, test_uvh5_files):
test_c_files[cnum] = os.path.join(DATA_PATH, cf)
test_uvh5_files[cnum] = os.path.join(DATA_PATH, uvf)
pytestmark = pytest.mark.filterwarnings(
"ignore:The uvw_array does not match the expected values given the antenna positions.",
"ignore:telescope_location is not set. Using known values for HERA.",
"ignore:antenna_positions is not set. Using known values for HERA."
)
def test_uvdata():
uv = UVData()
uv.read_miriad(test_d_file)
xant = uv.get_ants()[0]
xrfi.flag_xants(uv, xant)
assert np.all(uv.flag_array[uv.ant_1_array == xant, :, :, :])
assert np.all(uv.flag_array[uv.ant_2_array == xant, :, :, :])
def test_uvcal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
xant = uvc.ant_array[0]
xrfi.flag_xants(uvc, xant)
assert np.all(uvc.flag_array[0, :, :, :, :])
def test_uvflag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
xant = uvf.ant_1_array[0]
xrfi.flag_xants(uvf, xant)
assert np.all(uvf.flag_array[uvf.ant_1_array == xant, :, :, :])
assert np.all(uvf.flag_array[uvf.ant_2_array == xant, :, :, :])
def test_input_error():
pytest.raises(ValueError, xrfi.flag_xants, 4, 0)
def test_uvflag_waterfall_error():
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
uvf.to_flag()
pytest.raises(ValueError, xrfi.flag_xants, uvf, 0)
def test_uvflag_not_flag_error():
uvf = UVFlag(test_f_file)
pytest.raises(ValueError, xrfi.flag_xants, uvf, 0)
def test_not_inplace_uvflag():
uvf = UVFlag(test_f_file)
xant = uvf.ant_1_array[0]
uvf2 = xrfi.flag_xants(uvf, xant, inplace=False)
assert np.all(uvf2.flag_array[uvf2.ant_1_array == xant, :, :, :])
assert np.all(uvf2.flag_array[uvf2.ant_2_array == xant, :, :, :])
def test_not_inplace_uvdata():
uv = UVData()
uv.read_miriad(test_d_file)
xant = uv.get_ants()[0]
uv2 = xrfi.flag_xants(uv, xant, inplace=False)
assert np.all(uv2.flag_array[uv2.ant_1_array == xant, :, :, :])
assert np.all(uv2.flag_array[uv2.ant_2_array == xant, :, :, :])
def test_resolve_xrfi_path_given():
dirname = xrfi.resolve_xrfi_path(xrfi_path, test_d_file)
assert xrfi_path == dirname
def test_resolve_xrfi_path_empty():
dirname = xrfi.resolve_xrfi_path('', test_d_file)
assert os.path.dirname(os.path.abspath(test_d_file)) == dirname
def test_resolve_xrfi_path_does_not_exist():
dirname = xrfi.resolve_xrfi_path(os.path.join(xrfi_path, 'foogoo'), test_d_file)
assert os.path.dirname(os.path.abspath(test_d_file)) == dirname
def test_resolve_xrfi_path_jd_subdir():
dirname = xrfi.resolve_xrfi_path('', test_d_file, jd_subdir=True)
expected_dir = os.path.join(os.path.dirname(os.path.abspath(test_d_file)),
'.'.join(os.path.basename(test_d_file).split('.')[0:3])
+ '.xrfi')
assert dirname == expected_dir
assert os.path.exists(expected_dir)
shutil.rmtree(expected_dir)
def test_check_convolve_dims_3D():
# Error if d.ndims != 2
pytest.raises(ValueError, xrfi._check_convolve_dims, np.ones((3, 2, 3)), 1, 2)
def test_check_convolve_dims_1D():
size = 10
d = np.ones(size)
with uvtest.check_warnings(
UserWarning,
match=f"K1 value {size + 1} is larger than the data",
nwarnings=1
):
K = xrfi._check_convolve_dims(d, size + 1)
assert K == size
def test_check_convolve_dims_kernel_not_given():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=["No K1 input provided.", "No K2 input provided"],
nwarnings=2
):
K1, K2 = xrfi._check_convolve_dims(d)
assert K1 == size
assert K2 == size
def test_check_convolve_dims_Kt_too_big():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=f"K1 value {size + 1} is larger than the data",
nwarnings=1,
):
Kt, Kf = xrfi._check_convolve_dims(d, size + 1, size)
assert Kt == size
assert Kf == size
def test_check_convolve_dims_Kf_too_big():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=f"K2 value {size + 1} is larger than the data",
nwarnings=1,
):
Kt, Kf = xrfi._check_convolve_dims(d, size, size + 1)
assert Kt == size
assert Kf == size
def test_check_convolve_dims_K1K2_lt_one():
size = 10
data = np.ones((size, size))
pytest.raises(ValueError, xrfi._check_convolve_dims, data, 0, 2)
pytest.raises(ValueError, xrfi._check_convolve_dims, data, 2, 0)
def test_robust_divide():
a = np.array([1., 1., 1.], dtype=np.float32)
b = np.array([2., 0., 1e-9], dtype=np.float32)
c = xrfi.robust_divide(a, b)
assert np.array_equal(c, np.array([1. / 2., np.inf, np.inf]))
@pytest.fixture(scope='function')
def fake_data():
size = 100
fake_data = np.zeros((size, size))
# yield returns the data and lets us do post test clean up after
yield fake_data
# post-test clean up
del(fake_data)
return
def test_medmin(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# medmin should be .size - 1 for these data
medmin = xrfi.medmin(fake_data)
assert np.allclose(medmin, fake_data.shape[0] - 1)
# Test error when wrong dimensions are passed
pytest.raises(ValueError, xrfi.medmin, np.ones((5, 4, 3)))
def test_medminfilt(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# run medmin filt
Kt = 8
Kf = 8
d_filt = xrfi.medminfilt(fake_data, Kt=Kt, Kf=Kf)
# build up "answer" array
ans = np.zeros_like(fake_data)
for i in range(fake_data.shape[1]):
if i < fake_data.shape[0] - Kf:
ans[:, i] = i + (Kf - 1)
else:
ans[:, i] = fake_data.shape[0] - 1
assert np.allclose(d_filt, ans)
def test_detrend_deriv(fake_data):
# make fake data
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = j * i**2 + j**3
# run detrend_deriv in both dimensions
dtdf = xrfi.detrend_deriv(fake_data, df=True, dt=True)
ans = np.ones_like(dtdf)
assert np.allclose(dtdf, ans)
# only run along frequency
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = j**3
df = xrfi.detrend_deriv(fake_data, df=True, dt=False)
ans = np.ones_like(df)
assert np.allclose(df, ans)
# only run along time
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = i**3
dt = xrfi.detrend_deriv(fake_data, df=False, dt=True)
ans = np.ones_like(dt)
assert np.allclose(dt, ans)
# catch error of df and dt both being False
pytest.raises(ValueError, xrfi.detrend_deriv, fake_data, dt=False, df=False)
# Test error when wrong dimensions are passed
pytest.raises(ValueError, xrfi.detrend_deriv, np.ones((5, 4, 3)))
def test_detrend_medminfilt(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# run detrend_medminfilt
Kt = 8
Kf = 8
dm = xrfi.detrend_medminfilt(fake_data, Kt=Kt, Kf=Kf)
# read in "answer" array
# this is output that corresponds to .size==100, Kt==8, Kf==8
ans_fn = os.path.join(DATA_PATH, 'test_detrend_medminfilt_ans.txt')
ans = np.loadtxt(ans_fn)
assert np.allclose(ans, dm)
def test_detrend_medfilt():
# make fake data
x = np.sin(np.linspace(0, 2.1 * np.pi, 100))
y = np.cos(np.linspace(0, 5.3 * np.pi, 100))
fake_data = np.outer(x,y)
# run detrend medfilt
Kt = 101
Kf = 101
with uvtest.check_warnings(
UserWarning,
match=[
f"K1 value {Kt} is larger than the data",
f"K2 value {Kf} is larger than the data",
],
nwarnings=2,
):
dm = xrfi.detrend_medfilt(fake_data, None, Kt, Kf)
# read in "answer" array
# this is output that corresponds to .size==100, Kt==101, Kf==101
ans_fn = os.path.join(DATA_PATH, 'test_detrend_medfilt_ans_v2.txt')
ans = np.loadtxt(ans_fn)
np.testing.assert_array_almost_equal(ans, dm)
def test_detrend_medfilt_complex():
# use complex data
x = np.sin(np.linspace(0, 2.1 * np.pi, 100)) + 1.0j * np.cos(np.linspace(0, 1.3 * np.pi, 100))
y = np.cos(np.linspace(0, 5.3 * np.pi, 100)) + 1.0j * np.sin(np.linspace(0, 2.9 * np.pi, 100))
fake_data = np.outer(x,y)
# run detrend_medfilt
Kt = 8
Kf = 8
dm = xrfi.detrend_medfilt(fake_data, Kt=Kt, Kf=Kf)
# read in "answer" array
# this is output that corresponds to .size=100, Kt=8, Kf=8
ans_fn = os.path.join(DATA_PATH, 'test_detrend_medfilt_complex_ans_v2.txt')
ans = np.loadtxt(ans_fn).view('complex')
np.testing.assert_array_almost_equal(ans, dm)
def test_detrend_medfilt_3d_error():
# Test error when wrong dimensions are passed
pytest.raises(ValueError, xrfi.detrend_medfilt, np.ones((5, 4, 3)))
def test_detrend_meanfilt(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i**2 * np.ones_like(fake_data[:, i])
# run detrend medfilt
Kt = 8
Kf = 8
dm = xrfi.detrend_meanfilt(fake_data, Kt=Kt, Kf=Kf)
# read in "answer" array
# this is output that corresponds to .size==100, Kt==8, Kf==8
ans_fn = os.path.join(DATA_PATH, 'test_detrend_meanfilt_ans.txt')
ans = np.loadtxt(ans_fn)
assert np.allclose(ans, dm)
def test_detrend_meanfilt_flags(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
ind = int(fake_data.shape[0] / 2)
fake_data[ind, :] = 10000.
flags = np.zeros(fake_data.shape, dtype=np.bool_)
flags[ind, :] = True
# run detrend medfilt
Kt = 8
Kf = 8
dm1 = xrfi.detrend_meanfilt(fake_data, flags=flags, Kt=Kt, Kf=Kf)
# Compare with drastically different flagged values
fake_data[ind, :] = 0
dm2 = xrfi.detrend_meanfilt(fake_data, flags=flags, Kt=Kt, Kf=Kf)
dm2[ind, :] = dm1[ind, :] # These don't have valid values, so don't compare them.
assert np.allclose(dm1, dm2)
def test_zscore_full_array(fake_data):
# Make some fake data
np.random.seed(182)
fake_data[...] = np.random.randn(fake_data.shape[0], fake_data.shape[1])
out = xrfi.zscore_full_array(fake_data)
fake_mean = np.mean(fake_data)
fake_std = np.std(fake_data)
assert np.all(out == (fake_data - fake_mean) / fake_std)
def test_zscore_full_array_flags(fake_data):
# Make some fake data
np.random.seed(182)
fake_data[...] = np.random.randn(fake_data.shape[0], fake_data.shape[1])
flags = np.zeros(fake_data.shape, dtype=np.bool_)
flags[45, 33] = True
out = xrfi.zscore_full_array(fake_data, flags=flags)
fake_mean = np.mean(np.ma.masked_array(fake_data, flags))
fake_std = np.std(np.ma.masked_array(fake_data, flags))
out_exp = (fake_data - fake_mean) / fake_std
out_exp[45, 33] = np.inf
assert np.all(out == out_exp)
def test_zscore_full_array_modified(fake_data):
# Make some fake data
np.random.seed(182)
fake_data[...] = np.random.randn(fake_data.shape[0], fake_data.shape[1])
out = xrfi.zscore_full_array(fake_data, modified=True)
fake_med = np.median(fake_data)
fake_mad = np.median(np.abs(fake_data - fake_med))
assert np.all(out == (fake_data - fake_med) / (1.486 * fake_mad))
def test_zscore_full_array_modified_complex(fake_data):
# Make some fake data
np.random.seed(182)
rands = np.random.randn(100, 100)
fake_data = rands + 1j * rands
out = xrfi.zscore_full_array(fake_data, modified=True)
fake_med = np.median(rands)
fake_mad = np.sqrt(2) * np.median(np.abs(rands - fake_med))
assert np.allclose(out, (fake_data - fake_med - 1j * fake_med) / (1.486 * fake_mad))
def test_modzscore_1d_no_detrend():
npix = 1000
np.random.seed(182)
data = np.random.randn(npix)
data[50] = 500
out = xrfi.modzscore_1d(data, detrend=False)
assert out.shape == (npix,)
assert np.isclose(out[50], 500, rtol=.2)
assert np.isclose(np.median(np.abs(out)), .67, rtol=.1)
def test_modzscore_1d():
npix = 1000
np.random.seed(182)
data = np.random.randn(npix)
data[50] = 500
data += .1 * np.arange(npix)
out = xrfi.modzscore_1d(data)
assert out.shape == (npix,)
assert np.isclose(out[50], 500, rtol=.2)
assert np.isclose(np.median(np.abs(out)), .67, rtol=.1)
def test_watershed_flag():
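    # As exercised below, watershed_flag grows existing flags into neighbouring
    # pixels whose metric exceeds the (lower) watershed threshold nsig_p, and
    # extends along frequency (nsig_f) or time (nsig_t) next to fully flagged
    # channels or integrations.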
# generate a metrics and flag UVFlag object
uv = UVData()
uv.read_miriad(test_d_file)
uvm = UVFlag(uv, history='I made this')
uvf = UVFlag(uv, mode='flag')
# set metric and flag arrays to specific values
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[0, 0, 1, 0] = 7.
uvf.flag_array[0, 0, 0, 0] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[0, 0, :2, 0] = True
assert np.allclose(uvf.flag_array, flag_array)
# test flagging channels adjacent to fully flagged ones
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[:, :, 1, :] = 1.
uvf.flag_array[:, :, 0, :] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_f=0.5, inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[:, :, :2, :] = True
assert np.allclose(uvf.flag_array, flag_array)
# test flagging times adjacent to fully flagged ones
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
times = np.unique(uv.time_array)
inds1 = np.where(uv.time_array == times[0])[0]
inds2 = np.where(uv.time_array == times[1])[0]
uvm.metric_array[inds2, 0, :, 0] = 1.
uvf.flag_array[inds1, 0, :, 0] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_t=0.5, inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[inds1, 0, :, 0] = True
flag_array[inds2, 0, :, 0] = True
assert np.allclose(uvf.flag_array, flag_array)
# test antenna type objects
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvm = UVFlag(uvc, history='I made this')
uvf = UVFlag(uvc, mode='flag')
# set metric and flag arrays to specific values
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[0, 0, 0, 1, 0] = 7.
uvf.flag_array[0, 0, 0, 0, 0] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[0, 0, 0, :2, 0] = True
assert np.allclose(uvf.flag_array, flag_array)
# test flagging channels adjacent to fully flagged ones
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[:, :, 1, :, :] = 1.
uvf.flag_array[:, :, 0, :, :] = True
# run watershed flag
uvf2 = xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_f=0.5, inplace=False)
# check answer
flag_array = np.zeros_like(uvf2.flag_array, dtype=np.bool_)
flag_array[:, :, :2, :, :] = True
assert np.allclose(uvf2.flag_array, flag_array)
del(uvf2)
# test flagging times adjacent to fully flagged ones
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[:, :, :, 1, :] = 1.
uvf.flag_array[:, :, :, 0, :] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_t=0.5, inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[:, :, :, :2, :] = True
assert np.allclose(uvf.flag_array, flag_array)
# test waterfall types
uv = UVData()
uv.read_miriad(test_d_file)
uvm = UVFlag(uv, history='I made this', waterfall=True)
uvf = UVFlag(uv, mode='flag', waterfall=True)
# set metric and flag arrays to specific values
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[0, 1, 0] = 7.
uvf.flag_array[0, 0, 0] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[0, :2, 0] = True
assert np.allclose(uvf.flag_array, flag_array)
# test flagging channels adjacent to fully flagged ones
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[:, 1, :] = 1.
uvf.flag_array[:, 0, :] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_f=0.5, inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[:, :2, :] = True
assert np.allclose(uvf.flag_array, flag_array)
# test flagging times adjacent to fully flagged ones
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvf.flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
uvm.metric_array[1, :, :] = 1.
uvf.flag_array[0, :, :] = True
# run watershed flag
xrfi.watershed_flag(uvm, uvf, nsig_p=2., nsig_t=0.5, inplace=True)
# check answer
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[:2, :, :] = True
assert np.allclose(uvf.flag_array, flag_array)
def test_watershed_flag_errors():
# setup
uv = UVData()
uv.read_miriad(test_d_file)
uvm = UVFlag(uv, history='I made this')
uvf = UVFlag(uv, mode='flag')
uvf2 = UVFlag(uv, mode='flag', waterfall=True)
# pass in objects besides UVFlag
pytest.raises(ValueError, xrfi.watershed_flag, 1, 2)
pytest.raises(ValueError, xrfi.watershed_flag, uvm, 2)
pytest.raises(ValueError, xrfi.watershed_flag, uvm, uvf2)
# set the UVFlag object to have a bogus type
uvm.type = 'blah'
pytest.raises(ValueError, xrfi.watershed_flag, uvm, uvf)
def test_ws_flag_waterfall():
# test 1d
d = np.zeros((10,))
f = np.zeros((10,), dtype=np.bool_)
d[1] = 3.
f[0] = True
f_out = xrfi._ws_flag_waterfall(d, f, nsig=2.)
ans = np.zeros_like(f_out, dtype=np.bool_)
ans[:2] = True
assert np.allclose(f_out, ans)
# another 1D test
metric = np.array([2., 2., 5., 0., 2., 0., 5.])
fin = (metric >= 5.)
fout = xrfi._ws_flag_waterfall(metric, fin)
np.testing.assert_array_equal(fout, [True, True, True, False, False, False, True])
# test 2d
d = np.zeros((10, 10))
f = np.zeros((10, 10), dtype=np.bool_)
d[0, 1] = 3.
d[1, 0] = 3.
f[0, 0] = True
f_out = xrfi._ws_flag_waterfall(d, f, nsig=2.)
ans = np.zeros_like(f_out, dtype=np.bool_)
ans[:2, 0] = True
ans[0, :2] = True
assert np.allclose(f_out, ans)
# catch errors
d1 = np.zeros((10,))
f2 = np.zeros((10, 10), dtype=np.bool_)
pytest.raises(ValueError, xrfi._ws_flag_waterfall, d1, f2)
d3 = np.zeros((5, 4, 3))
f3 = np.zeros((5, 4, 3), dtype=np.bool_)
pytest.raises(ValueError, xrfi._ws_flag_waterfall, d3, f3)
def test_xrfi_waterfall():
# test basic functions
np.random.seed(21)
data = 100 * np.ones((10, 10))
data += np.random.randn(10, 10)
data[3, 3] += 100
data[3, 4] += 3
flags = xrfi.xrfi_waterfall(data)
assert np.sum(flags) == 2
assert flags[3, 3]
assert flags[3, 4]
flags = xrfi.xrfi_waterfall(data, nsig_adj=6.)
assert np.sum(flags) == 1
assert flags[3, 3]
def test_xrfi_waterfall_prior_flags():
# test with prior flags
np.random.seed(21)
data = 100 * np.ones((10, 10))
data += np.random.randn(10, 10)
prior_flags = np.zeros((10, 10), dtype=bool)
prior_flags[3, 3] = True
data[3, 4] += 3
flags = xrfi.xrfi_waterfall(data, flags=prior_flags)
assert np.sum(flags) == 2
assert flags[3, 3]
assert flags[3, 4]
flags = xrfi.xrfi_waterfall(data, flags=prior_flags, nsig_adj=6.)
assert np.sum(flags) == 1
assert flags[3, 3]
def test_xrfi_waterfall_error():
# test errors
data = np.ones((10, 10))
with pytest.raises(KeyError):
xrfi.xrfi_waterfall(data, algorithm='not_an_algorithm')
def test_flag():
# setup
uv = UVData()
uv.read_miriad(test_d_file)
uvm = UVFlag(uv, history='I made this')
# initialize array with specific values
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvm.metric_array[0, 0, 0, 0] = 7.
uvf = xrfi.flag(uvm, nsig_p=6.)
assert uvf.mode == 'flag'
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[0, 0, 0, 0] = True
assert np.allclose(uvf.flag_array, flag_array)
# test channel flagging in baseline type
uvm.metric_array = np.zeros_like(uvm.metric_array)
uvm.metric_array[:, :, 0, :] = 7.
uvm.metric_array[:, :, 1, :] = 3.
uvf = xrfi.flag(uvm, nsig_p=6., nsig_f=2.)
flag_array = np.zeros_like(uvf.flag_array, dtype=np.bool_)
flag_array[:, :, :2, :] = True
assert np.allclose(uvf.flag_array, flag_array)
# test time flagging in baseline type
    uvm.metric_array = np.zeros_like(uvm.metric_array)
# MIT License
#
# Copyright (c) 2018-2020 Tskit Developers
# Copyright (c) 2017 University of Oxford
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Tree sequence IO via the tables API.
"""
import base64
import dataclasses
import datetime
import itertools
import json
import sys
import warnings
from dataclasses import dataclass
from typing import Any
from typing import Tuple
import numpy as np
import _tskit
import tskit
import tskit.metadata as metadata
import tskit.provenance as provenance
import tskit.util as util
from tskit import UNKNOWN_TIME
dataclass_options = {"frozen": True}
@dataclass(eq=False, **dataclass_options)
class IndividualTableRow:
__slots__ = ["flags", "location", "parents", "metadata"]
flags: int
location: np.ndarray
parents: np.ndarray
metadata: bytes
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
else:
return all(
(
self.flags == other.flags,
np.array_equal(self.location, other.location),
np.array_equal(self.parents, other.parents),
self.metadata == other.metadata,
)
)
def __neq__(self, other):
return not self.__eq__(other)
@dataclass(**dataclass_options)
class NodeTableRow:
__slots__ = ["flags", "time", "population", "individual", "metadata"]
flags: int
time: float
population: int
individual: int
metadata: bytes
@dataclass(**dataclass_options)
class EdgeTableRow:
__slots__ = ["left", "right", "parent", "child", "metadata"]
left: float
right: float
parent: int
child: int
metadata: bytes
@dataclass(**dataclass_options)
class MigrationTableRow:
__slots__ = ["left", "right", "node", "source", "dest", "time", "metadata"]
left: float
right: float
node: int
source: int
dest: int
time: float
metadata: bytes
@dataclass(**dataclass_options)
class SiteTableRow:
__slots__ = ["position", "ancestral_state", "metadata"]
position: float
ancestral_state: str
metadata: bytes
@dataclass(eq=False, **dataclass_options)
class MutationTableRow:
__slots__ = ["site", "node", "derived_state", "parent", "metadata", "time"]
site: int
node: int
derived_state: str
parent: int
metadata: bytes
time: float
def __eq__(self, other):
return (
isinstance(other, MutationTableRow)
and self.site == other.site
and self.node == other.node
and self.derived_state == other.derived_state
and self.parent == other.parent
and self.metadata == other.metadata
and (
self.time == other.time
or (
util.is_unknown_time(self.time) and util.is_unknown_time(other.time)
)
)
)
@dataclass(**dataclass_options)
class PopulationTableRow:
__slots__ = ["metadata"]
metadata: bytes
@dataclass(**dataclass_options)
class ProvenanceTableRow:
__slots__ = ["timestamp", "record"]
timestamp: str
record: str
@dataclass(**dataclass_options)
class TableCollectionIndexes:
edge_insertion_order: np.ndarray = None
edge_removal_order: np.ndarray = None
def asdict(self):
return {k: v for k, v in dataclasses.asdict(self).items() if v is not None}
@property
def nbytes(self):
return self.edge_insertion_order.nbytes + self.edge_removal_order.nbytes
def keep_with_offset(keep, data, offset):
"""
Used when filtering _offset columns in tables
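
    For example (illustrative): with data = [a, b, c, d] and
    offset = [0, 1, 3, 4] the rows are [a], [b, c] and [d];
    keep = [True, False, True] yields data = [a, d] and offset = [0, 1, 2].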
"""
# We need the astype here for 32 bit machines
lens = np.diff(offset).astype(np.int32)
return (
data[np.repeat(keep, lens)],
np.concatenate(
[
np.array([0], dtype=offset.dtype),
np.cumsum(lens[keep], dtype=offset.dtype),
]
),
)
class BaseTable:
"""
Superclass of high-level tables. Not intended for direct instantiation.
"""
# The list of columns in the table. Must be set by subclasses.
column_names = []
def __init__(self, ll_table, row_class, **kwargs):
self.ll_table = ll_table
self.row_class = row_class
super().__init__(**kwargs)
def _check_required_args(self, **kwargs):
for k, v in kwargs.items():
if v is None:
raise TypeError(f"{k} is required")
@property
def num_rows(self):
return self.ll_table.num_rows
@property
def max_rows(self):
return self.ll_table.max_rows
@property
def max_rows_increment(self):
return self.ll_table.max_rows_increment
@property
def nbytes(self) -> int:
"""
Returns the total number of bytes required to store the data
in this table. Note that this may not be equal to
the actual memory footprint.
"""
# It's not ideal that we run asdict() here to do this as we're
# currently creating copies of the column arrays, so it would
# be more efficient to have dedicated low-level methods. However,
# if we do have read-only views on the underlying memory for the
# column arrays then this will be a perfectly good way of
# computing the nbytes values and the overhead minimal.
d = self.asdict()
nbytes = 0
# Some tables don't have a metadata_schema
metadata_schema = d.pop("metadata_schema", None)
if metadata_schema is not None:
nbytes += len(metadata_schema.encode())
nbytes += sum(col.nbytes for col in d.values())
return nbytes
def equals(self, other, ignore_metadata=False):
"""
Returns True if `self` and `other` are equal. By default, two tables
are considered equal if their columns and metadata schemas are
byte-for-byte identical.
:param other: Another table instance
:param bool ignore_metadata: If True exclude metadata and metadata schemas
from the comparison.
:return: True if other is equal to this table; False otherwise.
:rtype: bool
"""
# Note: most tables support ignore_metadata, we can override for those that don't
ret = False
if type(other) is type(self):
ret = bool(
self.ll_table.equals(other.ll_table, ignore_metadata=ignore_metadata)
)
return ret
def __eq__(self, other):
return self.equals(other)
def __len__(self):
return self.num_rows
def __getattr__(self, name):
if name in self.column_names:
return getattr(self.ll_table, name)
else:
raise AttributeError(
f"{self.__class__.__name__} object has no attribute {name}"
)
def __setattr__(self, name, value):
if name in self.column_names:
d = self.asdict()
d[name] = value
self.set_columns(**d)
else:
object.__setattr__(self, name, value)
def __getitem__(self, index):
"""
        Return the specified row of this table, decoding metadata if it is present.
Supports negative indexing, e.g. ``table[-5]``.
:param int index: the zero-index of the desired row
"""
if index < 0:
index += len(self)
if index < 0 or index >= len(self):
raise IndexError("Index out of bounds")
row = self.ll_table.get_row(index)
try:
row = self.decode_row(row)
except AttributeError:
# This means the class returns the low-level row unchanged.
pass
return self.row_class(*row)
def clear(self):
"""
Deletes all rows in this table.
"""
self.ll_table.clear()
def reset(self):
# Deprecated alias for clear
self.clear()
def truncate(self, num_rows):
"""
Truncates this table so that the only the first ``num_rows`` are retained.
:param int num_rows: The number of rows to retain in this table.
"""
return self.ll_table.truncate(num_rows)
# Pickle support
def __getstate__(self):
return self.asdict()
# Unpickle support
def __setstate__(self, state):
self.__init__()
self.set_columns(**state)
def copy(self):
"""
Returns a deep copy of this table
"""
copy = self.__class__()
copy.set_columns(**self.asdict())
return copy
def asdict(self):
"""
Returns a dictionary mapping the names of the columns in this table
to the corresponding numpy arrays.
"""
ret = {col: getattr(self, col) for col in self.column_names}
# Not all tables have metadata
try:
ret["metadata_schema"] = repr(self.metadata_schema)
except AttributeError:
pass
return ret
def set_columns(self, **kwargs):
"""
Sets the values for each column in this :class:`Table` using
values provided in numpy arrays. Overwrites any data currently stored in
the table.
"""
raise NotImplementedError()
def __str__(self):
headers, rows = self._text_header_and_rows()
return "\n".join("\t".join(row) for row in [headers] + rows)
def _repr_html_(self):
"""
Called by jupyter notebooks to render tables
"""
headers, rows = self._text_header_and_rows(limit=40)
headers = "".join(f"<th>{header}</th>" for header in headers)
rows = (
f"<td><em>... skipped {row[11:]} rows ...</em></td>"
if "__skipped__" in row
else "".join(f"<td>{cell}</td>" for cell in row)
for row in rows
)
rows = "".join(f"<tr>{row}</tr>\n" for row in rows)
return f"""
<div>
<style scoped="">
.tskit-table tbody tr th:only-of-type {{vertical-align: middle;}}
.tskit-table tbody tr th {{vertical-align: top;}}
.tskit-table tbody td {{text-align: right;padding: 0.5em 0.5em;}}
.tskit-table tbody th {{padding: 0.5em 0.5em;}}
</style>
<table border="1" class="tskit-table">
<thead>
<tr>
{headers}
</tr>
</thead>
<tbody>
{rows}
</tbody>
</table>
</div>
"""
class MetadataMixin:
"""
Mixin class for tables that have a metadata column.
"""
def __init__(self):
self.metadata_column_index = [
field.name for field in dataclasses.fields(self.row_class)
].index("metadata")
self._update_metadata_schema_cache_from_ll()
def packset_metadata(self, metadatas):
"""
Packs the specified list of metadata values and updates the ``metadata``
and ``metadata_offset`` columns. The length of the metadatas array
must be equal to the number of rows in the table.
:param list metadatas: A list of metadata bytes values.
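
        Example (illustrative sketch, assuming ``table`` has three rows and no
        metadata schema requiring encoding)::

            table.packset_metadata([b"alpha", b"", b"gamma"])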
"""
packed, offset = util.pack_bytes(metadatas)
d = self.asdict()
d["metadata"] = packed
d["metadata_offset"] = offset
self.set_columns(**d)
@property
def metadata_schema(self) -> metadata.MetadataSchema:
"""
The :class:`tskit.MetadataSchema` for this table.
"""
return self._metadata_schema_cache
@metadata_schema.setter
def metadata_schema(self, schema: metadata.MetadataSchema) -> None:
if not isinstance(schema, metadata.MetadataSchema):
raise TypeError(
"Only instances of tskit.MetadataSchema can be assigned to "
f"metadata_schema, not {type(schema)}"
)
self.ll_table.metadata_schema = repr(schema)
self._update_metadata_schema_cache_from_ll()
def decode_row(self, row: Tuple[Any]) -> Tuple:
return (
row[: self.metadata_column_index]
+ (self._metadata_schema_cache.decode_row(row[self.metadata_column_index]),)
+ row[self.metadata_column_index + 1 :]
)
def _update_metadata_schema_cache_from_ll(self) -> None:
self._metadata_schema_cache = metadata.parse_metadata_schema(
self.ll_table.metadata_schema
)
class IndividualTable(BaseTable, MetadataMixin):
"""
A table defining the individuals in a tree sequence. Note that although
each Individual has associated nodes, reference to these is not stored in
the individual table, but rather reference to the individual is stored for
each node in the :class:`NodeTable`. This is similar to the way in which
the relationship between sites and mutations is modelled.
:warning: The numpy arrays returned by table attribute accesses are **copies**
of the underlying data. In particular, this means that you cannot edit
the values in the columns by updating the attribute arrays.
**NOTE:** this behaviour may change in future.
:ivar flags: The array of flags values.
:vartype flags: numpy.ndarray, dtype=np.uint32
:ivar location: The flattened array of floating point location values. See
:ref:`sec_encoding_ragged_columns` for more details.
:vartype location: numpy.ndarray, dtype=np.float64
:ivar location_offset: The array of offsets into the location column. See
:ref:`sec_encoding_ragged_columns` for more details.
:vartype location_offset: numpy.ndarray, dtype=np.uint32
:ivar parents: The flattened array of parent individual ids. See
:ref:`sec_encoding_ragged_columns` for more details.
:vartype parents: numpy.ndarray, dtype=np.int32
:ivar parents_offset: The array of offsets into the parents column. See
:ref:`sec_encoding_ragged_columns` for more details.
:vartype parents_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata: The flattened array of binary metadata values. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata: numpy.ndarray, dtype=np.int8
:ivar metadata_offset: The array of offsets into the metadata column. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata_schema: The metadata schema for this table's metadata column
:vartype metadata_schema: tskit.MetadataSchema
"""
column_names = [
"flags",
"location",
"location_offset",
"parents",
"parents_offset",
"metadata",
"metadata_offset",
]
def __init__(self, max_rows_increment=0, ll_table=None):
if ll_table is None:
ll_table = _tskit.IndividualTable(max_rows_increment=max_rows_increment)
super().__init__(ll_table, IndividualTableRow)
def _text_header_and_rows(self, limit=None):
flags = self.flags
location = util.unpack_arrays(self.location, self.location_offset)
parents = util.unpack_arrays(self.parents, self.parents_offset)
metadata = util.unpack_bytes(self.metadata, self.metadata_offset)
headers = ("id", "flags", "location", "parents", "metadata")
rows = []
if limit is None or self.num_rows <= limit:
indexes = range(self.num_rows)
else:
indexes = itertools.chain(
range(limit // 2),
[-1],
range(self.num_rows - (limit - (limit // 2)), self.num_rows),
)
for j in indexes:
if j == -1:
rows.append(f"__skipped__{self.num_rows-limit}")
else:
md = base64.b64encode(metadata[j]).decode("utf8")
location_str = ",".join(map(str, location[j]))
parents_str = ",".join(map(str, parents[j]))
rows.append(
"{}\t{}\t{}\t{}\t{}".format(
j, flags[j], location_str, parents_str, md
).split("\t")
)
return headers, rows
def add_row(self, flags=0, location=None, parents=None, metadata=None):
"""
Adds a new row to this :class:`IndividualTable` and returns the ID of the
corresponding individual. Metadata, if specified, will be validated and encoded
according to the table's
:attr:`metadata_schema<tskit.IndividualTable.metadata_schema>`.
:param int flags: The bitwise flags for the new individual.
:param array-like location: A list of numeric values or one-dimensional numpy
array describing the location of this individual. If not specified
or None, a zero-dimensional location is stored.
:param array-like parents: A list or array of ids of parent individuals. If not
specified an empty array is stored.
:param object metadata: Any object that is valid metadata for the table's schema.
Defaults to the default metadata value for the table's schema. This is
typically ``{}``. For no schema, ``None``.
:return: The ID of the newly added individual.
:rtype: int
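
For example, an illustrative sketch (values are arbitrary; the table here has
no metadata schema, so raw bytes are used for metadata)::

    import tskit
    individuals = tskit.IndividualTable()
    ind_id = individuals.add_row(
        flags=0, location=[0.5, 1.5], parents=[-1, -1], metadata=b"sample-0"
    )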
"""
if metadata is None:
metadata = self.metadata_schema.empty_value
metadata = self.metadata_schema.validate_and_encode_row(metadata)
return self.ll_table.add_row(
flags=flags, location=location, parents=parents, metadata=metadata
)
def set_columns(
self,
flags=None,
location=None,
location_offset=None,
parents=None,
parents_offset=None,
metadata=None,
metadata_offset=None,
metadata_schema=None,
):
"""
Sets the values for each column in this :class:`IndividualTable` using the
values in the specified arrays. Overwrites any data currently stored in
the table.
The ``flags`` array is mandatory and defines the number of individuals
the table will contain.
The ``location`` and ``location_offset`` parameters must be supplied
together, and meet the requirements for :ref:`sec_encoding_ragged_columns`.
The ``parents`` and ``parents_offset`` parameters must be supplied
together, and meet the requirements for :ref:`sec_encoding_ragged_columns`.
The ``metadata`` and ``metadata_offset`` parameters must be supplied
together, and meet the requirements for :ref:`sec_encoding_ragged_columns`.
See :ref:`sec_tables_api_binary_columns` for more information and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param flags: The bitwise flags for each individual. Required.
:type flags: numpy.ndarray, dtype=np.uint32
:param location: The flattened location array. Must be specified along
with ``location_offset``. If not specified or None, an empty location
value is stored for each individual.
:type location: numpy.ndarray, dtype=np.float64
:param location_offset: The offsets into the ``location`` array.
:type location_offset: numpy.ndarray, dtype=np.uint32.
:param parents: The flattened parents array. Must be specified along
with ``parents_offset``. If not specified or None, an empty parents array
is stored for each individual.
:type parents: numpy.ndarray, dtype=np.int32
:param parents_offset: The offsets into the ``parents`` array.
:type parents_offset: numpy.ndarray, dtype=np.uint32.
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each individual.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
:param metadata_schema: The encoded metadata schema.
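
For example, a sketch of the ragged encoding (two individuals; the first has a
two-dimensional location, the second a one-dimensional one; values are
arbitrary)::

    import numpy as np
    import tskit
    individuals = tskit.IndividualTable()
    individuals.set_columns(
        flags=np.zeros(2, dtype=np.uint32),
        location=np.array([0.0, 1.0, 2.0], dtype=np.float64),
        location_offset=np.array([0, 2, 3], dtype=np.uint32),
    )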
"""
self._check_required_args(flags=flags)
self.ll_table.set_columns(
dict(
flags=flags,
location=location,
location_offset=location_offset,
parents=parents,
parents_offset=parents_offset,
metadata=metadata,
metadata_offset=metadata_offset,
metadata_schema=metadata_schema,
)
)
def append_columns(
self,
flags=None,
location=None,
location_offset=None,
parents=None,
parents_offset=None,
metadata=None,
metadata_offset=None,
):
"""
Appends the specified arrays to the end of the columns in this
:class:`IndividualTable`. This allows many new rows to be added at once.
The ``flags`` array is mandatory and defines the number of
extra individuals to add to the table.
The ``parents`` and ``parents_offset`` parameters must be supplied
together, and meet the requirements for :ref:`sec_encoding_ragged_columns`.
The ``location`` and ``location_offset`` parameters must be supplied
together, and meet the requirements for :ref:`sec_encoding_ragged_columns`.
The ``metadata`` and ``metadata_offset`` parameters must be supplied
together, and meet the requirements for :ref:`sec_encoding_ragged_columns`.
See :ref:`sec_tables_api_binary_columns` for more information and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param flags: The bitwise flags for each individual. Required.
:type flags: numpy.ndarray, dtype=np.uint32
:param location: The flattened location array. Must be specified along
with ``location_offset``. If not specified or None, an empty location
value is stored for each individual.
:type location: numpy.ndarray, dtype=np.float64
:param location_offset: The offsets into the ``location`` array.
:type location_offset: numpy.ndarray, dtype=np.uint32.
:param parents: The flattened parents array. Must be specified along
with ``parents_offset``. If not specified or None, an empty parents array
is stored for each individual.
:type parents: numpy.ndarray, dtype=np.int32
:param parents_offset: The offsets into the ``parents`` array.
:type parents_offset: numpy.ndarray, dtype=np.uint32.
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each individual.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
"""
self._check_required_args(flags=flags)
self.ll_table.append_columns(
dict(
flags=flags,
location=location,
location_offset=location_offset,
parents=parents,
parents_offset=parents_offset,
metadata=metadata,
metadata_offset=metadata_offset,
)
)
def packset_location(self, locations):
"""
Packs the specified list of location values and updates the ``location``
and ``location_offset`` columns. The length of the locations array
must be equal to the number of rows in the table.
:param list locations: A list of locations interpreted as numpy float64
arrays.
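
For example, a minimal sketch (one location per existing row; the values are
arbitrary)::

    import tskit
    individuals = tskit.IndividualTable()
    individuals.add_row()
    individuals.add_row()
    individuals.packset_location([[0.0, 1.0], [2.5]])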
"""
packed, offset = util.pack_arrays(locations)
d = self.asdict()
d["location"] = packed
d["location_offset"] = offset
self.set_columns(**d)
def packset_parents(self, parents):
"""
Packs the specified list of parent values and updates the ``parents``
and ``parents_offset`` columns. The length of the parents array
must be equal to the number of rows in the table.
:param list parents: A list of list of parent ids, interpreted as numpy int32
arrays.
"""
packed, offset = util.pack_arrays(parents, np.int32)
d = self.asdict()
d["parents"] = packed
d["parents_offset"] = offset
self.set_columns(**d)
class NodeTable(BaseTable, MetadataMixin):
"""
A table defining the nodes in a tree sequence. See the
:ref:`definitions <sec_node_table_definition>` for details on the columns
in this table and the
:ref:`tree sequence requirements <sec_valid_tree_sequence_requirements>` section
for the properties needed for a node table to be a part of a valid tree sequence.
:warning: The numpy arrays returned by table attribute accesses are **copies**
of the underlying data. In particular, this means that you cannot edit
the values in the columns by updating the attribute arrays.
**NOTE:** this behaviour may change in future.
:ivar time: The array of time values.
:vartype time: numpy.ndarray, dtype=np.float64
:ivar flags: The array of flags values.
:vartype flags: numpy.ndarray, dtype=np.uint32
:ivar population: The array of population IDs.
:vartype population: numpy.ndarray, dtype=np.int32
:ivar individual: The array of individual IDs that each node belongs to.
:vartype individual: numpy.ndarray, dtype=np.int32
:ivar metadata: The flattened array of binary metadata values. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata: numpy.ndarray, dtype=np.int8
:ivar metadata_offset: The array of offsets into the metadata column. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata_schema: The metadata schema for this table's metadata column
:vartype metadata_schema: tskit.MetadataSchema
"""
column_names = [
"time",
"flags",
"population",
"individual",
"metadata",
"metadata_offset",
]
def __init__(self, max_rows_increment=0, ll_table=None):
if ll_table is None:
ll_table = _tskit.NodeTable(max_rows_increment=max_rows_increment)
super().__init__(ll_table, NodeTableRow)
def _text_header_and_rows(self, limit=None):
time = self.time
flags = self.flags
population = self.population
individual = self.individual
metadata = util.unpack_bytes(self.metadata, self.metadata_offset)
headers = ("id", "flags", "population", "individual", "time", "metadata")
rows = []
if limit is None or self.num_rows <= limit:
indexes = range(self.num_rows)
else:
indexes = itertools.chain(
range(limit // 2),
[-1],
range(self.num_rows - (limit - (limit // 2)), self.num_rows),
)
for j in indexes:
if j == -1:
rows.append(f"__skipped__{self.num_rows-limit}")
else:
md = base64.b64encode(metadata[j]).decode("utf8")
rows.append(
"{}\t{}\t{}\t{}\t{:.14f}\t{}".format(
j, flags[j], population[j], individual[j], time[j], md
).split("\t")
)
return headers, rows
def add_row(self, flags=0, time=0, population=-1, individual=-1, metadata=None):
"""
Adds a new row to this :class:`NodeTable` and returns the ID of the
corresponding node. Metadata, if specified, will be validated and encoded
according to the table's
:attr:`metadata_schema<tskit.NodeTable.metadata_schema>`.
:param int flags: The bitwise flags for the new node.
:param float time: The birth time for the new node.
:param int population: The ID of the population in which the new node was born.
Defaults to :data:`tskit.NULL`.
:param int individual: The ID of the individual in which the new node was born.
Defaults to :data:`tskit.NULL`.
:param object metadata: Any object that is valid metadata for the table's schema.
Defaults to the default metadata value for the table's schema. This is
typically ``{}``. For no schema, ``None``.
:return: The ID of the newly added node.
:rtype: int
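
For example, an illustrative sketch (a single sample node born at time 0 into
population 0; the flag uses the standard ``tskit.NODE_IS_SAMPLE`` constant)::

    import tskit
    nodes = tskit.NodeTable()
    node_id = nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0, population=0)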
"""
if metadata is None:
metadata = self.metadata_schema.empty_value
metadata = self.metadata_schema.validate_and_encode_row(metadata)
return self.ll_table.add_row(flags, time, population, individual, metadata)
def set_columns(
self,
flags=None,
time=None,
population=None,
individual=None,
metadata=None,
metadata_offset=None,
metadata_schema=None,
):
"""
Sets the values for each column in this :class:`NodeTable` using the values in
the specified arrays. Overwrites any data currently stored in the table.
The ``flags``, ``time`` and ``population`` arrays must all be of the same length,
which is equal to the number of nodes the table will contain. The
``metadata`` and ``metadata_offset`` parameters must be supplied together, and
meet the requirements for :ref:`sec_encoding_ragged_columns`.
See :ref:`sec_tables_api_binary_columns` for more information and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param flags: The bitwise flags for each node. Required.
:type flags: numpy.ndarray, dtype=np.uint32
:param time: The time values for each node. Required.
:type time: numpy.ndarray, dtype=np.float64
:param population: The population values for each node. If not specified
or None, the :data:`tskit.NULL` value is stored for each node.
:type population: numpy.ndarray, dtype=np.int32
:param individual: The individual values for each node. If not specified
or None, the :data:`tskit.NULL` value is stored for each node.
:type individual: numpy.ndarray, dtype=np.int32
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each node.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
:param metadata_schema: The encoded metadata schema.
"""
self._check_required_args(flags=flags, time=time)
self.ll_table.set_columns(
dict(
flags=flags,
time=time,
population=population,
individual=individual,
metadata=metadata,
metadata_offset=metadata_offset,
metadata_schema=metadata_schema,
)
)
def append_columns(
self,
flags=None,
time=None,
population=None,
individual=None,
metadata=None,
metadata_offset=None,
):
"""
Appends the specified arrays to the end of the columns in this
:class:`NodeTable`. This allows many new rows to be added at once.
The ``flags``, ``time`` and ``population`` arrays must all be of the same length,
which is equal to the number of nodes that will be added to the table. The
``metadata`` and ``metadata_offset`` parameters must be supplied together, and
meet the requirements for :ref:`sec_encoding_ragged_columns`.
See :ref:`sec_tables_api_binary_columns` for more information and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param flags: The bitwise flags for each node. Required.
:type flags: numpy.ndarray, dtype=np.uint32
:param time: The time values for each node. Required.
:type time: numpy.ndarray, dtype=np.float64
:param population: The population values for each node. If not specified
or None, the :data:`tskit.NULL` value is stored for each node.
:type population: numpy.ndarray, dtype=np.int32
:param individual: The individual values for each node. If not specified
or None, the :data:`tskit.NULL` value is stored for each node.
:type individual: numpy.ndarray, dtype=np.int32
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each node.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
"""
self._check_required_args(flags=flags, time=time)
self.ll_table.append_columns(
dict(
flags=flags,
time=time,
population=population,
individual=individual,
metadata=metadata,
metadata_offset=metadata_offset,
metadata_schema=None,
)
)
class EdgeTable(BaseTable, MetadataMixin):
"""
A table defining the edges in a tree sequence. See the
:ref:`definitions <sec_edge_table_definition>` for details on the columns
in this table and the
:ref:`tree sequence requirements <sec_valid_tree_sequence_requirements>` section
for the properties needed for an edge table to be a part of a valid tree sequence.
:warning: The numpy arrays returned by table attribute accesses are **copies**
of the underlying data. In particular, this means that you cannot edit
the values in the columns by updating the attribute arrays.
**NOTE:** this behaviour may change in future.
:ivar left: The array of left coordinates.
:vartype left: numpy.ndarray, dtype=np.float64
:ivar right: The array of right coordinates.
:vartype right: numpy.ndarray, dtype=np.float64
:ivar parent: The array of parent node IDs.
:vartype parent: numpy.ndarray, dtype=np.int32
:ivar child: The array of child node IDs.
:vartype child: numpy.ndarray, dtype=np.int32
:ivar metadata: The flattened array of binary metadata values. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata: numpy.ndarray, dtype=np.int8
:ivar metadata_offset: The array of offsets into the metadata column. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata_schema: The metadata schema for this table's metadata column
:vartype metadata_schema: tskit.MetadataSchema
"""
column_names = [
"left",
"right",
"parent",
"child",
"metadata",
"metadata_offset",
]
def __init__(self, max_rows_increment=0, ll_table=None):
if ll_table is None:
ll_table = _tskit.EdgeTable(max_rows_increment=max_rows_increment)
super().__init__(ll_table, EdgeTableRow)
def _text_header_and_rows(self, limit=None):
left = self.left
right = self.right
parent = self.parent
child = self.child
metadata = util.unpack_bytes(self.metadata, self.metadata_offset)
headers = ("id", "left\t", "right\t", "parent", "child", "metadata")
rows = []
if limit is None or self.num_rows <= limit:
indexes = range(self.num_rows)
else:
indexes = itertools.chain(
range(limit // 2),
[-1],
range(self.num_rows - (limit - (limit // 2)), self.num_rows),
)
for j in indexes:
if j == -1:
rows.append(f"__skipped__{self.num_rows-limit}")
else:
md = base64.b64encode(metadata[j]).decode("utf8")
rows.append(
"{}\t{:.8f}\t{:.8f}\t{}\t{}\t{}".format(
j, left[j], right[j], parent[j], child[j], md
).split("\t")
)
return headers, rows
def add_row(self, left, right, parent, child, metadata=None):
"""
Adds a new row to this :class:`EdgeTable` and returns the ID of the
corresponding edge. Metadata, if specified, will be validated and encoded
according to the table's
:attr:`metadata_schema<tskit.EdgeTable.metadata_schema>`.
:param float left: The left coordinate (inclusive).
:param float right: The right coordinate (exclusive).
:param int parent: The ID of the parent node.
:param int child: The ID of the child node.
:param object metadata: Any object that is valid metadata for the table's schema.
Defaults to the default metadata value for the table's schema. This is
typically ``{}``. For no schema, ``None``.
:return: The ID of the newly added edge.
:rtype: int
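
For example, an illustrative sketch (an edge covering the interval [0, 10)
joining child node 0 to parent node 1; values are arbitrary)::

    import tskit
    edges = tskit.EdgeTable()
    edge_id = edges.add_row(left=0, right=10, parent=1, child=0)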
"""
if metadata is None:
metadata = self.metadata_schema.empty_value
metadata = self.metadata_schema.validate_and_encode_row(metadata)
return self.ll_table.add_row(left, right, parent, child, metadata)
def set_columns(
self,
left=None,
right=None,
parent=None,
child=None,
metadata=None,
metadata_offset=None,
metadata_schema=None,
):
"""
Sets the values for each column in this :class:`EdgeTable` using the values
in the specified arrays. Overwrites any data currently stored in the table.
The ``left``, ``right``, ``parent`` and ``child`` parameters are mandatory,
and must be numpy arrays of the same length (which is equal to the number of
edges the table will contain).
The ``metadata`` and ``metadata_offset`` parameters must be supplied together,
and meet the requirements for :ref:`sec_encoding_ragged_columns`.
See :ref:`sec_tables_api_binary_columns` for more information and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param left: The left coordinates (inclusive).
:type left: numpy.ndarray, dtype=np.float64
:param right: The right coordinates (exclusive).
:type right: numpy.ndarray, dtype=np.float64
:param parent: The parent node IDs.
:type parent: numpy.ndarray, dtype=np.int32
:param child: The child node IDs.
:type child: numpy.ndarray, dtype=np.int32
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each edge.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
:param metadata_schema: The encoded metadata schema.
"""
self._check_required_args(left=left, right=right, parent=parent, child=child)
self.ll_table.set_columns(
dict(
left=left,
right=right,
parent=parent,
child=child,
metadata=metadata,
metadata_offset=metadata_offset,
metadata_schema=metadata_schema,
)
)
def append_columns(
self, left, right, parent, child, metadata=None, metadata_offset=None
):
"""
Appends the specified arrays to the end of the columns of this
:class:`EdgeTable`. This allows many new rows to be added at once.
The ``left``, ``right``, ``parent`` and ``child`` parameters are mandatory,
and must be numpy arrays of the same length (which is equal to the number of
additional edges to add to the table). The ``metadata`` and
``metadata_offset`` parameters must be supplied together, and
meet the requirements for :ref:`sec_encoding_ragged_columns`.
See :ref:`sec_tables_api_binary_columns` for more information and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param left: The left coordinates (inclusive).
:type left: numpy.ndarray, dtype=np.float64
:param right: The right coordinates (exclusive).
:type right: numpy.ndarray, dtype=np.float64
:param parent: The parent node IDs.
:type parent: numpy.ndarray, dtype=np.int32
:param child: The child node IDs.
:type child: numpy.ndarray, dtype=np.int32
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each edge.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
"""
self.ll_table.append_columns(
dict(
left=left,
right=right,
parent=parent,
child=child,
metadata=metadata,
metadata_offset=metadata_offset,
)
)
def squash(self):
"""
Sorts, then condenses the table into the smallest possible number of rows by
combining any adjacent edges.
A pair of edges is said to be `adjacent` if they have the same parent and child
nodes, and if the left coordinate of one of the edges is equal to the right
coordinate of the other edge.
The ``squash`` method modifies an :class:`EdgeTable` in place so that any set of
adjacent edges is replaced by a single edge.
The new edge will have the same parent and child node, a left coordinate
equal to the smallest left coordinate in the set, and a right coordinate
equal to the largest right coordinate in the set.
The new edge table will be sorted in the canonical order (P, C, L, R).
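
For example, a sketch in which two abutting edges for the same parent/child
pair are merged into one (values are arbitrary)::

    import tskit
    edges = tskit.EdgeTable()
    edges.add_row(left=0, right=5, parent=2, child=0)
    edges.add_row(left=5, right=10, parent=2, child=0)
    edges.squash()  # leaves a single edge covering [0, 10)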
"""
self.ll_table.squash()
class MigrationTable(BaseTable, MetadataMixin):
"""
A table defining the migrations in a tree sequence. See the
:ref:`definitions <sec_migration_table_definition>` for details on the columns
in this table and the
:ref:`tree sequence requirements <sec_valid_tree_sequence_requirements>` section
for the properties needed for a migration table to be a part of a valid tree
sequence.
:warning: The numpy arrays returned by table attribute accesses are **copies**
of the underlying data. In particular, this means that you cannot edit
the values in the columns by updating the attribute arrays.
**NOTE:** this behaviour may change in future.
:ivar left: The array of left coordinates.
:vartype left: numpy.ndarray, dtype=np.float64
:ivar right: The array of right coordinates.
:vartype right: numpy.ndarray, dtype=np.float64
:ivar node: The array of node IDs.
:vartype node: numpy.ndarray, dtype=np.int32
:ivar source: The array of source population IDs.
:vartype source: numpy.ndarray, dtype=np.int32
:ivar dest: The array of destination population IDs.
:vartype dest: numpy.ndarray, dtype=np.int32
:ivar time: The array of time values.
:vartype time: numpy.ndarray, dtype=np.float64
:ivar metadata: The flattened array of binary metadata values. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata: numpy.ndarray, dtype=np.int8
:ivar metadata_offset: The array of offsets into the metadata column. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata_schema: The metadata schema for this table's metadata column
:vartype metadata_schema: tskit.MetadataSchema
"""
column_names = [
"left",
"right",
"node",
"source",
"dest",
"time",
"metadata",
"metadata_offset",
]
def __init__(self, max_rows_increment=0, ll_table=None):
if ll_table is None:
ll_table = _tskit.MigrationTable(max_rows_increment=max_rows_increment)
super().__init__(ll_table, MigrationTableRow)
def _text_header_and_rows(self, limit=None):
left = self.left
right = self.right
node = self.node
source = self.source
dest = self.dest
time = self.time
metadata = util.unpack_bytes(self.metadata, self.metadata_offset)
headers = ("id", "left", "right", "node", "source", "dest", "time", "metadata")
rows = []
if limit is None or self.num_rows <= limit:
indexes = range(self.num_rows)
else:
indexes = itertools.chain(
range(limit // 2),
[-1],
range(self.num_rows - (limit - (limit // 2)), self.num_rows),
)
for j in indexes:
if j == -1:
rows.append(f"__skipped__{self.num_rows-limit}")
else:
md = base64.b64encode(metadata[j]).decode("utf8")
rows.append(
"{}\t{:.8f}\t{:.8f}\t{}\t{}\t{}\t{:.8f}\t{}".format(
j, left[j], right[j], node[j], source[j], dest[j], time[j], md
).split("\t")
)
return headers, rows
def add_row(self, left, right, node, source, dest, time, metadata=None):
"""
Adds a new row to this :class:`MigrationTable` and returns the ID of the
corresponding migration. Metadata, if specified, will be validated and encoded
according to the table's
:attr:`metadata_schema<tskit.MigrationTable.metadata_schema>`.
:param float left: The left coordinate (inclusive).
:param float right: The right coordinate (exclusive).
:param int node: The node ID.
:param int source: The ID of the source population.
:param int dest: The ID of the destination population.
:param float time: The time of the migration event.
:param object metadata: Any object that is valid metadata for the table's schema.
Defaults to the default metadata value for the table's schema. This is
typically ``{}``. For no schema, ``None``.
:return: The ID of the newly added migration.
:rtype: int
"""
if metadata is None:
metadata = self.metadata_schema.empty_value
metadata = self.metadata_schema.validate_and_encode_row(metadata)
return self.ll_table.add_row(left, right, node, source, dest, time, metadata)
def set_columns(
self,
left=None,
right=None,
node=None,
source=None,
dest=None,
time=None,
metadata=None,
metadata_offset=None,
metadata_schema=None,
):
"""
Sets the values for each column in this :class:`MigrationTable` using the values
in the specified arrays. Overwrites any data currently stored in the table.
All parameters except ``metadata`` and ``metadata_offset`` are mandatory,
and must be numpy arrays of the same length (which is equal to the number of
migrations the table will contain).
The ``metadata`` and ``metadata_offset`` parameters must be supplied together,
and meet the requirements for :ref:`sec_encoding_ragged_columns`.
See :ref:`sec_tables_api_binary_columns` for more information and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param left: The left coordinates (inclusive).
:type left: numpy.ndarray, dtype=np.float64
:param right: The right coordinates (exclusive).
:type right: numpy.ndarray, dtype=np.float64
:param node: The node IDs.
:type node: numpy.ndarray, dtype=np.int32
:param source: The source population IDs.
:type source: numpy.ndarray, dtype=np.int32
:param dest: The destination population IDs.
:type dest: numpy.ndarray, dtype=np.int32
:param time: The time of each migration.
:type time: numpy.ndarray, dtype=np.float64
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each migration.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
:param metadata_schema: The encoded metadata schema.
"""
self._check_required_args(
left=left, right=right, node=node, source=source, dest=dest, time=time
)
self.ll_table.set_columns(
dict(
left=left,
right=right,
node=node,
source=source,
dest=dest,
time=time,
metadata=metadata,
metadata_offset=metadata_offset,
metadata_schema=metadata_schema,
)
)
def append_columns(
self,
left,
right,
node,
source,
dest,
time,
metadata=None,
metadata_offset=None,
):
"""
Appends the specified arrays to the end of the columns of this
:class:`MigrationTable`. This allows many new rows to be added at once.
All parameters except ``metadata`` and ``metadata_offset`` are mandatory,
and must be numpy arrays of the same length (which is equal to the number of
additional migrations to add to the table). The ``metadata`` and
``metadata_offset`` parameters must be supplied together, and
meet the requirements for :ref:`sec_encoding_ragged_columns`.
See :ref:`sec_tables_api_binary_columns` for more information and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param left: The left coordinates (inclusive).
:type left: numpy.ndarray, dtype=np.float64
:param right: The right coordinates (exclusive).
:type right: numpy.ndarray, dtype=np.float64
:param node: The node IDs.
:type node: numpy.ndarray, dtype=np.int32
:param source: The source population IDs.
:type source: numpy.ndarray, dtype=np.int32
:param dest: The destination population IDs.
:type dest: numpy.ndarray, dtype=np.int32
:param time: The time of each migration.
:type time: numpy.ndarray, dtype=np.float64
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each migration.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
"""
self.ll_table.append_columns(
dict(
left=left,
right=right,
node=node,
source=source,
dest=dest,
time=time,
metadata=metadata,
metadata_offset=metadata_offset,
)
)
class SiteTable(BaseTable, MetadataMixin):
"""
A table defining the sites in a tree sequence. See the
:ref:`definitions <sec_site_table_definition>` for details on the columns
in this table and the
:ref:`tree sequence requirements <sec_valid_tree_sequence_requirements>` section
for the properties needed for a site table to be a part of a valid tree
sequence.
:warning: The numpy arrays returned by table attribute accesses are **copies**
of the underlying data. In particular, this means that you cannot edit
the values in the columns by updating the attribute arrays.
**NOTE:** this behaviour may change in future.
:ivar position: The array of site position coordinates.
:vartype position: numpy.ndarray, dtype=np.float64
:ivar ancestral_state: The flattened array of ancestral state strings.
See :ref:`sec_tables_api_text_columns` for more details.
:vartype ancestral_state: numpy.ndarray, dtype=np.int8
:ivar ancestral_state_offset: The offsets of rows in the ancestral_state
array. See :ref:`sec_tables_api_text_columns` for more details.
:vartype ancestral_state_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata: The flattened array of binary metadata values. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata: numpy.ndarray, dtype=np.int8
:ivar metadata_offset: The array of offsets into the metadata column. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata_schema: The metadata schema for this table's metadata column
:vartype metadata_schema: tskit.MetadataSchema
"""
column_names = [
"position",
"ancestral_state",
"ancestral_state_offset",
"metadata",
"metadata_offset",
]
def __init__(self, max_rows_increment=0, ll_table=None):
if ll_table is None:
ll_table = _tskit.SiteTable(max_rows_increment=max_rows_increment)
super().__init__(ll_table, SiteTableRow)
def _text_header_and_rows(self, limit=None):
position = self.position
ancestral_state = util.unpack_strings(
self.ancestral_state, self.ancestral_state_offset
)
metadata = util.unpack_bytes(self.metadata, self.metadata_offset)
headers = ("id", "position", "ancestral_state", "metadata")
rows = []
if limit is None or self.num_rows <= limit:
indexes = range(self.num_rows)
else:
indexes = itertools.chain(
range(limit // 2),
[-1],
range(self.num_rows - (limit - (limit // 2)), self.num_rows),
)
for j in indexes:
if j == -1:
rows.append(f"__skipped__{self.num_rows-limit}")
else:
md = base64.b64encode(metadata[j]).decode("utf8")
rows.append(
"{}\t{:.8f}\t{}\t{}".format(
j, position[j], ancestral_state[j], md
).split("\t")
)
return headers, rows
def add_row(self, position, ancestral_state, metadata=None):
"""
Adds a new row to this :class:`SiteTable` and returns the ID of the
corresponding site. Metadata, if specified, will be validated and encoded
according to the table's
:attr:`metadata_schema<tskit.SiteTable.metadata_schema>`.
:param float position: The position of this site in genome coordinates.
:param str ancestral_state: The state of this site at the root of the tree.
:param object metadata: Any object that is valid metadata for the table's schema.
Defaults to the default metadata value for the table's schema. This is
typically ``{}``. For no schema, ``None``.
:return: The ID of the newly added site.
:rtype: int
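
For example, an illustrative sketch (a site at position 12.5 with ancestral
state "A"; values are arbitrary)::

    import tskit
    sites = tskit.SiteTable()
    site_id = sites.add_row(position=12.5, ancestral_state="A")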
"""
if metadata is None:
metadata = self.metadata_schema.empty_value
metadata = self.metadata_schema.validate_and_encode_row(metadata)
return self.ll_table.add_row(position, ancestral_state, metadata)
def set_columns(
self,
position=None,
ancestral_state=None,
ancestral_state_offset=None,
metadata=None,
metadata_offset=None,
metadata_schema=None,
):
"""
Sets the values for each column in this :class:`SiteTable` using the values
in the specified arrays. Overwrites any data currently stored in the table.
The ``position``, ``ancestral_state`` and ``ancestral_state_offset``
parameters are mandatory, and must be 1D numpy arrays. The length
of the ``position`` array determines the number of rows in the table.
The ``ancestral_state`` and ``ancestral_state_offset`` parameters must
be supplied together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_text_columns` for more information). The
``metadata`` and ``metadata_offset`` parameters must be supplied
together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_binary_columns` for more information) and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param position: The position of each site in genome coordinates.
:type position: numpy.ndarray, dtype=np.float64
:param ancestral_state: The flattened ancestral_state array. Required.
:type ancestral_state: numpy.ndarray, dtype=np.int8
:param ancestral_state_offset: The offsets into the ``ancestral_state`` array.
:type ancestral_state_offset: numpy.ndarray, dtype=np.uint32.
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each site.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
:param metadata_schema: The encoded metadata schema.
"""
self._check_required_args(
position=position,
ancestral_state=ancestral_state,
ancestral_state_offset=ancestral_state_offset,
)
self.ll_table.set_columns(
dict(
position=position,
ancestral_state=ancestral_state,
ancestral_state_offset=ancestral_state_offset,
metadata=metadata,
metadata_offset=metadata_offset,
metadata_schema=metadata_schema,
)
)
def append_columns(
self,
position,
ancestral_state,
ancestral_state_offset,
metadata=None,
metadata_offset=None,
):
"""
Appends the specified arrays to the end of the columns of this
:class:`SiteTable`. This allows many new rows to be added at once.
The ``position``, ``ancestral_state`` and ``ancestral_state_offset``
parameters are mandatory, and must be 1D numpy arrays. The length
of the ``position`` array determines the number of additional rows
to add to the table.
The ``ancestral_state`` and ``ancestral_state_offset`` parameters must
be supplied together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_text_columns` for more information). The
``metadata`` and ``metadata_offset`` parameters must be supplied
together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_binary_columns` for more information) and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param position: The position of each site in genome coordinates.
:type position: numpy.ndarray, dtype=np.float64
:param ancestral_state: The flattened ancestral_state array. Required.
:type ancestral_state: numpy.ndarray, dtype=np.int8
:param ancestral_state_offset: The offsets into the ``ancestral_state`` array.
:type ancestral_state_offset: numpy.ndarray, dtype=np.uint32.
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each site.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
"""
self.ll_table.append_columns(
dict(
position=position,
ancestral_state=ancestral_state,
ancestral_state_offset=ancestral_state_offset,
metadata=metadata,
metadata_offset=metadata_offset,
)
)
def packset_ancestral_state(self, ancestral_states):
"""
Packs the specified list of ancestral_state values and updates the
``ancestral_state`` and ``ancestral_state_offset`` columns. The length
of the ancestral_states array must be equal to the number of rows in
the table.
:param list(str) ancestral_states: A list of string ancestral state values.
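
For example, a minimal sketch (one state string per existing row; values are
arbitrary)::

    import tskit
    sites = tskit.SiteTable()
    sites.add_row(position=1.0, ancestral_state="A")
    sites.add_row(position=2.0, ancestral_state="A")
    sites.packset_ancestral_state(["G", "T"])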
"""
packed, offset = util.pack_strings(ancestral_states)
d = self.asdict()
d["ancestral_state"] = packed
d["ancestral_state_offset"] = offset
self.set_columns(**d)
class MutationTable(BaseTable, MetadataMixin):
"""
A table defining the mutations in a tree sequence. See the
:ref:`definitions <sec_mutation_table_definition>` for details on the columns
in this table and the
:ref:`tree sequence requirements <sec_valid_tree_sequence_requirements>` section
for the properties needed for a mutation table to be a part of a valid tree
sequence.
:warning: The numpy arrays returned by table attribute accesses are **copies**
of the underlying data. In particular, this means that you cannot edit
the values in the columns by updating the attribute arrays.
**NOTE:** this behaviour may change in future.
:ivar site: The array of site IDs.
:vartype site: numpy.ndarray, dtype=np.int32
:ivar node: The array of node IDs.
:vartype node: numpy.ndarray, dtype=np.int32
:ivar time: The array of time values.
:vartype time: numpy.ndarray, dtype=np.float64
:ivar derived_state: The flattened array of derived state strings.
See :ref:`sec_tables_api_text_columns` for more details.
:vartype derived_state: numpy.ndarray, dtype=np.int8
:ivar derived_state_offset: The offsets of rows in the derived_state
array. See :ref:`sec_tables_api_text_columns` for more details.
:vartype derived_state_offset: numpy.ndarray, dtype=np.uint32
:ivar parent: The array of parent mutation IDs.
:vartype parent: numpy.ndarray, dtype=np.int32
:ivar metadata: The flattened array of binary metadata values. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata: numpy.ndarray, dtype=np.int8
:ivar metadata_offset: The array of offsets into the metadata column. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata_schema: The metadata schema for this table's metadata column
:vartype metadata_schema: tskit.MetadataSchema
"""
column_names = [
"site",
"node",
"time",
"derived_state",
"derived_state_offset",
"parent",
"metadata",
"metadata_offset",
]
def __init__(self, max_rows_increment=0, ll_table=None):
if ll_table is None:
ll_table = _tskit.MutationTable(max_rows_increment=max_rows_increment)
super().__init__(ll_table, MutationTableRow)
def _text_header_and_rows(self, limit=None):
site = self.site
node = self.node
parent = self.parent
time = self.time
derived_state = util.unpack_strings(
self.derived_state, self.derived_state_offset
)
metadata = util.unpack_bytes(self.metadata, self.metadata_offset)
headers = ("id", "site", "node", "time", "derived_state", "parent", "metadata")
rows = []
if limit is None or self.num_rows <= limit:
indexes = range(self.num_rows)
else:
indexes = itertools.chain(
range(limit // 2),
[-1],
range(self.num_rows - (limit - (limit // 2)), self.num_rows),
)
for j in indexes:
if j == -1:
rows.append(f"__skipped__{self.num_rows-limit}")
else:
md = base64.b64encode(metadata[j]).decode("utf8")
rows.append(
"{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
j, site[j], node[j], time[j], derived_state[j], parent[j], md
).split("\t")
)
return headers, rows
def add_row(self, site, node, derived_state, parent=-1, metadata=None, time=None):
"""
Adds a new row to this :class:`MutationTable` and returns the ID of the
corresponding mutation. Metadata, if specified, will be validated and encoded
according to the table's
:attr:`metadata_schema<tskit.MutationTable.metadata_schema>`.
:param int site: The ID of the site that this mutation occurs at.
:param int node: The ID of the first node inheriting this mutation.
:param str derived_state: The state of the site at this mutation's node.
:param int parent: The ID of the parent mutation. If not specified,
defaults to :data:`tskit.NULL`.
:param object metadata: Any object that is valid metadata for the table's schema.
Defaults to the default metadata value for the table's schema. This is
typically ``{}``. For no schema, ``None``.
:param float time: The occurrence time for the new mutation. If not specified,
defaults to ``UNKNOWN_TIME``, indicating the time is unknown.
:return: The ID of the newly added mutation.
:rtype: int
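
For example, an illustrative sketch (a mutation to state "T" at site 0,
inherited by node 5, with no known parent mutation or time; values are
arbitrary)::

    import tskit
    mutations = tskit.MutationTable()
    mut_id = mutations.add_row(site=0, node=5, derived_state="T")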
"""
if metadata is None:
metadata = self.metadata_schema.empty_value
metadata = self.metadata_schema.validate_and_encode_row(metadata)
return self.ll_table.add_row(
site,
node,
derived_state,
parent,
metadata,
UNKNOWN_TIME if time is None else time,
)
def set_columns(
self,
site=None,
node=None,
time=None,
derived_state=None,
derived_state_offset=None,
parent=None,
metadata=None,
metadata_offset=None,
metadata_schema=None,
):
"""
Sets the values for each column in this :class:`MutationTable` using the values
in the specified arrays. Overwrites any data currently stored in the table.
The ``site``, ``node``, ``derived_state`` and ``derived_state_offset``
parameters are mandatory, and must be 1D numpy arrays. The
``site`` and ``node`` (also ``parent`` and ``time``, if supplied) arrays
must be of equal length, and determine the number of rows in the table.
The ``derived_state`` and ``derived_state_offset`` parameters must
be supplied together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_text_columns` for more information). The
``metadata`` and ``metadata_offset`` parameters must be supplied
together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_binary_columns` for more information) and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param site: The ID of the site each mutation occurs at.
:type site: numpy.ndarray, dtype=np.int32
:param node: The ID of the node each mutation is associated with.
:type node: numpy.ndarray, dtype=np.int32
:param time: The time values for each mutation.
:type time: numpy.ndarray, dtype=np.float64
:param derived_state: The flattened derived_state array. Required.
:type derived_state: numpy.ndarray, dtype=np.int8
:param derived_state_offset: The offsets into the ``derived_state`` array.
:type derived_state_offset: numpy.ndarray, dtype=np.uint32.
:param parent: The ID of the parent mutation for each mutation.
:type parent: numpy.ndarray, dtype=np.int32
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each mutation.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
:param metadata_schema: The encoded metadata schema.
"""
self._check_required_args(
site=site,
node=node,
derived_state=derived_state,
derived_state_offset=derived_state_offset,
)
self.ll_table.set_columns(
dict(
site=site,
node=node,
parent=parent,
time=time,
derived_state=derived_state,
derived_state_offset=derived_state_offset,
metadata=metadata,
metadata_offset=metadata_offset,
metadata_schema=metadata_schema,
)
)
def append_columns(
self,
site,
node,
derived_state,
derived_state_offset,
parent=None,
time=None,
metadata=None,
metadata_offset=None,
):
"""
Appends the specified arrays to the end of the columns of this
:class:`MutationTable`. This allows many new rows to be added at once.
The ``site``, ``node``, ``derived_state`` and ``derived_state_offset``
parameters are mandatory, and must be 1D numpy arrays. The
``site`` and ``node`` (also ``time`` and ``parent``, if supplied) arrays
must be of equal length, and determine the number of additional
rows to add to the table.
The ``derived_state`` and ``derived_state_offset`` parameters must
be supplied together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_text_columns` for more information). The
``metadata`` and ``metadata_offset`` parameters must be supplied
together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_binary_columns` for more information) and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param site: The ID of the site each mutation occurs at.
:type site: numpy.ndarray, dtype=np.int32
:param node: The ID of the node each mutation is associated with.
:type node: numpy.ndarray, dtype=np.int32
:param time: The time values for each mutation.
:type time: numpy.ndarray, dtype=np.float64
:param derived_state: The flattened derived_state array. Required.
:type derived_state: numpy.ndarray, dtype=np.int8
:param derived_state_offset: The offsets into the ``derived_state`` array.
:type derived_state_offset: numpy.ndarray, dtype=np.uint32.
:param parent: The ID of the parent mutation for each mutation.
:type parent: numpy.ndarray, dtype=np.int32
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each mutation.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
"""
self.ll_table.append_columns(
dict(
site=site,
node=node,
time=time,
parent=parent,
derived_state=derived_state,
derived_state_offset=derived_state_offset,
metadata=metadata,
metadata_offset=metadata_offset,
)
)
def packset_derived_state(self, derived_states):
"""
Packs the specified list of derived_state values and updates the
``derived_state`` and ``derived_state_offset`` columns. The length
of the derived_states array must be equal to the number of rows in
the table.
:param list(str) derived_states: A list of string derived state values.
"""
packed, offset = util.pack_strings(derived_states)
d = self.asdict()
d["derived_state"] = packed
d["derived_state_offset"] = offset
self.set_columns(**d)
class PopulationTable(BaseTable, MetadataMixin):
"""
A table defining the populations referred to in a tree sequence.
The PopulationTable stores metadata for populations that may be referred to
in the NodeTable and MigrationTable. Note that although nodes
may be associated with populations, this association is stored in
the :class:`NodeTable`: only metadata on each population is stored
in the population table.
:warning: The numpy arrays returned by table attribute accesses are **copies**
of the underlying data. In particular, this means that you cannot edit
the values in the columns by updating the attribute arrays.
**NOTE:** this behaviour may change in future.
:ivar metadata: The flattened array of binary metadata values. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata: numpy.ndarray, dtype=np.int8
:ivar metadata_offset: The array of offsets into the metadata column. See
:ref:`sec_tables_api_binary_columns` for more details.
:vartype metadata_offset: numpy.ndarray, dtype=np.uint32
:ivar metadata_schema: The metadata schema for this table's metadata column
:vartype metadata_schema: tskit.MetadataSchema
"""
column_names = ["metadata", "metadata_offset"]
def __init__(self, max_rows_increment=0, ll_table=None):
if ll_table is None:
ll_table = _tskit.PopulationTable(max_rows_increment=max_rows_increment)
super().__init__(ll_table, PopulationTableRow)
def add_row(self, metadata=None):
"""
Adds a new row to this :class:`PopulationTable` and returns the ID of the
corresponding population. Metadata, if specified, will be validated and encoded
according to the table's
:attr:`metadata_schema<tskit.PopulationTable.metadata_schema>`.
:param object metadata: Any object that is valid metadata for the table's schema.
Defaults to the default metadata value for the table's schema. This is
typically ``{}``. For no schema, ``None``.
:return: The ID of the newly added population.
:rtype: int
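
For example, an illustrative sketch (the JSON schema and the metadata payload
shown are arbitrary choices)::

    import tskit
    populations = tskit.PopulationTable()
    populations.metadata_schema = tskit.MetadataSchema({"codec": "json"})
    pop_id = populations.add_row(metadata={"name": "CEU"})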
"""
if metadata is None:
metadata = self.metadata_schema.empty_value
metadata = self.metadata_schema.validate_and_encode_row(metadata)
return self.ll_table.add_row(metadata=metadata)
def _text_header_and_rows(self, limit=None):
metadata = util.unpack_bytes(self.metadata, self.metadata_offset)
headers = ("id", "metadata")
rows = []
if limit is None or self.num_rows <= limit:
indexes = range(self.num_rows)
else:
indexes = itertools.chain(
range(limit // 2),
[-1],
range(self.num_rows - (limit - (limit // 2)), self.num_rows),
)
for j in indexes:
if j == -1:
rows.append(f"__skipped__{self.num_rows-limit}")
else:
md = base64.b64encode(metadata[j]).decode("utf8")
rows.append((str(j), str(md)))
return headers, rows
def set_columns(self, metadata=None, metadata_offset=None, metadata_schema=None):
"""
Sets the values for each column in this :class:`PopulationTable` using the
values in the specified arrays. Overwrites any data currently stored in the
table.
The ``metadata`` and ``metadata_offset`` parameters must be supplied
together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_binary_columns` for more information) and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each population.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
:param metadata_schema: The encoded metadata schema.
"""
self.ll_table.set_columns(
dict(
metadata=metadata,
metadata_offset=metadata_offset,
metadata_schema=metadata_schema,
)
)
def append_columns(self, metadata=None, metadata_offset=None):
"""
Appends the specified arrays to the end of the columns of this
:class:`PopulationTable`. This allows many new rows to be added at once.
The ``metadata`` and ``metadata_offset`` parameters must be supplied
together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_binary_columns` for more information) and
:ref:`sec_tutorial_metadata_bulk` for an example of how to prepare metadata.
:param metadata: The flattened metadata array. Must be specified along
with ``metadata_offset``. If not specified or None, an empty metadata
value is stored for each population.
:type metadata: numpy.ndarray, dtype=np.int8
:param metadata_offset: The offsets into the ``metadata`` array.
:type metadata_offset: numpy.ndarray, dtype=np.uint32.
"""
self.ll_table.append_columns(
dict(metadata=metadata, metadata_offset=metadata_offset)
)
class ProvenanceTable(BaseTable):
"""
A table recording the provenance (i.e., history) of this table, so that the
origin of the underlying data and sequence of subsequent operations can be
traced. Each row contains a "record" string (recommended format: JSON) and
a timestamp.
.. todo::
The format of the `record` field will be more precisely specified in
the future.
:ivar record: The flattened array containing the record strings.
See :ref:`sec_tables_api_text_columns` for more details.
:vartype record: numpy.ndarray, dtype=np.int8
:ivar record_offset: The array of offsets into the record column. See
:ref:`sec_tables_api_text_columns` for more details.
:vartype record_offset: numpy.ndarray, dtype=np.uint32
:ivar timestamp: The flattened array containing the timestamp strings.
See :ref:`sec_tables_api_text_columns` for more details.
:vartype timestamp: numpy.ndarray, dtype=np.int8
:ivar timestamp_offset: The array of offsets into the timestamp column. See
:ref:`sec_tables_api_text_columns` for more details.
:vartype timestamp_offset: numpy.ndarray, dtype=np.uint32
"""
column_names = ["record", "record_offset", "timestamp", "timestamp_offset"]
def __init__(self, max_rows_increment=0, ll_table=None):
if ll_table is None:
ll_table = _tskit.ProvenanceTable(max_rows_increment=max_rows_increment)
super().__init__(ll_table, ProvenanceTableRow)
def equals(self, other, ignore_timestamps=False):
"""
Returns True if `self` and `other` are equal. By default, two provenance
tables are considered equal if their columns are byte-for-byte identical.
:param other: Another provenance table instance
:param bool ignore_timestamps: If True, exclude the timestamp column
from the comparison.
:return: True if other is equal to this provenance table; False otherwise.
:rtype: bool
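
For example, a brief sketch comparing two tables that differ only in their
timestamps (the record and timestamp strings are arbitrary)::

    import tskit
    a = tskit.ProvenanceTable()
    b = tskit.ProvenanceTable()
    a.add_row(record="{}", timestamp="2020-01-01T00:00:00")
    b.add_row(record="{}", timestamp="2021-01-01T00:00:00")
    assert not a.equals(b)
    assert a.equals(b, ignore_timestamps=True)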
"""
ret = False
if type(other) is type(self):
ret = bool(
self.ll_table.equals(
other.ll_table, ignore_timestamps=ignore_timestamps
)
)
return ret
def add_row(self, record, timestamp=None):
"""
Adds a new row to this ProvenanceTable consisting of the specified record and
timestamp. If timestamp is not specified, it is automatically generated from
the current time.
:param str record: A provenance record, describing the parameters and
environment used to generate the current set of tables.
:param str timestamp: A string timestamp. This should be in ISO8601 form.
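
For example, an illustrative sketch (the record content is arbitrary; any
string is accepted, JSON is simply the recommended format)::

    import json
    import tskit
    provenances = tskit.ProvenanceTable()
    provenances.add_row(record=json.dumps({"program": "my_script"}))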
"""
if timestamp is None:
timestamp = datetime.datetime.now().isoformat()
# Note that the order of the positional arguments has been reversed
# from the low-level module, which is a bit confusing. However, we
# want the default behaviour here to be to add a row to the table at
# the current time as simply as possible.
return self.ll_table.add_row(record=record, timestamp=timestamp)
def set_columns(
self, timestamp=None, timestamp_offset=None, record=None, record_offset=None
):
"""
Sets the values for each column in this :class:`ProvenanceTable` using the
values in the specified arrays. Overwrites any data currently stored in the
table.
The ``timestamp`` and ``timestamp_offset`` parameters must be supplied
together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_binary_columns` for more information). Likewise
for the ``record`` and ``record_offset`` columns.
:param timestamp: The flattened timestamp array. Must be specified along
with ``timestamp_offset``. If not specified or None, an empty timestamp
value is stored for each row.
:type timestamp: numpy.ndarray, dtype=np.int8
:param timestamp_offset: The offsets into the ``timestamp`` array.
:type timestamp_offset: numpy.ndarray, dtype=np.uint32.
:param record: The flattened record array. Must be specified along
with ``record_offset``. If not specified or None, an empty record
            value is stored for each row.
:type record: numpy.ndarray, dtype=np.int8
:param record_offset: The offsets into the ``record`` array.
:type record_offset: numpy.ndarray, dtype=np.uint32.
"""
self.ll_table.set_columns(
dict(
timestamp=timestamp,
timestamp_offset=timestamp_offset,
record=record,
record_offset=record_offset,
)
)
def append_columns(
self, timestamp=None, timestamp_offset=None, record=None, record_offset=None
):
"""
Appends the specified arrays to the end of the columns of this
:class:`ProvenanceTable`. This allows many new rows to be added at once.
The ``timestamp`` and ``timestamp_offset`` parameters must be supplied
together, and meet the requirements for
:ref:`sec_encoding_ragged_columns` (see
:ref:`sec_tables_api_binary_columns` for more information). Likewise
        for the ``record`` and ``record_offset`` columns.
:param timestamp: The flattened timestamp array. Must be specified along
with ``timestamp_offset``. If not specified or None, an empty timestamp
            value is stored for each row.
:type timestamp: numpy.ndarray, dtype=np.int8
:param timestamp_offset: The offsets into the ``timestamp`` array.
:type timestamp_offset: numpy.ndarray, dtype=np.uint32.
:param record: The flattened record array. Must be specified along
with ``record_offset``. If not specified or None, an empty record
            value is stored for each row.
:type record: numpy.ndarray, dtype=np.int8
:param record_offset: The offsets into the ``record`` array.
:type record_offset: numpy.ndarray, dtype=np.uint32.
"""
self.ll_table.append_columns(
dict(
timestamp=timestamp,
timestamp_offset=timestamp_offset,
record=record,
record_offset=record_offset,
)
)
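    # Illustrative sketch (not part of the original module): building the ragged
    # record/timestamp columns from Python strings before calling set_columns()
    # or append_columns(), using the same string-packing helper this module
    # relies on elsewhere (util.pack_strings).
    #
    #     record, record_offset = util.pack_strings(['{"a": 1}', '{"b": 2}'])
    #     timestamp, timestamp_offset = util.pack_strings(
    #         ["2020-01-01T00:00:00", "2020-01-02T00:00:00"]
    #     )
    #     provenance_table.set_columns(
    #         record=record, record_offset=record_offset,
    #         timestamp=timestamp, timestamp_offset=timestamp_offset,
    #     )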
def _text_header_and_rows(self, limit=None):
timestamp = util.unpack_strings(self.timestamp, self.timestamp_offset)
record = util.unpack_strings(self.record, self.record_offset)
headers = ("id", "timestamp", "record")
rows = []
if limit is None or self.num_rows <= limit:
indexes = range(self.num_rows)
else:
indexes = itertools.chain(
range(limit // 2),
[-1],
range(self.num_rows - (limit - (limit // 2)), self.num_rows),
)
for j in indexes:
if j == -1:
rows.append(f"__skipped__{self.num_rows-limit}")
else:
rows.append((str(j), str(timestamp[j]), str(record[j])))
return headers, rows
def packset_record(self, records):
"""
Packs the specified list of record values and updates the
``record`` and ``record_offset`` columns. The length
of the records array must be equal to the number of rows in
the table.
:param list(str) records: A list of string record values.
"""
packed, offset = util.pack_strings(records)
d = self.asdict()
d["record"] = packed
d["record_offset"] = offset
self.set_columns(**d)
def packset_timestamp(self, timestamps):
"""
Packs the specified list of timestamp values and updates the
``timestamp`` and ``timestamp_offset`` columns. The length
of the timestamps array must be equal to the number of rows in
the table.
:param list(str) timestamps: A list of string timestamp values.
"""
packed, offset = util.pack_strings(timestamps)
d = self.asdict()
d["timestamp"] = packed
d["timestamp_offset"] = offset
self.set_columns(**d)
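    # Illustrative sketch (not part of the original module): replacing the whole
    # record column in one call; the list length must equal the number of rows.
    #
    #     provenance_table.packset_record(['{"step": 1}'] * provenance_table.num_rows)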
class TableCollection:
"""
A collection of mutable tables defining a tree sequence. See the
:ref:`sec_data_model` section for definition on the various tables
and how they together define a :class:`TreeSequence`. Arbitrary
data can be stored in a TableCollection, but there are certain
:ref:`requirements <sec_valid_tree_sequence_requirements>` that must be
satisfied for these tables to be interpreted as a tree sequence.
To obtain an immutable :class:`TreeSequence` instance corresponding to the
current state of a ``TableCollection``, please use the :meth:`.tree_sequence`
method.
:ivar individuals: The individual table.
:vartype individuals: IndividualTable
:ivar nodes: The node table.
:vartype nodes: NodeTable
:ivar edges: The edge table.
:vartype edges: EdgeTable
:ivar migrations: The migration table.
:vartype migrations: MigrationTable
:ivar sites: The site table.
:vartype sites: SiteTable
:ivar mutations: The mutation table.
:vartype mutations: MutationTable
:ivar populations: The population table.
:vartype populations: PopulationTable
:ivar provenances: The provenance table.
:vartype provenances: ProvenanceTable
    :ivar indexes: The edge insertion and removal indexes.
    :vartype indexes: TableCollectionIndexes
:ivar sequence_length: The sequence length defining the coordinate
space.
:vartype sequence_length: float
:ivar file_uuid: The UUID for the file this TableCollection is derived
from, or None if not derived from a file.
:vartype file_uuid: str
"""
def __init__(self, sequence_length=0):
self._ll_tables = _tskit.TableCollection(sequence_length)
@property
def individuals(self):
return IndividualTable(ll_table=self._ll_tables.individuals)
@property
def nodes(self):
return NodeTable(ll_table=self._ll_tables.nodes)
@property
def edges(self):
return EdgeTable(ll_table=self._ll_tables.edges)
@property
def migrations(self):
return MigrationTable(ll_table=self._ll_tables.migrations)
@property
def sites(self):
return SiteTable(ll_table=self._ll_tables.sites)
@property
def mutations(self):
return MutationTable(ll_table=self._ll_tables.mutations)
@property
def populations(self):
return PopulationTable(ll_table=self._ll_tables.populations)
@property
def provenances(self):
return ProvenanceTable(ll_table=self._ll_tables.provenances)
@property
def indexes(self):
indexes = self._ll_tables.indexes
return TableCollectionIndexes(**indexes)
@indexes.setter
def indexes(self, indexes):
self._ll_tables.indexes = indexes.asdict()
@property
def sequence_length(self):
return self._ll_tables.sequence_length
@sequence_length.setter
def sequence_length(self, sequence_length):
self._ll_tables.sequence_length = sequence_length
@property
def file_uuid(self):
return self._ll_tables.file_uuid
@property
def metadata_schema(self) -> metadata.MetadataSchema:
"""
The :class:`tskit.MetadataSchema` for this TableCollection.
"""
return metadata.parse_metadata_schema(self._ll_tables.metadata_schema)
@metadata_schema.setter
def metadata_schema(self, schema: metadata.MetadataSchema) -> None:
# Check the schema is a valid schema instance by roundtripping it.
metadata.parse_metadata_schema(repr(schema))
self._ll_tables.metadata_schema = repr(schema)
@property
def metadata(self) -> Any:
"""
The decoded metadata for this TableCollection.
"""
return self.metadata_schema.decode_row(self._ll_tables.metadata)
@metadata.setter
def metadata(self, metadata: Any) -> None:
self._ll_tables.metadata = self.metadata_schema.validate_and_encode_row(
metadata
)
@property
def metadata_bytes(self) -> Any:
"""
The raw bytes of metadata for this TableCollection
"""
return self._ll_tables.metadata
def asdict(self):
"""
Returns a dictionary representation of this TableCollection.
Note: the semantics of this method changed at tskit 0.1.0. Previously a
map of table names to the tables themselves was returned.
"""
ret = {
"encoding_version": (1, 3),
"sequence_length": self.sequence_length,
"metadata_schema": repr(self.metadata_schema),
"metadata": self.metadata_schema.encode_row(self.metadata),
"individuals": self.individuals.asdict(),
"nodes": self.nodes.asdict(),
"edges": self.edges.asdict(),
"migrations": self.migrations.asdict(),
"sites": self.sites.asdict(),
"mutations": self.mutations.asdict(),
"populations": self.populations.asdict(),
"provenances": self.provenances.asdict(),
"indexes": self.indexes.asdict(),
}
return ret
@property
def name_map(self):
"""
Returns a dictionary mapping table names to the corresponding
table instances. For example, the returned dictionary will contain the
key "edges" that maps to an :class:`.EdgeTable` instance.
"""
return {
"edges": self.edges,
"individuals": self.individuals,
"migrations": self.migrations,
"mutations": self.mutations,
"nodes": self.nodes,
"populations": self.populations,
"provenances": self.provenances,
"sites": self.sites,
}
@property
def nbytes(self) -> int:
"""
Returns the total number of bytes required to store the data
in this table collection. Note that this may not be equal to
the actual memory footprint.
"""
return sum(
(
8, # sequence_length takes 8 bytes
len(self.metadata_bytes),
len(repr(self.metadata_schema).encode()),
self.indexes.nbytes,
sum(table.nbytes for table in self.name_map.values()),
)
)
def __banner(self, title):
width = 60
line = "#" * width
title_line = f"# {title}"
title_line += " " * (width - len(title_line) - 1)
title_line += "#"
return line + "\n" + title_line + "\n" + line + "\n"
def __str__(self):
s = self.__banner("Individuals")
s += str(self.individuals) + "\n"
s += self.__banner("Nodes")
s += str(self.nodes) + "\n"
s += self.__banner("Edges")
s += str(self.edges) + "\n"
s += self.__banner("Sites")
s += str(self.sites) + "\n"
s += self.__banner("Mutations")
s += str(self.mutations) + "\n"
s += self.__banner("Migrations")
s += str(self.migrations) + "\n"
s += self.__banner("Populations")
s += str(self.populations) + "\n"
s += self.__banner("Provenances")
s += str(self.provenances)
return s
def equals(
self,
other,
*,
ignore_metadata=False,
ignore_ts_metadata=False,
ignore_provenance=False,
ignore_timestamps=False,
):
"""
Returns True if `self` and `other` are equal. By default, two table
collections are considered equal if their
- ``sequence_length`` properties are identical;
- top-level tree sequence metadata and metadata schemas are
byte-wise identical;
- constituent tables are byte-wise identical.
Some of the requirements in this definition can be relaxed using the
parameters, which can be used to remove certain parts of the data model
from the comparison.
Table indexes are not considered in the equality comparison.
:param TableCollection other: Another table collection.
:param bool ignore_metadata: If True *all* metadata and metadata schemas
will be excluded from the comparison. This includes the top-level
tree sequence and constituent table metadata (default=False).
:param bool ignore_ts_metadata: If True the top-level tree sequence
metadata and metadata schemas will be excluded from the comparison.
If ``ignore_metadata`` is True, this parameter has no effect.
:param bool ignore_provenance: If True the provenance tables are
not included in the comparison.
:param bool ignore_timestamps: If True the provenance timestamp column
is ignored in the comparison. If ``ignore_provenance`` is True, this
parameter has no effect.
:return: True if other is equal to this table collection; False otherwise.
:rtype: bool
"""
ret = False
if type(other) is type(self):
ret = bool(
self._ll_tables.equals(
other._ll_tables,
ignore_metadata=bool(ignore_metadata),
ignore_ts_metadata=bool(ignore_ts_metadata),
ignore_provenance=bool(ignore_provenance),
ignore_timestamps=bool(ignore_timestamps),
)
)
return ret
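    # Illustrative sketch (not part of the original module): two otherwise
    # identical table collections differ once one gains an extra provenance row,
    # unless provenance is excluded from the comparison.
    #
    #     t1 = tables.copy()
    #     t2 = tables.copy()
    #     t2.provenances.add_row("{}")
    #     assert not t1.equals(t2)
    #     assert t1.equals(t2, ignore_provenance=True)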
def __eq__(self, other):
return self.equals(other)
def __getstate__(self):
return self.asdict()
@classmethod
def load(cls, file_or_path):
        file, local_file = util.convert_file_like_to_open_file(file_or_path, "rb")
        ll_tc = _tskit.TableCollection(1)
        try:
            ll_tc.load(file)
        finally:
            # Close the file only if this method opened it (mirrors dump() below).
            if local_file:
                file.close()
        tc = TableCollection(1)
        tc._ll_tables = ll_tc
        return tc
def dump(self, file_or_path):
"""
Writes the table collection to the specified path or file object.
:param str file_or_path: The file object or path to write the TreeSequence to.
"""
file, local_file = util.convert_file_like_to_open_file(file_or_path, "wb")
try:
self._ll_tables.dump(file)
finally:
if local_file:
file.close()
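    # Illustrative round-trip sketch (not part of the original module; the file
    # name is arbitrary):
    #
    #     tables.dump("example.tables")
    #     restored = TableCollection.load("example.tables")
    #     assert restored == tables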
# Unpickle support
def __setstate__(self, state):
self.__init__(state["sequence_length"])
self.metadata_schema = tskit.parse_metadata_schema(state["metadata_schema"])
self.metadata = self.metadata_schema.decode_row(state["metadata"])
self.individuals.set_columns(**state["individuals"])
self.nodes.set_columns(**state["nodes"])
self.edges.set_columns(**state["edges"])
self.migrations.set_columns(**state["migrations"])
self.sites.set_columns(**state["sites"])
self.mutations.set_columns(**state["mutations"])
self.populations.set_columns(**state["populations"])
self.provenances.set_columns(**state["provenances"])
@classmethod
    def fromdict(cls, tables_dict):
tables = TableCollection(tables_dict["sequence_length"])
try:
tables.metadata_schema = tskit.parse_metadata_schema(
tables_dict["metadata_schema"]
)
except KeyError:
pass
try:
tables.metadata = tables.metadata_schema.decode_row(tables_dict["metadata"])
except KeyError:
pass
tables.individuals.set_columns(**tables_dict["individuals"])
tables.nodes.set_columns(**tables_dict["nodes"])
tables.edges.set_columns(**tables_dict["edges"])
tables.migrations.set_columns(**tables_dict["migrations"])
tables.sites.set_columns(**tables_dict["sites"])
tables.mutations.set_columns(**tables_dict["mutations"])
tables.populations.set_columns(**tables_dict["populations"])
tables.provenances.set_columns(**tables_dict["provenances"])
        # Indexes must be last as otherwise the check for their consistency will fail
try:
tables.indexes = TableCollectionIndexes(**tables_dict["indexes"])
except KeyError:
pass
return tables
def copy(self):
"""
Returns a deep copy of this TableCollection.
:return: A deep copy of this TableCollection.
:rtype: .TableCollection
"""
return TableCollection.fromdict(self.asdict())
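    # Illustrative sketch (not part of the original module): asdict()/fromdict()
    # form a dict round trip, which is exactly how copy() above is implemented.
    #
    #     d = tables.asdict()
    #     same = TableCollection.fromdict(d)
    #     assert same == tables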
def tree_sequence(self):
"""
Returns a :class:`TreeSequence` instance with the structure defined by the
tables in this :class:`TableCollection`. If the table collection is not
in canonical form (i.e., does not meet sorting requirements) or cannot be
interpreted as a tree sequence an exception is raised. The
:meth:`.sort` method may be used to ensure that input sorting requirements
are met. If the table collection does not have indexes they will be
built.
:return: A :class:`TreeSequence` instance reflecting the structures
defined in this set of tables.
:rtype: .TreeSequence
"""
if not self.has_index():
self.build_index()
return tskit.TreeSequence.load_tables(self)
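    # Minimal end-to-end sketch (not part of the original module): build a tiny
    # one-tree table collection by hand and convert it to a TreeSequence. The
    # NodeTable/EdgeTable add_row signatures are assumed to be the standard
    # tskit ones.
    #
    #     tables = tskit.TableCollection(sequence_length=10)
    #     c0 = tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
    #     c1 = tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
    #     p = tables.nodes.add_row(time=1)
    #     tables.edges.add_row(left=0, right=10, parent=p, child=c0)
    #     tables.edges.add_row(left=0, right=10, parent=p, child=c1)
    #     tables.sort()
    #     ts = tables.tree_sequence()
    #     assert ts.num_trees == 1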
def simplify(
self,
samples=None,
*,
reduce_to_site_topology=False,
filter_populations=True,
filter_individuals=True,
filter_sites=True,
keep_unary=False,
keep_unary_in_individuals=None,
keep_input_roots=False,
record_provenance=True,
filter_zero_mutation_sites=None, # Deprecated alias for filter_sites
):
"""
Simplifies the tables in place to retain only the information necessary
to reconstruct the tree sequence describing the given ``samples``.
This will change the ID of the nodes, so that the node
``samples[k]`` will have ID ``k`` in the result. The resulting
NodeTable will have only the first ``len(samples)`` nodes marked
as samples. The mapping from node IDs in the current set of tables to
their equivalent values in the simplified tables is also returned as a
numpy array. If an array ``a`` is returned by this function and ``u``
is the ID of a node in the input table, then ``a[u]`` is the ID of this
node in the output table. For any node ``u`` that is not mapped into
the output tables, this mapping will equal ``-1``.
Tables operated on by this function must: be sorted (see
:meth:`TableCollection.sort`), have children be born strictly after their
parents, and the intervals on which any node is a child must be
disjoint. Other than this the tables need not satisfy remaining
requirements to specify a valid tree sequence (but the resulting tables
will).
This is identical to :meth:`TreeSequence.simplify` but acts *in place* to
alter the data in this :class:`TableCollection`. Please see the
:meth:`TreeSequence.simplify` method for a description of the remaining
parameters.
:param list[int] samples: A list of node IDs to retain as samples. They
need not be nodes marked as samples in the original tree sequence, but
will constitute the entire set of samples in the returned tree sequence.
If not specified or None, use all nodes marked with the IS_SAMPLE flag.
The list may be provided as a numpy array (or array-like) object
(dtype=np.int32).
:param bool reduce_to_site_topology: Whether to reduce the topology down
to the trees that are present at sites. (Default: False).
:param bool filter_populations: If True, remove any populations that are
not referenced by nodes after simplification; new population IDs are
allocated sequentially from zero. If False, the population table will
not be altered in any way. (Default: True)
:param bool filter_individuals: If True, remove any individuals that are
not referenced by nodes after simplification; new individual IDs are
allocated sequentially from zero. If False, the individual table will
not be altered in any way. (Default: True)
:param bool filter_sites: If True, remove any sites that are
not referenced by mutations after simplification; new site IDs are
allocated sequentially from zero. If False, the site table will not
be altered in any way. (Default: True)
:param bool keep_unary: If True, preserve unary nodes (i.e. nodes with
exactly one child) that exist on the path from samples to root.
(Default: False)
:param bool keep_unary_in_individuals: If True, preserve unary nodes
that exist on the path from samples to root, but only if they are
associated with an individual in the individuals table. Cannot be
specified at the same time as ``keep_unary``. (Default: ``None``,
equivalent to False)
:param bool keep_input_roots: Whether to retain history ancestral to the
MRCA of the samples. If ``False``, no topology older than the MRCAs of the
samples will be included. If ``True`` the roots of all trees in the returned
tree sequence will be the same roots as in the original tree sequence.
(Default: False)
:param bool record_provenance: If True, record details of this call to
simplify in the returned tree sequence's provenance information
(Default: True).
:param bool filter_zero_mutation_sites: Deprecated alias for ``filter_sites``.
:return: A numpy array mapping node IDs in the input tables to their
corresponding node IDs in the output tables.
:rtype: numpy.ndarray (dtype=np.int32)
"""
if filter_zero_mutation_sites is not None:
# Deprecated in msprime 0.6.1.
warnings.warn(
"filter_zero_mutation_sites is deprecated; use filter_sites instead",
FutureWarning,
)
filter_sites = filter_zero_mutation_sites
if samples is None:
flags = self.nodes.flags
samples = np.where(np.bitwise_and(flags, _tskit.NODE_IS_SAMPLE) != 0)[
0
].astype(np.int32)
else:
samples = util.safe_np_int_cast(samples, np.int32)
if keep_unary_in_individuals is None:
keep_unary_in_individuals = False
node_map = self._ll_tables.simplify(
samples,
filter_sites=filter_sites,
filter_individuals=filter_individuals,
filter_populations=filter_populations,
reduce_to_site_topology=reduce_to_site_topology,
keep_unary=keep_unary,
keep_unary_in_individuals=keep_unary_in_individuals,
keep_input_roots=keep_input_roots,
)
if record_provenance:
# TODO replace with a version of https://github.com/tskit-dev/tskit/pull/243
# TODO also make sure we convert all the arguments so that they are
# definitely JSON encodable.
parameters = {"command": "simplify", "TODO": "add simplify parameters"}
self.provenances.add_row(
record=json.dumps(provenance.get_provenance_dict(parameters))
)
return node_map
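    # Illustrative sketch (not part of the original module): simplify down to two
    # samples and translate an old node ID through the returned map.
    #
    #     node_map = tables.simplify(samples=[0, 1])
    #     new_id = node_map[0]   # -1 (tskit.NULL) if node 0 was not retained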
def link_ancestors(self, samples, ancestors):
"""
Returns an :class:`EdgeTable` instance describing a subset of the genealogical
relationships between the nodes in ``samples`` and ``ancestors``.
Each row ``parent, child, left, right`` in the output table indicates that
``child`` has inherited the segment ``[left, right)`` from ``parent`` more
recently than from any other node in these lists.
In particular, suppose ``samples`` is a list of nodes such that ``time`` is 0
for each node, and ``ancestors`` is a list of nodes such that ``time`` is
greater than 0.0 for each node. Then each row of the output table will show
an interval ``[left, right)`` over which a node in ``samples`` has inherited
most recently from a node in ``ancestors``, or an interval over which one of
these ``ancestors`` has inherited most recently from another node in
``ancestors``.
The following table shows which ``parent->child`` pairs will be shown in the
output of ``link_ancestors``.
A node is a relevant descendant on a given interval if it also appears somewhere
        in the ``parent`` column of the output table.
======================== ===============================================
Type of relationship Shown in output of ``link_ancestors``
------------------------ -----------------------------------------------
``ancestor->sample`` Always
``ancestor1->ancestor2`` Only if ``ancestor2`` has a relevant descendant
``sample1->sample2`` Always
``sample->ancestor`` Only if ``ancestor`` has a relevant descendant
======================== ===============================================
The difference between ``samples`` and ``ancestors`` is that information about
the ancestors of a node in ``ancestors`` will only be retained if it also has a
relevant descendant, while information about the ancestors of a node in
``samples`` will always be retained.
The node IDs in ``parent`` and ``child`` refer to the IDs in the node table
        of the input tree sequence.
The supplied nodes must be non-empty lists of the node IDs in the tree sequence:
in particular, they do not have to be *samples* of the tree sequence. The lists
of ``samples`` and ``ancestors`` may overlap, although adding a node from
``samples`` to ``ancestors`` will not change the output. So, setting ``samples``
and ``ancestors`` to the same list of nodes will find all genealogical
relationships within this list.
If none of the nodes in ``ancestors`` or ``samples`` are ancestral to ``samples``
anywhere in the tree sequence, an empty table will be returned.
:param list[int] samples: A list of node IDs to retain as samples.
:param list[int] ancestors: A list of node IDs to use as ancestors.
:return: An :class:`EdgeTable` instance displaying relationships between
the `samples` and `ancestors`.
"""
samples = util.safe_np_int_cast(samples, np.int32)
ancestors = util.safe_np_int_cast(ancestors, np.int32)
ll_edge_table = self._ll_tables.link_ancestors(samples, ancestors)
return EdgeTable(ll_table=ll_edge_table)
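    # Illustrative sketch (not part of the original module): find which genome
    # segments the sample nodes inherit directly from a chosen ancestor node
    # (node 5 here is a hypothetical ID).
    #
    #     edges = tables.link_ancestors(samples=[0, 1], ancestors=[5])
    #     for left, right, parent, child in zip(
    #         edges.left, edges.right, edges.parent, edges.child
    #     ):
    #         print(child, "inherits", (left, right), "from", parent)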
def map_ancestors(self, *args, **kwargs):
# A deprecated alias for link_ancestors()
return self.link_ancestors(*args, **kwargs)
def sort(self, edge_start=0):
"""
Sorts the tables in place. This ensures that all tree sequence ordering
requirements listed in the
:ref:`sec_valid_tree_sequence_requirements` section are met, as long
as each site has at most one mutation (see below).
If the ``edge_start`` parameter is provided, this specifies the index
in the edge table where sorting should start. Only rows with index
greater than or equal to ``edge_start`` are sorted; rows before this index
are not affected. This parameter is provided to allow for efficient sorting
when the user knows that the edges up to a given index are already sorted.
The individual, node, population and provenance tables are not affected
by this method.
Edges are sorted as follows:
- time of parent, then
- parent node ID, then
- child node ID, then
- left endpoint.
Note that this sorting order exceeds the
:ref:`edge sorting requirements <sec_edge_requirements>` for a valid
tree sequence. For a valid tree sequence, we require that all edges for a
given parent ID are adjacent, but we do not require that they be listed in
sorted order.
Sites are sorted by position, and sites with the same position retain
their relative ordering.
Mutations are sorted by site ID, and within the same site are sorted by time.
Those with equal or unknown time retain their relative ordering. This does not
currently rearrange tables so that mutations occur after their mutation parents,
which is a requirement for valid tree sequences.
Migrations are sorted by ``time``, ``source``, ``dest``, ``left`` and
``node`` values. This defines a total sort order, such that any permutation
of a valid migration table will be sorted into the same output order.
Note that this sorting order exceeds the
:ref:`migration sorting requirements <sec_migration_requirements>` for a
valid tree sequence, which only requires that migrations are sorted by
time value.
:param int edge_start: The index in the edge table where sorting starts
(default=0; must be <= len(edges)).
"""
self._ll_tables.sort(edge_start)
# TODO add provenance
def canonicalise(self, remove_unreferenced=None):
"""
This puts the tables in *canonical* form - to do this, the individual
and population tables are sorted by the first node that refers to each
        (see :meth:`TreeSequence.subset`). Then, the remaining tables are sorted
as in :meth:`.sort`, with the modification that mutations are sorted by
site, then time, then number of descendant mutations (ensuring that
parent mutations occur before children), then node, then original order
in the tables. This ensures that any two tables with the same
information should be identical after canonical sorting.
        By default, the method removes sites, individuals, and populations that
        are not referenced (sites by mutations; individuals and populations by
        nodes). If you wish
to keep these, pass ``remove_unreferenced=False``, but note that
unreferenced individuals and populations are put at the end of the tables
in their original order.
.. seealso::
:meth:`.sort` for sorting edges, mutations, and sites, and
:meth:`.subset` for reordering nodes, individuals, and populations.
:param bool remove_unreferenced: Whether to remove unreferenced sites,
individuals, and populations (default=True).
"""
remove_unreferenced = (
True if remove_unreferenced is None else remove_unreferenced
)
self._ll_tables.canonicalise(remove_unreferenced=remove_unreferenced)
# TODO add provenance
def compute_mutation_parents(self):
"""
Modifies the tables in place, computing the ``parent`` column of the
mutation table. For this to work, the node and edge tables must be
valid, and the site and mutation tables must be sorted (see
:meth:`TableCollection.sort`). This will produce an error if mutations
are not sorted (i.e., if a mutation appears before its mutation parent)
*unless* the two mutations occur on the same branch, in which case
there is no way to detect the error.
The ``parent`` of a given mutation is the ID of the next mutation
encountered traversing the tree upwards from that mutation, or
``NULL`` if there is no such mutation.
        .. note:: This method does not check that all mutations result
            in a change of state, as required; see :ref:`sec_mutation_requirements`.
"""
self._ll_tables.compute_mutation_parents()
# TODO add provenance
def compute_mutation_times(self):
"""
Modifies the tables in place, computing valid values for the ``time`` column of
the mutation table. For this to work, the node and edge tables must be
        valid, and the site and mutation tables must be sorted and indexed (see
:meth:`TableCollection.sort` and :meth:`TableCollection.build_index`).
For a single mutation on an edge at a site, the ``time`` assigned to a mutation
by this method is the mid-point between the times of the nodes above and below
the mutation. In the case where there is more than one mutation on an edge for
a site, the times are evenly spread along the edge. For mutations that are
above a root node, the time of the root node is assigned.
The mutation table will be sorted if the new times mean that the original order
is no longer valid.
"""
self._ll_tables.compute_mutation_times()
# TODO add provenance
def deduplicate_sites(self):
"""
Modifies the tables in place, removing entries in the site table with
duplicate ``position`` (and keeping only the *first* entry for each
site), and renumbering the ``site`` column of the mutation table
appropriately. This requires the site table to be sorted by position.
.. warning:: This method does not sort the tables afterwards, so
mutations may no longer be sorted by time.
"""
self._ll_tables.deduplicate_sites()
# TODO add provenance
def delete_sites(self, site_ids, record_provenance=True):
"""
Remove the specified sites entirely from the sites and mutations tables in this
collection. This is identical to :meth:`TreeSequence.delete_sites` but acts
*in place* to alter the data in this :class:`TableCollection`.
:param list[int] site_ids: A list of site IDs specifying the sites to remove.
:param bool record_provenance: If ``True``, add details of this operation
to the provenance table in this TableCollection. (Default: ``True``).
"""
keep_sites = np.ones(len(self.sites), dtype=bool)
site_ids = util.safe_np_int_cast(site_ids, np.int32)
if np.any(site_ids < 0) or np.any(site_ids >= len(self.sites)):
raise ValueError("Site ID out of bounds")
keep_sites[site_ids] = 0
new_as, new_as_offset = keep_with_offset(
keep_sites, self.sites.ancestral_state, self.sites.ancestral_state_offset
)
new_md, new_md_offset = keep_with_offset(
keep_sites, self.sites.metadata, self.sites.metadata_offset
)
self.sites.set_columns(
position=self.sites.position[keep_sites],
ancestral_state=new_as,
ancestral_state_offset=new_as_offset,
metadata=new_md,
metadata_offset=new_md_offset,
)
# We also need to adjust the mutations table, as it references into sites
keep_mutations = keep_sites[self.mutations.site]
new_ds, new_ds_offset = keep_with_offset(
keep_mutations,
self.mutations.derived_state,
self.mutations.derived_state_offset,
)
new_md, new_md_offset = keep_with_offset(
keep_mutations, self.mutations.metadata, self.mutations.metadata_offset
)
# Site numbers will have changed
site_map = np.cumsum(keep_sites, dtype=self.mutations.site.dtype) - 1
# Mutation numbers will change, so the parent references need altering
mutation_map = np.cumsum(keep_mutations, dtype=self.mutations.parent.dtype) - 1
# Map parent == -1 to -1, and check this has worked (assumes tskit.NULL == -1)
mutation_map = np.append(mutation_map, -1).astype(self.mutations.parent.dtype)
assert mutation_map[tskit.NULL] == tskit.NULL
self.mutations.set_columns(
site=site_map[self.mutations.site[keep_mutations]],
node=self.mutations.node[keep_mutations],
time=self.mutations.time[keep_mutations],
derived_state=new_ds,
derived_state_offset=new_ds_offset,
parent=mutation_map[self.mutations.parent[keep_mutations]],
metadata=new_md,
metadata_offset=new_md_offset,
)
if record_provenance:
# TODO replace with a version of https://github.com/tskit-dev/tskit/pull/243
parameters = {"command": "delete_sites", "TODO": "add parameters"}
self.provenances.add_row(
record=json.dumps(provenance.get_provenance_dict(parameters))
)
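    # Illustrative sketch (not part of the original module): drop every site
    # whose position falls in [5, 10) while leaving the other tables untouched.
    #
    #     positions = tables.sites.position
    #     to_delete = np.where((positions >= 5) & (positions < 10))[0]
    #     tables.delete_sites(to_delete)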
def delete_intervals(self, intervals, simplify=True, record_provenance=True):
"""
Delete all information from this set of tables which lies *within* the
specified list of genomic intervals. This is identical to
:meth:`TreeSequence.delete_intervals` but acts *in place* to alter
the data in this :class:`TableCollection`.
        :param array_like intervals: A list of (start, end) pairs describing the
genomic intervals to delete. Intervals must be non-overlapping and
in increasing order. The list of intervals must be interpretable as a
2D numpy array with shape (N, 2), where N is the number of intervals.
:param bool simplify: If True, run simplify on the tables so that nodes
no longer used are discarded. (Default: True).
:param bool record_provenance: If ``True``, add details of this operation
to the provenance table in this TableCollection. (Default: ``True``).
"""
self.keep_intervals(
util.negate_intervals(intervals, 0, self.sequence_length),
simplify=simplify,
record_provenance=False,
)
if record_provenance:
parameters = {"command": "delete_intervals", "TODO": "add parameters"}
self.provenances.add_row(
record=json.dumps(provenance.get_provenance_dict(parameters))
)
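    # Illustrative sketch (not part of the original module): for a table
    # collection with sequence_length >= 4, these two calls are complementary
    # ways of expressing the same kind of edit.
    #
    #     tables.delete_intervals([[2.0, 4.0]])   # remove topology inside [2, 4)
    #     tables.keep_intervals([[0.0, 2.0]])     # keep only topology in [0, 2)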
def keep_intervals(self, intervals, simplify=True, record_provenance=True):
"""
Delete all information from this set of tables which lies *outside* the
specified list of genomic intervals. This is identical to
:meth:`TreeSequence.keep_intervals` but acts *in place* to alter
the data in this :class:`TableCollection`.
        :param array_like intervals: A list of (start, end) pairs describing the
genomic intervals to keep. Intervals must be non-overlapping and
in increasing order. The list of intervals must be interpretable as a
2D numpy array with shape (N, 2), where N is the number of intervals.
:param bool simplify: If True, run simplify on the tables so that nodes
no longer used are discarded. Must be ``False`` if input tree sequence
includes migrations. (Default: True).
:param bool record_provenance: If ``True``, add details of this operation
to the provenance table in this TableCollection. (Default: ``True``).
"""
intervals = util.intervals_to_np_array(intervals, 0, self.sequence_length)
edges = self.edges.copy()
self.edges.clear()
migrations = self.migrations.copy()
self.migrations.clear()
keep_sites = np.repeat(False, self.sites.num_rows)
for s, e in intervals:
curr_keep_sites = np.logical_and(
self.sites.position >= s, self.sites.position < e
)
keep_sites = np.logical_or(keep_sites, curr_keep_sites)
keep_edges = np.logical_not(
np.logical_or(edges.right <= s, edges.left >= e)
)
metadata, metadata_offset = keep_with_offset(
keep_edges, edges.metadata, edges.metadata_offset
)
self.edges.append_columns(
                left=np.fmax(s, edges.left[keep_edges]),
import pytest
import numpy as np
from numpy.testing import assert_equal
import axopy.features as features
@pytest.fixture
def array_2d():
return np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
@pytest.fixture
def array_1d():
return np.array([1, 2, 3, 4, 5])
def test_ensure_2d(array_1d, array_2d):
assert_equal(features.util.ensure_2d(array_2d), array_2d)
assert features.util.ensure_2d(array_1d).ndim == 2
@pytest.mark.parametrize('func', [
features.util.inverted_t_window,
features.util.trapezoidal_window,
])
def test_window_func_length(func):
w = func(10)
assert len(w) == 10
def test_rolling_window_1d(array_1d):
out = np.array([[1, 2], [2, 3], [3, 4], [4, 5]])
assert_equal(features.util.rolling_window(array_1d, 2), out)
def test_rolling_window_2d(array_2d):
out = np.array([[[1, 2], [2, 3], [3, 4]], [[5, 6], [6, 7], [7, 8]]])
assert_equal(features.util.rolling_window(array_2d, 2), out)
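# Shape sketch (not from the original test suite): for an input of shape (..., n)
# and window length w, rolling_window is expected to return shape
# (..., n - w + 1, w), e.g.
#
#     x = np.arange(5)                               # shape (5,)
#     features.util.rolling_window(x, 3).shape       # -> (3, 3)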
def test_inverted_t_window():
# default params (n = 8)
truth = np.array([0.5, 1, 1, 1, 1, 1, 0.5, 0.5])
w = features.util.inverted_t_window(8)
assert_equal(w, truth)
# different amplitude (n = 9)
truth = np.array([0.3, 0.3, 1, 1, 1, 1, 0.3, 0.3, 0.3])
w = features.util.inverted_t_window(9, a=0.3)
assert_equal(w, truth)
# different notch time (n = 100)
truth = np.hstack([9*[0.5], np.ones(100-19), 10*[0.5]])
w = features.util.inverted_t_window(100, p=0.1)
assert_equal(w, truth)
def test_trapezoidal_window():
# default params
truth = np.array([0.5, 1, 1, 1, 1, 1, 0.5, 0])
w = features.util.trapezoidal_window(8)
assert_equal(w, truth)
# non-default ramp time
truth = np.array([1/3., 2/3., 1, 1, 1, 1, 2/3., 1/3., 0])
w = features.util.trapezoidal_window(9, p=1/3.)
assert_equal(w, truth)
@pytest.mark.parametrize('func', [
features.mean_absolute_value,
features.mean_value,
features.waveform_length,
features.wilson_amplitude,
features.zero_crossings,
features.slope_sign_changes,
features.root_mean_square,
features.integrated_emg,
features.var,
features.logvar,
features.skewness,
features.kurtosis,
features.sample_entropy
])
def test_feature_io(func):
"""Make sure feature function gets 1D and 2D IO correct."""
n = 100
c = 3
x_n = np.random.randn(n)
x_cn = np.random.randn(c, n)
x_nc = np.random.randn(n, c)
assert not isinstance(func(x_n), np.ndarray) # scalar
assert func(x_n, keepdims=True).shape == (1,)
assert func(x_cn).shape == (c,)
assert func(x_cn, keepdims=True).shape == (c, 1)
assert func(x_nc, axis=0).shape == (c,)
assert func(x_nc, axis=0, keepdims=True).shape == (1, c)
def test_mav():
x = np.array([[0, 2], [0, -4]])
truth = np.array([1, 2])
assert_equal(features.mean_absolute_value(x), truth)
def test_mav1():
x = np.vstack([np.ones(8), np.zeros(8)])
# weights should be [0.5, 1, 1, 1, 1, 1, 0.5, 0.5]
truth = np.array([0.8125, 0])
assert_equal(features.mean_absolute_value(x, weights='mav1'), truth)
def test_mav2():
x = np.vstack([np.ones(8), np.zeros(8)])
# weights should be [0.5, 1, 1, 1, 1, 1, 0.5, 0]
truth = np.array([0.75, 0])
assert_equal(features.mean_absolute_value(x, weights='mav2'), truth)
def test_mav_custom_weights():
x = np.ones((4, 10))
w = np.zeros(x.shape[1])
w[0:2] = 0.4
truth = (2*0.4/x.shape[1])*np.ones(x.shape[0])
assert_equal(features.mean_absolute_value(x, weights=w), truth)
def test_mav_bad_weights():
# weights not one of the built-in types of MAV
with pytest.raises(ValueError):
features.mean_absolute_value(np.zeros(2), weights='asdf')
def test_mav_bad_custom_weights():
# custom weights not the same length as the input data
x = np.zeros((4, 10))
w = np.zeros(5)
with pytest.raises(ValueError):
features.mean_absolute_value(x, weights=w)
def test_mv():
x = np.array([[0, 2], [0, -4]])
truth = np.array([1, -2])
assert_equal(features.mean_value(x), truth)
def test_wl():
x = np.array([[0, 1, 1, -1], [-1, 2.4, 0, 1]])
truth = np.array([3, 6.8])
assert_equal(features.waveform_length(x), truth)
def test_wamp():
x = np.array([[1., 1.3, 1.4, -0.4], [0.2, 0.8, -0.2, 0.2]])
thresh = 0.5
truth = np.array([1, 2])
assert_equal(features.wilson_amplitude(x, thresh), truth)
def test_zc():
x = np.array([[1, -1, -0.5, 0.2], [1, -1, 1, -1]])
# zero threshold
truth_nothresh = np.array([2, 3])
assert_equal(features.zero_crossings(x), truth_nothresh)
# threshold of 1
truth_thresh = np.array([1, 3])
assert_equal(features.zero_crossings(x, threshold=1), truth_thresh)
def test_ssc():
x = np.array([[1, 2, 1.1, 2, 1.2], [1, -1, -0.5, -1.2, 2]])
# zero threshold
    truth_nothresh = np.array([3, 3])
#!/usr/bin/env python
# PROGRAM: plot_sst.py
# ----------------------------------------------------------------------------------
# Version 0.18
# 19 August, 2019
# michael.taylor AT reading DOT ac DOT uk
# PYTHON DEBUGGER CONTROL:
#------------------------
# import os; os._exit(0)
# import ipdb
# ipdb.set_trace()
import os.path
import optparse
from optparse import OptionParser
import sys
import numpy as np
import xarray
import pandas as pd
from pandas import Series, DataFrame  # Panel is not used here (removed in pandas 1.0)
import seaborn as sns; sns.set(style="darkgrid")
import datetime
import matplotlib
import matplotlib.pyplot as plt; plt.close("all")
#import typhon
#from typhon.plots import plot_bitfield
#cmap = 'tab20c' # https://matplotlib.org/users/colormaps
def calc_median(counts,bins):
"""
# -------------------------------
    # CALCULATE MEDIAN FROM HISTOGRAM
# -------------------------------
# M_estimated ~ L_m + [ ( N/2 - F_{m-1} ) / f_m] * c
#
# where,
#
# L_m =lower limit of the median bar
# N = is the total number of observations
# F_{m-1} = cumulative frequency (total number of observations) in all bars below the median bar
# f_m = frequency of the median bar
# c = median bar width
"""
M = 0
counts_cumsum = counts.cumsum()
counts_half = counts_cumsum[-1]/2.0
for i in np.arange(0,bins.shape[0]-1):
counts_l = counts_cumsum[i]
counts_r = counts_cumsum[i+1]
if (counts_half >= counts_l) & (counts_half < counts_r):
c = bins[1]-bins[0]
L_m = bins[i+1]
F_m_minus_1 = counts_cumsum[i]
f_m = counts[i+1]
M = L_m + ( (counts_half - F_m_minus_1) / f_m ) * c
return M
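# Worked example (illustrative, not part of the original script). The plotting
# functions below pass `bins` with the same length as `counts` (left bin edges),
# so for counts = [1, 3, 2] over unit-width bins starting at 0, 1, 2 we have
# N/2 = 3, L_m = 1, F_{m-1} = 1, f_m = 3, c = 1 and hence M = 1 + 2/3 ~ 1.67:
#
#     counts = np.array([1, 3, 2])
#     bins = np.array([0.0, 1.0, 2.0])
#     calc_median(counts, bins)   # -> 1.666...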
def plot_n_sst(times,n_sst_q3,n_sst_q4,n_sst_q5):
"""
# ---------------------------------------
# PLOT CUMULATIVE SST OBSERVATION DENSITY
# ---------------------------------------
"""
ocean_area = 361900000.0
t = np.array(times, dtype=np.datetime64)
years = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D') / 365.0
Q3 = pd.Series(n_sst_q3, index=times).fillna(0) / ocean_area / years
Q4 = pd.Series(n_sst_q4, index=times).fillna(0) / ocean_area / years
Q5 = pd.Series(n_sst_q5, index=times).fillna(0) / ocean_area / years
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = df['QL=4'] + df['QL=5']
df = df.mask(np.isinf(df))
fig = plt.figure()
    plt.plot(times,df['QL=4 & 5'].cumsum(), drawstyle='steps', label='QL=4 & 5')
    plt.plot(times,df['QL=3'].cumsum(), drawstyle='steps', label='QL=3')
plt.tick_params(labelsize=12)
plt.ylabel("Observation density / $\mathrm{km^{-2} \ yr^{-1}}$", fontsize=12)
title_str = ' ' + 'QL=3:max=' + "{0:.5f}".format(df['QL=3'].cumsum().max()) + ' ' + 'QL=4 & 5:max=' + "{0:.5f}".format(df['QL=4 & 5'].cumsum().max())
print(title_str)
plt.legend(loc='best')
plt.savefig('n_sst.pdf')
# plt.savefig('n_sst.png', dpi=600)
# plt.savefig('n_sst.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_n_sst_lat(lat_vec,n_sst_q3_lat,n_sst_q4_lat,n_sst_q5_lat):
"""
# ------------------------------------------
# PLOT SST OBSERVATION DENSITY WITH LATITUDE
# ------------------------------------------
"""
interpolation = np.arange(-90,90,1)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q3_lat), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q4_lat), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,lat_vec,n_sst_q5_lat), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = df['QL=4'] + df['QL=5']
df['QL=3 & 4 & 5'] = df['QL=3'] + df['QL=4'] + df['QL=5']
df = df.mask(np.isinf(df))
fig = plt.figure()
plt.fill_between(interpolation, df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(interpolation, df['QL=3'], step="post", alpha=0.4)
plt.plot(interpolation, df['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
plt.plot(interpolation, df['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([-90,90])
ticks = ax.get_xticks()
ax.set_xticks(np.linspace(-90, 90, 7))
plt.tick_params(labelsize=12)
plt.xlabel("Latitude / $\mathrm{\degree N}$", fontsize=12)
plt.ylabel("Observation density / $\mathrm{km^{-2} \ yr^{-1}}$", fontsize=12)
plt.legend(loc='best')
plt.savefig('n_sst_lat.pdf')
# plt.savefig('n_sst_lat.png', dpi=600)
# plt.savefig('n_sst_lat.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_sst(sst_midpoints,sst_q3_hist,sst_q4_hist,sst_q5_hist):
"""
# ------------------------------
# PLOT HISTOGRAM OF SST + MEDIAN
# ------------------------------
"""
# interpolation = np.arange(260.05,319.95,0.1) # original bin midpoints
i = np.arange(260,320,0.1) # bin edges
n = len(i)
m = 1.0
q3 = m * pd.Series(np.interp(i,sst_midpoints,sst_q3_hist), index=i)
q4 = m * pd.Series(np.interp(i,sst_midpoints,sst_q4_hist), index=i)
q5 = m * pd.Series(np.interp(i,sst_midpoints,sst_q5_hist), index=i)
dq = pd.DataFrame({'QL=3':q3, 'QL=4':q4, 'QL=5':q5})
dq['QL=4 & 5'] = 0.5 * (dq['QL=4'] + dq['QL=5'])
# dq = dq.mask(np.isinf(df))
M3 = calc_median(dq['QL=3'].values,i[0:n])
M4_5 = calc_median(dq['QL=4 & 5'].values,i[0:n])
    interpolation = np.arange(260,320,1) # 1 K bins (10x the original 0.1 K bin width), matched by the 10x multiplier below
n = len(interpolation)
multiplier = 10.0
Q3 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,sst_midpoints,sst_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
fig = plt.figure()
plt.fill_between(interpolation,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(interpolation,df['QL=3'], step="post", alpha=0.4)
    plt.plot(interpolation,df['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
    plt.plot(interpolation,df['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([260,310])
plt.tick_params(labelsize=12)
plt.xlabel("SST / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ K^{-1}}$", fontsize=12)
title_str = 'SST: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_sst.pdf')
# plt.savefig('hist_sst.png', dpi=600)
# plt.savefig('hist_sst.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_sensitivity(sensitivity_midpoints,sensitivity_q3_hist,sensitivity_q4_hist,sensitivity_q5_hist):
"""
# ------------------------------------------------
# PLOT HISTOGRAM OF RETRIEVAL SENSITIVITY + MEDIAN
# ------------------------------------------------
"""
# interpolation = np.arange(0.005,1.995,0.01) # original bin midpoints
interpolation = np.arange(0,2,0.01)
n = len(interpolation)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,sensitivity_midpoints,sensitivity_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
M3 = calc_median(df['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df['QL=4 & 5'].values,interpolation[0:n])
fig = plt.figure()
plt.fill_between(100.0*interpolation,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(100.0*interpolation,df['QL=3'], step="post", alpha=0.4)
    plt.plot(100.0*interpolation,df['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
    plt.plot(100.0*interpolation,df['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([85,110])
plt.tick_params(labelsize=12)
plt.xlabel("Retrieval sensitivity / $\mathrm{\%}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ {\%}^{-1} }$", fontsize=12)
title_str = 'Sensitivity: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_sensitivity.pdf')
# plt.savefig('hist_sensitivity.png', dpi=600)
# plt.savefig('hist_sensitivity.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_total_uncertainty(total_uncertainty_midpoints,total_uncertainty_q3_hist,total_uncertainty_q4_hist,total_uncertainty_q5_hist):
"""
# --------------------------------------------
# PLOT HISTOGRAM OF TOTAL UNCERTAINTY + MEDIAN
# --------------------------------------------
"""
# interpolation = np.arange(0.005,3.995+0.01,0.01) # original bin midpoints
interpolation = np.arange(0,4,0.01)
n = len(interpolation)
multiplier = 1.0
Q3 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist), index=interpolation)
Q4 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist), index=interpolation)
Q5 = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist), index=interpolation)
df = pd.DataFrame({'QL=3':Q3, 'QL=4':Q4, 'QL=5':Q5})
df['QL=4 & 5'] = 0.5 * (df['QL=4'] + df['QL=5'])
# df = df.mask(np.isinf(df))
M3 = calc_median(df['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df['QL=4 & 5'].values,interpolation[0:n])
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df['QL=3'], step="post", alpha=0.4)
    plt.plot(total_uncertainty_midpoints,df['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
    plt.plot(total_uncertainty_midpoints,df['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
title_str = 'Uncertainty: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty.pdf')
# plt.savefig('hist_total_uncertainty.png', dpi=600)
# plt.savefig('hist_total_uncertainty.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_histogram_total_uncertainty2(total_uncertainty_midpoints,total_uncertainty_q3_hist_avhrr,total_uncertainty_q4_hist_avhrr,total_uncertainty_q5_hist_avhrr,total_uncertainty_q3_hist_atsr,total_uncertainty_q4_hist_atsr,total_uncertainty_q5_hist_atsr):
"""
# --------------------------------------------------------------
# PLOT HISTOGRAM OF TOTAL UNCERTAINTY + MEDIAN FOR AVHRR VS ATSR
# --------------------------------------------------------------
"""
# interpolation = np.arange(0.005,3.995,0.01) # original bin midpoints
interpolation = np.arange(0,4,0.01)
n = len(interpolation)
multiplier = 1.0
Q3_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist_avhrr), index=interpolation)
Q4_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist_avhrr), index=interpolation)
Q5_avhrr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist_avhrr), index=interpolation)
df_avhrr = pd.DataFrame({'QL=3':Q3_avhrr, 'QL=4':Q4_avhrr, 'QL=5':Q5_avhrr})
# df_avhrr['QL=4 & 5'] = 0.5 * (df_avhrr['QL=4'] + df_avhrr['QL=5'])
df_avhrr['QL=4 & 5'] = df_avhrr['QL=5']
# df_avhrr = df_avhrr.mask(np.isinf(df_avhrr))
Q3_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q3_hist_atsr), index=interpolation)
Q4_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q4_hist_atsr), index=interpolation)
Q5_atsr = multiplier * pd.Series(np.interp(interpolation,total_uncertainty_midpoints,total_uncertainty_q5_hist_atsr), index=interpolation)
df_atsr = pd.DataFrame({'QL=3':Q3_atsr, 'QL=4':Q4_atsr, 'QL=5':Q5_atsr})
df_atsr['QL=4 & 5'] = 0.5 * (df_atsr['QL=4'] + df_atsr['QL=5'])
# df_atsr = df_atsr.mask(np.isinf(df_atsr))
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df_avhrr['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df_avhrr['QL=3'], step="post", alpha=0.4)
    plt.plot(total_uncertainty_midpoints,df_avhrr['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
    plt.plot(total_uncertainty_midpoints,df_avhrr['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
M3 = calc_median(df_avhrr['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df_avhrr['QL=4 & 5'].values,interpolation[0:n])
title_str = 'AVHRR: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty_avhrr.pdf')
# plt.savefig('hist_total_uncertainty_avhrr.png', dpi=600)
# plt.savefig('hist_total_uncertainty_avhrr.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
fig = plt.figure()
plt.fill_between(total_uncertainty_midpoints,df_atsr['QL=4 & 5'], step="post", alpha=0.4)
plt.fill_between(total_uncertainty_midpoints,df_atsr['QL=3'], step="post", alpha=0.4)
    plt.plot(total_uncertainty_midpoints,df_atsr['QL=4 & 5'], drawstyle='steps-post', label='QL=4 & 5')
    plt.plot(total_uncertainty_midpoints,df_atsr['QL=3'], drawstyle='steps-post', label='QL=3')
ax = plt.gca()
ax.set_xlim([0.0,1.25])
plt.tick_params(labelsize=12)
plt.xlabel("Total uncertainty / $\mathrm{K}$", fontsize=12)
plt.ylabel("Frequency / $\mathrm{\% \ cK^{-1}}$", fontsize=12)
M3 = calc_median(df_atsr['QL=3'].values,interpolation[0:n])
M4_5 = calc_median(df_atsr['QL=4 & 5'].values,interpolation[0:n])
title_str = 'ATSR: QL=3:median=' + "{0:.5f}".format(M3) + ' ' + 'QL=4 & 5:median=' + "{0:.5f}".format(M4_5)
print(title_str)
plt.legend(loc='best')
plt.savefig('hist_total_uncertainty_atsr.pdf')
# plt.savefig('hist_total_uncertainty_atsr.png', dpi=600)
# plt.savefig('hist_total_uncertainty_atsr.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def calc_n_sst_timeseries(satellites):
"""
# ---------------------------------------------------------------
# CALC MEAN OF TIMESERIES OF DAILY OBSERVATION DENSITY PER SENSOR
# ---------------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
df_all = pd.DataFrame()
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_all = df_all.append(df,ignore_index=True)
satellites_avhrr = ['AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
df_avhrr = pd.DataFrame()
for i in range(0,len(satellites_avhrr)):
filename = satellites_avhrr[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_avhrr = df_avhrr.append(df,ignore_index=True)
satellites_atsr = ['AATSR','ATSR1','ATSR2']
df_atsr = pd.DataFrame()
for i in range(0,len(satellites_atsr)):
filename = satellites_atsr[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3, 'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
df_atsr = df_atsr.append(df,ignore_index=True)
return df_all, df_avhrr, df_atsr
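# Unit-conversion sketch (illustrative, not part of the original script): the
# factor 365.0 / ocean_area turns a per-day observation count into an
# observation density in km^-2 yr^-1. The per-day count below is hypothetical.
#
#     ocean_area = 361900000.0                        # km^2, as used above
#     n_obs_per_day = 1.0e6
#     density = 365.0 * n_obs_per_day / ocean_area    # ~1.01 km^-2 yr^-1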
def plot_n_sst_timeseries(satellites):
"""
# -------------------------------------------------------
# PLOT TIMESERIES OF DAILY OBSERVATION DENSITY PER SENSOR
# -------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
lab = []
ncolors = len(satellites)
ax1.set_prop_cycle('color',[plt.cm.gnuplot2(j) for j in np.linspace(0, 1, ncolors)])
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
# df['Sum'] = df['Q4'].fillna(0) + df['Q5'].fillna(0)
# df['Sum_mean'] = df['Sum'].resample("1d").sum().fillna(0).rolling(window=31, min_periods=1).median()
# df['Sum_mean'].plot(ax=ax1)
lab.append(labels[i])
ax1.plot(times, df['Sum'], '.', markersize=0.2)
ax1.set_ylim([0,18])
print(labels[i] + "," + str(df['Sum'].mean()) + "," + str(df['Sum'].shape[0]))
plt.tick_params(labelsize=12)
title_str = 'QL=4 & 5'
ax1.set_title(title_str, fontsize=10)
lab = []
ncolors = len(satellites)
ax2.set_prop_cycle('color',[plt.cm.gnuplot2(j) for j in np.linspace(0, 1, ncolors)])
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3})
# df['Q3_mean'] = df['Q3'].resample("1d").sum().rolling(window=31, min_periods=1).median()
# df['Q3_mean'].plot(ax=ax2)
lab.append(labels[i])
ax2.plot(times, df['Q3'], '.', markersize=0.2)
ax2.set_ylim([0,18])
print(labels[i] + "," + str(df['Q3'].mean()) + "," + str(df['Q3'].shape[0]))
plt.tick_params(labelsize=12)
title_str = 'QL=3'
ax2.set_title(title_str, fontsize=10)
fig.legend(lab, fontsize=8, loc=7, markerscale=20, scatterpoints=5)
fig.subplots_adjust(right=0.8)
fig.text(0.01, 0.5, 'Observation density / $\mathrm{km^{-2} \ yr^{-1}}$', va='center', rotation='vertical')
plt.savefig('n_sst_timeseries.pdf')
# plt.savefig('n_sst_timeseries.png', dpi=600)
# plt.savefig('n_sst_timeseries.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
def plot_n_sst_boxplots(satellites):
"""
# --------------------------------------------------------------
# PLOT YEARLY BOXPLOTS FROM DAILY OBSERVATION DENSITY PER SENSOR
# --------------------------------------------------------------
"""
ocean_area = 361900000.0
labels = ['ATSR1','ATSR2','AATSR','NOAA07','NOAA09','NOAA11','NOAA12','NOAA14','NOAA15','NOAA16','NOAA17','NOAA18','NOAA19','METOPA']
satellites = ['ATSR1','ATSR2','AATSR','AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
n_sst_q4 = 365.0 * Q4_duplicates.groupby(Q4_duplicates.index).sum() / ocean_area
n_sst_q5 = 365.0 * Q5_duplicates.groupby(Q5_duplicates.index).sum() / ocean_area
df = DataFrame({'Q4' : n_sst_q4, 'Q5' : n_sst_q5})
df['Sum'] = df['Q4'] + df['Q5']
fig, ax = plt.subplots(figsize=(12,5))
ts = pd.Series(df['Sum'].values, index=times)
sns.boxplot(ts.index.month, ts, ax=ax)
title_str = 'QL=4 & 5:' + labels[i]
ax.set_ylabel('Observation density / $\mathrm{km^{-2} \ yr^{-1}}$')
ax.set_title(title_str, fontsize=10)
file_str = 'n_sst_boxplot_' + labels[i] + '_QL4_5' + '.pdf'
# file_str = 'n_sst_boxplot_' + labels[i] + '_QL4_5' + '.png'
# file_str = 'n_sst_boxplot_' + labels[i] + '_QL4_5' + '.eps'
plt.savefig(file_str)
# plt.savefig(file_str, dpi=600)
# plt.savefig(file_str, format='eps', rasterized=True, dpi=1200)
plt.close("all")
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
ds = xarray.open_dataset(filename)
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
n_sst_q3 = 365.0 * Q3_duplicates.groupby(Q3_duplicates.index).sum() / ocean_area
df = DataFrame({'Q3' : n_sst_q3})
fig, ax = plt.subplots(figsize=(12,5))
ts = pd.Series(df['Q3'].values, index=times)
sns.boxplot(ts.index.month, ts, ax=ax)
title_str = 'QL=3:' + labels[i]
ax.set_ylabel('Observation density / $\mathrm{km^{-2} \ yr^{-1}}$')
ax.set_title(title_str, fontsize=10)
file_str = 'n_sst_boxplot_' + labels[i] + '_QL3' '.pdf'
# file_str = 'n_sst_boxplot_' + labels[i] + '_QL3' '.png'
# file_str = 'n_sst_boxplot_' + labels[i] + '_QL3' '.eps'
plt.savefig(file_str)
# plt.savefig(file_str, dpi=600)
# plt.savefig(file_str, format='eps', rasterized=True, dpi=1200)
plt.close("all")
def calc_lat_fraction():
"""
# ---------------------------------------------------------------
# EXTRACT OCEAN FRACTION WITH LATITUDE FROM L4 OSTIA LANDSEA MASK
# ---------------------------------------------------------------
"""
# mask:source = "NAVOCEANO_landmask_v1.0 EUMETSAT_OSI-SAF_icemask ARCLake_lakemask"
# mask:comment = "water land lake ice"
# mask:flag_masks = 1b, 2b, 4b, 8b, 16b
# mask:summary = "OSTIA L4 product from the ESA SST CCI project, produced using OSTIA reanalysis sytem v3.0"
ds = xarray.open_dataset('landsea_mask.nc')
x = ds.lon
y = ds.lat
z = ds.mask
water = z==1
land = z==2
water_ice = z==9
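# Note (inferred from the flag_masks/comment attributes quoted above): the mask is a bit
# field with water=1, land=2, lake=4, ice=8, so z==9 selects cells flagged as water + ice.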
# water only = 52.42%
# land only = 33.67%
# water + ice = 13.91%
f = 1 - (np.sum(land[0,:,:],axis=1) / len(x)*1.)
lat_vec = y
lat_fraction = f
# exec(open('plot_landsea_mask.py').read())
return lat_vec, lat_fraction
def load_data(lat_vec, lat_fraction):
#
# Rescale ocean_area to total area of surface in each latitude zone
#
ocean_area = 361900000.0 # ETOPO1: km2
R = 6371.0088 # km
# Formula for the area of the Earth between a line of latitude and the north pole (the
# area of a spherical cap): A = 2*pi*R*h, where R is the radius of the Earth and h is the
# perpendicular distance from the plane containing the line of latitude to the pole.
# By trigonometry h = R*(1 - sin(lat)), so the area north of a line of latitude is
# A = 2*pi*R^2*(1 - sin(lat)).
# The area between two lines of latitude is the difference between the areas north of each:
# A = |2*pi*R^2*(1 - sin(lat2)) - 2*pi*R^2*(1 - sin(lat1))| = 2*pi*R^2*|sin(lat1) - sin(lat2)|.
# The area of a lat-lon rectangle is proportional to the difference in longitudes; the area
# above spans 360 degrees of longitude, so for a longitude range |lon1 - lon2|:
# A = 2*pi*R^2*|sin(lat1) - sin(lat2)|*|lon1 - lon2|/360 = (pi/180)*R^2*|sin(lat1) - sin(lat2)|*|lon1 - lon2|.
dlat = 0.05
A = []
N = len(lat_vec)
for i in range(N):
dA = 2. * np.pi * R**2.0 * np.absolute(np.sin(np.pi/180 * (lat_vec[i] + dlat/2)) - np.sin(np.pi/180 * (lat_vec[i] - dlat/2)))
A.append(dA)
surface_vec = np.array(A)
ocean_vec = surface_vec * np.array(lat_fraction)
FPE = 100. * (1.0 - np.sum(ocean_vec) / ocean_area)
print('FPE(ETOPO1,ocean_area)=', FPE)
fig, ax = plt.subplots()
plt.plot(lat_vec, surface_vec, label='surface area')
plt.plot(lat_vec, ocean_vec, label='ocean')
plt.legend()
plt.xlabel('Latitude / degrees')
plt.ylabel(r'Area / $km^{2}$')
# title_str = "ETOPO1 ocean_area=" + "{0:.3e}".format(ocean_area) + " calculated=" + "{0:.3e}".format(np.sum(ocean_vec))
# plt.title(title_str)
file_str = "ocean_area.png"
fig.tight_layout()
plt.savefig('ocean_area.pdf')
# plt.savefig('ocean_area.png', dpi=600)
# plt.savefig('ocean_area.eps', format='eps', rasterized=True, dpi=1200)
plt.close('all')
satellites = ['AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G','AATSR','ATSR1','ATSR2']
df = []
for i in range(0,len(satellites)):
filename = satellites[i] + '_summary.nc'
dsi = xarray.open_dataset(filename)
df.append(dsi)
dsi = []
ds = xarray.concat(df, dim='time')
df = []
dates = ds['time']
idx = np.argsort(dates, axis=0)
t = np.array(dates)[idx]
days = (t[-1] - t[0]).astype('timedelta64[D]') / np.timedelta64(1, 'D')
years = days/365.0
times_duplicates = pd.Series(t)
times = times_duplicates.drop_duplicates()
dates = []
days = []
times_duplicates = []
Q3_duplicates = pd.Series(ds['n_sst_q3'].values[idx], index=t)
Q4_duplicates = pd.Series(ds['n_sst_q4'].values[idx], index=t)
Q5_duplicates = pd.Series(ds['n_sst_q5'].values[idx], index=t)
idx = []
n_sst_q3 = Q3_duplicates.groupby(Q3_duplicates.index).sum()
n_sst_q4 = Q4_duplicates.groupby(Q4_duplicates.index).sum()
n_sst_q5 = Q5_duplicates.groupby(Q5_duplicates.index).sum()
Q3_duplicates = []
Q4_duplicates = []
Q5_duplicates = []
#
# CALCULATE CLEAR SKY FRACTION
#
# water only = 52.42% / land only = 33.67% / water + ice = 13.91% / non-land = 66.33%
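# Assumption for the line below: 3600 x 7200 is the number of cells in the 0.05 degree
# global L4 grid, and the non-land fraction used is water + water/ice = 0.5242 + 0.1391.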
n_ocean = (0.5242 + 0.1391) * 3600 * 7200 * len(times)
n_q3 = np.sum(n_sst_q3)
n_q4 = np.sum(n_sst_q4)
n_q5 = np.sum(n_sst_q5)
clearsky_q3 = n_q3 / n_ocean
clearsky_q4 = n_q4 / n_ocean
clearsky_q5 = n_q5 / n_ocean
#
# SLICE BY LATITUDE
#
# NB: change: * years rather than / years
n_sst_q3_lat = np.sum(ds['n_sst_q3_lat'],axis=0)[0:3600,] / np.array((lat_fraction * surface_vec) * years)
n_sst_q4_lat = np.sum(ds['n_sst_q4_lat'],axis=0)[0:3600,] / np.array((lat_fraction * surface_vec) * years)
n_sst_q5_lat = np.sum(ds['n_sst_q5_lat'],axis=0)[0:3600,] / np.array((lat_fraction * surface_vec) * years)
gd_q3 = np.isfinite(np.array(n_sst_q3_lat))
gd_q4 = np.isfinite(np.array(n_sst_q4_lat))
gd_q5 = np.isfinite(np.array(n_sst_q5_lat))
n_sst_q3_lat_mean = np.array(n_sst_q3_lat)[gd_q3].mean()
n_sst_q4_lat_mean = np.array(n_sst_q4_lat)[gd_q4].mean()
n_sst_q5_lat_mean = np.array(n_sst_q5_lat)[gd_q5].mean()
print('n_sst_q3_lat_mean=', n_sst_q3_lat_mean)
print('n_sst_q4_lat_mean=', n_sst_q4_lat_mean)
print('n_sst_q5_lat_mean=', n_sst_q5_lat_mean)
#
# CONCATENATE HISTOGRAMS
#
sst_midpoints = ds['sst_midpoints']
sst_q3_hist = 100.0 * np.sum(ds['sst_q3_hist'],axis=0) / np.sum(np.sum(ds['sst_q3_hist'],axis=0))
sst_q4_hist = 100.0 * np.sum(ds['sst_q4_hist'],axis=0) / np.sum(np.sum(ds['sst_q4_hist'],axis=0))
sst_q5_hist = 100.0 * np.sum(ds['sst_q5_hist'],axis=0) / np.sum(np.sum(ds['sst_q5_hist'],axis=0))
sensitivity_midpoints = ds['sensitivity_midpoints']
sensitivity_q3_hist = 100.0 * np.sum(ds['sensitivity_q3_hist'],axis=0) / np.sum(np.sum(ds['sensitivity_q3_hist'],axis=0))
sensitivity_q4_hist = 100.0 * np.sum(ds['sensitivity_q4_hist'],axis=0) / np.sum(np.sum(ds['sensitivity_q4_hist'],axis=0))
sensitivity_q5_hist = 100.0 * np.sum(ds['sensitivity_q5_hist'],axis=0) / np.sum(np.sum(ds['sensitivity_q5_hist'],axis=0))
total_uncertainty_midpoints = ds['total_uncertainty_midpoints']
total_uncertainty_q3_hist = 100.0 * np.sum(ds['total_uncertainty_q3_hist'],axis=0) / np.sum(np.sum(ds['total_uncertainty_q3_hist'],axis=0))
total_uncertainty_q4_hist = 100.0 * np.sum(ds['total_uncertainty_q4_hist'],axis=0) / np.sum(np.sum(ds['total_uncertainty_q4_hist'],axis=0))
total_uncertainty_q5_hist = 100.0 * np.sum(ds['total_uncertainty_q5_hist'],axis=0) / np.sum(np.sum(ds['total_uncertainty_q5_hist'],axis=0))
#
# TOTAL UNCERTAINTY FOR AVHRR AND ATSR SEPARATELY
#
satellites_avhrr = ['AVHRR07_G','AVHRR09_G','AVHRR11_G','AVHRR12_G','AVHRR14_G','AVHRR15_G','AVHRR16_G','AVHRR17_G','AVHRR18_G','AVHRR19_G','AVHRRMTA_G']
df = []
for i in range(0,len(satellites_avhrr)):
filename = satellites_avhrr[i] + '_summary.nc'
dsi = xarray.open_dataset(filename)
df.append(dsi)
dsi = []
ds_avhrr = xarray.concat(df, dim='time')
total_uncertainty_q3_hist_avhrr = 100.0 * np.sum(ds_avhrr['total_uncertainty_q3_hist'],axis=0) / np.sum(np.sum(ds_avhrr['total_uncertainty_q3_hist'],axis=0))
total_uncertainty_q4_hist_avhrr = 100.0 * np.sum(ds_avhrr['total_uncertainty_q4_hist'],axis=0) / np.sum(np.sum(ds_avhrr['total_uncertainty_q4_hist'],axis=0))
total_uncertainty_q5_hist_avhrr = 100.0 * np.sum(ds_avhrr['total_uncertainty_q5_hist'],axis=0) / np.sum(np.sum(ds_avhrr['total_uncertainty_q5_hist'],axis=0))
satellites_atsr = ['AATSR','ATSR1','ATSR2']
df = []
for i in range(0,len(satellites_atsr)):
filename = satellites_atsr[i] + '_summary.nc'
dsi = xarray.open_dataset(filename)
df.append(dsi)
dsi = []
ds_atsr = xarray.concat(df, dim='time')
total_uncertainty_q3_hist_atsr = 100.0 * np.sum(ds_atsr['total_uncertainty_q3_hist'],axis=0) / np.sum(np.sum(ds_atsr['total_uncertainty_q3_hist'],axis=0))
total_uncertainty_q4_hist_atsr = 100.0 * np.sum(ds_atsr['total_uncertainty_q4_hist'],axis=0) / np.sum(np.sum(ds_atsr['total_uncertainty_q4_hist'],axis=0))
"""
PREDSTORM real time solar wind forecasting from L1 solar wind data
predicting the L1 solar wind and the Dst index with analogue ensembles
(a minimal, illustrative analogue-ensemble sketch is given right after this docstring);
for similar algorithms see Riley et al. 2017 and Owens et al. 2017
Author: <NAME>, IWF Graz, Austria
twitter @chrisoutofspace, https://github.com/IWF-helio
started April 2018, last update August 2019
python 3.7 with sunpy
method
semi-supervised learning: add known intervals of ICMEs, MFRs and CIRs to the training data
HELCATS lists for ICMEs observed at Wind since 2007
for HSS see e.g. https://link.springer.com/article/10.1007%2Fs11207-013-0355-z
https://en.wikipedia.org/wiki/Pattern_recognition
Things to do:
use recarrays!
DSCOVR data:
NaNs for missing data should be handled better and interpolated over; the OBrien model stops when it encounters NaNs
training data:
use stereo one hour data as training data set, corrected for 1 AU
use VEX and MESSENGER as tests for HelioRing like forecasts, use STEREO at L5 for training data of the last few days
forecast plot:
add approximate levels of Dst for each location to see aurora, taken from ovation prime/worldview and Dst
add Temerin and Li method and kick out Burton/OBrien; make error bars for Dst
take mean of ensemble forecast for final blue line forecast or only best match?
MIT LICENSE
Copyright 2018, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify,
merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
##########################################################################################
####################################### CODE START #######################################
##########################################################################################
################################## INPUT PARAMETERS ######################################
import os
import sys
import getopt
# READ INPUT OPTIONS FROM COMMAND LINE
argv = sys.argv[1:]
opts, args = getopt.getopt(argv,"h",["server", "help"])
server = False
if "--server" in [o for o, v in opts]:
server = True
print("In server mode!")
import matplotlib
if server:
matplotlib.use('Agg') # important for server version, otherwise error when making figures
else:
matplotlib.use('Qt5Agg') # figures are shown on mac
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from matplotlib.dates import num2date, date2num, DateFormatter
import numpy as np
import time
import pickle
import copy
import pdb
import urllib
import json
import seaborn as sns
import scipy
from scipy import stats
import sunpy.time
import predstorm as ps
from predstorm_l1_input import *
#========================================================================================
#--------------------------------- FUNCTIONS --------------------------------------------
#========================================================================================
def get_dscovr_data_real_old():
"""
Downloads and returns DSCOVR data
data from http://services.swpc.noaa.gov/products/solar-wind/
if needed replace with ACE
http://legacy-www.swpc.noaa.gov/ftpdir/lists/ace/
get 3 or 7 day data
url_plasma='http://services.swpc.noaa.gov/products/solar-wind/plasma-3-day.json'
url_mag='http://services.swpc.noaa.gov/products/solar-wind/mag-3-day.json'
Parameters
==========
None
Returns
=======
(data_minutes, data_hourly)
data_minutes : np.rec.array
Array of interpolated minute data with format:
dtype=[('time','f8'),('btot','f8'),('bxgsm','f8'),('bygsm','f8'),('bzgsm','f8'),\
('speed','f8'),('den','f8'),('temp','f8')]
data_hourly : np.rec.array
Array of interpolated hourly data with format:
dtype=[('time','f8'),('btot','f8'),('bxgsm','f8'),('bygsm','f8'),('bzgsm','f8'),\
('speed','f8'),('den','f8'),('temp','f8')]
"""
url_plasma='http://services.swpc.noaa.gov/products/solar-wind/plasma-7-day.json'
url_mag='http://services.swpc.noaa.gov/products/solar-wind/mag-7-day.json'
#download, see URLLIB https://docs.python.org/3/howto/urllib2.html
with urllib.request.urlopen(url_plasma) as url:
pr = json.loads (url.read().decode())
with urllib.request.urlopen(url_mag) as url:
mr = json.loads(url.read().decode())
logger.info('get_dscovr_data_real: DSCOVR plasma data available')
logger.info(str(pr[0]))
logger.info('get_dscovr_data_real: DSCOVR MAG data available')
logger.info(str(mr[0]))
#kill first row which stems from the description part
pr=pr[1:]
mr=mr[1:]
#define variables
#plasma
rptime_str=['']*len(pr)
rptime_num=np.zeros(len(pr))
rpv=np.zeros(len(pr))
rpn=np.zeros(len(pr))
rpt=np.zeros(len(pr))
#mag
rbtime_str=['']*len(mr)
rbtime_num=np.zeros(len(mr))
rbtot=np.zeros(len(mr))
rbzgsm=np.zeros(len(mr))
rbygsm=np.zeros(len(mr))
rbxgsm=np.zeros(len(mr))
#convert variables to numpy arrays
#mag
for k in np.arange(0,len(mr),1):
#handle missing data, they show up as None from the JSON data file
if mr[k][6] is None: mr[k][6]=np.nan
if mr[k][3] is None: mr[k][3]=np.nan
if mr[k][2] is None: mr[k][2]=np.nan
if mr[k][1] is None: mr[k][1]=np.nan
rbtot[k]=float(mr[k][6])
rbzgsm[k]=float(mr[k][3])
rbygsm[k]=float(mr[k][2])
rbxgsm[k]=float(mr[k][1])
#convert time from string to datenumber
rbtime_str[k]=mr[k][0][0:16]
rbtime_num[k]=date2num(datetime.strptime(rbtime_str[k], "%Y-%m-%d %H:%M"))
#plasma
for k in np.arange(0,len(pr),1):
if pr[k][2] is None: pr[k][2]=np.nan
rpv[k]=float(pr[k][2]) #speed
rptime_str[k]=pr[k][0][0:16]
rptime_num[k]=date2num(datetime.strptime(rptime_str[k], "%Y-%m-%d %H:%M"))
if pr[k][1] is None: pr[k][1]=np.nan
rpn[k]=float(pr[k][1]) #density
if pr[k][3] is None: pr[k][3]=np.nan
rpt[k]=float(pr[k][3]) #temperature
#interpolate to minutes
#rtimes_m=np.arange(rbtime_num[0],rbtime_num[-1],1.0000/(24*60))
rtimes_m= round_to_hour(num2date(rbtime_num[0])) + np.arange(0,len(rbtime_num)) * timedelta(minutes=1)
#convert back to matplotlib time
rtimes_m=date2num(rtimes_m)
rbtot_m=np.interp(rtimes_m,rbtime_num,rbtot)
rbzgsm_m=np.interp(rtimes_m,rbtime_num,rbzgsm)
rbygsm_m=np.interp(rtimes_m,rbtime_num,rbygsm)
rbxgsm_m=np.interp(rtimes_m,rbtime_num,rbxgsm)
rpv_m=np.interp(rtimes_m,rptime_num,rpv)
rpn_m=np.interp(rtimes_m,rptime_num,rpn)
rpt_m=np.interp(rtimes_m,rptime_num,rpt)
#interpolate to hours
#rtimes_h=np.arange(np.ceil(rbtime_num)[0],rbtime_num[-1],1.0000/24.0000)
rtimes_h= round_to_hour(num2date(rbtime_num[0])) + np.arange(0,len(rbtime_num)/(60)) * timedelta(hours=1)
rtimes_h=date2num(rtimes_h)
rbtot_h=np.interp(rtimes_h,rbtime_num,rbtot)
rbzgsm_h=np.interp(rtimes_h,rbtime_num,rbzgsm)
rbygsm_h=np.interp(rtimes_h,rbtime_num,rbygsm)
rbxgsm_h=np.interp(rtimes_h,rbtime_num,rbxgsm)
rpv_h=np.interp(rtimes_h,rptime_num,rpv)
rpn_h=np.interp(rtimes_h,rptime_num,rpn)
rpt_h=np.interp(rtimes_h,rptime_num,rpt)
#make recarrays
data_hourly=np.rec.array([rtimes_h,rbtot_h,rbxgsm_h,rbygsm_h,rbzgsm_h,rpv_h,rpn_h,rpt_h], \
dtype=[('time','f8'),('btot','f8'),('bxgsm','f8'),('bygsm','f8'),('bzgsm','f8'),\
('speed','f8'),('den','f8'),('temp','f8')])
data_minutes=np.rec.array([rtimes_m,rbtot_m,rbxgsm_m,rbygsm_m,rbzgsm_m,rpv_m,rpn_m,rpt_m], \
dtype=[('time','f8'),('btot','f8'),('bxgsm','f8'),('bygsm','f8'),('bzgsm','f8'),\
('speed','f8'),('den','f8'),('temp','f8')])
return data_minutes, data_hourly
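# Hedged usage sketch for the function above (assumes a working network connection and that
# the NOAA JSON endpoints are reachable); the field names follow the recarray dtype
# documented in the docstring.
#   dscovr_m, dscovr_h = get_dscovr_data_real_old()
#   print(dscovr_h['speed'][-1], dscovr_h['bzgsm'][-1])     # latest hourly speed / Bz GSM
#   plt.plot_date(dscovr_h['time'], dscovr_h['btot'], '-')  # hourly total field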
def get_omni_data_old():
"""FORMAT(2I4,I3,I5,2I3,2I4,14F6.1,F9.0,F6.1,F6.0,2F6.1,F6.3,F6.2, F9.0,F6.1,F6.0,2F6.1,F6.3,2F7.2,F6.1,I3,I4,I6,I5,F10.2,5F9.2,I3,I4,2F6.1,2I6,F5.1)
1963 1 0 1771 99 99 999 999 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 999.9 9999999. 999.9 9999. 999.9 999.9 9.999 99.99 9999999. 999.9 9999. 999.9 999.9 9.999 999.99 999.99 999.9 7 23 -6 119 999999.99 99999.99 99999.99 99999.99 99999.99 99999.99 0 3 999.9 999.9 99999 99999 99.9
define variables from OMNI2 dataset
see http://omniweb.gsfc.nasa.gov/html/ow_data.html
omni2_url='ftp://nssdcftp.gsfc.nasa.gov/pub/data/omni/low_res_omni/omni2_all_years.dat'
"""
#check how many rows exist in this file
f=open('data/omni2_all_years.dat')
dataset= len(f.readlines())
#print(dataset)
#global Variables
spot=np.zeros(dataset)
btot=np.zeros(dataset) #floating points
bx=np.zeros(dataset) #floating points
by=np.zeros(dataset) #floating points
bz=np.zeros(dataset) #floating points
bzgsm=np.zeros(dataset) #floating points
bygsm=np.zeros(dataset) #floating points
speed=np.zeros(dataset) #floating points
speedx=np.zeros(dataset) #floating points
speed_phi=np.zeros(dataset) #floating points
speed_theta=np.zeros(dataset) #floating points
dst=np.zeros(dataset) #float
kp=np.zeros(dataset) #float
den=np.zeros(dataset) #float
pdyn=np.zeros(dataset) #float
year=np.zeros(dataset)
day=np.zeros(dataset)
hour=np.zeros(dataset)
t=np.zeros(dataset) #index time
j=0
print('Read OMNI2 data ...')
with open('data/omni2_all_years.dat') as f:
for line in f:
line = line.split() # to deal with blank
#print line #41 is Dst index, in nT
dst[j]=line[40]
kp[j]=line[38]
if dst[j] == 99999: dst[j]=np.NaN
#40 is sunspot number
spot[j]=line[39]
#if spot[j] == 999: spot[j]=NaN
#25 is bulkspeed F6.0, in km/s
speed[j]=line[24]
if speed[j] == 9999: speed[j]=np.NaN
#get speed angles F6.1
speed_phi[j]=line[25]
if speed_phi[j] == 999.9: speed_phi[j]=np.NaN
speed_theta[j]=line[26]
if speed_theta[j] == 999.9: speed_theta[j]=np.NaN
#convert speed to GSE x see OMNI website footnote
speedx[j] = - speed[j] * np.cos(np.radians(speed_theta[j])) * np.cos(np.radians(speed_phi[j]))
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
op package provide some common ops for building paddle model.
"""
import paddle
import numpy as np
from pgl.utils.helper import check_is_tensor
import paddle.fluid.core as core
def read_rows(data, index):
"""Slice tensor with given index from dictionary of tensor or tensor
This function helps to slice data from nested dictionary structure.
Args:
data: A dictionary of tensor or tensor
index: A tensor of slicing index
Returns:
Return a dictionary of tensor or tensor.
"""
if data is None:
return None
elif isinstance(data, dict):
new_data = {}
for key, value in data.items():
new_data[key] = read_rows(value, index)
return new_data
else:
return paddle.gather(data, index)
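# Hedged usage sketch for read_rows (the tensors below are made up for the example):
#   feat = {"h": paddle.to_tensor([[1., 2.], [3., 4.], [5., 6.]]),
#           "e": {"w": paddle.to_tensor([0.1, 0.2, 0.3])}}
#   idx = paddle.to_tensor([0, 2])
#   sub = read_rows(feat, idx)   # sub["h"] keeps rows 0 and 2; sub["e"]["w"] keeps elements 0 and 2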
def get_index_from_counts(counts):
"""Return index generated from counts
This function return the index from given counts.
For example, when counts = [ 2, 3, 4], return [0, 2, 5, 9]
Args:
counts: numpy.ndarray or paddle.Tensor
Return:
Return the index array generated from the counts
"""
if check_is_tensor(counts):
index = paddle.concat(
[
paddle.zeros(
shape=[1, ], dtype=counts.dtype), paddle.cumsum(counts)
],
axis=-1)
else:
index = np.cumsum(counts, dtype="int64")
index = np.insert(index, 0, 0)
return index
"""
Set operations for arrays based on sorting.
:Contains:
unique,
isin,
ediff1d,
intersect1d,
setxor1d,
in1d,
union1d,
setdiff1d
:Notes:
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
sort(), that can provide directly the permutation vectors, avoiding
thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: <NAME>
"""
import functools
import numpy as np
from numpy.core import overrides
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
'in1d', 'isin'
]
def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
return (ary, to_end, to_begin)
@array_function_dispatch(_ediff1d_dispatcher)
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : array_like
If necessary, will be flattened before the differences are taken.
to_end : array_like, optional
Number(s) to append at the end of the returned differences.
to_begin : array_like, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
See Also
--------
diff, gradient
Notes
-----
When applied to masked arrays, this function drops the mask information
if the `to_begin` and/or `to_end` parameters are used.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1, 2, 3, -7])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
array([-99, 1, 2, ..., -7, 88, 99])
The returned array is always 1D.
>>> y = [[1, 2, 4], [1, 6, 24]]
>>> np.ediff1d(y)
array([ 1, 2, -3, 5, 18])
"""
# force a 1d array
ary = np.asanyarray(ary).ravel()
# enforce that the dtype of `ary` is used for the output
dtype_req = ary.dtype
# fast track default case
if to_begin is None and to_end is None:
return ary[1:] - ary[:-1]
if to_begin is None:
l_begin = 0
else:
to_begin = np.asanyarray(to_begin)
if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
raise TypeError("dtype of `to_begin` must be compatible "
"with input `ary` under the `same_kind` rule.")
to_begin = to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
to_end = np.asanyarray(to_end)
if not np.can_cast(to_end, dtype_req, casting="same_kind"):
raise TypeError("dtype of `to_end` must be compatible "
"with input `ary` under the `same_kind` rule.")
to_end = to_end.ravel()
l_end = len(to_end)
# do the calculation in place and copy to_begin and to_end
l_diff = max(len(ary) - 1, 0)
result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
result = ary.__array_wrap__(result)
if l_begin > 0:
result[:l_begin] = to_begin
if l_end > 0:
result[l_begin + l_diff:] = to_end
np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
return result
def _unpack_tuple(x):
""" Unpacks one-element tuples for use as return values """
if len(x) == 1:
return x[0]
else:
return x
def _unique_dispatcher(ar, return_index=None, return_inverse=None,
return_counts=None, axis=None):
return (ar,)
@array_function_dispatch(_unique_dispatcher)
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : array_like
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
.. versionadded:: 1.9.0
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. Object arrays or structured arrays
that contain objects are not supported if the `axis` kwarg is used. The
default is None.
.. versionadded:: 1.13.0
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
.. versionadded:: 1.9.0
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
repeat : Repeat elements of an array.
Notes
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
(move the axis to the first dimension to keep the order of the other axes)
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
Examples
--------
>>> np.unique([1, 1, 2, 2, 3, 3])
array([1, 2, 3])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1, 2, 3])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1, 0, 0], [2, 3, 4]])
Return the indices of the original array that give the unique values:
>>> a = np.array(['a', 'b', 'b', 'c', 'a'])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array(['a', 'b', 'c'], dtype='<U1')
>>> indices
array([0, 1, 3])
>>> a[indices]
array(['a', 'b', 'c'], dtype='<U1')
Reconstruct the input array from the unique values and inverse:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1, 2, 3, 4, 6])
>>> indices
array([0, 1, 4, 3, 1, 2, 1])
>>> u[indices]
array([1, 2, 6, 4, 2, 3, 2])
Reconstruct the input values from the unique values and counts:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> values, counts = np.unique(a, return_counts=True)
>>> values
array([1, 2, 3, 4, 6])
>>> counts
array([1, 3, 1, 1, 1])
>>> np.repeat(values, counts)
array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
"""
ar = np.asanyarray(ar)
if axis is None:
ret = _unique1d(ar, return_index, return_inverse, return_counts)
return _unpack_tuple(ret)
# axis was specified and not None
try:
ar = np.moveaxis(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
raise np.AxisError(axis, ar.ndim) from None
# Must reshape to a contiguous 2D array for this to work...
orig_shape, orig_dtype = ar.shape, ar.dtype
ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
ar = np.ascontiguousarray(ar)
dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
# At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
# data type with `m` fields where each field has the data type of `ar`.
# In the following, we create the array `consolidated`, which has
# shape `(n,)` with data type `dtype`.
try:
if ar.shape[1] > 0:
consolidated = ar.view(dtype)
else:
# If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
# a data type with itemsize 0, and the call `ar.view(dtype)` will
# fail. Instead, we'll use `np.empty` to explicitly create the
# array with shape `(len(ar),)`. Because `dtype` in this case has
# itemsize 0, the total size of the result is still 0 bytes.
consolidated = np.empty(len(ar), dtype=dtype)
except TypeError as e:
# There's no good way to do this for object arrays, etc...
msg = 'The axis argument to unique is not supported for dtype {dt}'
raise TypeError(msg.format(dt=ar.dtype)) from e
def reshape_uniq(uniq):
n = len(uniq)
uniq = uniq.view(orig_dtype)
uniq = uniq.reshape(n, *orig_shape[1:])
uniq = np.moveaxis(uniq, 0, axis)
return uniq
output = _unique1d(consolidated, return_index,
return_inverse, return_counts)
output = (reshape_uniq(output[0]),) + output[1:]
return _unpack_tuple(output)
def _unique1d(ar, return_index=False, return_inverse=False,
return_counts=False):
"""
Find the unique elements of an array, ignoring shape.
"""
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
mask = np.empty(aux.shape, dtype=np.bool_)
mask[:1] = True
mask[1:] = aux[1:] != aux[:-1]
ret = (aux[mask],)
if return_index:
ret += (perm[mask],)
if return_inverse:
imask = np.cumsum(mask) - 1
inv_idx = np.empty(mask.shape, dtype=np.intp)
inv_idx[perm] = imask
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
ret += (np.diff(idx),)
return ret
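# Worked example of the mask logic above (illustration only): for ar = [2, 1, 1, 3] the
# sorted values are aux = [1, 1, 2, 3]; mask = [True, False, True, True] marks the first
# occurrence of each value, so aux[mask] = [1, 2, 3]. np.cumsum(mask) - 1 = [0, 0, 1, 2]
# then gives the inverse indices in sorted order.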
def _intersect1d_dispatcher(
ar1, ar2, assume_unique=None, return_indices=None):
return (ar1, ar2)
@array_function_dispatch(_intersect1d_dispatcher)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
Return the sorted, unique values that are in both of the input arrays.
Parameters
----------
ar1, ar2 : array_like
Input arrays. Will be flattened if not already 1D.
assume_unique : bool
If True, the input arrays are both assumed to be unique, which
can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
unique, incorrect results and out-of-bounds indices could result.
Default is False.
return_indices : bool
If True, the indices which correspond to the intersection of the two
arrays are returned. The first instance of a value is used if there are
multiple. Default is False.
.. versionadded:: 1.15.0
Returns
-------
intersect1d : ndarray
Sorted 1D array of common and unique elements.
comm1 : ndarray
The indices of the first occurrences of the common values in `ar1`.
Only provided if `return_indices` is True.
comm2 : ndarray
The indices of the first occurrences of the common values in `ar2`.
Only provided if `return_indices` is True.
See Also
--------
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
>>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
array([1, 3])
To intersect more than two arrays, use functools.reduce:
>>> from functools import reduce
>>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([3])
To return the indices of the values common to the input arrays
along with the intersected values:
>>> x = np.array([1, 1, 2, 3, 4])
>>> y = np.array([2, 1, 4, 6])
>>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
>>> x_ind, y_ind
(array([0, 2, 4]), array([1, 0, 2]))
>>> xy, x[x_ind], y[y_ind]
(array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
"""
ar1 = np.asanyarray(ar1)
ar2 = np.asanyarray(ar2)
#coding=utf-8
# This file is licensed under MIT license.
# See the LICENSE file in the project root for more information.
import sys
import numpy as np
import tf
import tf.transformations
from car_msgs.msg import MotionPlanningTarget, CarState
from car_core.common import msgs_helpers, geom_helpers
from frenet import FrenetFrame, path_to_global
from trajectory import Trajectory1D, Trajectory2D
import quintic
import matplotlib.pyplot as plt
from matplotlib.colors import hsv_to_rgb
from collections import deque
# Parameters for enumerating candidate end states
D_MIN = -3.0 # Minimum lateral offset
D_MAX = 3.0 # Maximum lateral offset
D_STEP = 3.0 # Step between lateral offsets
S_MIN = 10.0 # Minimum longitudinal position
S_MAX = 10.0 # Maximum longitudinal position
S_STEP = 5.0 # Step between longitudinal positions
V_MIN = 0.0 # Minimum speed (relative to the current speed)
V_MAX = 0.0 # Maximum speed (relative to the current speed)
V_STEP = 1.0 # Step between longitudinal speeds
T_DEV = 0.0 # Maximum deviation of the end time from the rough estimate, as a fraction
# ti = [(1-T_DEV)*t_estimate, (1+T_DEV)*t_estimate]
T_STEP = 1 # Step between candidate end times
# Calculation parameters
T_CALC_STEP = 0.01 # Time interpolation step
# Constraint parameters
MAX_LON_SPEED = 22 # Maximum longitudinal speed
MIN_LON_SPEED = 0 # Minimum longitudinal speed (0, to remove strange results)
MAX_LON_ACC = 1.5 # Maximum longitudinal acceleration
MIN_LON_DEACC = -1.5 # Minimum longitudinal deacceleration (breaking)
MAX_LAT_ACC = 0.1 # Maximum lateral acceleration
MIN_CURV_RADIUS = 0 # Minimum curvature radius
# Cost function coefficients
K_LAT_J = 1
K_LAT_T = 1
K_LAT_D = 1
K_LON_J = 1
K_LON_T = 1
K_LON_S = 1
K_LON_DS = 1
K_LON = 1
K_LAT = 1
IS_PLOT = True
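# Hedged sketch (an assumption about what the imported `quintic` module provides; this
# helper is illustration only and is not used by the MotionPlanner class below): a quintic
# polynomial x(t) = c0 + c1*t + ... + c5*t^5 connecting boundary states (x0, v0, a0) at
# t=0 and (x1, v1, a1) at t=T can be found by solving a 6x6 linear system.
def _quintic_coefficients_sketch(x0, v0, a0, x1, v1, a1, T):
    import numpy as _np
    A = _np.array([
        [1, 0, 0,    0,      0,       0],        # x(0)   = x0
        [0, 1, 0,    0,      0,       0],        # x'(0)  = v0
        [0, 0, 2,    0,      0,       0],        # x''(0) = a0
        [1, T, T**2, T**3,   T**4,    T**5],     # x(T)   = x1
        [0, 1, 2*T,  3*T**2, 4*T**3,  5*T**4],   # x'(T)  = v1
        [0, 0, 2,    6*T,    12*T**2, 20*T**3],  # x''(T) = a1
    ], dtype=float)
    b = _np.array([x0, v0, a0, x1, v1, a1], dtype=float)
    return _np.linalg.solve(A, b)  # coefficients c0..c5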
class MotionPlanner:
"""
Performs motion planning using quintic polynomials
"""
def __init__(self, map):
"""
Creates motion planner
Args:
map: object which provides access to environment map
"""
self.__map = map
#self.__lattice = {}
#self.__create_endpoints()
self.__precompute_lattice()
"""
def plan(self, state, target):
if state is None or target is None:
return None
# Calc current Cartesian position, Cartesian velocity, and orientation (yaw)
pos0, vel0, yaw0 = self.__get_state(state.pose, state.linear_speed)
pos1, vel1, yaw1 = self.__get_state(target.pose, target.linear_speed)
# Find closest point on path
np_path = msgs_helpers.path_poses_to_array(target.path.poses)
# start_index = geom_helpers.get_closest_path_point(np_path, pos0)
# Calc position and velocity in Frenet frame
frenet_frame = FrenetFrame(0, np_path[0], np_path[1])
S0, D0 = self.__get_state_in_frenet_frame(frenet_frame, pos0, vel0, [0, 0]) # Initial lon & lat states
S1, D1 = self.__get_state_in_frenet_frame(frenet_frame, pos1, vel1, [0, 0]) # Target lon & lat states
ax = self.__init_ploting(S0, S1, D0, D1)
num_lat_trajectories = 0
num_lon_trajectories = 0
num_glob_trajectoris = 0
cnt = 3
s_step = (S1 - S0)/cnt
self.plan_stage(0.0, S0, S0+s_step, s_step, S1, D0, D1, np_path, ax)
plt.show()
print('Lon trajectories: {}'.format(num_lon_trajectories))
print('Lat trajectories: {}'.format(num_lat_trajectories))
print('Glob trajectories: {}'.format(num_glob_trajectoris))
def plan_stage(self, t0, S0, S1, s_step, s_end, D0, D1, np_path, ax):
num_lat_trajectories = 0
num_lon_trajectories = 0
num_glob_trajectoris = 0
self.plot_lat_dot(ax, t0, D0)
self.plot_lon_dot(ax, t0, S0)
for vi in self.__arange(S1[1]-V_DEV, S1[1]+V_DEV, V_STEP):
for si in self.__arange(S1[0]-S_DEV, S1[0]+S_DEV, S_STEP):
S1i = (si, vi, S1[2])
t_estimate = self.__calc_baseline_coefs(S0, S1)
for ti in self.__arange((1 - T_DEV) * t_estimate, (1 + T_DEV) * t_estimate, T_STEP):
lon_trajectory = self.__calc_lon_trajectory(S0, S1i, ti)
num_lon_trajectories+=1
self.plot_lon(ax, lon_trajectory, t0)
for di in self.__arange(D_MIN, D_MAX, D_STEP):
D1i = (di, D1[1], D1[2])
lat_trajectory = self.__calc_lat_trajectory(D0, D1i, ti)
num_lat_trajectories+=1
self.plot_lat(ax, lat_trajectory, t0)
combined_trajectory = Trajectory2D.from_frenet(lon_trajectory, lat_trajectory)
num_glob_trajectoris+=1
cost = K_LAT * lat_trajectory.cost + K_LON * lon_trajectory.cost
global_trajectory = path_to_global(combined_trajectory, np_path)
self.plot_glob(ax, global_trajectory)
S0_next = np.array([lon_trajectory.x[-1], lon_trajectory.dx[-1], lon_trajectory.ddx[-1]])
D0_next = np.array([lat_trajectory.x[-1], lat_trajectory.dx[-1], lat_trajectory.ddx[-1]])
t0_next = t0+lon_trajectory.t[-1]
S1_next = S1 + s_step
if S1_next[0] <= s_end[0]:
self.plan_stage(t0_next, S0_next, S1_next, s_step, s_end, D0_next, D1, np_path, ax)
"""
# Precompute lattice graph
def __precompute_lattice(self):
ax = self.__init_ploting((S_MIN, V_MIN, 0), (S_MAX, V_MAX, 0), (D_MIN, 0, 0), (D_MAX, 0, 0))
graph = {}
roots = deque()
next_roots = deque()
max_layers = 3
roots.append((0, (0, 15, 0), (0, 0, 0)))
for layer in range(max_layers):
while len(roots) > 0:
root = roots.pop()
self.__precompute_lattice_from_root(root, graph, next_roots)
next_roots, roots = roots, next_roots
for root in graph:
print(root)
for trajectory in graph[root]:
print("\t{}".format(trajectory.end))
self.__plot_trajectory(ax, trajectory)
plt.show()
# Precompute tree of the trajectories from one root
def __precompute_lattice_from_root(self, root, graph, roots):
#ax = self.__init_ploting((S0[0] + S_MIN, S0[1] + V_MIN, 0), (S0[0] + S_MAX, S0[1] + V_MAX, 0), (D_MIN, 0, 0), (D_MAX, 0, 0))
T0, S0, D0 = root
graph[root] = []
for v1i in self.__arange(S0[1] + V_MIN, S0[1] + V_MAX, V_STEP):
if v1i < 0:
continue
for s1i in self.__arange(S0[0] + S_MIN, S0[0] + S_MAX, S_STEP):
S1i = (s1i, v1i, 0)
t_estimate = T0 + self.__estimate_time(S0, S1i)
for ti in self.__arange((1 - T_DEV) * t_estimate, (1 + T_DEV) * t_estimate, T_STEP):
lon_trajectory = self.__calc_sub_trajectory(S0, S1i, T0, ti)
for d1i in self.__arange(D_MIN, D_MAX, D_STEP):
D1i = (d1i, 0, 0)
lat_trajectory = self.__calc_sub_trajectory(D0, D1i, T0, ti)
combined_trajectory = Trajectory2D.from_frenet(lon_trajectory, lat_trajectory)
combined_trajectory.cost = K_LAT * lat_trajectory.cost + K_LON * lon_trajectory.cost
combined_trajectory.end = (ti, S1i, D1i)
graph[root].append(combined_trajectory)
roots.append(combined_trajectory.end)
def __plot_trajectory(self, ax, trajectory):
self.plot_lon(ax, trajectory.raw_lon)
self.plot_lat(ax, trajectory.raw_lat)
self.plot_glob(ax, trajectory)
def gen_hsv(self, cnt):
hsv = np.full((cnt, 3), 1.0)
hsv[:, 0] = np.linspace(0.0, 0.8, cnt)
return [hsv_to_rgb(color) for color in hsv]
def plot_glob(self, ax, global_trajectory, color=None):
if IS_PLOT:
color = '#ff0000' #color if color is not None else ('#ff0000' if global_trajectory.ok else '#aaaaaa')
ax[3][0].plot(global_trajectory.pos[:, 0], global_trajectory.pos[:, 1], color=color, alpha=0.5)
def plot_lat(self, ax, lat_trajectory, t0=0, color=None):
if IS_PLOT:
color = '#ff0000' #color if color is not None else ('#ff0000' if lat_trajectory.ok else '#aaaaaa')
ax[0][0].plot(t0 + lat_trajectory.t, lat_trajectory.x, color=color)
ax[1][0].plot(t0 + lat_trajectory.t, lat_trajectory.dx, color=color)
ax[2][0].plot(t0 + lat_trajectory.t, lat_trajectory.ddx, color=color)
def plot_lon(self, ax, lon_trajectory, t0=0, color=None):
if IS_PLOT:
color = '#ff0000' #color if color is not None else ('#ff0000' if lon_trajectory.ok else '#aaaaaa')
ax[0][1].plot(t0 + lon_trajectory.t, lon_trajectory.x, color=color)
ax[1][1].plot(t0 + lon_trajectory.t, lon_trajectory.dx, color=color)
ax[2][1].plot(t0 + lon_trajectory.t, lon_trajectory.ddx, color=color)
def plot_lat_dot(self, ax, t0, dot):
ax[0][0].plot([t0], [dot[0]], 'ob')
ax[1][0].plot([t0], [dot[1]], 'ob')
ax[2][0].plot([t0], [dot[2]], 'ob')
def plot_lon_dot(self, ax, t0, dot):
ax[0][1].plot([t0], [dot[0]], 'ob')
ax[1][1].plot([t0], [dot[1]], 'ob')
ax[2][1].plot([t0], [dot[2]], 'ob')
# Get the car's pos, vel, yaw from the car_msgs/CarState
# pos - position (vector)
# vel - velocity (vector)
# yaw - orientation angle (scalar)
def __get_state(self, pose, linear_speed):
pos = msgs_helpers.point_to_array(pose.position)
q = msgs_helpers.quaternion_to_array(pose.orientation)
m = tf.transformations.quaternion_matrix(q)
vel = (np.matmul(m, [1, 0, 0, 0]) * linear_speed)[:2]
yaw = tf.transformations.euler_from_quaternion(q)[2]
return pos, vel, yaw
# Calculate lateral and longitudinal states
# (d, d', d'') and (s, s', s'') in Frenet Frame
# for given pos, vel, acc in Cartesian frame
def __get_state_in_frenet_frame(self, frame, pos, vel, acc):
pos_f = frame.point_to(pos)
vel_f = frame.vector_to(vel)
acc_f = frame.vector_to(acc)
s = np.array([pos_f[0], vel_f[0], acc_f[0]])
d = np.array([pos_f[1], vel_f[1], acc_f[1]])
return s, d
#!/usr/bin/env python3
import matplotlib.pyplot as plt
import numpy as np
def main():
n1 = 1024
n2 = 512
data = np.load("data_tum.npy")