| prompt | completion | api |
|---|---|---|
| stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59 |
"""Implementation of Vector AutoRegressive Model"""
from operator import itemgetter
import numpy as np
from scipy.linalg import solve_triangular
from scipy.stats import f as ftest
from numpy.linalg import det
from arch.unitroot import PhillipsPerron
from marketlearn.causality_network.vector_ar.varbase import Base
from marketlearn.learning.linear_models.linear_regression import LinearRegression
class BiVariateVar(Base):
"""
Implementation of bi-variate Vector AutoRegressive Model of order 1
Note:
- After a VAR model is specified, granger causality tests can be
performed
- Assumes input is log prices whose difference (returns) is stationary
"""
def __init__(self, fit_intercept: bool = True,
degree: int = 1):
"""
Constructor used to initialize the VAR model
Currently only supports lag of 1
:param fit_intercept: (bool) Flag to add bias (True by default)
:param degree: (int) Lag (1 by default)
"""
self.fit_intercept = fit_intercept
self.degree = degree
self.lr = LinearRegression(fit_intercept=fit_intercept)
self.run = False
self.temp_resid = None
self.lag_order = None
self.k_params = None
self.ddof = None
self.theta = None
self.predictions = None
self.residuals = None
self.design = None
self.response = None
def fit(self, x, y, p=1, coint=False) -> 'BiVariateVar':
"""
Fits the model to training data
:param x: (np.array) The first variable in log returns
:param y: (np.array) The second variable in log returns
:param p: (int) The lagged order
:param coint: (bool) Cointegration flag (False by default)
:return: (object) Class after fitting
"""
# Create the multivariate response
Y = np.concatenate((x[:, np.newaxis], y[:, np.newaxis]), axis=1)
n_samples, _ = Y.shape
if p == 0:
# Just fit on intercept if any
Z = np.ones(n_samples)  # api: numpy.ones
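A minimal sketch of how a lag-p design matrix could be assembled from the response `Y` built in `fit` above; the helper name `make_lagged_design` and its slicing are illustrative assumptions, not code from the package shown in this row.

```python
import numpy as np

def make_lagged_design(Y, p=1):
    """Pair each observation Y_t with its lags Y_{t-1}, ..., Y_{t-p}."""
    n_samples, _ = Y.shape
    # Column-stack the p lagged copies of Y: row i of Z holds Y_{t-1}, ..., Y_{t-p}
    # for the response row Y_t with t = p + i.
    lags = [Y[p - k - 1:n_samples - k - 1] for k in range(p)]
    Z = np.concatenate(lags, axis=1)
    # Trim the response so it lines up with the available lags.
    return Y[p:], Z
```

For p = 1 this amounts to regressing r_t on r_{t-1}, plus an intercept when `fit_intercept` is set.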
import datetime
import numpy as np
import pandas as pd
import networkx as nx
from . import dtutil
TYPE_VARIABLE = "variable"
TYPE_BINARY_VARIABLE = "binary"
TYPE_COUNTABLE_VARIABLE = "countable"
TYPE_TIMESERIES_EVENT = "tsevent"
class Variable(object):
"""Continuous Variable that follows Linear model.
Causal effects simply affects the values additively.
"""
def __init__(self, node_id, node_data, index, defaults, observable=True):
self._node_id = node_id
self._node_data = node_data
self._index = index
self._defaults = defaults
self._observable = observable
self._size = len(self._index)
self._values = None
@property
def values(self):
return self._values
def get_df(self):
return pd.DataFrame(self._values, index=self._index)
def _get_spec(self, key, default_value):
if key in self._node_data:
return self._node_data[key]
elif key in self._defaults:
return self._defaults[key]
else:
return default_value
def _rand(self):
rand_type = self._get_spec("noise_type", None)
if rand_type is None:
return self._rand_default()
elif rand_type == "gaussian":
return self._rand_gauss()
elif rand_type == "laplace":
return self._rand_laplace()
elif rand_type == "uniform":
return self._rand_uniform()
elif rand_type == "poisson":
return self._rand_poisson()
def _rand_default(self):
return self._rand_gauss()
def _rand_gauss(self):
scale = self._get_spec("gaussian_scale", 1)
loc = self._get_spec("gaussian_loc", 0)
return np.random.normal(loc, scale, self._size)
def _rand_laplace(self):
scale = self._get_spec("laplace_scale", 1)
loc = self._get_spec("laplace_loc", 0)
return np.random.laplace(loc, scale, self._size)
def _rand_uniform(self):
umin = self._get_spec("uniform_min", 0)
umax = self._get_spec("uniform_max", 1)
return np.random.uniform(umin, umax, self._size)  # api: numpy.random.uniform
from sympy import symbols, diff, simplify, Matrix, N
import numpy as np
from task5 import get_lagrange_dt
from task1 import get_inverse
X1, X2, X3, x1, x2, x3, t = symbols('X1 X2 X3 x1 x2 x3 t')
def get_xKk(eq1, eq2, eq3):
inv = get_inverse(eq1, eq2, eq3)
t1 = np.pi / 4
xKk = [
[diff(inv[X1], x1).subs({t: t1}), diff(inv[X1], x2).subs({t: t1}), diff(inv[X1], x3).subs({t: t1})],
[diff(inv[X2], x1).subs({t: t1}), diff(inv[X2], x2).subs({t: t1}), diff(inv[X2], x3).subs({t: t1})],
[diff(inv[X3], x1).subs({t: t1}), diff(inv[X3], x2).subs({t: t1}), diff(inv[X3], x3).subs({t: t1})]
]
#xKk = np.around(np.array(xKk).astype(float), decimals = 3)
return np.array(xKk)  # api: numpy.array
#-------------------------------------------------------------------------------
#
# Time conversion utilities - test
#
# Author: <NAME> Chen <<EMAIL>>
#
# Original Author: <NAME> <<EMAIL>>
#-------------------------------------------------------------------------------
# Copyright (C) 2019 Geoist team
#
#-------------------------------------------------------------------------------
# pylint: disable=missing-docstring, invalid-name, too-few-public-methods
from math import modf, floor
from numpy import array, vectorize, inf, nan
from numpy.random import uniform
from numpy.testing import assert_allclose
from unittest import TestCase, main
from geoist.magmod._pytimeconv import (
decimal_year_to_mjd2000, mjd2000_to_decimal_year,
mjd2000_to_year_fraction,
)
class TestMjd2000ToYearFraction(TestCase):
@staticmethod
def reference(value):
return vectorize(_mjd2000_to_year_fraction)(value)
@staticmethod
def eval(value):
return mjd2000_to_year_fraction(value)
@staticmethod
def _assert(tested, expected):
assert_allclose(tested, expected, rtol=1e-14, atol=1e-11)
def test_mjd2000_to_year_fraction_far_range(self):
values = uniform(-730487., 730485., (100, 100))
self._assert(self.eval(values), self.reference(values))
def test_mjd2000_to_year_fraction_near_range(self):
values = uniform(-36524., 36525., (100, 100))  # api: numpy.random.uniform
import jax.numpy as jnp
from jax import grad, vmap, hessian, jit
from jax.config import config
config.update("jax_enable_x64", True)
# numpy
import numpy as onp
from numpy import random
import argparse
import logging
import datetime
from time import time
import os
# solving -grad(a*grad u) + alpha u^m = f on torus, to be completed
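# In other words, the target PDE is  -div(a(x) * grad u(x)) + alpha * u(x)^m = f(x)
# on the unit torus; the Gauss-Newton GP solver below linearizes the u^m term and
# solves a sequence of linear kernel systems.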
def get_parser():
parser = argparse.ArgumentParser(description='NonLinElliptic equation GP solver')
parser.add_argument("--freq_a", type=float, default = 1.0)
parser.add_argument("--freq_u", type=float, default = 4.0)
parser.add_argument("--alpha", type=float, default = 1.0)
parser.add_argument("--m", type = int, default = 3)
parser.add_argument("--dim", type = int, default = 2)
parser.add_argument("--kernel", type=str, default="periodic")
parser.add_argument("--sigma-scale", type = float, default = 0.25)
# sigma = args.sigma_scale * sqrt(dim)
parser.add_argument("--N_domain", type = int, default = 1000)
parser.add_argument("--nugget", type = float, default = 1e-10)
parser.add_argument("--GNsteps", type = int, default = 3)
parser.add_argument("--logroot", type=str, default='./logs/')
parser.add_argument("--randomseed", type=int, default=9999)
parser.add_argument("--num_exp", type=int, default=1)
args = parser.parse_args()
return args
@jit
def get_GNkernel_train(x,y,wx0,wx1,wxg,wy0,wy1,wyg,d,sigma):
# wx0 * delta_x + wxg * nabla delta_x + wx1 * Delta delta_x
return wx0*wy0*kappa(x,y,d,sigma) + wx0*wy1*Delta_y_kappa(x,y,d,sigma) + wy0*wx1*Delta_x_kappa(x,y,d,sigma) + wx1*wy1*Delta_x_Delta_y_kappa(x,y,d,sigma) + wx0*D_wy_kappa(x,y,d,sigma,wyg) + wy0*D_wx_kappa(x,y,d,sigma,wxg) + wx1*Delta_x_D_wy_kappa(x,y,d,sigma,wyg) + wy1*D_wx_Delta_y_kappa(x,y,d,sigma,wxg) + D_wx_D_wy_kappa(x,y,d,sigma,wxg,wyg)
@jit
def get_GNkernel_train_boundary(x,y,wy0,wy1,wyg,d,sigma):
return wy0*kappa(x,y,d,sigma) + wy1*Delta_y_kappa(x,y,d,sigma) + D_wy_kappa(x,y,d,sigma,wyg)
@jit
def get_GNkernel_val_predict(x,y,wy0,wy1,wyg,d,sigma):
return wy0*kappa(x,y,d,sigma) + wy1*Delta_y_kappa(x,y,d,sigma) + D_wy_kappa(x,y,d,sigma,wyg)
def assembly_Theta(X_domain, w0, w1, wg, sigma):
# X_domain, dim: N_domain*d;
# w0 col vec: coefs of Diracs, dim: N_domain;
# w1 coefs of Laplacians, dim: N_domain
N_domain,d = onp.shape(X_domain)
Theta = onp.zeros((N_domain,N_domain))
XdXd0 = onp.reshape(onp.tile(X_domain,(1,N_domain)),(-1,d))
XdXd1 = onp.tile(X_domain,(N_domain,1))
arr_wx0 = onp.reshape(onp.tile(w0,(1,N_domain)),(-1,1))
arr_wx1 = onp.reshape(onp.tile(w1,(1,N_domain)),(-1,1))
arr_wxg = onp.reshape(onp.tile(wg,(1,N_domain)),(-1,d))
arr_wy0 = onp.tile(w0,(N_domain,1))
arr_wy1 = onp.tile(w1,(N_domain,1))
arr_wyg = onp.tile(wg,(N_domain,1))
val = vmap(lambda x,y,wx0,wx1,wxg,wy0,wy1,wyg: get_GNkernel_train(x,y,wx0,wx1,wxg,wy0,wy1,wyg,d,sigma))(XdXd0,XdXd1,arr_wx0,arr_wx1,arr_wxg,arr_wy0,arr_wy1,arr_wyg)
Theta[:N_domain,:N_domain] = onp.reshape(val, (N_domain,N_domain))
return Theta
def assembly_Theta_value_predict(X_infer, X_domain, w0, w1, wg, sigma):
N_infer, d = onp.shape(X_infer)
N_domain, _ = onp.shape(X_domain)
Theta = onp.zeros((N_infer,N_domain))
XiXd0 = onp.reshape(onp.tile(X_infer,(1,N_domain)),(-1,d))
XiXd1 = onp.tile(X_domain,(N_infer,1))
arr_wy0 = onp.tile(w0,(N_infer,1))
arr_wy1 = onp.tile(w1,(N_infer,1))
arr_wyg = onp.tile(wg,(N_infer,1))
val = vmap(lambda x,y,wy0,wy1,wyg: get_GNkernel_val_predict(x,y,wy0,wy1,wyg,d,sigma))(XiXd0,XiXd1,arr_wy0,arr_wy1,arr_wyg)
Theta[:N_infer,:N_domain] = onp.reshape(val, (N_infer,N_domain))
return Theta
def GPsolver(X_domain, sigma, nugget, sol_init, GN_step = 4):
# N_domain, d = onp.shape(X_domain)
sol = sol_init
rhs_f = vmap(f)(X_domain)[:,onp.newaxis]
wg = -vmap(grad_a)(X_domain) #size?
w1 = -vmap(a)(X_domain)[:,onp.newaxis]
time_begin = time()
for i in range(GN_step):
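# Gauss-Newton linearization of the nonlinear term: alpha*u^m is replaced by
# alpha*m*sol^(m-1)*u - alpha*(m-1)*sol^m, which is where the Dirac weights w0
# below and the extra alpha*(m-1)*sol^m term in the right-hand side come from.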
w0 = alpha*m*(sol**(m-1))
Theta_train = assembly_Theta(X_domain, w0, w1, wg, sigma)
Theta_test = assembly_Theta_value_predict(X_domain, X_domain, w0, w1, wg, sigma)
rhs = rhs_f + alpha*(m-1)*(sol**m)
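# Adaptive nugget: regularize Theta_train with a scaled copy of its own diagonal
# before solving the linear system.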
sol = Theta_test @ (onp.linalg.solve(Theta_train + nugget*onp.diag(onp.diag(Theta_train)),rhs))
total_mins = (time() - time_begin) / 60
logging.info(f'[Timer] GP iteration {i+1}/{GN_step}, finished in {total_mins:.2f} minutes')
return sol
def sample_points(N_domain, d, choice = 'random'):
X_domain = onp.random.uniform(low=0.0, high=1.0, size=(N_domain,d))
return X_domain
def logger(args, level = 'INFO'):
log_root = args.logroot + 'VarCoefEllipticTorus'
log_name = 'dim' + str(args.dim) + '_kernel' + str(args.kernel)
logdir = os.path.join(log_root, log_name)
os.makedirs(logdir, exist_ok=True)
log_para = 'alpha' + str(args.alpha) + 'm' + str(args.m) + 'sigma-scale' + str(args.sigma_scale) + '_Ndomain' + str(args.N_domain) + '_nugget' + str(args.nugget).replace(".","") + '_freqa' + str(args.freq_a) + '_frequ' + str(args.freq_u) + '_numexp' + str(args.num_exp)
date = str(datetime.datetime.now())
log_base = date[date.find("-"):date.rfind(".")].replace("-", "").replace(":", "").replace(" ", "_")
filename = log_para + '_' + log_base + '.log'
logging.basicConfig(level=logging.__dict__[level],
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler(logdir+'/'+filename),
logging.StreamHandler()]
)
return logdir+'/'+filename
def set_random_seeds(args):
random_seed = args.randomseed
random.seed(random_seed)
if __name__ == '__main__':
## get argument parser
args = get_parser()
filename = logger(args, level = 'INFO')
logging.info(f'argument is {args}')
@jit
def a(x):
return jnp.exp(jnp.sin(jnp.sum(args.freq_a * jnp.cos(2*jnp.pi*x))))
@jit
def grad_a(x):
return grad(a)(x)
@jit
def u(x):
return jnp.sin(jnp.sum(args.freq_u * jnp.cos(2*jnp.pi*x)))
@jit
def f(x):
return -a(x) * jnp.trace(hessian(u)(x))-jnp.sum(grad(a)(x) * grad(u)(x)) + alpha*(u(x)**m)
@jit
def g(x):
return u(x)
alpha = args.alpha
m = args.m
logging.info(f"[Equation] alpha: {alpha}, m: {m}")
logging.info(f"[Function] frequency of a: {args.freq_a}, frequency of u: {args.freq_u}")
if args.kernel == "periodic":
from kernels.periodic_kernel import *
d = args.dim
N_domain = args.N_domain
ratio = args.sigma_scale
sigma = ratio*onp.sqrt(d)
nugget = args.nugget
GN_step = args.GNsteps
logging.info(f'GN step: {GN_step}, d: {d}, sigma: {sigma}, number of points: N_domain {N_domain}, kernel: {args.kernel}, nugget: {args.nugget}')
logging.info(f"***** Total number of random experiments {args.num_exp} *****")
err_2_all = []
err_inf_all = []
for idx_exp in range(args.num_exp):
logging.info(f"[Experiment] number: {idx_exp}")
args.randomseed = idx_exp
set_random_seeds(args)
logging.info(f"[Seeds] random seeds: {args.randomseed}")
X_domain = sample_points(N_domain, d, choice = 'random')
sol_init = onp.random.randn(args.N_domain,1)
sol = GPsolver(X_domain, sigma, nugget, sol_init, GN_step = GN_step)
logging.info('[Calculating errs at collocation points ...]')
sol_truth = vmap(u)(X_domain)[:,onp.newaxis]
err = abs(sol-sol_truth)
err_2 = onp.linalg.norm(err,'fro')/(onp.sqrt(N_domain))
err_2_all.append(err_2)
err_inf = onp.max(err)
err_inf_all.append(err_inf)
logging.info(f'[L infinity error] {err_inf}')
logging.info(f'[L2 error] {err_2}')
logging.info(f'[Average L infinity error] {onp.mean(err_inf_all)}')
logging.info(f'[Average L2 error] {onp.mean(err_2_all)}')  # api: numpy.mean
import unittest
import numpy as np
import sys,os
import matplotlib.pyplot as plt
from mocu.utils.utils import *
from mocu.utils.costfunctions import *
from mocu.src.experimentaldesign import *
import warnings
class MocuTestMultivariate(unittest.TestCase):
"""
Class for tests involving mocu sampling with multivariable X/Y/Theta.
"""
@classmethod
def setUpClass(self):
warnings.filterwarnings("ignore")
# Prior knowledge: discrete ranges/distributions for (theta,psi)
theta = [1,3]
rho_theta = [1./3 , 2./3]
Theta = dict(zip(['theta','rho_theta'] , [theta,rho_theta]))
psi = np.linspace(-4,0,101)  # api: numpy.linspace
"""
Timing and Telemetry Data - :mod:`fastf1.core`
==============================================
The Fast-F1 core is a collection of functions and data objects for accessing
and analyzing F1 timing and telemetry data.
Data Objects
------------
All data is provided through the following data objects:
.. autosummary::
:nosignatures:
Weekend
Session
Laps
Lap
Telemetry
SessionResults
DriverResult
The :class:`Session` object is mainly used as an entry point for loading
timing data and telemetry data. The :class:`Session` can create a
:class:`Laps` object which contains all timing, track and session status
data for a whole session.
Usually you will be using :func:`get_session` to get a :class:`Session`
object.
The :class:`Laps` object holds detailed information about multiples laps.
The :class:`Lap` object holds the same information as :class:`Laps` but only
for one single lap. When selecting a single lap from a :class:`Laps` object,
an object of type :class:`Lap` will be returned.
Apart from only providing data, the :class:`Laps`, :class:`Lap` and
:class:`Telemetry` objects implement various methods for selecting and
analyzing specific parts of the data.
Functions
---------
.. autosummary::
:nosignatures:
get_session
get_round
"""
import collections
from functools import cached_property
import logging
import warnings
import numpy as np
import pandas as pd
import fastf1
from fastf1 import api, ergast
from fastf1.utils import recursive_dict_get, to_timedelta
logging.basicConfig(level=logging.INFO, style='{',
format="{module: <8} {levelname: >10} \t{message}")
D_LOOKUP = [[44, 'HAM', 'Mercedes'], [77, 'BOT', 'Mercedes'],
[55, 'SAI', 'Ferrari'], [16, 'LEC', 'Ferrari'],
[33, 'VER', 'Red Bull'], [11, 'PER', 'Red Bull'],
[3, 'RIC', 'McLaren'], [4, 'NOR', 'McLaren'],
[5, 'VET', '<NAME>'], [18, 'STR', 'Aston Martin'],
[14, 'ALO', 'Alpine'], [31, 'OCO', 'Alpine'],
[22, 'TSU', 'AlphaTauri'], [10, 'GAS', 'AlphaTauri'],
[47, 'MSC', 'Haas F1 Team'], [9, 'MAZ', 'Haas F1 Team'],
[7, 'RAI', '<NAME>o'], [99, 'GIO', 'Alfa Romeo'],
[6, 'LAT', 'Williams'], [63, 'RUS', 'Williams']]
def get_session(*args, **kwargs):
"""
.. deprecated:: 2.2
replaced by :func:`fastf1.get_session`
"""
# TODO remove
warnings.warn("`fastf1.core.get_session` has been deprecated and will be"
"removed in a future version.\n"
"Use `fastf1.get_session` instead.", FutureWarning)
from fastf1 import events
return events.get_session(*args, **kwargs)
def get_round(year, match):
"""
.. deprecated:: 2.2
will be removed without replacement;
Use :func:`fastf1.get_event` instead to get an
:class:`~fastf1.events.Event` object which provides
information including the round number for the event.
"""
# TODO remove
warnings.warn("_func:`fastf1.core.get_round` has been deprecated and will "
"be removed without replacement in a future version.\n"
"Use :func:`fastf1.get_event` instead to get an "
":class:`~fastf1.events.Event` object which provides "
"information including the round number for the event.",
FutureWarning)
from fastf1 import events
event = events.get_event(year, match)
return event.RoundNumber
class Telemetry(pd.DataFrame):
"""Multi-channel time series telemetry data
The object can contain multiple telemetry channels. Multiple telemetry objects with different channels
can be merged on time. Each telemetry channel is one dataframe column.
Partial telemetry (e.g. for one lap only) can be obtained through various methods for slicing the data.
Additionally, methods for adding common computed data channels are available.
The following telemetry channels existed in the original API data:
- **Car data**:
- `Speed` (float): Car speed
- `RPM` (int): Car RPM
- `nGear` (int): Car gear number
- `Throttle` (float): 0-100 Throttle pedal pressure
- `Brake` (float): 0-100 Brake pedal pressure
- `DRS` (int): DRS indicator (See :meth:`car_data` for more info)
- **Position data**:
- `X` (float): X position
- `Y` (float): Y position
- `Z` (float): Z position
- `Status` (string): Flag - OffTrack/OnTrack
- **For both of the above**:
- `Time` (timedelta): Time (0 is start of the data slice)
- `SessionTime` (timedelta): Time elapsed since the start of the session
- `Date` (datetime): The full date + time at which this sample was created
- `Source` (str): Flag indicating how this sample was created:
- 'car': sample from original api car data
- 'pos': sample from original api position data
- 'interpolated': this sample was artificially created; all values are computed/interpolated
Example:
A sample's source is indicated as 'car'. It contains
values for speed, rpm and x, y, z coordinates.
Originally, this sample (with its timestamp) was received
when loading car data.
This means that the speed and rpm value are original
values as received from the api. The coordinates are
interpolated for this sample.
All methods of :class:`Telemetry` which resample or
interpolate data will preserve and adjust the source flag
correctly when modifying data.
Through merging/slicing it is possible to obtain any combination of telemetry channels!
The following additional computed data channels can be added:
- Distance driven between two samples:
:meth:`add_differential_distance`
- Distance driven since the first sample:
:meth:`add_distance`
- Relative distance driven since the first sample:
:meth:`add_relative_distance`
- Distance to driver ahead and car number of said driver:
:meth:`add_driver_ahead`
.. note:: See the separate explanation concerning the various definitions of 'Time' for more information on the
three date and time related channels: :ref:`time-explanation`
Slicing this class will return :class:`Telemetry` again for slices containing multiple rows. Single rows will be
returned as :class:`pandas.Series`.
Args:
*args (any): passed through to `pandas.DataFrame` superclass
session (:class:`Session`): Instance of associated session object. Required for full functionality!
driver (str): Driver number as string. Required for full functionality!
**kwargs (any): passed through to `pandas.DataFrame` superclass
"""
TELEMETRY_FREQUENCY = 'original'
"""Defines the frequency used when resampling the telemetry data. Either
the string ``'original'`` or an integer to specify a frequency in Hz."""
_CHANNELS = {
'X': {'type': 'continuous', 'missing': 'quadratic'},
'Y': {'type': 'continuous', 'missing': 'quadratic'},
'Z': {'type': 'continuous', 'missing': 'quadratic'},
'Status': {'type': 'discrete'},
'Speed': {'type': 'continuous', 'missing': 'linear'}, # linear is often required as quadratic overshoots
'RPM': {'type': 'continuous', 'missing': 'linear'}, # (on sudden changes like sudden pedal application)
'Throttle': {'type': 'continuous', 'missing': 'linear'},
'Brake': {'type': 'discrete'},
'DRS': {'type': 'discrete'},
'nGear': {'type': 'discrete'},
'Source': {'type': 'excluded'}, # special case, custom handling
'Date': {'type': 'excluded'}, # special case, used as the index during resampling
'Time': {'type': 'excluded'}, # special case, Time/SessionTime recalculated from 'Date'
'SessionTime': {'type': 'excluded'},
'Distance': {'type': 'continuous', 'missing': 'quadratic'},
'RelativeDistance': {'type': 'continuous', 'missing': 'quadratic'},
'DifferentialDistance': {'type': 'continuous', 'missing': 'quadratic'},
'DriverAhead': {'type': 'discrete'},
'DistanceToDriverAhead': {'type': 'continuous', 'missing': 'linear'}
}
"""Known telemetry channels which are supported by default"""
_metadata = ['session', 'driver']
def __init__(self, *args, session=None, driver=None, **kwargs):
super().__init__(*args, **kwargs)
self.session = session
self.driver = driver
@property
def _constructor(self):
def _new(*args, **kwargs):
return Telemetry(*args, **kwargs).__finalize__(self)
return _new
@property
def base_class_view(self):
"""For a nicer debugging experience; can view DataFrame through this property in various IDEs"""
return pd.DataFrame(self)
def join(self, *args, **kwargs):
"""Wraps :mod:`pandas.DataFrame.join` and adds metadata propagation.
When calling `self.join` metadata will be propagated from self to the joined dataframe.
"""
meta = dict()
for var in self._metadata:
meta[var] = getattr(self, var)
ret = super().join(*args, **kwargs)
for var, val in meta.items():
setattr(ret, var, val)
return ret
def merge(self, *args, **kwargs):
"""Wraps :mod:`pandas.DataFrame.merge` and adds metadata propagation.
When calling `self.merge` metadata will be propagated from self to the merged dataframe.
"""
meta = dict()
for var in self._metadata:
meta[var] = getattr(self, var)
ret = super().merge(*args, **kwargs)
for var, val in meta.items():
setattr(ret, var, val)
return ret
def slice_by_mask(self, mask, pad=0, pad_side='both'):
"""Slice self using a boolean array as a mask.
Args:
mask (array-like): Array of boolean values with the same length as self
pad (int): Number of samples used for padding the sliced data
pad_side (str): Where to pad the data; possible options: 'both', 'before', 'after'
Returns:
:class:`Telemetry`
"""
if pad:
if pad_side in ('both', 'before'):
i_left_pad = max(0, np.min(np.where(mask)) - pad)
else:
i_left_pad = np.min(np.where(mask))
if pad_side in ('both', 'after'):
i_right_pad = min(len(mask), np.max(np.where(mask)) + pad)
else:
i_right_pad = np.max(np.where(mask))
mask[i_left_pad: i_right_pad + 1] = True
data_slice = self.loc[mask].copy()
return data_slice
def slice_by_lap(self, ref_laps, pad=0, pad_side='both', interpolate_edges=False):
"""Slice self to only include data from the provided lap or laps.
.. note:: Self needs to contain a 'SessionTime' column.
.. note:: When slicing with an instance of :class:`Laps` as a reference, the data will be sliced by first and
last lap. Missing laps in between will not be considered and data for these will still be included in
the sliced result.
Args:
ref_laps (Lap or Laps): The lap/laps by which to slice self
pad (int): Number of samples used for padding the sliced data
pad_side (str): Where to pad the data; possible options: 'both', 'before', 'after'
interpolate_edges (bool): Add an interpolated sample at the beginning and end to exactly
match the provided time window.
Returns:
:class:`Telemetry`
"""
if isinstance(ref_laps, Laps) and len(ref_laps) > 1:
if 'DriverNumber' not in ref_laps.columns:
ValueError("Laps is missing 'DriverNumber'. Cannot return telemetry for unknown driver.")
if not len(ref_laps['DriverNumber'].unique()) <= 1:
raise ValueError("Cannot create telemetry for multiple drivers at once!")
end_time = ref_laps['Time'].max()
start_time = ref_laps['LapStartTime'].min()
elif isinstance(ref_laps, (Lap, Laps)):
if isinstance(ref_laps, Laps): # one lap in Laps
ref_laps = ref_laps.iloc[0] # needs to be handled as a single lap
if 'DriverNumber' not in ref_laps.index:
ValueError("Lap is missing 'DriverNumber'. Cannot return telemetry for unknown driver.")
end_time = ref_laps['Time']
start_time = ref_laps['LapStartTime']
else:
raise TypeError("Attribute 'ref_laps' needs to be an instance of `Lap` or `Laps`")
return self.slice_by_time(start_time, end_time, pad, pad_side, interpolate_edges)
def slice_by_time(self, start_time, end_time, pad=0, pad_side='both', interpolate_edges=False):
"""Slice self to only include data in a specific time frame.
.. note:: Self needs to contain a 'SessionTime' column. Slicing by time use the 'SessionTime' as its reference.
Args:
start_time (Timedelta): Start of the section
end_time (Timedelta): End of the section
pad (int): Number of samples used for padding the sliced data
pad_side (str): Where to pad the data; possible options: 'both', 'before', 'after'
interpolate_edges (bool): Add an interpolated sample at the beginning and end to exactly
match the provided time window.
Returns:
:class:`Telemetry`
"""
if interpolate_edges:
edges = Telemetry({'SessionTime': (start_time, end_time),
'Date': (start_time + self.session.t0_date, end_time + self.session.t0_date)},
session=self.session)
d = self.merge_channels(edges)
else:
d = self.copy() # TODO no copy?
sel = ((d['SessionTime'] <= end_time) & (d['SessionTime'] >= start_time))
if np.any(sel):
data_slice = d.slice_by_mask(sel, pad, pad_side)
if 'Time' in data_slice.columns:
# shift time to 0 so laps can overlap
data_slice.loc[:, 'Time'] = data_slice['SessionTime'] - start_time
return data_slice
return Telemetry()
def merge_channels(self, other, frequency=None):
"""Merge telemetry objects containing different telemetry channels.
The two objects don't need to have a common time base. The data will be merged, optionally resampled and
missing values will be interpolated.
:attr:`Telemetry.TELEMETRY_FREQUENCY` determines if and how the data is resampled. This can be overridden using
the `frequency` keyword of this method.
Merging and resampling:
If the frequency is 'original', data will not be resampled. The two objects will be merged and all
timestamps of both objects are kept. Values will be interpolated so that all telemetry channels contain
valid data for all timestamps. This is the default and recommended option.
If the frequency is specified as an integer in Hz the data will be merged as before. After that, the merged
time base will be resampled from the first value on at the specified frequency. Afterwards, the data will
be interpolated to fit the new time base. This means that usually most if not all values of the data will
be interpolated values. This is detrimental for overall accuracy.
Interpolation:
Missing values after merging will be interpolated for all known telemetry channels using
:meth:`fill_missing`. Different interpolation methods are used depending on what kind of data the channel
contains. For example, forward fill is used to interpolate 'nGear' while linear interpolation is used
for 'RPM' interpolation.
.. note :: Unknown telemetry channels will be merged but missing values will not be interpolated. This can
either be done manually or a custom telemetry channel can be added using :meth:`register_new_channel`.
.. note :: Do not resample data multiple times. Always resample based on the original data
to preserve accuracy
Args:
other (:class:`Telemetry` or :class:`pandas.DataFrame`): Object to be merged with self
frequency (str or int): Optional frequency to overwrite global preset. (Either string 'original' or integer
for a frequency in Hz)
Returns:
:class:`Telemetry`
"""
# merge the data and interpolate missing; 'Date' needs to be the index
data = self.set_index('Date')
other = other.set_index('Date')
# save dtypes before merging so they can be restored after merging
# necessary for example because merging produces NaN values which would cause an int column to become float
# but it can be converted back to int after interpolating missing values
dtype_map = dict()
for df in data, other:
for col in df.columns:
if col not in dtype_map.keys():
dtype_map[col] = df[col].dtype
# Exclude columns existing on both dataframes from one dataframe before merging (cannot merge with duplicates)
on_both_columns = set(other.columns).intersection(set(data.columns))
merged = other.merge(data[data.columns.difference(on_both_columns, sort=False)],
how='outer', left_index=True, right_index=True, sort=True)
# now use the previously excluded columns to update the missing values in the merged dataframe
for col in on_both_columns:
merged[col].update(data[col])
if 'Driver' in merged.columns and len(merged['Driver'].unique()) > 1:
raise ValueError("Cannot merge multiple drivers")
if not frequency:
frequency = data.TELEMETRY_FREQUENCY
i = data.get_first_non_zero_time_index()
if i is None:
raise ValueError("No valid 'Time' data. Cannot resample!")
ref_date = merged.index[i]
# data needs to be resampled/interpolated differently, depending on what kind of data it is
# how to handle which column is defined in self._CHANNELS
if frequency == 'original':
# no resampling but still interpolation due to merging
merged = merged.fill_missing()
merged = merged.reset_index().rename(columns={'index': 'Date'}) # make 'Date' a column again
else:
frq = f'{1 / frequency}S'
resampled_columns = dict()
for ch in self._CHANNELS.keys():
if ch not in merged.columns:
continue
sig_type = self._CHANNELS[ch]['type']
if sig_type == 'continuous':
missing = self._CHANNELS[ch]['missing']
res = merged.loc[:, ch] \
.resample(frq, origin=ref_date).mean().interpolate(method=missing, fill_value='extrapolate')
elif sig_type == 'discrete':
res = merged.loc[:, ch].resample(frq, origin=ref_date).ffill().ffill().bfill()
# first ffill is a method of the resampler object and will ONLY ffill values created during
# resampling but not already existing NaN values. NaN values already existed because of merging,
# therefore call ffill a second time as a method of the returned series to fill these too
# only use bfill after ffill to fix first row
else:
continue
resampled_columns[ch] = res
res_source = merged.loc[:, 'Source'].resample(frq, origin=ref_date).asfreq().fillna(value='interpolation')
resampled_columns['Source'] = res_source
# join resampled columns and make 'Date' a column again
merged = Telemetry(resampled_columns, session=self.session).reset_index().rename(columns={'index': 'Date'})
# recalculate the time columns
merged['SessionTime'] = merged['Date'] - self.session.t0_date
merged['Time'] = merged['SessionTime'] - merged['SessionTime'].iloc[0]
# restore data types from before merging
for col in dtype_map.keys():
try:
merged.loc[:, col] = merged.loc[:, col].astype(dtype_map[col])
except ValueError:
logging.warning(f"Failed to preserve data type for column '{col}' while merging telemetry.")
return merged
def resample_channels(self, rule=None, new_date_ref=None, **kwargs):
"""Resample telemetry data.
Convenience method for frequency conversion and resampling. Up and down sampling of data is supported.
'Date' and 'SessionTime' need to exist in the data. 'Date' is used as the main time reference.
There are two ways to use this method:
- Usage like :meth:`pandas.DataFrame.resample`: In this case you need to specify the 'rule' for resampling
and any additional keywords will be passed on to :meth:`pandas.Series.resample` to create a new time
reference. See the pandas method to see which options are available.
- using the 'new_date_ref' keyword a :class:`pandas.Series` containing new values for date
(dtype :class:`pandas.Timestamp`) can be provided. The existing data will be resampled onto this new
time reference.
Args:
rule (optional, str): Resampling rule for :meth:`pandas.Series.resample`
new_date_ref (optional, pandas.Series): New custom Series of reference dates
**kwargs (optional, any): Only in combination with 'rule'; additional parameters for
:meth:`pandas.Series.resample`
"""
if rule is not None and new_date_ref is not None:
raise ValueError("You can only specify one of 'rule' or 'new_index'")
if rule is None and new_date_ref is None:
raise ValueError("You need to specify either 'rule' or 'new_index'")
if new_date_ref is None:
st = pd.Series(index=pd.DatetimeIndex(self['Date']), dtype=int).resample(rule, **kwargs).asfreq()
new_date_ref = pd.Series(st.index)
new_tel = Telemetry(session=self.session, driver=self.driver, columns=self.columns)
new_tel.loc[:, 'Date'] = new_date_ref
combined_tel = self.merge_channels(Telemetry({'Date': new_date_ref}, session=self.session))
mask = combined_tel['Date'].isin(new_date_ref)
new_tel = combined_tel.loc[mask, :]
return new_tel
def fill_missing(self):
"""Calculate missing values in self.
Only known telemetry channels will be interpolated. Unknown channels are skipped and returned unmodified.
Interpolation will be done according to the default mapping and according to options specified for
registered custom channels. For example:
| Linear interpolation will be used for continuous values (Speed, RPM)
| Forward-fill will be used for discrete values (Gear, DRS, ...)
See :meth:`register_new_channel` for adding custom channels.
"""
ret = self.copy()
for ch in self._CHANNELS.keys():
if ch not in self.columns:
continue
sig_type = self._CHANNELS[ch]['type']
if sig_type == 'continuous': # yes, this is necessary to prevent pandas from crashing
if ret[ch].dtype == 'object':
warnings.warn("Interpolation not possible for telemetry "
"channel because dtype is 'object'")
missing = self._CHANNELS[ch]['missing']
ret.loc[:, ch] = ret.loc[:, ch] \
.interpolate(method=missing, limit_direction='both', fill_value='extrapolate')
elif sig_type == 'discrete':
ret.loc[:, ch] = ret.loc[:, ch].ffill().ffill().bfill()
# first ffill is a method of the resampler object and will ONLY ffill values created during
# resampling but not already existing NaN values. NaN values already existed because of merging,
# therefore call ffill a second time as a method of the returned series to fill these too
# only use bfill after ffill to fix first row
if 'Source' in ret.columns:
ret.loc[:, 'Source'] = ret.loc[:, 'Source'].fillna(value='interpolation')
if 'Date' in self.columns:
ret['SessionTime'] = ret['Date'] - self.session.t0_date
elif isinstance(ret.index, pd.DatetimeIndex):
ret['SessionTime'] = ret.index - self.session.t0_date # assume index is Date
ret['Time'] = ret['SessionTime'] - ret['SessionTime'].iloc[0]
return ret
@classmethod
def register_new_channel(cls, name, signal_type, interpolation_method=None):
"""Register a custom telemetry channel.
Registered telemetry channels are automatically interpolated when merging or resampling data.
Args:
name (str): Telemetry channel/column name
signal_type (str): One of three possible signal types:
- 'continuous': Speed, RPM, Distance, ...
- 'discrete': DRS, nGear, status values, ...
- 'excluded': Data channel will be ignored during resampling
interpolation_method (optional, str): The interpolation method
which should be used. Can only be specified and is required
in combination with ``signal_type='continuous'``. See
:meth:`pandas.Series.interpolate` for possible interpolation
methods.
"""
if signal_type not in ('discrete', 'continuous', 'excluded'):
raise ValueError(f"Unknown signal type {signal_type}.")
if signal_type == 'continuous' and interpolation_method is None:
raise ValueError("signal_type='continuous' requires interpolation_method to be specified.")
cls._CHANNELS[name] = {'type': signal_type, 'missing': interpolation_method}
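# Illustrative call (the channel name is hypothetical):
#   Telemetry.register_new_channel('WaterTemp', 'continuous', interpolation_method='linear')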
def get_first_non_zero_time_index(self):
"""Return the first index at which the 'Time' value is not zero or NA/NaT"""
# find first row where time is not zero; usually this is the first row but sometimes.....
i_arr = np.where((self['Time'] != pd.Timedelta(0)) & ~pd.isna(self['Time']))[0]
if i_arr.size != 0:
return np.min(i_arr)
return None
def add_differential_distance(self, drop_existing=True):
"""Add column 'DifferentialDistance' to self.
This column contains the distance driven between subsequent samples.
Calls :meth:`calculate_differential_distance` and joins the result
with self.
Args:
drop_existing (bool): Drop and recalculate column if it already exists
Returns:
:class:`Telemetry`: self joined with new column or self if column exists and `drop_existing` is False.
"""
if ('DifferentialDistance' in self.columns) and not drop_existing:
return self
new_dif_dist = pd.DataFrame(
{'DifferentialDistance': self.calculate_differential_distance()}
)
if 'DifferentialDistance' in self.columns:
return self.drop(labels='DifferentialDistance', axis=1) \
.join(new_dif_dist, how='outer')
return self.join(new_dif_dist, how='outer')
def add_distance(self, drop_existing=True):
"""Add column 'Distance' to self.
This column contains the distance driven since the first sample of self in meters.
The data is produced by integrating the differential distance between subsequent laps.
You should not apply this function to telemetry of many laps simultaneously to reduce integration error.
Instead apply it only to single laps or few laps at a time!
Calls :meth:`integrate_distance` and joins the result with self.
Args:
drop_existing (bool): Drop and recalculate column if it already exists
Returns:
:class:`Telemetry`: self joined with new column or self if column exists and `drop_existing` is False.
"""
if ('Distance' in self.columns) and not drop_existing:
return self
new_dist = pd.DataFrame({'Distance': self.integrate_distance()})
if 'Distance' in self.columns:
return self.drop(labels='Distance', axis=1).join(new_dist, how='outer')
return self.join(new_dist, how='outer')
def add_relative_distance(self, drop_existing=True):
"""Add column 'RelativeDistance' to self.
This column contains the distance driven since the first sample as
a floating point number where ``0.0`` is the first sample of self
and ``1.0`` is the last sample.
This is calculated the same way as 'Distance' (see: :meth:`add_distance`). The same warnings apply.
Args:
drop_existing (bool): Drop and recalculate column if it already exists
Returns:
:class:`Telemetry`: self joined with new column or self if column exists and `drop_existing` is False.
"""
if 'RelativeDistance' in self.columns:
if drop_existing:
d = self.drop(labels='RelativeDistance', axis=1)
else:
return self
else:
d = self
if 'Distance' in d.columns:
rel_dist = d.loc[:, 'Distance'] / d.loc[:, 'Distance'].iloc[-1]
else:
dist = d.integrate_distance()
rel_dist = dist / dist.iloc[-1]
return d.join(pd.DataFrame({'RelativeDistance': rel_dist}), how='outer')
def add_driver_ahead(self, drop_existing=True):
"""Add column 'DriverAhead' and 'DistanceToDriverAhead' to self.
DriverAhead: Driver number of the driver ahead as string
DistanceToDriverAhead: Distance to next car ahead in meters
.. note:: Cars in the pit lane are currently not excluded from the data. They will show up when overtaken on
pit straight even if they're not technically in front of the car. A fix for this is TBD with other
improvements.
This should only be applied to data of single laps or few laps at a time to reduce integration error.
For longer time spans it should be applied per lap and the laps
should be merged afterwards.
If you absolutely need to apply it to a whole session, use the legacy implementation. Note that data of
the legacy implementation will be considerably less smooth. (see :mod:`fastf1.legacy`)
Calls :meth:`calculate_driver_ahead` and joins the result with self.
Args:
drop_existing (bool): Drop and recalculate column if it already exists
Returns:
:class:`Telemetry`: self joined with new column or self if column exists and `drop_existing` is False.
"""
if 'DriverAhead' in self.columns and 'DistanceToDriverAhead' in self.columns:
if drop_existing:
d = self.drop(labels='DriverAhead', axis=1) \
.drop(labels='DistanceToDriverAhead', axis=1)
else:
return self
else:
d = self
drv_ahead, dist = self.calculate_driver_ahead()
return d.join(pd.DataFrame({'DriverAhead': drv_ahead,
'DistanceToDriverAhead': dist},
index=d.index), how='outer')
def calculate_differential_distance(self):
"""Calculate the distance between subsequent samples of self.
Distance is in meters
Returns:
:class:`pandas.Series`
"""
if not all([col in self.columns for col in ('Speed', 'Time')]):
raise ValueError("Telemetry does not contain required channels 'Time' and 'Speed'.")
if self.size != 0:
dt = self['Time'].dt.total_seconds().diff()
dt.iloc[0] = self['Time'].iloc[0].total_seconds()
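# 'Speed' is in km/h; dividing by 3.6 converts to m/s so that speed * dt gives meters.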
ds = self['Speed'] / 3.6 * dt
return ds
else:
return pd.Series()
def integrate_distance(self):
"""Return the distance driven since the first sample of self.
Distance is in meters. The data is produced by integration. Integration error will stack up when used for
long slices of data. This should therefore only be used for data of single laps or few laps at a time.
Returns:
:class:`pd.Series`
"""
ds = self.calculate_differential_distance()
if not ds.empty:
return ds.cumsum()
else:
return pd.Series()
def calculate_driver_ahead(self):
"""Calculate driver ahead and distance to driver ahead.
Driver ahead: Driver number of the driver ahead as string
Distance to driver ahead: Distance to the car ahead in meters
.. note:: This gives a smoother/cleaner result than the legacy implementation but WILL introduce
integration error when used over long distances (more than one or two laps may sometimes be considered
a long distance). If in doubt, do sanity checks (against the legacy version or in another way).
Returns:
driver ahead (numpy.array), distance to driver ahead (numpy.array)
"""
t_start = self['SessionTime'].iloc[0]
t_end = self['SessionTime'].iloc[-1]
combined_distance = pd.DataFrame()
# Assume the following lap profile as a catch all for all drivers
#
# |------ Lap before ------|------ n Laps between ------|------ Lap after ------|
# ^ ^
# t_start t_end
# Integration of the distance needs to start at the finish line so that there exists a common zero point
# Therefore find the "lap before" which is the lap during which the telemetry slice starts and the "lap after"
# where the telemetry slice ends
# Integrate distance over all relevant laps and slice by t_start and t_end after to get the interesting
# part only
own_laps = self.session.laps[self.session.laps['DriverNumber'] == self.driver]
first_lap_number = (own_laps[own_laps['LapStartTime'] <= t_start])['LapNumber'].iloc[-1]
for drv in self.session.drivers:
# find correct first relevant lap; very important for correct zero point in distance
drv_laps = self.session.laps[self.session.laps['DriverNumber'] == drv]
if drv_laps.empty: # Only include drivers who participated in this session
continue
drv_laps_before = drv_laps[(drv_laps['LapStartTime'] <= t_start)]
if not drv_laps_before.empty:
lap_n_before = drv_laps_before['LapNumber'].iloc[-1]
if lap_n_before < first_lap_number:
# driver is behind on track and therefore will cross the finish line AFTER self
# therefore above check for LapStartTime <= t_start is wrong
# the first relevant lap is the first lap with LapStartTime > t_start which is lap_n_before += 1
lap_n_before += 1
else:
lap_n_before = min(drv_laps['LapNumber'])
# find last relevant lap so as to not do too much unnecessary calculation later
drv_laps_after = drv_laps[drv_laps['Time'] >= t_end]
lap_n_after = drv_laps_after['LapNumber'].iloc[0] \
if not drv_laps_after.empty \
else max(drv_laps['LapNumber'])
relevant_laps = drv_laps[(drv_laps['LapNumber'] >= lap_n_before) & (drv_laps['LapNumber'] <= lap_n_after)]
if relevant_laps.empty:
continue
# first slice by lap and calculate distance, so that distance is zero at finish line
drv_tel = self.session.car_data[drv].slice_by_lap(relevant_laps).add_distance() \
.loc[:, ('SessionTime', 'Distance')].rename(columns={'Distance': drv})
# now slice again by time to only get the relevant time frame
drv_tel = drv_tel.slice_by_time(t_start, t_end)
if drv_tel.empty:
continue
drv_tel = drv_tel.set_index('SessionTime')
combined_distance = combined_distance.join(drv_tel, how='outer')
# create driver map for array
drv_map = combined_distance.loc[:, combined_distance.columns != self.driver].columns.to_numpy()
own_dst = combined_distance.loc[:, self.driver].to_numpy()
other_dst = combined_distance.loc[:, combined_distance.columns != self.driver].to_numpy()
# replace distance with nan if it does not change
# prepend first row before diff so that array size stays the same; but missing first sample because of that
other_dst[np.diff(other_dst, n=1, axis=0, prepend=other_dst[0, :].reshape((1, -1))) == 0] = np.nan
# resize own_dst to match shape of other_dst for easy subtraction
own_dst = np.repeat(own_dst.reshape((-1, 1)), other_dst.shape[1], axis=1)
delta_dst = other_dst - own_dst
delta_dst[np.isnan(delta_dst)] = np.inf # substitute nan with inf, else nan is returned as min
delta_dst[delta_dst < 0] = np.inf # remove cars behind so that neg numbers are not returned as min
index_ahead = np.argmin(delta_dst, axis=1)
drv_ahead = np.array([drv_map[i] for i in index_ahead])
drv_ahead[np.all(delta_dst == np.inf, axis=1)] = '' # remove driver from all inf rows
dist_to_drv_ahead = np.array([delta_dst[i, index_ahead[i]] for i in range(len(index_ahead))])
dist_to_drv_ahead[np.all(delta_dst == np.inf, axis=1)]  # api: numpy.all
import anndata
import multiprocessing as mp
import numpy as np
import os
import pandas as pd
import pytest
import rpy2.robjects.packages
import rpy2.robjects.pandas2ri
import scipy.sparse as ss
import scipy.stats as st
import scmodes
import scmodes.benchmark.gof
from .fixtures import test_data
ashr = rpy2.robjects.packages.importr('ashr')
rpy2.robjects.pandas2ri.activate()
def test__gof():
np.random.seed(0)
mu = 10
px = st.poisson(mu=mu)
x = px.rvs(size=100)
d, p = scmodes.benchmark.gof._gof(x, cdf=px.cdf, pmf=px.pmf)
assert d >= 0
assert 0 <= p <= 1
def test__rpp():
np.random.seed(0)
mu = 10
px = st.poisson(mu=mu)
x = px.rvs(size=100)
F = px.cdf(x - 1)
f = px.pmf(x)
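# _rpp presumably computes randomized predictive p-values, F(x - 1) + u * f(x) with
# u ~ Uniform(0, 1) (the usual randomized PIT for discrete data), which should be
# roughly uniform under the fitted model.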
vals = scmodes.benchmark.gof._rpp(F, f)
assert vals.shape == x.shape
def test_gof_point(test_data):
x = test_data
res = scmodes.benchmark.gof_point(x)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
def test_gamma_cdf():
np.random.seed(0)
x = st.nbinom(n=10, p=.1).rvs(size=100)
Fx = scmodes.benchmark.gof._zig_cdf(x, size=1, log_mu=-5, log_phi=-1)
assert Fx.shape == x.shape
assert np.isfinite(Fx).all()
assert (Fx >= 0).all()
assert (Fx <= 1).all()
def test_zig_cdf():
np.random.seed(0)
x = st.nbinom(n=10, p=.1).rvs(size=100)
Fx = scmodes.benchmark.gof._zig_cdf(x, size=1, log_mu=-5, log_phi=-1, logodds=-3)
assert Fx.shape == x.shape
assert (Fx >= 0).all()
assert (Fx <= 1).all()
def test_zig_pmf_cdf():
x = np.arange(50)
import scmodes.benchmark.gof
size = 1000
log_mu=-5
log_phi=-1
logodds=-1
Fx = scmodes.benchmark.gof._zig_cdf(x, size=size, log_mu=log_mu, log_phi=log_phi, logodds=logodds)
Fx_1 = scmodes.benchmark.gof._zig_cdf(x - 1, size=size, log_mu=log_mu, log_phi=log_phi, logodds=logodds)
fx = scmodes.benchmark.gof._zig_pmf(x, size=size, log_mu=log_mu, log_phi=log_phi, logodds=logodds)
assert np.isclose(Fx - Fx_1, fx).all()
def test_gof_gamma(test_data):
x = test_data
res = scmodes.benchmark.gof_gamma(x)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
def test_gof_gamma_size(test_data):
x = test_data
s = 1 + np.median(x, axis=1).reshape(-1, 1)
res = scmodes.benchmark.gof_gamma(x, s=s, lr=1e-3)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
def test_gof_gamma_adata(test_data):
x = test_data
y = anndata.AnnData(x.values, obs=pd.DataFrame(x.index), var=pd.DataFrame(x.columns))
res = scmodes.benchmark.gof_gamma(y, lr=1e-3)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
assert (res.index == x.columns).all()
def test_gof_gamma_adata_key(test_data):
x = test_data
y = anndata.AnnData(x.values, obs=pd.DataFrame(x.index), var=pd.DataFrame(x.columns))
res = scmodes.benchmark.gof_gamma(y, key=0, lr=1e-3)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
assert (res.index == x.columns).all()
def test_gof_zig(test_data):
x = test_data
res = scmodes.benchmark.gof_zig(x)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
def test_gof_zig_size(test_data):
x = test_data
s = 1 + np.median(x, axis=1).reshape(-1, 1)
res = scmodes.benchmark.gof_zig(x, s=s, lr=1e-3)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
def test_gof_zig_adata(test_data):
x = test_data
y = anndata.AnnData(x.values, obs=pd.DataFrame(x.index), var=pd.DataFrame(x.columns))
res = scmodes.benchmark.gof_zig(y, lr=1e-3)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
assert (res.index == x.columns).all()
def test_gof_zig_adata_key(test_data):
x = test_data
y = anndata.AnnData(x.values, obs=pd.DataFrame(x.index), var=pd.DataFrame(x.columns))
res = scmodes.benchmark.gof_zig(y, key=0, lr=1e-3)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
assert (res.index == x.columns).all()
def test__ash_pmf(test_data):
x = test_data
gene = 'ENSG00000116251'
xj = x[gene]
size = x.sum(axis=1)
lam = xj / size
fit = ashr.ash_workhorse(
# these are ignored by ash
pd.Series(np.zeros(xj.shape)),
1,
outputlevel=pd.Series(['fitted_g', 'data']),
# numpy2ri doesn't DTRT, so we need to use pandas
lik=ashr.lik_pois(y=xj, scale=size, link='identity'),
mixsd=pd.Series(np.geomspace(lam.min() + 1e-8, lam.max(), 25)),
mode=pd.Series([lam.min(), lam.max()]))
res = scmodes.benchmark.gof._ash_pmf(xj, fit)
assert res.shape == xj.shape
assert np.isfinite(res).all()
assert (res >= 0).all()
assert (res <= 1).all()
def test__ash_cdf(test_data):
x = test_data
gene = 'ENSG00000116251'
xj = x[gene]
size = x.sum(axis=1)
lam = xj / size
fit = ashr.ash_workhorse(
# these are ignored by ash
pd.Series(np.zeros(xj.shape)),
1,
outputlevel=pd.Series(['fitted_g', 'data']),
# numpy2ri doesn't DTRT, so we need to use pandas
lik=ashr.lik_pois(y=xj, scale=size, link='identity'),
mixsd=pd.Series(np.geomspace(lam.min() + 1e-8, lam.max(), 25)),
mode=pd.Series([lam.min(), lam.max()]))
res = scmodes.benchmark.gof._ash_cdf(xj, fit, s=size)
assert np.isfinite(res).all()
assert (res >= 0).all()
assert (res <= 1).all()
def test__ash_cdf_pmf(test_data):
x = test_data
gene = 'ENSG00000116251'
xj = x[gene]
size = x.sum(axis=1)
lam = xj / size
fit = ashr.ash_workhorse(
# these are ignored by ash
pd.Series(np.zeros(xj.shape)),
1,
outputlevel=pd.Series(['fitted_g', 'data']),
# numpy2ri doesn't DTRT, so we need to use pandas
lik=ashr.lik_pois(y=xj, scale=size, link='identity'),
mixsd=pd.Series(np.geomspace(lam.min() + 1e-8, lam.max(), 25)),
mode=pd.Series([lam.min(), lam.max()]))
Fx = scmodes.benchmark.gof._ash_cdf(xj, fit, s=size)
Fx_1 = scmodes.benchmark.gof._ash_cdf(xj - 1, fit, s=size)
fx = scmodes.benchmark.gof._ash_pmf(xj, fit)
assert np.isclose(Fx - Fx_1, fx).all()
def test__gof_unimodal(test_data):
x = test_data
gene = 'ENSG00000116251'
k, d, p = scmodes.benchmark.gof._gof_unimodal(gene, x[gene], x.sum(axis=1))
assert k == gene
assert np.isfinite(d)
assert d >= 0
assert np.isfinite(p)
assert 0 <= p <= 1
def test_gof_unimodal(test_data):
x = test_data
res = scmodes.benchmark.gof_unimodal(x)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
def test_gof_unimodal_size(test_data):
x = test_data
s = x.sum(axis=1)
res = scmodes.benchmark.gof_unimodal(x, s=s)
assert res.shape[0] == x.shape[1]
assert np.isfinite(res['stat']).all()
assert np.isfinite(res['p']).all()
def test__point_expfam_cdf(test_data):
x = test_data
s = x.sum(axis=1)
xj = x['ENSG00000116251']
res = scmodes.ebpm.ebpm_point_expfam(xj, s)
F = scmodes.benchmark.gof._point_expfam_cdf(xj.values.ravel(), res=res, size=s)
assert np.isfinite(F).all()
assert (F >= 0).all()
assert (F <= 1).all()
def test__point_expfam_pmf(test_data):
x = test_data
s = x.sum(axis=1)
xj = x['ENSG00000116251']
res = scmodes.ebpm.ebpm_point_expfam(xj, s)
f = scmodes.benchmark.gof._point_expfam_pmf(xj.values.ravel(), res=res, size=s)
assert np.isfinite(f).all()
assert (f >= 0).all()
assert (f <= 1).all()
def test__point_expfam_cdf_pmf(test_data):
x = test_data
s = x.sum(axis=1)
xj = x['ENSG00000116251']
res = scmodes.ebpm.ebpm_point_expfam(xj, s)
F = scmodes.benchmark.gof._point_expfam_cdf(xj.values.ravel(), res=res, size=s)
F_1 = scmodes.benchmark.gof._point_expfam_cdf(xj.values.ravel() - 1, res=res, size=s)
f = scmodes.benchmark.gof._point_expfam_pmf(xj.values.ravel(), res=res, size=s)
assert np.isclose(F - F_1, f).all()
def test__gof_npmle(test_data):
x = test_data
gene = 'ENSG00000116251'
k, d, p = scmodes.benchmark.gof._gof_npmle(gene, x[gene], x.sum(axis=1))
assert k == gene
assert np.isfinite(d)
assert d >= 0
assert np.isfinite(p)  # api: numpy.isfinite
#!/usr/bin/env python
# A Global import to make code python 2 and 3 compatible
from __future__ import print_function
def make_graphs(graph_dir, mat_dict, centroids, aparc_names, n_rand=1000): #mat_dict comes from make_corr_matrices.py
'''
A function that makes all the required graphs from the correlation
matrices in mat_dict. These include the full graph with all
connections including weights, and binarized graphs at 30 different
costs between 1% and 30%. These graphs are fully connected because the
minimum spanning tree is used before the strongest edges are added
up to the required density.
If the graphs do not already exist they are saved as gpickle files in
graph_dir. If they do exist then they're read in from those files.
In addition, files with values for the nodal topological measures and
global topological measures are created and saved or loaded as
appropriate.
The function requires the centroids and aparc_names values in order
to calculate the nodal measures. The value n_rand is the number of
random graphs to calculate for the global and nodal measure
calculations.
The function returns a dictionary of graphs, nodal measures and
global measures
'''
#==========================================================================
# IMPORTS
#==========================================================================
import os
import networkx as nx
import numpy as np
import pickle
#==========================================================================
# Print to screen what you're up to
#==========================================================================
print ("--------------------------------------------------")
print ("Making or loading graphs")
#==========================================================================
# Create an empty dictionary
#==========================================================================
graph_dict = {}
#==========================================================================
# Loop through all the matrices in mat_dict
#==========================================================================
for k in mat_dict.keys():
print (' {}'.format(k))
# Read in the matrix
M = mat_dict[k]
# Get the covars name
mat_name, covars_name = k.split('_COVARS_')
#-------------------------------------------------------------------------
# Make the full graph first
#-------------------------------------------------------------------------
# Define the graph's file name and its dictionary key
g_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'Graph_{}_COST_100.gpickle'.format(mat_name))
g_key = '{}_COST_100'.format(k)
print (' Loading COST: 100',)
# If it already exists just read it in from the pickled file
if os.path.isfile(g_filename):
graph_dict[g_key] = nx.read_gpickle(g_filename)
# Otherwise you'll have to create it using the graph_at_cost function above
else:
graph_dict[g_key] = full_graph(M)
# Save it as a gpickle file so you don't have to do this next time!
dirname = os.path.dirname(g_filename)
if not os.path.isdir(dirname):
os.makedirs(dirname)
nx.write_gpickle(graph_dict[g_key], g_filename)
#-------------------------------------------------------------------------
# Then for all the different costs between 1% and 30%
#-------------------------------------------------------------------------
for cost in [2] + list(range(5, 21, 5)):
#-------------------------------------------------------------------------
# Define the graph's file name along with those of the the associated
# global and nodal dictionaries
#-------------------------------------------------------------------------
g_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'Graph_{}_COST_{:02.0f}.gpickle'.format(mat_name, cost))
global_dict_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'GlobalDict_{}_COST_{:02.0f}.p'.format(mat_name, cost))
nodal_dict_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'NodalDict_{}_COST_{:02.0f}.p'.format(mat_name, cost))
rich_club_filename = os.path.join(graph_dir,
'COVARS_{}'.format(covars_name),
'RichClub_{}_COST_{:02.0f}.p'.format(mat_name, cost))
g_key = '{}_COST_{:02.0f}'.format(k, cost)
#-------------------------------------------------------------------------
# Make or load the graph
#-------------------------------------------------------------------------
# If the graph already exists just read it in from the pickled file
if os.path.isfile(g_filename):
graph_dict[g_key] = nx.read_gpickle(g_filename)
# Otherwise you'll have to create it using the graph_at_cost function
else:
graph_dict[g_key] = graph_at_cost(M, cost)
# Save it as a gpickle file so you don't have to do this next time!
nx.write_gpickle(graph_dict[g_key], g_filename)
#-------------------------------------------------------------------------
# Make or load the global and nodal measures dictionaries
#-------------------------------------------------------------------------
# If the rich_club measures files already exists just read it
# and the nodal and global measures files in
if os.path.isfile(rich_club_filename):
# Print to screen so you know where you're up to
                if cost == 20:
                    print ('- {:02.0f}'.format(cost))
                else:
                    print ('- {:02.0f}'.format(cost), end=' ')
                graph_dict['{}_GlobalMeasures'.format(g_key)] = pickle.load(open(global_dict_filename, "rb"))
                graph_dict['{}_NodalMeasures'.format(g_key)] = pickle.load(open(nodal_dict_filename, "rb"))
                graph_dict['{}_RichClub'.format(g_key)] = pickle.load(open(rich_club_filename, "rb"))
# Otherwise you'll have to create them using the calculate_global_measures
# and calculate_nodal_measures functions
else:
G = graph_dict[g_key]
print ('\n Calculating COST: {:02.0f}'.format(cost))
# You need to calculate the same nodal partition for the global
# and nodal measures
nodal_partition = calc_nodal_partition(G)
# And you'll also want the same list of random graphs
R_list, R_nodal_partition_list = make_random_list(G, n_rand=n_rand)
graph_dict['{}_GlobalMeasures'.format(g_key)] = calculate_global_measures(G,
R_list=R_list,
nodal_partition=nodal_partition,
R_nodal_partition_list=R_nodal_partition_list)
(graph_dict[g_key],
graph_dict['{}_NodalMeasures'.format(g_key)]) = calculate_nodal_measures(G,
centroids,
aparc_names,
nodal_partition=nodal_partition)
graph_dict['{}_RichClub'.format(g_key)] = rich_club(G, R_list=R_list)
# Save them as pickle files so you don't have to do this next time!
pickle.dump(graph_dict['{}_GlobalMeasures'.format(g_key)], open(global_dict_filename, "wb"))
pickle.dump(graph_dict['{}_NodalMeasures'.format(g_key)], open(nodal_dict_filename, "wb"))
pickle.dump(graph_dict['{}_RichClub'.format(g_key)], open(rich_club_filename, "wb"))
nx.write_gpickle(graph_dict[g_key], g_filename)
# Return the full graph dictionary
return graph_dict
def full_graph(M):
'''
Very easy, set the diagonals to 0
and then save the graph
'''
import numpy as np
import networkx as nx
# Make a copy of the matrix
thr_M = np.copy(M)
# Set all diagonal values to 0
thr_M[np.diag_indices_from(thr_M)] = 0
# Read this full matrix into a graph G
G = nx.from_numpy_matrix(thr_M)
return G
def graph_at_cost(M, cost):
'''
A function that first creates the minimum spanning tree
for the graph, and then adds in edges according to their
connection strength up to a particular cost
'''
import numpy as np
import networkx as nx
# Make a copy of the matrix
thr_M = np.copy(M)
# Set all diagonal values to 0
thr_M[np.diag_indices_from(thr_M)] = 0
# Multiply all values by -1 because the minimum spanning tree
# looks for the smallest distance - not the largest correlation!
thr_M = thr_M*-1
# Read this full matrix into a graph G
G = nx.from_numpy_matrix(thr_M)
# Make a list of all the sorted edges in the full matrix
G_edges_sorted = [ edge for edge in sorted(G.edges(data = True), key = lambda edge_info: edge_info[2]['weight']) ]
# Calculate minimum spanning tree and make a list of the mst_edges
mst = nx.minimum_spanning_tree(G)
mst_edges = mst.edges(data = True)
# Create a list of edges that are *not* in the mst
# (because you don't want to add them in twice!)
G_edges_sorted_notmst = [ edge for edge in G_edges_sorted if not edge in mst_edges ]
# Figure out the number of edges you want to keep for this
# particular cost. You have to round this number because it
# won't necessarily be an integer, and you have to subtract
# the number of edges in the minimum spanning tree because we're
# going to ADD this number of edges to it
n_edges = (cost/100.0) * len(G_edges_sorted)
    n_edges = int(np.around(n_edges))
n_edges = n_edges - len(mst.edges())
# If your cost is so small that your minimum spanning tree already covers it
# then you can't do any better than the MST and you'll just have to return
# it with an accompanying error message
if n_edges < 0:
print ('Unable to calculate matrix at this cost - minimum spanning tree is too large')
# Otherwise, add in the appropriate number of edges (n_edges)
# from your sorted list (G_edges_sorted_notmst)
else:
mst.add_edges_from(G_edges_sorted_notmst[:n_edges])
# And return the *updated* minimum spanning tree
# as your graph
return mst
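# Minimal usage sketch for full_graph and graph_at_cost (illustrative only):
# the toy correlation matrix below is an assumption, not data from the
# pipeline above.
def _example_graph_at_cost():
    import numpy as np
    import networkx as nx
    # 30 nodes, 200 observations -> a 30 x 30 correlation matrix
    rng = np.random.RandomState(42)
    M = np.corrcoef(rng.randn(30, 200))
    G_full = full_graph(M)       # weighted graph with every connection
    G_10 = graph_at_cost(M, 10)  # MST plus strongest edges up to 10% density
    print (nx.density(G_full), nx.density(G_10))
    return G_10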
def make_random_list(G, n_rand=10):
'''
A little (but useful) function to wrap
around random_graph and return a list of
random graphs (matched for degree distribution)
that can be passed to multiple calculations so
you don't have to do it multiple times
'''
R_list = []
R_nodal_partition_list = []
print (' Creating {} random graphs - may take a little while'.format(n_rand))
for i in range(n_rand):
if len(R_list) <= i:
R_list += [ random_graph(G) ]
R_nodal_partition_list += [ calc_nodal_partition(R_list[i]) ]
return R_list, R_nodal_partition_list
def calculate_global_measures(G, R_list=None, n_rand=10, nodal_partition=None, R_nodal_partition_list=None):
'''
A wrapper function that calls a bunch of useful functions
and reports a plethora of network measures for the real graph
G, and for n random graphs that are matched on degree distribution
(unless otherwise stated)
'''
import networkx as nx
import numpy as np
#==== SET UP ======================
# If you haven't already calculated random graphs
# or you haven't given this function as many random
# graphs as it is expecting then calculate a random
# graph here
    if R_list is None:
        R_list, R_nodal_partition_list = make_random_list(G, n_rand=n_rand)
    n = len(R_list)
# If you haven't passed the nodal partition
# then calculate it here
if not nodal_partition:
nodal_partition = calc_nodal_partition(G)
#==== MEASURES ====================
global_measures_dict = {}
#---- Clustering coefficient ------
global_measures_dict['C'] = nx.average_clustering(G)
rand_array = np.ones(n)
for i in range(n):
rand_array[i] = nx.average_clustering(R_list[i])
global_measures_dict['C_rand'] = rand_array
#---- Shortest path length --------
global_measures_dict['L'] = nx.average_shortest_path_length(G)
rand_array = np.ones(n)
for i in range(n):
rand_array[i] = nx.average_shortest_path_length(R_list[i])
global_measures_dict['L_rand'] = rand_array
#---- Assortativity ---------------
global_measures_dict['a'] = np.mean(nx.degree_assortativity_coefficient(G))
rand_array = np.ones(n)
for i in range(n):
rand_array[i] = np.mean(nx.degree_assortativity_coefficient(R_list[i]))
global_measures_dict['a_rand'] = rand_array
#---- Modularity ------------------
global_measures_dict['M'] = calc_modularity(G, nodal_partition)
rand_array = np.ones(n)
for i in range(n):
rand_array[i] = calc_modularity(R_list[i], R_nodal_partition_list[i])
global_measures_dict['M_rand'] = rand_array
#---- Efficiency ------------------
global_measures_dict['E'] = calc_efficiency(G)
rand_array = np.ones(n)
for i in range(n):
rand_array[i] = calc_efficiency(R_list[i])
global_measures_dict['E_rand'] = rand_array
#---- Small world -----------------
sigma_array = np.ones(n)
for i in range(n):
sigma_array[i] = ( ( global_measures_dict['C'] / global_measures_dict['C_rand'][i] )
/ ( global_measures_dict['L'] / global_measures_dict['L_rand'][i] ) )
global_measures_dict['sigma'] = sigma_array
global_measures_dict['sigma_rand'] = 1.0
return global_measures_dict
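# Minimal usage sketch for calculate_global_measures (illustrative only):
# the toy matrix and the choice of two random graphs are assumptions, and the
# python-louvain "community" package is required, just as it is for the
# function above.
def _example_global_measures():
    import numpy as np
    rng = np.random.RandomState(0)
    M = np.corrcoef(rng.randn(30, 200))
    G = graph_at_cost(M, 20)
    R_list, R_partitions = make_random_list(G, n_rand=2)
    measures = calculate_global_measures(G,
                                         R_list=R_list,
                                         R_nodal_partition_list=R_partitions)
    return measures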
def calculate_nodal_measures(G, centroids, aparc_names, nodal_partition=None, names_308_style=True):
'''
A function which returns a dictionary of numpy arrays for a graph's
* degree
* participation coefficient
* average distance
* total distance
* clustering
* closeness
* interhemispheric proportion
* name
If you have names in 308 style (as described in Whitaker, Vertes et al 2016)
then you can also add in
* hemisphere
* 34_name (Desikan Killiany atlas region)
* 68_name (Desikan Killiany atlas region with hemisphere)
'''
import numpy as np
import networkx as nx
#==== SET UP ======================
# If you haven't passed the nodal partition
# then calculate it here
if not nodal_partition:
nodal_partition = calc_nodal_partition(G)
#==== MEASURES ====================
nodal_dict = {}
#---- Degree ----------------------
deg = G.degree().values()
nodal_dict['degree'] = list(deg)
#---- Closeness -------------------
closeness = nx.closeness_centrality(G).values()
nodal_dict['closeness'] = list(closeness)
#---- Betweenness -----------------
betweenness = nx.betweenness_centrality(G).values()
nodal_dict['betweenness'] = list(betweenness)
#---- Shortest path length --------
L = shortest_path(G).values()
nodal_dict['shortest_path'] = list(L)
#---- Clustering ------------------
clustering = nx.clustering(G).values()
nodal_dict['clustering'] = list(clustering)
#---- Participation coefficent ----
#---- and module assignment -------
partition, pc_dict = participation_coefficient(G, nodal_partition)
nodal_dict['module'] = list(partition.values())
nodal_dict['pc'] = list(pc_dict.values())
#---- Euclidean distance and ------
#---- interhem proporition --------
G = assign_nodal_distance(G, centroids)
average_dist = nx.get_node_attributes(G, 'average_dist').values()
total_dist = nx.get_node_attributes(G, 'total_dist').values()
interhem_prop = nx.get_node_attributes(G, 'interhem_proportion').values()
nodal_dict['average_dist'] = list(average_dist)
nodal_dict['total_dist'] = list(total_dist)
nodal_dict['interhem_prop'] = list(interhem_prop)
#---- Names -----------------------
G = assign_node_names(G, aparc_names, names_308_style=names_308_style)
name = nx.get_node_attributes(G, 'name').values()
nodal_dict['name'] = list(name)
if names_308_style:
name_34 = nx.get_node_attributes(G, 'name_34').values()
name_68 = nx.get_node_attributes(G, 'name_68').values()
hemi = nx.get_node_attributes(G, 'hemi').values()
nodal_dict['name_34'] = list(name_34)
nodal_dict['name_68'] = list(name_68)
nodal_dict['hemi'] = list(hemi)
return G, nodal_dict
def random_graph(G, Q=10):
'''
Create a random graph that preserves degree distribution
by swapping pairs of edges (double edge swap).
Inputs:
G: networkx graph
Q: constant that determines how many swaps to conduct
for every edge in the graph
Default Q =10
Returns:
R: networkx graph
CAVEAT: If it is not possible in 15 attempts to create a
connected random graph then this code will just return the
original graph (G). This means that if you come to look at
the values that are an output of calculate_global_measures
and see that the values are the same for the random graph
as for the main graph it is not necessarily the case that
the graph is random, it may be that the graph was so low cost
(density) that this code couldn't create an appropriate random
graph!
This should only happen for ridiculously low cost graphs that
wouldn't make all that much sense to investigate anyway...
so if you think carefully it shouldn't be a problem.... I hope!
'''
import networkx as nx
# Copy the graph
R = G.copy()
# Calculate the number of edges and set a constant
# as suggested in the nx documentation
E = R.number_of_edges()
# Start with assuming that the random graph is not connected
    # (because it might not be after the first permutation!)
connected=False
attempt=0
# Keep making random graphs until they are connected!
while not connected and attempt < 15:
# Now swap some edges in order to preserve the degree distribution
nx.double_edge_swap(R,Q*E,max_tries=Q*E*10)
# Check that this graph is connected! If not, start again
connected = nx.is_connected(R)
if not connected:
attempt +=1
if attempt == 15:
print (' ** Attempt aborted - can not randomise graph **')
R = G.copy()
return R
def calc_modularity(G, nodal_partition):
'''
A function that calculates modularity from the best partition
of a graph using the louvain method
'''
import community
modularity = community.modularity(nodal_partition, G)
return modularity
def calc_nodal_partition(G):
'''
You only need to create the nodal partition using the
community module once. It takes a while and can be
different every time you try so it's best to save a
partition and use that for any subsequent calculations
'''
import community
# Make sure the edges are binarized
for u,v,d in G.edges(data=True):
d['weight']=1
# Now calculate the best partition
nodal_partition = community.best_partition(G)
return nodal_partition
def calc_efficiency(G):
'''
A little wrapper to calculate global efficiency
'''
import networkx as nx
E=0.0
for node in G:
path_length=nx.single_source_shortest_path_length(G, node)
E += 1.0/sum(path_length.values())
return E
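# Minimal sketch of what calc_efficiency returns (illustrative only): each
# node contributes 1 / (sum of its shortest path lengths), so the densely
# connected graph scores higher than the path graph.
def _example_efficiency():
    import networkx as nx
    print (calc_efficiency(nx.complete_graph(10)))
    print (calc_efficiency(nx.path_graph(10)))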
def participation_coefficient(G, nodal_partition):
'''
Computes the participation coefficient for each node (Guimera et al. 2005).
Returns dictionary of the participation coefficient for each node.
'''
# Import the modules you'll need
import networkx as nx
import numpy as np
# Reverse the dictionary because the output of Louvain is "backwards"
# meaning it saves the module per node, rather than the nodes in each
# module
module_partition = {}
for m,n in zip(nodal_partition.values(),nodal_partition.keys()):
try:
module_partition[m].append(n)
except KeyError:
module_partition[m] = [n]
# Create an empty dictionary for the participation
# coefficients
pc_dict = {}
all_nodes = set(G.nodes())
# Print a little note to the screen because it can take a long
# time to run this code
print (' Calculating participation coefficient - may take a little while')
# Loop through modules
for m in module_partition.keys():
# Get the set of nodes in this module
mod_list = set(module_partition[m])
# Loop through each node (source node) in this module
for source in mod_list:
# Calculate the degree for the source node
degree = float(nx.degree(G=G, nbunch=source))
            # Count how many of these connections are to nodes
            # in the *same* module - the within module degree
            count = 0
            for target in mod_list:
                # If the edge exists then increase the counter by 1
                if G.has_edge(source, target):
                    count += 1
# This gives you the within module degree
wm_degree = float(count)
            # The participation coefficient is 1 - the square of
# the ratio of the within module degree and the total degree
pc = 1 - ((float(wm_degree) / float(degree))**2)
# Save the participation coefficient to the dictionary
pc_dict[source] = pc
return nodal_partition, pc_dict
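# Minimal sketch for participation_coefficient (illustrative only): two
# fully connected modules joined by one bridge edge, with the partition
# written by hand rather than coming from calc_nodal_partition. Only the two
# nodes at either end of the bridge get a non-zero participation coefficient.
def _example_participation_coefficient():
    import networkx as nx
    G = nx.complete_graph(4)                    # nodes 0-3 form module 0
    G.add_edges_from([(4, 5), (4, 6), (4, 7),
                      (5, 6), (5, 7), (6, 7)])  # nodes 4-7 form module 1
    G.add_edge(3, 4)                            # the bridge edge
    partition = {node: 0 if node < 4 else 1 for node in G.nodes()}
    _, pc_dict = participation_coefficient(G, partition)
    return pc_dict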
def assign_nodal_distance(G, centroids):
'''
    Give each node in the graph its
    x, y, z coordinates and then calculate the euclidean
    distance for every edge that connects to each node.
    Also calculate the proportion of interhemispheric edges
    (defined as edges whose end points have different signs
    for the x coordinate).
    Returns the graph
'''
import networkx as nx
import numpy as np
from scipy.spatial import distance
# First assign the x, y, z values to each node
for i, node in enumerate(G.nodes()):
G.node[node]['x'] = centroids[i, 0]
G.node[node]['y'] = centroids[i, 1]
G.node[node]['z'] = centroids[i, 2]
G.node[node]['centroids'] = centroids[i, :]
# Loop through every node in turn
for i, node in enumerate(G.nodes()):
# Loop through the edges connecting to this node
# Note that "node1" should always be exactly the same
# as "node", I've just used another name to keep
# the code clear (which I may not have achieved given
        # that I thought this comment was necessary...)
for node1, node2 in G.edges(nbunch=[node]):
            # Calculate the euclidean distance for this edge
cent1 = G.node[node1]['centroids']
cent2 = G.node[node2]['centroids']
dist = distance.euclidean(cent1, cent2)
# And assign this value to the edge
G.edge[node1][node2]['euclidean'] = dist
# Also figure out whether this edge is interhemispheric
# by multiplying the x values. If x1 * x2 is negative
# then the nodes are in different hemispheres.
x1 = G.node[node1]['x']
x2 = G.node[node2]['x']
if x1*x2 > 0:
G.edge[node1][node2]['interhem'] = 0
else:
G.edge[node1][node2]['interhem'] = 1
# Create two nodal attributes (average distance and
# total distance) by summarizing the euclidean distance
# for all edges which connect to the node
euc_list = [ G.edge[m][n]['euclidean'] for m, n in G.edges(nbunch=node) ]
G.node[node]['average_dist'] = np.mean(euc_list)
G.node[node]['total_dist'] = np.sum(euc_list)
# Create an interhem nodal attribute by getting the average
# of the interhem values for all edges which connect to the node
interhem_list = [ G.edge[m][n]['interhem'] for m, n in G.edges(nbunch=node) ]
        G.node[node]['interhem_proportion'] = np.mean(interhem_list)
    return G
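# Minimal sketch for assign_nodal_distance (illustrative only): three nodes
# on the x axis, one of them across the midline (negative x). The edge from
# node 0 to node 2 crosses hemispheres, so nodes 0 and 2 end up with non-zero
# interhemispheric proportions.
def _example_assign_nodal_distance():
    import numpy as np
    import networkx as nx
    G = nx.Graph()
    G.add_edges_from([(0, 1), (0, 2)])
    centroids = np.array([[10.0, 0.0, 0.0],
                          [30.0, 0.0, 0.0],
                          [-20.0, 0.0, 0.0]])
    G = assign_nodal_distance(G, centroids)
    return nx.get_node_attributes(G, 'interhem_proportion')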
"""Rotations in three dimensions - SO(3).
See :doc:`rotations` for more information.
"""
import warnings
import math
import numpy as np
from numpy.testing import assert_array_almost_equal
unitx = np.array([1.0, 0.0, 0.0])
unity = np.array([0.0, 1.0, 0.0])
unitz = np.array([0.0, 0.0, 1.0])
R_id = np.eye(3)
a_id = np.array([1.0, 0.0, 0.0, 0.0])
q_id = np.array([1.0, 0.0, 0.0, 0.0])
q_i = np.array([0.0, 1.0, 0.0, 0.0])
q_j = np.array([0.0, 0.0, 1.0, 0.0])
q_k = np.array([0.0, 0.0, 0.0, 1.0])
e_xyz_id = np.array([0.0, 0.0, 0.0])
e_zyx_id = np.array([0.0, 0.0, 0.0])
p0 = np.array([0.0, 0.0, 0.0])
eps = 1e-7
def norm_vector(v):
"""Normalize vector.
Parameters
----------
v : array-like, shape (n,)
nd vector
Returns
-------
u : array, shape (n,)
nd unit vector with norm 1 or the zero vector
"""
norm = np.linalg.norm(v)
if norm == 0.0:
return v
else:
return np.asarray(v) / norm
def norm_matrix(R):
"""Normalize rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix with small numerical errors
Returns
-------
R : array, shape (3, 3)
Normalized rotation matrix
"""
R = np.asarray(R)
c2 = R[:, 1]
c3 = norm_vector(R[:, 2])
c1 = norm_vector(np.cross(c2, c3))
c2 = norm_vector(np.cross(c3, c1))
return np.column_stack((c1, c2, c3))
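# Minimal sketch for norm_matrix (illustrative only): a slightly perturbed
# identity matrix is re-orthonormalized so that check_matrix accepts it again.
def _example_norm_matrix():
    noisy = np.eye(3) + 1e-4 * np.arange(9).reshape(3, 3)
    R = norm_matrix(noisy)
    return check_matrix(R)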
def norm_angle(a):
"""Normalize angle to (-pi, pi].
Parameters
----------
a : float or array-like, shape (n,)
Angle(s) in radians
Returns
-------
a_norm : float or array-like, shape (n,)
Normalized angle(s) in radians
"""
# Source of the solution: http://stackoverflow.com/a/32266181
return -((np.pi - np.asarray(a)) % (2.0 * np.pi) - np.pi)
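# Minimal sketch for norm_angle (illustrative only): angles that differ by
# full turns map to the same representative in (-pi, pi].
def _example_norm_angle():
    assert_array_almost_equal(norm_angle([-2.5 * np.pi, 1.5 * np.pi]),
                              [-0.5 * np.pi, -0.5 * np.pi])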
def norm_axis_angle(a):
"""Normalize axis-angle representation.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle). The length
of the axis vector is 1 and the angle is in [0, pi). No rotation
is represented by [1, 0, 0, 0].
"""
angle = a[3]
norm = np.linalg.norm(a[:3])
if angle == 0.0 or norm == 0.0:
return np.array([1.0, 0.0, 0.0, 0.0])
res = np.empty(4)
res[:3] = a[:3] / norm
angle = norm_angle(angle)
if angle < 0.0:
angle *= -1.0
res[:3] *= -1.0
res[3] = angle
return res
def norm_compact_axis_angle(a):
"""Normalize compact axis-angle representation.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z).
The angle is in [0, pi). No rotation is represented by [0, 0, 0].
"""
angle = np.linalg.norm(a)
if angle == 0.0:
return np.zeros(3)
axis = a / angle
return axis * norm_angle(angle)
def perpendicular_to_vectors(a, b):
"""Compute perpendicular vector to two other vectors.
Parameters
----------
a : array-like, shape (3,)
3d vector
b : array-like, shape (3,)
3d vector
Returns
-------
c : array-like, shape (3,)
3d vector that is orthogonal to a and b
"""
return np.cross(a, b)
def perpendicular_to_vector(a):
"""Compute perpendicular vector to one other vector.
There is an infinite number of solutions to this problem. Thus, we
restrict the solutions to [1, 0, z] and return [0, 0, 1] if the
z component of a is 0.
Parameters
----------
a : array-like, shape (3,)
3d vector
Returns
-------
b : array-like, shape (3,)
A 3d vector that is orthogonal to a. It does not necessarily have
unit length.
"""
if abs(a[2]) < eps:
return np.copy(unitz)
# Now that we solved the problem for [x, y, 0], we can solve it for all
# other vectors by restricting solutions to [1, 0, z] and find z.
# The dot product of orthogonal vectors is 0, thus
# a[0] * 1 + a[1] * 0 + a[2] * z == 0 or -a[0] / a[2] = z
return np.array([1.0, 0.0, -a[0] / a[2]])
def angle_between_vectors(a, b, fast=False):
"""Compute angle between two vectors.
Parameters
----------
a : array-like, shape (n,)
nd vector
b : array-like, shape (n,)
nd vector
fast : bool, optional (default: False)
Use fast implementation instead of numerically stable solution
Returns
-------
angle : float
Angle between a and b
"""
if len(a) != 3 or fast:
return np.arccos(
np.clip(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)),
-1.0, 1.0))
else:
return np.arctan2(np.linalg.norm(np.cross(a, b)), np.dot(a, b))
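# Minimal sketch for angle_between_vectors (illustrative only): perpendicular
# unit vectors enclose an angle of pi / 2.
def _example_angle_between_vectors():
    assert abs(angle_between_vectors(unitx, unity) - 0.5 * np.pi) < 1e-12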
def vector_projection(a, b):
"""Orthogonal projection of vector a on vector b.
Parameters
----------
a : array-like, shape (3,)
Vector a that will be projected on vector b
b : array-like, shape (3,)
Vector b on which vector a will be projected
Returns
-------
a_on_b : array, shape (3,)
        Orthogonal projection of vector a on vector b
"""
b_norm_squared = np.dot(b, b)
if b_norm_squared == 0.0:
return np.zeros(3)
return np.dot(a, b) * b / b_norm_squared
def random_vector(random_state=np.random.RandomState(0), n=3):
"""Generate an nd vector with normally distributed components.
Each component will be sampled from :math:`\mathcal{N}(\mu=0, \sigma=1)`.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
n : int, optional (default: 3)
Number of vector components
Returns
-------
v : array-like, shape (n,)
Random vector
"""
return random_state.randn(n)
def random_axis_angle(random_state=np.random.RandomState(0)):
"""Generate random axis-angle.
The angle will be sampled uniformly from the interval :math:`[0, \pi)`
and each component of the rotation axis will be sampled from
:math:`\mathcal{N}(\mu=0, \sigma=1)` and than the axis will be normalized
to length 1.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
"""
angle = np.pi * random_state.rand()
a = np.array([0, 0, 0, angle])
a[:3] = norm_vector(random_state.randn(3))
return a
def random_compact_axis_angle(random_state=np.random.RandomState(0)):
"""Generate random compact axis-angle.
The angle will be sampled uniformly from the interval :math:`[0, \pi)`
and each component of the rotation axis will be sampled from
:math:`\mathcal{N}(\mu=0, \sigma=1)` and than the axis will be normalized
to length 1.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
"""
a = random_axis_angle(random_state)
return a[:3] * a[3]
def random_quaternion(random_state=np.random.RandomState(0)):
"""Generate random quaternion.
Parameters
----------
random_state : np.random.RandomState, optional (default: random seed 0)
Random number generator
Returns
-------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
"""
return norm_vector(random_state.randn(4))
def cross_product_matrix(v):
"""Generate the cross-product matrix of a vector.
The cross-product matrix :math:`\\boldsymbol{V}` satisfies the equation
.. math::
\\boldsymbol{V} \\boldsymbol{w} = \\boldsymbol{v} \\times
\\boldsymbol{w}
It is a skew-symmetric (antisymmetric) matrix, i.e.
:math:`-\\boldsymbol{V} = \\boldsymbol{V}^T`.
Parameters
----------
v : array-like, shape (3,)
3d vector
Returns
-------
V : array-like, shape (3, 3)
Cross-product matrix
"""
return np.array([[0.0, -v[2], v[1]],
[v[2], 0.0, -v[0]],
[-v[1], v[0], 0.0]])
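# Minimal sketch for cross_product_matrix (illustrative only): the matrix
# form reproduces np.cross for an arbitrary pair of vectors.
def _example_cross_product_matrix():
    v = np.array([1.0, 2.0, 3.0])
    w = np.array([-1.0, 0.5, 2.0])
    V = cross_product_matrix(v)
    assert_array_almost_equal(V.dot(w), np.cross(v, w))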
def check_skew_symmetric_matrix(V, tolerance=1e-6, strict_check=True):
"""Input validation of a skew-symmetric matrix.
Check whether the transpose of the matrix is its negative:
.. math::
V^T = -V
Parameters
----------
V : array-like, shape (3, 3)
Cross-product matrix
tolerance : float, optional (default: 1e-6)
Tolerance threshold for checks.
strict_check : bool, optional (default: True)
Raise a ValueError if V.T is not numerically close enough to -V.
Otherwise we print a warning.
Returns
-------
V : array-like, shape (3, 3)
Validated cross-product matrix
"""
    V = np.asarray(V, dtype=float)
if V.ndim != 2 or V.shape[0] != 3 or V.shape[1] != 3:
raise ValueError("Expected skew-symmetric matrix with shape (3, 3), "
"got array-like object with shape %s" % (V.shape,))
if not np.allclose(V.T, -V, atol=tolerance):
error_msg = ("Expected skew-symmetric matrix, but it failed the test "
"V.T = %r\n-V = %r" % (V.T, -V))
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
return V
def check_matrix(R, tolerance=1e-6, strict_check=True):
"""Input validation of a rotation matrix.
We check whether R multiplied by its inverse is approximately the identity
matrix and the determinant is approximately 1.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
tolerance : float, optional (default: 1e-6)
Tolerance threshold for checks. Default tolerance is the same as in
assert_rotation_matrix(R).
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
R : array, shape (3, 3)
Validated rotation matrix
"""
    R = np.asarray(R, dtype=float)
if R.ndim != 2 or R.shape[0] != 3 or R.shape[1] != 3:
raise ValueError("Expected rotation matrix with shape (3, 3), got "
"array-like object with shape %s" % (R.shape,))
RRT = np.dot(R, R.T)
if not np.allclose(RRT, np.eye(3), atol=tolerance):
error_msg = ("Expected rotation matrix, but it failed the test "
"for inversion by transposition. np.dot(R, R.T) "
"gives %r" % RRT)
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
R_det = np.linalg.det(R)
if abs(R_det - 1) > tolerance:
error_msg = ("Expected rotation matrix, but it failed the test "
"for the determinant, which should be 1 but is %g; "
"that is, it probably represents a rotoreflection"
% R_det)
if strict_check:
raise ValueError(error_msg)
else:
warnings.warn(error_msg)
return R
def check_axis_angle(a):
"""Input validation of axis-angle representation.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
a : array, shape (4,)
Validated axis of rotation and rotation angle: (x, y, z, angle)
"""
    a = np.asarray(a, dtype=float)
if a.ndim != 1 or a.shape[0] != 4:
raise ValueError("Expected axis and angle in array with shape (4,), "
"got array-like object with shape %s" % (a.shape,))
return norm_axis_angle(a)
def check_compact_axis_angle(a):
"""Input validation of compact axis-angle representation.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
a : array, shape (3,)
Validated axis of rotation and rotation angle: angle * (x, y, z)
"""
    a = np.asarray(a, dtype=float)
if a.ndim != 1 or a.shape[0] != 3:
raise ValueError("Expected axis and angle in array with shape (3,), "
"got array-like object with shape %s" % (a.shape,))
return norm_compact_axis_angle(a)
def check_quaternion(q, unit=True):
"""Input validation of quaternion representation.
Parameters
----------
q : array-like, shape (4,)
Quaternion to represent rotation: (w, x, y, z)
unit : bool, optional (default: True)
Normalize the quaternion so that it is a unit quaternion
Returns
-------
q : array-like, shape (4,)
Validated quaternion to represent rotation: (w, x, y, z)
"""
    q = np.asarray(q, dtype=float)
if q.ndim != 1 or q.shape[0] != 4:
raise ValueError("Expected quaternion with shape (4,), got "
"array-like object with shape %s" % (q.shape,))
if unit:
return norm_vector(q)
else:
return q
def check_quaternions(Q, unit=True):
"""Input validation of quaternion representation.
Parameters
----------
Q : array-like, shape (n_steps, 4)
Quaternions to represent rotations: (w, x, y, z)
unit : bool, optional (default: True)
Normalize the quaternions so that they are unit quaternions
Returns
-------
Q : array-like, shape (n_steps, 4)
Validated quaternions to represent rotations: (w, x, y, z)
"""
    Q_checked = np.asarray(Q, dtype=float)
if Q_checked.ndim != 2 or Q_checked.shape[1] != 4:
raise ValueError(
"Expected quaternion array with shape (n_steps, 4), got "
"array-like object with shape %s" % (Q_checked.shape,))
if unit:
for i in range(len(Q)):
Q_checked[i] = norm_vector(Q_checked[i])
return Q_checked
def matrix_from_two_vectors(a, b):
"""Compute rotation matrix from two vectors.
We assume that the two given vectors form a plane so that we can compute
a third, orthogonal vector with the cross product.
The x-axis will point in the same direction as a, the y-axis corresponds
to the normalized vector rejection of b on a, and the z-axis is the
cross product of the other basis vectors.
Parameters
----------
a : array-like, shape (3,)
First vector, must not be 0
b : array-like, shape (3,)
        Second vector, must not be 0 or parallel to a
Returns
-------
R : array, shape (3, 3)
Rotation matrix
"""
if np.linalg.norm(a) == 0:
raise ValueError("a must not be the zero vector.")
if np.linalg.norm(b) == 0:
raise ValueError("b must not be the zero vector.")
c = perpendicular_to_vectors(a, b)
if np.linalg.norm(c) == 0:
raise ValueError("a and b must not be parallel.")
a = norm_vector(a)
b_on_a_projection = vector_projection(b, a)
b_on_a_rejection = b - b_on_a_projection
b = norm_vector(b_on_a_rejection)
c = norm_vector(c)
return np.column_stack((a, b, c))
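# Minimal sketch for matrix_from_two_vectors (illustrative only): for these
# two vectors in the x-y plane the resulting basis is simply the identity,
# and the result passes check_matrix.
def _example_matrix_from_two_vectors():
    R = matrix_from_two_vectors(np.array([2.0, 0.0, 0.0]),
                                np.array([1.0, 1.0, 0.0]))
    assert_array_almost_equal(R, np.eye(3))
    return check_matrix(R)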
def matrix_from_axis_angle(a):
"""Compute rotation matrix from axis-angle.
This is called exponential map or Rodrigues' formula.
This typically results in an active rotation matrix.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
a = check_axis_angle(a)
ux, uy, uz, theta = a
c = math.cos(theta)
s = math.sin(theta)
ci = 1.0 - c
R = np.array([[ci * ux * ux + c,
ci * ux * uy - uz * s,
ci * ux * uz + uy * s],
[ci * uy * ux + uz * s,
ci * uy * uy + c,
ci * uy * uz - ux * s],
[ci * uz * ux - uy * s,
ci * uz * uy + ux * s,
ci * uz * uz + c],
])
# This is equivalent to
# R = (np.eye(3) * np.cos(a[3]) +
# (1.0 - np.cos(a[3])) * a[:3, np.newaxis].dot(a[np.newaxis, :3]) +
# cross_product_matrix(a[:3]) * np.sin(a[3]))
return R
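# Minimal sketch for matrix_from_axis_angle (illustrative only): an active
# rotation of 90 degrees about the z-axis maps the x-axis onto the y-axis.
def _example_matrix_from_axis_angle():
    R = matrix_from_axis_angle([0.0, 0.0, 1.0, 0.5 * np.pi])
    assert_array_almost_equal(R.dot(unitx), unity)
    return R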
def matrix_from_compact_axis_angle(a):
"""Compute rotation matrix from compact axis-angle.
This is called exponential map or Rodrigues' formula.
This typically results in an active rotation matrix.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
a = axis_angle_from_compact_axis_angle(a)
return matrix_from_axis_angle(a)
def matrix_from_quaternion(q):
"""Compute rotation matrix from quaternion.
This typically results in an active rotation matrix.
Parameters
----------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
q = check_quaternion(q)
uq = norm_vector(q)
w, x, y, z = uq
x2 = 2.0 * x * x
y2 = 2.0 * y * y
z2 = 2.0 * z * z
xy = 2.0 * x * y
xz = 2.0 * x * z
yz = 2.0 * y * z
xw = 2.0 * x * w
yw = 2.0 * y * w
zw = 2.0 * z * w
R = np.array([[1.0 - y2 - z2, xy - zw, xz + yw],
[xy + zw, 1.0 - x2 - z2, yz - xw],
[xz - yw, yz + xw, 1.0 - x2 - y2]])
return R
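# Minimal sketch for matrix_from_quaternion (illustrative only): the unit
# quaternion (cos(pi/4), 0, 0, sin(pi/4)) encodes a 90 degree rotation about
# the z-axis and therefore matches the corresponding axis-angle matrix.
def _example_matrix_from_quaternion():
    q = np.array([np.cos(0.25 * np.pi), 0.0, 0.0, np.sin(0.25 * np.pi)])
    R_q = matrix_from_quaternion(q)
    R_a = matrix_from_axis_angle([0.0, 0.0, 1.0, 0.5 * np.pi])
    assert_array_almost_equal(R_q, R_a)
    return R_q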
def matrix_from_angle(basis, angle):
"""Compute passive rotation matrix from rotation about basis vector.
Parameters
----------
basis : int from [0, 1, 2]
The rotation axis (0: x, 1: y, 2: z)
angle : float
Rotation angle
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
c = np.cos(angle)
s = np.sin(angle)
if basis == 0:
R = np.array([[1.0, 0.0, 0.0],
[0.0, c, s],
[0.0, -s, c]])
elif basis == 1:
R = np.array([[c, 0.0, -s],
[0.0, 1.0, 0.0],
[s, 0.0, c]])
elif basis == 2:
R = np.array([[c, s, 0.0],
[-s, c, 0.0],
[0.0, 0.0, 1.0]])
else:
raise ValueError("Basis must be in [0, 1, 2]")
return R
passive_matrix_from_angle = matrix_from_angle
def active_matrix_from_angle(basis, angle):
"""Compute active rotation matrix from rotation about basis vector.
Parameters
----------
basis : int from [0, 1, 2]
The rotation axis (0: x, 1: y, 2: z)
angle : float
Rotation angle
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
c = np.cos(angle)
s = np.sin(angle)
if basis == 0:
R = np.array([[1.0, 0.0, 0.0],
[0.0, c, -s],
[0.0, s, c]])
elif basis == 1:
R = np.array([[c, 0.0, s],
[0.0, 1.0, 0.0],
[-s, 0.0, c]])
elif basis == 2:
R = np.array([[c, -s, 0.0],
[s, c, 0.0],
[0.0, 0.0, 1.0]])
else:
raise ValueError("Basis must be in [0, 1, 2]")
return R
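# Minimal sketch contrasting the two conventions (illustrative only): for the
# same basis vector and angle, the passive matrix is the transpose (inverse)
# of the active one.
def _example_active_vs_passive():
    angle = 0.3
    for basis in range(3):
        assert_array_almost_equal(
            passive_matrix_from_angle(basis, angle),
            active_matrix_from_angle(basis, angle).T)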
def matrix_from_euler_xyz(e):
"""Compute passive rotation matrix from intrinsic xyz Tait-Bryan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, y'-, and z''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = passive_matrix_from_angle(0, alpha).dot(
passive_matrix_from_angle(1, beta)).dot(
passive_matrix_from_angle(2, gamma))
return R
def matrix_from_euler_zyx(e):
"""Compute passive rotation matrix from intrinsic zyx Tait-Bryan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, y'-, and x''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
gamma, beta, alpha = e
R = passive_matrix_from_angle(2, gamma).dot(
passive_matrix_from_angle(1, beta)).dot(
passive_matrix_from_angle(0, alpha))
return R
def active_matrix_from_intrinsic_euler_xzx(e):
"""Compute active rotation matrix from intrinsic xzx Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, z'-, and x''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(0, alpha).dot(
active_matrix_from_angle(2, beta)).dot(
active_matrix_from_angle(0, gamma))
return R
def active_matrix_from_extrinsic_euler_xzx(e):
"""Compute active rotation matrix from extrinsic xzx Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, z-, and x-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(0, gamma).dot(
active_matrix_from_angle(2, beta)).dot(
active_matrix_from_angle(0, alpha))
return R
def active_matrix_from_intrinsic_euler_xyx(e):
"""Compute active rotation matrix from intrinsic xyx Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, y'-, and x''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(0, alpha).dot(
active_matrix_from_angle(1, beta)).dot(
active_matrix_from_angle(0, gamma))
return R
def active_matrix_from_extrinsic_euler_xyx(e):
"""Compute active rotation matrix from extrinsic xyx Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, y-, and x-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(0, gamma).dot(
active_matrix_from_angle(1, beta)).dot(
active_matrix_from_angle(0, alpha))
return R
def active_matrix_from_intrinsic_euler_yxy(e):
"""Compute active rotation matrix from intrinsic yxy Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around y-, x'-, and y''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(1, alpha).dot(
active_matrix_from_angle(0, beta)).dot(
active_matrix_from_angle(1, gamma))
return R
def active_matrix_from_extrinsic_euler_yxy(e):
"""Compute active rotation matrix from extrinsic yxy Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around y-, x-, and y-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(1, gamma).dot(
active_matrix_from_angle(0, beta)).dot(
active_matrix_from_angle(1, alpha))
return R
def active_matrix_from_intrinsic_euler_yzy(e):
"""Compute active rotation matrix from intrinsic yzy Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around y-, z'-, and y''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(1, alpha).dot(
active_matrix_from_angle(2, beta)).dot(
active_matrix_from_angle(1, gamma))
return R
def active_matrix_from_extrinsic_euler_yzy(e):
"""Compute active rotation matrix from extrinsic yzy Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around y-, z-, and y-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(1, gamma).dot(
active_matrix_from_angle(2, beta)).dot(
active_matrix_from_angle(1, alpha))
return R
def active_matrix_from_intrinsic_euler_zyz(e):
"""Compute active rotation matrix from intrinsic zyz Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, y'-, and z''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(2, alpha).dot(
active_matrix_from_angle(1, beta)).dot(
active_matrix_from_angle(2, gamma))
return R
def active_matrix_from_extrinsic_euler_zyz(e):
"""Compute active rotation matrix from extrinsic zyz Euler angles.
.. warning::
This function was not implemented correctly in versions 1.3 and 1.4
as the order of the angles was reversed, which actually corresponds
to intrinsic rotations. This has been fixed in version 1.5.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, y-, and z-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(2, gamma).dot(
active_matrix_from_angle(1, beta)).dot(
active_matrix_from_angle(2, alpha))
return R
def active_matrix_from_intrinsic_euler_zxz(e):
"""Compute active rotation matrix from intrinsic zxz Euler angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, x'-, and z''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(2, alpha).dot(
active_matrix_from_angle(0, beta)).dot(
active_matrix_from_angle(2, gamma))
return R
def active_matrix_from_extrinsic_euler_zxz(e):
"""Compute active rotation matrix from extrinsic zxz Euler angles.
.. warning::
This function was not implemented correctly in versions 1.3 and 1.4
as the order of the angles was reversed, which actually corresponds
to intrinsic rotations. This has been fixed in version 1.5.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, x-, and z-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(2, gamma).dot(
active_matrix_from_angle(0, beta)).dot(
active_matrix_from_angle(2, alpha))
return R
def active_matrix_from_intrinsic_euler_xzy(e):
"""Compute active rotation matrix from intrinsic xzy Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, z'-, and y''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(0, alpha).dot(
active_matrix_from_angle(2, beta)).dot(
active_matrix_from_angle(1, gamma))
return R
def active_matrix_from_extrinsic_euler_xzy(e):
"""Compute active rotation matrix from extrinsic xzy Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, z-, and y-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(1, gamma).dot(
active_matrix_from_angle(2, beta)).dot(
active_matrix_from_angle(0, alpha))
return R
def active_matrix_from_intrinsic_euler_xyz(e):
"""Compute active rotation matrix from intrinsic xyz Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, y'-, and z''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(0, alpha).dot(
active_matrix_from_angle(1, beta)).dot(
active_matrix_from_angle(2, gamma))
return R
def active_matrix_from_extrinsic_euler_xyz(e):
"""Compute active rotation matrix from extrinsic xyz Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around x-, y-, and z-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(2, gamma).dot(
active_matrix_from_angle(1, beta)).dot(
active_matrix_from_angle(0, alpha))
return R
def active_matrix_from_intrinsic_euler_yxz(e):
"""Compute active rotation matrix from intrinsic yxz Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around y-, x'-, and z''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(1, alpha).dot(
active_matrix_from_angle(0, beta)).dot(
active_matrix_from_angle(2, gamma))
return R
def active_matrix_from_extrinsic_euler_yxz(e):
"""Compute active rotation matrix from extrinsic yxz Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around y-, x-, and z-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(2, gamma).dot(
active_matrix_from_angle(0, beta)).dot(
active_matrix_from_angle(1, alpha))
return R
def active_matrix_from_intrinsic_euler_yzx(e):
"""Compute active rotation matrix from intrinsic yzx Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around y-, z'-, and x''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(1, alpha).dot(
active_matrix_from_angle(2, beta)).dot(
active_matrix_from_angle(0, gamma))
return R
def active_matrix_from_extrinsic_euler_yzx(e):
"""Compute active rotation matrix from extrinsic yzx Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around y-, z-, and x-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(0, gamma).dot(
active_matrix_from_angle(2, beta)).dot(
active_matrix_from_angle(1, alpha))
return R
def active_matrix_from_intrinsic_euler_zyx(e):
"""Compute active rotation matrix from intrinsic zyx Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, y'-, and x''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(2, alpha).dot(
active_matrix_from_angle(1, beta)).dot(
active_matrix_from_angle(0, gamma))
return R
def active_matrix_from_extrinsic_euler_zyx(e):
"""Compute active rotation matrix from extrinsic zyx Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, y-, and x-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(0, gamma).dot(
active_matrix_from_angle(1, beta)).dot(
active_matrix_from_angle(2, alpha))
return R
def active_matrix_from_intrinsic_euler_zxy(e):
"""Compute active rotation matrix from intrinsic zxy Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, x'-, and y''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(2, alpha).dot(
active_matrix_from_angle(0, beta)).dot(
active_matrix_from_angle(1, gamma))
return R
def active_matrix_from_extrinsic_euler_zxy(e):
"""Compute active rotation matrix from extrinsic zxy Cardan angles.
Parameters
----------
e : array-like, shape (3,)
Angles for rotation around z-, x-, and y-axes (extrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
alpha, beta, gamma = e
R = active_matrix_from_angle(1, gamma).dot(
active_matrix_from_angle(0, beta)).dot(
active_matrix_from_angle(2, alpha))
return R
def active_matrix_from_extrinsic_roll_pitch_yaw(rpy):
"""Compute active rotation matrix from extrinsic roll, pitch, and yaw.
Parameters
----------
rpy : array-like, shape (3,)
Angles for rotation around x- (roll), y- (pitch), and z-axes (yaw),
extrinsic rotations
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
return active_matrix_from_extrinsic_euler_xyz(rpy)
def matrix_from(R=None, a=None, q=None, e_xyz=None, e_zyx=None):
"""Compute rotation matrix from another representation.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
e_xyz : array-like, shape (3,)
Angles for rotation around x-, y'-, and z''-axes (intrinsic rotations)
e_zyx : array-like, shape (3,)
Angles for rotation around z-, y'-, and x''-axes (intrinsic rotations)
Returns
-------
R : array-like, shape (3, 3)
Rotation matrix
"""
if R is not None:
return R
if a is not None:
return matrix_from_axis_angle(a)
if q is not None:
return matrix_from_quaternion(q)
if e_xyz is not None:
return matrix_from_euler_xyz(e_xyz)
if e_zyx is not None:
return matrix_from_euler_zyx(e_zyx)
raise ValueError("Cannot compute rotation matrix from no rotation.")
def _general_intrinsic_euler_from_active_matrix(
R, n1, n2, n3, proper_euler, strict_check=True):
"""General algorithm to extract intrinsic euler angles from a matrix.
The implementation is based on SciPy's implementation:
https://github.com/scipy/scipy/blob/master/scipy/spatial/transform/rotation.pyx
Parameters
----------
R : array-like, shape (3, 3)
Active rotation matrix
n1 : array, shape (3,)
First rotation axis (basis vector)
n2 : array, shape (3,)
Second rotation axis (basis vector)
n3 : array, shape (3,)
Third rotation axis (basis vector)
proper_euler : bool
Is this an Euler angle convention or a Cardan / Tait-Bryan convention?
Proper Euler angles rotate about the same axis twice, for example,
z, y', and z''.
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
euler_angles : array, shape (3,)
Extracted intrinsic rotation angles in radians about the axes
n1, n2, and n3 in this order. The first and last angle are
normalized to [-pi, pi]. The middle angle is normalized to
either [0, pi] (proper Euler angles) or [-pi/2, pi/2]
(Cardan / Tait-Bryan angles).
References
----------
Shuster, Markley: General Formula for Extracting the Euler Angles,
https://arc.aiaa.org/doi/abs/10.2514/1.16622
"""
D = check_matrix(R, strict_check=strict_check)
# Differences to the paper:
# - we call the angles alpha, beta, and gamma
# - we obtain angles from intrinsic rotations, thus some matrices are
# transposed like in SciPy's implementation
# Step 2
# - Equation 5
n1_cross_n2 = np.cross(n1, n2)
lmbda = np.arctan2(
np.dot(n1_cross_n2, n3),
np.dot(n1, n3)
)
# - Equation 6
C = np.vstack((n2, n1_cross_n2, n1))
# Step 3
# - Equation 8
CDCT = np.dot(np.dot(C, D), C.T)
O = np.dot(CDCT, active_matrix_from_angle(0, lmbda).T)
# Step 4
# - Equation 10a
beta = lmbda + np.arccos(O[2, 2])
safe1 = abs(beta - lmbda) >= np.finfo(float).eps
safe2 = abs(beta - lmbda - np.pi) >= np.finfo(float).eps
if safe1 and safe2: # Default case, no gimbal lock
# Step 5
# - Equation 10b
alpha = np.arctan2(O[0, 2], -O[1, 2])
# - Equation 10c
gamma = np.arctan2(O[2, 0], O[2, 1])
# Step 7
if proper_euler:
valid_beta = 0.0 <= beta <= np.pi
else: # Cardan / Tait-Bryan angles
valid_beta = -0.5 * np.pi <= beta <= 0.5 * np.pi
# - Equation 12
if not valid_beta:
alpha += np.pi
beta = 2.0 * lmbda - beta
gamma -= np.pi
else:
# Step 6 - Handle gimbal locks
# a)
gamma = 0.0
if not safe1:
# b)
alpha = np.arctan2(O[1, 0] - O[0, 1], O[0, 0] + O[1, 1])
else:
# c)
alpha = np.arctan2(O[1, 0] + O[0, 1], O[0, 0] - O[1, 1])
euler_angles = norm_angle([alpha, beta, gamma])
return euler_angles
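# Minimal round-trip sketch (illustrative only): intrinsic zyz Euler angles
# extracted from a rotation matrix reproduce the angles that generated it,
# as long as the middle angle stays away from the gimbal-lock values 0 and pi.
def _example_intrinsic_zyz_roundtrip():
    e = np.array([0.4, 0.7, -1.1])
    R = active_matrix_from_intrinsic_euler_zyz(e)
    assert_array_almost_equal(
        intrinsic_euler_zyz_from_active_matrix(R), e)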
def euler_xyz_from_matrix(R, strict_check=True):
"""Compute xyz Euler angles from passive rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Passive rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e_xyz : array-like, shape (3,)
Angles for rotation around x-, y'-, and z''-axes (intrinsic rotations)
"""
R = check_matrix(R, strict_check=strict_check)
if np.abs(R[0, 2]) != 1.0:
# NOTE: There are two solutions: angle2 and pi - angle2!
angle2 = np.arcsin(-R[0, 2])
angle1 = np.arctan2(R[1, 2] / np.cos(angle2), R[2, 2] / np.cos(angle2))
angle3 = np.arctan2(R[0, 1] / np.cos(angle2), R[0, 0] / np.cos(angle2))
else:
if R[0, 2] == 1.0:
angle3 = 0.0
angle2 = -np.pi / 2.0
angle1 = np.arctan2(-R[1, 0], -R[2, 0])
else:
angle3 = 0.0
angle2 = np.pi / 2.0
angle1 = np.arctan2(R[1, 0], R[2, 0])
return np.array([angle1, angle2, angle3])
def euler_zyx_from_matrix(R, strict_check=True):
"""Compute zyx Euler angles from passive rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Passive rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e_zyx : array-like, shape (3,)
Angles for rotation around z-, y'-, and x''-axes (intrinsic rotations)
"""
R = check_matrix(R, strict_check=strict_check)
if np.abs(R[2, 0]) != 1.0:
# NOTE: There are two solutions: angle2 and pi - angle2!
angle2 = np.arcsin(R[2, 0])
angle3 = np.arctan2(-R[2, 1] / np.cos(angle2),
R[2, 2] / np.cos(angle2))
angle1 = np.arctan2(-R[1, 0] / np.cos(angle2),
R[0, 0] / np.cos(angle2))
else:
if R[2, 0] == 1.0:
angle3 = 0.0
angle2 = np.pi / 2.0
angle1 = np.arctan2(R[0, 1], -R[0, 2])
else:
angle3 = 0.0
angle2 = -np.pi / 2.0
angle1 = np.arctan2(R[0, 1], R[0, 2])
return np.array([angle1, angle2, angle3])
def intrinsic_euler_xzx_from_active_matrix(R, strict_check=True):
"""Compute intrinsic xzx Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array, shape (3,)
Angles for rotation around x-, z'-, and x''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitx, unitz, unitx, True, strict_check)
def extrinsic_euler_xzx_from_active_matrix(R, strict_check=True):
"""Compute active rotation matrix from extrinsic xzx Euler angles.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around x-, z-, and x-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitx, unitz, unitx, True, strict_check)[::-1]
def intrinsic_euler_xyx_from_active_matrix(R, strict_check=True):
"""Compute intrinsic xyx Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array, shape (3,)
Angles for rotation around x-, y'-, and x''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitx, unity, unitx, True, strict_check)
def extrinsic_euler_xyx_from_active_matrix(R, strict_check=True):
"""Compute extrinsic xyx Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array, shape (3,)
Angles for rotation around x-, y-, and x-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitx, unity, unitx, True, strict_check)[::-1]
def intrinsic_euler_yxy_from_active_matrix(R, strict_check=True):
"""Compute intrinsic yxy Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around y-, x'-, and y''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unity, unitx, unity, True, strict_check)
def extrinsic_euler_yxy_from_active_matrix(R, strict_check=True):
"""Compute extrinsic yxy Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around y-, x-, and y-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unity, unitx, unity, True, strict_check)[::-1]
def intrinsic_euler_yzy_from_active_matrix(R, strict_check=True):
"""Compute intrinsic yzy Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around y-, z'-, and y''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unity, unitz, unity, True, strict_check)
def extrinsic_euler_yzy_from_active_matrix(R, strict_check=True):
"""Compute extrinsic yzy Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around y-, z-, and y-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unity, unitz, unity, True, strict_check)[::-1]
def intrinsic_euler_zyz_from_active_matrix(R, strict_check=True):
"""Compute intrinsic zyz Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around z-, y'-, and z''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitz, unity, unitz, True, strict_check)
def extrinsic_euler_zyz_from_active_matrix(R, strict_check=True):
"""Compute extrinsic zyz Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around z-, y-, and z-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitz, unity, unitz, True, strict_check)[::-1]
def intrinsic_euler_zxz_from_active_matrix(R, strict_check=True):
"""Compute intrinsic zxz Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around z-, x'-, and z''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitz, unitx, unitz, True, strict_check)
def extrinsic_euler_zxz_from_active_matrix(R, strict_check=True):
"""Compute extrinsic zxz Euler angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around z-, x-, and z-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitz, unitx, unitz, True, strict_check)[::-1]
def intrinsic_euler_xzy_from_active_matrix(R, strict_check=True):
"""Compute intrinsic xzy Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around x-, z'-, and y''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitx, unitz, unity, False, strict_check)
def extrinsic_euler_xzy_from_active_matrix(R, strict_check=True):
"""Compute extrinsic xzy Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around x-, z-, and y-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unity, unitz, unitx, False, strict_check)[::-1]
def intrinsic_euler_xyz_from_active_matrix(R, strict_check=True):
"""Compute intrinsic xyz Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around x-, y'-, and z''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitx, unity, unitz, False, strict_check)
def extrinsic_euler_xyz_from_active_matrix(R, strict_check=True):
"""Compute extrinsic xyz Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around x-, y-, and z-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitz, unity, unitx, False, strict_check)[::-1]
def intrinsic_euler_yxz_from_active_matrix(R, strict_check=True):
"""Compute intrinsic yxz Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around y-, x'-, and z''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unity, unitx, unitz, False, strict_check)
def extrinsic_euler_yxz_from_active_matrix(R, strict_check=True):
"""Compute extrinsic yxz Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around y-, x-, and z-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitz, unitx, unity, False, strict_check)[::-1]
def intrinsic_euler_yzx_from_active_matrix(R, strict_check=True):
"""Compute intrinsic yzx Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around y-, z'-, and x''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unity, unitz, unitx, False, strict_check)
def extrinsic_euler_yzx_from_active_matrix(R, strict_check=True):
"""Compute extrinsic yzx Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around y-, z-, and x-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitx, unitz, unity, False, strict_check)[::-1]
def intrinsic_euler_zyx_from_active_matrix(R, strict_check=True):
"""Compute intrinsic zyx Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array, shape (3,)
Angles for rotation around z-, y'-, and x''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitz, unity, unitx, False, strict_check)
def extrinsic_euler_zyx_from_active_matrix(R, strict_check=True):
"""Compute extrinsic zyx Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array, shape (3,)
Angles for rotation around z-, y-, and x-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitx, unity, unitz, False, strict_check)[::-1]
def intrinsic_euler_zxy_from_active_matrix(R, strict_check=True):
"""Compute intrinsic zxy Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array, shape (3,)
Angles for rotation around z-, x'-, and y''-axes (intrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unitz, unitx, unity, False, strict_check)
def extrinsic_euler_zxy_from_active_matrix(R, strict_check=True):
"""Compute extrinsic zxy Cardan angles from active rotation matrix.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
e : array-like, shape (3,)
Angles for rotation around z-, x-, and y-axes (extrinsic rotations)
"""
return _general_intrinsic_euler_from_active_matrix(
R, unity, unitx, unitz, False, strict_check)[::-1]
def axis_angle_from_matrix(R, strict_check=True):
"""Compute axis-angle from rotation matrix.
This operation is called logarithmic map. Note that there are two possible
solutions for the rotation axis when the angle is 180 degrees (pi).
We usually assume active rotations.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle). The angle is
constrained to [0, pi].
"""
R = check_matrix(R, strict_check=strict_check)
angle = np.arccos((np.trace(R) - 1.0) / 2.0)
if angle == 0.0: # R == np.eye(3)
return np.array([1.0, 0.0, 0.0, 0.0])
a = np.empty(4)
# We can usually determine the rotation axis by inverting Rodrigues'
# formula. Subtracting opposing off-diagonal elements gives us
# 2 * sin(angle) * e,
# where e is the normalized rotation axis.
axis_unnormalized = np.array(
[R[2, 1] - R[1, 2], R[0, 2] - R[2, 0], R[1, 0] - R[0, 1]])
if abs(angle - np.pi) < 1e-4: # np.trace(R) close to -1
# The threshold is a result from this discussion:
# https://github.com/rock-learning/pytransform3d/issues/43
# The standard formula becomes numerically unstable, however,
# Rodrigues' formula reduces to R = I + 2 (ee^T - I), with the
# rotation axis e, that is, ee^T = 0.5 * (R + I) and we can find the
# squared values of the rotation axis on the diagonal of this matrix.
# We can still use the original formula to reconstruct the signs of
# the rotation axis correctly.
a[:3] = np.sqrt(0.5 * (np.diag(R) + 1.0)) * np.sign(axis_unnormalized)
else:
a[:3] = axis_unnormalized
# The norm of axis_unnormalized is 2.0 * np.sin(angle), that is, we
# could normalize with a[:3] = a[:3] / (2.0 * np.sin(angle)),
# but the following is much more precise for angles close to 0 or pi:
a[:3] /= np.linalg.norm(a[:3])
a[3] = angle
return a
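# Illustrative usage sketch (an addition, not part of the original module):
# recover the axis-angle form of a 90 degree active rotation about the z-axis.
# The helper name _demo_axis_angle_from_matrix is hypothetical.
def _demo_axis_angle_from_matrix():
    R_z90 = np.array([[0.0, -1.0, 0.0],
                      [1.0, 0.0, 0.0],
                      [0.0, 0.0, 1.0]])
    a = axis_angle_from_matrix(R_z90)
    assert np.allclose(a, [0.0, 0.0, 1.0, np.pi / 2.0])  # axis z, angle pi/2
    return a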
def axis_angle_from_quaternion(q):
"""Compute axis-angle from quaternion.
This operation is called logarithmic map.
We usually assume active rotations.
Parameters
----------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle). The angle is
constrained to [0, pi) so that the mapping is unique.
"""
q = check_quaternion(q)
p = q[1:]
p_norm = np.linalg.norm(p)
if p_norm < np.finfo(float).eps:
return np.array([1.0, 0.0, 0.0, 0.0])
else:
axis = p / p_norm
angle = (2.0 * np.arccos(q[0]),)
return np.hstack((axis, angle))
def axis_angle_from_compact_axis_angle(a):
"""Compute axis-angle from compact axis-angle representation.
We usually assume active rotations.
Parameters
----------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z).
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle). The angle is
constrained to [0, pi].
"""
a = check_compact_axis_angle(a)
angle = np.linalg.norm(a)
if angle == 0.0:
return np.array([1.0, 0.0, 0.0, 0.0])
else:
axis = a / angle
return np.hstack((axis, (angle,)))
def axis_angle_from_two_directions(a, b):
"""Compute axis-angle representation from two direction vectors.
The rotation will transform direction vector a to direction vector b.
The direction vectors don't have to be normalized as this will be
done internally. Note that there is more than one possible solution.
Parameters
----------
a : array-like, shape (3,)
First direction vector
b : array-like, shape (3,)
Second direction vector
Returns
-------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle). The angle is
constrained to [0, pi].
"""
a = norm_vector(a)
b = norm_vector(b)
cos_angle = a.dot(b)
if abs(-1.0 - cos_angle) < eps:
# For 180 degree rotations we have an infinite number of solutions,
# but we have to pick one axis.
axis = perpendicular_to_vector(a)
else:
axis = np.cross(a, b)
aa = np.empty(4)
aa[:3] = norm_vector(axis)
aa[3] = np.arccos(cos_angle)
return norm_axis_angle(aa)
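# Illustrative usage sketch (an addition): the rotation that maps the x-axis
# onto the y-axis is a 90 degree turn about the z-axis. The helper name is
# hypothetical.
def _demo_axis_angle_from_two_directions():
    aa = axis_angle_from_two_directions([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
    assert np.allclose(aa, [0.0, 0.0, 1.0, np.pi / 2.0])
    return aa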
def compact_axis_angle(a):
"""Compute 3-dimensional axis-angle from a 4-dimensional one.
In a 3-dimensional axis-angle, the 4th dimension (the rotation) is
represented by the norm of the rotation axis vector, which means we
transform :math:`\\left( \\boldsymbol{\hat{e}}, \\theta \\right)` to
:math:`\\theta \\boldsymbol{\hat{e}}`.
We usually assume active rotations.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle).
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z) (compact
representation).
"""
a = check_axis_angle(a)
return a[:3] * a[3]
def compact_axis_angle_from_matrix(R, strict_check=True):
"""Compute compact axis-angle from rotation matrix.
This operation is called logarithmic map. Note that there are two possible
solutions for the rotation axis when the angle is 180 degrees (pi).
We usually assume active rotations.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z). The angle is
constrained to [0, pi].
"""
    a = axis_angle_from_matrix(R, strict_check)
return compact_axis_angle(a)
def compact_axis_angle_from_quaternion(q):
"""Compute compact axis-angle from quaternion (logarithmic map).
We usually assume active rotations.
Parameters
----------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
Returns
-------
a : array-like, shape (3,)
Axis of rotation and rotation angle: angle * (x, y, z). The angle is
constrained to [0, pi].
"""
a = axis_angle_from_quaternion(q)
return compact_axis_angle(a)
def quaternion_from_matrix(R, strict_check=True):
"""Compute quaternion from rotation matrix.
We usually assume active rotations.
.. warning::
When computing a quaternion from the rotation matrix there is a sign
ambiguity: q and -q represent the same rotation.
Parameters
----------
R : array-like, shape (3, 3)
Rotation matrix
strict_check : bool, optional (default: True)
Raise a ValueError if the rotation matrix is not numerically close
enough to a real rotation matrix. Otherwise we print a warning.
Returns
-------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
"""
R = check_matrix(R, strict_check=strict_check)
q = np.empty(4)
# Source: http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
trace = np.trace(R)
if trace > 0.0:
sqrt_trace = np.sqrt(1.0 + trace)
q[0] = 0.5 * sqrt_trace
q[1] = 0.5 / sqrt_trace * (R[2, 1] - R[1, 2])
q[2] = 0.5 / sqrt_trace * (R[0, 2] - R[2, 0])
q[3] = 0.5 / sqrt_trace * (R[1, 0] - R[0, 1])
else:
if R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
sqrt_trace = np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])
q[0] = 0.5 / sqrt_trace * (R[2, 1] - R[1, 2])
q[1] = 0.5 * sqrt_trace
q[2] = 0.5 / sqrt_trace * (R[1, 0] + R[0, 1])
q[3] = 0.5 / sqrt_trace * (R[0, 2] + R[2, 0])
elif R[1, 1] > R[2, 2]:
sqrt_trace = np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])
q[0] = 0.5 / sqrt_trace * (R[0, 2] - R[2, 0])
q[1] = 0.5 / sqrt_trace * (R[1, 0] + R[0, 1])
q[2] = 0.5 * sqrt_trace
q[3] = 0.5 / sqrt_trace * (R[2, 1] + R[1, 2])
else:
sqrt_trace = np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])
q[0] = 0.5 / sqrt_trace * (R[1, 0] - R[0, 1])
q[1] = 0.5 / sqrt_trace * (R[0, 2] + R[2, 0])
q[2] = 0.5 / sqrt_trace * (R[2, 1] + R[1, 2])
q[3] = 0.5 * sqrt_trace
return q
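# Illustrative usage sketch (an addition): the identity rotation matrix maps to
# the identity quaternion (w, x, y, z) = (1, 0, 0, 0). The helper name is
# hypothetical.
def _demo_quaternion_from_matrix():
    q = quaternion_from_matrix(np.eye(3))
    assert np.allclose(q, [1.0, 0.0, 0.0, 0.0])
    return q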
def quaternion_from_axis_angle(a):
"""Compute quaternion from axis-angle.
This operation is called exponential map.
We usually assume active rotations.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: (x, y, z, angle)
Returns
-------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
"""
a = check_axis_angle(a)
theta = a[3]
q = np.empty(4)
q[0] = np.cos(theta / 2)
q[1:] = np.sin(theta / 2) * a[:3]
return q
def quaternion_from_compact_axis_angle(a):
"""Compute quaternion from compact axis-angle (exponential map).
We usually assume active rotations.
Parameters
----------
a : array-like, shape (4,)
Axis of rotation and rotation angle: angle * (x, y, z)
Returns
-------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
"""
a = axis_angle_from_compact_axis_angle(a)
return quaternion_from_axis_angle(a)
def quaternion_xyzw_from_wxyz(q_wxyz):
"""Converts from w, x, y, z to x, y, z, w convention.
Parameters
----------
q_wxyz : array-like, shape (4,)
Quaternion with scalar part before vector part
Returns
-------
q_xyzw : array-like, shape (4,)
Quaternion with scalar part after vector part
"""
q_wxyz = check_quaternion(q_wxyz)
return np.array([q_wxyz[1], q_wxyz[2], q_wxyz[3], q_wxyz[0]])
def quaternion_wxyz_from_xyzw(q_xyzw):
"""Converts from x, y, z, w to w, x, y, z convention.
Parameters
----------
q_xyzw : array-like, shape (4,)
Quaternion with scalar part after vector part
Returns
-------
q_wxyz : array-like, shape (4,)
Quaternion with scalar part before vector part
"""
q_xyzw = check_quaternion(q_xyzw)
return np.array([q_xyzw[3], q_xyzw[0], q_xyzw[1], q_xyzw[2]])
def quaternion_integrate(Qd, q0=np.array([1.0, 0.0, 0.0, 0.0]), dt=1.0):
"""Integrate angular velocities to quaternions.
Parameters
----------
    Qd : array-like, shape (n_steps, 3)
Angular velocities in a compact axis-angle representation. Each angular
velocity represents the rotational offset after one unit of time.
q0 : array-like, shape (4,), optional (default: [1, 0, 0, 0])
Unit quaternion to represent initial rotation: (w, x, y, z)
dt : float, optional (default: 1)
Time interval between steps.
Returns
-------
Q : array-like, shape (n_steps, 4)
Quaternions to represent rotations: (w, x, y, z)
"""
Q = np.empty((len(Qd), 4))
Q[0] = q0
for t in range(1, len(Qd)):
qd = (Qd[t] + Qd[t - 1]) / 2.0
Q[t] = concatenate_quaternions(
quaternion_from_compact_axis_angle(dt * qd), Q[t - 1])
return Q
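# Illustrative usage sketch (an addition): integrating a constant angular
# velocity of pi/2 rad per step about the z-axis for two steps yields a 180
# degree rotation. The helper name is hypothetical.
def _demo_quaternion_integrate():
    Qd = np.array([[0.0, 0.0, np.pi / 2.0]] * 3)
    Q = quaternion_integrate(Qd, dt=1.0)
    assert np.allclose(Q[0], [1.0, 0.0, 0.0, 0.0])   # starts at the identity
    assert np.allclose(Q[-1], [0.0, 0.0, 0.0, 1.0])  # ends at 180 deg about z
    return Q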
def quaternion_gradient(Q, dt=1.0):
"""Time-derivatives of a sequence of quaternions.
Note that this function does not provide the exact same functionality for
quaternions as [NumPy's gradient
function](https://numpy.org/doc/stable/reference/generated/numpy.gradient.html)
for positions. Gradients are always computed as central differences except
the first and last gradient. We additionally accept a parameter dt that
defines the time interval between each quaternion. Note that this means
that we expect this to be constant for the whole sequence.
Parameters
----------
Q : array-like, shape (n_steps, 4)
Quaternions to represent rotations: (w, x, y, z)
dt : float, optional (default: 1)
Time interval between steps. If you have non-constant dt, you can pass
1 and manually divide angular velocities by their corresponding time
interval afterwards.
Returns
-------
A : array-like, shape (n_steps, 3)
Angular velocities in a compact axis-angle representation. Each angular
velocity represents the rotational offset after one unit of time.
"""
Q = check_quaternions(Q)
Qd = np.empty((len(Q), 3))
Qd[0] = compact_axis_angle_from_quaternion(
concatenate_quaternions(Q[1], q_conj(Q[0]))) / dt
for t in range(1, len(Q) - 1):
# divided by two because of central differences
Qd[t] = compact_axis_angle_from_quaternion(
concatenate_quaternions(Q[t + 1], q_conj(Q[t - 1]))) / (2.0 * dt)
Qd[-1] = compact_axis_angle_from_quaternion(
concatenate_quaternions(Q[-1], q_conj(Q[-2]))) / dt
return Qd
def concatenate_quaternions(q1, q2):
"""Concatenate two quaternions.
We use Hamilton's quaternion multiplication.
Suppose we want to apply two extrinsic rotations given by quaternions
q1 and q2 to a vector v. We can either apply q2 to v and then q1 to
the result or we can concatenate q1 and q2 and apply the result to v.
Parameters
----------
q1 : array-like, shape (4,)
First quaternion
q2 : array-like, shape (4,)
Second quaternion
Returns
-------
q12 : array-like, shape (4,)
Quaternion that represents the concatenated rotation q1 * q2
"""
q1 = check_quaternion(q1, unit=False)
q2 = check_quaternion(q2, unit=False)
q12 = np.empty(4)
q12[0] = q1[0] * q2[0] - np.dot(q1[1:], q2[1:])
q12[1:] = q1[0] * q2[1:] + q2[0] * q1[1:] + np.cross(q1[1:], q2[1:])
return q12
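# Illustrative usage sketch (an addition): concatenating with the identity
# quaternion leaves a rotation unchanged. The helper name is hypothetical.
def _demo_concatenate_quaternions():
    q_identity = np.array([1.0, 0.0, 0.0, 0.0])
    q_z90 = np.array([np.cos(np.pi / 4.0), 0.0, 0.0, np.sin(np.pi / 4.0)])
    q12 = concatenate_quaternions(q_identity, q_z90)
    assert np.allclose(q12, q_z90)
    return q12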
def q_prod_vector(q, v):
"""Apply rotation represented by a quaternion to a vector.
We use Hamilton's quaternion multiplication.
Parameters
----------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
v : array-like, shape (3,)
3d vector
Returns
-------
w : array-like, shape (3,)
3d vector
"""
q = check_quaternion(q)
t = 2 * np.cross(q[1:], v)
return v + q[0] * t + np.cross(q[1:], t)
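# Illustrative usage sketch (an addition): a 90 degree rotation about the
# z-axis maps the x-axis onto the y-axis. The helper name is hypothetical.
def _demo_q_prod_vector():
    q_z90 = np.array([np.cos(np.pi / 4.0), 0.0, 0.0, np.sin(np.pi / 4.0)])
    w = q_prod_vector(q_z90, np.array([1.0, 0.0, 0.0]))
    assert np.allclose(w, [0.0, 1.0, 0.0])
    return w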
def q_conj(q):
"""Conjugate of quaternion.
The conjugate of a unit quaternion inverts the rotation represented by
this unit quaternion. The conjugate of a quaternion q is often denoted
as q*.
Parameters
----------
q : array-like, shape (4,)
Unit quaternion to represent rotation: (w, x, y, z)
Returns
-------
q_c : array-like, shape (4,)
Conjugate (w, -x, -y, -z)
"""
q = check_quaternion(q)
    return np.array([q[0], -q[1], -q[2], -q[3]])
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import argparse
import logging
import os
import sys
import time
import cv2
import numpy as np
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
logger = logging.getLogger()
logger.setLevel(logging.INFO)
USE_L2_METRIC = False
def load_pairs(pairs_path):
# print("...Reading pairs.")
pairs = []
with open(pairs_path, 'r') as f:
for line in f.readlines()[1:]:
pair = line.strip().split()
pairs.append(pair)
assert(len(pairs) == 6000)
return np.array(pairs)
def pairs_info(pair):
suffix = 'jpg'
if len(pair) == 3:
name1 = "{}_{}.{}".format(pair[0], pair[1].zfill(4), suffix)
name2 = "{}_{}.{}".format(pair[0], pair[2].zfill(4), suffix)
same = 1
elif len(pair) == 4:
name1 = "{}_{}.{}".format(pair[0], pair[1].zfill(4), suffix)
name2 = "{}_{}.{}".format(pair[2], pair[3].zfill(4), suffix)
same = 0
else:
raise Exception(
"Unexpected pair length: {}".format(len(pair)))
return (name1, name2, same)
def eval_acc(threshold, diff):
y_true = []
y_predict = []
for d in diff:
same = 1 if float(d[2]) > threshold else 0
y_predict.append(same)
y_true.append(int(d[3]))
y_true = np.array(y_true)
y_predict = np.array(y_predict)
accuracy = accuracy_score(y_true, y_predict)
return accuracy
def find_best_threshold(thresholds, predicts):
best_threshold = best_acc = 0
for threshold in thresholds:
accuracy = eval_acc(threshold, predicts)
if accuracy >= best_acc:
best_acc = accuracy
best_threshold = threshold
return best_threshold
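# Illustrative usage sketch (an addition): find_best_threshold scans candidate
# thresholds and keeps the one with the highest accuracy on rows of the form
# [name1, name2, similarity, same]. The synthetic pairs below are assumptions
# for illustration only.
def _demo_find_best_threshold():
    predicts = np.array([
        ["a_0001.jpg", "a_0002.jpg", "0.9", "1"],
        ["b_0001.jpg", "b_0002.jpg", "0.7", "1"],
        ["a_0001.jpg", "c_0001.jpg", "0.2", "0"],
        ["b_0001.jpg", "d_0001.jpg", "0.1", "0"],
    ])
    thresholds = np.arange(-1.0, 1.0, 0.005)
    return find_best_threshold(thresholds, predicts)  # ~0.695 for this toy data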
def acc(predicts):
# print("...Computing accuracy.")
#folds = KFold(n=6000, n_folds=10, shuffle=False)
    folds = KFold(n_splits=10, shuffle=False)
if USE_L2_METRIC:
thresholds = np.arange(170, 180, 0.5)
else:
thresholds = np.arange(-1.0, 1.0, 0.005)
accuracy = []
thd = []
# print(predicts)
for idx, (train, test) in enumerate(folds.split(predicts)):
# logging.info("processing fold {}...".format(idx))
best_thresh = find_best_threshold(thresholds, predicts[train])
accuracy.append(eval_acc(best_thresh, predicts[test]))
thd.append(best_thresh)
return accuracy,thd
def get_predict_file(features, pair_path, write=False):
pairs = load_pairs(pair_path)
predicts = []
for pair in pairs:
name1, name2, same = pairs_info(pair)
# logging.info("processing name1:{} <---> name2:{}".format(name1, name2))
f1 = features[name1]
f2 = features[name2]
dis = np.dot(f1, f2)/np.linalg.norm(f1)/np.linalg.norm(f2)
predicts.append([name1, name2, str(dis), str(same)])
# print 'Done generate predict file!'
return np.array(predicts)
def print_result(predicts):
accuracy, threshold = acc(predicts)
# logging.info("10-fold accuracy is:\n{}\n".format(accuracy))
# logging.info("10-fold threshold is:\n{}\n".format(threshold))
# print("mean threshold is {:.4f}".format(np.mean(threshold)))
print("mean is {:.4f}, var is {:.4f}".format(np.mean(accuracy), np.std(accuracy)))
return np.mean(accuracy)
def test(features, pair_path, write=False):
start = time.time()
predicts = get_predict_file(features, pair_path, write)
accuracy, threshold = acc(predicts)
print("mean is {:.4f}, var is {:.4f}, time: {}s".format(np.mean(accuracy), | np.std(accuracy) | numpy.std |
import numpy as np
a = np.array([3,6,2,6,2])
b = np.array([3,1,5,6,21])
print(a+b) # element-wise addition, index by index
print("\033[1;33m"+"----------------------------------------"+'\033[0;m')
a = np.array([3,6,2,6,2])
b = np.array([3,1,5,6,21])
print(a>b) # element-wise comparison, index by index
print("\033[1;33m"+"----------------------------------------"+'\033[0;m')
a = np.array([3,1,5,6,21])
print(a.argmax()) # returns the index of the largest value
print("\033[1;33m"+"----------------------------------------"+'\033[0;m')
a = np.array([3,1,5,6,21])
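# Added illustrative continuation (the original snippet breaks off above):
# argmin mirrors argmax and returns the index of the smallest value.
print(a.argmin())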
from UQpy.SampleMethods.RSS.rss import RSS
from UQpy.SampleMethods.STS import RectangularSTS
import numpy as np
import scipy.stats as stats
import copy
class RectangularRSS(RSS):
"""
Executes Refined Stratified Sampling using Rectangular Stratification.
``RectangularRSS`` is a child class of ``RSS``. ``RectangularRSS`` takes in all parameters defined in the parent
``RSS`` class with differences note below. Only those inputs and attributes that differ from the parent class
are listed below. See documentation for ``RSS`` for additional details.
**Inputs:**
* **sample_object** (``RectangularSTS`` object):
The `sample_object` for ``RectangularRSS`` must be an object of the ``RectangularSTS`` class.
**Methods:**
"""
def __init__(self, sample_object=None, runmodel_object=None, krig_object=None, local=False, max_train_size=None,
step_size=0.005, qoi_name=None, n_add=1, nsamples=None, random_state=None, verbose=False):
if not isinstance(sample_object, RectangularSTS):
raise NotImplementedError("UQpy Error: sample_object must be an object of the RectangularSTS class.")
self.strata_object = copy.deepcopy(sample_object.strata_object)
super().__init__(sample_object=sample_object, runmodel_object=runmodel_object, krig_object=krig_object,
local=local, max_train_size=max_train_size, step_size=step_size, qoi_name=qoi_name,
n_add=n_add, nsamples=nsamples, random_state=random_state, verbose=verbose)
def run_rss(self):
"""
Overwrites the ``run_rss`` method in the parent class to perform refined stratified sampling with rectangular
strata. It is an instance method that does not take any additional input arguments. See
the ``RSS`` class for additional details.
"""
if self.runmodel_object is not None:
self._gerss()
else:
self._rss()
self.weights = self.strata_object.volume
def _gerss(self):
"""
This method generates samples using Gradient Enhanced Refined Stratified Sampling.
"""
if self.verbose:
print('UQpy: Performing GE-RSS with rectangular stratification...')
# Initialize the vector of gradients at each training point
dy_dx = np.zeros((self.nsamples, np.size(self.training_points[1])))
# Primary loop for adding samples and performing refinement.
for i in range(self.samples.shape[0], self.nsamples, self.n_add):
p = min(self.n_add, self.nsamples - i) # Number of points to add in this iteration
# If the quantity of interest is a dictionary, convert it to a list
qoi = [None] * len(self.runmodel_object.qoi_list)
if type(self.runmodel_object.qoi_list[0]) is dict:
for j in range(len(self.runmodel_object.qoi_list)):
qoi[j] = self.runmodel_object.qoi_list[j][self.qoi_name]
else:
qoi = self.runmodel_object.qoi_list
# ################################
# --------------------------------
# 1. Determine the strata to break
# --------------------------------
# Compute the gradients at the existing sample points
if self.max_train_size is None or len(
self.training_points) <= self.max_train_size or i == self.samples.shape[0]:
# Use the entire sample set to train the surrogate model (more expensive option)
dy_dx[:i] = self.estimate_gradient(np.atleast_2d(self.training_points),
np.atleast_2d(np.array(qoi)),
self.strata_object.seeds +
0.5 * self.strata_object.widths)
else:
# Use only max_train_size points to train the surrogate model (more economical option)
# Find the nearest neighbors to the most recently added point
from sklearn.neighbors import NearestNeighbors
knn = NearestNeighbors(n_neighbors=self.max_train_size)
knn.fit(np.atleast_2d(self.training_points))
                neighbors = knn.kneighbors(np.atleast_2d(self.training_points[-1]), return_distance=False)
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 26 14:20:58 2017
AFM test for Raybin
"""
#%% Imports
import tkinter as tk
from tkinter import filedialog, ttk
import os
import sys
import lmfit
from PIL import Image
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
import scipy
import scipy.signal  # savgol_filter is used below; the submodule needs an explicit import
import exifread  # AutoDetect reads SEM metadata with exifread for non-AFM images
from skimage import restoration, morphology, filters, feature
import matplotlib.pyplot as plt
plt.rcParams['animation.ffmpeg_path'] = r'C:\ffmpeg\bin\ffmpeg.exe'
import matplotlib.animation as manimation
try:
from igor.binarywave import load as loadibw
except: print('You will be unable to open Asylum data without igor')
#%% Defs
#%%
def AutoDetect( FileName , Opt ):
"""
Attempt to autodetect the instrument used to collect the image
Currently supports the Zeiss Merlin
V0.2
0.1 - Orig
0.2 - Asylum AFM added
"""
if Opt.FExt == ".ibw": # file is igor
if Opt.Machine!="Asylum AFM":Opt.Machine="Asylum AFM"; # avoid race condition in parallel loop
RawData= loadibw(FileName)['wave']
Labels = RawData['labels'][2]
Labels = [i.decode("utf-8") for i in Labels] # make it strings
# Need to add a selector here for future height/phase
AFMIndex=[ i for i, s in enumerate(Labels) if Opt.AFMLayer in s] #they index from 1????
AFMIndex= AFMIndex[0]-1 # fix that quick
imarray = RawData['wData'][:,:,AFMIndex]
#slow scan is column in original data
# AFM data has to be leveled :(
TArray=imarray.transpose() # necessary so that slow scan Y and fast scan is X EG afm tip goes along row > < then down to next row etc
if Opt.AFMLevel == 1: #median leveling
MeanSlow=imarray.mean(axis=0) # this calculates the mean of each slow scan row
Mean=imarray.mean() # mean of everything
MeanOffset=MeanSlow-Mean # determine the offsets
imfit=imarray-MeanOffset # adjust the image
elif Opt.AFMLevel==2: # median of dif leveling
DMean=np.diff(TArray,axis=0).mean(axis=1)
# calc the 1st order diff from one row to next. Then average these differences
DMean=np.insert(DMean,0,0) # the first row we don't want to adjust so pop in a 0
imfit = imarray-DMean
elif Opt.AFMLevel==3: # Polynomial leveling
imfit = np.zeros(imarray.shape)
FastInd = np.arange(imarray.shape[0]) # this is 0 - N rows
for SlowInd in np.arange(imarray.shape[1]): # for each column eg slowscan axis
Slow = imarray[:, SlowInd]
PCoef = np.polyfit(FastInd, Slow, Opt.AFMPDeg)
POffset = np.polyval(PCoef , FastInd)
imfit[:, SlowInd] = imarray[:, SlowInd] - POffset
else: imfit=imarray
#Brightness/Contrast RESET needed for denoising. Need to figure out how to keep track of this? add an opt?
# imfit = imfit - imfit.min()
# imfit = imfit*255/imfit.max()
if Opt.NmPP!=RawData['wave_header']['sfA'][0]*1e9:Opt.NmPP=RawData['wave_header']['sfA'][0]*1e9 # hopefully avoid issues with parallel
RawData.clear()
imarray=imfit
else:
im= Image.open(FileName)
if im.mode!="P":
im=im.convert(mode='P')
print("Image was not in the original format, and has been converted back to grayscale. Consider using the original image.")
imarray = np.array(im)
SkimFile = open( FileName ,'rb')
MetaF=exifread.process_file(SkimFile)
SkimFile.close()
try:
Opt.FInfo=str(MetaF['Image Tag 0x8546'].values);
Opt.NmPP=float(Opt.FInfo[17:30])*10**9;
Opt.Machine="Merlin";
except:
pass
# if Opt.NmPP!=0:
# print("Instrument was autodetected as %s, NmPP is %f \n" % (Opt.Machine ,Opt.NmPP) )
# else:
# print("Instrument was not detected, and NmPP was not set. Please set NmPP and rerun")
return(imarray);
def PeakPara(LineIn, NmPP, CValley, SetFWidth):
Length = LineIn.size
gmodel = lmfit.Model(gaussian)
Inits = gmodel.make_params()
FPeak = np.zeros(CValley.shape)
FPWidth = np.zeros(CValley.shape)
GradCurve = np.diff(np.sign((np.gradient(LineIn))))
# this just says where sign of 1D changes
for pp in range(len(CValley)): #loop through peak positions (guesstimates)
PCur = int(CValley[pp]) # this is our current *peak* guesstimate
# first goal : Refine the peak guesstimate for this line
FitWidth = SetFWidth # look at local area only
PLow = int(np.maximum((PCur-FitWidth),0))
PHigh = int(np.min((PCur+FitWidth+1,Length-1)))
try:PCur = int(np.arange(PLow,PHigh)[np.argmax(GradCurve[PLow:PHigh])+1])
except:pass
# set peak as the minimum (max 2nd div)
# the +1 is to fix the derivative offset from data
#now expand our range to the next domains
FitWidth = SetFWidth * 2
PLow = int(np.maximum((PCur-FitWidth),0))
PHigh = int(np.min((PCur+FitWidth+1,Length-1)))
# now fix the point to the Right of the valley
# Remember we are looking for mim of second derivative +1 is to fix diff offset
PHigh = int(PCur + np.argmin(GradCurve[PCur:PHigh]) +1)
# now fix the point to the left of the valley.
#Do the flip so the first point we find is closest to peak
# do -1 cus we're moving otherway
PLow = int(PCur - np.argmin(np.flip(GradCurve[PLow:PCur],0)) -1)
# PLow is now the max peak to the left of the current valley
# PHigh is now the max peak to the right of the current valley
LocalCurve = abs((LineIn[PLow:PHigh]-max(LineIn[PLow:PHigh])))
# this just flips the curve to be right side up with a minimum of 0
# so we can map it onto the typical gaussian
Inits['amp']=lmfit.Parameter(name='amp', value= max(LocalCurve))
Inits['wid']=lmfit.Parameter(name='wid', value= FitWidth)
Inits['cen']=lmfit.Parameter(name='cen', value= PCur, min=PCur-7, max=PCur+7)
try:
Res = gmodel.fit(LocalCurve, Inits, x=np.arange(PLow,PHigh))
FPeak[pp] = Res.best_values['cen']*NmPP
FPWidth[pp] = abs(np.copy(Res.best_values['wid']*2.35482*NmPP)) # FWHM in NM
if (abs(Res.best_values['cen'] - PCur) > 5) or (Res.best_values['wid'] > 50) or (Res.best_values['cen']==PCur):
FPWidth[pp] = np.nan
FPeak[pp] = np.nan
except:
FPWidth[pp] = np.nan
FPeak[pp] = np.nan
return( FPeak, FPWidth)
#%%
def onclick(event):
global XPeak
XPeak = np.append(XPeak, event.xdata)
FPlot1.axvline(x=event.xdata,linewidth=2, color='r')
plt.draw()
def onkey(event):
global Block
global XPeak
sys.stdout.flush()
if event.key == "escape":
Block = 0
FPlot.canvas.mpl_disconnect(cid) # disconnect both click
FPlot.canvas.mpl_disconnect(kid) # and keypress events
if event.key == "c":
XPeak = np.array([])
FPlot1.cla()
FPlot1.plot(Xplot,RawComp,'b',Xplot,SavFil,'k')
FPlot1.legend(['Raw','Filt.'],loc="upper right")
plt.draw()
#%%
def gaussian(x, amp, cen, wid):
return amp * np.exp(-(x-cen)**2 / (2*wid**2))
#%%
def Hooks(x, K, Offset):
return 0.5*x**2*K+Offset
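# Illustrative sketch (an addition): gaussian() above is the line shape used by
# the peak fits (FWHM = 2.35482 * wid), and Hooks() is the harmonic potential
# 0.5 * K * x**2 + Offset. The helper name and sample values are hypothetical.
def _demo_fit_shapes():
    x = np.linspace(-10.0, 10.0, 201)
    g = gaussian(x, amp=1.0, cen=0.0, wid=2.0)  # peak of height 1 at x = 0
    u = Hooks(x, K=0.05, Offset=0.0)            # parabola with curvature K
    return g, u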
#%%
if __name__ == '__main__':
# OK
Vers = "AAA"
# Will hold options
class Opt:
pass
# WIll hold outputs
class Output:
pass
#%% Default options
#IndividualLog =1; # Write a log for each sample?
CombLog = 1 # If One write a combined log, if two clean it out each time(don't append)
#ShowImage = 0 # Show images?
plt.ioff()
Opt.Boltz = 8.617e-5 # boltzmann, using eV/K here
Opt.NmPP = 0 # Nanometers per pixel scaling (will autodetect)
Opt.l0 = 50 # nanometer l0
Opt.DomPerTrench = 7 # how many domains are there in a trench?
#%% AFM Settings
Opt.AFMLayer = "Phase" #Matched Phase ZSensor
Opt.AFMLevel = 3 # 0 = none 1 = Median 2= Median of Dif 3 = polyfit
Opt.AFMPDeg = 1 # degree of polynomial.
# Autocorrelation max shift
Opt.ACCutoff = 50
Opt.ACSize = 400
Opt.AngMP = 5 # Do a midpoint average based on this many points
# EG AngMP = 5 then 1 2 3 4 5, 3 will be calc off angle 1 - 5
Opt.Machine = "Unknown"
#%% Plot Options
Opt.TDriftP = 0;
#%% Select Files
FOpen=tk.Tk()
currdir = os.getcwd()
FNFull = tk.filedialog.askopenfilename(parent=FOpen, title='Please select a file', multiple=1)
FOpen.withdraw()
#if len(FNFull) > 0:
# print("You chose %s" % FNFull)
#%% Do Once
ImNum = 0
Opt.Name = FNFull[ImNum] # this hold the full file name
Opt.FPath, Opt.BName= os.path.split(Opt.Name) # File Path/ File Name
(Opt.FName, Opt.FExt) = os.path.splitext(Opt.BName) # File name/File Extension split
firstim = AutoDetect( FNFull[ImNum], Opt)
Shap0 =firstim.shape[0]
Shap1 = firstim.shape[1]
RawIn = np.zeros((Shap0,Shap1,len(FNFull)))
RawComb = np.zeros((Shap0,Shap1*len(FNFull)))
XPeak = np.array([])
Block = 1
#%%
print(FNFull[0])
Opt.TempC = float(input('Temperature in Celsius : '))
Opt.Temp = Opt.TempC+273.15
#%% Import data ( can't be parallelized as it breaks the importer )
for ii in range(0, len(FNFull)):
try:
if Opt.NmPPSet!=0:Opt.NmPP=Opt.NmPPSet # so if we set one then just set it
except:
pass
RawIn[:,:,ii]=AutoDetect( FNFull[ii], Opt) # autodetect the machine, nmpp and return the raw data array
print('Loading raw data %i/%i' %(ii,len(FNFull)))
#if ii%2 == 1:
# RawIn[:,:,ii] = np.flipud(RawIn[:,:,ii]) # if odd flip upside down
RawComb[:,ii*Shap1:(ii+1)*Shap1] = RawIn[:,:,ii] # make one array with all data in an order not used do as seperate experiments
#%% Find the markers between arrays
# per image
for ImNum in range(0, len(FNFull)):
Opt.Name = FNFull[ImNum] # this hold the full file name
Opt.FPath, Opt.BName= os.path.split(Opt.Name) # File Path/ File Name
(Opt.FName, Opt.FExt) = os.path.splitext(Opt.BName) # File name/File Extension split
# Make output folder if needed
try:
os.stat(os.path.join(Opt.FPath,"output"))
except:
os.mkdir(os.path.join(Opt.FPath,"output"))
RawComp = RawIn[:,:,ImNum].sum(axis=1) # sum along the channels to get a good idea where peaks are
RawTop = RawIn[:,:5,ImNum].sum(axis=1)
RawBot = RawIn[:,-5:,ImNum].sum(axis=1)
#%%
SavFil = scipy.signal.savgol_filter(RawComp,5,2,axis = 0)
TopFil = scipy.signal.savgol_filter(RawTop,5,2,axis = 0)
BotFil = scipy.signal.savgol_filter(RawBot,5,2,axis = 0)
D1SavFil = scipy.signal.savgol_filter(RawComp,5,2,deriv = 1,axis = 0)
D2SavFil = scipy.signal.savgol_filter(RawComp,5,2, deriv = 2,axis = 0)
FPlot = plt.figure(figsize=(8,3))
FPlot.clf()
FPlot.suptitle('Click the first domain (valley) for each trench. Then hit ESCAPE \n'
+'Hit c to Cancel and restart if you did an oopsy')
Xplot = range(0, Shap0)
FPlot1 = FPlot.add_subplot(211)
FPlot1.plot(Xplot,RawComp,'b',Xplot,SavFil,'k')
FPlot1.legend(['Raw','Filt.'],loc="upper right")
FPlot2 = FPlot.add_subplot(212)
FPlot2.plot(Xplot,D1SavFil,'r',Xplot,D2SavFil,'k')
FPlot2.legend(['1st Deriv','2nd Deriv'],loc="upper right")
FPlot.show()
if Block == 1:
cid = FPlot.canvas.mpl_connect('button_press_event', onclick)
kid = FPlot.canvas.mpl_connect('key_press_event', onkey)
while Block == 1:
plt.pause(1)
FPlot.savefig(os.path.join(Opt.FPath,"output",Opt.FName + "SVG.png"), dpi=300)
FPlot.clf()
plt.close(FPlot)
# What is the peak corresponding to pattern?
#%%
#Following is per image. Find the peaks
#PatPeak = np.zeros((100,RawComp.shape[1]))
#PolyPeak = np.zeros((150,RawComp.shape[1]))
PSpace = int(np.floor(Opt.l0/Opt.NmPP*.5)) # peaks must be at least 70% of L0 apart
Peak = np.zeros((0,0))
Valley = np.zeros((0,0))
PatSep = np.zeros((1,1))#start with zero as a potential separator
for xx in range(len(SavFil)-1):
if D1SavFil[xx]*D1SavFil[xx+1] <= 0: # if 1D changes signs
if (D2SavFil[xx]+D2SavFil[xx+1])/2 <= 0: # if 2D is neg
if (SavFil[xx]+SavFil[xx+1])/2 < SavFil.max()*.6 : # if we're above 3k then it's the pattern
Peak = np.append(Peak,xx)
else:
PatSep=np.append(PatSep,xx)
else:
Valley = np.append(Valley,xx)
#% add the last value as a potential pat separator
PatSep = np.append(PatSep,len(SavFil)-1)
#%% use manual sep clicked earlier to do the clean up
CPatSep = np.zeros_like(XPeak)
for xx in range(XPeak.size):
CPatSep[xx] = Valley[np.argmin(abs(Valley-XPeak[xx]))]
CPatSep = np.unique(CPatSep) # just in case we accidentally double clicked a peak
CValley = np.zeros(int(CPatSep.size*Opt.DomPerTrench))
for xx in range(CPatSep.size):
CPatSepInd = np.nonzero(Valley == CPatSep[xx])[0][0]
CValley[Opt.DomPerTrench*xx:Opt.DomPerTrench*(xx+1)] = Valley[CPatSepInd:(CPatSepInd+Opt.DomPerTrench)]
CPatSep -= 5 #scoot our separators off the peak
CPatSep = np.append(CPatSep, len(SavFil)-1)
CBins = np.digitize(CValley,CPatSep)
BinCount = np.histogram(CValley,CPatSep)[0]
MidInd = np.zeros(CPatSep.size-1,dtype='uint')
EdgeInd = np.zeros((CPatSep.size-1)*2,dtype='uint')
for bb in range(CPatSep.size-1):
MidInd[bb] = bb*Opt.DomPerTrench+((Opt.DomPerTrench-1)/2)
EdgeInd[bb*2] = bb*Opt.DomPerTrench
EdgeInd[bb*2+1] = (bb+1)*Opt.DomPerTrench-1
#%%
FitWidth = int(Opt.l0/Opt.NmPP*.5)
#FitWidth = int(4)
#% Parallel cus gotta go fast
print('\n Image '+Opt.FName+'\n')
__spec__ = "ModuleSpec(name='builtins', loader=<class '_frozen_importlib.BuiltinImporter'>)"
POut = Parallel(n_jobs=-1,verbose=5)(delayed(PeakPara)(RawIn[:,tt,ImNum], Opt.NmPP, CValley, FitWidth) # backend='threading' removed
for tt in range(RawIn.shape[1]))
FPTuple, FPWTuple = zip(*POut)
FPeak = np.array(FPTuple).transpose()
FPWidth = np.array(FPWTuple).transpose()
print('Done')
#Everything past here is already in Nanometers. PEAK FITTING OUTPUTS IT
#%% Save filtered data
np.savetxt(os.path.join(Opt.FPath,"output",Opt.FName + "FitPeak.csv"),FPeak,delimiter=',')
np.savetxt(os.path.join(Opt.FPath,"output",Opt.FName + "FitFWHM.csv"),FPWidth,delimiter=',')
#%% Calc Displacement
FDisp = ((FPeak.transpose() - np.nanmean(FPeak,axis=1)).transpose())
FPWidthDisp = ((FPWidth.transpose() - np.nanmean(FPWidth,axis=1)).transpose())
#%% Do thermal drift correction
XTD = np.arange(FDisp.shape[1])
YTD = np.nanmean(FDisp,axis=0)
TDFit = np.polyfit(XTD,YTD,1)
TDPlot = np.polyval(TDFit,XTD)
if Opt.TDriftP == 1:
TDF, TDAx = plt.subplots()
TDF.suptitle('Thermal Drift y=%f*x+%f' % (TDFit[0],TDFit[1]))
TDAx.plot(XTD,YTD,XTD,TDPlot)
TDF.savefig(os.path.join(Opt.FPath,"output",Opt.FName + "ThermalDrift.png"), dpi=600)
TDF.clf()
plt.close(TDF)
#%% now correct the data for drift
FDispCorrect = (FDisp - TDPlot)
#%% move on
PanDisp = pd.DataFrame(data=FDispCorrect.transpose())
PanWidth = pd.DataFrame(data=FPWidth.transpose())
#StackDisp
#StackWidth
#%% Cross Corref
StackDisp = FDispCorrect.transpose()[:,0:Opt.DomPerTrench]
StackWidth = FPWidth.transpose()[:,0:Opt.DomPerTrench]
for xx in np.arange(1,CPatSep.size-1):
StackDisp=np.concatenate( (StackDisp,FDispCorrect.transpose()[:,xx*Opt.DomPerTrench:(xx+1)*Opt.DomPerTrench]) )
StackWidth=np.concatenate((StackWidth,FPWidth.transpose()[:,xx*Opt.DomPerTrench:(xx+1)*Opt.DomPerTrench]))
PDStackD = pd.DataFrame(data=StackDisp)
PDStackW = pd.DataFrame(data=StackWidth)
StackD1O = np.zeros((0,2)) # 1 over correlation
StackW1O = np.zeros((0,2)) # ditto for width
StackD2O = np.zeros((0,2)) # 2 over correlation
StackW2O = np.zeros((0,2)) # ditto for width
StackD3O = np.zeros((0,2)) # 3 over correlation
StackW3O = np.zeros((0,2)) # ditto for width
for nn in range(Opt.DomPerTrench-1):
StackD1O = np.append( StackD1O, np.array((PDStackD.values[:,nn],PDStackD.values[:,nn+1])).transpose(),axis = 0 )
StackW1O = np.append( StackW1O, np.array((PDStackW.values[:,nn],PDStackW.values[:,nn+1])).transpose(),axis = 0 )
if nn < Opt.DomPerTrench-2:
StackD2O = np.append( StackD2O, np.array((PDStackD.values[:,nn],PDStackD.values[:,nn+2])).transpose(),axis = 0 )
StackW2O = np.append( StackW2O, np.array((PDStackW.values[:,nn],PDStackW.values[:,nn+2])).transpose(),axis = 0 )
if nn < Opt.DomPerTrench-3:
StackD3O = np.append( StackD3O, np.array((PDStackD.values[:,nn],PDStackD.values[:,nn+3])).transpose(),axis = 0 )
StackW3O = np.append( StackW3O, np.array((PDStackW.values[:,nn],PDStackW.values[:,nn+3])).transpose(),axis = 0 )
CCDisp = PDStackD.corr() # calcualte cross correlations
CCWidth = PDStackW.corr() # calcualte cross correlations
#%% Plot Pek Cross Corr
CCF , CCAx = plt.subplots()
CCF.suptitle('Peak displacement correlations (Pearson)')
CCIm = CCAx.imshow(CCDisp, cmap="seismic_r", vmin=-1, vmax=1)
CCF.colorbar(CCIm)
CCF.savefig(os.path.join(Opt.FPath,"output",Opt.FName + "DisplacementCC.png"), dpi=300)
CCF.clf()
plt.close(CCF)
#%%
CCWidthF , CCWidthAx = plt.subplots()
CCWidthF.suptitle('FWHM correlations (Pearson)')
CCWidthIm = CCWidthAx.imshow(CCWidth, cmap="seismic_r", vmin=-1, vmax=1)
CCWidthF.colorbar(CCWidthIm)
CCWidthF.savefig(os.path.join(Opt.FPath,"output",Opt.FName + "WidthCC.png"), dpi=300)
CCWidthF.clf()
plt.close(CCWidthF)
#%%
CrossCorF , CrossCorAx = plt.subplots(1,3, figsize=(12,4))
CrossCorF.suptitle('1st Order Correlations, 2nd Order Correlations, Third Order Correlations')
CrossCorAx[0].hexbin(StackD1O[:,0],StackD1O[:,1],gridsize=20,extent=(-10, 10, -10, 10))
CrossCorAx[0].set_aspect('equal')
CrossCorAx[1].hexbin(StackD2O[:,0],StackD2O[:,1],gridsize=20,extent=(-10, 10, -10, 10))
CrossCorAx[1].set_aspect('equal')
CrossCorAx[2].hexbin(StackD3O[:,0],StackD3O[:,1],gridsize=20,extent=(-10, 10, -10, 10))
CrossCorAx[2].set_aspect('equal')
CrossCorF.savefig(os.path.join(Opt.FPath,"output",Opt.FName + "CrossCor.png"), dpi=600)
#%%
CrossCorF.clf()
plt.close(CrossCorF)
#%% Autocorrelation Opt.AcSize
ACPeak = pd.DataFrame()
CCPeak = pd.DataFrame()
ACMSD = pd.DataFrame()
CCMSD = pd.DataFrame()
ACWidth = pd.DataFrame()
CCWidth = pd.DataFrame()
for lag in range(Opt.ACSize):
ACPeak = ACPeak.append( PanDisp.corrwith(PanDisp.shift(periods=lag)).rename('lag%i' %lag))
CCPeak = CCPeak.append( PanDisp.corrwith(PanDisp.shift(periods=lag).shift(1,axis=1)).rename('lag%i' %lag))
ACMSD = ACMSD.append(((PanDisp.shift(periods=lag)-PanDisp)**2).mean().rename('lag%i' %lag))
ACWidth = ACWidth.append( PanWidth.corrwith(PanWidth.shift(periods=lag)).rename('lag%i' %lag))
CCWidth = CCWidth.append( PanWidth.corrwith(PanWidth.shift(periods=lag).shift(1,axis=1)).rename('lag%i' %lag))
#%% Power Spectral Density
PSDPeak = np.abs(np.fft.rfft(FDispCorrect))**2
PSDWidth = np.abs(np.fft.rfft(FPWidthDisp))**2
PSDFreq = np.fft.rfftfreq(FPeak.shape[1],0.05) # Sampling rate is 20 hz so sample time = .05?
PSDCk = (4*PSDPeak-PSDWidth)/(4*PSDPeak+PSDWidth)
PSDF , PSDAx = plt.subplots(nrows=3, sharex = True)
PSDF.suptitle('Power Spectral Density (Position, Width and CK)')
        PSDAx[0].loglog(PSDFreq[1:], np.nanmean(PSDPeak, axis=0)[1:])
import numpy as np
from math import pi
import openmc
import pytest
from tests.testing_harness import HashedPyAPITestHarness
@pytest.fixture
def model():
model = openmc.model.Model()
fuel = openmc.Material()
fuel.set_density('g/cm3', 10.0)
fuel.add_nuclide('U235', 1.0)
zr = openmc.Material()
zr.set_density('g/cm3', 1.0)
zr.add_nuclide('Zr90', 1.0)
model.materials.extend([fuel, zr])
box1 = openmc.model.rectangular_prism(10.0, 10.0)
box2 = openmc.model.rectangular_prism(20.0, 20.0, boundary_type='reflective')
top = openmc.ZPlane(z0=10.0, boundary_type='vacuum')
bottom = openmc.ZPlane(z0=-10.0, boundary_type='vacuum')
cell1 = openmc.Cell(fill=fuel, region=box1 & +bottom & -top)
cell2 = openmc.Cell(fill=zr, region=~box1 & box2 & +bottom & -top)
model.geometry = openmc.Geometry([cell1, cell2])
model.settings.batches = 5
model.settings.inactive = 0
model.settings.particles = 1000
# Create meshes
mesh_1d = openmc.RegularMesh()
mesh_1d.dimension = [5]
mesh_1d.lower_left = [-7.5]
mesh_1d.upper_right = [7.5]
mesh_2d = openmc.RegularMesh()
mesh_2d.dimension = [5, 5]
mesh_2d.lower_left = [-7.5, -7.5]
mesh_2d.upper_right = [7.5, 7.5]
mesh_3d = openmc.RegularMesh()
mesh_3d.dimension = [5, 5, 5]
mesh_3d.lower_left = [-7.5, -7.5, -7.5]
mesh_3d.upper_right = [7.5, 7.5, 7.5]
dx = dy = dz = 15 / 5
reg_mesh_exp_vols = np.full(mesh_3d.dimension, dx*dy*dz)
np.testing.assert_equal(mesh_3d.volumes, reg_mesh_exp_vols)
recti_mesh = openmc.RectilinearMesh()
recti_mesh.x_grid = np.linspace(-7.5, 7.5, 18)
recti_mesh.y_grid = np.linspace(-7.5, 7.5, 18)
recti_mesh.z_grid = np.logspace(0, np.log10(7.5), 11)
dx = dy = 15 / 17
dz = np.diff(np.logspace(0, np.log10(7.5), 11))
dxdy = np.full(recti_mesh.dimension[:2], dx*dy)
    recti_mesh_exp_vols = np.multiply.outer(dxdy, dz)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import math
import time
import numpy as np
import torch as th
import torch.nn.functional as F
import torch.optim as optim
from ogb.nodeproppred import DglNodePropPredDataset, Evaluator
from scipy import io
from sklearn import metrics
import itertools
import matplotlib.colors as colors
from matplotlib import pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator
from models import AGNN
global device, in_feats, n_classes, epsilon
device = None
in_feats, n_classes = None, None
epsilon = 1 - math.log(2)
def gen_model(args):
norm = "both" if args.use_norm else "none"
if args.use_labels:
model = AGNN(
in_feats + n_classes,
n_classes,
n_hidden=args.n_hidden,
n_layers=args.n_layers,
n_heads=args.n_heads,
activation=F.relu,
dropout=args.dropout,
attn_drop=args.attn_drop,
norm=norm,
)
else:
model = AGNN(
in_feats,
n_classes,
n_hidden=args.n_hidden,
n_layers=args.n_layers,
n_heads=args.n_heads,
activation=F.relu,
dropout=args.dropout,
attn_drop=args.attn_drop,
norm=norm,
)
return model
def cross_entropy(x, labels):
y = F.cross_entropy(x, labels[:, 0], reduction="none")
y = th.log(epsilon + y) - math.log(epsilon)
return th.mean(y)
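# Illustrative sketch (an addition): the loss above is ordinary cross-entropy
# passed through log(eps + ce) - log(eps), which damps very large per-node
# losses. The toy logits and labels below are assumptions for illustration.
def _demo_cross_entropy():
    logits = th.tensor([[2.0, 0.0, -1.0], [0.5, 0.1, 0.2]])
    labels = th.tensor([[0], [2]])
    return cross_entropy(logits, labels)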
def compute_acc(pred, labels, evaluator):
return evaluator.eval({"y_pred": pred.argmax(dim=-1, keepdim=True), "y_true": labels})["acc"]
def add_labels(feat, labels, idx):
onehot = th.zeros([feat.shape[0], n_classes]).to(device)
onehot[idx, labels[idx, 0]] = 1
return th.cat([feat, onehot], dim=-1)
def adjust_learning_rate(optimizer, lr, epoch):
if epoch <= 50:
for param_group in optimizer.param_groups:
param_group["lr"] = lr * epoch / 50
def train(model, graph, labels, train_idx, optimizer, use_labels):
model.train()
feat = graph.ndata["feat"]
if use_labels:
mask_rate = 0.5
mask = th.rand(train_idx.shape) < mask_rate
train_labels_idx = train_idx[mask]
train_pred_idx = train_idx[~mask]
feat = add_labels(feat, labels, train_labels_idx)
else:
mask_rate = 0.5
mask = th.rand(train_idx.shape) < mask_rate
train_pred_idx = train_idx[mask]
optimizer.zero_grad()
pred = model(graph, feat)
loss = cross_entropy(pred[train_pred_idx], labels[train_pred_idx])
loss.backward()
    th.nn.utils.clip_grad_norm_(model.parameters(), 10)
optimizer.step()
return loss, pred
@th.no_grad()
def evaluate(model, graph, labels, train_idx, val_idx, test_idx, use_labels, evaluator):
model.eval()
feat = graph.ndata["feat"]
if use_labels:
feat = add_labels(feat, labels, train_idx)
pred = model(graph, feat)
train_loss = cross_entropy(pred[train_idx], labels[train_idx])
val_loss = cross_entropy(pred[val_idx], labels[val_idx])
test_loss = cross_entropy(pred[test_idx], labels[test_idx])
return (
compute_acc(pred[train_idx], labels[train_idx], evaluator),
compute_acc(pred[val_idx], labels[val_idx], evaluator),
compute_acc(pred[test_idx], labels[test_idx], evaluator),
train_loss,
val_loss,
test_loss,
)
def count_parameters(args):
model = gen_model(args)
print([np.prod(p.size()) for p in model.parameters() if p.requires_grad])
return sum([np.prod(p.size()) for p in model.parameters() if p.requires_grad])
# %% Define class with the model arguments
class args:
cpu = True #Run cpu only if true. This overrides the gpu value
gpu = 0 #Change number if different GPU device ID
n_runs = 1 #Number of model runs
n_epochs = 1000 #2000 #Number of epochs
use_labels = False #Use labels in the training set as input features
use_norm = False #Use symmetrically normalized adjacency matrix
lr = 0.002 #0.002 Learning rate
n_layers = 2 #3 #Number of layers
n_heads = 1 #3
n_hidden = 256 #256
dropout = 0.75 #0.75
attn_drop = 0.05
wd = 0
log_every = 1 #print result every log_every-th epoch
#plot_curves = True
# Define folder to save plots and model in
foldername = "test"
# set cpu or gpu
if args.cpu:
device = th.device("cpu")
else:
device = th.device("cuda:%d" % args.gpu)
# load data
data = DglNodePropPredDataset(name="ogbn-arxiv")
evaluator = Evaluator(name="ogbn-arxiv")
splitted_idx = data.get_idx_split()
train_idx, val_idx, test_idx = splitted_idx["train"], splitted_idx["valid"], splitted_idx["test"]
graph, labels = data[0]
# add reverse edges
srcs, dsts = graph.all_edges()
graph.add_edges(dsts, srcs)
# add self-loop
print(f"Total edges before adding self-loop {graph.number_of_edges()}")
graph = graph.remove_self_loop().add_self_loop()
print(f"Total edges after adding self-loop {graph.number_of_edges()}")
in_feats = graph.ndata["feat"].shape[1]
n_classes = (labels.max() + 1).item()
# graph.create_format_()
train_idx = train_idx.to(device)
val_idx = val_idx.to(device)
test_idx = test_idx.to(device)
labels = labels.to(device)
graph = graph.to(device)
# %% Run the model
val_accs = []
test_accs = []
# define model and optimizer
model = gen_model(args)
model = model.to(device)
optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wd)
# training loop
total_time = 0
best_val_acc, best_test_acc, best_val_loss = 0, 0, float("inf")
#save accuracy and loss values
accs, train_accs, val_accs, test_accs = [], [], [], []
losses, train_losses, val_losses, test_losses = [], [], [], []
for epoch in range(1, args.n_epochs + 1):
print("Starting Epoch ", epoch)
tic = time.time()
adjust_learning_rate(optimizer, args.lr, epoch)
loss, pred = train(model, graph, labels, train_idx, optimizer, args.use_labels)
acc = compute_acc(pred[train_idx], labels[train_idx], evaluator)
train_acc, val_acc, test_acc, train_loss, val_loss, test_loss = evaluate(
model, graph, labels, train_idx, val_idx, test_idx, args.use_labels, evaluator
)
toc = time.time()
total_time += toc - tic
print("Epoch run-time ", toc-tic)
if val_loss < best_val_loss:
best_val_loss = val_loss
best_val_acc = val_acc
best_test_acc = test_acc
if epoch % args.log_every == 0:
print(f"\nEpoch: {epoch}/{args.n_epochs}")
print(
f"Loss: {loss.item():.4f}, Acc: {acc:.4f}\n"
f"Train/Val/Test loss: {train_loss:.4f}/{val_loss:.4f}/{test_loss:.4f}\n"
f"Train/Val/Test/Best val/Best test acc: {train_acc:.4f}/{val_acc:.4f}/{test_acc:.4f}/{best_val_acc:.4f}/{best_test_acc:.4f}"
)
for l, e in zip(
[accs, train_accs, val_accs, test_accs, losses, train_losses, val_losses, test_losses],
[acc, train_acc, val_acc, test_acc, loss.item(), train_loss, val_loss, test_loss],
):
l.append(e)
# %% Printouts
print("*" * 50)
print(f"Average epoch time: {total_time / args.n_epochs}")
print(f"Total Time: {total_time}")
print(f"Test acc: {best_test_acc}")
print()
print("Val Accs:", best_val_acc)
print("Test Accs:", best_test_acc)
print(f"Number of params: {count_parameters(args)}")
# %% Generate plots of accuracy and loss vs epochs
fig = plt.figure(figsize=(15, 12))
ax = fig.gca()
ax.tick_params(labelright=True)
for y, label in zip([train_accs, val_accs, test_accs], ["train acc", "val acc", "test acc"]):
plt.plot(range(args.n_epochs), y, label=label)
ax.legend(prop={'size': 20})
ax.tick_params(axis='both', labelsize = 20)
plt.title("Accuracy vs Epochs", fontsize=30)
plt.ylabel('Accuracy', fontsize=20)
plt.xlabel('Epochs', fontsize=20)
plt.grid(which="major", color="silver", linestyle="dotted")
plt.grid(which="minor", color="silver", linestyle="dotted")
#plt.tight_layout()
plt.savefig(foldername + "/gat_accuracy.png", bbox_inches='tight')
plt.show()
fig = plt.figure(figsize=(15, 12))
ax = fig.gca()
ax.tick_params(labelright=True)
for y, label in zip([train_losses, val_losses, test_losses],
["train loss", "val loss", "test loss"]):
plt.plot(range(args.n_epochs), y, label=label)
ax.legend(prop={'size': 20})
ax.tick_params(axis='both', labelsize = 20)
plt.title("Loss vs Epochs", fontsize=30)
plt.ylabel('Loss', fontsize=20)
plt.xlabel('Epochs', fontsize=20)
plt.grid(which="major", color="silver", linestyle="dotted")
plt.grid(which="minor", color="silver", linestyle="dotted")
#plt.tight_layout()
plt.savefig(foldername + "/gat_loss.png", bbox_inches='tight')
plt.show()
# %% Generate histogram of predicted labels
category_names = ["cs.AI", "cs.AR", "cs.CC", "cs.CE", "cs.CG", "cs.CL", "cs.CR", "cs.CV", "cs.CY",
"cs.DB", "cs.DC", "cs.DL", "cs.DM", "cs.DS", "cs.ET", "cs.FL", "cs.GL", "cs.GR",
"cs.GT", "cs.HC", "cs.IR", "cs.IT", "cs.LG", "cs.LO", "cs.MA", "cs.MM", "cs.MS",
"cs.NA", "cs.NE", "cs.NI", "cs.OH", "cs.OS", "cs.PF", "cs.PL", "cs.RO", "cs.SC",
"cs.SD", "cs.SE", "cs.SI", "cs.SY"]
# Get predicted categories (run inference without gradients; move tensors to CPU before converting to numpy)
feat = graph.ndata["feat"]
with th.no_grad():
    pred = model(graph, feat)
pred = pred.argmax(dim=-1, keepdim=True)
# Split predicted categories by train, validate and test sets
train_pred = th.flatten(pred[train_idx]).cpu().numpy()
val_pred = th.flatten(pred[val_idx]).cpu().numpy()
test_pred = th.flatten(pred[test_idx]).cpu().numpy()
# Get the ground truth labels for train set for sorting order later
train_labels = th.flatten(labels[train_idx]).cpu().numpy()
true_train_freq, train_freq, val_freq, test_freq = [], [], [], []
for i in range(n_classes):
true_train_freq.append(np.count_nonzero(train_labels==i))
train_freq.append(np.count_nonzero(train_pred==i))
val_freq.append(np.count_nonzero(val_pred==i))
test_freq.append(np.count_nonzero(test_pred==i))
train_freq, val_freq, test_freq = np.array(train_freq), np.array(val_freq), np.array(test_freq)
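# --- Hedged aside (not part of the original script) ---
# The per-class counting loop above can also be written with np.bincount; a
# tiny standalone check of the equivalence on made-up labels:
def _bincount_matches_loop():
    demo = np.array([0, 2, 2, 1, 0, 2])
    loop_counts = [np.count_nonzero(demo == i) for i in range(3)]
    return np.array_equal(np.bincount(demo, minlength=3), loop_counts)  # True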
import time
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components
import pandas as pd
from rdkit import Chem
from . import chem
class BlockMoleculeData:
def __init__(self):
self.blockidxs = [] # indexes of every block
        self.blocks = [] # rdkit molecule objects for every block
self.slices = [0] # atom index at which every block starts
self.numblocks = 0
self.jbonds = [] # [block1, block2, bond1, bond2]
self.stems = [] # [block1, bond1]
self._mol = None
def add_block(self, block_idx, block, block_r, stem_idx, atmidx):
"""
:param block_idx:
:param block:
:param block_r:
:param stem_idx:
:param atmidx:
:return:
"""
self.blockidxs.append(block_idx)
self.blocks.append(block)
self.slices.append(self.slices[-1] + block.GetNumAtoms())
self.numblocks += 1
[self.stems.append([self.numblocks-1,r]) for r in block_r[1:]]
if len(self.blocks)==1:
self.stems.append([self.numblocks-1, block_r[0]])
else:
if stem_idx is None:
assert atmidx is not None, "need stem or atom idx"
stem_idx = np.where(self.stem_atmidxs==atmidx)[0][0]
else:
assert atmidx is None, "can't use stem and atom indices at the same time"
stem = self.stems[stem_idx]
bond = [stem[0], self.numblocks-1, stem[1], block_r[0]]
self.stems.pop(stem_idx)
self.jbonds.append(bond)
# destroy properties
self._mol = None
return None
def delete_blocks(self, block_mask):
"""
:param block_mask:
:return:
"""
# update number of blocks
self.numblocks = np.sum(np.asarray(block_mask, dtype=np.int32))
self.blocks = list(np.asarray(self.blocks)[block_mask])
self.blockidxs = list(np.asarray(self.blockidxs)[block_mask])
# update junction bonds
reindex = np.cumsum(np.asarray(block_mask,np.int32)) - 1
jbonds = []
for bond in self.jbonds:
if block_mask[bond[0]] and block_mask[bond[1]]:
jbonds.append(np.array([reindex[bond[0]], reindex[bond[1]], bond[2], bond[3]]))
self.jbonds = jbonds
# update r-groups
stems = []
for stem in self.stems:
if block_mask[stem[0]]:
stems.append(np.array([reindex[stem[0]],stem[1]]))
self.stems = stems
# update slices
natms = [block.GetNumAtoms() for block in self.blocks]
self.slices = [0] + list(np.cumsum(natms))
# destroy properties
self._mol = None
return reindex
def remove_jbond(self, jbond_idx=None, atmidx=None):
if jbond_idx is None:
assert atmidx is not None, "need jbond or atom idx"
jbond_idx = np.where(self.jbond_atmidxs == atmidx)[0][0]
else:
assert atmidx is None, "can't use stem and atom indices at the same time"
# find index of the junction bond to remove
jbond = self.jbonds.pop(jbond_idx)
# find the largest connected component; delete rest
jbonds = np.asarray(self.jbonds, dtype=np.int32)
jbonds = jbonds.reshape([len(self.jbonds),4]) # handle the case when single last jbond was deleted
graph = csr_matrix((np.ones(self.numblocks-2),
(jbonds[:,0], jbonds[:,1])),
shape=(self.numblocks, self.numblocks))
_, components = connected_components(csgraph=graph, directed=False, return_labels=True)
block_mask = components==np.argmax(np.bincount(components))
reindex = self.delete_blocks(block_mask)
if block_mask[jbond[0]]:
stem = np.asarray([reindex[jbond[0]], jbond[2]])
else:
stem = np.asarray([reindex[jbond[1]], jbond[3]])
self.stems.append(stem)
atmidx = self.slices[stem[0]] + stem[1]
return atmidx
@property
def stem_atmidxs(self):
stems = np.asarray(self.stems)
if stems.shape[0]==0:
stem_atmidxs = np.array([])
else:
stem_atmidxs = np.asarray(self.slices)[stems[:,0]] + stems[:,1]
return stem_atmidxs
@property
def jbond_atmidxs(self):
jbonds = np.asarray(self.jbonds)
if jbonds.shape[0]==0:
jbond_atmidxs = np.array([])
else:
jbond_atmidxs = np.stack([np.concatenate([np.asarray(self.slices)[jbonds[:,0]] + jbonds[:,2]]),
np.concatenate([np.asarray(self.slices)[jbonds[:,1]] + jbonds[:,3]])],1)
return jbond_atmidxs
@property
def mol(self):
        if self._mol is None:
self._mol, _ = chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)
return self._mol
@property
def smiles(self):
return Chem.MolToSmiles(self.mol)
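# --- Hedged illustrative sketch (not part of the original module) ---
# remove_jbond above keeps only the largest connected component of the block
# graph.  A minimal standalone example of that scipy pattern, on a toy graph
# where blocks 0 and 1 are bonded and block 2 is left isolated:
def _demo_largest_component():
    n_blocks = 3
    bonds = np.array([[0, 1]])  # a single junction bond between blocks 0 and 1
    graph = csr_matrix((np.ones(len(bonds)), (bonds[:, 0], bonds[:, 1])),
                       shape=(n_blocks, n_blocks))
    _, components = connected_components(csgraph=graph, directed=False,
                                          return_labels=True)
    return components == np.argmax(np.bincount(components))  # [True, True, False]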
class MolMDP:
def __init__(self, blocks_file):
blocks = pd.read_json(blocks_file)
self.block_smi = blocks["block_smi"].to_list()
self.block_rs = blocks["block_r"].to_list()
self.block_nrs = np.asarray([len(r) for r in self.block_rs])
self.block_mols = [Chem.MolFromSmiles(smi) for smi in blocks["block_smi"]]
self.block_natm = np.asarray([b.GetNumAtoms() for b in self.block_mols])
self.reset()
@property
    def num_blocks(self):
        "number of possible building blocks in molMDP"
return len(self.block_smi)
def reset(self):
self.molecule = BlockMoleculeData()
return None
def add_block(self, block_idx, stem_idx=None, atmidx=None):
        assert (block_idx >= 0) and (block_idx < len(self.block_mols)), "unknown block"
self.molecule.add_block(block_idx,
block=self.block_mols[block_idx],
block_r=self.block_rs[block_idx],
stem_idx=stem_idx, atmidx=atmidx)
return None
def remove_jbond(self, jbond_idx=None, atmidx=None):
atmidx = self.molecule.remove_jbond(jbond_idx, atmidx)
return atmidx
def random_walk(self, length):
done = False
while not done:
if self.molecule.numblocks==0:
block_idx = np.random.choice(np.arange(self.num_blocks))
stem_idx = None
self.add_block(block_idx=block_idx, stem_idx=stem_idx)
if self.molecule.numblocks >= length:
if self.molecule.slices[-1] > 1:
done = True
else:
self.reset()
elif len(self.molecule.stems) > 0:
                block_idx = np.random.choice(np.arange(self.num_blocks))
#!/usr/bin/env python3
"""Construct a "swipe" trajectory to bring the object back to the centre."""
import argparse
import time
import progressbar
import numpy as np
from scipy.spatial.transform import Rotation
import trifinger_simulation
def min_jerk_trajectory(current, setpoint, frequency, avg_speed):
"""Compute minimum jerk trajectory.
Based on [Mika's tech blog](https://mika-s.github.io/python/control-theory/trajectory-generation/2017/12/06/trajectory-generation-with-a-minimum-jerk-trajectory.html)
License: CC BY-SA 3.0
"""
trajectory = []
trajectory_derivative = []
    num_steps = np.linalg.norm(current - setpoint)
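# --- Hedged illustrative sketch (not part of the original script) ---
# The (truncated) function above is based on the standard minimum-jerk
# profile.  A self-contained version of that profile, assuming each coordinate
# is blended with the classic 10*t^3 - 15*t^4 + 6*t^5 polynomial (zero
# velocity and acceleration at both endpoints), could look like this:
def min_jerk_profile(start, goal, num_points):
    start = np.asarray(start, dtype=float)
    goal = np.asarray(goal, dtype=float)
    tau = np.linspace(0.0, 1.0, num_points)[:, None]  # normalised time in [0, 1]
    blend = 10 * tau**3 - 15 * tau**4 + 6 * tau**5
    return start + (goal - start) * blend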
import json
import os
from PIL import Image
import argparse
import time
import numpy as np
import sys
import csv
layer = None
def _prof_summary(report):
sums=dict()
counts=dict()
summary=[]
for line in [v for v in report.split('\n') if v]:
row = [v for v in line.split(' ') if v]
name=row[0]
val=float(row[1])
new_val = sums.get(name,0) + val
new_cnt =counts.get(name,0) + 1
sums[name ] = new_val
counts[name] = new_cnt
for name in sums:
summary.append((name,sums[name],counts[name]))
summary.sort(key = lambda x:x[1])
print("Summary:")
print("------")
for r in summary:
print("%10.5f %5d %s" % ( r[1],r[2],r[0]))
print("------")
def export_torch_model(model,batch,path,opset = None):
import torch
inp = torch.randn(batch,3,224,224)
model.eval()
torch.onnx.export(model,inp,path,input_names = ["data"],output_names=["prob"],opset_version=opset)
class CaffeModel(object):
def __init__(self,proto,params,device = -1):
import caffe
if device >= 0:
caffe.set_mode_gpu()
caffe.set_device(device)
self.net = caffe.Net(proto,caffe.TEST)
self.net.copy_from(params)
def eval(self,batch):
global layer
data = self.net.blobs[self.net.inputs[0]]
lname = layer if layer else self.net.outputs[0]
prob = self.net.blobs[lname]
if data.data.shape[0] != batch.shape[0]:
data.reshape(*batch.shape)
self.net.reshape()
np.copyto(data.data,batch)
host_prob = np.zeros(prob.data.shape,dtype=np.float32)
self.net.forward()
np.copyto(host_prob,prob.data)
return host_prob
class DLPrimModel(object):
def __init__(self,proto,params,device):
import dlprim as dp
caffe_model=dp.CaffeModel();
caffe_model.load(proto,params)
self.ctx = dp.Context(device)
self.net = dp.Net(self.ctx)
#self.net.keep_intermediate_tensors = True
self.net.mode = dp.PREDICT
base_path = proto.replace('.prototxt','')
with open(base_path +'.json','w') as f:
f.write(caffe_model.network)
f.write('\n')
self.net.load_model(caffe_model)
self.net.save_parameters(base_path + '.dlp')
def eval(self,batch):
import dlprim as dp
data = self.net.tensor(self.net.input_names[0])
if data.shape[0] != batch.shape[0]:
data.reshape(dp.Shape(*batch.shape))
self.net.reshape()
global layer
lname = layer if layer else self.net.output_names[0]
prob = self.net.tensor(lname)
prob_cpu = np.zeros(prob.shape,dtype=np.float32)
q = self.ctx.make_execution_context(0)
data.to_device(batch,q)
self.net.forward(q)
prob.to_host(prob_cpu,q)
q.finish()
return prob_cpu
def predict_on_images(model,images,config):
tw = 224
th = 224
mean = config['mean']
    mean = np.array(mean)
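# --- Hedged illustrative sketch (not part of the original script) ---
# The (truncated) function above builds a 224x224 batch and a per-channel
# mean.  Typical mean-subtraction preprocessing for one PIL image, assuming an
# NCHW float32 batch and a three-element per-channel mean, might look like this:
def _preprocess_image(img, mean, size=224):
    img = img.convert("RGB").resize((size, size))
    arr = np.asarray(img, dtype=np.float32)   # HWC
    arr = arr.transpose(2, 0, 1)              # CHW
    return arr - mean.reshape(3, 1, 1)        # subtract per-channel mean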
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: <NAME> <<EMAIL>>
# Copyright (C) 2017 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking the poincare module from the models package.
"""
import functools
import logging
import unittest
import numpy as np
from gensim.models.keyedvectors import KeyedVectors, REAL, pseudorandom_weak_vector
from gensim.test.utils import datapath
import gensim.models.keyedvectors
logger = logging.getLogger(__name__)
class TestKeyedVectors(unittest.TestCase):
def setUp(self):
self.vectors = KeyedVectors.load_word2vec_format(datapath('euclidean_vectors.bin'), binary=True)
self.model_path = datapath("w2v_keyedvectors_load_test.modeldata")
self.vocab_path = datapath("w2v_keyedvectors_load_test.vocab")
def test_most_similar(self):
"""Test most_similar returns expected results."""
expected = [
'conflict',
'administration',
'terrorism',
'call',
'israel'
]
predicted = [result[0] for result in self.vectors.most_similar('war', topn=5)]
self.assertEqual(expected, predicted)
def test_most_similar_vector(self):
"""Can we pass vectors to most_similar directly?"""
positive = self.vectors.vectors[0:5]
most_similar = self.vectors.most_similar(positive=positive)
assert most_similar is not None
def test_most_similar_parameter_types(self):
"""Are the positive/negative parameter types are getting interpreted correctly?"""
partial = functools.partial(self.vectors.most_similar, topn=5)
position = partial('war', 'peace')
position_list = partial(['war'], ['peace'])
keyword = partial(positive='war', negative='peace')
keyword_list = partial(positive=['war'], negative=['peace'])
#
# The above calls should all yield identical results.
#
assert position == position_list
assert position == keyword
assert position == keyword_list
def test_most_similar_cosmul_parameter_types(self):
"""Are the positive/negative parameter types are getting interpreted correctly?"""
partial = functools.partial(self.vectors.most_similar_cosmul, topn=5)
position = partial('war', 'peace')
position_list = partial(['war'], ['peace'])
keyword = partial(positive='war', negative='peace')
keyword_list = partial(positive=['war'], negative=['peace'])
#
# The above calls should all yield identical results.
#
assert position == position_list
assert position == keyword
assert position == keyword_list
def test_vectors_for_all_list(self):
"""Test vectors_for_all returns expected results with a list of keys."""
words = [
'conflict',
'administration',
'terrorism',
'an out-of-vocabulary word',
'another out-of-vocabulary word',
]
vectors_for_all = self.vectors.vectors_for_all(words)
expected = 3
predicted = len(vectors_for_all)
assert expected == predicted
expected = self.vectors['conflict']
predicted = vectors_for_all['conflict']
        assert np.allclose(expected, predicted)
#!/usr/bin/env python3
import os
import copy
# BEGIN THREAD SETTINGS this sets the number of threads used by numpy in the program (should be set to 1 for Parts 1 and 3)
implicit_num_threads = 2
os.environ["OMP_NUM_THREADS"] = str(implicit_num_threads)
os.environ["MKL_NUM_THREADS"] = str(implicit_num_threads)
os.environ["OPENBLAS_NUM_THREADS"] = str(implicit_num_threads)
# END THREAD SETTINGS
import pickle
import numpy as np
import numpy
from numpy import random
import scipy
import matplotlib
import mnist
import pickle
matplotlib.use('agg')
from matplotlib import pyplot as plt
import threading
import time
from scipy.special import softmax
from tqdm import tqdm
mnist_data_directory = os.path.join(os.path.dirname(__file__), "data")
# TODO add any additional imports and global variables
# SOME UTILITY FUNCTIONS that you may find to be useful, from my PA3 implementation
# feel free to use your own implementation instead if you prefer
def multinomial_logreg_error(Xs, Ys, W):
predictions = numpy.argmax(numpy.dot(W, Xs), axis=0)
error = numpy.mean(predictions != numpy.argmax(Ys, axis=0))
return error
def multinomial_logreg_grad_i(Xs, Ys, ii, gamma, W, dtype=np.float64):
WdotX = numpy.dot(W, Xs[:,ii])
expWdotX = numpy.exp(WdotX - numpy.amax(WdotX, axis=0), dtype=dtype)
softmaxWdotX = expWdotX / numpy.sum(expWdotX, axis=0, dtype=dtype)
return numpy.dot(softmaxWdotX - Ys[:,ii], Xs[:,ii].transpose()) / len(ii) + gamma * W
# END UTILITY FUNCTIONS
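# --- Hedged aside (not part of the assignment skeleton) ---
# multinomial_logreg_grad_i above subtracts the column-wise max before
# exponentiating, which is the usual numerically stable softmax.  A minimal
# standalone version of that trick (the function name is made up here):
def _stable_softmax(logits):
    shifted = logits - numpy.amax(logits, axis=0)  # keeps exp() from overflowing
    exps = numpy.exp(shifted)
    return exps / numpy.sum(exps, axis=0)
# e.g. _stable_softmax(numpy.array([1000.0, 1001.0])) is finite, while
# numpy.exp(1000.0) on its own overflows to inf.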
def load_MNIST_dataset():
PICKLE_FILE = os.path.join(mnist_data_directory, "MNIST.pickle")
try:
dataset = pickle.load(open(PICKLE_FILE, 'rb'))
except:
# load the MNIST dataset
mnist_data = mnist.MNIST(mnist_data_directory, return_type="numpy", gz=True)
Xs_tr, Lbls_tr = mnist_data.load_training();
Xs_tr = Xs_tr.transpose() / 255.0
Ys_tr = numpy.zeros((10, 60000))
for i in range(60000):
Ys_tr[Lbls_tr[i], i] = 1.0 # one-hot encode each label
# shuffle the training data
numpy.random.seed(4787)
perm = numpy.random.permutation(60000)
Xs_tr = numpy.ascontiguousarray(Xs_tr[:,perm])
Ys_tr = numpy.ascontiguousarray(Ys_tr[:,perm])
Xs_te, Lbls_te = mnist_data.load_testing();
Xs_te = Xs_te.transpose() / 255.0
Ys_te = numpy.zeros((10, 10000))
for i in range(10000):
Ys_te[Lbls_te[i], i] = 1.0 # one-hot encode each label
Xs_te = numpy.ascontiguousarray(Xs_te)
Ys_te = numpy.ascontiguousarray(Ys_te)
dataset = (Xs_tr, Ys_tr, Xs_te, Ys_te)
pickle.dump(dataset, open(PICKLE_FILE, 'wb'))
return dataset
# SGD + Momentum (adapt from Programming Assignment 3)
#
# Xs training examples (d * n)
# Ys training labels (c * n)
# gamma L2 regularization constant
# W0 the initial value of the parameters (c * d)
# alpha step size/learning rate
# beta momentum hyperparameter
# B minibatch size
# num_epochs number of epochs (passes through the training set) to run
#
# returns the final model arrived at at the end of training
def sgd_mss_with_momentum(Xs, Ys, gamma, W0, alpha, beta, B, num_epochs):
# TODO students should use their implementation from programming assignment 3
# or adapt this version, which is from my own solution to programming assignment 3
models = []
(d, n) = Xs.shape
V = numpy.zeros(W0.shape)
W = W0
# print("Running minibatch sequential-scan SGD with momentum")
for it in tqdm(range(num_epochs)):
for ibatch in range(int(n/B)):
ii = range(ibatch*B, (ibatch+1)*B)
V = beta * V - alpha * multinomial_logreg_grad_i(Xs, Ys, ii, gamma, W)
W = W + V
# if ((ibatch+1) % monitor_period == 0):
# models.append(W)
    return W  # final model, matching the docstring (the models list is only filled if checkpointing above is re-enabled)
# SGD + Momentum (No Allocation) => all operations in the inner loop should be a
# call to a numpy.____ function with the "out=" argument explicitly specified
# so that no extra allocations occur
#
# Xs training examples (d * n)
# Ys training labels (c * n)
# gamma L2 regularization constant
# W0 the initial value of the parameters (c * d)
# alpha step size/learning rate
# beta momentum hyperparameter
# B minibatch size
# num_epochs number of epochs (passes through the training set) to run
# monitor_period how frequently, in terms of batches (not epochs) to output the parameter vector
#
# returns the final model arrived at at the end of training
def sgd_mss_with_momentum_noalloc(Xs, Ys, gamma, W0, alpha, beta, B, num_epochs):
(d, n) = Xs.shape
(c, d) = W0.shape
# TODO students should initialize the parameter vector W and pre-allocate any needed arrays here
Y_temp = np.zeros((c,B))
W_temp = np.zeros(W0.shape)
amax_temp = np.zeros(B)
softmax_temp = np.zeros((c,B))
V = np.zeros(W0.shape)
g = np.zeros(W0.shape)
X_batch = []
Y_batch = []
for i in range(n // B):
ii = [(i*B + j) for j in range(B)]
X_batch.append(np.ascontiguousarray(Xs[:,ii]))
Y_batch.append(np.ascontiguousarray(Ys[:,ii]))
# print("Running minibatch sequential-scan SGD with momentum (no allocation)")
for it in tqdm(range(num_epochs)):
for i in range(int(n/B)):
# ii = range(ibatch*B, (ibatch+1)*B)
# TODO this section of code should only use numpy operations with the "out=" argument specified (students should implement this)
np.matmul(W0, X_batch[i], out=Y_temp)
# WdotX = numpy.dot(W0, Xs[:,ii])
# expWdotX = numpy.exp(WdotX - numpy.amax(WdotX, axis=0), dtype=dtype)
# softmaxWdotX = expWdotX / numpy.sum(expWdotX, axis=0, dtype=dtype)
np.amax(Y_temp, axis=0, out=amax_temp)
np.subtract(Y_temp, amax_temp, out=softmax_temp)
np.exp(softmax_temp, out=softmax_temp)
np.sum(softmax_temp, axis=0, out=amax_temp)
np.divide(softmax_temp, amax_temp, out=softmax_temp)
# numpy.dot(softmaxWdotX - Ys[:,ii], Xs[:,ii].transpose()) / len(ii) + gamma * W
np.subtract(softmax_temp, Y_batch[i], out=Y_temp)
# Y_temp = softmax(Y_temp, axis=0) - Y_batch[i]
np.matmul(Y_temp, X_batch[i].T, out=W_temp)
np.divide(W_temp, B, out=W_temp)
np.multiply(gamma, W0, out=g)
np.add(W_temp, g, out=g)
# g = W_temp / B + gamma * W0
np.multiply(V, beta, out=V)
np.multiply(g, alpha, out=g)
# V = (beta * V) - (alpha * g)
np.subtract(V, g, out=V)
np.add(V, W0, out=W0)
# W0 = W0 + V
return W0
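# --- Hedged aside (not part of the assignment skeleton) ---
# The "no allocation" variant above leans on numpy's out= argument so every
# inner-loop result is written into a pre-allocated buffer.  A tiny standalone
# illustration of that idiom (buffer names here are made up for the sketch):
def _demo_out_argument():
    buf = numpy.zeros(4)
    a = numpy.arange(4.0)
    b = numpy.ones(4)
    numpy.add(a, b, out=buf)             # writes a + b into buf, no new array
    numpy.multiply(buf, 2.0, out=buf)    # scales buf in place
    return buf                           # array([2., 4., 6., 8.])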
# SGD + Momentum (threaded)
#
# Xs training examples (d * n)
# Ys training labels (c * n)
# gamma L2 regularization constant
# W0 the initial value of the parameters (c * d)
# alpha step size/learning rate
# beta momentum hyperparameter
# B minibatch size
# num_epochs number of epochs (passes through the training set) to run
# monitor_period how frequently, in terms of batches (not epochs) to output the parameter vector
# num_threads how many threads to use
#
# returns the final model arrived at at the end of training
def sgd_mss_with_momentum_threaded(Xs, Ys, gamma, W0, alpha, beta, B, num_epochs, num_threads):
(d, n) = Xs.shape
(c, d) = W0.shape
# TODO perform any global setup/initialization/allocation (students should implement this)
g = [W0 for i in range(num_threads)]
Bt = B//num_threads
W_temp1 = np.zeros(W0.shape)
# construct the barrier object
iter_barrier = threading.Barrier(num_threads + 1)
# a function for each thread to run
def thread_main(ithread):
# TODO perform any per-thread allocations
for it in range(num_epochs):
W_temp = np.zeros(W0.shape)
amax_temp = np.zeros(Bt)
softmax_temp = np.zeros((c,Bt))
for ibatch in range(int(n/B)):
# TODO work done by thread in each iteration; this section of code should primarily use numpy operations with the "out=" argument specified (students should implement this)
ii = range(ibatch*B + ithread*Bt, ibatch*B + (ithread+1)*Bt)
iter_barrier.wait()
np.dot(g[ithread], Xs[:,ii], out=softmax_temp)
                np.amax(softmax_temp, axis=0, out=amax_temp)
import os # library for interacting with the operating system
import sys
import xml.etree.ElementTree as ET # library for reading XML as a tree; "as ET" shortens the library name
import shutil # module for file and directory operations (copying and moving files)
import random # random number generation
from xml.dom import minidom # functions for accessing XML
import operator # library for computing differences between lists
import math # math library, used to compute the standard deviation
import numpy as np # numerical library, also used to compute the standard deviation
from matplotlib import pyplot as plt
import pandas as pd
from collections import Counter
import csv
import pickle
from collections import Counter # count duplicate values in a list
import time
start = time.time()
# sys.stdout = open('output.txt','a') # save printed output to a text file
answer = ['car','person']
attr = ['width', 'height', 'box']
car_array = []
person_array = []
# zz=[]
# zz1=[]
car_count = 0
person_count = 0
total_box = 0
total_xml = 0
total_box_count = 0
total_car = 0
total_person = 0
person_w = []
person_h = []
car_w = []
car_h = []
other =0
c_w = []
c_h = []
p_w = []
p_h = []
p_b = []
c_b = []
c_height_list = []
c_width_list = []
c_box_len_list = []
p_height_list = []
p_width_list = []
p_box_len_list = []
path_list = []
file_list = []
name_list = []
dir_len = len(path_list)
root_path = "D:/backup/DataSet"
target_path = "D:/backup/DataSet"
parser = ET.XMLParser(encoding="utf-8") # XMLParser lets us pull out the XML elements we want
rootpath = r"D:\backup\DataSet"
xmlRootpath = r'D:\backup\DataSet'
xmlList = []
coun=0
for (path, dir, files) in os.walk(xmlRootpath):
for file in files:
if file.endswith(".xml"):
# for x in os.listdir(root_path):
# if x.endswith('xml'):
empty = os.path.join(root_path,path)
path_list.append(empty)
total_xml += 1
            tree = ET.parse(os.path.join(empty, file)) # read the information for this XML file
            root = tree.getroot() # points to the top of the document, here <annotations>
            #print(os.path.join(empty, file))
            for child in root.findall("object"): # iterate over every <object> child under the root (annotations)
bndbox=child.find('bndbox')
name = child.find('name').text
total_box_count += 1
xmin = int(float(bndbox.find("xmin").text))
ymin = int(float(bndbox.find("ymin").text))
xmax = int(float(bndbox.find("xmax").text))
ymax = int(float(bndbox.find("ymax").text))
# print(name+' %s %s %s %s' %(xmin, ymin, xmax, ymax)) #5
if name == "person":
person_count += 1
total_person += 1
p_w = abs(float(xmin)-float(xmax))
p_h = abs(float(ymin)-float(ymax))
p_b = abs(float(p_w*p_h))
# print("person width=" + str(p_w), "person height" + str(p_h))
# print("label:person"+" xmin:"+str(xmin)+" ymin:"+str(ymin)+" xmax:"+str(xmax)+" ymax:"+str(ymax)+" width=" + str(p_w), " height" + str(p_h))
person_w.append(p_w)
person_h.append(p_h)
p_box_len_list.append(p_b)
elif name == "car":
car_count += 1
total_car += 1
c_w = abs(float(xmin)-float(xmax))
c_h = abs(float(ymin)-float(ymax))
c_b = abs(float(c_w * c_h))
# print("car width=" + str(c_w), "car height=" + str(c_h))
#print("label:car" + " xmin:" + str(xmin) + " ymin:" + str(ymin) + " xmax:" + str(xmax) + " ymax:" + str(ymax) + " width=" + str(c_w), " height" + str(c_h))
car_w.append(c_w)
car_h.append(c_h)
c_box_len_list.append(c_b)
else:
other += 1
# print("car num:"+str(car_count)+" person num:"+str(person_count))
person_array.append(person_count)
car_array.append(car_count)
car_count=0
person_count=0
person_np = np.array(person_array)
car_np = np.array(car_array)
# np.max(arr) gets the maximum value
x = np.array(person_w)#person_width
y = np.array(person_h)#person_height
z = x*y #person _w*h
zz = list(z)
# print(z)
x1 = np.array(car_w) #car width
y1 = np.array(car_h) #car height
z1 = x1*y1 #car_w*h
zz1 = list(z1)
a = round(np.average(x),2)
b = round(np.average(y),2)
c = round(np.average(z),2)
a1 = round(np.average(x1),2)
b1 = round(np.average(y1),2)
c1 = round(np.average(z1),2)
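# --- Hedged aside (not part of the original script) ---
# The imports above mention the standard deviation; a minimal sketch of how
# the same per-class spread could be reported next to the averages:
def _spread(values):
    values = np.asarray(values, dtype=float)
    return round(float(np.std(values)), 2)
# e.g. _spread(person_w) and _spread(car_w) would give the width spread per class.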
import sys
import os
sys.path.append(
os.path.abspath(
os.path.join(os.path.abspath(__file__), '..', '..')
)
)
import fitting
import kernel
import regression
import numpy as np
import matplotlib.pyplot as plt
def gaussian(x, mu, sig):
return 1 / np.sqrt(2 * np.pi * sig ** 2) * np.exp(-0.5 * (x - mu) ** 2 / (sig ** 2))
argv = sys.argv
if len(argv) != 2:
print("Usage: python test_regression.py fit_linear|fit_by_linear|fit_gaussian_process"
"|fit_sparse_linear|fit_dual_gaussian_process|fit_relevance_vector")
sys.exit(-1)
if argv[1] == 'fit_linear':
I = 50
x = np.linspace(1, 9, I)
phi = np.array([[7], [-0.5]])
sig = 0.6
w_test = np.zeros((I, 1))
X = np.append(np.ones((I, 1)), x.reshape((I, 1)), axis=1)
X_phi = X @ phi
w = np.random.normal(X_phi, sig)
fit_phi, fit_sig = regression.fit_linear(X.transpose(), w)
granularity = 500
domain = np.linspace(0, 10, granularity)
X, Y = np.meshgrid(domain, domain)
XX = np.append(np.ones((granularity, 1)),
domain.reshape((granularity, 1)), axis=1)
temp = XX @ fit_phi
Z = np.zeros((granularity, granularity))
for j in range(granularity):
mu = temp[j, 0]
for i in range(granularity):
ww = domain[i]
Z[i, j] = gaussian(ww, mu, fit_sig)
plt.figure("Fit linear regression")
plt.pcolor(X, Y, Z)
plt.scatter(x, w, edgecolors="w")
plt.show()
elif argv[1] == "fit_by_linear":
granularity = 500
a = -5
b = 5
domain = np.linspace(a, b, granularity)
X, Y = np.meshgrid(domain, domain)
x = X.reshape((X.size, 1))
y = Y.reshape((Y.size, 1))
xy_matrix = np.append(x, y, axis=1)
# Compute the prior 2D normal distribution over phi
mu_1 = np.array([0, 0])
var_prior = 6
    covariance_1 = var_prior * np.eye(2)
#!/usr/bin/env python
import time
import glfw
import numpy as np
from operator import itemgetter
from mujoco_py import const, MjViewer
from mujoco_worldgen.util.types import store_args
from envs.hns.ma_policy.util import listdict2dictnp
from functools import reduce
import pdb
import torch
import copy
def handle_dict_obs(keys, order_obs, mask_order_obs, dict_obs, num_agents, num_hiders):
# obs = []
# share_obs = []
for i, key in enumerate(order_obs):
if key in keys:
if mask_order_obs[i] == None:
temp_share_obs = dict_obs[key].reshape(num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = dict_obs[key].reshape(num_agents,-1).copy()
temp_mask = dict_obs[mask_order_obs[i]].copy()
temp_obs = dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros((mins_temp_mask.sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
# obs.append(reshape_obs)
# share_obs.append(reshape_share_obs)
# obs = np.array(obs)[:,num_hiders:]
# share_obs = np.array(share_obs)[:,num_hiders:]
obs = reshape_obs[num_hiders:]
share_obs = reshape_share_obs[num_hiders:]
return obs, share_obs
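# --- Hedged illustrative sketch (not part of the original module) ---
# handle_dict_obs above zeroes out entities hidden by the mask and then
# flattens each agent's observation.  A toy example of that masking step,
# with made-up shapes (2 agents, 3 entities, 2 features per entity):
def _demo_mask_and_flatten():
    obs = np.arange(12, dtype=np.float32).reshape(2, 3, 2)
    mask = np.array([[True, False, True],
                     [True, True, False]])
    obs[~mask] = np.zeros(((~mask).sum(), obs.shape[2]))
    return obs.reshape(2, -1)  # masked entities become zero rows, then flatten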
def splitobs(obs, keepdims=True):
'''
Split obs into list of single agent obs.
Args:
obs: dictionary of numpy arrays where first dim in each array is agent dim
'''
n_agents = obs[list(obs.keys())[0]].shape[0]
return [{k: v[[i]] if keepdims else v[i] for k, v in obs.items()} for i in range(n_agents)]
class PolicyViewer(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.total_rew = 0.0
self.ob = env.reset()
for policy in self.policies:
policy.reset()
assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
while self.duration is None or time.time() < self.end_time:
if len(self.policies) == 1:
action, _ = self.policies[0].act(self.ob)
else:
self.ob = splitobs(self.ob, keepdims=False)
ob_policy_idx = np.split(np.arange(len(self.ob)), len(self.policies))
actions = []
for i, policy in enumerate(self.policies):
inp = itemgetter(*ob_policy_idx[i])(self.ob)
inp = listdict2dictnp([inp] if ob_policy_idx[i].shape[0] == 1 else inp)
ac, info = policy.act(inp)
actions.append(ac)
action = listdict2dictnp(actions, keepdims=True)
self.ob, rew, done, env_info = self.env.step(action)
self.total_rew += rew
if done or env_info.get('discard_episode', False):
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_hs_single(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, all_args, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.total_rew = 0.0
self.dict_obs = env.reset()
#for policy in self.policies:
# policy.reset()
assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
'''
self.order_obs = ['agent_qpos_qvel','box_obs','ramp_obs','food_obs','observation_self']
self.mask_order_obs = ['mask_aa_obs','mask_ab_obs','mask_ar_obs','mask_af_obs',None]
'''
# self.order_obs = ['agent_qpos_qvel', 'box_obs','ramp_obs','construction_site_obs','vector_door_obs', 'observation_self']
# self.mask_order_obs = ['mask_aa_obs', 'mask_ab_obs','mask_ar_obs',None,None,None]
self.order_obs = ['agent_qpos_qvel', 'box_obs','ramp_obs','construction_site_obs', 'observation_self']
self.mask_order_obs = [None,None,None,None,None]
self.keys = self.env.observation_space.spaces.keys()
self.num_agents = 2
self.num_hiders = 1
self.num_seekers = 1
for agent_id in range(self.num_agents):
# deal with dict action space
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
reshape_obs, reshape_share_obs = handle_dict_obs(self.keys, self.order_obs, self.mask_order_obs, self.dict_obs, self.num_agents, self.num_hiders)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
while self.duration is None or time.time() < self.end_time:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_seekers):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
# rearrange action
action_movement = []
action_pull = []
action_glueall = []
for k in range(self.num_hiders):
                    #action_movement.append(np.random.randint(11, size=3)) # hider takes a random walk
                    action_movement.append(np.array([5,5,5])) # hider stays still
action_pull.append(0)
action_glueall.append(0)
for k in range(self.num_seekers):
action_movement.append(actions[k][0][:3])
                    action_pull.append(int(actions[k][0][3]))
                    action_glueall.append(int(actions[k][0][4]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
self.total_rew += rew
self.obs = []
self.share_obs = []
reshape_obs, reshape_share_obs = handle_dict_obs(self.keys, self.order_obs, self.mask_order_obs, self.dict_obs, self.num_agents, self.num_hiders)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False):
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.dict_obs = self.env.reset()
self.obs = []
self.share_obs = []
reshape_obs, reshape_share_obs = handle_dict_obs(self.keys, self.order_obs, self.mask_order_obs, self.dict_obs, self.num_agents, self.num_hiders)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_bl(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, args, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.args = args
self.num_agents = args.num_agents
self.total_rew = 0.0
self.dict_obs = env.reset()
self.eval_num = 10
self.eval_episode = 0
self.success_rate_sum = 0
self.step = 0
self.H = 5
#for policy in self.policies:
# policy.reset()
#assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
self.order_obs = ['agent_qpos_qvel', 'box_obs', 'ramp_obs', 'construction_site_obs', 'observation_self']
self.mask_order_obs = [None, None, None, None, None]
for agent_id in range(self.num_agents):
# deal with dict action space
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
# generate the obs space
obs_shape = []
obs_dim = 0
for key in self.order_obs:
if key in self.env.observation_space.spaces.keys():
space = list(self.env.observation_space[key].shape)
if len(space)<2:
space.insert(0,1)
obs_shape.append(space)
obs_dim += reduce(lambda x,y:x*y,space)
obs_shape.insert(0,obs_dim)
split_shape = obs_shape[1:]
self.policies[0].base.obs_shape = obs_shape
self.policies[0].base.encoder_actor.embedding.split_shape = split_shape
self.policies[0].base.encoder_critic.embedding.split_shape = split_shape
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.test_lock_rate = np.zeros(self.args.episode_length)
self.test_return_rate = np.zeros(self.args.episode_length)
self.test_success_rate = np.zeros(self.args.episode_length)
while (self.duration is None or time.time() < self.end_time) and self.eval_episode < self.eval_num:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_agents):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
action_movement = []
action_pull = []
action_glueall = []
for agent_id in range(self.num_agents):
action_movement.append(actions[agent_id][0][:self.action_movement_dim[agent_id]])
action_glueall.append(int(actions[agent_id][0][self.action_movement_dim[agent_id]]))
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull.append(int(actions[agent_id][0][-1]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
self.step += 1
#READ INFO
self.test_lock_rate[self.step] = env_info['lock_rate']
self.test_return_rate[self.step] = env_info['return_rate']
if env_info['lock_rate'] == 1:
self.test_success_rate[self.step] = env_info['return_rate']
else:
self.test_success_rate[self.step] = 0
# print("Step %d Lock Rate"%self.step, self.test_lock_rate[self.step])
# print("Step %d Return Rate"%self.step, self.test_return_rate[self.step])
# print("Step %d Success Rate"%self.step, self.test_success_rate[self.step])
#print(self.dict_obs['box_obs'][0][0])
self.total_rew += rew
self.is_lock = self.test_lock_rate[self.step]
self.is_return = self.test_return_rate[self.step]
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False) or self.step >= self.args.episode_length - 1:
self.eval_episode += 1
self.success_rate_sum += np.mean(self.test_success_rate[-self.H:])
print("Test Episode %d/%d Success Rate:"%(self.eval_episode, self.eval_num), np.mean(self.test_success_rate[-self.H:]))
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
self.add_overlay(const.GRID_TOPRIGHT, "Lock", str(self.is_lock))
self.add_overlay(const.GRID_TOPRIGHT, "Return", str(self.is_return))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
if self.eval_episode == self.eval_num:
print("Mean Success Rate:", self.success_rate_sum / self.eval_num)
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.dict_obs = self.env.reset()
self.obs = []
self.share_obs = []
# reset the buffer
self.test_lock_rate = np.zeros(self.args.episode_length)
self.test_return_rate = np.zeros(self.args.episode_length)
self.test_success_rate = np.zeros(self.args.episode_length)
self.step = 0
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_bl_good_case(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
policy - policy object to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, args, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.args = args
self.num_agents = args.num_agents
self.total_rew = 0.0
# init starts
self.eval_num = 1
self.eval_episode = 0
self.success_rate_sum = 0
self.step = 0
self.H = 5
buffer_length = 2000
boundary = args.grid_size-2
boundary_quadrant = [round(args.grid_size / 2), args.grid_size-3, 1, round(args.grid_size/2)-3]
start_boundary = [round(args.grid_size / 2), args.grid_size-3, 1, round(args.grid_size/2)-3] # x1,x2,y1,y2 qudrant set
last_node = node_buffer(args.num_agents, args.num_boxes, buffer_length,
archive_initial_length=args.n_rollout_threads,
reproduction_num=160,
max_step=1,
start_boundary=start_boundary,
boundary=boundary,
boundary_quadrant=boundary_quadrant)
#self.starts = last_node.produce_good_case(self.eval_num, start_boundary, args.num_agents, args.num_boxes)
self.starts = [[np.array([16, 4]), np.array([21, 2]), np.array([22, 2]), np.array([16, 4])]]
print("[starts]", self.starts[0])
self.dict_obs = env.reset(self.starts[0])
#for policy in self.policies:
# policy.reset()
#assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
self.order_obs = ['agent_qpos_qvel', 'box_obs', 'ramp_obs', 'construction_site_obs', 'observation_self']
self.mask_order_obs = [None, None, None, None, None]
for agent_id in range(self.num_agents):
# deal with dict action space
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
# generate the obs space
obs_shape = []
obs_dim = 0
for key in self.order_obs:
if key in self.env.observation_space.spaces.keys():
space = list(self.env.observation_space[key].shape)
if len(space)<2:
space.insert(0,1)
obs_shape.append(space)
obs_dim += reduce(lambda x,y:x*y,space)
obs_shape.insert(0,obs_dim)
split_shape = obs_shape[1:]
self.policies[0].base.obs_shape = obs_shape
self.policies[0].base.encoder_actor.embedding.split_shape = split_shape
self.policies[0].base.encoder_critic.embedding.split_shape = split_shape
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.test_lock_rate = np.zeros(self.args.episode_length)
self.test_return_rate = np.zeros(self.args.episode_length)
self.test_success_rate = np.zeros(self.args.episode_length)
while self.duration is None or time.time() < self.end_time or self.eval_episode <= self.eval_num:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_agents):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
action_movement = []
action_pull = []
action_glueall = []
for agent_id in range(self.num_agents):
action_movement.append(actions[agent_id][0][:self.action_movement_dim[agent_id]])
action_glueall.append(int(actions[agent_id][0][self.action_movement_dim[agent_id]]))
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull.append(int(actions[agent_id][0][-1]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
self.step += 1
#READ INFO
self.test_lock_rate[self.step] = env_info['lock_rate']
self.test_return_rate[self.step] = env_info['return_rate']
if env_info['lock_rate'] == 1:
self.test_success_rate[self.step] = env_info['return_rate']
else:
self.test_success_rate[self.step] = 0
#print(self.dict_obs['box_obs'][0][0])
self.total_rew += rew
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False) or self.step >= self.args.episode_length - 1:
self.eval_episode += 1
self.success_rate_sum += np.mean(self.test_success_rate[-self.H:])
print("Test Episode %d/%d Success Rate:"%(self.eval_episode, self.eval_num), np.mean(self.test_success_rate[-self.H:]))
if self.eval_episode == self.eval_num:
break
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
if self.eval_episode == self.eval_num:
print("Mean Success Rate:", self.success_rate_sum / self.eval_num)
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
print("[starts]", self.starts[self.eval_episode])
self.dict_obs = self.env.reset(self.starts[self.eval_episode])
self.obs = []
self.share_obs = []
# reset the buffer
self.test_lock_rate = np.zeros(self.args.episode_length)
self.test_return_rate = np.zeros(self.args.episode_length)
self.test_success_rate = np.zeros(self.args.episode_length)
self.step = 0
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_sc(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
    policies - list of policy objects to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.total_rew = 0.0
self.dict_obs = env.reset()
#for policy in self.policies:
# policy.reset()
#assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
'''
self.order_obs = ['agent_qpos_qvel','box_obs','ramp_obs','food_obs','observation_self']
self.mask_order_obs = ['mask_aa_obs','mask_ab_obs','mask_ar_obs','mask_af_obs',None]
'''
self.order_obs = ['box_obs','ramp_obs','construction_site_obs','vector_door_obs', 'observation_self']
self.mask_order_obs = ['mask_ab_obs','mask_ar_obs',None,None,None]
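        # Each key in order_obs is paired (by index) with a visibility-mask key in
        # mask_order_obs; None means that observation is always fully visible.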
self.num_agents = 1
for agent_id in range(self.num_agents):
# deal with dict action space
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
if self.duration is not None:
self.end_time = time.time() + self.duration
self.total_rew_avg = 0.0
self.n_episodes = 0
self.obs = []
self.share_obs = []
print(self.dict_obs)
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
print(self.obs)
print(self.share_obs)
while self.duration is None or time.time() < self.end_time:
values = []
actions= []
recurrent_hidden_statess = []
recurrent_hidden_statess_critic = []
with torch.no_grad():
for agent_id in range(self.num_agents):
self.policies[0].eval()
value, action, action_log_prob, recurrent_hidden_states, recurrent_hidden_states_critic = self.policies[0].act(agent_id,
torch.tensor(self.share_obs[:,agent_id,:]),
torch.tensor(self.obs[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states[:,agent_id,:]),
torch.tensor(self.recurrent_hidden_states_critic[:,agent_id,:]),
torch.tensor(self.masks[:,agent_id,:]))
values.append(value.detach().cpu().numpy())
actions.append(action.detach().cpu().numpy())
recurrent_hidden_statess.append(recurrent_hidden_states.detach().cpu().numpy())
recurrent_hidden_statess_critic.append(recurrent_hidden_states_critic.detach().cpu().numpy())
action_movement = []
action_pull = []
action_glueall = []
for agent_id in range(self.num_agents):
action_movement.append(actions[agent_id][0][:self.action_movement_dim[agent_id]])
action_glueall.append(int(actions[agent_id][0][self.action_movement_dim[agent_id]]))
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull.append(int(actions[agent_id][0][-1]))
action_movement = np.stack(action_movement, axis = 0)
action_glueall = np.stack(action_glueall, axis = 0)
if 'action_pull' in self.env.action_space.spaces.keys():
action_pull = np.stack(action_pull, axis = 0)
one_env_action = {'action_movement': action_movement, 'action_pull': action_pull, 'action_glueall': action_glueall}
print(action_pull)
self.dict_obs, rew, done, env_info = self.env.step(one_env_action)
print(self.dict_obs)
self.total_rew += rew
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.array(recurrent_hidden_statess).transpose(1,0,2)
self.recurrent_hidden_states_critic = np.array(recurrent_hidden_statess_critic).transpose(1,0,2)
if done or env_info.get('discard_episode', False):
self.reset_increment()
if self.display_window:
self.add_overlay(const.GRID_TOPRIGHT, "Reset env; (current seed: {})".format(self.seed), "N - next / P - previous ")
self.add_overlay(const.GRID_TOPRIGHT, "Reward", str(self.total_rew))
if hasattr(self.env.unwrapped, "viewer_stats"):
for k, v in self.env.unwrapped.viewer_stats.items():
self.add_overlay(const.GRID_TOPRIGHT, k, str(v))
self.env.render()
def reset_increment(self):
self.total_rew_avg = (self.n_episodes * self.total_rew_avg + self.total_rew) / (self.n_episodes + 1)
self.n_episodes += 1
print(f"Reward: {self.total_rew} (rolling average: {self.total_rew_avg})")
self.total_rew = 0.0
self.seed += 1
self.env.seed(self.seed)
self.dict_obs = self.env.reset()
self.obs = []
self.share_obs = []
for i, key in enumerate(self.order_obs):
if key in self.env.observation_space.spaces.keys():
if self.mask_order_obs[i] == None:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_obs = temp_share_obs.copy()
else:
temp_share_obs = self.dict_obs[key].reshape(self.num_agents,-1).copy()
temp_mask = self.dict_obs[self.mask_order_obs[i]].copy()
temp_obs = self.dict_obs[key].copy()
mins_temp_mask = ~temp_mask
temp_obs[mins_temp_mask]=np.zeros(((mins_temp_mask).sum(),temp_obs.shape[2]))
temp_obs = temp_obs.reshape(self.num_agents,-1)
if i == 0:
reshape_obs = temp_obs.copy()
reshape_share_obs = temp_share_obs.copy()
else:
reshape_obs = np.concatenate((reshape_obs,temp_obs),axis=1)
reshape_share_obs = np.concatenate((reshape_share_obs,temp_share_obs),axis=1)
self.obs.append(reshape_obs)
self.share_obs.append(reshape_share_obs)
self.obs = np.array(self.obs).astype(np.float32)
self.share_obs = np.array(self.share_obs).astype(np.float32)
self.recurrent_hidden_states = np.zeros((1, self.num_agents, 64)).astype(np.float32)
self.recurrent_hidden_states_critic = np.zeros((1, self.num_agents, 64)).astype(np.float32)
#for policy in self.policies:
# policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
class PolicyViewer_bc(MjViewer):
'''
PolicyViewer runs a policy with an environment and optionally displays it.
env - environment to run policy in
    policies - list of policy objects to run
display_window - if true, show the graphical viewer
seed - environment seed to view
duration - time in seconds to run the policy, run forever if duration=None
'''
@store_args
def __init__(self, env, policies, display_window=True, seed=None, duration=None):
if seed is None:
self.seed = env.seed()[0]
else:
self.seed = seed
env.seed(seed)
self.total_rew = 0.0
self.dict_obs = env.reset()
#for policy in self.policies:
# policy.reset()
#assert env.metadata['n_actors'] % len(policies) == 0
if hasattr(env, "reset_goal"):
self.goal = env.reset_goal()
super().__init__(self.env.unwrapped.sim)
# TO DO: remove circular dependency on viewer object. It looks fishy.
self.env.unwrapped.viewer = self
if self.render and self.display_window:
self.env.render()
def key_callback(self, window, key, scancode, action, mods):
super().key_callback(window, key, scancode, action, mods)
# Trigger on keyup only:
if action != glfw.RELEASE:
return
# Increment experiment seed
if key == glfw.KEY_N:
self.reset_increment()
# Decrement experiment trial
elif key == glfw.KEY_P:
print("Pressed P")
self.seed = max(self.seed - 1, 0)
self.env.seed(self.seed)
self.ob = self.env.reset()
for policy in self.policies:
policy.reset()
if hasattr(self.env, "reset_goal"):
self.goal = self.env.reset_goal()
self.update_sim(self.env.unwrapped.sim)
def run(self):
self.action_movement_dim = []
'''
self.order_obs = ['agent_qpos_qvel','box_obs','ramp_obs','food_obs','observation_self']
self.mask_order_obs = ['mask_aa_obs','mask_ab_obs','mask_ar_obs','mask_af_obs',None]
'''
'''
self.order_obs = ['box_obs','ramp_obs','construction_site_obs','vector_door_obs', 'observation_self']
self.mask_order_obs = [None,'mask_ar_obs',None,None,None]
'''
self.order_obs = ['agent_qpos_qvel','box_obs','ramp_obs','construction_site_obs','vector_door_obs', 'observation_self']
self.mask_order_obs = [None,None,'mask_ar_obs',None,None,None]
self.num_agents = 2
for agent_id in range(self.num_agents):
action_movement = self.env.action_space['action_movement'][agent_id].nvec
self.action_movement_dim.append(len(action_movement))
        self.masks = np.ones((1, self.num_agents, 1)).astype(np.float32)
import sys
import operator
import pytest
import ctypes
import gc
import warnings
import numpy as np
from numpy.core._rational_tests import rational
from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
from itertools import permutations
def assert_dtype_equal(a, b):
assert_equal(a, b)
assert_equal(hash(a), hash(b),
"two equivalent types do not hash to the same value !")
def assert_dtype_not_equal(a, b):
assert_(a != b)
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
hash(dt)
@pytest.mark.parametrize('t', [int, float])
def test_dtype(self, t):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
# Make sure equivalent dtypes with different type num hash equal
uintp = np.dtype(np.uintp)
if uintp.itemsize == 4:
left = uintp
right = np.dtype(np.uint32)
else:
left = uintp
right = np.dtype(np.ulonglong)
assert_(left == right)
assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
assert_raises(TypeError, np.dtype, 'O3')
assert_raises(TypeError, np.dtype, 'O5')
assert_raises(TypeError, np.dtype, 'O7')
assert_raises(TypeError, np.dtype, 'b3')
assert_raises(TypeError, np.dtype, 'h4')
assert_raises(TypeError, np.dtype, 'I5')
assert_raises(TypeError, np.dtype, 'e3')
assert_raises(TypeError, np.dtype, 'f5')
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
assert_raises(TypeError, np.dtype, 'g12')
elif np.dtype('g').itemsize == 12:
assert_raises(TypeError, np.dtype, 'g16')
if np.dtype('l').itemsize == 8:
assert_raises(TypeError, np.dtype, 'l4')
assert_raises(TypeError, np.dtype, 'L4')
else:
assert_raises(TypeError, np.dtype, 'l8')
assert_raises(TypeError, np.dtype, 'L8')
if np.dtype('q').itemsize == 8:
assert_raises(TypeError, np.dtype, 'q4')
assert_raises(TypeError, np.dtype, 'Q4')
else:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
@pytest.mark.parametrize("dtype",
['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
"Float128", "Complex128"])
def test_numeric_style_types_are_invalid(self, dtype):
with assert_raises(TypeError):
np.dtype(dtype)
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
'>f', '<f', '=f', '|f',
])
def test_dtype_bytes_str_equivalence(self, value):
bytes_value = value.encode('ascii')
from_bytes = np.dtype(bytes_value)
from_str = np.dtype(value)
assert_dtype_equal(from_bytes, from_str)
def test_dtype_from_bytes(self):
# Empty bytes object
assert_raises(TypeError, np.dtype, b'')
# Byte order indicator, but no type
assert_raises(TypeError, np.dtype, b'|')
# Single character with ordinal < NPY_NTYPES returns
# type by index into _builtin_descrs
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
# Single character where value is a valid type code
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
# Bytes with non-ascii values raise errors
assert_raises(TypeError, np.dtype, b'\xff')
assert_raises(TypeError, np.dtype, b's\xff')
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':4})
# If alignment is enabled, the alignment (4) must divide the itemsize
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'i1'],
'offsets':[0, 4],
'itemsize':9}, align=True)
# If alignment is enabled, the individual fields must be aligned
assert_raises(ValueError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
def test_field_order_equality(self):
x = np.dtype({'names': ['A', 'B'],
'formats': ['i4', 'f4'],
'offsets': [0, 4]})
y = np.dtype({'names': ['B', 'A'],
'formats': ['f4', 'i4'],
'offsets': [4, 0]})
assert_equal(x == y, False)
# But it is currently an equivalent cast:
assert np.can_cast(x, y, casting="equiv")
class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', int)])
b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r', 'b'],
'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
def test_refcount_dictionary_setting(self):
names = ["name1"]
formats = ["f8"]
titles = ["t1"]
offsets = [0]
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
np.dtype(d)
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
assert refcounts == refcounts_new
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
b = np.dtype([('yo', int)])
c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
assert_dtype_equal(a, c)
assert_dtype_not_equal(a, b)
state = b.__reduce__()[2]
a.__setstate__(state)
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
dt = np.dtype('i4, i1', align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i4', 'u1'],
'offsets':[0, 4]}, align=True)
assert_equal(dt.itemsize, 8)
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
assert_equal(dt.itemsize, 8)
# Nesting should preserve that alignment
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=True)
assert_equal(dt1.itemsize, 20)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 16]}, align=True)
assert_equal(dt2.itemsize, 20)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 16)}, align=True)
assert_equal(dt3.itemsize, 20)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Nesting should preserve packing
dt1 = np.dtype([('f0', 'i4'),
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
('f2', 'i1')], align=False)
assert_equal(dt1.itemsize, 11)
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
'formats':['i4',
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
'i1'],
'offsets':[0, 4, 10]}, align=False)
assert_equal(dt2.itemsize, 11)
dt3 = np.dtype({'f0': ('i4', 0),
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
'f2': ('i1', 10)}, align=False)
assert_equal(dt3.itemsize, 11)
assert_equal(dt1, dt2)
assert_equal(dt2, dt3)
# Array of subtype should preserve alignment
dt1 = np.dtype([('a', '|i1'),
('b', [('f0', '<i2'),
('f1', '<f4')], 2)], align=True)
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
def test_union_struct(self):
# Should be able to create union dtypes
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[0, 0, 2]}, align=True)
assert_equal(dt.itemsize, 4)
a = np.array([3], dtype='<u4').view(dt)
a['f1'] = 10
a['f2'] = 36
assert_equal(a['f0'], 10 + 36*256*256)
# Should be able to specify fields out of order
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
# field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
assert_equal(b.astype(dt), a)
assert_equal(a.view(dt2), b)
assert_equal(b.view(dt), a)
# Should not be able to overlap objects with other types
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['O', 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', 'O'],
'offsets':[0, 3]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':[[('a', 'O')], 'i1'],
'offsets':[0, 2]})
assert_raises(TypeError, np.dtype,
{'names':['f0', 'f1'],
'formats':['i4', [('a', 'O')]],
'offsets':[0, 3]})
# Out of order should still be ok, however
dt = np.dtype({'names':['f0', 'f1'],
'formats':['i1', 'O'],
'offsets':[np.dtype('intp').itemsize, 0]})
@pytest.mark.parametrize(["obj", "dtype", "expected"],
[([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
(3, "(3)f4,", [3, 3, 3]),
(np.float64(2), "(2)f4,", [2, 2]),
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
(["1", "2"], "(2)i,", None)])
def test_subarray_list(self, obj, dtype, expected):
dtype = np.dtype(dtype)
res = np.array(obj, dtype=dtype)
if expected is None:
# iterate the 1-d list to fill the array
expected = np.empty(len(obj), dtype=dtype)
for i in range(len(expected)):
expected[i] = obj[i]
assert_array_equal(res, expected)
def test_comma_datetime(self):
dt = np.dtype('M8[D],datetime64[Y],i8')
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
('f1', 'datetime64[Y]'),
('f2', 'i8')]))
def test_from_dictproxy(self):
# Tests for PR #5920
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
assert_dtype_equal(dt, np.dtype(dt.fields))
dt2 = np.dtype((np.void, dt.fields))
assert_equal(dt2.fields, dt.fields)
def test_from_dict_with_zero_width_field(self):
# Regression test for #6430 / #2196
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
dt2 = np.dtype({'names': ['val1', 'val2'],
'formats': [(np.float32, (0,)), int]})
assert_dtype_equal(dt, dt2)
assert_equal(dt.fields['val1'][0].itemsize, 0)
assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
def test_bool_commastring(self):
d = np.dtype('?,?,?') # raises?
assert_equal(len(d.names), 3)
for n in d.names:
assert_equal(d.fields[n][0], np.dtype('?'))
def test_nonint_offsets(self):
# gh-8059
def make_dtype(off):
return np.dtype({'names': ['A'], 'formats': ['i4'],
'offsets': [off]})
assert_raises(TypeError, make_dtype, 'ASD')
assert_raises(OverflowError, make_dtype, 2**70)
assert_raises(TypeError, make_dtype, 2.3)
assert_raises(ValueError, make_dtype, -10)
# no errors here:
dt = make_dtype(np.uint32(0))
np.zeros(1, dtype=dt)[0].item()
def test_fields_by_index(self):
dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
assert_dtype_equal(dt[0], np.dtype(np.int8))
assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
assert_dtype_equal(dt[-1], dt[1])
assert_dtype_equal(dt[-2], dt[0])
assert_raises(IndexError, lambda: dt[-3])
assert_raises(TypeError, operator.getitem, dt, 3.0)
assert_equal(dt[1], dt[np.int8(1)])
@pytest.mark.parametrize('align_flag',[False, True])
def test_multifield_index(self, align_flag):
# indexing with a list produces subfields
# the align flag should be preserved
dt = np.dtype([
(('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
], align=align_flag)
dt_sub = dt[['B', 'col1']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B', 'col1'],
'formats': ['<f8', '<U20'],
'offsets': [88, 0],
'titles': [None, 'title'],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[['B']]
assert_equal(
dt_sub,
np.dtype({
'names': ['B'],
'formats': ['<f8'],
'offsets': [88],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
dt_sub = dt[[]]
assert_equal(
dt_sub,
np.dtype({
'names': [],
'formats': [],
'offsets': [],
'itemsize': 96
})
)
assert_equal(dt_sub.isalignedstruct, align_flag)
assert_raises(TypeError, operator.getitem, dt, ())
assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
assert_raises(KeyError, operator.getitem, dt, ['fake'])
assert_raises(KeyError, operator.getitem, dt, ['title'])
assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
def test_partial_dict(self):
# 'names' is missing
assert_raises(ValueError, np.dtype,
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
def test_fieldless_views(self):
a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
'itemsize':8})
assert_raises(ValueError, a.view, np.dtype([]))
d = np.dtype((np.dtype([]), 10))
assert_equal(d.shape, (10,))
assert_equal(d.itemsize, 0)
assert_equal(d.base, np.dtype([]))
arr = np.fromiter((() for i in range(10)), [])
assert_equal(arr.dtype, np.dtype([]))
assert_raises(ValueError, np.frombuffer, b'', dtype=[])
assert_equal(np.frombuffer(b'', dtype=[], count=2),
np.empty(2, dtype=[]))
assert_raises(ValueError, np.dtype, ([], 'f8'))
assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
np.ones(2, dtype=bool))
assert_equal(np.zeros((1, 2), dtype=[]) == a,
np.ones((1, 2), dtype=bool))
class TestSubarray:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
assert_equal(type(b.subdtype[1]), tuple)
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (2, 3)))
b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
a = np.dtype((int, (1, 2, 3)))
b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
# FutureWarning during deprecation period; after it is passed this
# should instead check that "(1)f8" == "1f8" == ("f8", 1).
with pytest.warns(FutureWarning):
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
        assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
import os
import torch
import random
import copy
import csv
from glob import glob
from PIL import Image
import numpy as np
from scipy import ndimage
import SimpleITK as sitk
from skimage import measure
from skimage.transform import resize
from torch.utils.data import Dataset
import torchvision.transforms as transforms
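# Channel-wise [mean, std] statistics per dataset, used to normalize 3D inputs below.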
NORMALIZATION_STATISTICS = {"luna16": [[0.2563873675129015, 0.2451283333368983]],
"self_learning_cubes_32": [[0.11303308354465243, 0.12595135887180803]],
"self_learning_cubes_64": [[0.11317437834743148, 0.12611378817031038]],
"lidc": [[0.23151727, 0.2168428080133056]],
"luna_fpr": [[0.18109835972793722, 0.1853707675313153]],
"lits_seg": [[0.46046468844492944, 0.17490586272419967]],
"pe": [[0.26125720740546626, 0.20363551346695796]],
"pe16": [[0.2887357771623902, 0.24429971299033243]],
# [[0.29407377554678416, 0.24441741466975556]], ->256x256x128
"brats": [[0.28239742604241436, 0.22023889204407615]],
"luna16_lung": [[0.1968134997129321, 0.20734707135528743]]}
# ---------------------------------------------2D Data augmentation---------------------------------------------
class Augmentation():
def __init__(self, normalize):
if normalize.lower() == "imagenet":
self.normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
elif normalize.lower() == "chestx-ray":
self.normalize = transforms.Normalize([0.5056, 0.5056, 0.5056], [0.252, 0.252, 0.252])
elif normalize.lower() == "none":
self.normalize = None
else:
print("mean and std for [{}] dataset do not exist!".format(normalize))
exit(-1)
def get_augmentation(self, augment_name, mode, *args):
        try:
            aug = getattr(Augmentation, augment_name)
        except AttributeError:
            print("Augmentation [{}] does not exist!".format(augment_name))
            exit(-1)
        return aug(self, mode, *args)
def basic(self, mode):
transformList = []
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def _basic_crop(self, transCrop, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomCrop(transCrop))
else:
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_crop_224(self, mode):
transCrop = 224
return self._basic_crop(transCrop, mode)
def _basic_resize(self, size, mode="train"):
transformList = []
transformList.append(transforms.Resize(size))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_resize_224(self, mode):
size = 224
return self._basic_resize(size, mode)
def _basic_crop_rot(self, transCrop, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomCrop(transCrop))
transformList.append(transforms.RandomRotation(7))
else:
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_crop_rot_224(self, mode):
transCrop = 224
return self._basic_crop_rot(transCrop, mode)
def _basic_crop_flip(self, transCrop, transResize, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomCrop(transCrop))
transformList.append(transforms.RandomHorizontalFlip())
else:
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_crop_flip_224(self, mode):
transCrop = 224
transResize = 256
return self._basic_crop_flip(transCrop, transResize, mode)
def _basic_rdcrop_flip(self, transCrop, transResize, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomResizedCrop(transCrop))
transformList.append(transforms.RandomHorizontalFlip())
else:
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def basic_rdcrop_flip_224(self, mode):
transCrop = 224
transResize = 256
return self._basic_rdcrop_flip(transCrop, transResize, mode)
def _full(self, transCrop, transResize, mode="train", test_augment=True):
transformList = []
if mode == "train":
transformList.append(transforms.RandomResizedCrop(transCrop))
transformList.append(transforms.RandomHorizontalFlip())
transformList.append(transforms.RandomRotation(7))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
elif mode == "valid":
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
elif mode == "test":
if test_augment:
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.TenCrop(transCrop))
transformList.append(
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
if self.normalize is not None:
transformList.append(transforms.Lambda(lambda crops: torch.stack([self.normalize(crop) for crop in crops])))
else:
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
transformSequence = transforms.Compose(transformList)
return transformSequence
def full_224(self, mode, test_augment=True):
transCrop = 224
transResize = 256
return self._full(transCrop, transResize, mode, test_augment=test_augment)
def full_448(self, mode):
transCrop = 448
transResize = 512
return self._full(transCrop, transResize, mode)
def _full_colorjitter(self, transCrop, transResize, mode="train"):
transformList = []
if mode == "train":
transformList.append(transforms.RandomResizedCrop(transCrop))
transformList.append(transforms.RandomHorizontalFlip())
transformList.append(transforms.RandomRotation(7))
transformList.append(transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
elif mode == "valid":
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.CenterCrop(transCrop))
transformList.append(transforms.ToTensor())
if self.normalize is not None:
transformList.append(self.normalize)
elif mode == "test":
transformList.append(transforms.Resize(transResize))
transformList.append(transforms.TenCrop(transCrop))
transformList.append(
transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])))
if self.normalize is not None:
transformList.append(transforms.Lambda(lambda crops: torch.stack([self.normalize(crop) for crop in crops])))
transformSequence = transforms.Compose(transformList)
return transformSequence
def full_colorjitter_224(self, mode):
transCrop = 224
transResize = 256
return self._full_colorjitter(transCrop, transResize, mode)
# ---------------------------------------------3D Data Normalization--------------------------------------------
def channel_wise_normalize_3d(data, mean_std):
num_data = data.shape[0]
num_channel = data.shape[1]
if len(mean_std) == 1:
mean_std = [mean_std[0]] * num_channel
normalized_data = []
for i in range(num_data):
img = data[i, ...]
normalized_img = []
for j in range(num_channel):
img_per_channel = img[j, ...]
mean, std = mean_std[j][0], mean_std[j][1]
_img = (img_per_channel - mean) / std
normalized_img.append(_img)
normalized_data.append(normalized_img)
return np.array(normalized_data)
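# Usage sketch (hypothetical shapes): normalize a batch of single-channel cubes with the
# LUNA16 statistics; `data` is laid out as (N, C, H, W, D) and mean_std is a list of
# [mean, std] pairs, one per channel (a single pair is broadcast to all channels).
#   cubes = np.random.rand(4, 1, 64, 64, 32).astype(np.float32)
#   cubes = channel_wise_normalize_3d(cubes, NORMALIZATION_STATISTICS["luna16"])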
# ---------------------------------------------Downstream ChestX-ray14------------------------------------------
class ChestX_ray14(Dataset):
def __init__(self, pathImageDirectory, pathDatasetFile, augment, num_class=14, anno_percent=100):
self.img_list = []
self.img_label = []
self.augment = augment
with open(pathDatasetFile, "r") as fileDescriptor:
line = True
while line:
line = fileDescriptor.readline()
if line:
lineItems = line.split()
imagePath = os.path.join(pathImageDirectory, lineItems[0])
imageLabel = lineItems[1:num_class + 1]
imageLabel = [int(i) for i in imageLabel]
self.img_list.append(imagePath)
self.img_label.append(imageLabel)
indexes = np.arange(len(self.img_list))
if anno_percent < 100:
random.Random(99).shuffle(indexes)
num_data = int(indexes.shape[0] * anno_percent / 100.0)
indexes = indexes[:num_data]
_img_list, _img_label = copy.deepcopy(self.img_list), copy.deepcopy(self.img_label)
self.img_list = []
self.img_label = []
for i in indexes:
self.img_list.append(_img_list[i])
self.img_label.append(_img_label[i])
def __getitem__(self, index):
imagePath = self.img_list[index]
imageData = Image.open(imagePath).convert('RGB')
imageLabel = torch.FloatTensor(self.img_label[index])
if self.augment != None: imageData = self.augment(imageData)
return imageData, imageLabel
def __len__(self):
return len(self.img_list)
# ---------------------------------------------Downstream CheXpert------------------------------------------
class CheXpert(Dataset):
def __init__(self, pathImageDirectory, pathDatasetFile, augment, num_class=14,
uncertain_label="LSR-Ones", unknown_label=0, anno_percent=100):
self.img_list = []
self.img_label = []
self.augment = augment
assert uncertain_label in ["Ones", "Zeros", "LSR-Ones", "LSR-Zeros"]
self.uncertain_label = uncertain_label
with open(pathDatasetFile, "r") as fileDescriptor:
csvReader = csv.reader(fileDescriptor)
next(csvReader, None)
for line in csvReader:
imagePath = os.path.join(pathImageDirectory, line[0])
label = line[5:]
for i in range(num_class):
if label[i]:
a = float(label[i])
if a == 1:
label[i] = 1
elif a == 0:
label[i] = 0
elif a == -1: # uncertain label
label[i] = -1
else:
label[i] = unknown_label # unknown label
self.img_list.append(imagePath)
imageLabel = [int(i) for i in label]
self.img_label.append(imageLabel)
indexes = np.arange(len(self.img_list))
if anno_percent < 100:
random.Random(99).shuffle(indexes)
num_data = int(indexes.shape[0] * anno_percent / 100.0)
indexes = indexes[:num_data]
_img_list, _img_label = copy.deepcopy(self.img_list), copy.deepcopy(self.img_label)
self.img_list = []
self.img_label = []
for i in indexes:
self.img_list.append(_img_list[i])
self.img_label.append(_img_label[i])
def __getitem__(self, index):
imagePath = self.img_list[index]
imageData = Image.open(imagePath).convert('RGB')
label = []
for l in self.img_label[index]:
if l == -1:
if self.uncertain_label == "Ones":
label.append(1)
elif self.uncertain_label == "Zeros":
label.append(0)
elif self.uncertain_label == "LSR-Ones":
label.append(random.uniform(0.55, 0.85))
elif self.uncertain_label == "LSR-Zeros":
label.append(random.uniform(0, 0.3))
else:
label.append(l)
imageLabel = torch.FloatTensor(label)
if self.augment != None: imageData = self.augment(imageData)
return imageData, imageLabel
def __len__(self):
return len(self.img_list)
# ---------------------------------------------------NPY DataSet------------------------------------------------
class NPYDataLoader(Dataset):
def __init__(self, data):
self.data_x, self.data_y = data
def __len__(self):
return self.data_x.shape[0]
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return self.data_x[idx, ...], self.data_y[idx, ...]
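# Minimal usage sketch (assumed workflow, paths are placeholders): wrap the (x, y) arrays
# returned by the loaders below in NPYDataLoader and hand it to a torch DataLoader, e.g.
#   train_ds = NPYDataLoader(LIDC_3D("/path/to/lidc", "train", normalization="lidc"))
#   train_dl = torch.utils.data.DataLoader(train_ds, batch_size=8, shuffle=True)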
# --------------------------------------------Downstream LUNA FPR 3D--------------------------------------------
def LUNA_FPR_3D(data_dir, fold, input_size, hu_range, crop=True, normalization=None, set="data", anno_percent=100,
shuffle=True):
input_rows, input_cols, input_deps = input_size[0], input_size[1], input_size[2]
hu_min, hu_max = hu_range[0], hu_range[1]
def load_image(data_dir, fold, input_rows, input_cols, hu_min, hu_max, crop=True):
positives, negatives = [], []
for subset in fold:
LUNA16_PROCESSED_DIR_POS = os.path.join(data_dir, "subset" + str(subset), "positives")
LUNA16_PROCESSED_DIR_NEG = os.path.join(data_dir, "subset" + str(subset), "negatives")
positive_file_list = glob(os.path.join(LUNA16_PROCESSED_DIR_POS, "*.npy"))
negative_file_list = glob(os.path.join(LUNA16_PROCESSED_DIR_NEG, "*.npy"))
positive_index = [x for x in range(len(positive_file_list))]
negative_index = [x for x in range(len(negative_file_list))]
if shuffle:
random.shuffle(positive_index)
random.shuffle(negative_index)
for i in range(min(len(positive_file_list), len(negative_file_list))):
im_pos_ = np.load(positive_file_list[positive_index[i]])
im_neg_ = np.load(negative_file_list[negative_index[i]])
if crop:
im_pos = np.zeros((input_rows, input_cols, im_pos_.shape[-1]), dtype="float")
im_neg = np.zeros((input_rows, input_cols, im_pos_.shape[-1]), dtype="float")
for z in range(im_pos_.shape[-1]):
im_pos[:, :, z] = resize(im_pos_[:, :, z], (input_rows, input_cols), preserve_range=True)
im_neg[:, :, z] = resize(im_neg_[:, :, z], (input_rows, input_cols), preserve_range=True)
else:
im_pos, im_neg = im_pos_, im_neg_
im_pos[im_pos < hu_min] = hu_min
im_pos[im_pos > hu_max] = hu_max
im_neg[im_neg < hu_min] = hu_min
im_neg[im_neg > hu_max] = hu_max
im_pos = (im_pos - hu_min) / (hu_max - hu_min)
im_neg = (im_neg - hu_min) / (hu_max - hu_min)
positives.append(im_pos)
negatives.append(im_neg)
positives, negatives = np.array(positives), np.array(negatives)
positives, negatives = np.expand_dims(positives, axis=-1), np.expand_dims(negatives, axis=-1)
return positives, negatives
x_pos, x_neg = load_image(data_dir, fold, input_rows, input_cols, hu_min, hu_max, crop=crop)
x_data = np.concatenate((x_pos, x_neg), axis=0)
y_data = np.concatenate((np.ones((x_pos.shape[0],)),
np.zeros((x_neg.shape[0],)),
), axis=0)
x_data = np.expand_dims(np.squeeze(x_data), axis=1)
if normalization is not None and normalization.lower() != "none":
mean_std = NORMALIZATION_STATISTICS[normalization.lower()]
x_data = channel_wise_normalize_3d(x_data, mean_std=mean_std)
if anno_percent < 100:
ind_list = [i for i in range(x_data.shape[0])]
random.Random(99).shuffle(ind_list)
num_data = int(x_data.shape[0] * anno_percent / 100.0)
x_data = x_data[ind_list[:num_data], ...]
y_data = y_data[ind_list[:num_data], ...]
print("x_{}: {} | {:.2f} ~ {:.2f}".format(set, x_data.shape, np.min(x_data), np.max(x_data)))
print("y_{}: {} | {:.2f} ~ {:.2f}".format(set, y_data.shape, np.min(y_data), np.max(y_data)))
return x_data, y_data
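# Both LUNA_FPR_3D and LIDC_3D return channel-first arrays shaped (N, 1, rows, cols, slices),
# paired with binary labels or masks; anno_percent < 100 randomly subsamples the annotated data.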
# ----------------------------------------------Downstream LIDC 3D----------------------------------------------
def LIDC_3D(data_dir, set, normalization=None, anno_percent=100):
x_data = np.squeeze(np.load(os.path.join(data_dir, 'x_' + set + '_64x64x32.npy')))
y_data = np.squeeze(np.load(os.path.join(data_dir, 'm_' + set + '_64x64x32.npy')))
x_data = np.expand_dims(x_data, axis=1)
y_data = np.expand_dims(y_data, axis=1)
if normalization is not None and normalization.lower() != "none":
mean_std = NORMALIZATION_STATISTICS[normalization.lower()]
x_data = channel_wise_normalize_3d(x_data, mean_std=mean_std)
if anno_percent < 100:
ind_list = [i for i in range(x_data.shape[0])]
random.Random(99).shuffle(ind_list)
num_data = int(x_data.shape[0] * anno_percent / 100.0)
x_data = x_data[ind_list[:num_data], ...]
y_data = y_data[ind_list[:num_data], ...]
print("x_{}: {} | {:.2f} ~ {:.2f}".format(set, x_data.shape, np.min(x_data), np.max(x_data)))
print("y_{}: {} | {:.2f} ~ {:.2f}".format(set, y_data.shape, np.min(y_data), np.max(y_data)))
return x_data, y_data
# ----------------------------------------------Downstream LiTS 3D----------------------------------------------
def LiTS_3D(data_path, id_list, obj="liver", normalization=None, anno_percent=100,
input_size=(64, 64, 32), hu_range=(-1000.0, 1000.0), status=None):
def load_data_npy(data_path, id_list, obj="liver", input_size=(64, 64, 32), hu_range=(-1000.0, 1000.0), status=None):
x_data, y_data = [], []
input_rows, input_cols, input_deps = input_size[0], input_size[1], input_size[2]
hu_min, hu_max = hu_range[0], hu_range[1]
for patient_id in id_list:
Vol = np.load(os.path.join(data_path, "volume-" + str(patient_id) + ".npy"))
Vol[Vol > hu_max] = hu_max
Vol[Vol < hu_min] = hu_min
Vol = (Vol - hu_min) / (hu_max - hu_min)
Vol = np.expand_dims(Vol, axis=0)
Mask = np.load(os.path.join(data_path, "segmentation-" + str(patient_id) + ".npy"))
liver_mask, lesion_mask = copy.deepcopy(Mask), copy.deepcopy(Mask)
liver_mask[Mask > 0.5] = 1
liver_mask[Mask <= 0.5] = 0
lesion_mask[Mask > 1] = 1
lesion_mask[Mask <= 1] = 0
Mask = np.concatenate((np.expand_dims(liver_mask, axis=0), np.expand_dims(lesion_mask, axis=0)), axis=0)
if obj == "liver":
for i in range(input_rows - 1, Vol.shape[1] - input_rows + 1, input_rows):
for j in range(input_cols - 1, Vol.shape[2] - input_cols + 1, input_cols):
for k in range(input_deps - 1, Vol.shape[3] - input_deps + 1, input_deps):
if np.sum(Mask[0, i:i + input_rows, j:j + input_cols,
k:k + input_deps]) > 0 or random.random() < 0.01:
x_data.append(Vol[:, i:i + input_rows, j:j + input_cols, k:k + input_deps])
y_data.append(Mask[:, i:i + input_rows, j:j + input_cols, k:k + input_deps])
if np.sum(Mask[0]) > 1000:
cx, cy, cz = ndimage.measurements.center_of_mass(np.squeeze(Mask[0]))
# print(cx, cy, cz)
cx, cy, cz = int(cx), int(cy), int(cz)
for delta_x in range(-10, 20, 20):
for delta_y in range(-10, 20, 20):
for delta_z in range(-5, 10, 10):
if cx + delta_x - int(input_rows / 2) < 0 or cx + delta_x + int(input_rows / 2) > Vol.shape[1] - 1 or \
cy + delta_y - int(input_cols / 2) < 0 or cy + delta_y + int(input_cols / 2) > Vol.shape[2] - 1 or \
cz + delta_z - int(input_deps / 2) < 0 or cz + delta_z + int(input_deps / 2) > Vol.shape[3] - 1:
pass
else:
x_data.append(Vol[:, cx + delta_x - int(input_rows / 2):cx + delta_x + int(input_rows / 2), \
cy + delta_y - int(input_cols / 2):cy + delta_y + int(input_cols / 2), \
cz + delta_z - int(input_deps / 2):cz + delta_z + int(input_deps / 2)])
y_data.append(Mask[:, cx + delta_x - int(input_rows / 2):cx + delta_x + int(input_rows / 2), \
cy + delta_y - int(input_cols / 2):cy + delta_y + int(input_cols / 2), \
cz + delta_z - int(input_deps / 2):cz + delta_z + int(input_deps / 2)])
elif obj == "lesion":
if np.sum(Mask[1]) > 0:
labels = measure.label(Mask[1], neighbors=8, background=0)
for label in np.unique(labels):
if label == 0:
continue
labelMask = np.zeros(Mask[1].shape, dtype="int")
labelMask[labels == label] = 1
cx, cy, cz = ndimage.measurements.center_of_mass(np.squeeze(labelMask))
cx, cy, cz = int(cx), int(cy), int(cz)
if labelMask[cx, cy, cz] == 1:
for delta_x in range(-5, 5, 5):
for delta_y in range(-5, 5, 5):
for delta_z in range(-3, 3, 3):
if cx + delta_x - int(input_rows / 2) < 0 or cx + delta_x + int(input_rows / 2) > Vol.shape[1] - 1 \
or \
cy + delta_y - int(input_cols / 2) < 0 or cy + delta_y + int(input_cols / 2) > Vol.shape[2] - 1 \
or \
cz + delta_z - int(input_deps / 2) < 0 or cz + delta_z + int(input_deps / 2) > Vol.shape[3] - 1:
pass
else:
x_data.append(
Vol[:, cx + delta_x - int(input_rows / 2):cx + delta_x + int(input_rows / 2), \
cy + delta_y - int(input_cols / 2):cy + delta_y + int(input_cols / 2), \
cz + delta_z - int(input_deps / 2):cz + delta_z + int(input_deps / 2)])
y_data.append(
Mask[:, cx + delta_x - int(input_rows / 2):cx + delta_x + int(input_rows / 2), \
cy + delta_y - int(input_cols / 2):cy + delta_y + int(input_cols / 2), \
cz + delta_z - int(input_deps / 2):cz + delta_z + int(input_deps / 2)])
else:
print("Objetc [{}] does not exist!".format(obj))
        return np.array(x_data), np.array(y_data)
'''
Classes for representing tomograms with segmentations
'''
import os
import copy
import numpy as np
import scipy as sp
import scipy.ndimage  # sp.ndimage is used below; scipy does not auto-import its submodules
from shutil import rmtree
from .utils import *
from pyorg import pexceptions, sub, disperse_io
from pyorg.globals.utils import unpickle_obj
from pyorg import globals as gl
import pickle
__author__ = '<NAME>'
##### Global variables
NM3_TO_UM3 = 1e-9
# GLOBAL FUNCTIONS
# Clean an directory contents (directory is preserved)
# dir: directory path
def clean_dir(dir):
for root, dirs, files in os.walk(dir):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
rmtree(os.path.join(root, d))
# PARALLEL PROCESSES
# CLASSES
############################################################################
# Class for a Segmentation: set of voxel in a tomogram
#
class Segmentation(object):
def __init__(self, tomo, lbl):
"""
:param tomo: tomogram which contains the segmentation
:param lbl: label for the segmentation
"""
# Input parsing
self.__ids = np.where(tomo == lbl)
self.__vcount = len(self.__ids[0])
assert self.__vcount > 0
self.__lbl = lbl
# Pre-compute bounds for accelerate computations
self.__bounds = np.zeros(shape=6, dtype=np.float32)
self.__update_bounds()
#### Set/Get functionality
def get_ids(self):
return self.__ids
def get_label(self):
return self.__lbl
def get_voxels_count(self):
"""
:return: the number of voxel in the segmentation
"""
return self.__vcount
def get_bounds(self):
"""
:return: surface bounds (x_min, x_max, y_min, y_max, z_min, z_max) as array
"""
return self.__bounds
#### External functionality
def bound_in_bounds(self, bounds):
"""
        Check if the object's bounds at least partially overlap the given bounds
:param bounds: input bound
:return:
"""
x_over, y_over, z_over = True, True, True
if (self.__bounds[0] > bounds[1]) or (self.__bounds[1] < bounds[0]):
x_over = False
if (self.__bounds[2] > bounds[3]) or (self.__bounds[3] < bounds[2]):
y_over = False
if (self.__bounds[4] > bounds[5]) or (self.__bounds[5] < bounds[4]):
            z_over = False
return x_over and y_over and z_over
def point_in_bounds(self, point):
"""
        Check if a point is within the segmentation's bounds
:param point: point to check
:return:
"""
x_over, y_over, z_over = True, True, True
if (self.__bounds[0] > point[0]) or (self.__bounds[1] < point[0]):
x_over = False
if (self.__bounds[2] > point[1]) or (self.__bounds[3] < point[1]):
y_over = False
if (self.__bounds[4] > point[2]) or (self.__bounds[5] < point[2]):
            z_over = False
return x_over and y_over and z_over
def pickle(self, fname):
"""
VTK attributes requires a special treatment during pickling
:param fname: file name ended with .pkl
:return:
"""
# Dump pickable objects and store the file names of the unpickable objects
stem, ext = os.path.splitext(fname)
self.__vtp_fname = stem + '_curve.vtp'
pkl_f = open(fname, 'w')
try:
pickle.dump(self, pkl_f)
finally:
pkl_f.close()
# INTERNAL FUNCTIONALITY AREA
def __update_bounds(self):
self.__bounds[0], self.__bounds[1] = self.__ids[0].min(), self.__ids[0].max()
self.__bounds[2], self.__bounds[3] = self.__ids[1].min(), self.__ids[1].max()
self.__bounds[4], self.__bounds[5] = self.__ids[2].min(), self.__ids[2].max()
############################################################################
# Class for a OMSegmentation: oriented membrane segmentation.
# Contains two segmentations, one with the membrane the other with the lumen
#
class OMSegmentation(object):
def __init__(self, tomo_mb, tomo_lm, lbl):
"""
:param tomo_mb: tomogram with the membrane (None is allowed)
:param tomo_lm: tomogram with the lumen
:param lbl:
"""
# Input parsing
self.__ids_lm = np.where(tomo_lm == lbl)
self.__vcount_lm = len(self.__ids_lm[0])
self.__lbl = lbl
assert self.__vcount_lm > 0
if tomo_mb is None:
self.__ids_mb = self.__ids_lm
self.__vcount_mb = self.__vcount_lm
else:
self.__ids_mb = np.where(tomo_mb == lbl)
self.__vcount_mb = len(self.__ids_mb[0])
assert self.__vcount_mb > 0
# Pre-compute bounds for accelerate computations
self.__bounds = np.zeros(shape=6, dtype=np.float32)
self.__update_bounds()
#### Set/Get functionality
def get_label(self):
"""
:return: an integer label
"""
return self.__lbl
def get_ids(self, mode='lm'):
"""
:param mode: 'mb' or 'lumen' ids
:return: segmented voxel indices an array with 4 dimension (N,X,Y,Z)
"""
assert (mode == 'mb') or (mode == 'lm')
if mode == 'mb':
return self.__ids_mb
elif mode == 'lm':
return self.__ids_lm
def get_voxels_count(self, mode='mb'):
"""
:param mode: to count 'mb' or 'lumen' voxels
:return: the number of voxel in the segmentation
"""
assert (mode == 'mb') or (mode == 'lm')
if mode == 'mb':
return self.__vcount_mb
elif mode == 'lm':
return self.__vcount_lm
def get_bounds(self):
"""
:return: surface bounds (x_min, x_max, y_min, y_max, z_min, z_max) as array
"""
return self.__bounds
#### External functionality
def bound_in_bounds(self, bounds):
"""
        Check if the object's bounds at least partially overlap the given bounds
:param bounds: input bound
:return:
"""
hold_bounds = self.__bounds
x_over, y_over, z_over = True, True, True
if (hold_bounds[0] > bounds[1]) or (hold_bounds[1] < bounds[0]):
x_over = False
if (hold_bounds[2] > bounds[3]) or (hold_bounds[3] < bounds[2]):
y_over = False
if (hold_bounds[4] > bounds[5]) or (hold_bounds[5] < bounds[4]):
            z_over = False
return x_over and y_over and z_over
def point_in_bounds(self, point):
"""
        Check if a point is within the segmentation's bounds
:param point: point to check
:return:
"""
hold_bounds = self.__bounds
x_over, y_over, z_over = True, True, True
if (hold_bounds[0] > point[0]) or (hold_bounds[1] < point[0]):
x_over = False
if (hold_bounds[2] > point[1]) or (hold_bounds[3] < point[1]):
y_over = False
if (hold_bounds[4] > point[2]) or (hold_bounds[5] < point[2]):
            z_over = False
return x_over and y_over and z_over
def pickle(self, fname):
"""
VTK attributes requires a special treatment during pickling
:param fname: file name ended with .pkl
:return:
"""
# Dump pickable objects and store the file names of the unpickable objects
stem, ext = os.path.splitext(fname)
self.__vtp_fname = stem + '_curve.vtp'
        pkl_f = open(fname, 'wb')
try:
pickle.dump(self, pkl_f)
finally:
pkl_f.close()
# INTERNAL FUNCTIONALITY AREA
def __update_bounds(self):
bounds_mb, bounds_lm = np.zeros(shape=6, dtype=np.float32), np.zeros(shape=6, dtype=np.float32)
bounds_mb[0], bounds_mb[1] = self.__ids_mb[0].min(), self.__ids_mb[0].max()
bounds_mb[2], bounds_mb[3] = self.__ids_mb[1].min(), self.__ids_mb[1].max()
bounds_mb[4], bounds_mb[5] = self.__ids_mb[2].min(), self.__ids_mb[2].max()
bounds_lm[0], bounds_lm[1] = self.__ids_lm[0].min(), self.__ids_lm[0].max()
bounds_lm[2], bounds_lm[3] = self.__ids_lm[1].min(), self.__ids_lm[1].max()
bounds_lm[4], bounds_lm[5] = self.__ids_lm[2].min(), self.__ids_lm[2].max()
        # Joint bounding box of membrane and lumen: take the minimum of the lower bounds
        # and the maximum of the upper bounds so both segmentations are fully covered.
        self.__bounds[0::2] = np.minimum(bounds_mb[0::2], bounds_lm[0::2])
        self.__bounds[1::2] = np.maximum(bounds_mb[1::2], bounds_lm[1::2])
############################################################################
# Class for tomograms with oriented membrane segmentations
#
class TomoOMSegmentations(object):
def __init__(self, name, voi_mb=None, voi_lm=None, max_dst=0, res=1):
"""
:param name: name to identify the tomogram
        :param voi_mb: binary ndarray with the membrane segmentation
        :param voi_lm: binary ndarray with the lumen segmentation
        :param max_dst: maximum distance to lumen border for membrane segmentation (in segmentation pixels)
        :param res: voxel resolution in nm/pixel (see set_resolution)
"""
# Input parsing
if not isinstance(name, str):
error_msg = 'Input is not a string.'
raise pexceptions.PySegInputError(expr='__init__ (TomoOMSegmentations)', msg=error_msg)
if (voi_mb is not None) and (not isinstance(voi_mb, np.ndarray)):
error_msg = 'Input VOI for membranes must be an numpy.ndarray.'
raise pexceptions.PySegInputError(expr='__init__ (TomoOMSegmentations)', msg=error_msg)
if (voi_lm is not None) and (not isinstance(voi_lm, np.ndarray)):
error_msg = 'Input VOI for lumen must be an numpy.ndarray.'
raise pexceptions.PySegInputError(expr='__init__ (TomoOMSegmentations)', msg=error_msg)
self.__name = name
self.__segs = list()
# Create the lumen's label field
if voi_mb.shape != voi_lm.shape:
error_msg = 'Input tomograms for membranes and lumen must have the same sizes.'
raise pexceptions.PySegInputError(expr='__init__ (TomoOMSegmentations)', msg=error_msg)
self.__lbl_voi_lm, nlbls = sp.ndimage.label(voi_lm, structure=np.ones(shape=(3, 3, 3)))
lbls_lm = list(range(1, nlbls+1))
# disperse_io.save_numpy(self.__lbl_voi_lm, '/fs/pool/pool-ruben/antonio/filaments/ltomos_omsegs/test/hold_lm.mrc')
hold_lm = sp.ndimage.morphology.binary_dilation(voi_lm > 0)
dst_field_lm, dst_ids_lm = sp.ndimage.morphology.distance_transform_edt(hold_lm, return_distances=True,
return_indices=True)
# hold_lm = sp.ndimage.morphology.binary_dilation(voi_lm == 0)
# dst_field_inv_lm, dst_ids_inv_lm = sp.ndimage.morphology.distance_transform_edt(hold_lm, return_distances=True,
# return_indices=True)
# Set lumen labels to membrane segmentation
mb_ids = np.where(voi_mb)
self.__lbl_voi_mb = np.zeros(shape=voi_mb.shape, dtype=np.int32)
for x, y, z in zip(mb_ids[0], mb_ids[1], mb_ids[2]):
hold_dst = dst_field_lm[x, y, z]
if (hold_dst > 0) and (hold_dst <= max_dst):
x_idx, y_idx, z_idx = dst_ids_lm[:, x, y, z]
x_l = x_idx - 2
if x_l <= 0:
x_l = 0
x_h = x_idx + 3
if x_h >= self.__lbl_voi_mb.shape[0]:
x_h = self.__lbl_voi_mb.shape[0]
y_l = y_idx - 2
if y_l <= 0:
y_l = 0
y_h = y_idx + 3
if y_h >= self.__lbl_voi_mb.shape[1]:
y_h = self.__lbl_voi_mb.shape[1]
z_l = z_idx - 2
if z_l <= 0:
z_l = 0
z_h = z_idx + 3
if z_h >= self.__lbl_voi_mb.shape[2]:
z_h = self.__lbl_voi_mb.shape[2]
hold_lbls_lm = self.__lbl_voi_lm[x_l:x_h, y_l:y_h, z_l:z_h]
try:
hold_lbl_lm = np.argmax(np.bincount(hold_lbls_lm[hold_lbls_lm > 0]))
self.__lbl_voi_mb[x, y, z] = hold_lbl_lm
except ValueError:
pass # print 'jol 1'
# else:
# hold_dst_inv = dst_field_inv_lm[x, y, z]
# if (hold_dst_inv > 0) and (hold_dst_inv <= max_dst):
# x_idx, y_idx, z_idx = dst_ids_inv_lm[:, x, y, z]
# x_l = x_idx - 2
# if x_l <= 0:
# x_l = 0
# x_h = x_idx + 3
# if x_h >= self.__lbl_voi_mb.shape[0]:
# x_h = self.__lbl_voi_mb.shape[0]
# y_l = y_idx - 2
# if y_l <= 0:
# y_l = 0
# y_h = y_idx + 3
# if y_h >= self.__lbl_voi_mb.shape[1]:
# y_h = self.__lbl_voi_mb.shape[1]
# z_l = z_idx - 2
# if z_l <= 0:
# z_l = 0
# z_h = z_idx + 3
# if z_h >= self.__lbl_voi_mb.shape[2]:
# z_h = self.__lbl_voi_mb.shape[2]
# hold_lbls_lm = self.__lbl_voi_lm[x_l:x_h, y_l:y_h, z_l:z_h]
# try:
# hold_lbl_lm = np.argmax(np.bincount(hold_lbls_lm[hold_lbls_lm > 0]))
# self.__lbl_voi_mb[x, y, z] = hold_lbl_lm
# except ValueError:
# pass # print 'Jol 2'
# else:
# pass # print 'Jol 3'
# Create the segmentations
for lbl_lm in lbls_lm:
try:
self.__segs.append(OMSegmentation(self.__lbl_voi_mb, self.__lbl_voi_lm, lbl_lm))
except AssertionError:
continue
# Pixel size settings
self.set_resolution(res)
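        # Usage sketch (hypothetical inputs): given two aligned binary volumes
        # 'voi_mb' and 'voi_lm' with the membrane and lumen segmentations, the
        # container could be built roughly as
        #   tomo = TomoOMSegmentations('tomo_1', voi_mb=voi_mb, voi_lm=voi_lm,
        #                              max_dst=5, res=1.0)
        #   print(tomo.get_num_segmentations())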
# GET/SET AREA
def set_resolution(self, res):
"""
Set resolution in nm/pixel
        :param res: resolution value in nm/pixel (must be > 0)
:return:
"""
assert res > 0
self.__res = res
self.__res_3 = self.__res * self.__res * self.__res
def get_voi(self, mode='mb'):
"""
Get the tomograms with the segmentations VOI
:param mode: 'mb' membrane, 'lm' lumen, 'mb-lm' membrane and lumen fused
:return: a binary ndarray
"""
if mode == 'mb':
return self.__lbl_voi_mb > 0
elif mode == 'lm':
return self.__lbl_voi_lm > 0
elif mode == 'mb-lm':
            return (self.__lbl_voi_mb + self.__lbl_voi_lm) > 0
else:
error_msg = 'Input mode not valid: ' + str(mode)
raise pexceptions.PySegInputError(expr='get_voi (TomoOMSegmentations)', msg=error_msg)
def get_lbl_voi(self, mode='mb'):
"""
Get the labeled tomograms with the segmentations
:param mode: 'mb' membrane, 'lm' lumen
:return: an ndarray with the segmentations labeled
"""
if mode == 'mb':
return self.__lbl_voi_mb
elif mode == 'lm':
return self.__lbl_voi_lm
else:
error_msg = 'Input mode not valid: ' + str(mode)
            raise pexceptions.PySegInputError(expr='get_lbl_voi (TomoOMSegmentations)', msg=error_msg)
def get_tomo_name(self):
return self.__name
def get_segmentations(self):
return self.__segs
def get_num_segmentations(self):
return len(self.__segs)
def get_tomo_vol(self):
"""
Compute the volume of the whole tomogram (um**3)
:return: a float value
"""
        return np.asarray(self.__lbl_voi_mb.shape, dtype=float).prod() \
* self.__res * self.__res * self.__res * NM3_TO_UM3
# EXTERNAL FUNCTIONALITY AREA
def delete_segmentation(self, seg_ids):
"""
Remove segmentations from the list
:param seg_ids: integer ids of the segmentations (their position in the current list of segmentations)
:return: None
"""
# Loop for keeping survivors
hold_segs = list()
for i in range(len(self.__segs)):
            if i not in seg_ids:
hold_segs.append(self.__segs[i])
# Updating the list of segmentations
self.__segs = hold_segs
def pickle(self, fname):
"""
:param fname: file name ended with .pkl
:return:
"""
        pkl_f = open(fname, 'wb')
try:
pickle.dump(self, pkl_f)
finally:
pkl_f.close()
def compute_voi_volume(self, mode='mb'):
"""
        Compute the VOI volume (um**3)
        :param mode: 'mb' membrane, 'lm' lumen, 'mb-lm' membrane and lumen fused
        :return: the total VOI volume (um**3)
        """
        if mode == 'mb':
            return (self.__lbl_voi_mb > 0).sum() * self.__res_3 * NM3_TO_UM3
        elif mode == 'lm':
            return (self.__lbl_voi_lm > 0).sum() * self.__res_3 * NM3_TO_UM3
        elif mode == 'mb-lm':
            return ((self.__lbl_voi_mb > 0).sum() + (self.__lbl_voi_lm > 0).sum()) * self.__res_3 * NM3_TO_UM3
else:
error_msg = 'Input mode not valid: ' + str(mode)
raise pexceptions.PySegInputError(expr='compute_voi_volume (TomoOMSegmentations)', msg=error_msg)
def compute_seg_volumes(self, mode='mb'):
"""
Compute the volumes for each segmentation
:param mode: 'mb' membrane, 'lm' lumen
:return: an array with the volume (um**3) for each organelle
"""
vols = np.zeros(shape=len(self.__segs), dtype=np.float32)
for i, seg in enumerate(self.__segs):
vols[i] = seg.get_voxels_count(mode) * self.__res * self.__res * self.__res * NM3_TO_UM3
return vols
def compute_om_seg_dsts(self):
"""
Computes the distance among the different oriented membrane segmentations
        :return: a 3D array (tomogram segmentation) where each voxel encodes the distance to the closest membrane
        segmentation; background voxels are set to zero.
"""
# Initialization
dsts_field = | np.zeros(shape=self.__lbl_voi_lm.shape, dtype=np.float32) | numpy.zeros |
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import numpy as np
import itertools
from io import BytesIO
from numpy.testing import assert_array_equal, assert_array_almost_equal, dec
# Decorator to skip tests requiring save / load if scipy not available for mat
# files
from ..optpkg import optional_package
_, have_scipy, _ = optional_package('scipy')
scipy_skip = | dec.skipif(not have_scipy, 'scipy not available') | numpy.testing.dec.skipif |
"""
SUH-SPH interpolation comparison
==================================
"""
import numpy as np
from bfieldtools.mesh_conductor import MeshConductor, StreamFunction
from mayavi import mlab
import trimesh
import matplotlib.pyplot as plt
from bfieldtools.sphtools import basis_fields as sphfield
from bfieldtools.sphtools import field as sph_field_eval
from bfieldtools.sphtools import basis_potentials, potential
import mne
from bfieldtools.viz import plot_data_on_vertices, plot_mesh
#%%
SAVE_DIR = "./MNE interpolation/"
#%%
EVOKED = True
with np.load(SAVE_DIR + "mne_data.npz", allow_pickle=True) as data:
p = data["p"]
n = data["n"]
mesh = trimesh.Trimesh(vertices=data["vertices"], faces=data["faces"])
if EVOKED:
evoked = mne.Evoked(SAVE_DIR + "left_auditory-ave.fif")
i0, i1 = evoked.time_as_index(0.08)[0], evoked.time_as_index(0.09)[0]
field = evoked.data[:, i0:i1].mean(axis=1)
else:
# take "data" from lead field matrix, i.e, topography of a single dipole
from mne.datasets import sample
import os
data_path = sample.data_path()
raw_fname = data_path + "/MEG/sample/sample_audvis_raw.fif"
trans = data_path + "/MEG/sample/sample_audvis_raw-trans.fif"
src = data_path + "/subjects/sample/bem/sample-oct-6-src.fif"
bem = data_path + "/subjects/sample/bem/sample-5120-5120-5120-bem-sol.fif"
subjects_dir = os.path.join(data_path, "subjects")
# Note that forward solutions can also be read with read_forward_solution
fwd = mne.make_forward_solution(
raw_fname, trans, src, bem, meg=True, eeg=False, mindist=5.0, n_jobs=2
)
# Take only magnetometers
mags = np.array([n[-1] == "1" for n in fwd["sol"]["row_names"]])
L = fwd["sol"]["data"][mags, :]
# Take the first dipole
field = L[:, 56]
#%% radius for inner/outer sph
R = np.min(np.linalg.norm(p, axis=1)) - 0.02
#%%
lmax = 7 # maximum degree
Bca, Bcb = sphfield(p, lmax, normalization="energy", R=R)
# sph-components at sensors
Bca_sensors = np.einsum("ijk,ij->ik", Bca, n)
Bcb_sensors = np.einsum("ijk,ij->ik", Bcb, n)
#%% Visualize sph components at the helmet
# idx = 20
# evoked1 = evoked.copy()
# evoked1.data[:, :] = np.tile(Bca_sensors[:, idx].T, (evoked.times.shape[0], 1)).T
# evoked1.plot_topomap(times=0.080, ch_type="mag", colorbar=False)
# evoked1 = evoked.copy()
# evoked1.data[:, :] = np.tile(Bcb_sensors[:, idx].T, (evoked.times.shape[0], 1)).T
# evoked1.plot_topomap(times=0.080, ch_type="mag", colorbar=False)
#%% calculate inner sph-coefficients with pinv
PINV = True
if PINV:
alpha = np.linalg.pinv(Bca_sensors, rcond=1e-15) @ field
else:
# Calculate using regularization
ssa = np.linalg.svd(Bca_sensors @ Bca_sensors.T, False, False)
reg_exp = 6
_lambda = np.max(ssa) * (10 ** (-reg_exp))
# angular-Laplacian in the sph basis is diagonal
La = np.diag([l * (l + 1) for l in range(1, lmax + 1) for m in range(-l, l + 1)])
BB = Bca_sensors.T @ Bca_sensors + _lambda * La
alpha = np.linalg.solve(BB, Bca_sensors.T @ field)
# Reconstruct field in helmet
# reco_sph = np.zeros(field.shape)
# i = 0
# for l in range(1, lmax + 1):
# for m in range(-1 * l, l + 1):
# reco_sph += alpha[i] * Bca_sensors[:, i]
# i += 1
# Produces the same result as the loop
reco_sph = Bca_sensors @ alpha
print(
"SPH-reconstruction relative error:",
np.linalg.norm(reco_sph - field) / np.linalg.norm(field),
)
#%%
##%% Fit the surface current for the auditory evoked response using pinv
# c = MeshConductor(mesh_obj=mesh, basis_name="suh", N_suh=35)
# M = c.mass
# B_sensors = np.einsum("ijk,ij->ik", c.B_coupling(p), n)
#
#
# asuh = np.linalg.pinv(B_sensors, rcond=1e-15) @ field
#
# s = StreamFunction(asuh, c)
# b_filt = B_sensors @ s
#%% Suh fit
c = MeshConductor(mesh_obj=mesh, basis_name="suh", N_suh=150)
M = c.mass
B_sensors = np.einsum("ijk,ij->ik", c.B_coupling(p), n)
ss = np.linalg.svd(B_sensors @ B_sensors.T, False, False)
reg_exp = 1
plot_this = True
rel_errors = []
_lambda = np.max(ss) * (10 ** (-reg_exp))
# Laplacian in the suh basis is diagonal
BB = B_sensors.T @ B_sensors + _lambda * (-c.laplacian) / np.max(abs(c.laplacian))
a = np.linalg.solve(BB, B_sensors.T @ field)
s = StreamFunction(a, c)
reco_suh = B_sensors @ s
print(
"SUH-reconstruction relative error:",
np.linalg.norm(reco_suh - field) / np.linalg.norm(field),
)
f = mlab.figure(bgcolor=(1, 1, 1))
surf = s.plot(False, figure=f)
surf.actor.mapper.interpolate_scalars_before_mapping = True
surf.module_manager.scalar_lut_manager.number_of_colors = 16
#%% Plot the evoked and the reconstructions
# evoked1 = evoked.copy()
# evoked1.data[:, :] = np.tile(field.T, (evoked.times.shape[0], 1)).T
# evoked1.plot_topomap(times=0.080, ch_type="mag")
# evoked1 = evoked.copy()
# evoked1.data[:, :] = np.tile(reco_sph.T, (evoked.times.shape[0], 1)).T
# evoked1.plot_topomap(times=0.080, ch_type="mag")
# evoked1 = evoked.copy()
# evoked1.data[:, :] = np.tile(reco_suh.T, (evoked.times.shape[0], 1)).T
# evoked1.plot_topomap(times=0.080, ch_type="mag")
#%% Plot spectra
fig, ax = plt.subplots(1, 1)
ax.plot(alpha ** 2)
L = np.zeros((0,))
M = np.zeros((0,))
for l in range(1, lmax + 1):
m_l = | np.arange(-l, l + 1, step=1, dtype=np.int_) | numpy.arange |
import sys
import numpy as np
import random
from os.path import join
from seisflows.tools import unix
from seisflows.workflow.inversion import inversion
from scipy.fftpack import fft, fftfreq
from seisflows.tools.array import loadnpy, savenpy
from seisflows.tools.seismic import setpar, setpararray
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
system = sys.modules['seisflows_system']
solver = sys.modules['seisflows_solver']
optimize = sys.modules['seisflows_optimize']
class inversion_se(inversion):
""" Waveform inversion with source encoding
"""
def check(self):
super().check()
# get random source
if 'RANDOM_OVER_IT' not in PAR:
setattr(PAR, 'RANDOM_OVER_IT', 1)
# increase frequency over iterations
if 'FREQ_INCREASE_PER_IT' not in PAR:
setattr(PAR, 'FREQ_INCREASE_PER_IT', 0)
# maximum frequency shift over iterations
if 'MAX_FREQ_SHIFT' not in PAR:
setattr(PAR, 'MAX_FREQ_SHIFT', None)
# number of frequency per event
if 'NFREQ_PER_EVENT' not in PAR:
setattr(PAR, 'NFREQ_PER_EVENT', 1)
# default number of super source
if 'NSRC' not in PAR:
setattr(PAR, 'NSRC', 1)
# number of timesteps after steady state
NTPSS = int(round(1/((PAR.FREQ_MAX-PAR.FREQ_MIN)/PAR.NEVT/PAR.NFREQ_PER_EVENT)/PAR.DT))
if 'NTPSS' in PAR:
            assert PAR.NTPSS == NTPSS
else:
setattr(PAR, 'NTPSS', NTPSS)
print('Number of timesteps after steady state:', NTPSS)
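        # Worked example (hypothetical parameter values): with FREQ_MIN=1 Hz,
        # FREQ_MAX=11 Hz, NEVT=5, NFREQ_PER_EVENT=2 and DT=0.01 s the frequency
        # spacing is (11 - 1) / (5 * 2) = 1 Hz, so
        # NTPSS = round(1 / 1 / 0.01) = 100 timesteps after steady state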
def setup(self):
super().setup()
unix.mkdir(join(PATH.FUNC, 'residuals'))
unix.mkdir(join(PATH.GRAD, 'residuals'))
def initialize(self):
""" Prepares for next model update iteration
"""
self.write_model(path=PATH.GRAD, suffix='new')
if PAR.RANDOM_OVER_IT or optimize.iter == 1:
self.get_random_frequencies()
print('Generating synthetics')
system.run('solver', 'eval_func',
hosts='all',
path=PATH.GRAD)
self.write_misfit(path=PATH.GRAD, suffix='new')
def clean(self):
super().clean()
unix.mkdir(join(PATH.FUNC, 'residuals'))
unix.mkdir(join(PATH.GRAD, 'residuals'))
def get_random_frequencies(self):
""" Randomly assign a unique frequency for each source
"""
# ref preprocess/ortho.py setup()
ntpss = PAR.NTPSS
dt = PAR.DT
nt = PAR.NT
nrec = PAR.NREC
nevt = PAR.NEVT
nfpe = PAR.NFREQ_PER_EVENT
nsrc = nevt * nfpe
freq_min = float(PAR.FREQ_MIN)
freq_max = float(PAR.FREQ_MAX)
        # read data processed by ortho
freq_idx = loadnpy(PATH.ORTHO + '/freq_idx')
freq = loadnpy(PATH.ORTHO + '/freq')
sff_obs = loadnpy(PATH.ORTHO + '/sff_obs')
ft_obs = loadnpy(PATH.ORTHO + '/ft_obs')
nfreq = len(freq_idx)
# ntrace = ft_obs.shape[3]
# declaring arrays
        ft_obs_se = np.zeros((nfreq, nrec), dtype=complex) # encoded frequency of observed seismogram
# frequency processing
# TODO freq_mask
freq_mask_se = np.ones((nfreq, nrec))
freq_shift = (optimize.iter - 1) * PAR.FREQ_INCREASE_PER_IT
if PAR.MAX_FREQ_SHIFT != None:
freq_shift = min(freq_shift, PAR.MAX_FREQ_SHIFT)
# random frequency
freq_range = np.linspace(freq_min + freq_shift, freq_max + freq_shift, nsrc + 1)[:-1]
freq_thresh = (freq_max - freq_min) / nsrc / 20
rdm_idx = random.sample(range(0, nsrc), nsrc) # randomly assign frequencies
freq_rdm = freq_range[rdm_idx]
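        # e.g. (hypothetical values) with nsrc=4, freq_min=1, freq_max=5 and no
        # frequency shift, freq_range = [1., 2., 3., 4.] and freq_rdm is a random
        # permutation of it, so every super-source gets a unique encoding frequency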
# assign frequencies
stf_filenames = [None] * nsrc
for ifpe in range(nfpe):
for ievt in range(nevt):
                isrc = ifpe * nevt + ievt # index of source
f0 = freq_rdm[isrc] # central frequency of source
# get sinus source time function
T = 2 * np.pi * dt * | np.linspace(0, nt - 1, nt) | numpy.linspace |
import numpy as np
def check_uv(u, v):
"""
Returns weightings for frequencies u and v
for anisotropic surfaces
"""
if abs(u) + abs(v) == 0:
return 4.
elif u * v == 0:
return 2.
return 1.
vcheck = np.vectorize(check_uv)
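# Minimal usage sketch (hypothetical frequency grid): the vectorised form accepts
# whole arrays of (u, v) pairs at once, e.g.
#   u_array, v_array = wave_arrays(2)    # helper defined further below
#   weights = vcheck(u_array, v_array)   # 4 at (0, 0), 2 on the axes, 1 elsewhere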
def wave_function(x, u, Lx):
"""
Wave in Fourier sum
"""
coeff = 2 * np.pi / Lx
if u >= 0:
return np.cos(coeff * u * x)
return np.sin(coeff * abs(u) * x)
def d_wave_function(x, u, Lx):
"""
First derivative of wave in Fourier sum wrt x
"""
coeff = 2 * np.pi / Lx
if u >= 0:
return - coeff * u * np.sin(coeff * u * x)
return coeff * abs(u) * np.cos(coeff * abs(u) * x)
def dd_wave_function(x, u, Lx):
"""
    Second derivative of wave in Fourier sum wrt x
"""
coeff = 2 * np.pi / Lx
return - coeff ** 2 * u ** 2 * wave_function(x, u, Lx)
def cos_sin_indices(u_array):
"""Return indices of wave function arrays for
both cos and sin functions"""
cos_indices = np.argwhere(u_array >= 0)
sin_indices = np.argwhere(u_array < 0)
return cos_indices, sin_indices
def wave_function_array(x, u_array, Lx):
"""
Returns numpy array of all waves in Fourier sum
"""
coeff = 2 * np.pi / Lx
q = coeff * np.abs(u_array) * x
cos_indices, sin_indices = cos_sin_indices(u_array)
f_array = np.zeros(u_array.shape)
f_array[cos_indices] += np.cos(q[cos_indices])
f_array[sin_indices] += np.sin(q[sin_indices])
return f_array
def d_wave_function_array(x, u_array, Lx):
"""
    Returns numpy array of all first derivatives of waves
    in Fourier sum
"""
coeff = 2 * np.pi / Lx
q = coeff * np.abs(u_array) * x
cos_indices, sin_indices = cos_sin_indices(u_array)
f_array = np.zeros(u_array.shape)
f_array[cos_indices] -= np.sin(q[cos_indices])
f_array[sin_indices] += np.cos(q[sin_indices])
f_array *= coeff * np.abs(u_array)
return f_array
def dd_wave_function_array(x, u_array, Lx):
"""Returns numpy array of all second derivatives
of waves in Fourier sum"""
coeff = 2 * np.pi / Lx
f_array = wave_function_array(x, u_array, Lx)
return - coeff ** 2 * u_array ** 2 * f_array
def wave_arrays(qm):
"""Return full arrays of each (u, v) 2D wave frequency
combination for a given maximum frequency, `qm`"""
v_mat, u_mat = np.meshgrid(
np.arange(-qm, qm + 1),
np.arange(-qm, qm + 1)
)
u_array = u_mat.flatten()
v_array = v_mat.flatten()
return u_array, v_array
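# Worked example: wave_arrays(1) returns u_array = [-1, -1, -1, 0, 0, 0, 1, 1, 1]
# and v_array = [-1, 0, 1, -1, 0, 1, -1, 0, 1], i.e. the (2 * qm + 1)**2 = 9
# frequency pairs of the truncated 2D Fourier sum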
def wave_indices(qu, u_array, v_array):
"""Return indices of both u_array and v_array that contain
waves resulting from truncation of `qu` upper bound
frequency"""
wave_mask = (
(u_array >= -qu) * (u_array <= qu)
* (v_array >= -qu) * (v_array <= qu)
)
indices = | np.argwhere(wave_mask) | numpy.argwhere |
"""Copyright 2020 Huawei Technologies Co., Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import numpy as np
import copy
import math
class WHENet(object):
"""WHENet"""
def __init__(self, camera_width, camera_height, whenet_model):
self.whenet = whenet_model
self.camera_height = camera_height
self.camera_width = camera_width
def inference(self, nparryList, box_width, box_height):
"""
WHENet preprocessing, inference and postprocessing
Args:
            nparryList: preprocessed head-area crop detected by YOLO v3
box_width: width of the detected area
box_height: height of the detected area
Returns:
            dict with the plotting end-point coordinates of the yaw/pitch/roll axis lines
"""
resultList_whenet = self.whenet.execute([nparryList])
# postprocessing: convert model output to yaw pitch roll value
yaw, pitch, roll = self.whenet_angle(resultList_whenet)
print('Yaw, pitch, roll angles: ', yaw, pitch, roll)
# obtain coordinate points from head pose angles for plotting
return self.whenet_draw(yaw, pitch, roll,
tdx=box_width, tdy=box_height, size=200)
def softmax(self, x):
"""softmax"""
x -= np.max(x, axis=1, keepdims=True)
a = np.exp(x)
b = np.sum(np.exp(x), axis=1, keepdims=True)
return a / b
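    # Note: softmax subtracts the row-wise maximum in place, so callers should not
    # reuse the input array afterwards. A minimal sketch of its effect
    # (hypothetical logits array):
    #   probs = self.softmax(np.random.rand(1, 120).astype(np.float32))
    #   # probs.sum(axis=1) is (approximately) 1.0 for every row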
def whenet_draw(self, yaw, pitch, roll, tdx=None, tdy=None, size=200):
"""
Plot lines based on yaw pitch roll values
Args:
yaw, pitch, roll: values of angles
tdx, tdy: center of detected head area
Returns:
            graph: dict with the end-point coordinates of the three pose-axis lines
"""
# taken from hopenet
pitch = pitch * np.pi / 180
yaw = -(yaw * np.pi / 180)
roll = roll * np.pi / 180
# X-Axis pointing to right. drawn in red
x1 = size * (math.cos(yaw) * math.cos(roll)) + tdx
y1 = size * (math.cos(pitch) * math.sin(roll) + math.cos(roll)
* math.sin(pitch) * math.sin(yaw)) + tdy
# Y-Axis | drawn in green
x2 = size * (-math.cos(yaw) * math.sin(roll)) + tdx
y2 = size * (math.cos(pitch) * math.cos(roll) - math.sin(pitch)
* math.sin(yaw) * math.sin(roll)) + tdy
# Z-Axis (out of the screen) drawn in blue
x3 = size * (math.sin(yaw)) + tdx
y3 = size * (-math.cos(yaw) * math.sin(pitch)) + tdy
return {
"yaw_x": x1,
"yaw_y": y1,
"pitch_x": x2,
"pitch_y": y2,
"roll_x": x3,
"roll_y": y3
}
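    # Worked example (hypothetical angles): for a pure 90 degree yaw with
    # pitch = roll = 0, the X-axis end point collapses onto (tdx, tdy), the
    # Y-axis end point is (tdx, tdy + size) and the Z-axis end point is
    # (tdx - size, tdy), i.e. the out-of-screen axis now points horizontally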
def whenet_angle(self, resultList_whenet):
"""
Obtain yaw pitch roll value in degree based on the output of model
Args:
resultList_whenet: result of WHENet
Returns:
yaw_predicted, pitch_predicted, roll_predicted: yaw pitch roll values
"""
yaw = resultList_whenet[0]
yaw = | np.reshape(yaw, (1, 120, 1, 1)) | numpy.reshape |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = IntervalIndex.from_arrays([0, 1], [1, 2])
self.index_with_nan = IntervalIndex.from_tuples(
[(0, 1), np.nan, (1, 2)])
self.indices = dict(intervalIndex=tm.makeIntervalIndex(10))
def create_index(self, closed='right'):
return IntervalIndex.from_breaks(range(11), closed=closed)
def create_index_with_nan(self, closed='right'):
mask = [True, False] + [True] * 8
return IntervalIndex.from_arrays(
np.where(mask, np.arange(10), np.nan),
np.where(mask, np.arange(1, 11), np.nan), closed=closed)
def test_constructors(self, closed, name):
left, right = Index([0, 1, 2, 3]), Index([1, 2, 3, 4])
ivs = [Interval(l, r, closed=closed) for l, r in lzip(left, right)]
expected = IntervalIndex._simple_new(
left=left, right=right, closed=closed, name=name)
result = IntervalIndex(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(ivs, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_arrays(
left.values, right.values, closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
lzip(left, right), closed=closed, name=name)
tm.assert_index_equal(result, expected)
result = Index(ivs, name=name)
assert isinstance(result, IntervalIndex)
tm.assert_index_equal(result, expected)
# idempotent
tm.assert_index_equal(Index(expected), expected)
tm.assert_index_equal(IntervalIndex(expected), expected)
result = IntervalIndex.from_intervals(expected)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_intervals(
expected.values, name=expected.name)
tm.assert_index_equal(result, expected)
left, right = expected.left, expected.right
result = IntervalIndex.from_arrays(
left, right, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
result = IntervalIndex.from_tuples(
expected.to_tuples(), closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
breaks = expected.left.tolist() + [expected.right[-1]]
result = IntervalIndex.from_breaks(
breaks, closed=expected.closed, name=expected.name)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [[np.nan], [np.nan] * 2, [np.nan] * 50])
def test_constructors_nan(self, closed, data):
# GH 18421
expected_values = np.array(data, dtype=object)
expected_idx = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_idx.closed == closed
tm.assert_numpy_array_equal(expected_idx.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks([np.nan] + data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_idx)
tm.assert_numpy_array_equal(result.values, expected_values)
@pytest.mark.parametrize('data', [
[],
np.array([], dtype='int64'),
np.array([], dtype='float64'),
np.array([], dtype=object)])
def test_constructors_empty(self, data, closed):
# GH 18421
expected_dtype = data.dtype if isinstance(data, np.ndarray) else object
expected_values = np.array([], dtype=object)
expected_index = IntervalIndex(data, closed=closed)
# validate the expected index
assert expected_index.empty
assert expected_index.closed == closed
assert expected_index.dtype.subtype == expected_dtype
tm.assert_numpy_array_equal(expected_index.values, expected_values)
result = IntervalIndex.from_tuples(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_breaks(data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
result = IntervalIndex.from_arrays(data, data, closed=closed)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
if closed == 'right':
# Can't specify closed for IntervalIndex.from_intervals
result = IntervalIndex.from_intervals(data)
tm.assert_index_equal(result, expected_index)
tm.assert_numpy_array_equal(result.values, expected_values)
def test_constructors_errors(self):
# scalar
msg = ('IntervalIndex\(...\) must be called with a collection of '
'some kind, 5 was passed')
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex(5)
# not an interval
msg = ("type <(class|type) 'numpy.int64'> with value 0 "
"is not an interval")
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex([0, 1])
with tm.assert_raises_regex(TypeError, msg):
IntervalIndex.from_intervals([0, 1])
# invalid closed
msg = "invalid options for 'closed': invalid"
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays([0, 1], [1, 2], closed='invalid')
# mismatched closed within intervals
msg = 'intervals must all be closed on the same side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_intervals([Interval(0, 1),
Interval(1, 2, closed='left')])
with tm.assert_raises_regex(ValueError, msg):
Index([Interval(0, 1), Interval(2, 3, closed='left')])
# mismatched closed inferred from intervals vs constructor.
msg = 'conflicting values for closed'
with tm.assert_raises_regex(ValueError, msg):
iv = [Interval(0, 1, closed='both'), Interval(1, 2, closed='both')]
IntervalIndex(iv, closed='neither')
# no point in nesting periods in an IntervalIndex
msg = 'Period dtypes are not supported, use a PeriodIndex instead'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(
pd.period_range('2000-01-01', periods=3))
# decreasing breaks/arrays
msg = 'left side of interval must be <= right side'
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_breaks(range(10, -1, -1))
with tm.assert_raises_regex(ValueError, msg):
IntervalIndex.from_arrays(range(10, -1, -1), range(9, -2, -1))
def test_constructors_datetimelike(self, closed):
# DTI / TDI
for idx in [pd.date_range('20130101', periods=5),
pd.timedelta_range('1 day', periods=5)]:
result = IntervalIndex.from_breaks(idx, closed=closed)
expected = IntervalIndex.from_breaks(idx.values, closed=closed)
tm.assert_index_equal(result, expected)
expected_scalar_type = type(idx[0])
i = result[0]
assert isinstance(i.left, expected_scalar_type)
assert isinstance(i.right, expected_scalar_type)
def test_constructors_error(self):
# non-intervals
def f():
IntervalIndex.from_intervals([0.997, 4.0])
pytest.raises(TypeError, f)
def test_properties(self, closed):
index = self.create_index(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
tm.assert_index_equal(index.left, Index(np.arange(10)))
tm.assert_index_equal(index.right, Index(np.arange(1, 11)))
tm.assert_index_equal(index.mid, Index(np.arange(0.5, 10.5)))
assert index.closed == closed
ivs = [Interval(l, r, closed) for l, r in zip(range(10), range(1, 11))]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
# with nans
index = self.create_index_with_nan(closed=closed)
assert len(index) == 10
assert index.size == 10
assert index.shape == (10, )
expected_left = Index([0, np.nan, 2, 3, 4, 5, 6, 7, 8, 9])
expected_right = expected_left + 1
expected_mid = expected_left + 0.5
tm.assert_index_equal(index.left, expected_left)
tm.assert_index_equal(index.right, expected_right)
tm.assert_index_equal(index.mid, expected_mid)
assert index.closed == closed
ivs = [Interval(l, r, closed) if notna(l) else np.nan
for l, r in zip(expected_left, expected_right)]
expected = np.array(ivs, dtype=object)
tm.assert_numpy_array_equal(np.asarray(index), expected)
tm.assert_numpy_array_equal(index.values, expected)
def test_with_nans(self, closed):
index = self.create_index(closed=closed)
assert not index.hasnans
result = index.isna()
expected = np.repeat(False, len(index))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.repeat(True, len(index))
tm.assert_numpy_array_equal(result, expected)
index = self.create_index_with_nan(closed=closed)
assert index.hasnans
result = index.isna()
expected = np.array([False, True] + [False] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
result = index.notna()
expected = np.array([True, False] + [True] * (len(index) - 2))
tm.assert_numpy_array_equal(result, expected)
def test_copy(self, closed):
expected = self.create_index(closed=closed)
result = expected.copy()
assert result.equals(expected)
result = expected.copy(deep=True)
assert result.equals(expected)
assert result.left is not expected.left
def test_ensure_copied_data(self, closed):
# exercise the copy flag in the constructor
# not copying
index = self.create_index(closed=closed)
result = IntervalIndex(index, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='same')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='same')
# by-definition make a copy
result = IntervalIndex.from_intervals(index.values, copy=False)
tm.assert_numpy_array_equal(index.left.values, result.left.values,
check_same='copy')
tm.assert_numpy_array_equal(index.right.values, result.right.values,
check_same='copy')
def test_equals(self, closed):
expected = IntervalIndex.from_breaks(np.arange(5), closed=closed)
assert expected.equals(expected)
assert expected.equals(expected.copy())
assert not expected.equals(expected.astype(object))
assert not expected.equals(np.array(expected))
assert not expected.equals(list(expected))
assert not expected.equals([1, 2])
assert not expected.equals(np.array([1, 2]))
assert not expected.equals(pd.date_range('20130101', periods=2))
expected_name1 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='foo')
expected_name2 = IntervalIndex.from_breaks(
np.arange(5), closed=closed, name='bar')
assert expected.equals(expected_name1)
assert expected_name1.equals(expected_name2)
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
expected_other_closed = IntervalIndex.from_breaks(
np.arange(5), closed=other_closed)
assert not expected.equals(expected_other_closed)
def test_astype(self, closed):
idx = self.create_index(closed=closed)
for dtype in [np.int64, np.float64, 'datetime64[ns]',
'datetime64[ns, US/Eastern]', 'timedelta64',
'period[M]']:
pytest.raises(ValueError, idx.astype, dtype)
result = idx.astype(object)
tm.assert_index_equal(result, Index(idx.values, dtype='object'))
assert not idx.equals(result)
assert idx.equals(IntervalIndex.from_intervals(result))
result = idx.astype('interval')
tm.assert_index_equal(result, idx)
assert result.equals(idx)
result = idx.astype('category')
expected = pd.Categorical(idx, ordered=True)
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize('klass', [list, tuple, np.array, pd.Series])
def test_where(self, closed, klass):
idx = self.create_index(closed=closed)
cond = [True] * len(idx)
expected = idx
result = expected.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * len(idx[1:])
expected = IntervalIndex([np.nan] + idx[1:].tolist())
result = idx.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_delete(self, closed):
expected = IntervalIndex.from_breaks(np.arange(1, 11), closed=closed)
result = self.create_index(closed=closed).delete(0)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('data', [
interval_range(0, periods=10, closed='neither'),
interval_range(1.7, periods=8, freq=2.5, closed='both'),
interval_range(Timestamp('20170101'), periods=12, closed='left'),
interval_range(Timedelta('1 day'), periods=6, closed='right'),
IntervalIndex.from_tuples([('a', 'd'), ('e', 'j'), ('w', 'z')]),
IntervalIndex.from_tuples([(1, 2), ('a', 'z'), (3.14, 6.28)])])
def test_insert(self, data):
item = data[0]
idx_item = IntervalIndex([item])
# start
expected = idx_item.append(data)
result = data.insert(0, item)
tm.assert_index_equal(result, expected)
# end
expected = data.append(idx_item)
result = data.insert(len(data), item)
tm.assert_index_equal(result, expected)
# mid
expected = data[:3].append(idx_item).append(data[3:])
result = data.insert(3, item)
tm.assert_index_equal(result, expected)
# invalid type
msg = 'can only insert Interval objects and NA into an IntervalIndex'
with tm.assert_raises_regex(ValueError, msg):
data.insert(1, 'foo')
# invalid closed
msg = 'inserted item must be closed on the same side as the index'
for closed in {'left', 'right', 'both', 'neither'} - {item.closed}:
with tm.assert_raises_regex(ValueError, msg):
bad_item = Interval(item.left, item.right, closed=closed)
data.insert(1, bad_item)
# GH 18295 (test missing)
na_idx = IntervalIndex([np.nan], closed=data.closed)
for na in (np.nan, pd.NaT, None):
expected = data[:1].append(na_idx).append(data[1:])
result = data.insert(1, na)
tm.assert_index_equal(result, expected)
def test_take(self, closed):
index = self.create_index(closed=closed)
result = index.take(range(10))
tm.assert_index_equal(result, index)
result = index.take([0, 0, 1])
expected = IntervalIndex.from_arrays(
[0, 0, 1], [1, 1, 2], closed=closed)
tm.assert_index_equal(result, expected)
def test_unique(self, closed):
# unique non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_unique
# unique overlapping - distinct endpoints
idx = IntervalIndex.from_tuples([(0, 1), (0.5, 1.5)], closed=closed)
assert idx.is_unique
# unique overlapping - shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_unique
# unique nested
idx = IntervalIndex.from_tuples([(-1, 1), (-2, 2)], closed=closed)
assert idx.is_unique
# duplicate
idx = IntervalIndex.from_tuples(
[(0, 1), (0, 1), (2, 3)], closed=closed)
assert not idx.is_unique
# unique mixed
idx = IntervalIndex.from_tuples([(0, 1), ('a', 'b')], closed=closed)
assert idx.is_unique
# duplicate mixed
idx = IntervalIndex.from_tuples(
[(0, 1), ('a', 'b'), (0, 1)], closed=closed)
assert not idx.is_unique
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_unique
def test_monotonic(self, closed):
# increasing non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (2, 3), (4, 5)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing non-overlapping
idx = IntervalIndex.from_tuples(
[(4, 5), (2, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered non-overlapping
idx = IntervalIndex.from_tuples(
[(0, 1), (4, 5), (2, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping
idx = IntervalIndex.from_tuples(
[(0, 2), (0.5, 2.5), (1, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping
idx = IntervalIndex.from_tuples(
[(1, 3), (0.5, 2.5), (0, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# unordered overlapping
idx = IntervalIndex.from_tuples(
[(0.5, 2.5), (0, 2), (1, 3)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# increasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(1, 2), (1, 3), (2, 3)], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert not idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# decreasing overlapping shared endpoints
idx = pd.IntervalIndex.from_tuples(
[(2, 3), (1, 3), (1, 2)], closed=closed)
assert not idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
# stationary
idx = IntervalIndex.from_tuples([(0, 1), (0, 1)], closed=closed)
assert idx.is_monotonic
assert not idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert not idx._is_strictly_monotonic_decreasing
# empty
idx = IntervalIndex([], closed=closed)
assert idx.is_monotonic
assert idx._is_strictly_monotonic_increasing
assert idx.is_monotonic_decreasing
assert idx._is_strictly_monotonic_decreasing
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr(self):
i = IntervalIndex.from_tuples([(0, 1), (1, 2)], closed='right')
expected = ("IntervalIndex(left=[0, 1],"
"\n right=[1, 2],"
"\n closed='right',"
"\n dtype='interval[int64]')")
assert repr(i) == expected
        i = IntervalIndex.from_tuples([(Timestamp('20130101'),
                                        Timestamp('20130102')),
                                       (Timestamp('20130102'),
                                        Timestamp('20130103'))],
                                      closed='right')
expected = ("IntervalIndex(left=['2013-01-01', '2013-01-02'],"
"\n right=['2013-01-02', '2013-01-03'],"
"\n closed='right',"
"\n dtype='interval[datetime64[ns]]')")
assert repr(i) == expected
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_max_seq_item_setting(self):
super(TestIntervalIndex, self).test_repr_max_seq_item_setting()
@pytest.mark.xfail(reason='not a valid repr as we use interval notation')
def test_repr_roundtrip(self):
super(TestIntervalIndex, self).test_repr_roundtrip()
def test_get_item(self, closed):
i = IntervalIndex.from_arrays((0, 1, np.nan), (1, 2, np.nan),
closed=closed)
assert i[0] == Interval(0.0, 1.0, closed=closed)
assert i[1] == Interval(1.0, 2.0, closed=closed)
assert isna(i[2])
result = i[0:1]
expected = IntervalIndex.from_arrays((0.,), (1.,), closed=closed)
tm.assert_index_equal(result, expected)
result = i[0:2]
expected = IntervalIndex.from_arrays((0., 1), (1., 2.), closed=closed)
tm.assert_index_equal(result, expected)
result = i[1:3]
expected = IntervalIndex.from_arrays((1., np.nan), (2., np.nan),
closed=closed)
tm.assert_index_equal(result, expected)
def test_get_loc_value(self):
pytest.raises(KeyError, self.index.get_loc, 0)
assert self.index.get_loc(0.5) == 0
assert self.index.get_loc(1) == 0
assert self.index.get_loc(1.5) == 1
assert self.index.get_loc(2) == 1
pytest.raises(KeyError, self.index.get_loc, -1)
pytest.raises(KeyError, self.index.get_loc, 3)
idx = IntervalIndex.from_tuples([(0, 2), (1, 3)])
assert idx.get_loc(0.5) == 0
assert idx.get_loc(1) == 0
tm.assert_numpy_array_equal(idx.get_loc(1.5),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(idx.get_loc(2)),
np.array([0, 1], dtype='int64'))
assert idx.get_loc(3) == 1
pytest.raises(KeyError, idx.get_loc, 3.5)
idx = IntervalIndex.from_arrays([0, 2], [1, 3])
pytest.raises(KeyError, idx.get_loc, 1.5)
def slice_locs_cases(self, breaks):
# TODO: same tests for more index types
        index = IntervalIndex.from_breaks(breaks, closed='right')
assert index.slice_locs() == (0, 2)
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(0, 0.5) == (0, 1)
assert index.slice_locs(start=1) == (0, 2)
assert index.slice_locs(start=1.2) == (1, 2)
assert index.slice_locs(end=1) == (0, 1)
assert index.slice_locs(end=1.1) == (0, 2)
assert index.slice_locs(end=1.0) == (0, 1)
assert index.slice_locs(-1, -1) == (0, 0)
index = IntervalIndex.from_breaks([0, 1, 2], closed='neither')
assert index.slice_locs(0, 1) == (0, 1)
assert index.slice_locs(0, 2) == (0, 2)
assert index.slice_locs(0.5, 1.5) == (0, 2)
assert index.slice_locs(1, 1) == (1, 1)
assert index.slice_locs(1, 2) == (1, 2)
index = IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)],
closed='both')
assert index.slice_locs(1, 1) == (0, 1)
assert index.slice_locs(1, 2) == (0, 2)
def test_slice_locs_int64(self):
self.slice_locs_cases([0, 1, 2])
def test_slice_locs_float64(self):
self.slice_locs_cases([0.0, 1.0, 2.0])
def slice_locs_decreasing_cases(self, tuples):
index = IntervalIndex.from_tuples(tuples)
assert index.slice_locs(1.5, 0.5) == (1, 3)
assert index.slice_locs(2, 0) == (1, 3)
assert index.slice_locs(2, 1) == (1, 3)
assert index.slice_locs(3, 1.1) == (0, 3)
assert index.slice_locs(3, 3) == (0, 2)
assert index.slice_locs(3.5, 3.3) == (0, 1)
assert index.slice_locs(1, -3) == (2, 3)
slice_locs = index.slice_locs(-1, -1)
assert slice_locs[0] == slice_locs[1]
def test_slice_locs_decreasing_int64(self):
        self.slice_locs_decreasing_cases([(2, 4), (1, 3), (0, 2)])
def test_slice_locs_decreasing_float64(self):
        self.slice_locs_decreasing_cases([(2., 4.), (1., 3.), (0., 2.)])
def test_slice_locs_fails(self):
index = IntervalIndex.from_tuples([(1, 2), (0, 1), (2, 3)])
with pytest.raises(KeyError):
index.slice_locs(1, 2)
def test_get_loc_interval(self):
assert self.index.get_loc(Interval(0, 1)) == 0
assert self.index.get_loc(Interval(0, 0.5)) == 0
assert self.index.get_loc(Interval(0, 1, 'left')) == 0
pytest.raises(KeyError, self.index.get_loc, Interval(2, 3))
pytest.raises(KeyError, self.index.get_loc,
Interval(-1, 0, 'left'))
def test_get_indexer(self):
actual = self.index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(self.index)
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
index = IntervalIndex.from_breaks([0, 1, 2], closed='left')
actual = index.get_indexer([-1, 0, 0.5, 1, 1.5, 2, 3])
expected = np.array([-1, 0, 0, 1, 1, -1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index[:1])
expected = np.array([0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(index)
expected = np.array([-1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_subintervals(self):
# TODO: is this right?
# return indexers for wholly contained subintervals
target = IntervalIndex.from_breaks(np.linspace(0, 2, 5))
actual = self.index.get_indexer(target)
        expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.67, 1.33, 2])
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index.get_indexer(target[[0, -1]])
expected = np.array([0, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
target = IntervalIndex.from_breaks([0, 0.33, 0.67, 1], closed='left')
actual = self.index.get_indexer(target)
expected = np.array([0, 0, 0], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_contains(self):
# Only endpoints are valid.
i = IntervalIndex.from_arrays([0, 1], [1, 2])
# Invalid
assert 0 not in i
assert 1 not in i
assert 2 not in i
# Valid
assert Interval(0, 1) in i
assert Interval(0, 2) in i
assert Interval(0, 0.5) in i
assert Interval(3, 5) not in i
assert Interval(-1, 0, closed='left') not in i
def testcontains(self):
# can select values that are IN the range of a value
i = IntervalIndex.from_arrays([0, 1], [1, 2])
assert i.contains(0.1)
assert i.contains(0.5)
assert i.contains(1)
assert i.contains(Interval(0, 1))
assert i.contains(Interval(0, 2))
# these overlaps completely
assert i.contains(Interval(0, 3))
assert i.contains(Interval(1, 3))
assert not i.contains(20)
assert not i.contains(-20)
def test_dropna(self, closed):
expected = IntervalIndex.from_tuples(
[(0.0, 1.0), (1.0, 2.0)], closed=closed)
ii = IntervalIndex.from_tuples([(0, 1), (1, 2), np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
ii = IntervalIndex.from_arrays(
[0, 1, np.nan], [1, 2, np.nan], closed=closed)
result = ii.dropna()
tm.assert_index_equal(result, expected)
def test_non_contiguous(self, closed):
index = IntervalIndex.from_tuples([(0, 1), (2, 3)], closed=closed)
target = [0.5, 1.5, 2.5]
actual = index.get_indexer(target)
expected = np.array([0, -1, 1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
assert 1.5 not in index
def test_union(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(13), closed=closed)
result = index.union(other)
tm.assert_index_equal(result, expected)
result = other.union(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.union(index), index)
tm.assert_index_equal(index.union(index[:1]), index)
def test_intersection(self, closed):
index = self.create_index(closed=closed)
other = IntervalIndex.from_breaks(range(5, 13), closed=closed)
expected = IntervalIndex.from_breaks(range(5, 11), closed=closed)
result = index.intersection(other)
tm.assert_index_equal(result, expected)
result = other.intersection(index)
tm.assert_index_equal(result, expected)
tm.assert_index_equal(index.intersection(index), index)
def test_difference(self, closed):
index = self.create_index(closed=closed)
tm.assert_index_equal(index.difference(index[:1]), index[1:])
def test_symmetric_difference(self, closed):
idx = self.create_index(closed=closed)
result = idx[1:].symmetric_difference(idx[:-1])
expected = IntervalIndex([idx[0], idx[-1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('op_name', [
'union', 'intersection', 'difference', 'symmetric_difference'])
def test_set_operation_errors(self, closed, op_name):
index = self.create_index(closed=closed)
set_op = getattr(index, op_name)
# test errors
msg = ('can only do set operations between two IntervalIndex objects '
'that are closed on the same side')
with tm.assert_raises_regex(ValueError, msg):
set_op(Index([1, 2, 3]))
for other_closed in {'right', 'left', 'both', 'neither'} - {closed}:
other = self.create_index(closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
set_op(other)
def test_isin(self, closed):
index = self.create_index(closed=closed)
expected = np.array([True] + [False] * (len(index) - 1))
result = index.isin(index[:1])
tm.assert_numpy_array_equal(result, expected)
result = index.isin([index[0]])
tm.assert_numpy_array_equal(result, expected)
other = IntervalIndex.from_breaks(np.arange(-2, 10), closed=closed)
expected = np.array([True] * (len(index) - 1) + [False])
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
for other_closed in {'right', 'left', 'both', 'neither'}:
other = self.create_index(closed=other_closed)
expected = np.repeat(closed == other_closed, len(index))
result = index.isin(other)
tm.assert_numpy_array_equal(result, expected)
result = index.isin(other.tolist())
tm.assert_numpy_array_equal(result, expected)
def test_comparison(self):
actual = Interval(0, 1) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = Interval(0.5, 1.5) < self.index
expected = np.array([False, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > Interval(0.5, 1.5)
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index <= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index >= self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index < self.index
expected = np.array([False, False])
tm.assert_numpy_array_equal(actual, expected)
actual = self.index > self.index
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == IntervalIndex.from_breaks([0, 1, 2], 'left')
tm.assert_numpy_array_equal(actual, expected)
actual = self.index == self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index.values == self.index
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index <= self.index.values
tm.assert_numpy_array_equal(actual, np.array([True, True]))
actual = self.index != self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index > self.index.values
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index.values > self.index
tm.assert_numpy_array_equal(actual, np.array([False, False]))
# invalid comparisons
actual = self.index == 0
tm.assert_numpy_array_equal(actual, np.array([False, False]))
actual = self.index == self.index.left
tm.assert_numpy_array_equal(actual, np.array([False, False]))
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index > 0
with tm.assert_raises_regex(TypeError, 'unorderable types'):
self.index <= 0
with pytest.raises(TypeError):
self.index > np.arange(2)
with pytest.raises(ValueError):
self.index > np.arange(3)
def test_missing_values(self, closed):
idx = Index([np.nan, Interval(0, 1, closed=closed),
Interval(1, 2, closed=closed)])
idx2 = IntervalIndex.from_arrays(
[np.nan, 0, 1], [np.nan, 1, 2], closed=closed)
assert idx.equals(idx2)
with pytest.raises(ValueError):
IntervalIndex.from_arrays(
[np.nan, 0, 1], np.array([0, 1, 2]), closed=closed)
tm.assert_numpy_array_equal(isna(idx),
np.array([True, False, False]))
def test_sort_values(self, closed):
index = self.create_index(closed=closed)
result = index.sort_values()
tm.assert_index_equal(result, index)
result = index.sort_values(ascending=False)
tm.assert_index_equal(result, index[::-1])
# with nan
index = IntervalIndex([Interval(1, 2), np.nan, Interval(0, 1)])
result = index.sort_values()
expected = IntervalIndex([Interval(0, 1), Interval(1, 2), np.nan])
tm.assert_index_equal(result, expected)
result = index.sort_values(ascending=False)
expected = IntervalIndex([np.nan, Interval(1, 2), Interval(0, 1)])
tm.assert_index_equal(result, expected)
def test_datetime(self):
dates = date_range('2000', periods=3)
idx = IntervalIndex.from_breaks(dates)
tm.assert_index_equal(idx.left, dates[:2])
tm.assert_index_equal(idx.right, dates[-2:])
expected = date_range('2000-01-01T12:00', periods=2)
tm.assert_index_equal(idx.mid, expected)
assert Timestamp('2000-01-01T12') not in idx
target = date_range('1999-12-31T12:00', periods=7, freq='12H')
actual = idx.get_indexer(target)
expected = np.array([-1, -1, 0, 0, 1, 1, -1], dtype='intp')
tm.assert_numpy_array_equal(actual, expected)
def test_append(self, closed):
index1 = IntervalIndex.from_arrays([0, 1], [1, 2], closed=closed)
index2 = IntervalIndex.from_arrays([1, 2], [2, 3], closed=closed)
result = index1.append(index2)
expected = IntervalIndex.from_arrays(
[0, 1, 1, 2], [1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
result = index1.append([index1, index2])
expected = IntervalIndex.from_arrays(
[0, 1, 0, 1, 1, 2], [1, 2, 1, 2, 2, 3], closed=closed)
tm.assert_index_equal(result, expected)
msg = ('can only append two IntervalIndex objects that are closed '
'on the same side')
for other_closed in {'left', 'right', 'both', 'neither'} - {closed}:
index_other_closed = IntervalIndex.from_arrays(
[0, 1], [1, 2], closed=other_closed)
with tm.assert_raises_regex(ValueError, msg):
index1.append(index_other_closed)
def test_is_non_overlapping_monotonic(self, closed):
# Should be True in all cases
tpls = [(0, 1), (2, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is True
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is True
# Should be False in all cases (overlapping)
tpls = [(0, 2), (1, 3), (4, 5), (6, 7)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
# Should be False in all cases (non-monotonic)
tpls = [(0, 1), (2, 3), (6, 7), (4, 5)]
idx = IntervalIndex.from_tuples(tpls, closed=closed)
assert idx.is_non_overlapping_monotonic is False
idx = IntervalIndex.from_tuples(tpls[::-1], closed=closed)
assert idx.is_non_overlapping_monotonic is False
        # Should be False for closed='both', otherwise True (GH16560)
if closed == 'both':
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is False
else:
idx = IntervalIndex.from_breaks(range(4), closed=closed)
assert idx.is_non_overlapping_monotonic is True
class TestIntervalRange(object):
def test_construction_from_numeric(self, closed, name):
# combinations of start/end/periods without freq
expected = IntervalIndex.from_breaks(
np.arange(0, 6), name=name, closed=closed)
result = interval_range(start=0, end=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=5, periods=5, name=name, closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with freq
expected = IntervalIndex.from_tuples([(0, 2), (2, 4), (4, 6)],
name=name, closed=closed)
result = interval_range(start=0, end=6, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=0, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=6, periods=3, freq=2, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
expected = IntervalIndex.from_tuples([(0.0, 1.5), (1.5, 3.0)],
name=name, closed=closed)
result = interval_range(start=0, end=4, freq=1.5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timestamp(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-06')
breaks = date_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-07')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2017-01-08')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with non-fixed freq
freq = 'M'
start, end = Timestamp('2017-01-01'), Timestamp('2017-12-31')
breaks = date_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=11, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timestamp('2018-01-15')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_construction_from_timedelta(self, closed, name):
# combinations of start/end/periods without freq
start, end = Timedelta('1 day'), Timedelta('6 days')
breaks = timedelta_range(start=start, end=end)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=5, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# combinations of start/end/periods with fixed freq
freq = '2D'
start, end = Timedelta('1 day'), Timedelta('7 days')
breaks = timedelta_range(start=start, end=end, freq=freq)
expected = IntervalIndex.from_breaks(breaks, name=name, closed=closed)
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(start=start, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
result = interval_range(end=end, periods=3, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
# output truncates early if freq causes end to be skipped.
end = Timedelta('7 days 1 hour')
result = interval_range(start=start, end=end, freq=freq, name=name,
closed=closed)
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
# float value for periods
expected = pd.interval_range(start=0, periods=10)
result = pd.interval_range(start=0, periods=10.5)
tm.assert_index_equal(result, expected)
# equivalent timestamp-like start/end
start, end = Timestamp('2017-01-01'), Timestamp('2017-01-15')
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pydatetime(),
end=end.to_pydatetime())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timestamp
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1),
DateOffset(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
# equivalent timedelta-like start/end
start, end = Timedelta(days=1), Timedelta(days=10)
expected = pd.interval_range(start=start, end=end)
result = pd.interval_range(start=start.to_pytimedelta(),
end=end.to_pytimedelta())
tm.assert_index_equal(result, expected)
result = pd.interval_range(start=start.asm8, end=end.asm8)
tm.assert_index_equal(result, expected)
# equivalent freq with timedelta
equiv_freq = ['D', Day(), Timedelta(days=1), timedelta(days=1)]
for freq in equiv_freq:
result = pd.interval_range(start=start, end=end, freq=freq)
tm.assert_index_equal(result, expected)
def test_errors(self):
# not enough params
msg = ('Of the three parameters: start, end, and periods, '
'exactly two must be specified')
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0)
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=5)
with tm.assert_raises_regex(ValueError, msg):
interval_range(periods=2)
with tm.assert_raises_regex(ValueError, msg):
interval_range()
# too many params
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0, end=5, periods=6)
# mixed units
msg = 'start, end, freq need to be type compatible'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=Timestamp('20130101'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=Timedelta('1 day'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'), end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'),
end=Timedelta('1 day'), freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timestamp('20130101'),
end=Timestamp('20130110'), freq=2)
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'), end=10, freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'),
end=Timestamp('20130110'), freq='D')
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=Timedelta('1 day'),
end=Timedelta('10 days'), freq=2)
# invalid periods
msg = 'periods must be a number, got foo'
with tm.assert_raises_regex(TypeError, msg):
interval_range(start=0, periods='foo')
# invalid start
msg = 'start must be numeric or datetime-like, got foo'
with tm.assert_raises_regex(ValueError, msg):
interval_range(start='foo', periods=10)
# invalid end
msg = r'end must be numeric or datetime-like, got \(0, 1\]'
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Interval(0, 1), periods=10)
# invalid freq for datetime-like
msg = 'freq must be numeric or convertible to DateOffset, got foo'
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=0, end=10, freq='foo')
with tm.assert_raises_regex(ValueError, msg):
interval_range(start=Timestamp('20130101'), periods=10, freq='foo')
with tm.assert_raises_regex(ValueError, msg):
interval_range(end=Timedelta('1 day'), periods=10, freq='foo')
class TestIntervalTree(object):
def setup_method(self, method):
        def gentree(dtype):
            return IntervalTree(np.arange(5, dtype=dtype),
                                np.arange(5, dtype=dtype) + 2)
self.tree = gentree('int64')
self.trees = {dtype: gentree(dtype)
for dtype in ['int32', 'int64', 'float32', 'float64']}
def test_get_loc(self):
for dtype, tree in self.trees.items():
tm.assert_numpy_array_equal(tree.get_loc(1),
np.array([0], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(tree.get_loc(2)),
np.array([0, 1], dtype='int64'))
with pytest.raises(KeyError):
tree.get_loc(-1)
def test_get_indexer(self):
for dtype, tree in self.trees.items():
tm.assert_numpy_array_equal(
tree.get_indexer(np.array([1.0, 5.5, 6.5])),
np.array([0, 4, -1], dtype='int64'))
with pytest.raises(KeyError):
tree.get_indexer(np.array([3.0]))
def test_get_indexer_non_unique(self):
indexer, missing = self.tree.get_indexer_non_unique(
np.array([1.0, 2.0, 6.5]))
tm.assert_numpy_array_equal(indexer[:1],
np.array([0], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(indexer[1:3]),
np.array([0, 1], dtype='int64'))
tm.assert_numpy_array_equal(np.sort(indexer[3:]),
np.array([-1], dtype='int64'))
tm.assert_numpy_array_equal(missing, np.array([2], dtype='int64'))
def test_duplicates(self):
tree = IntervalTree([0, 0, 0], [1, 1, 1])
tm.assert_numpy_array_equal(np.sort(tree.get_loc(0.5)),
np.array([0, 1, 2], dtype='int64'))
with pytest.raises(KeyError):
tree.get_indexer(np.array([0.5]))
indexer, missing = tree.get_indexer_non_unique(np.array([0.5]))
tm.assert_numpy_array_equal(np.sort(indexer),
np.array([0, 1, 2], dtype='int64'))
tm.assert_numpy_array_equal(missing, np.array([], dtype='int64'))
def test_get_loc_closed(self):
for closed in ['left', 'right', 'both', 'neither']:
tree = IntervalTree([0], [1], closed=closed)
for p, errors in [(0, tree.open_left),
(1, tree.open_right)]:
if errors:
with pytest.raises(KeyError):
tree.get_loc(p)
else:
tm.assert_numpy_array_equal(tree.get_loc(p),
np.array([0], dtype='int64'))
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="int type mismatch on 32bit")
def test_get_indexer_closed(self):
x = | np.arange(1000, dtype='float64') | numpy.arange |
# set environment variable:
# export COCOTB_REDUCED_LOG_FMT=true to prettify output so that it fits on the screen width
import random
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import Join
import numpy as np
from collections import defaultdict
from collections import deque
import dill
import sys
from pprint import pprint as pp
from rl_utils import get_state_reward, tick, reset, Switcher, ACTION_NAME_MAP, initialize_counters, initialize_inputs
def epsilon_greedy(Q, state, nA, eps):
"""Selects epsilon-greedy action for supplied state.
Params
======
Q (dictionary): action-value function
state (int): current state
nA (int): number actions in the environment
eps (float): epsilon
"""
    if random.random() > eps:  # select the greedy action with probability 1 - eps
return | np.argmax(Q[state]) | numpy.argmax |
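# A complete, self-contained sketch of the epsilon-greedy rule documented above,
# with the uniform exploration branch included. The function name is illustrative
# only; `Q` is assumed to be a dict mapping state -> array of action values.
def epsilon_greedy_sketch(Q, state, nA, eps):
    """Return the greedy action with probability 1 - eps, else a random action."""
    if random.random() > eps:
        return int(np.argmax(Q[state]))           # exploit: best known action
    return int(random.choice(np.arange(nA)))      # explore: uniform over actions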
import os, sys
import inspect
currentdir = os.path.dirname(os.path.abspath(
inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import numpy as np
import pytest
import librosa
import soundpy as sp
test_dir = 'test_audio/'
test_audiofile = '{}audio2channels.wav'.format(test_dir)
test_traffic = '{}traffic.wav'.format(test_dir)
test_python = '{}python.wav'.format(test_dir)
test_horn = '{}car_horn.wav'.format(test_dir)
samples_48000, sr_48000 = librosa.load(test_audiofile, sr=48000)
samples_44100, sr_44100 = librosa.load(test_audiofile, sr=44100)
samples_22050, sr_22050 = librosa.load(test_audiofile, sr=22050)
samples_16000, sr_16000 = librosa.load(test_audiofile, sr=16000)
samples_8000, sr_8000 = librosa.load(test_audiofile, sr=8000)
def test_shape_samps_channels_mono():
input_data = np.array([1,2,3,4,5])
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data, output_data)
def test_shape_samps_channels_stereo_correct():
input_data = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(5,2)
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data, output_data)
def test_shape_samps_channels_stereo_incorrect():
input_data = np.array([1,2,3,4,5,6,7,8,9,10]).reshape(2,5)
output_data = sp.dsp.shape_samps_channels(input_data)
assert np.array_equal(input_data.T, output_data)
def test_calc_phase():
np.random.seed(seed=0)
rand_fft = np.random.random(2) + np.random.random(2) * 1j
phase = sp.dsp.calc_phase(rand_fft)
value1 = np.array([0.67324134+0.73942281j, 0.79544405+0.60602703j])
assert np.allclose(value1, phase)
def test_calc_phase_framelength10_default():
frame_length = 10
time = np.arange(0, 10, 0.1)
signal = np.sin(time)[:frame_length]
fft_vals = np.fft.fft(signal)
phase = sp.dsp.calc_phase(fft_vals)
value1 = np.array([ 1. +0.j, -0.37872566+0.92550898j])
assert np.allclose(value1, phase[:2])
def test_calc_phase_framelength10_radiansTrue():
frame_length = 10
time = np.arange(0, 10, 0.1)
signal = np.sin(time)[:frame_length]
fft_vals = np.fft.fft(signal)
phase = sp.dsp.calc_phase(fft_vals, radians = True)
value1 = np.array([ 0., 1.95921533])
assert np.allclose(value1, phase[:2])
def test_reconstruct_whole_spectrum():
x = np.array([3.,2.,1.,0.,0.,0.,0.])
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x)
expected = np.array([3., 2., 1., 0., 1., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == len(x)
def test_reconstruct_whole_spectrum_input4_nfft7():
x = np.array([3.,2.,1.,0.])
n_fft = 7
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x, n_fft=n_fft)
expected = np.array([3., 2., 1., 0., 1., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == n_fft
def test_reconstruct_whole_spectrum_input4_nfft6():
x = np.array([3.,2.,1.,0.])
n_fft= 6
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x, n_fft=n_fft)
print(x_reconstructed)
expected = np.array([3., 2., 1., 0., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == n_fft
def test_reconstruct_whole_spectrum_input4_nfft5():
x = np.array([3.,2.,1.,0.])
n_fft = 5
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x, n_fft=n_fft)
print(x_reconstructed)
expected = np.array([3., 2., 1., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == n_fft
def test_reconstruct_whole_spectrum_input4_nfft14():
x = np.array([3.,2.,1.,0.])
n_fft = 14
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x, n_fft=n_fft)
print(x_reconstructed)
expected = np.array([3., 2., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 2., 3.])
assert np.array_equal(expected, x_reconstructed)
assert len(x_reconstructed) == n_fft
def test_reconstruct_whole_spectrum_complexvals():
np.random.seed(seed=0)
x_complex = np.random.random(2) + np.random.random(2) * 1j
n_fft = int(2*len(x_complex))
x_reconstructed = sp.dsp.reconstruct_whole_spectrum(x_complex,
n_fft = n_fft)
expected = np.array([0.5488135 +0.60276338j, 0.71518937+0.54488318j, 0. +0.j, 0.5488135 +0.60276338j])
print(x_reconstructed)
assert np.allclose(expected, x_reconstructed)
assert len(x_reconstructed) == n_fft
def test_overlap_add():
enhanced_matrix = np.ones((4, 4))
frame_length = 4
overlap = 2
sig = sp.dsp.overlap_add(enhanced_matrix, frame_length, overlap)
expected = np.array([1., 1., 2., 2., 2., 2., 2., 2., 1., 1.])
assert np.array_equal(expected, sig)
def test_overlap_add_overlap1():
enhanced_matrix = np.ones((4, 4))
frame_length = 4
overlap = 1
sig = sp.dsp.overlap_add(enhanced_matrix, frame_length, overlap)
expected = np.array([1., 1., 1., 2., 1., 1., 2., 1., 1., 2., 1., 1., 1.])
assert np.array_equal(expected, sig)
def test_overlap_add_complexvals():
    enhanced_matrix = np.ones((4, 4), dtype=complex)
frame_length = 4
overlap = 1
sig = sp.dsp.overlap_add(enhanced_matrix, frame_length, overlap)
expected = np.array([1.+0.j, 1.+0.j, 1.+0.j, 2.+0.j, 1.+0.j, 1.+0.j,
2.+0.j, 1.+0.j, 1.+0.j, 2.+0.j,1.+0.j, 1.+0.j, 1.+0.j])
assert sig.dtype == expected.dtype
def test_overlap_add_framelength_mismatch():
enhanced_matrix = np.ones((4, 4))
frame_length = 3
overlap = 1
with pytest.raises(TypeError):
sig = sp.dsp.overlap_add(enhanced_matrix,
frame_length,
overlap)
def test_calc_num_subframes_fullframes():
expected = 5
subframes = sp.dsp.calc_num_subframes(30,10,5)
assert expected == subframes
def test_calc_num_subframes_mismatchframes():
expected = 5
subframes = sp.dsp.calc_num_subframes(33,10,5)
print(subframes)
assert expected == subframes
def test_calc_num_subframes_mismatchframes_zeropad():
expected = 6
subframes = sp.dsp.calc_num_subframes(33,10,5, zeropad=True)
print(subframes)
assert expected == subframes
def test_generate_sound_default():
data, sr = sp.dsp.generate_sound()
expected1 = np.array([0., 0.06260483, 0.12366658, 0.18168021, 0.2352158 ])
expected2 = 2000
expected3 = 8000
assert np.allclose(expected1, data[:5])
assert len(data) == expected2
assert sr == expected3
def test_generate_sound_freq5():
sound, sr = sp.dsp.generate_sound(freq=5, amplitude=0.5, sr=5, dur_sec=1)
expected1 = np.array([ 0.000000e+00, 5.000000e-01,
3.061617e-16, -5.000000e-01, -6.123234e-16])
expected_sr = 5
expected_len = expected_sr * 1
assert np.allclose(expected1, sound)
assert sr == expected_sr
assert len(sound) == expected_len
def test_get_time_points():
time = sp.dsp.get_time_points(dur_sec = 0.1, sr=50)
expected = np.array([0. ,0.025 ,0.05 , 0.075, 0.1 ])
assert np.allclose(time, expected)
def test_generate_noise():
noise = sp.dsp.generate_noise(5, random_seed=0)
expected = np.array([0.04410131, 0.01000393, 0.02446845, 0.05602233, 0.04668895])
assert np.allclose(expected, noise)
def test_set_signal_length_longer():
input_samples = np.array([1,2,3,4,5])
samples = sp.dsp.set_signal_length(input_samples, numsamps = 8)
expected = np.array([1,2,3,4,5,0,0,0])
assert len(samples) == 8
assert np.array_equal(samples, expected)
def test_set_signal_length_shorter():
input_samples = np.array([1,2,3,4,5])
samples = sp.dsp.set_signal_length(input_samples, numsamps = 4)
expected = | np.array([1,2,3,4]) | numpy.array |
import torch, random, json
import numpy as np
def load_vectors(vecfile, dim=300, unk_rand=True, seed=0):
'''
    Loads saved word vectors and appends an <unk> row and a zero padding row.
    :param vecfile: the name of the .npy file to load the vectors from.
    :param dim: dimensionality of the vectors (used for the appended rows).
    :param unk_rand: if True, the <unk> row is drawn at random; otherwise zeros.
    :param seed: random seed used when drawing the <unk> row.
    :return: a numpy array of all the vectors; <unk> is at index V-2 and the
        padding row at index V-1, where V is the new vocabulary size.
'''
vecs = np.load(vecfile)
np.random.seed(seed)
    if unk_rand:
        vecs = np.vstack((vecs, np.random.randn(dim)))  # <unk> row at index V-2
    else:
        vecs = np.vstack((vecs, np.zeros(dim)))  # <unk> row at index V-2
    vecs = np.vstack((vecs, np.zeros(dim)))  # padding row at index V-1
vecs = vecs.astype(float, copy=False)
return vecs
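# Usage sketch for the index layout handled above (the file name and sizes below
# are hypothetical, chosen only for illustration): after loading, the appended
# <unk> row sits at index V-2 and the zero padding row at index V-1, where V is
# the new vocabulary size.
def _demo_vector_index_layout():
    base = np.random.randn(100, 300)
    np.save('vecs_demo.npy', base)
    vecs = load_vectors('vecs_demo.npy')
    assert vecs.shape == (102, 300)           # 100 saved rows + <unk> + pad
    assert np.allclose(vecs[-1], 0.0)         # pad row is all zeros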
def prepare_batch_with_sentences_in_rev(sample_batched, max_tok_len=35, pos_filtered=False, **kwargs):
'''
Prepares a batch of data, preserving sentence boundaries and returning the sentences in reverse order.
Also returns reversed text and topic.
:param sample_batched: a list of dictionaries, where each is a sample
:param max_sen_len: the maximum # sentences allowed
:param padding_idx: the index for padding for dummy sentences
:param pos_filtered: a flag determining whether to return pos filtered text
:return: the text batch, has shape (S, B, Tij) where S is the max sen len, B is the batch size,
and Tij is the number of tokens in sentence i of batch element j;
the topic batch, a list of all topic instances;
        a list of labels for the post, topic instances
AND (depending on flag)
the text pos filtered, with the same shape as the text batches
'''
topic_batch = torch.tensor([b['topic'] for b in sample_batched])
labels = [torch.tensor(b['label']) for b in sample_batched]
max_sen_len = kwargs['max_sen_len']
padding_idx = kwargs['padding_idx']
text_batch = []
text_batch_pos2filter = dict()
sens = []
txt_lens_lst = []
for i in range(1, max_sen_len + 1):
sn_idx = max_sen_len - i
s_lst = []
s_lst_pos_D = dict()
si = []
s_len_lst = []
for b in sample_batched:
if len(b['text']) > sn_idx:
s_lst.append(torch.tensor([b['text'][sn_idx]]))
si.append(1)
else:
s_lst.append(torch.tensor([[padding_idx] * max_tok_len]))
si.append(0)
s_len_lst.append(b['txt_l'][sn_idx])
if kwargs.get('use_conn', False):
for pos in b['text_pos2filtered']:
s_lst_pos = s_lst_pos_D.get(pos, [])
if len(b['text_pos_filtered'][pos]) > sn_idx and len(b['text_pos_filtered'][pos][sn_idx]) > 0:
s_lst_pos.append(torch.tensor([b['text_pos_filtered'][pos][sn_idx]]))
else:
s_lst_pos.append(torch.tensor([[padding_idx] * max_tok_len]))
text_batch.append(torch.cat(s_lst, dim=0))
if kwargs.get('use_conn', False):
for t in s_lst_pos_D:
text_batch_pos2filter[t] = text_batch_pos2filter.get(t, [])
text_batch_pos2filter[t].append(torch.cat(s_lst_pos_D[t], dim=0))
sens.append(si)
txt_lens_lst.append(s_len_lst)
txt_lens = txt_lens_lst # (S, B, T)?
top_lens = [b['top_l'] for b in sample_batched]
args = {'text': text_batch, 'topic': topic_batch,
'labels': labels, 'sentence_mask': sens,
'txt_l': txt_lens, 'top_l': top_lens}
if pos_filtered:
args['text_pos2filter'] = text_batch_pos2filter
return args
def prepare_batch_with_reverse(sample_batched, **kwargs):
'''
Prepares a batch of data to be used in training or evaluation. Includes the text reversed.
:param sample_batched: a list of dictionaries, where each is a sample
:param pos_filtered: a flag determining whether to return pos filtered text
:return: a list of all the post instances (sorted in decreasing order of len),
a list of all topic instances (corresponding to the sorted posts),
            a list of all the post instances (sorted in decreasing order of len), reversed,
            a list of all topic instances (corresponding to the sorted posts), reversed,
            a list of labels for the post, topic instances
AND (depending on flag)
a list of all posts instances with only certain POS (sorted in dec order of len)
a list of all post instances reversed with only certain POS (sorted in dec order of len)
'''
text_lens = np.array([b['txt_l'] for b in sample_batched])
text_batch = torch.tensor([b['text'] for b in sample_batched])
topic_batch = torch.tensor([b['topic'] for b in sample_batched])
labels = [b['label'] for b in sample_batched]
top_lens = [b['top_l'] for b in sample_batched]
raw_text_batch = [b['ori_text'] for b in sample_batched]
raw_top_batch = [b['ori_topic'] for b in sample_batched]
args = {'text': text_batch, 'topic': topic_batch, 'labels': labels,
'txt_l': text_lens, 'top_l': top_lens,
'ori_text': raw_text_batch, 'ori_topic': raw_top_batch}
if kwargs.get('use_conn', False):
text_pos2filtered = dict()
for t in sample_batched[0]['text_pos2filtered']:
text_pos2filtered[t] = torch.tensor([b['text_pos2filtered'][t] for b in sample_batched])
args['text_pos2filter'] = text_pos2filtered
return args
def prepare_batch_raw(sample_batched, **kwargs):
text_lens = | np.array([b['txt_l'] for b in sample_batched]) | numpy.array |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, utils
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
#
# Should match original Detectron code at
# https://github.com/facebookresearch/Detectron/blob/master/lib/ops/collect_and_distribute_fpn_rpn_proposals.py
#
def boxes_area(boxes):
"""Compute the area of an array of boxes."""
w = (boxes[:, 2] - boxes[:, 0] + 1)
h = (boxes[:, 3] - boxes[:, 1] + 1)
areas = w * h
assert | np.all(areas >= 0) | numpy.all |
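# Worked example of the inclusive "+ 1" convention used above: a box with
# corners (0, 0) and (9, 19) covers 10 x 20 = 200 pixels. (The helper name and
# values are illustrative only.)
def _demo_box_area_convention():
    box = np.array([[0.0, 0.0, 9.0, 19.0]])
    w = box[:, 2] - box[:, 0] + 1
    h = box[:, 3] - box[:, 1] + 1
    assert (w * h)[0] == 200.0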
from unittest import TestCase
from src.layers import Linear
import tensorflow as tf
import numpy as np
class TestLinearLayer(TestCase):
def test_compilation_and_build(self):
# We define a small model, if no errors are raised, test passes.
# Build MLP model
model = tf.keras.models.Sequential([Linear(5), Linear(5)])
# Compile
model.compile(
loss="sparse_categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"],
)
# Initialize
model.build(input_shape=(32, 8))
def test_forward_propagation(self):
# We define a small model and pass in a dummy input. If shape is correct,
# no nans and no infinite values appear in the output, test passes.
# Build MLP model
model = tf.keras.models.Sequential([Linear(5), Linear(5)])
# Compile
model.compile(
loss="sparse_categorical_crossentropy",
optimizer="adam",
metrics=["accuracy"],
)
# Initialize
model.build(input_shape=(32, 8))
# Dummy input
X = np.random.randn(32, 8)
y_hat = model(X).numpy()
self.assertEquals((32, 5), y_hat.shape)
self.assertEquals(False, np.isnan(y_hat.sum()))
self.assertEquals(True, np.isfinite(y_hat.sum()))
def test_gradient_check(self):
# We define a small model and pass in a dummy input, get the loss, fit, and
# get the loss again. If the loss decreases, the test passes
model = tf.keras.models.Sequential([Linear(5), Linear(5)])
# Compile
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam",
)
# Initialize
model.build(input_shape=(32, 8))
# Dummy input
X = | np.random.randn(32, 8) | numpy.random.randn |
# -*- coding: UTF-8 -*-
# @Author : <NAME>
# @Email : <EMAIL>
""" KDA
Reference:
"Toward Dynamic User Intention: Temporal Evolutionary Effects of Item Relations in Sequential Recommendation"
Chenyang Wang et al., TOIS'2021.
CMD example:
python main.py --model_name KDA --emb_size 64 --include_attr 1 --freq_rand 0 --lr 1e-3 --l2 1e-6 --num_heads 4 \
--history_max 20 --dataset 'Grocery_and_Gourmet_Food'
"""
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
from utils import layers
from models.BaseModel import SequentialModel
from helpers.DFTReader import DFTReader
class KDA(SequentialModel):
reader = 'DFTReader'
extra_log_args = ['num_layers', 'num_heads', 'gamma', 'freq_rand', 'include_val']
@staticmethod
def parse_model_args(parser):
parser.add_argument('--emb_size', type=int, default=64,
help='Size of embedding vectors.')
parser.add_argument('--neg_head_p', type=float, default=0.5,
help='The probability of sampling negative head entity.')
parser.add_argument('--num_layers', type=int, default=1,
help='Number of self-attention layers.')
parser.add_argument('--num_heads', type=int, default=1,
help='Number of attention heads.')
parser.add_argument('--gamma', type=float, default=-1,
help='Coefficient of KG loss (-1 for auto-determine).')
parser.add_argument('--attention_size', type=int, default=10,
help='Size of attention hidden space.')
parser.add_argument('--pooling', type=str, default='average',
help='Method of pooling relational history embeddings: average, max, attention')
parser.add_argument('--include_val', type=int, default=1,
help='Whether include relation value in the relation representation')
return SequentialModel.parse_model_args(parser)
def __init__(self, args, corpus):
self.relation_num = corpus.n_relations
self.entity_num = corpus.n_entities
self.freq_x = corpus.freq_x
self.freq_dim = args.n_dft // 2 + 1
self.freq_rand = args.freq_rand
self.emb_size = args.emb_size
self.neg_head_p = args.neg_head_p
self.layer_num = args.num_layers
self.head_num = args.num_heads
self.attention_size = args.attention_size
self.pooling = args.pooling.lower()
self.include_val = args.include_val
self.gamma = args.gamma
if self.gamma < 0:
self.gamma = len(corpus.relation_df) / len(corpus.all_df)
super().__init__(args, corpus)
def _define_params(self):
self.user_embeddings = nn.Embedding(self.user_num, self.emb_size)
self.entity_embeddings = nn.Embedding(self.entity_num, self.emb_size)
self.relation_embeddings = nn.Embedding(self.relation_num, self.emb_size)
# First-level aggregation
self.relational_dynamic_aggregation = RelationalDynamicAggregation(
self.relation_num, self.freq_dim, self.relation_embeddings, self.include_val, self.device
)
# Second-level aggregation
self.attn_head = layers.MultiHeadAttention(self.emb_size, self.head_num, bias=False)
self.W1 = nn.Linear(self.emb_size, self.emb_size)
self.W2 = nn.Linear(self.emb_size, self.emb_size)
self.dropout_layer = nn.Dropout(self.dropout)
self.layer_norm = nn.LayerNorm(self.emb_size)
# Pooling
if self.pooling == 'attention':
self.A = nn.Linear(self.emb_size, self.attention_size)
self.A_out = nn.Linear(self.attention_size, 1, bias=False)
# Prediction
self.item_bias = nn.Embedding(self.item_num, 1)
def actions_before_train(self):
if not self.freq_rand:
dft_freq_real = torch.tensor(np.real(self.freq_x)) # R * n_freq
dft_freq_imag = torch.tensor(np.imag(self.freq_x))
self.relational_dynamic_aggregation.freq_real.weight.data.copy_(dft_freq_real)
self.relational_dynamic_aggregation.freq_imag.weight.data.copy_(dft_freq_imag)
def forward(self, feed_dict):
self.check_list = []
prediction = self.rec_forward(feed_dict)
out_dict = {'prediction': prediction}
if feed_dict['phase'] == 'train':
kg_prediction = self.kg_forward(feed_dict)
out_dict['kg_prediction'] = kg_prediction
return out_dict
def rec_forward(self, feed_dict):
u_ids = feed_dict['user_id'] # B
i_ids = feed_dict['item_id'] # B * -1
v_ids = feed_dict['item_val'] # B * -1 * R
history = feed_dict['history_items'] # B * H
delta_t_n = feed_dict['history_delta_t'].float() # B * H
batch_size, seq_len = history.shape
u_vectors = self.user_embeddings(u_ids)
i_vectors = self.entity_embeddings(i_ids)
v_vectors = self.entity_embeddings(v_ids) # B * -1 * R * V
his_vectors = self.entity_embeddings(history) # B * H * V
"""
Relational Dynamic History Aggregation
"""
valid_mask = (history > 0).view(batch_size, 1, seq_len, 1)
context = self.relational_dynamic_aggregation(
his_vectors, delta_t_n, i_vectors, v_vectors, valid_mask) # B * -1 * R * V
"""
Multi-layer Self-attention
"""
for i in range(self.layer_num):
residual = context
# self-attention
context = self.attn_head(context, context, context)
# feed forward
context = self.W1(context)
context = self.W2(context.relu())
# dropout, residual and layer_norm
context = self.dropout_layer(context)
context = self.layer_norm(residual + context)
"""
Pooling Layer
"""
if self.pooling == 'attention':
query_vectors = context * u_vectors[:, None, None, :] # B * -1 * R * V
user_attention = self.A_out(self.A(query_vectors).tanh()).squeeze(-1) # B * -1 * R
user_attention = (user_attention - user_attention.max()).softmax(dim=-1)
his_vector = (context * user_attention[:, :, :, None]).sum(dim=-2) # B * -1 * V
elif self.pooling == 'max':
his_vector = context.max(dim=-2).values # B * -1 * V
else:
his_vector = context.mean(dim=-2) # B * -1 * V
"""
Prediction
"""
i_bias = self.item_bias(i_ids).squeeze(-1)
prediction = ((u_vectors[:, None, :] + his_vector) * i_vectors).sum(dim=-1)
prediction = prediction + i_bias
return prediction.view(feed_dict['batch_size'], -1)
def kg_forward(self, feed_dict):
head_ids = feed_dict['head_id'].long() # B * -1
tail_ids = feed_dict['tail_id'].long() # B * -1
value_ids = feed_dict['value_id'].long() # B
relation_ids = feed_dict['relation_id'].long() # B
head_vectors = self.entity_embeddings(head_ids)
tail_vectors = self.entity_embeddings(tail_ids)
value_vectors = self.entity_embeddings(value_ids)
relation_vectors = self.relation_embeddings(relation_ids)
# DistMult
if self.include_val:
prediction = (head_vectors * (relation_vectors + value_vectors)[:, None, :] * tail_vectors).sum(-1)
else:
prediction = (head_vectors * relation_vectors[:, None, :] * tail_vectors).sum(-1)
return prediction
def loss(self, out_dict):
predictions = out_dict['prediction']
pos_pred, neg_pred = predictions[:, 0], predictions[:, 1:]
neg_softmax = (neg_pred - neg_pred.max()).softmax(dim=1)
rec_loss = -((pos_pred[:, None] - neg_pred).sigmoid() * neg_softmax).sum(dim=1).log().mean()
predictions = out_dict['kg_prediction']
pos_pred, neg_pred = predictions[:, 0], predictions[:, 1:]
neg_softmax = (neg_pred - neg_pred.max()).softmax(dim=1)
kg_loss = -((pos_pred[:, None] - neg_pred).sigmoid() * neg_softmax).sum(dim=1).log().mean()
loss = rec_loss + self.gamma * kg_loss
return loss
class Dataset(SequentialModel.Dataset):
def __init__(self, model, corpus, phase):
super().__init__(model, corpus, phase)
if self.phase == 'train':
self.kg_data, self.neg_heads, self.neg_tails = None, None, None
def _prepare(self):
# Prepare item-to-value dict
item_val = self.corpus.item_meta_df.copy()
item_val[self.corpus.item_relations] = 0 # set the value of natural item relations to None
for idx, r in enumerate(self.corpus.attr_relations):
base = self.corpus.n_items + np.sum(self.corpus.attr_max[:idx])
item_val[r] = item_val[r].apply(lambda x: x + base).astype(int)
item_vals = item_val[self.corpus.relations].values # this ensures the order is consistent to relations
self.item_val_dict = dict()
for item, vals in zip(item_val['item_id'].values, item_vals.tolist()):
self.item_val_dict[item] = [0] + vals # the first dimension None for the virtual relation
super()._prepare()
def _get_feed_dict(self, index):
feed_dict = super()._get_feed_dict(index)
feed_dict['item_val'] = [self.item_val_dict[item] for item in feed_dict['item_id']]
delta_t = self.data['time'][index] - feed_dict['history_times']
feed_dict['history_delta_t'] = DFTReader.norm_time(delta_t, self.corpus.t_scalar)
if self.phase == 'train':
feed_dict['head_id'] = np.concatenate([[self.kg_data['head'][index]], self.neg_heads[index]])
feed_dict['tail_id'] = np.concatenate([[self.kg_data['tail'][index]], self.neg_tails[index]])
feed_dict['relation_id'] = self.kg_data['relation'][index]
feed_dict['value_id'] = self.kg_data['value'][index]
return feed_dict
def generate_kg_data(self) -> pd.DataFrame:
rec_data_size = len(self)
replace = (rec_data_size > len(self.corpus.relation_df))
kg_data = self.corpus.relation_df.sample(n=rec_data_size, replace=replace).reset_index(drop=True)
kg_data['value'] = np.zeros(len(kg_data), dtype=int) # default for None
tail_select = kg_data['tail'].apply(lambda x: x < self.corpus.n_items)
item_item_df = kg_data[tail_select]
item_attr_df = kg_data.drop(item_item_df.index)
item_attr_df['value'] = item_attr_df['tail'].values
sample_tails = list() # sample items sharing the same attribute
for head, val in zip(item_attr_df['head'].values, item_attr_df['tail'].values):
share_attr_items = self.corpus.share_attr_dict[val]
tail_idx = np.random.randint(len(share_attr_items))
sample_tails.append(share_attr_items[tail_idx])
item_attr_df['tail'] = sample_tails
kg_data = pd.concat([item_item_df, item_attr_df], ignore_index=True)
return kg_data
def actions_before_epoch(self):
super().actions_before_epoch()
self.kg_data = self.generate_kg_data()
heads, tails = self.kg_data['head'].values, self.kg_data['tail'].values
relations, vals = self.kg_data['relation'].values, self.kg_data['value'].values
self.neg_heads = np.random.randint(1, self.corpus.n_items, size=(len(self.kg_data), self.model.num_neg))
self.neg_tails = np.random.randint(1, self.corpus.n_items, size=(len(self.kg_data), self.model.num_neg))
for i in range(len(self.kg_data)):
item_item_relation = (tails[i] <= self.corpus.n_items)
for j in range(self.model.num_neg):
if | np.random.rand() | numpy.random.rand |
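# The truncated loop above appears to decide, per negative sample, whether to
# corrupt the head or the tail of a knowledge-graph triple. Below is a generic,
# standalone sketch of that standard negative-sampling pattern, not necessarily
# the exact rule used in this class; `heads` and `tails` are assumed to be 1-d
# integer arrays, and the function name is illustrative only.
def _sketch_corrupt_triples(heads, tails, n_items, num_neg, neg_head_p=0.5, seed=0):
    rng = np.random.RandomState(seed)
    neg_heads = rng.randint(1, n_items, size=(len(heads), num_neg))
    neg_tails = rng.randint(1, n_items, size=(len(tails), num_neg))
    corrupt_head = rng.rand(len(heads), num_neg) < neg_head_p
    # Where the head is corrupted keep the true tail, and vice versa.
    neg_heads = np.where(corrupt_head, neg_heads, heads[:, None])
    neg_tails = np.where(corrupt_head, tails[:, None], neg_tails)
    return neg_heads, neg_tails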
"""
Linear mixed effects models for Statsmodels
The data are partitioned into disjoint groups. The probability model
for group i is:
Y = X*beta + Z*gamma + epsilon
where
* n_i is the number of observations in group i
* Y is a n_i dimensional response vector
* X is a n_i x k_fe design matrix for the fixed effects
* beta is a k_fe-dimensional vector of fixed effects slopes
* Z is a n_i x k_re design matrix for the random effects
* gamma is a k_re-dimensional random vector with mean 0
and covariance matrix Psi; note that each group
gets its own independent realization of gamma.
* epsilon is a n_i dimensional vector of iid normal
errors with mean 0 and variance sigma^2; the epsilon
values are independent both within and between groups
Y, X and Z must be entirely observed. beta, Psi, and sigma^2 are
estimated using ML or REML estimation, and gamma and epsilon are
random so define the probability model.
The mean structure is E[Y|X,Z] = X*beta. If only the mean structure
is of interest, GEE is a good alternative to mixed models.
The primary reference for the implementation details is:
<NAME>, <NAME> (1988). "Newton Raphson and EM algorithms for
linear mixed effects models for repeated measures data". Journal of
the American Statistical Association. Volume 83, Issue 404, pages
1014-1022.
See also this more recent document:
http://econ.ucsb.edu/~doug/245a/Papers/Mixed%20Effects%20Implement.pdf
All the likelihood, gradient, and Hessian calculations closely follow
Lindstrom and Bates.
The following two documents are written more from the perspective of
users:
http://lme4.r-forge.r-project.org/lMMwR/lrgprt.pdf
http://lme4.r-forge.r-project.org/slides/2009-07-07-Rennes/3Longitudinal-4.pdf
Notation:
* `cov_re` is the random effects covariance matrix (referred to above
as Psi) and `scale` is the (scalar) error variance. For a single
group, the marginal covariance matrix of endog given exog is scale*I
+ Z * cov_re * Z', where Z is the design matrix for the random
effects in one group.
Notes:
1. Three different parameterizations are used here in different
places. The regression slopes (usually called `fe_params`) are
identical in all three parameterizations, but the variance parameters
differ. The parameterizations are:
* The "natural parameterization" in which cov(endog) = scale*I + Z *
cov_re * Z', as described above. This is the main parameterization
visible to the user.
* The "profile parameterization" in which cov(endog) = I +
Z * cov_re1 * Z'. This is the parameterization of the profile
likelihood that is maximized to produce parameter estimates.
(see Lindstrom and Bates for details). The "natural" cov_re is
equal to the "profile" cov_re1 times scale.
* The "square root parameterization" in which we work with the
Cholesky factor of cov_re1 instead of cov_re1 directly.
All three parameterizations can be "packed" by concatenating fe_params
together with the lower triangle of the dependence structure. Note
that when unpacking, it is important to either square or reflect the
dependence structure depending on which parameterization is being
used.
2. The situation where the random effects covariance matrix is
singular is numerically challenging. Small changes in the covariance
parameters may lead to large changes in the likelihood and
derivatives.
3. The optimization strategy is to first use OLS to get starting
values for the mean structure. Then we optionally perform a few EM
steps, followed by optionally performing a few steepest ascent steps.
This is followed by conjugate gradient optimization using one of the
scipy gradient optimizers. The EM and steepest ascent steps are used
to get adequate starting values for the conjugate gradient
optimization, which is much faster.
"""
import numpy as np
import statsmodels.base.model as base
from scipy.optimize import fmin_ncg, fmin_cg, fmin_bfgs, fmin
from statsmodels.tools.decorators import cache_readonly
from scipy.stats.distributions import norm
import pandas as pd
import patsy
from statsmodels.compat.collections import OrderedDict
from statsmodels.compat import range
import warnings
from statsmodels.tools.sm_exceptions import ConvergenceWarning
from statsmodels.base._penalties import Penalty
from statsmodels.compat.numpy import np_matrix_rank
from pandas import DataFrame
def _get_exog_re_names(exog_re):
if isinstance(exog_re, (pd.Series, pd.DataFrame)):
return exog_re.columns.tolist()
return ["Z{0}".format(k + 1) for k in range(exog_re.shape[1])]
class MixedLMParams(object):
"""
This class represents a parameter state for a mixed linear model.
Parameters
----------
k_fe : integer
The number of covariates with fixed effects.
k_re : integer
The number of covariates with random effects.
use_sqrt : boolean
If True, the covariance matrix is stored using as the lower
triangle of its Cholesky square root, otherwise it is stored
as the lower triangle of the covariance matrix.
Notes
-----
This object represents the parameter state for the model in which
the scale parameter has been profiled out.
"""
def __init__(self, k_fe, k_re, use_sqrt=True):
self.k_fe = k_fe
self.k_re = k_re
        self.k_re2 = k_re * (k_re + 1) // 2
self.k_tot = self.k_fe + self.k_re2
self.use_sqrt = use_sqrt
self._ix = np.tril_indices(self.k_re)
self._params = np.zeros(self.k_tot)
def from_packed(params, k_fe, use_sqrt):
"""
Factory method to create a MixedLMParams object based on the
given packed parameter vector.
Parameters
----------
params : array-like
The mode parameters packed into a single vector.
k_fe : integer
The number of covariates with fixed effects
use_sqrt : boolean
If True, the random effects covariance matrix is stored as
its Cholesky factor, otherwise the lower triangle of the
covariance matrix is stored.
Returns
-------
A MixedLMParams object.
"""
k_re2 = len(params) - k_fe
k_re = (-1 + np.sqrt(1 + 8*k_re2)) / 2
if k_re != int(k_re):
raise ValueError("Length of `packed` not compatible with value of `fe`.")
k_re = int(k_re)
pa = MixedLMParams(k_fe, k_re, use_sqrt)
pa.set_packed(params)
return pa
from_packed = staticmethod(from_packed)
def from_components(fe_params, cov_re=None, cov_re_sqrt=None,
use_sqrt=True):
"""
Factory method to create a MixedLMParams object from given
values for each parameter component.
Parameters
----------
fe_params : array-like
The fixed effects parameter (a 1-dimensional array).
cov_re : array-like
The random effects covariance matrix (a square, symmetric
2-dimensional array).
cov_re_sqrt : array-like
The Cholesky (lower triangular) square root of the random
effects covariance matrix.
use_sqrt : boolean
If True, the random effects covariance matrix is stored as
the lower triangle of its Cholesky factor, otherwise the
lower triangle of the covariance matrix is stored.
Returns
-------
A MixedLMParams object.
"""
k_fe = len(fe_params)
k_re = cov_re.shape[0]
pa = MixedLMParams(k_fe, k_re, use_sqrt)
pa.set_fe_params(fe_params)
pa.set_cov_re(cov_re)
return pa
from_components = staticmethod(from_components)
def copy(self):
"""
Returns a copy of the object.
"""
obj = MixedLMParams(self.k_fe, self.k_re, self.use_sqrt)
obj.set_packed(self.get_packed().copy())
return obj
def get_packed(self, use_sqrt=None):
"""
Returns the model parameters packed into a single vector.
Parameters
----------
use_sqrt : None or bool
If None, `use_sqrt` has the value of this instance's
`use_sqrt`. Otherwise it is set to the given value.
"""
if (use_sqrt is None) or (use_sqrt == self.use_sqrt):
return self._params
pa = self._params.copy()
cov_re = self.get_cov_re()
if use_sqrt:
L = np.linalg.cholesky(cov_re)
pa[self.k_fe:] = L[self._ix]
else:
pa[self.k_fe:] = cov_re[self._ix]
return pa
def set_packed(self, params):
"""
Sets the packed parameter vector to the given vector, without
any validity checking.
"""
self._params = params
def get_fe_params(self):
"""
Returns the fixed effects paramaters as a ndarray.
"""
return self._params[0:self.k_fe]
def set_fe_params(self, fe_params):
"""
Set the fixed effect parameters to the given vector.
"""
self._params[0:self.k_fe] = fe_params
def set_cov_re(self, cov_re=None, cov_re_sqrt=None):
"""
Set the random effects covariance matrix to the given value.
Parameters
----------
cov_re : array-like
The random effects covariance matrix.
cov_re_sqrt : array-like
The Cholesky square root of the random effects covariance
matrix. Only the lower triangle is read.
Notes
-----
The first of `cov_re` and `cov_re_sqrt` that is not None is
used.
"""
if cov_re is not None:
if self.use_sqrt:
cov_re_sqrt = np.linalg.cholesky(cov_re)
self._params[self.k_fe:] = cov_re_sqrt[self._ix]
else:
self._params[self.k_fe:] = cov_re[self._ix]
elif cov_re_sqrt is not None:
if self.use_sqrt:
self._params[self.k_fe:] = cov_re_sqrt[self._ix]
else:
cov_re = np.dot(cov_re_sqrt, cov_re_sqrt.T)
self._params[self.k_fe:] = cov_re[self._ix]
def get_cov_re(self):
"""
Returns the random effects covariance matrix.
"""
pa = self._params[self.k_fe:]
cov_re = np.zeros((self.k_re, self.k_re))
cov_re[self._ix] = pa
if self.use_sqrt:
cov_re = np.dot(cov_re, cov_re.T)
else:
cov_re = (cov_re + cov_re.T) - np.diag(np.diag(cov_re))
return cov_re
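# Small round-trip illustration of the packing described above: the covariance
# recovered from a packed vector matches the one we started from when the
# square-root storage is used consistently. (Values are arbitrary; the helper
# name is illustrative only.)
def _demo_params_round_trip():
    fe = np.array([0.5, -1.0])
    cov = np.array([[2.0, 0.3], [0.3, 1.0]])
    pa = MixedLMParams.from_components(fe, cov_re=cov, use_sqrt=True)
    pa2 = MixedLMParams.from_packed(pa.get_packed(), k_fe=2, use_sqrt=True)
    assert np.allclose(pa2.get_cov_re(), cov)
    assert np.allclose(pa2.get_fe_params(), fe)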
# This is a global switch to use direct linear algebra calculations
# for solving factor-structured linear systems and calculating
# factor-structured determinants. If False, use the
# Sherman-Morrison-Woodbury update which is more efficient for
# factor-structured matrices. Should be False except when testing.
_no_smw = False
def _smw_solve(s, A, AtA, B, BI, rhs):
"""
Solves the system (s*I + A*B*A') * x = rhs for x and returns x.
Parameters
----------
s : scalar
See above for usage
A : square symmetric ndarray
See above for usage
AtA : square ndarray
A.T * A
B : square symmetric ndarray
See above for usage
BI : square symmetric ndarray
The inverse of `B`. Can be None if B is singular
rhs : ndarray
See above for usage
Returns
-------
x : ndarray
See above
If the global variable `_no_smw` is True, this routine uses direct
linear algebra calculations. Otherwise it uses the
Sherman-Morrison-Woodbury identity to speed up the calculation.
"""
# Direct calculation
if _no_smw or BI is None:
mat = np.dot(A, np.dot(B, A.T))
# Add constant to diagonal
mat.flat[::mat.shape[0]+1] += s
return np.linalg.solve(mat, rhs)
# Use SMW identity
qmat = BI + AtA / s
u = np.dot(A.T, rhs)
qmat = np.linalg.solve(qmat, u)
qmat = np.dot(A, qmat)
rslt = rhs / s - qmat / s**2
return rslt
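# Quick numerical sanity check for the identity used above: the SMW route should
# agree with a direct dense solve of (s*I + A*B*A') x = rhs. The sizes and the
# SPD matrix below are arbitrary illustrative choices.
def _check_smw_solve():
    rng = np.random.RandomState(0)
    n, q, s = 8, 3, 1.5
    A = rng.randn(n, q)
    C = rng.randn(q, q)
    B = np.dot(C, C.T) + np.eye(q)        # SPD random effects covariance
    rhs = rng.randn(n)
    direct = np.linalg.solve(s * np.eye(n) + np.dot(A, np.dot(B, A.T)), rhs)
    smw = _smw_solve(s, A, np.dot(A.T, A), B, np.linalg.inv(B), rhs)
    assert np.allclose(direct, smw)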
def _smw_logdet(s, A, AtA, B, BI, B_logdet):
"""
Use the matrix determinant lemma to accelerate the calculation of
the log determinant of s*I + A*B*A'.
Parameters
----------
s : scalar
See above for usage
A : square symmetric ndarray
See above for usage
AtA : square matrix
A.T * A
B : square symmetric ndarray
See above for usage
BI : square symmetric ndarray
The inverse of `B`; can be None if B is singular.
B_logdet : real
The log determinant of B
Returns
-------
The log determinant of s*I + A*B*A'.
"""
p = A.shape[0]
if _no_smw or BI is None:
mat = np.dot(A, np.dot(B, A.T))
# Add constant to diagonal
mat.flat[::p+1] += s
_, ld = np.linalg.slogdet(mat)
return ld
ld = p * np.log(s)
qmat = BI + AtA / s
_, ld1 = np.linalg.slogdet(qmat)
return B_logdet + ld + ld1
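# Companion sanity check for the determinant lemma above: the result should
# match a direct slogdet of s*I + A*B*A'. (Illustrative sizes only.)
def _check_smw_logdet():
    rng = np.random.RandomState(1)
    n, q, s = 6, 2, 0.8
    A = rng.randn(n, q)
    C = rng.randn(q, q)
    B = np.dot(C, C.T) + np.eye(q)
    direct_ld = np.linalg.slogdet(s * np.eye(n) + np.dot(A, np.dot(B, A.T)))[1]
    smw_ld = _smw_logdet(s, A, np.dot(A.T, A), B, np.linalg.inv(B),
                         np.linalg.slogdet(B)[1])
    assert np.allclose(direct_ld, smw_ld)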
class MixedLM(base.LikelihoodModel):
"""
An object specifying a linear mixed effects model. Use the `fit`
method to fit the model and obtain a results object.
Parameters
----------
endog : 1d array-like
The dependent variable
exog : 2d array-like
A matrix of covariates used to determine the
mean structure (the "fixed effects" covariates).
groups : 1d array-like
A vector of labels determining the groups -- data from
different groups are independent
exog_re : 2d array-like
A matrix of covariates used to determine the variance and
covariance structure (the "random effects" covariates). If
None, defaults to a random intercept for each group.
use_sqrt : bool
If True, optimization is carried out using the lower
triangle of the square root of the random effects
covariance matrix, otherwise it is carried out using the
lower triangle of the random effects covariance matrix.
missing : string
The approach to missing data handling
Notes
-----
The covariates in `exog` and `exog_re` may (but need not)
partially or wholly overlap.
`use_sqrt` should almost always be set to True. The main use case
for use_sqrt=False is when complicated patterns of fixed values in
the covariance structure are set (using the `free` argument to
`fit`) that cannot be expressed in terms of the Cholesky factor L.
"""
def __init__(self, endog, exog, groups, exog_re=None,
use_sqrt=True, missing='none', **kwargs):
self.use_sqrt = use_sqrt
# Some defaults
self.reml = True
self.fe_pen = None
self.re_pen = None
# If there is one covariate, it may be passed in as a column
# vector, convert these to 2d arrays.
# TODO: Can this be moved up in the class hierarchy?
# yes, it should be done up the hierarchy
if exog is not None and exog.ndim == 1:
exog = exog[:,None]
if exog_re is not None and exog_re.ndim == 1:
exog_re = exog_re[:,None]
# Calling super creates self.endog, etc. as ndarrays and the
# original exog, endog, etc. are self.data.endog, etc.
super(MixedLM, self).__init__(endog, exog, groups=groups,
exog_re=exog_re, missing=missing,
**kwargs)
self.k_fe = exog.shape[1] # Number of fixed effects parameters
if exog_re is None:
# Default random effects structure (random intercepts).
self.k_re = 1
self.k_re2 = 1
self.exog_re = np.ones((len(endog), 1), dtype=np.float64)
self.data.exog_re = self.exog_re
self.data.param_names = self.exog_names + ['Intercept']
else:
# Process exog_re the same way that exog is handled
# upstream
# TODO: this is wrong and should be handled upstream wholly
self.data.exog_re = exog_re
self.exog_re = np.asarray(exog_re)
if not self.data._param_names:
# HACK: could've been set in from_formula already
# needs refactor
(self.data.param_names,
self.data.exog_re_names) = self._make_param_names(exog_re)
# Model dimensions
# Number of random effect covariates
self.k_re = exog_re.shape[1]
# Number of covariance parameters
self.k_re2 = self.k_re * (self.k_re + 1) // 2
self.k_params = self.k_fe + self.k_re2
# Convert the data to the internal representation, which is a
# list of arrays, corresponding to the groups.
group_labels = list(set(groups))
group_labels.sort()
row_indices = dict((s, []) for s in group_labels)
for i,g in enumerate(groups):
row_indices[g].append(i)
self.row_indices = row_indices
self.group_labels = group_labels
self.n_groups = len(self.group_labels)
# Split the data by groups
self.endog_li = self.group_list(self.endog)
self.exog_li = self.group_list(self.exog)
self.exog_re_li = self.group_list(self.exog_re)
# Precompute this.
self.exog_re2_li = [np.dot(x.T, x) for x in self.exog_re_li]
# The total number of observations, summed over all groups
self.n_totobs = sum([len(y) for y in self.endog_li])
# why do it like the above?
self.nobs = len(self.endog)
# Set the fixed effects parameter names
if self.exog_names is None:
self.exog_names = ["FE%d" % (k + 1) for k in
range(self.exog.shape[1])]
def _make_param_names(self, exog_re):
exog_names = list(self.exog_names)
exog_re_names = _get_exog_re_names(exog_re)
param_names = []
jj = self.k_fe
for i in range(exog_re.shape[1]):
for j in range(i + 1):
if i == j:
param_names.append(exog_re_names[i] + " RE")
else:
param_names.append(exog_re_names[j] + " x " +
exog_re_names[i] + " RE")
jj += 1
return exog_names + exog_re_names, exog_re_names
@classmethod
def from_formula(cls, formula, data, re_formula=None, subset=None,
*args, **kwargs):
"""
Create a Model from a formula and dataframe.
Parameters
----------
formula : str or generic Formula object
The formula specifying the model
data : array-like
The data for the model. See Notes.
re_formula : string
A one-sided formula defining the variance structure of the
model. The default gives a random intercept for each
group.
subset : array-like
An array-like object of booleans, integers, or index
values that indicate the subset of df to use in the
model. Assumes df is a `pandas.DataFrame`
args : extra arguments
These are passed to the model
kwargs : extra keyword arguments
These are passed to the model with one exception. The
``eval_env`` keyword is passed to patsy. It can be either a
:class:`patsy:patsy.EvalEnvironment` object or an integer
indicating the depth of the namespace to use. For example, the
default ``eval_env=0`` uses the calling namespace. If you wish
to use a "clean" environment set ``eval_env=-1``.
Returns
-------
model : Model instance
Notes
------
`data` must define __getitem__ with the keys in the formula
terms args and kwargs are passed on to the model
instantiation. E.g., a numpy structured or rec array, a
dictionary, or a pandas DataFrame.
If `re_formula` is not provided, the default is a random
intercept for each group.
This method currently does not correctly handle missing
values, so missing values should be explicitly dropped from
the DataFrame before calling this method.
"""
if "groups" not in kwargs.keys():
raise AttributeError("'groups' is a required keyword argument in MixedLM.from_formula")
# If `groups` is a variable name, retrieve the data for the
# groups variable.
if type(kwargs["groups"]) == str:
kwargs["groups"] = np.asarray(data[kwargs["groups"]])
if re_formula is not None:
eval_env = kwargs.get('eval_env', None)
if eval_env is None:
eval_env = 1
elif eval_env == -1:
from patsy import EvalEnvironment
eval_env = EvalEnvironment({})
exog_re = patsy.dmatrix(re_formula, data, eval_env=eval_env)
exog_re_names = exog_re.design_info.column_names
exog_re = np.asarray(exog_re)
else:
exog_re = np.ones((data.shape[0], 1),
dtype=np.float64)
exog_re_names = ["Intercept RE"]
mod = super(MixedLM, cls).from_formula(formula, data,
subset=None,
exog_re=exog_re,
*args, **kwargs)
mod.data.param_names = mod.exog_names + exog_re_names
mod.data.exog_re_names = exog_re_names
return mod
def group_list(self, array):
"""
Returns `array` split into subarrays corresponding to the
grouping structure.
"""
if array.ndim == 1:
return [np.array(array[self.row_indices[k]])
for k in self.group_labels]
else:
return [np.array(array[self.row_indices[k], :])
for k in self.group_labels]
def fit_regularized(self, start_params=None, method='l1', alpha=0,
ceps=1e-4, ptol=1e-6, maxit=200, **fit_kwargs):
"""
Fit a model in which the fixed effects parameters are
penalized. The dependence parameters are held fixed at their
estimated values in the unpenalized model.
Parameters
----------
method : string of Penalty object
Method for regularization. If a string, must be 'l1'.
alpha : array-like
Scalar or vector of penalty weights. If a scalar, the
same weight is applied to all coefficients; if a vector,
it contains a weight for each coefficient. If method is a
Penalty object, the weights are scaled by alpha. For L1
regularization, the weights are used directly.
ceps : positive real scalar
Fixed effects parameters smaller than this value
in magnitude are treaded as being zero.
ptol : positive real scalar
Convergence occurs when the sup norm difference
between successive values of `fe_params` is less than
`ptol`.
maxit : integer
The maximum number of iterations.
fit_kwargs : keywords
Additional keyword arguments passed to fit.
Returns
-------
A MixedLMResults instance containing the results.
Notes
-----
The covariance structure is not updated as the fixed effects
parameters are varied.
        The algorithm used here for L1 regularization is a "shooting"
or cyclic coordinate descent algorithm.
If method is 'l1', then `fe_pen` and `cov_pen` are used to
obtain the covariance structure, but are ignored during the
L1-penalized fitting.
References
----------
<NAME>., <NAME>. and <NAME>. Regularized
Paths for Generalized Linear Models via Coordinate
Descent. Journal of Statistical Software, 33(1) (2008)
http://www.jstatsoft.org/v33/i01/paper
http://statweb.stanford.edu/~tibs/stat315a/Supplements/fuse.pdf
"""
if type(method) == str and (method.lower() != 'l1'):
raise ValueError("Invalid regularization method")
# If method is a smooth penalty just optimize directly.
if isinstance(method, Penalty):
# Scale the penalty weights by alpha
method.alpha = alpha
fit_kwargs.update({"fe_pen": method})
return self.fit(**fit_kwargs)
if np.isscalar(alpha):
alpha = alpha * np.ones(self.k_fe, dtype=np.float64)
# Fit the unpenalized model to get the dependence structure.
mdf = self.fit(**fit_kwargs)
fe_params = mdf.fe_params
cov_re = mdf.cov_re
scale = mdf.scale
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
for itr in range(maxit):
fe_params_s = fe_params.copy()
for j in range(self.k_fe):
if abs(fe_params[j]) < ceps:
continue
# The residuals
fe_params[j] = 0.
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
# The loss function has the form
# a*x^2 + b*x + pwt*|x|
a, b = 0., 0.
for k, lab in enumerate(self.group_labels):
exog = self.exog_li[k]
ex_r = self.exog_re_li[k]
ex2_r = self.exog_re2_li[k]
resid = resid_all[self.row_indices[lab]]
x = exog[:,j]
u = _smw_solve(scale, ex_r, ex2_r, cov_re,
cov_re_inv, x)
a += np.dot(u, x)
b -= 2 * np.dot(u, resid)
pwt1 = alpha[j]
if b > pwt1:
fe_params[j] = -(b - pwt1) / (2 * a)
elif b < -pwt1:
fe_params[j] = -(b + pwt1) / (2 * a)
if np.abs(fe_params_s - fe_params).max() < ptol:
break
# Replace the fixed effects estimates with their penalized
# values, leave the dependence parameters in their unpenalized
# state.
params_prof = mdf.params.copy()
params_prof[0:self.k_fe] = fe_params
scale = self.get_scale(fe_params, mdf.cov_re_unscaled)
# Get the Hessian including only the nonzero fixed effects,
# then blow back up to the full size after inverting.
hess = self.hessian_full(params_prof)
pcov = np.nan * np.ones_like(hess)
ii = np.abs(params_prof) > ceps
ii[self.k_fe:] = True
ii = np.flatnonzero(ii)
hess1 = hess[ii, :][:, ii]
pcov[np.ix_(ii,ii)] = np.linalg.inv(-hess1)
results = MixedLMResults(self, params_prof, pcov / scale)
results.fe_params = fe_params
results.cov_re = cov_re
results.scale = scale
results.cov_re_unscaled = mdf.cov_re_unscaled
results.method = mdf.method
results.converged = True
results.cov_pen = self.cov_pen
results.k_fe = self.k_fe
results.k_re = self.k_re
results.k_re2 = self.k_re2
return MixedLMResultsWrapper(results)
def _reparam(self):
"""
Returns parameters of the map converting parameters from the
form used in optimization to the form returned to the user.
Returns
-------
lin : list-like
Linear terms of the map
quad : list-like
Quadratic terms of the map
Notes
-----
If P are the standard form parameters and R are the
modified parameters (i.e. with square root covariance),
then P[i] = lin[i] * R + R' * quad[i] * R
"""
k_fe, k_re, k_re2 = self.k_fe, self.k_re, self.k_re2
k_tot = k_fe + k_re2
ix = np.tril_indices(self.k_re)
lin = []
for k in range(k_fe):
e = np.zeros(k_tot)
e[k] = 1
lin.append(e)
for k in range(k_re2):
lin.append(np.zeros(k_tot))
quad = []
for k in range(k_tot):
quad.append(np.zeros((k_tot, k_tot)))
ii = np.tril_indices(k_re)
ix = [(a,b) for a,b in zip(ii[0], ii[1])]
for i1 in range(k_re2):
for i2 in range(k_re2):
ix1 = ix[i1]
ix2 = ix[i2]
if (ix1[1] == ix2[1]) and (ix1[0] <= ix2[0]):
ii = (ix2[0], ix1[0])
k = ix.index(ii)
quad[k_fe+k][k_fe+i2, k_fe+i1] += 1
for k in range(k_tot):
quad[k] = 0.5*(quad[k] + quad[k].T)
return lin, quad
def hessian_sqrt(self, params):
"""
Returns the Hessian matrix of the log-likelihood evaluated at
a given point, calculated with respect to the parameterization
in which the random effects covariance matrix is represented
through its Cholesky square root.
Parameters
----------
params : MixedLMParams or array-like
The model parameters. If array-like, must contain packed
parameters that are compatible with this model.
Returns
-------
The Hessian matrix of the profile log likelihood function,
evaluated at `params`.
Notes
-----
If `params` is provided as a MixedLMParams object it may be of
any parameterization.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.use_sqrt)
score0 = self.score_full(params)
hess0 = self.hessian_full(params)
params_vec = params.get_packed(use_sqrt=True)
lin, quad = self._reparam()
k_tot = self.k_fe + self.k_re2
# Convert Hessian to new coordinates
hess = 0.
for i in range(k_tot):
hess += 2 * score0[i] * quad[i]
for i in range(k_tot):
vi = lin[i] + 2*np.dot(quad[i], params_vec)
for j in range(k_tot):
vj = lin[j] + 2*np.dot(quad[j], params_vec)
hess += hess0[i, j] * np.outer(vi, vj)
return hess
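# Added note (not in the original source): with the map from `_reparam`,
# P[i] = lin[i]' R + R' quad[i] R, the Jacobian rows are
#     dP[i]/dR = lin[i] + 2 * quad[i] @ R,
# so the chain rule gives the Hessian in the square-root parameterization as
#     H_R = sum_i score0[i] * 2 * quad[i]  +  J' H_P J,
# which is exactly what the two loops above accumulate (vi, vj are rows of J).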
def loglike(self, params):
"""
Evaluate the (profile) log-likelihood of the linear mixed
effects model.
Parameters
----------
params : MixedLMParams, or array-like.
The parameter value. If array-like, must be a packed
parameter vector compatible with this model.
Returns
-------
The log-likelihood value at `params`.
Notes
-----
This is the profile likelihood in which the scale parameter
`scale` has been profiled out.
The input parameter state, if provided as a MixedLMParams
object, can be with respect to any parameterization.
"""
if type(params) is not MixedLMParams:
params = MixedLMParams.from_packed(params, self.k_fe,
self.use_sqrt)
fe_params = params.get_fe_params()
cov_re = params.get_cov_re()
try:
cov_re_inv = np.linalg.inv(cov_re)
except np.linalg.LinAlgError:
cov_re_inv = None
_, cov_re_logdet = np.linalg.slogdet(cov_re)
# The residuals
expval = np.dot(self.exog, fe_params)
resid_all = self.endog - expval
likeval = 0.
# Handle the covariance penalty
if self.cov_pen is not None:
likeval -= self.cov_pen.func(cov_re, cov_re_inv)
# Handle the fixed effects penalty
if self.fe_pen is not None:
likeval -= self.fe_pen.func(fe_params)
xvx, qf = 0., 0.
for k, lab in enumerate(self.group_labels):
exog = self.exog_li[k]
ex_r = self.exog_re_li[k]
ex2_r = self.exog_re2_li[k]
resid = resid_all[self.row_indices[lab]]
# Part 1 of the log likelihood (for both ML and REML)
ld = _smw_logdet(1., ex_r, ex2_r, cov_re, cov_re_inv,
cov_re_logdet)
likeval -= ld / 2.
# Part 2 of the log likelihood (for both ML and REML)
u = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv, resid)
qf += np.dot(resid, u)
# Adjustment for REML
if self.reml:
mat = _smw_solve(1., ex_r, ex2_r, cov_re, cov_re_inv,
exog)
xvx += np.dot(exog.T, mat)
if self.reml:
likeval -= (self.n_totobs - self.k_fe) * np.log(qf) / 2.
_, ld = np.linalg.slogdet(xvx)
import contextlib
import copy
import warnings
import numpy as np
from skrobot.coordinates.dual_quaternion import DualQuaternion
from skrobot.coordinates.math import _check_valid_rotation
from skrobot.coordinates.math import _check_valid_translation
from skrobot.coordinates.math import _wrap_axis
from skrobot.coordinates.math import angle_between_vectors
from skrobot.coordinates.math import cross_product
from skrobot.coordinates.math import matrix2quaternion
from skrobot.coordinates.math import matrix_log
from skrobot.coordinates.math import normalize_vector
from skrobot.coordinates.math import quaternion2matrix
from skrobot.coordinates.math import quaternion_multiply
from skrobot.coordinates.math import quaternion_normalize
from skrobot.coordinates.math import random_rotation
from skrobot.coordinates.math import random_translation
from skrobot.coordinates.math import rotate_matrix
from skrobot.coordinates.math import rotation_angle
from skrobot.coordinates.math import rotation_matrix
from skrobot.coordinates.math import rpy2quaternion
from skrobot.coordinates.math import rpy_angle
def transform_coords(c1, c2, out=None):
"""Return Coordinates by applying c1 to c2 from the left
Parameters
----------
c1 : skrobot.coordinates.Coordinates
c2 : skrobot.coordinates.Coordinates
Coordinates
c3 : skrobot.coordinates.Coordinates or None
Output argument. If this value is specified, the results will be
in-placed.
Returns
-------
Coordinates(pos=translation, rot=q) : skrobot.coordinates.Coordinates
new coordinates
Examples
--------
>>> from skrobot.coordinates import Coordinates
>>> from skrobot.coordinates import transform_coords
>>> from numpy import pi
>>> c1 = Coordinates()
>>> c2 = Coordinates()
>>> c3 = transform_coords(c1, c2)
>>> c3.translation
array([0., 0., 0.])
>>> c3.rotation
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> c1 = Coordinates().translate([0.1, 0.2, 0.3]).rotate(pi / 3.0, 'x')
>>> c2 = Coordinates().translate([0.3, -0.3, 0.1]).rotate(pi / 2.0, 'y')
>>> c3 = transform_coords(c1, c2)
>>> c3.translation
array([ 0.4 , -0.03660254, 0.09019238])
>>> c3.rotation
>>> c3.rotation
array([[ 1.94289029e-16, 0.00000000e+00, 1.00000000e+00],
[ 8.66025404e-01, 5.00000000e-01, -1.66533454e-16],
[-5.00000000e-01, 8.66025404e-01, 2.77555756e-17]])
"""
if out is None:
out = Coordinates()
elif not isinstance(out, Coordinates):
raise TypeError("Input type should be skrobot.coordinates.Coordinates")
out.translation = c1.translation + np.dot(c1.rotation, c2.translation)
out.rotation = quaternion_normalize(
quaternion_multiply(c1.quaternion, c2.quaternion))
return out
class Coordinates(object):
"""Coordinates class to manipulate rotation and translation.
Parameters
----------
pos : list or numpy.ndarray
shape of (3,) translation vector. or
4x4 homogeneous transformation matrix.
If the homogeneous transformation matrix is given,
`rot` will be overwritten.
rot : list or numpy.ndarray
we can take 3x3 rotation matrix or
[yaw, pitch, roll] or
quaternion [w, x, y, z] order
name : str or None
name of this coordinates
"""
def __init__(self,
pos=[0, 0, 0],
rot=np.eye(3),
name=None,
hook=None):
if (isinstance(pos, list) or isinstance(pos, np.ndarray)):
T = np.array(pos, dtype=np.float64)
if T.shape == (4, 4):
pos = T[:3, 3]
rot = T[:3, :3]
self.rotation = rot
self.translation = pos
if name is None:
name = ''
self.name = name
self.parent = None
self._hook = hook if hook else lambda: None
@contextlib.contextmanager
def disable_hook(self):
hook = self._hook
self._hook = lambda: None
try:
yield
finally:
self._hook = hook
@property
def rotation(self):
"""Return rotation matrix of this coordinates.
Returns
-------
self._rotation : numpy.ndarray
3x3 rotation matrix
Examples
--------
>>> import numpy as np
>>> from skrobot.coordinates import Coordinates
>>> c = Coordinates()
>>> c.rotation
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> c.rotate(np.pi / 2.0, 'y')
>>> c.rotation
array([[ 2.22044605e-16, 0.00000000e+00, 1.00000000e+00],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00],
[-1.00000000e+00, 0.00000000e+00, 2.22044605e-16]])
"""
self._hook()
return self._rotation
@rotation.setter
def rotation(self, rotation):
"""Set rotation of this coordinate
This setter checks the given rotation and sets it on this coordinate.
Parameters
----------
rotation : list or numpy.ndarray
we can take 3x3 rotation matrix or
rpy angle [yaw, pitch, roll] or
quaternion [w, x, y, z] order
"""
rotation = np.array(rotation)
# Convert quaternions
if rotation.shape == (4,):
self._q = np.array([q for q in rotation])
if np.abs(np.linalg.norm(self._q) - 1.0) > 1e-3:
raise ValueError('Invalid quaternion. Must be '
'norm 1.0, get {}'.
format(np.linalg.norm(self._q)))
rotation = quaternion2matrix(self._q)
elif rotation.shape == (3,):
# Convert [yaw-pitch-roll] to rotation matrix
self._q = rpy2quaternion(rotation)
rotation = quaternion2matrix(self._q)
else:
self._q = matrix2quaternion(rotation)
# Convert lists and tuples
if type(rotation) in (list, tuple):
rotation = np.array(rotation).astype(np.float32)
_check_valid_rotation(rotation)
self._rotation = rotation * 1.
@property
def translation(self):
"""Return translation of this coordinates.
Returns
-------
self._translation : numpy.ndarray
vector shape of (3, ). unit is [m]
Examples
--------
>>> from skrobot.coordinates import Coordinates
>>> c = Coordinates()
>>> c.translation
array([0., 0., 0.])
>>> c.translate([0.1, 0.2, 0.3])
>>> c.translation
array([0.1, 0.2, 0.3])
"""
self._hook()
return self._translation
@translation.setter
def translation(self, translation):
"""Set translation of this coordinate
This setter checks the given translation and sets it on this coordinate.
Parameters
----------
translation : list or tuple or numpy.ndarray
shape of (3,) translation vector
"""
# Convert lists to translation arrays
if type(translation) in (list, tuple) and len(translation) == 3:
translation = np.array([t for t in translation]).astype(np.float64)
_check_valid_translation(translation)
self._translation = translation.squeeze() * 1.
@property
def name(self):
"""Return this coordinate's name
Returns
-------
self._name : str
name of this coordinate
"""
return self._name
@name.setter
def name(self, name):
"""Setter of this coordinate's name
Parameters
----------
name : str
name of this coordinate
"""
if not isinstance(name, str):
raise TypeError('name should be string, get {}'.
format(type(name)))
self._name = name
@property
def dimension(self):
"""Return dimension of this coordinate
Returns
-------
len(self.translation) : int
dimension of this coordinate
"""
return len(self.translation)
def changed(self):
"""Return False
This is used for CascadedCoords compatibility
Returns
-------
False : bool
always return False
"""
return False
def translate(self, vec, wrt='local'):
"""Translate this coordinates.
Note that this function changes this coordinates object itself.
If you do not want to modify it, use copy_worldcoords().
Parameters
----------
vec : list or numpy.ndarray
shape of (3,) translation vector. unit is [m] order.
wrt : str or Coordinates (optional)
translate with respect to wrt.
Examples
--------
>>> import numpy as np
>>> from skrobot.coordinates import Coordinates
>>> c = Coordinates()
>>> c.translation
array([0., 0., 0.], dtype=float32)
>>> c.translate([0.1, 0.2, 0.3])
>>> c.translation
array([0.1, 0.2, 0.3], dtype=float32)
>>> c = Coordinates()
>>> c.copy_worldcoords().translate([0.1, 0.2, 0.3])
>>> c.translation
array([0., 0., 0.], dtype=float32)
>>> c = Coordinates().rotate(np.pi / 2.0, 'y')
>>> c.translate([0.1, 0.2, 0.3])
>>> c.translation
array([ 0.3, 0.2, -0.1])
>>> c = Coordinates().rotate(np.pi / 2.0, 'y')
>>> c.translate([0.1, 0.2, 0.3], 'world')
>>> c.translation
array([0.1, 0.2, 0.3])
"""
vec = np.array(vec, dtype=np.float64)
return self.newcoords(
self.rotation,
self.parent_orientation(vec, wrt) + self.translation)
def transform_vector(self, v):
""""Return vector represented at world frame.
Vector v given in the local coords is converted to world
representation.
Parameters
----------
v : numpy.ndarray
3d vector.
We can take batch of vector like (batch_size, 3)
Returns
-------
transformed_point : numpy.ndarray
transformed point
"""
v = np.array(v, dtype=np.float64)
if v.ndim == 2:
return (np.matmul(self.rotation, v.T)
+ self.translation.reshape(3, -1)).T
return np.matmul(self.rotation, v) + self.translation
def inverse_transform_vector(self, vec):
"""Transform vector in world coordinates to local coordinates
Parameters
----------
vec : numpy.ndarray
3d vector.
We can take batch of vector like (batch_size, 3)
Returns
-------
transformed_point : numpy.ndarray
transformed point
"""
vec = np.array(vec, dtype=np.float64)
if vec.ndim == 2:
return (np.matmul(self.rotation.T, vec.T)
- np.matmul(
self.rotation.T, self.translation).reshape(3, -1)).T
return np.matmul(self.rotation.T, vec) - \
np.matmul(self.rotation.T, self.translation)
def inverse_transformation(self, dest=None):
"""Return a invese transformation of this coordinate system.
Create a new coordinate with inverse transformation of this
coordinate system.
.. math::
\\left(
\\begin{array}{ccc}
R^{-1} & - R^{-1} p \\\\
0 & 1
\\end{array}
\\right)
Parameters
----------
dest : None or skrobot.coordinates.Coordinates
If dest is given, the result of transformation
is in-placed to dest.
Returns
-------
dest : skrobot.coordinates.Coordinates
result of inverse transformation.
"""
if dest is None:
dest = Coordinates()
dest.rotation = self.rotation.T
dest.translation = np.matmul(dest.rotation, self.translation)
dest.translation = -1.0 * dest.translation
return dest
def transformation(self, c2, wrt='local'):
c2 = c2.worldcoords()
c1 = self.worldcoords()
inv = c1.inverse_transformation()
if wrt == 'local' or wrt == self:
transform_coords(inv, c2, inv)
elif wrt == 'parent' or \
wrt == self.parent or \
wrt == 'world':
transform_coords(c2, inv, inv)
elif isinstance(wrt, Coordinates):
xw = wrt.worldcoords()
transform_coords(c2, inv, inv)
transform_coords(xw.inverse_transformation(), inv, inv)
transform_coords(inv, xw, inv)
else:
raise ValueError('wrt {} not supported'.format(wrt))
return inv
def T(self):
"""Return 4x4 homogeneous transformation matrix.
Returns
-------
matrix : numpy.ndarray
homogeneous transformation matrix shape of (4, 4)
Examples
--------
>>> from numpy import pi
>>> from skrobot.coordinates import make_coords
>>> c = make_coords()
>>> c.T()
array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]])
>>> c.translate([0.1, 0.2, 0.3])
>>> c.rotate(pi / 2.0, 'y')
>>> c.T()
array([[ 2.22044605e-16, 0.00000000e+00, 1.00000000e+00,
1.00000000e-01],
[ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00,
2.00000000e-01],
[-1.00000000e+00, 0.00000000e+00, 2.22044605e-16,
3.00000000e-01],
[ 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
1.00000000e+00]])
"""
matrix = np.zeros((4, 4), dtype=np.float64)
matrix[3, 3] = 1.0
matrix[:3, :3] = self.rotation
matrix[:3, 3] = self.translation
return matrix
@property
def quaternion(self):
"""Property of quaternion
Returns
-------
self._q : numpy.ndarray
[w, x, y, z] quaternion
Examples
--------
>>> from numpy import pi
>>> from skrobot.coordinates import make_coords
>>> c = make_coords()
>>> c.quaternion
array([1., 0., 0., 0.])
>>> c.rotate(pi / 3, 'y').rotate(pi / 5, 'z')
>>> c.quaternion
array([0.8236391 , 0.1545085 , 0.47552826, 0.26761657])
"""
return self._q
@property
def dual_quaternion(self):
"""Property of DualQuaternion
Return DualQuaternion representation of this coordinate.
Returns
-------
DualQuaternion : skrobot.coordinates.dual_quaternion.DualQuaternion
DualQuaternion representation of this coordinate
"""
qr = normalize_vector(self.quaternion)
x, y, z = self.translation
qd = quaternion_multiply(np.array([0, x, y, z]), qr) * 0.5
return DualQuaternion(qr, qd)
def parent_orientation(self, v, wrt):
if wrt == 'local' or wrt == self:
return np.matmul(self.rotation, v)
if wrt == 'parent' \
or wrt == self.parent \
or wrt == 'world':
return v
if coordinates_p(wrt):
return np.matmul(wrt.worldrot(), v)
raise ValueError('wrt {} not supported'.format(wrt))
def rotate_vector(self, v):
"""Rotate 3-dimensional vector using rotation of this coordinate
Parameters
----------
v : numpy.ndarray
vector shape of (3,)
Returns
-------
np.matmul(self.rotation, v) : numpy.ndarray
rotated vector
Examples
--------
>>> from skrobot.coordinates import Coordinates
>>> from numpy import pi
>>> c = Coordinates().rotate(pi, 'z')
>>> c.rotate_vector([1, 2, 3])
array([-1., -2., 3.])
"""
return np.matmul(self.rotation, v)
def inverse_rotate_vector(self, v):
return np.matmul(v, self.rotation)
def transform(self, c, wrt='local'):
"""Transform this coordinates by coords based on wrt
Note that this function changes this coordinates
translation and rotation.
If you do not want to change this coordinates object,
please use copy_worldcoords().
Parameters
----------
c : skrobot.coordinates.Coordinates
coordinate
wrt : str or skrobot.coordinates.Coordinates
If wrt is 'local' or self, multiply c from the right.
If wrt is 'world' or 'parent' or self.parent,
transform c with respect to worldcoord.
If wrt is Coordinates, transform c with respect to c.
Returns
-------
self : skrobot.coordinates.Coordinates
return this coordinate
Examples
--------
"""
if wrt == 'local' or wrt == self:
# multiply c from the right
transform_coords(self, c, self)
elif wrt == 'parent' or wrt == self.parent \
or wrt == 'world':
# multiply c from the left
transform_coords(c, self, self)
elif isinstance(wrt, Coordinates):
transform_coords(wrt.inverse_transformation(), self, self)
transform_coords(c, self, self)
transform_coords(wrt.worldcoords(), self, self)
else:
raise ValueError('transform wrt {} is not supported'.format(wrt))
return self
def move_coords(self, target_coords, local_coords):
"""Transform this coordinate so that local_coords to target_coords.
Parameters
----------
target_coords : skrobot.coordinates.Coordinates
target coords.
local_coords : skrobot.coordinates.Coordinates
local coords to be aligned.
Returns
-------
self.worldcoords() : skrobot.coordinates.Coordinates
world coordinates.
"""
self.transform(
local_coords.transformation(target_coords), local_coords)
return self.worldcoords()
def rpy_angle(self):
"""Return a pair of rpy angles of this coordinates.
Returns
-------
rpy_angle(self.rotation) : tuple(numpy.ndarray, numpy.ndarray)
a pair of rpy angles. See also skrobot.coordinates.math.rpy_angle
Examples
--------
>>> import numpy as np
>>> from skrobot.coordinates import Coordinates
>>> c = Coordinates().rotate(np.pi / 2.0, 'x').rotate(np.pi / 3.0, 'z')
>>> c.rpy_angle()
(array([ 3.84592537e-16, -1.04719755e+00, 1.57079633e+00]),
array([ 3.14159265, -2.0943951 , -1.57079633]))
"""
return rpy_angle(self.rotation)
def axis(self, ax):
ax = _wrap_axis(ax)
return self.rotate_vector(ax)
def difference_position(self, coords,
translation_axis=True):
"""Return differences in positoin of given coords.
Parameters
----------
coords : skrobot.coordinates.Coordinates
given coordinates
translation_axis : str or bool or None (optional)
we can take 'x', 'y', 'z', 'xy', 'yz', 'zx', 'xx', 'yy', 'zz',
True or False(None).
Returns
-------
dif_pos : numpy.ndarray
difference position of self coordinates and coords
considering translation_axis.
Examples
--------
>>> from skrobot.coordinates import Coordinates
>>> from skrobot.coordinates import transform_coords
>>> from numpy import pi
>>> c1 = Coordinates().translate([0.1, 0.2, 0.3]).rotate(
... pi / 3.0, 'x')
>>> c2 = Coordinates().translate([0.3, -0.3, 0.1]).rotate(
... pi / 2.0, 'y')
>>> c1.difference_position(c2)
array([ 0.2 , -0.42320508, 0.3330127 ])
>>> c1 = Coordinates().translate([0.1, 0.2, 0.3]).rotate(0, 'x')
>>> c2 = Coordinates().translate([0.3, -0.3, 0.1]).rotate(
... pi / 3.0, 'x')
>>> c1.difference_position(c2)
array([ 0.2, -0.5, -0.2])
"""
dif_pos = self.inverse_transform_vector(coords.worldpos())
translation_axis = _wrap_axis(translation_axis)
dif_pos[translation_axis == 1] = 0.0
return dif_pos
def difference_rotation(self, coords,
rotation_axis=True):
"""Return differences in rotation of given coords.
Parameters
----------
coords : skrobot.coordinates.Coordinates
given coordinates
rotation_axis : str or bool or None (optional)
we can take 'x', 'y', 'z', 'xx', 'yy', 'zz', 'xm', 'ym', 'zm',
'xy', 'yx', 'yz', 'zy', 'zx', 'xz', True or False(None).
Returns
-------
dif_rot : numpy.ndarray
difference rotation of self coordinates and coords
considering rotation_axis.
Examples
--------
>>> from numpy import pi
>>> from skrobot.coordinates import Coordinates
>>> from skrobot.coordinates.math import rpy_matrix
>>> coord1 = Coordinates()
>>> coord2 = Coordinates(rot=rpy_matrix(pi / 2.0, pi / 3.0, pi / 5.0))
>>> coord1.difference_rotation(coord2)
array([-0.32855112, 1.17434985, 1.05738936])
>>> coord1.difference_rotation(coord2, rotation_axis=False)
array([0, 0, 0])
>>> coord1.difference_rotation(coord2, rotation_axis='x')
array([0. , 1.36034952, 0.78539816])
>>> coord1.difference_rotation(coord2, rotation_axis='y')
array([0.35398131, 0. , 0.97442695])
>>> coord1.difference_rotation(coord2, rotation_axis='z')
array([-0.88435715, 0.74192175, 0. ])
Using mirror option ['xm', 'ym', 'zm'], you can
allow differences of mirror direction.
>>> coord1 = Coordinates()
>>> coord2 = Coordinates().rotate(pi, 'x')
>>> coord1.difference_rotation(coord2, 'xm')
array([-2.99951957e-32, 0.00000000e+00, 0.00000000e+00])
>>> coord1 = Coordinates()
>>> coord2 = Coordinates().rotate(pi / 2.0, 'x')
>>> coord1.difference_rotation(coord2, 'xm')
array([-1.57079633, 0. , 0. ])
"""
def need_mirror_for_nearest_axis(coords0, coords1, ax):
a0 = coords0.axis(ax)
a1 = coords1.axis(ax)
a1_mirror = - a1
dr1 = angle_between_vectors(a0, a1, normalize=False) \
* normalize_vector(cross_product(a0, a1))
dr1m = angle_between_vectors(a0, a1_mirror, normalize=False) \
* normalize_vector(cross_product(a0, a1_mirror))
return np.linalg.norm(dr1) < np.linalg.norm(dr1m)
if rotation_axis in ['x', 'y', 'z']:
a0 = self.axis(rotation_axis)
a1 = coords.axis(rotation_axis)
if np.abs(np.linalg.norm(np.array(a0) - np.array(a1))) < 0.001:
dif_rot = np.array([0, 0, 0], 'f')
else:
dif_rot = np.matmul(
self.worldrot().T,
angle_between_vectors(a0, a1, normalize=False)
* normalize_vector(cross_product(a0, a1)))
elif rotation_axis in ['xx', 'yy', 'zz']:
ax = rotation_axis[0]
a0 = self.axis(ax)
a2 = coords.axis(ax)
if not need_mirror_for_nearest_axis(self, coords, ax):
a2 = - a2
dif_rot = np.matmul(
self.worldrot().T,
angle_between_vectors(a0, a2, normalize=False)
* normalize_vector(cross_product(a0, a2)))
elif rotation_axis in ['xy', 'yx', 'yz', 'zy', 'zx', 'xz']:
if rotation_axis in ['xy', 'yx']:
ax1 = 'z'
ax2 = 'x'
elif rotation_axis in ['yz', 'zy']:
ax1 = 'x'
ax2 = 'y'
else:
ax1 = 'y'
ax2 = 'z'
a0 = self.axis(ax1)
a1 = coords.axis(ax1)
dif_rot = np.matmul(
self.worldrot().T,
angle_between_vectors(a0, a1, normalize=False)
* normalize_vector(cross_product(a0, a1)))
norm = np.linalg.norm(dif_rot)
if np.isclose(norm, 0.0):
self_coords = self.copy_worldcoords()
else:
self_coords = self.copy_worldcoords().rotate(norm, dif_rot)
a0 = self_coords.axis(ax2)
a1 = coords.axis(ax2)
dif_rot = np.matmul(
self_coords.worldrot().T,
angle_between_vectors(a0, a1, normalize=False)
* normalize_vector(cross_product(a0, a1)))
elif rotation_axis in ['xm', 'ym', 'zm']:
rot = coords.worldrot()
ax = rotation_axis[0]
if not need_mirror_for_nearest_axis(self, coords, ax):
rot = rotate_matrix(rot, np.pi, ax)
dif_rot = matrix_log(np.matmul(self.worldrot().T, rot))
elif rotation_axis is False or rotation_axis is None:
dif_rot = np.array([0, 0, 0])
elif rotation_axis is True:
dif_rotmatrix = np.matmul(self.worldrot().T,
coords.worldrot())
dif_rot = matrix_log(dif_rotmatrix)
else:
raise ValueError
return dif_rot
def rotate_with_matrix(self, mat, wrt='local'):
"""Rotate this coordinate by given rotation matrix.
This is a subroutine of self.rotate function.
Parameters
----------
mat : numpy.ndarray
rotation matrix shape of (3, 3)
wrt : str or skrobot.coordinates.Coordinates
with respect to.
Returns
-------
self : skrobot.coordinates.Coordinates
"""
if wrt == 'local' or wrt == self:
rot = np.matmul(self.rotation, mat)
self.newcoords(rot, self.translation)
elif wrt == 'parent' or wrt == self.parent or \
wrt == 'world' or wrt is None or \
wrt == worldcoords:
rot = np.matmul(mat, self.rotation)
self.newcoords(rot, self.translation)
elif isinstance(wrt, Coordinates):
r2 = wrt.worldrot()
r2t = r2.T
r2t = np.matmul(mat, r2t)
r2t = np.matmul(r2, r2t)
self.rotation = np.matmul(r2t, self.rotation)
else:
raise ValueError('wrt {} is not supported'.format(wrt))
return self
def rotate(self, theta, axis=None, wrt='local'):
"""Rotate this coordinate by given theta and axis.
This coordinate system is rotated by theta radians
around the `axis` axis.
Note that this function does not change a position of this coordinate.
If you want to rotate this coordinates around with world frame,
you can use `transform` function.
Please see examples.
Parameters
----------
theta : float
relative rotation angle in radians.
axis : str or None or numpy.ndarray
axis of rotation.
The value of `axis` is represented as `wrt` frame.
wrt : str or skrobot.coordinates.Coordinates
Returns
-------
self : skrobot.coordinates.Coordinates
Examples
--------
>>> from skrobot.coordinates import Coordinates
>>> from numpy import pi
>>> c = Coordinates()
>>> c.translate((1.0, 0, 0))
>>> c.rotate(pi / 2.0, 'z', wrt='local')
>>> c.translation
array([1., 0., 0.])
>>> c.transform(Coordinates().rotate(np.pi / 2.0, 'z'), wrt='world')
>>> c.translation
array([0., 1., 0.])
"""
if isinstance(axis, list) or isinstance(axis, np.ndarray):
self.rotate_with_matrix(
rotation_matrix(theta, axis), wrt)
elif axis is None or axis is False:
self.rotate_with_matrix(theta, wrt)
elif wrt == 'local' or wrt == self:
self.rotation = rotate_matrix(self.rotation, theta, axis)
elif wrt == 'parent' or wrt == 'world':
self.rotation = rotate_matrix(self.rotation, theta,
axis, True)
elif isinstance(wrt, Coordinates): # C1'=C2*R*C2(-1)*C1
self.rotate_with_matrix(
rotation_matrix(theta, axis), wrt)
else:
raise ValueError('wrt {} not supported'.format(wrt))
return self.newcoords(self.rotation, self.translation)
def copy(self):
"""Return a deep copy of the Coordinates."""
return self.copy_coords()
def copy_coords(self):
"""Return a deep copy of the Coordinates."""
return Coordinates(pos=copy.deepcopy(self.worldpos()),
rot=copy.deepcopy(self.worldrot()))
def coords(self):
"""Return a deep copy of the Coordinates."""
return self.copy_coords()
def worldcoords(self):
"""Return thisself"""
self._hook()
return self
def copy_worldcoords(self):
"""Return a deep copy of the Coordinates."""
return self.coords()
def worldrot(self):
"""Return rotation of this coordinate
See also skrobot.coordinates.Coordinates.rotation
Returns
-------
self.rotation : numpy.ndarray
rotation matrix of this coordinate
"""
return self.rotation
def worldpos(self):
"""Return translation of this coordinate
See also skrobot.coordinates.Coordinates.translation
Returns
-------
self.translation : numpy.ndarray
translation of this coordinate
"""
return self.translation
def newcoords(self, c, pos=None):
"""Update of coords is always done through newcoords."""
if pos is not None:
self.rotation = copy.deepcopy(c)
self.translation = copy.deepcopy(pos)
else:
self.rotation = copy.deepcopy(c.rotation)
self.translation = copy.deepcopy(c.translation)
return self
def __mul__(self, other_c):
"""Return Transformed Coordinates.
Note that this function creates new Coordinates and
does not change translation and rotation, unlike transform function.
Parameters
----------
other_c : skrobot.coordinates.Coordinates
input coordinates.
Returns
-------
out : skrobot.coordinates.Coordinates
transformed coordinates multiplied other_c from the right.
T = T_{self} T_{other_c}.
"""
return transform_coords(self, other_c)
def __pow__(self, exponent):
"""Return exponential homogeneous matrix.
If exponent equals -1, return inverse transformation of this coords.
Parameters
----------
exponent : numbers.Number
exponent value.
If exponent equals -1, return inverse transformation of this
coords.
In current, support only -1 case.
Returns
-------
out : skrobot.coordinates.Coordinates
output.
"""
if np.isclose(exponent, -1):
return self.inverse_transformation()
raise NotImplementedError
def __repr__(self):
return self.__str__()
def __str__(self):
self.worldrot()
pos = self.worldpos()
self.rpy = rpy_angle(self.rotation)[0]
if self.name:
prefix = self.__class__.__name__ + ':' + self.name
else:
prefix = self.__class__.__name__
return "#<{0} {1} "\
"{2:.3f} {3:.3f} {4:.3f} / {5:.1f} {6:.1f} {7:.1f}>".\
format(prefix,
hex(id(self)),
pos[0],
pos[1],
pos[2],
self.rpy[0],
self.rpy[1],
self.rpy[2])
class CascadedCoords(Coordinates):
def __init__(self, parent=None, *args, **kwargs):
super(CascadedCoords, self).__init__(*args, **kwargs)
self.manager = self
self._changed = True
self._descendants = []
self._worldcoords = Coordinates(pos=self.translation,
rot=self.rotation,
hook=self.update)
self.parent = parent
if parent is not None:
# Because we must self.parent = parent in this case,
# force=True is required.
parent.assoc(self, force=True)
@property
def descendants(self):
return self._descendants
def assoc(self, child, relative_coords=None, force=False,
**kwargs):
"""Associate child coords to this coordinate.
If `relative_coords` is `None`, the translation and rotation
of childcoord in the world coordinate system do not change.
If `relative_coords` is specified, childcoord is assoced
at translation and rotation of `relative_coords`.
By default, if child is already assoced to some other coords,
raise an exception. But if `force` is `True`, you can overwrite
the existing assoc relation.
Parameters
----------
child : CascadedCoords
child coordinate.
relative_coords : None or Coordinates
child coordinate's relative coordinate.
force : bool
predicate for overwriting the existing assoc-relation
Returns
-------
child : CascadedCoords
assoced child.
"""
if 'c' in kwargs:
warnings.warn(
'Argument `c` is deprecated. '
'Please use `relative_coords` instead',
DeprecationWarning)
relative_coords = kwargs['c']
is_invalid_assoc = (child.parent is not None) and (not force)
if is_invalid_assoc:
msg = "child already has an assoc relation with '{0}'."\
" To overwrite this, please specify force=True."\
.format(child.parent.name)
raise RuntimeError(msg)
if not (child in self.descendants):
if relative_coords is None:
relative_coords = self.worldcoords().transformation(
child.worldcoords())
child.parent = self
child.newcoords(relative_coords)
self._descendants.append(child)
return child
def dissoc(self, child):
if child in self.descendants:
c = child.worldcoords().copy_coords()
self._descendants.remove(child)
child.parent = None
child.newcoords(c)
def newcoords(self, c, pos=None):
super(CascadedCoords, self).newcoords(c, pos)
self.changed()
return self
def changed(self):
if self._changed is False:
self._changed = True
return [c.changed() for c in self.descendants]
return [False]
def parentcoords(self):
if self.parent:
return self.parent.worldcoords()
return worldcoords
def transform_vector(self, v):
return self.worldcoords().transform_vector(v)
def inverse_transform_vector(self, v):
return self.worldcoords().inverse_transform_vector(v)
def rotate_with_matrix(self, matrix, wrt):
if wrt == 'local' or wrt == self:
self.rotation = np.dot(self.rotation, matrix)
return self.newcoords(self.rotation, self.translation)
elif wrt == 'parent' or wrt == self.parent:
self.rotation = np.matmul(matrix, self.rotation)
return self.newcoords(self.rotation, self.translation)
else:
parent_coords = self.parentcoords()
parent_rot = parent_coords.rotation
if isinstance(wrt, Coordinates):
wrt_rot = wrt.worldrot()
matrix = np.matmul(wrt_rot, matrix)
matrix = np.matmul(matrix, wrt_rot.T)
matrix = np.matmul(matrix, parent_rot)
matrix = np.matmul(parent_rot.T, matrix)
self.rotation = np.matmul(matrix, self.rotation)
import numpy as np
from . import covRadii
from . import frag
from . import optExceptions
from . import physconst as pc
from . import v3d
from .addIntcos import connectivityFromDistances, addCartesianIntcos
from .printTools import print_opt
class MOLSYS(object): # new-style classes required for getter/setters
def __init__(self, fragments, fb_fragments=None, intcos=None):
# ordinary fragments with internal structure
self._fragments = []
if fragments:
self._fragments = fragments
# fixed body fragments defined by Euler/rotation angles
self._fb_fragments = []
if fb_fragments:
self._fb_fragments = fb_fragments
def __str__(self):
s = ''
for iF, F in enumerate(self._fragments):
s += "Fragment %d\n" % (iF + 1)
s += F.__str__()
for iB, B in enumerate(self._fb_fragments):
s += "Fixed boxy Fragment %d\n" % (iB + 1)
s += B.__str__()
return s
@classmethod
def fromPsi4Molecule(cls, mol):
print_opt("\n\tGenerating molecular system for optimization from PSI4.\n")
NF = mol.nfragments()
print_opt("\t%d Fragments in PSI4 molecule object.\n" % NF)
frags = []
for iF in range(NF):
fragMol = mol.extract_subsets(iF + 1)
fragNatom = fragMol.natom()
print_opt("\tCreating fragment %d with %d atoms\n" % (iF + 1, fragNatom))
fragGeom = np.zeros((fragNatom, 3), float)
fragGeom[:] = fragMol.geometry()
#fragZ = np.zeros( fragNatom, int)
fragZ = []
for i in range(fragNatom):
fragZ.append(int(fragMol.Z(i)))
#fragZ[i] = fragMol.Z(i)
fragMasses = np.zeros(fragNatom, float)
for i in range(fragNatom):
fragMasses[i] = fragMol.mass(i)
frags.append(frag.FRAG(fragZ, fragGeom, fragMasses))
return cls(frags)
@property
def Natom(self):
s = 0
for F in self._fragments:
s += F.Natom
return s
@property
def Nfragments(self):
return len(self._fragments) + len(self._fb_fragments)
# Return overall index of first atom in fragment, beginning 0,1,...
def frag_1st_atom(self, iF):
if iF >= len(self._fragments):
raise ValueError()
start = 0
for i in range(0, iF):
start += self._fragments[i].Natom
return start
def frag_atom_range(self, iF):
start = self.frag_1st_atom(iF)
return range(start, start + self._fragments[iF].Natom)
# accepts absolute atom index, returns fragment index
def atom2frag_index(self, atom_index):
for iF, F in enumerate(self._fragments):
if atom_index in self.frag_atom_range(iF):
return iF
raise optExceptions.OPT_FAIL("atom2frag_index: atom_index impossibly large")
# Given a list of atoms, return all the fragments to which they belong
def atomList2uniqueFragList(self, atomList):
fragList = []
for a in atomList:
f = self.atom2frag_index(a)
if f not in fragList:
fragList.append(f)
return fragList
@property
def geom(self):
geom = np.zeros((self.Natom, 3), float)
for iF, F in enumerate(self._fragments):
row = self.frag_1st_atom(iF)
geom[row:(row + F.Natom), :] = F.geom
return geom
@geom.setter
def geom(self, newgeom):
for iF, F in enumerate(self._fragments):
row = self.frag_1st_atom(iF)
F.geom[:] = newgeom[row:(row + F.Natom), :]
@property
def masses(self):
m = np.zeros(self.Natom, float)
d=[0.473045614952593,
0.447736468125885,
0.428985913357516,
0.420760369024789,
0.41515124352135,
0.405833793375885,
0.397526168918568,
0.394253238719156,
0.380917877188419,
0.330384285055802,
0.326863689244717,
0.327041417142448,
0.323839959911076,
0.321145648600365,
0.317525426500985,
0.316773847322366,
0.310774706528829,
0.307701107902387,
0.306279591776957,
0.305636809626806,
0.305085583722019,
0.300419285479095,
0.298809515219522,
0.295884244372572,
0.29218159873572,
0.289437474625885,
0.288242644225782,
0.28692067005731,
0.283844122914778,
0.282990704879198,
0.28017749282843,
0.274036119895761,
0.267471195712531,
0.259408710782269,
0.257203625892448
]
sigma=0.10895354
import numpy as np
import matplotlib.pyplot as plt
import math
def P(x):
return math.exp(-x**2/(2*2*sigma**2))/(2*math.pi**0.5*sigma)
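# Note (added): P(x) is the zero-mean Gaussian density with variance
# 2*sigma**2, i.e. exp(-x**2 / (4*sigma**2)) / (2*sqrt(pi)*sigma).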
plt.rcParams['figure.figsize'] = (8.0, 4.0)
plt.rcParams['font.sans-serif']=['SimHei'] # so that Chinese labels display correctly
plt.rcParams['axes.unicode_minus']=False # so that minus signs display correctly
plt.rcParams['text.usetex']=False
nx = np.arange(-0.7, 0.7, 0.0005)
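# A possible continuation (assumption: the remaining lines plotted the density
# P over the grid `nx`; illustrative, not from the original script):
# plt.plot(nx, [P(x) for x in nx])
# plt.show()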
import os
import numpy as np
from rs_embed import EmbeddingData
from query.models import Labeler
LANDMARKS_DIR = '/app/data/landmarks'
LANDMARKS_PATH = os.path.join(LANDMARKS_DIR, 'landmarks_binary.bin')
ID_PATH = os.path.join(LANDMARKS_DIR, 'landmarks_ids.bin')
LANDMARKS_DIM = 272
def _load():
id_file_size = os.path.getsize(ID_PATH)
assert id_file_size % 8 == 0, \
'Id file size is not a multiple of sizeof(u64)'
n = int(id_file_size / 8)
emb_file_size = os.path.getsize(LANDMARKS_PATH)
assert emb_file_size % 4 == 0, \
'Embedding file size is not a multiple of sizeof(f32)'
d = int((emb_file_size / 4) / (id_file_size / 8))
assert emb_file_size % d == 0, \
'Embedding file size is not a multiple of d={}'.format(d)
emb_data = EmbeddingData(ID_PATH, LANDMARKS_PATH, LANDMARKS_DIM)
assert emb_data.count() == n, \
'Count does not match expected: {} != {}'.format(n, emb_data.count())
return emb_data
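# Note on the binary layout (added for clarity): ids are stored as u64
# (8 bytes each), so n = id_file_size / 8. Each record holds 68 (x, y)
# landmark coordinates as float64, i.e. 68 * 2 * 8 = 1088 bytes, which the
# float32-based reader counts as 1088 / 4 = 272 values per id -- matching
# LANDMARKS_DIM above and the frombuffer/reshape in LandmarksWrapper below.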
_LANDMARKS_DATA = _load()
LABELER = Labeler.objects.get(name='fan2d')
class LandmarksWrapper():
def __init__(self, landmarks, landmarks_id, labeler):
self.landmarks = np.frombuffer(
np.array(landmarks, dtype=np.float32).tobytes(),
dtype=np.float64).reshape(68, 2)
self.id = landmarks_id
self.labeler = labeler
# Slice values for each set of landmarks
FACE_OUTLINE = (0, 17)
RIGHT_EYEBROW = (17, 22)
LEFT_EYEBROW = (22, 27)
NOSE_BRIDGE = (27, 31)
NOSE_BOTTOM = (31, 36)
RIGHT_EYE = (36, 42)
LEFT_EYE = (42, 48)
OUTER_LIPS = (48, 60)
INNER_LIPS = (60, 68)
def _get_landmarks(self, slice_values):
return self.landmarks[slice_values[0]:slice_values[1]]
def face_outline(self):
return self._get_landmarks(self.FACE_OUTLINE)
def right_eyebrow(self):
return self._get_landmarks(self.RIGHT_EYEBROW)
def left_eyebrow(self):
return self._get_landmarks(self.LEFT_EYEBROW)
def nose_bridge(self):
return self._get_landmarks(self.NOSE_BRIDGE)
def nose_bottom(self):
return self._get_landmarks(self.NOSE_BOTTOM)
def right_eye(self):
return self._get_landmarks(self.RIGHT_EYE)
def left_eye(self):
return self._get_landmarks(self.LEFT_EYE)
def outer_lips(self):
return self._get_landmarks(self.OUTER_LIPS)
def inner_lips(self):
return self._get_landmarks(self.INNER_LIPS)
def get(faces_qs):
"""Generator of Face objects -> list of LandmarksWrapper objects."""
faces_qs = list(faces_qs)
ids = [f.id for f in faces_qs]
# get returns list of (id, landmarks bytes)
result = _LANDMARKS_DATA.get(ids)
assert len(result) == len(faces_qs), "{} != {}".format(
len(result), len(faces_qs))
return [
LandmarksWrapper(np.array(landmarks_id_bytes[1]),
# coding: utf-8
import yt
import numpy as np
from yt.fields.api import ValidateParameter
from mpl_toolkits.axes_grid1 import AxesGrid
from yt.utilities.physical_constants import mp, kb
from yt import derived_field
from yt.units.yt_array import YTQuantity
from yt.funcs import just_one
from scipy.spatial.distance import euclidean
from yt.fields.derived_field import \
ValidateSpatial
def unit_override():
return {"length_unit":(1,"Rsun"),
"time_unit":(6.955e+05 ,"s"),
"mass_unit":(3.36427433875e+17,"g"),
"magnetic_unit":(1.121e-02,"G")}
def sim_parameters():
return {"gamma":1.05}
def create_fields(ds):
units_override = {"length_unit":(1,"Rsun"),
"time_unit":(6.955e+05 ,"s"),
"mass_unit":(3.36427433875e+17,"g"),
"magnetic_unit":(1.121e-02,"G")}
def _radialvelocity(field, data):
return data['velocity_x']*data['x']/data['radius'] + \
data['velocity_y']*data['y']/data['radius'] + \
data['velocity_z']*data['z']/data['radius']
ds.add_field(('gas', "radialvelocity"),
function=_radialvelocity,
units="cm/s",
take_log=False)
def _sound_speed(field, data):
gamma = 1.05
ftype = field.name[0]
tr = gamma * data[ftype, "pressure"] / data[ftype, "density"]
return np.sqrt(tr)
ds.add_field(('gas', "sound_speed"),
function=_sound_speed,
units="cm/s",
take_log=False)
def _mach_number(field, data):
""" M{|v|/c_sound} """
ftype = field.name[0]
return data[ftype, "velocity_magnitude"] / data[ftype, "sound_speed"]
ds.add_field(('gas', "mach_number"),
function=_mach_number,
units="",
take_log=False)
def _temperature(field, data):
return (data["gas", "pressure"]*mp)/(2.0*data["gas", "density"]*kb)
ds.add_field(('gas', "temperature"),
function=_temperature,
units="K",
take_log=True)
def _radius_planet(field, data):
a = 0.047*1.496e+13/6.955e+10
shift = data.ds.arr(np.ones_like(data['x']))*a
x_planet = data['x'] - shift
return np.sqrt(x_planet*x_planet \
+ data['y']*data['y'] \
+ data['z']*data['z'])
ds.add_field(('index', "radius_planet"),
function=_radius_planet,
units="cm",
take_log=False)
def _ni(field, data):
return data["density"]/(1.09*mp)
ds.add_field(("gas", "ni"),
function=_ni,
units="cm**-3")
def _BGx1(field, data):
B0s = YTQuantity(2.0, "G")
B0p = YTQuantity(1.0, "G")
Rs = YTQuantity(6.955e+10, "cm")
Rp = YTQuantity(1.5*0.10045*Rs, "cm")
a = YTQuantity(0.047, "au").in_units("cm")
center = data.get_field_parameter('center')
x1 = data["x"].in_units('cm')
x2 = data["y"].in_units('cm')
x3 = data["z"].in_units('cm')
rs = np.sqrt(x1*x1 + x2*x2 + x3*x3)
rp = np.sqrt((x1-a)*(x1-a) + x2*x2 + x3*x3)
BGx1 = data.ds.arr(np.zeros_like(data["magnetic_field_x"]), "G")
BGx1 = 3.0*x1*x3*B0s*Rs**3*rs**(-5) + 3.0*(x1 - a)*x3*B0p*Rp**3*rp**(-5)
BGx1[rs <= Rs] = 3.0*x1[rs <= Rs]*x3[rs <= Rs]*B0s*Rs**3*rs[rs <= Rs]**(-5)
BGx1[rs <= 0.5*Rs] = 0.0
BGx1[rp <= Rp] = 3.0*(x1[rp <= Rp] - a)*x3[rp <= Rp]\
*B0p*Rp**3*rp[rp <= Rp]**(-5)
BGx1[rp <= 0.5*Rp] = 0.0
return BGx1
ds.add_field(("gas", "BGx1"),
function=_BGx1,
units="G",
take_log=False)
def _BGx2(field, data):
B0s = YTQuantity(2.0, "G")
B0p = YTQuantity(1.0, "G")
Rs = YTQuantity(6.955e+10, "cm")
Rp = YTQuantity(1.5*0.10045*Rs, "cm")
a = YTQuantity(0.047, "au").in_units("cm")
center = data.get_field_parameter('center')
x1 = data["x"].in_units('cm')
x2 = data["y"].in_units('cm')
x3 = data["z"].in_units('cm')
rs = np.sqrt(x1*x1 + x2*x2 + x3*x3)
rp = np.sqrt((x1-a)*(x1-a) + x2*x2 + x3*x3)
BGx2 = data.ds.arr(np.zeros_like(data["magnetic_field_y"]), "G")
BGx2 = 3.0*x3*x2*B0s*Rs**3*rs**(-5) + 3.0*x3*x2*B0p*Rp**3*rp**(-5)
BGx2[rs <= Rs] = 3.0*x3[rs <= Rs]*x2[rs <= Rs]\
*B0s*Rs**3*rs[rs <= Rs]**(-5)
BGx2[rs <= 0.5*Rs] = 0.0
BGx2[rp <= Rp] = 3.0*x3[rp <= Rp]*x2[rp <= Rp]\
*B0p*Rp**3*rp[rp <= Rp]**(-5)
BGx2[rp <= 0.5*Rp] = 0.0
return BGx2
ds.add_field(("gas", "BGx2"),
function=_BGx2,
units="G",
take_log=False)
def _BGx3(field, data):
B0s = YTQuantity(2.0, "G")
B0p = YTQuantity(1.0, "G")
Rs = YTQuantity(6.955e+10, "cm")
Rp = YTQuantity(1.5*0.10045*Rs, "cm")
a = YTQuantity(0.047, "au").in_units("cm")
x1 = data["x"].in_units('cm')
x2 = data["y"].in_units('cm')
x3 = data["z"].in_units('cm')
rs = np.sqrt(x1*x1 + x2*x2 + x3*x3)
from abc import ABC, abstractmethod
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
import os
import json
import copy
import utils
OUT_PATH = './out/'
class ForecastModel(ABC):
"""
Abstract superclass of all probabilistic forecasting models.
"""
@abstractmethod
def __init__(self, y, t, u=None, ID='', seed=0, global_model=False):
self.seed = seed
self.global_model = global_model
self.s_d = 48
self.s_w = self.s_d * 7
# Maximum forecast horizon
self.max_horizon = self.s_w
self.y = y
self.t = t
if u is not None and u.ndim == 1:
u = u[:, np.newaxis]
self.u = u
# Mean, maximum and minimum value of the measurements
self.y_mean = np.nanmean(y, axis=0)
self.y_max = np.nanmax(y, axis=0)
self.y_min = np.nanmin(y, axis=0)
# Maximum, minimum, mean and std value of the input
if u is not None:
self.u_max = np.nanmax(u, axis=0, keepdims=True)
self.u_min = np.nanmin(u, axis=0, keepdims=True)
import numpy as np
import pandas as pd
from bilby.core.prior import TruncatedNormal, PriorDict
from agn_utils.data_formetter import ld_to_dl
BOUNDS = dict(
cos_theta_1=(-1, 1),
cos_theta_12=(-1, 1),
)
PRIOR_VOLUME = (
(BOUNDS["cos_theta_1"][1] - BOUNDS["cos_theta_1"][0])
* (BOUNDS["cos_theta_12"][1] - BOUNDS["cos_theta_12"][0])
)
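# With the bounds above, PRIOR_VOLUME = 2 * 2 = 4, so each posterior sample
# below is tagged with a uniform prior density of 1 / 4.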
def simulate_posterior(sample, fractional_sigma=0.1, n_samples=1000):
posterior = pd.DataFrame()
for key in sample:
if key in BOUNDS:
bound = BOUNDS[key]
else:
bound = (-np.inf, np.inf)
sigma = sample[key] * fractional_sigma
new_true = TruncatedNormal(
mu=sample[key], sigma=sigma, minimum=bound[0], maximum=bound[1]
).sample()
posterior[key] = TruncatedNormal(
mu=new_true, sigma=sigma, minimum=bound[0], maximum=bound[1]
).sample(n_samples)
posterior["prior"] = 1 / PRIOR_VOLUME
return posterior
def simulate_population_posteriors(sig1=5, sig12=5, number_events=10, n_samp=50000, fractional_sigma=1):
pop_prior = PriorDict(dict(
cos_theta_1=TruncatedNormal(mu=1, sigma=sig1, minimum=-1, maximum=1),
cos_theta_12=TruncatedNormal(mu=1, sigma=sig12, minimum=-1, maximum=1)
))
params = pop_prior.keys()
posteriors = {p: [] for p in params}
trues = {p: [] for p in params}
for i in range(number_events):
true = pop_prior.sample()
posterior = simulate_posterior(true, n_samples=n_samp, fractional_sigma=fractional_sigma)
for p in params:
posteriors[p].append(posterior[p].values)
trues[p].append(true[p])
for p in params:
posteriors[p] = np.array(posteriors[p])
trues[p] = np.array(trues[p])
return dict(
trues=trues,
posteriors=posteriors
)
def simulate_exact_population_posteriors(sig1=5, sig12=5, number_events=10, n_samp=10000):
pop_prior = PriorDict(dict(
cos_tilt_1=TruncatedNormal(mu=1, sigma=sig1, minimum=-1, maximum=1),
cos_theta_12=TruncatedNormal(mu=1, sigma=sig12, minimum=-1, maximum=1)
))
posteriors = [pop_prior.sample(n_samp) for _ in range(number_events)]
posteriors = ld_to_dl(posteriors)
posteriors = {k: np.array(v) for k, v in posteriors.items()}
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_ProjectionVGSub [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_ProjectionVGSub&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-subordinated-brownian-motion).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
from numpy import arange, array, zeros, diff, abs, log, exp, sqrt, tile, r_, atleast_2d, newaxis
from numpy import sum as npsum, min as npmin, max as npmax
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, subplots, ylabel, \
xlabel, title, xticks
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import struct_to_dict, datenum, save_plot
from intersect_matlab import intersect
from EffectiveScenarios import EffectiveScenarios
from ConditionalFP import ConditionalFP
from MMFP import MMFP
from VG import VG
from ShiftedVGMoments import ShiftedVGMoments
# -
# ## Upload databases
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_OptionStrategy'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_OptionStrategy'), squeeze_me=True)
OptionStrategy = struct_to_dict(db['OptionStrategy'])
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_VIX'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_VIX'), squeeze_me=True)
VIX = struct_to_dict(db['VIX'])
# -
# ## Merge data
# +
# invariants (daily P&L)
pnl = OptionStrategy.cumPL
epsi = diff(pnl)
dates_x = array([datenum(i) for i in OptionStrategy.Dates])
dates_x = dates_x[1:]
# conditioning variable (VIX)
z = VIX.value
dates_z = VIX.Date
# merging datasets
[dates, i_epsi, i_z] = intersect(dates_x, dates_z)
pnl = pnl[i_epsi + 1]
epsi = epsi[i_epsi]
z = z[i_z]
t_ = len(epsi)
# -
# ## Compute the Flexible Probabilities conditioned via Entropy Pooling
# +
# prior
lam = log(2) / 1800 # half life 5y
prior = exp(-lam*abs(arange(t_, 1 + -1, -1))).reshape(1,-1)
prior = prior / npsum(prior)
# conditioner
VIX = namedtuple('VIX', 'Series TargetValue Leeway')
VIX.Series = z.reshape(1,-1)
VIX.TargetValue = atleast_2d(z[-1])
VIX.Leeway = 0.35
# flexible probabilities conditioned via EP
p = ConditionalFP(VIX, prior)
# effective number of scenarios
typ = namedtuple('type','Entropy')
typ.Entropy = 'Exp'
ens = EffectiveScenarios(p, typ)
# -
# ## Estimation of shifted-VG model
# +
# initial guess on parameters
shift0 = 0
theta0 = 0
sigma0 = 0.01
nu0 = 1
par0 = [shift0, theta0, sigma0, nu0]
# calibration
HFP = namedtuple('HFP', ['FlexProbs','Scenarios'])
HFP.FlexProbs = p
HFP.Scenarios = epsi
par = MMFP(HFP, 'SVG', par0)
shift = par.c
theta = par.theta
sigma = par.sigma
nu = par.nu
# #changing parameterization from {theta,sigma, nu} to {c,m,g}
# [c, m, g] = ParamChangeVG(theta,sigma,nu)
# -
# ## Initialize projection variables
tau = 15 # investment horizon
dt = 1 / 75 # infinitesimal step for simulations
t_j = arange(0, tau + dt, dt)
#! /usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2019 * Ltd. All rights reserved.
#
# Editor : VIM
# File name : evaluate.py
# Author : YunYang1994
# Created date: 2019-02-21 15:30:26
# Description :
#
#================================================================
import cv2
import os
import shutil
import numpy as np
import tensorflow as tf
import core.utils as utils
from core.config import cfg
from core.yolov3 import YOLOV3
os.environ["CUDA_VISIBLE_DEVICES"]="0"
class YoloTest(object):
def __init__(self):
self.input_size = cfg.TEST.INPUT_SIZE
self.anchor_per_scale = cfg.YOLO.ANCHOR_PER_SCALE
self.classes = utils.read_class_names(cfg.YOLO.CLASSES)
self.num_classes = len(self.classes)
self.anchors = np.array(utils.get_anchors(cfg.YOLO.ANCHORS))
self.score_threshold = cfg.TEST.SCORE_THRESHOLD
self.iou_threshold = cfg.TEST.IOU_THRESHOLD
self.moving_ave_decay = cfg.YOLO.MOVING_AVE_DECAY
self.annotation_path = cfg.TEST.ANNOT_PATH
self.weight_file = cfg.TEST.WEIGHT_FILE
self.write_image = cfg.TEST.WRITE_IMAGE
self.write_image_path = cfg.TEST.WRITE_IMAGE_PATH
self.show_label = cfg.TEST.SHOW_LABEL
with tf.name_scope('input'):
self.input_data = tf.placeholder(dtype=tf.float32, name='input_data')
self.trainable = tf.placeholder(dtype=tf.bool, name='trainable')
model = YOLOV3(self.input_data, self.trainable)
self.pred_sbbox, self.pred_mbbox, self.pred_lbbox = model.pred_sbbox, model.pred_mbbox, model.pred_lbbox
with tf.name_scope('ema'):
ema_obj = tf.train.ExponentialMovingAverage(self.moving_ave_decay)
self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
self.saver = tf.train.Saver(ema_obj.variables_to_restore())
self.saver.restore(self.sess, self.weight_file)
def predict(self, image):
org_image = np.copy(image)
import os
import numpy as np
import h5py as h5
import glob
import shutil
from .data_reader import DataReader_pred
from .predict_fn import pred_fn
import pkg_resources
model_dir = pkg_resources.resource_filename('phasenet', os.path.join('model', '190703-214543'))
script_path = os.path.dirname(os.path.realpath(__file__))
def format_data_hdf5(data, root_PN_inputs='.', filename='data.h5'):
"""Format data for PhasetNet (hdf5).
Save the data array in an hdf5 file such that PhaseNet can process it.
Parameters
-------------
data: (n_stations, 3, n_samples) nd.array
Numpy array with the continuous 3-component seismic data
on which we want to pick the P- and S-wave arrivals.
root_PN_inputs: string, default to '.'
Path to the root folder where formatted data will be stored.
filename: string, default to 'data.h5'
Name of the file listing the filenames of all 3-component
time series to process.
"""
import h5py as h5
with h5.File(os.path.join(root_PN_inputs, filename), 'w') as f:
f.create_group('data')
for i in range(data.shape[0]):
# place the component axis at the end
three_comp_data = np.swapaxes(data[i, ...], 0, 1)
f['data'].create_dataset(f'sample{i}', data=three_comp_data)
def format_data_ram(data):
"""Format data for PhasetNet.
Build the data dictionary for PhaseNet.
Parameters
-------------
data: (n_stations, 3, n_samples) nd.array
Numpy array with the continuous 3-component seismic data
on which we want to pick the P- and S-wave arrivals.
"""
data_pn = {}
for i in range(data.shape[0]):
data_pn[f'sample{i}'] = np.swapaxes(data[i, ...], 0, 1)
return data_pn
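# Example (illustrative, not in the original module): both helpers take a
# (n_stations, 3, n_samples) array and reorder each station's trace to
# (n_samples, 3), the layout expected by PhaseNet's data reader.
#
#     import numpy as np
#     data = np.random.randn(4, 3, 3001)   # 4 stations, 3 components
#     data_pn = format_data_ram(data)
#     data_pn['sample0'].shape              # (3001, 3)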
def run_pred(input_length,
model_path=model_dir,
data=None,
data_path='./dataset/waveform_pred/',
log_dir='./dataset/log/',
data_file='./dataset/data.h5',
format='hdf5',
amplitude=False,
batch_size=1,
threshold_P=0.6,
threshold_S=0.6,
**kwargs):
"""Run PhaseNet and fetch its raw output: the P and S probabilities.
Results are stored at the user-defined location `output_filename`. Extra
kwargs are passed to `phasenet.predict_fn.pred_fn`.
Parameters
------------
input_length: int
Duration, in samples, of the 3-component seismograms.
model_path: string, default to the packaged pretrained model
Path to the trained model. The default resolves to the pretrained
weights shipped with the phasenet package (see `model_dir` above);
override it to use a different set of weights.
data_path: string, default to './dataset/waveform_pred/'
Path to the folder with the 3-component seismograms in npz files.
log_dir: string, default to './dataset/log/'
data_file: string, default to './dataset/data.h5'
Name of the hdf5 file with the formatted 3-component seismograms
(only used when `format='hdf5'`).
batch_size: int, default to 1
Number of 3-component seismograms processed by PhaseNet
at once. This should take into account the machine's RAM.
threshold_P: float, default to 0.6
P-wave identification threshold. When PhaseNet's raw output
(proba) exceeds `threshold_P`, a detection is triggered.
threshold_S: float, default to 0.6
S-wave identification threshold. When PhaseNet's raw output
(proba) exceeds `threshold_S`, a detection is triggered.
"""
if format == 'hdf5':
data_reader = DataReader_pred(
format='hdf5',
data_list='', # not used with hdf5 format
hdf5_file=data_file,
hdf5_group='data',
amplitude=amplitude)
elif format == 'ram':
data_reader = DataReader_pred(
format='ram',
data=data,
amplitude=amplitude)
PhaseNet_proba, PhaseNet_picks = pred_fn(
data_reader, model_dir=model_path, log_dir=log_dir,
batch_size=batch_size, input_length=input_length,
min_p_prob=threshold_P, min_s_prob=threshold_S,
**kwargs)
if format == 'hdf5':
# PhaseNet does not take care of closing the hdf5 file
data_reader.h5.close()
return PhaseNet_proba, PhaseNet_picks
def automatic_picking(data,
station_names,
PN_base=None,
PN_dataset_name=None,
format='ram',
mini_batch_size=126,
threshold_P=0.6,
threshold_S=0.6,
**kwargs):
"""Wrapper function to call PhaseNet from a python script.
Extra kwargs are passed to `phasenet.predict_fn.pred_fn`.
Parameters
-----------
data: (n_events, n_stations, 3, n_samples) nd.array
Numpy array with the continuous 3-component seismograms of
`n_events` earthquakes recorded at a network of `n_stations`
stations.
station_names: list or array of strings
Name of the `n_stations` stations of the array, in the same
order as given in `data`.
PN_base: string, default to None
Path to the root folder where PhaseNet formatted data will
be stored. Required if `format='hdf5'`.
PN_dataset_name: string, default to None
Name of the folder, inside `PN_base`, where the formatted data
of a given experiment will be stored. Required if `format='hdf5'`.
mini_batch_size: int, default to 126
Number of 3-component seismograms processed by PhaseNet
at once. This should take into account the machine's RAM.
threshold_P: float, default to 0.6
P-wave identification threshold. When PhaseNet's raw output
(proba) exceeds `threshold_P`, a detection is triggered.
threshold_S: float, default to 0.6
S-wave identification threshold. When PhaseNet's raw output
(proba) exceeds `threshold_S`, a detection is triggered.
Returns
---------
PhaseNet_probas: (n_events, n_stations, n_samples, 2) numpy.ndarray, float
Probabilities of P- and S-wave arrival on the continuous time axis.
PhaseNet_probas[..., 0] is the P-wave probability.
PhaseNet_probas[..., 1] is the S-wave probability.
PhaseNet_picks: dictionary
Dictionary with four fields: 'P_proba', 'P_picks',
'S_proba', 'S_picks'. Each of these fields contains
another dictionary with one entry per station. Finally,
the content of each PhaseNet_picks[field][station] is an
(n_events, numpy.ndarrays) array of arrays with all picks and
associated probabilities for each event.
"""
if format == 'hdf5':
if not os.path.isdir(PN_base):
print(f'Creating the formatted data root folder at {PN_base}')
os.mkdir(PN_base)
# clean up input/output directories if necessary
root_PN_inputs = os.path.join(PN_base, PN_dataset_name)
if not os.path.isdir(root_PN_inputs):
print(f'Creating the experiment root folder at {root_PN_inputs}')
os.mkdir(root_PN_inputs)
else:
PN_base = ''
root_PN_inputs = ''
# assume the data were provided in the shape
# (n_events x n_stations x 3-comp x time_duration)
n_events = data.shape[0]
n_stations = data.shape[1]
input_length = data.shape[3]
# for efficiency, we merge the event and the station axes
batch_size = n_events*n_stations
print('n events: {:d}, n stations: {:d}, batch size (n events x n stations): {:d}'.
format(n_events, n_stations, batch_size))
data = data.reshape(batch_size, 3, input_length)
# make sure the minibatch size is not larger than the
# total number of traces
minibatch_size = min(mini_batch_size, batch_size)
# generate the input files necessary for PhaseNet
if format == 'hdf5':
format_data_hdf5(data, root_PN_inputs=root_PN_inputs)
data_pn = None
elif format == 'ram':
data_pn = format_data_ram(data)
# call PhaseNet
PhaseNet_proba, PhaseNet_picks = run_pred(
input_length,
data_file=os.path.join(root_PN_inputs, 'data.h5'),
log_dir=os.path.join(root_PN_inputs, 'log'),
        batch_size=minibatch_size,
threshold_P=threshold_P,
threshold_S=threshold_S,
format=format,
data=data_pn,
**kwargs)
# the new PhaseNet_proba is an array of time series with [..., 0] = proba of P arrival
# and [..., 1] = proba of S arrival (the original [..., 0] was simply 1 - Pp - Ps)
PhaseNet_proba = PhaseNet_proba.reshape((n_events, n_stations, input_length, 3))[..., 1:]
PhaseNet_picks = PhaseNet_picks.reshape((n_events, n_stations, 2, 2))
# return picks in a comprehensive python dictionary
picks = {}
picks['P_picks'] = {}
picks['P_proba'] = {}
picks['S_picks'] = {}
picks['S_proba'] = {}
for s in range(n_stations):
# (n_events, arrays): array of arrays with all detected P-arrival picks
picks['P_picks'][station_names[s]] = PhaseNet_picks[:, s, 0, 0]
# (n_events, arrays): array of arrays with probabilities of all detected P-arrival picks
picks['P_proba'][station_names[s]] = PhaseNet_picks[:, s, 0, 1]
# (n_events, arrays): array of arrays with all detected S-arrival picks
picks['S_picks'][station_names[s]] = PhaseNet_picks[:, s, 1, 0]
# (n_events, arrays): array of arrays with probabilities of all detected S-arrival picks
picks['S_proba'][station_names[s]] = PhaseNet_picks[:, s, 1, 1]
if format == 'hdf5':
# clean up when done
shutil.rmtree(root_PN_inputs)
return PhaseNet_proba, picks
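# Illustrative usage sketch (added for clarity, not part of the original module):
# a hypothetical end-to-end call to `automatic_picking`. The array shapes, station
# names and thresholds below are placeholders, and running it requires a working
# PhaseNet installation and model (passed through **kwargs if needed).
def _example_automatic_picking():
    import numpy as np
    waveforms = np.random.randn(2, 4, 3, 3000)  # (n_events, n_stations, 3 comp, n_samples)
    stations = ['ST01', 'ST02', 'ST03', 'ST04']
    proba, picks = automatic_picking(
        waveforms, stations, format='ram',
        mini_batch_size=64, threshold_P=0.6, threshold_S=0.6)
    # proba: (2, 4, 3000, 2) P- and S-wave probability traces
    # picks['P_picks']['ST01'][0]: P-wave picks (in samples) for event 0 at ST01
    return proba, picks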
# --------------------------------------------------------------------------------
# The following functions were tailored for template matching applications
# --------------------------------------------------------------------------------
def get_best_picks(picks, buffer_length=50):
"""Filter picks to keep the best one on each 3-comp seismogram.
"""
for st in picks['P_picks'].keys():
for n in range(len(picks['P_picks'][st])):
pp = picks['P_picks'][st][n]
ps = picks['S_picks'][st][n]
# ----------------
            # remove picks from the buffer window at the start of the trace
valid_P_picks = picks['P_picks'][st][n] > int(buffer_length)
valid_S_picks = picks['S_picks'][st][n] > int(buffer_length)
picks['P_picks'][st][n] = picks['P_picks'][st][n][valid_P_picks]
picks['S_picks'][st][n] = picks['S_picks'][st][n][valid_S_picks]
picks['P_proba'][st][n] = picks['P_proba'][st][n][valid_P_picks]
picks['S_proba'][st][n] = picks['S_proba'][st][n][valid_S_picks]
# take only the highest probability trigger
if len(picks['S_picks'][st][n]) > 0:
best_S_trigger = picks['S_proba'][st][n].argmax()
picks['S_picks'][st][n] = picks['S_picks'][st][n][best_S_trigger]
picks['S_proba'][st][n] = picks['S_proba'][st][n][best_S_trigger]
# update P picks: keep only those that are before the best S pick
valid_P_picks = picks['P_picks'][st][n] < picks['S_picks'][st][n]
picks['P_picks'][st][n] = picks['P_picks'][st][n][valid_P_picks]
picks['P_proba'][st][n] = picks['P_proba'][st][n][valid_P_picks]
else:
# if no valid S pick: fill in with nan
picks['S_picks'][st][n] = np.nan
picks['S_proba'][st][n] = np.nan
if len(picks['P_picks'][st][n]) > 0:
best_P_trigger = picks['P_proba'][st][n].argmax()
picks['P_picks'][st][n] = picks['P_picks'][st][n][best_P_trigger]
picks['P_proba'][st][n] = picks['P_proba'][st][n][best_P_trigger]
else:
# if no valid P pick: fill in with nan
picks['P_picks'][st][n] = np.nan
picks['P_proba'][st][n] = np.nan
# convert picks to float to allow NaNs
picks['P_picks'][st] = np.float32(picks['P_picks'][st])
picks['S_picks'][st] = np.float32(picks['S_picks'][st])
picks['P_proba'][st] = np.float32(picks['P_proba'][st])
picks['S_proba'][st] = | np.float32(picks['S_proba'][st]) | numpy.float32 |
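# Illustrative sketch (added, not in the original module): `get_best_picks` is
# meant to be applied in place to the `picks` dictionary returned by
# `automatic_picking` above; `buffer_length` is in samples.
def _example_best_picks(picks, buffer_length=50):
    get_best_picks(picks, buffer_length=buffer_length)  # modifies `picks` in place
    # afterwards picks['P_picks'][station] holds one float pick per event
    # (NaN where no valid pick was found)
    return picks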
# -*- coding: utf-8 -*-
"""
License: MIT
@author: gaj
E-mail: <EMAIL>
Paper References:
[1] <NAME>, <NAME>, and <NAME>, “Improving component substitution Pansharpening through multivariate regression of MS+Pan data,”
IEEE Transactions on Geoscience and Remote Sensing, vol. 45, no. 10, pp. 3230–3239, October 2007.
[2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>, “A Critical Comparison Among Pansharpening Algorithms”,
IEEE Transaction on Geoscience and Remote Sensing, 2014.
"""
import numpy as np
from methods.utils import upsample_interp23
import cv2
def estimation_alpha(pan, hs, mode='global'):
if mode == 'global':
IHC = np.reshape(pan, (-1, 1))
ILRC = np.reshape(hs, (hs.shape[0]*hs.shape[1], hs.shape[2]))
alpha = np.linalg.lstsq(ILRC, IHC)[0]
elif mode == 'local':
patch_size = 32
all_alpha = []
print(pan.shape)
for i in range(0, hs.shape[0]-patch_size, patch_size):
for j in range(0, hs.shape[1]-patch_size, patch_size):
patch_pan = pan[i:i+patch_size, j:j+patch_size, :]
patch_hs = hs[i:i+patch_size, j:j+patch_size, :]
IHC = np.reshape(patch_pan, (-1, 1))
ILRC = np.reshape(patch_hs, (-1, hs.shape[2]))
local_alpha = np.linalg.lstsq(ILRC, IHC)[0]
all_alpha.append(local_alpha)
all_alpha = np.array(all_alpha)
alpha = np.mean(all_alpha, axis=0, keepdims=False)
return alpha
def GSA(pan, hs):
M, N, c = pan.shape
m, n, C = hs.shape
ratio = int(np.round(M/m))
print('get sharpening ratio: ', ratio)
assert int(np.round(M/m)) == int(np.round(N/n))
#upsample
u_hs = upsample_interp23(hs, ratio)
#remove means from u_hs
means = | np.mean(u_hs, axis=(0, 1)) | numpy.mean |
"""
Classic cart-pole system implemented by <NAME> et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
from scipy.integrate import ode
g = 9.8 # gravity
force_mag = 10.0
tau = 0.02 # seconds between state updates
# cart
m_cart = 1
# pole 1
l_1 = 1 # length
m_1 = 0.1 # mass
# pole 2
l_2 = 1 # length
m_2 = 0.1 # mass
def f(time, state, input):
x = state[0]
x_dot = state[1]
theta_1 = state[2]
theta_1_dot = state[3]
theta_2 = state[4]
theta_2_dot = state[5]
x_dot_dot = ((l_1 * l_2 * m_2 * np.sin(theta_1 - theta_2) * theta_1_dot ** 2
+ g * l_2 * m_2 * | np.sin(theta_2) | numpy.sin |
import numpy as np
############################
# simulation helpers #
############################
def simulate_pulse(IF_freq, chi, k, Ts, Td, power):
I = [0]
Q = [0]
# solve numerically a simplified version of the readout resonator
for t in range(Ts):
I.append(I[-1] + (power / 2 - k * I[-1] + Q[-1] * chi))
Q.append(Q[-1] + (power / 2 - k * Q[-1] - I[-1] * chi))
for t in range(Td - 1):
I.append(I[-1] + (-k * I[-1] + Q[-1] * chi))
Q.append(Q[-1] + (-k * Q[-1] - I[-1] * chi))
I = np.array(I)
Q = | np.array(Q) | numpy.array |
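# Illustrative usage sketch (added, not part of the original script): plotting the
# simulated readout traces, assuming the full function returns the I and Q arrays
# built above (its return statement is not shown in this excerpt). The parameter
# values are placeholders chosen for the example only.
def _example_simulate_pulse():
    import matplotlib.pyplot as plt
    I, Q = simulate_pulse(IF_freq=50e6, chi=0.02, k=0.05, Ts=200, Td=100, power=0.1)
    plt.plot(I, label='I')
    plt.plot(Q, label='Q')
    plt.legend()
    plt.show()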
# http://github.com/timestocome
# adapted from:
# https://github.com/maxpumperla/betago
# https://www.manning.com/books/deep-learning-and-the-game-of-go
# attempt to solve MNIST by averaging number images
import numpy as np
from load_mnist import load_data
from layers import sigmoid_double
from matplotlib import pyplot as plt
# compute average over all samples in training set
def average_digit(data, digit):
filtered_data = [x[0] for x in data if np.argmax(x[1]) == digit]
filtered_array = np.asarray(filtered_data)
return np.average(filtered_array, axis=0)
train, test = load_data()
# train, test on digit 8
avg_eight = average_digit(train, 8)
# image showing 8 average
img = (np.reshape(avg_eight, (28, 28)))
plt.imshow(img)
plt.show()
# test a few random samples
x_3 = train[2][0]
x_18 = train[17][0]
# check similarity (dot product) between the average 8 and the random samples
W = np.transpose(avg_eight)
np.dot(W, x_3)
np.dot(W, x_18)
# make predictions
def predict(x, W, b):
return sigmoid_double( | np.dot(W, x) | numpy.dot |
import numpy as np
metric_optimum = {
"MAE": "min",
"MSE": "min",
"accuracy": "max",
"sensitivity": "max",
"specificity": "max",
"PPV": "max",
"NPV": "max",
"BA": "max",
"loss": "min",
}
class MetricModule:
def __init__(self, metrics, n_classes=2):
self.n_classes = n_classes
# Check if wanted metrics are implemented
list_fn = [
method_name
for method_name in dir(MetricModule)
if callable(getattr(MetricModule, method_name))
]
self.metrics = dict()
for metric in metrics:
if f"{metric.lower()}_fn" in list_fn:
self.metrics[metric] = getattr(MetricModule, f"{metric.lower()}_fn")
else:
raise ValueError(
f"The metric {metric} is not implemented in the module"
)
def apply(self, y, y_pred):
"""
        Calculate the requested metrics from the lists of true and predicted labels
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(Dict[str:float]) metrics results
"""
if y is not None and y_pred is not None:
results = dict()
y = np.array(y)
y_pred = np.array(y_pred)
for metric_key, metric_fn in self.metrics.items():
metric_args = list(metric_fn.__code__.co_varnames)
if "class_number" in metric_args:
for class_number in range(self.n_classes):
results[f"{metric_key}-{class_number}"] = metric_fn(
y, y_pred, class_number
)
else:
results[metric_key] = metric_fn(y, y_pred)
else:
results = dict()
return results
@staticmethod
def mae_fn(y, y_pred):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(float) mean absolute error
"""
return np.mean(np.abs(y - y_pred))
@staticmethod
def mse_fn(y, y_pred):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(float) mean squared error
"""
return np.mean(np.square(y - y_pred))
@staticmethod
def accuracy_fn(y, y_pred):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
Returns:
(float) accuracy
"""
true = np.sum(y_pred == y)
return true / len(y)
@staticmethod
def sensitivity_fn(y, y_pred, class_number):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
class_number (int): number of the class studied
Returns:
(float) sensitivity
"""
true_positive = np.sum((y_pred == class_number) & (y == class_number))
false_negative = np.sum((y_pred != class_number) & (y == class_number))
if (true_positive + false_negative) != 0:
return true_positive / (true_positive + false_negative)
else:
return 0.0
@staticmethod
def specificity_fn(y, y_pred, class_number):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
class_number (int): number of the class studied
Returns:
(float) specificity
"""
true_negative = np.sum((y_pred != class_number) & (y != class_number))
false_positive = np.sum((y_pred == class_number) & (y != class_number))
if (false_positive + true_negative) != 0:
return true_negative / (false_positive + true_negative)
else:
return 0.0
@staticmethod
def ppv_fn(y, y_pred, class_number):
"""
Args:
y (List): list of labels
y_pred (List): list of predictions
class_number (int): number of the class studied
Returns:
(float) positive predictive value
"""
true_positive = np.sum((y_pred == class_number) & (y == class_number))
false_positive = | np.sum((y_pred == class_number) & (y != class_number)) | numpy.sum |
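# Illustrative demo (added sketch, not part of the original module): compute a few
# of the metrics defined above on a toy binary classification result.
def _demo_metric_module():
    y_true = [0, 1, 1, 0, 1, 0]
    y_pred = [0, 1, 0, 0, 1, 1]
    metrics = MetricModule(["accuracy", "sensitivity", "specificity"], n_classes=2)
    print(metrics.apply(y_true, y_pred))
    # -> {'accuracy': 0.666..., 'sensitivity-0': ..., 'sensitivity-1': ..., ...}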
from bw2calc.errors import (
OutsideTechnosphere,
NonsquareTechnosphere,
EmptyBiosphere,
InconsistentGlobalIndex,
)
from bw2calc.lca import LCA
from pathlib import Path
import bw_processing as bwp
import json
import numpy as np
import pytest
from collections.abc import Mapping
fixture_dir = Path(__file__).resolve().parent / "fixtures"
######
### Basic functionality
######
def test_example_db_basic():
mapping = dict(json.load(open(fixture_dir / "bw2io_example_db_mapping.json")))
print(mapping)
packages = [
fixture_dir / "bw2io_example_db.zip",
fixture_dir / "ipcc_simple.zip",
]
lca = LCA(
{mapping["Driving an electric car"]: 1},
data_objs=packages,
)
lca.lci()
lca.lcia()
assert lca.supply_array.sum()
assert lca.technosphere_matrix.sum()
assert lca.score
def test_basic():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
answer = np.zeros((2,))
answer[lca.dicts.activity[101]] = 1
answer[lca.dicts.activity[102]] = 0.5
assert np.allclose(answer, lca.supply_array)
def test_basic_negative_production():
pass
def test_basic_substitution():
pass
def test_basic_nonunitary_production():
pass
def test_circular_inputs():
pass
######
### __init__
######
def test_invalid_datapackage():
packages = ["basic_fixture.zip"]
with pytest.raises(TypeError):
LCA({1: 1}, data_objs=packages)
def test_demand_not_mapping():
packages = [fixture_dir / "basic_fixture.zip"]
with pytest.raises(ValueError):
LCA((1, 1), data_objs=packages)
def test_demand_mapping_but_not_dict():
class M(Mapping):
def __getitem__(self, key):
return 1
def __iter__(self):
return iter((1,))
def __len__(self):
return 1
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA(M(), data_objs=packages)
lca.lci()
answer = np.zeros((2,))
answer[lca.dicts.activity[101]] = 1
answer[lca.dicts.activity[102]] = 0.5
assert np.allclose(answer, lca.supply_array)
######
### __next__
######
def test_next_data_array():
packages = [fixture_dir / "array_sequential.zip"]
lca = LCA({1: 1}, data_objs=packages, use_arrays=True)
lca.lci()
lca.lcia()
for x in range(1, 5):
assert lca.biosphere_matrix.sum() == x
next(lca)
def test_next_only_vectors():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
lca.lcia()
current = lca.characterized_inventory.sum()
next(lca)
assert lca.characterized_inventory.sum() == current
def test_next_plain_monte_carlo():
packages = [
fixture_dir / "mc_basic.zip",
]
mc = LCA({3: 1}, data_objs=packages, use_distributions=True)
mc.lci()
mc.lcia()
first = mc.score
next(mc)
assert first != mc.score
def test_next_monte_carlo_as_iterator():
packages = [
fixture_dir / "mc_basic.zip",
]
mc = LCA({3: 1}, data_objs=packages, use_distributions=True)
mc.lci()
mc.lcia()
for _, _ in zip(mc, range(10)):
assert mc.score > 0
def test_next_monte_carlo_all_matrices_change():
packages = [
fixture_dir / "mc_basic.zip",
]
mc = LCA({3: 1}, data_objs=packages, use_distributions=True)
mc.lci()
mc.lcia()
a = [
mc.technosphere_matrix.sum(),
mc.biosphere_matrix.sum(),
mc.characterization_matrix.sum(),
]
next(mc)
b = [
mc.technosphere_matrix.sum(),
mc.biosphere_matrix.sum(),
mc.characterization_matrix.sum(),
]
print(a, b)
for x, y in zip(a, b):
assert x != y
######
### build_demand_array
######
def test_build_demand_array():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
assert lca.demand_array.shape == (2,)
assert lca.demand_array.sum() == 1
assert lca.demand_array[lca.dicts.product[1]] == 1
def test_build_demand_array_pass_dict():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
lca.build_demand_array({2: 5})
assert lca.demand_array.shape == (2,)
assert lca.demand_array.sum() == 5
assert lca.demand_array[lca.dicts.product[2]] == 5
def test_build_demand_array_outside_technosphere():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({100: 1}, data_objs=packages)
with pytest.raises(OutsideTechnosphere):
lca.lci()
def test_build_demand_array_activity_not_product():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({101: 1}, data_objs=packages)
with pytest.raises(ValueError):
lca.lci()
def test_build_demand_array_pass_object():
packages = [fixture_dir / "basic_fixture.zip"]
class Foo:
pass
obj = Foo()
with pytest.raises(ValueError):
LCA(obj, data_objs=packages)
######
### load_lci_data
######
def test_load_lci_data():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
tm = np.array([[1, 0], [-0.5, 1]])
assert np.allclose(lca.technosphere_matrix.toarray(), tm)
assert lca.dicts.product[1] == 0
assert lca.dicts.product[2] == 1
assert lca.dicts.activity[101] == 0
assert lca.dicts.activity[102] == 1
assert lca.dicts.biosphere[1] == 0
def test_load_lci_data_nonsquare_technosphere():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5, 2, 3])
indices_array = np.array(
[(1, 101), (2, 102), (2, 101), (3, 101), (3, 102)], dtype=bwp.INDICES_DTYPE
)
flip_array = np.array([0, 0, 1, 1, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
lca = LCA({1: 1}, data_objs=[dp])
with pytest.raises(NonsquareTechnosphere):
lca.lci()
# lca.lci()
# tm = np.array([
# [1, 0],
# [-0.5, 1],
# [-2, -3]
# ])
# assert np.allclose(lca.technosphere_matrix.toarray(), tm)
# assert lca.dicts.product[1] == 0
# assert lca.dicts.product[2] == 1
# assert lca.dicts.product[3] == 2
# assert lca.dicts.activity[101] == 0
# assert lca.dicts.activity[102] == 1
def test_load_lci_data_empty_biosphere_warning():
lca = LCA({1: 1}, data_objs=[fixture_dir / "empty_biosphere.zip"])
with pytest.warns(UserWarning):
lca.lci()
######
### remap_inventory_dicts
######
def test_remap_inventory_dicts():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA(
{1: 1},
data_objs=packages,
remapping_dicts={"product": {1: ("foo", "bar")}, "biosphere": {1: "z"}},
)
lca.lci()
lca.remap_inventory_dicts()
tm = np.array([[1, 0], [-0.5, 1]])
assert np.allclose(lca.technosphere_matrix.toarray(), tm)
assert lca.dicts.product[("foo", "bar")] == 0
assert lca.dicts.product[2] == 1
assert lca.dicts.activity[101] == 0
assert lca.dicts.activity[102] == 1
assert lca.dicts.biosphere["z"] == 0
######
### load_lcia_data
######
def test_load_lcia_data():
packages = [fixture_dir / "basic_fixture.zip"]
lca = LCA({1: 1}, data_objs=packages)
lca.lci()
lca.lcia()
cm = np.array([[1]])
assert np.allclose(lca.characterization_matrix.toarray(), cm)
def test_load_lcia_data_multiple_characterization_packages():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2, 3])
indices_array = np.array([(1, 101), (2, 102), (3, 101)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1])
indices_array = np.array([(1, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
nrows=1,
)
data_array = np.array([2])
indices_array = np.array([(3, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="second-characterization",
indices_array=indices_array,
global_index=0,
nrows=1,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
cm = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 2]])
assert np.allclose(lca.characterization_matrix.toarray(), cm)
assert lca.dicts.biosphere[1] == 0
assert lca.dicts.biosphere[2] == 1
assert lca.dicts.biosphere[3] == 2
def test_load_lcia_data_inconsistent_globals():
# Activities: 101, 102
# Products: 1, 2
# Biosphere flows: 201, 202
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1])
indices_array = np.array([(201, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
nrows=1,
)
data_array = np.array([10])
indices_array = np.array([(202, 1)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="second-characterization",
indices_array=indices_array,
global_index=1,
nrows=1,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
with pytest.raises(InconsistentGlobalIndex):
lca.lcia()
def test_load_lcia_data_none_global_value():
# Should include all because no filter
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1])
indices_array = np.array([(201, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=None,
nrows=1,
)
data_array = np.array([10])
indices_array = np.array([(202, 1)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="second-characterization",
indices_array=indices_array,
global_index=None,
nrows=1,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
assert lca.characterization_matrix.sum() == 11
def test_load_lcia_data_nonglobal_filtered():
# Activities: 101, 102
# Products: 1, 2
# Biosphere flows: 201, 202
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1])
indices_array = np.array([(201, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
nrows=1,
)
data_array = np.array([10])
indices_array = np.array([(202, 1)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="second-characterization",
indices_array=indices_array,
global_index=0,
nrows=1,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
assert lca.characterization_matrix.sum() == 1
######
### Warnings on uncommon inputs
######
@pytest.mark.filterwarnings("ignore:no biosphere")
def test_empty_biosphere_lcia():
lca = LCA({1: 1}, data_objs=[fixture_dir / "empty_biosphere.zip"])
lca.lci()
assert lca.biosphere_matrix.shape[0] == 0
with pytest.raises(EmptyBiosphere):
lca.lcia()
def test_lca_has():
mapping = dict(json.load(open(fixture_dir / "bw2io_example_db_mapping.json")))
packages = [
fixture_dir / "bw2io_example_db.zip",
fixture_dir / "ipcc_simple.zip",
]
lca = LCA(
{mapping["Driving an electric car"]: 1},
data_objs=packages,
)
lca.lci()
lca.lcia()
assert lca.has("technosphere")
assert lca.has("characterization")
assert not lca.has("foo")
######
### normalize
######
def test_lca_with_normalization():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1, 10])
indices_array = np.array([(201, 0), (202, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
)
data_array = np.array([10, 4])
indices_array = np.array([(201, 0), (202, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="normalization_matrix",
data_array=data_array,
name="nm",
indices_array=indices_array,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
assert lca.score == 11
lca.normalize()
assert lca.score == 1 * 10 + 10 * 4
assert lca.normalization_matrix.shape == (2, 2)
assert lca.normalization_matrix.sum() == 14
######
### weighting
######
def test_lca_with_weighting():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1, 10])
indices_array = np.array([(201, 0), (202, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
)
data_array = np.array([4])
indices_array = np.array([(0, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="weighting_matrix",
data_array=data_array,
name="wm",
indices_array=indices_array,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
assert lca.score == 11
lca.weight()
assert lca.score == 11 * 4
assert lca.weighting_matrix.shape == (2, 2)
assert lca.weighting_matrix.sum() == 8
def test_lca_with_weighting_deprecation():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1, 10])
indices_array = np.array([(201, 0), (202, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
)
data_array = np.array([4])
indices_array = np.array([(0, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="weighting_matrix",
data_array=data_array,
name="wm",
indices_array=indices_array,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
with pytest.deprecated_call():
lca.weighting()
def test_lca_with_weighting_and_normalization():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2])
indices_array = np.array([(201, 101), (202, 102)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1, 10])
indices_array = np.array([(201, 0), (202, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
)
data_array = np.array([10, 4])
indices_array = np.array([(201, 0), (202, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="normalization_matrix",
data_array=data_array,
name="nm",
indices_array=indices_array,
)
data_array = np.array([8])
indices_array = np.array([(0, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="weighting_matrix",
data_array=data_array,
name="wm",
indices_array=indices_array,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
assert lca.score == 11
lca.normalize()
assert lca.score == (1 * 10 + 10 * 4)
lca.weighting()
assert lca.score == (1 * 10 + 10 * 4) * 8
######
### switch_method
######
def test_switch_method():
dp = bwp.create_datapackage()
data_array = np.array([1, 1, 0.5])
indices_array = np.array([(1, 101), (2, 102), (2, 101)], dtype=bwp.INDICES_DTYPE)
flip_array = np.array([0, 0, 1], dtype=bool)
dp.add_persistent_vector(
matrix="technosphere_matrix",
data_array=data_array,
name="technosphere",
indices_array=indices_array,
flip_array=flip_array,
)
data_array = np.array([1, 2, 3])
indices_array = np.array([(1, 101), (2, 102), (3, 101)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="biosphere_matrix",
data_array=data_array,
name="biosphere",
indices_array=indices_array,
)
data_array = np.array([1])
indices_array = np.array([(1, 0)], dtype=bwp.INDICES_DTYPE)
dp.add_persistent_vector(
matrix="characterization_matrix",
data_array=data_array,
name="first-characterization",
indices_array=indices_array,
global_index=0,
)
lca = LCA({1: 1}, data_objs=[dp])
lca.lci()
lca.lcia()
cm = np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
assert np.allclose(lca.characterization_matrix.toarray(), cm)
assert len(lca.packages) == 1
ndp = bwp.create_datapackage()
data_array = | np.array([10]) | numpy.array |
from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from sklearn.utils import check_random_state
def group_lasso_dataset_generator(n_samples=100, n_features=100,
gaussian_noise=0.5, random_state=None):
"""
Generates synthetic data for group lasso tests.
    This function generates a matrix built from 6 basic atoms, grouped
    as [0, 1, 3] and [2, 4, 5], linearly combined with random weights.
A certain level of gaussian noise is added to the signal.
Parameters
----------
n_samples: int, optional
Number of samples for the output matrix.
n_features: int, optional
Number of features the output matrix must have.
gaussian_noise: float, optional
The level of noise to add to the synthetic data.
random_state: RandomState or int, optional
RandomState or seed used to generate RandomState for the
reproducibility of data. If None each time RandomState is randomly
initialised.
Returns
-------
array_like, shape=(n_samples, n_features)
Generated matrix of data
    array_like, shape=(n_samples, 6)
        Coefficients
    array_like, shape=(6, n_features)
        Dictionary
"""
rnd = check_random_state(random_state)
number_of_atoms = 6
atoms = np.empty([n_features, number_of_atoms])
t = np.linspace(0, 1, n_features)
atoms[:, 0] = signal.sawtooth(2 * np.pi * 5 * t)
atoms[:, 1] = np.sin(2 * np.pi * t)
atoms[:, 2] = np.sin(2 * np.pi * t - 15)
atoms[:, 3] = signal.gaussian(n_features, 5)
atoms[:, 4] = signal.square(2 * np.pi * 5 * t)
atoms[:, 5] = np.abs(np.sin(2 * np.pi * t))
groups = [[0, 1, 3], [2, 4, 5]]
signals = np.empty((n_samples, n_features))
coefficients = np.zeros((n_samples, number_of_atoms))
for i in range(n_samples // 2):
coeffs = rnd.random_sample(len(groups[0])) * 10
coefficients[i, groups[0]] = coeffs
for i in range(n_samples // 2, n_samples):
coeffs = rnd.random_sample(len(groups[1])) * 10
coefficients[i, groups[1]] = coeffs
    # add gaussian noise as documented in the docstring
    signals = coefficients.dot(atoms.T) + gaussian_noise * rnd.randn(n_samples, n_features)
return signals, coefficients, atoms.T
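# Illustrative demo (added sketch, not in the original file): generate a small
# group-lasso dataset and inspect the shapes documented above.
def _demo_group_lasso_dataset():
    X, coef, dictionary = group_lasso_dataset_generator(n_samples=20, n_features=50,
                                                        random_state=0)
    print(X.shape, coef.shape, dictionary.shape)  # (20, 50), (20, 6), (6, 50)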
def sparse_signal_generator(n_samples, n_features, frequencies,
support_atoms, shift=True):
    # TODO: clean up this documentation
""" The following function generates signals using sawtooth and sin
Parameters
-------------------
n_samples : int
number of signals to be generated
n_features : int
length of the time series (number of points)
    frequencies : int
        number of frequencies used to build the atoms
    support_atoms : int
        support (number of samples) of each atom
    shift : bool
        if True, atoms are generated at every possible shift; otherwise a single fixed position is used
Returns
-------------------
multichannel_matrix : np.array(n_features, n_samples)
matrix of signals
atoms_matrix : np.array(n_features, number_of_atoms)
matrix of signals
"""
f_array = np.linspace(4. / n_features, 40. / n_features, frequencies)
atom_shape = 2
if shift:
n_shifts = n_features - support_atoms
else:
n_shifts = 1
n_atoms = frequencies * atom_shape * n_shifts
_low = int(0.4 * n_atoms)
_high = int(0.7 * n_atoms)
selected_atoms = np.random.randint(low=_low, high=_high, size=(10,))
atoms = np.zeros((n_features, n_atoms))
time_vector = np.arange(support_atoms)
diff_supp = n_features - support_atoms
for i in range(frequencies):
temp1 = np.sin(f_array[i] * time_vector)
temp2 = signal.sawtooth(f_array[i] * time_vector)
norm1 = np.linalg.norm(np.pad(temp1, (0, diff_supp), mode='constant'))
norm2 = np.linalg.norm(np.pad(temp2, (0, diff_supp), mode='constant'))
for j in range(n_shifts):
atoms[:, i * n_shifts + j] = np.pad(temp1, (j, diff_supp - j),
mode='constant') / norm1
atoms[:, i * n_shifts + j + frequencies * n_shifts] = \
np.pad(temp2, (j, diff_supp - j), mode='constant') / norm2
multichannel_signal = np.zeros((n_features, n_samples))
for i in range(n_samples):
random_atoms = np.random.choice(selected_atoms, size=5)
weight = 10 * np.random.randn(5, )
multichannel_signal[:, i] = np.dot(atoms[:, random_atoms], weight)
np.save('signal_gen', multichannel_signal)
np.save('atom_gen', atoms)
return multichannel_signal, atoms
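# Illustrative demo (added sketch, not in the original file): build a small
# multichannel signal matrix; note that the function also writes
# 'signal_gen.npy' and 'atom_gen.npy' to the working directory.
def _demo_sparse_signal_generator():
    signals, atoms = sparse_signal_generator(n_samples=10, n_features=200,
                                             frequencies=5, support_atoms=50,
                                             shift=True)
    print(signals.shape, atoms.shape)  # (200, 10), (200, 1500)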
def synthetic_data_non_negative(gaussian_noise=1, random_state=None):
"""
Generates synthetic non-negative data for dictionary learning tests.
This function generates a matrix generated from 7 basic atoms linearly
combined with random weights sparse over the atoms. A certain level of
gaussian noise is added to the signal.
Parameters
----------
gaussian_noise: float, optional
The level of noise to add to the synthetic data.
random_state: RandomState or int, optional
RandomState or seed used to generate RandomState for the
reproducibility of data. If None each time RandomState is randomly
initialised.
Returns
-------
array_like, shape=(80, 96)
Generated matrix of data
array_like, shape=(80, 7)
Coefficients
array_like, shape=(7, 96)
Dictionary
"""
number_of_features = 96
number_of_samples = 80
number_of_atoms = 7
rnd = check_random_state(random_state)
atoms = np.empty([number_of_features, number_of_atoms])
atoms[:, 0] = np.transpose(
np.concatenate((np.ones([30, 1]), np.zeros([66, 1]))))
atoms[:, 1] = np.transpose(
np.concatenate((np.zeros([60, 1]), np.ones([36, 1]))))
atoms[:, 2] = np.transpose(np.concatenate(
(np.zeros([24, 1]), np.ones([30, 1]), np.zeros([42, 1]))))
atoms[:, 3] = signal.gaussian(96, 5)
atoms[:, 4] = np.transpose(np.concatenate((np.zeros([17, 1]),
np.ones([15, 1]),
np.zeros([30, 1]),
| np.ones([24, 1]) | numpy.ones |
'''
File name: quaternion_utils.py
Programmed by: <NAME>
Date: 2019-09-28
Tools for dealing with scalar-first, transform, unit,
right quaternions with Malcolm Shuster's conventions.
'''
from numpy import array, zeros, cross, dot, concatenate, sin, cos
from numpy.linalg import norm
def qcomp(q1, q2):
''' Compose two quaternions.'''
q1s, q1v = q1[0], q1[1:]
q2s, q2v = q2[0], q2[1:]
s = q1s*q2s - dot(q2v, q1v)
v = q1s*q2v + q2s*q1v - cross(q1v, q2v)
return concatenate([array([s]), v])
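# Illustrative check (added sketch, not in the original file): composing a unit
# quaternion with the identity rotation [1, 0, 0, 0] leaves it unchanged under
# the scalar-first convention used here.
def _demo_qcomp():
    q_identity = array([1.0, 0.0, 0.0, 0.0])
    q = array([0.5, 0.5, 0.5, 0.5])  # already unit norm
    print(qcomp(q, q_identity))      # -> [0.5 0.5 0.5 0.5]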
def qnorm(q):
''' Normalize a quaternion. '''
return q/norm(q) if | norm(q) | numpy.linalg.norm |
import random
import cv2
import numpy as np
import tifffile as tiff
import earthpy.plot as ep
import matplotlib.pyplot as plt
from skimage import measure
from skimage import filters
def normalize(img):
min = img.min()
max = img.max()
x = 2.0 * (img - min) / (max - min) - 1.0
return x
def get_rand_patch(img, mask, sz=160, channel = None):
"""
:param img: ndarray with shape (x_sz, y_sz, num_channels)
:param mask: binary ndarray with shape (x_sz, y_sz, num_classes)
:param sz: size of random patch
    :param channel: 0: Buildings, 1: Roads & Tracks, 2: Trees, 3: Crops, 4: Water, or 'all'
:return: patch with shape (sz, sz, num_channels)
"""
assert len(img.shape) == 3 and img.shape[0] > sz and img.shape[1] > sz and img.shape[0:2] == mask.shape[0:2]
xc = random.randint(0, img.shape[0] - sz)
yc = random.randint(0, img.shape[1] - sz)
patch_img = img[xc:(xc + sz), yc:(yc + sz)]
patch_mask = mask[xc:(xc + sz), yc:(yc + sz)]
# Apply some random transformations
random_transformation = np.random.randint(1,8)
if random_transformation == 1: # reverse first dimension
patch_img = patch_img[::-1,:,:]
patch_mask = patch_mask[::-1,:,:]
elif random_transformation == 2: # reverse second dimension
patch_img = patch_img[:,::-1,:]
patch_mask = patch_mask[:,::-1,:]
elif random_transformation == 3: # transpose(interchange) first and second dimensions
patch_img = patch_img.transpose([1,0,2])
patch_mask = patch_mask.transpose([1,0,2])
elif random_transformation == 4:
patch_img = np.rot90(patch_img, 1)
patch_mask = np.rot90(patch_mask, 1)
elif random_transformation == 5:
patch_img = np.rot90(patch_img, 2)
patch_mask = np.rot90(patch_mask, 2)
elif random_transformation == 6:
patch_img = np.rot90(patch_img, 3)
patch_mask = np.rot90(patch_mask, 3)
else:
pass
if channel=='all':
return patch_img, patch_mask
if channel !='all':
patch_mask = patch_mask[:,:,channel]
return patch_img, patch_mask
def get_patches(x_dict, y_dict, n_patches, sz=160, channel = 'all'):
"""
    :param channel: 0: Buildings, 1: Roads & Tracks, 2: Trees, 3: Crops, 4: Water, or 'all'
"""
x = list()
y = list()
total_patches = 0
while total_patches < n_patches:
img_id = random.sample(x_dict.keys(), 1)[0]
img = x_dict[img_id]
mask = y_dict[img_id]
img_patch, mask_patch = get_rand_patch(img, mask, sz, channel)
x.append(img_patch)
y.append(mask_patch)
total_patches += 1
print('Generated {} patches'.format(total_patches))
return np.array(x), np.array(y)
def load_data(path = './data/'):
"""
:param path: the path of the dataset which includes mband and gt_mband folders
:return: X_DICT_TRAIN, Y_DICT_TRAIN, X_DICT_VALIDATION, Y_DICT_VALIDATION
"""
    trainIds = [str(i).zfill(2) for i in range(1, 25)]  # all available ids: from "01" to "24"
X_DICT_TRAIN = dict()
Y_DICT_TRAIN = dict()
X_DICT_VALIDATION = dict()
Y_DICT_VALIDATION = dict()
print('Reading images')
for img_id in trainIds:
img_m = normalize(tiff.imread(path + 'mband/{}.tif'.format(img_id)).transpose([1, 2, 0]))
mask = tiff.imread(path + 'gt_mband/{}.tif'.format(img_id)).transpose([1, 2, 0]) / 255
train_xsz = int(3/4 * img_m.shape[0]) # use 75% of image as train and 25% for validation
X_DICT_TRAIN[img_id] = img_m[:train_xsz, :, :]
Y_DICT_TRAIN[img_id] = mask[:train_xsz, :, :]
X_DICT_VALIDATION[img_id] = img_m[train_xsz:, :, :]
Y_DICT_VALIDATION[img_id] = mask[train_xsz:, :, :]
#print(img_id + ' read')
print('Images are read')
return X_DICT_TRAIN, Y_DICT_TRAIN, X_DICT_VALIDATION, Y_DICT_VALIDATION
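# Illustrative sketch (added, not part of the original file): a minimal
# training-data pipeline combining load_data and get_patches. It assumes the
# default './data/' layout with 'mband/' and 'gt_mband/' folders is present.
def _demo_patch_pipeline():
    x_train, y_train, x_val, y_val = load_data(path='./data/')
    x_patches, y_patches = get_patches(x_train, y_train, n_patches=100,
                                       sz=160, channel='all')
    print(x_patches.shape, y_patches.shape)  # e.g. (100, 160, 160, 8) and (100, 160, 160, 5)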
def plot_train_data(X_DICT_TRAIN, Y_DICT_TRAIN, image_number = 12):
    labels = ['Original Image with the 8 bands', 'Ground Truths: Buildings', 'Ground Truths: Roads & Tracks', 'Ground Truths: Trees', 'Ground Truths: Crops', 'Ground Truths: Water']
image_number = str(image_number).zfill(2)
number_of_GTbands = Y_DICT_TRAIN[image_number].shape[2]
f, axarr = plt.subplots(1, number_of_GTbands + 1, figsize=(25,25))
band_indices = [0, 1, 2]
print('Image shape is: ',X_DICT_TRAIN[image_number].shape)
print("Ground Truth's shape is: ",Y_DICT_TRAIN[image_number].shape)
ep.plot_rgb(X_DICT_TRAIN[image_number].transpose([2,0,1]),
rgb=band_indices,
title=labels[0],
stretch=True,
ax=axarr[0])
for i in range(0, number_of_GTbands):
axarr[i+1].imshow(Y_DICT_TRAIN[image_number][:,:,i])
#print(labels[i+1])
axarr[i+1].set_title(labels[i+1])
plt.show()
def Abs_sobel_thresh(image, orient='x', thresh=(40, 250), sobel_kernel=3):
    gray = image  # cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    if orient == 'x':
        # the Sobel operator approximates the derivative of the pixel values
        # along the horizontal direction
        sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    if orient == 'y':
        sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
abs_sobel=np.absolute(sobel)
scaled_sobel=(255*abs_sobel/np.max(abs_sobel))
grad_binary= | np.zeros_like(scaled_sobel) | numpy.zeros_like |
from torch.utils.data import DataLoader
from dataio.loader import get_dataset, get_dataset_path
from dataio.transformation import get_dataset_transformation
from utils.util import json_file_to_pyobj
from utils.visualiser import Visualiser
from models import get_model
import os, time
# import matplotlib
# matplotlib.use('Agg')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import math, numpy
import numpy as np
from scipy.misc import imresize
from skimage.transform import resize
def plotNNFilter(units, figure_id, interp='bilinear', colormap=cm.jet, colormap_lim=None, title=''):
plt.ion()
filters = units.shape[2]
n_columns = round(math.sqrt(filters))
n_rows = math.ceil(filters / n_columns) + 1
fig = plt.figure(figure_id, figsize=(n_rows*3,n_columns*3))
fig.clf()
for i in range(filters):
ax1 = plt.subplot(n_rows, n_columns, i+1)
plt.imshow(units[:,:,i].T, interpolation=interp, cmap=colormap)
plt.axis('on')
ax1.set_xticklabels([])
ax1.set_yticklabels([])
plt.colorbar()
if colormap_lim:
plt.clim(colormap_lim[0],colormap_lim[1])
plt.subplots_adjust(wspace=0, hspace=0)
plt.tight_layout()
plt.suptitle(title)
def plotNNFilterOverlay(input_im, units, figure_id, interp='bilinear',
colormap=cm.jet, colormap_lim=None, title='', alpha=0.8):
plt.ion()
filters = units.shape[2]
fig = plt.figure(figure_id, figsize=(5,5))
fig.clf()
for i in range(filters):
plt.imshow(input_im[:,:,0], interpolation=interp, cmap='gray')
plt.imshow(units[:,:,i], interpolation=interp, cmap=colormap, alpha=alpha)
plt.axis('off')
plt.colorbar()
plt.title(title, fontsize='small')
if colormap_lim:
plt.clim(colormap_lim[0],colormap_lim[1])
plt.subplots_adjust(wspace=0, hspace=0)
plt.tight_layout()
# plt.savefig('{}/{}.png'.format(dir_name,time.time()))
## Load options
PAUSE = .01
#config_name = 'config_sononet_attention_fs8_v6.json'
#config_name = 'config_sononet_attention_fs8_v8.json'
#config_name = 'config_sononet_attention_fs8_v9.json'
#config_name = 'config_sononet_attention_fs8_v10.json'
#config_name = 'config_sononet_attention_fs8_v11.json'
#config_name = 'config_sononet_attention_fs8_v13.json'
#config_name = 'config_sononet_attention_fs8_v14.json'
#config_name = 'config_sononet_attention_fs8_v15.json'
#config_name = 'config_sononet_attention_fs8_v16.json'
#config_name = 'config_sononet_grid_attention_fs8_v1.json'
config_name = 'config_sononet_grid_attention_fs8_deepsup_v1.json'
config_name = 'config_sononet_grid_attention_fs8_deepsup_v2.json'
config_name = 'config_sononet_grid_attention_fs8_deepsup_v3.json'
config_name = 'config_sononet_grid_attention_fs8_deepsup_v4.json'
# config_name = 'config_sononet_grid_att_fs8_avg.json'
config_name = 'config_sononet_grid_att_fs8_avg_v2.json'
# config_name = 'config_sononet_grid_att_fs8_avg_v3.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v4.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v5.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v5.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v6.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v7.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v8.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v9.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v10.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v11.json'
#config_name = 'config_sononet_grid_att_fs8_avg_v12.json'
config_name = 'config_sononet_grid_att_fs8_avg_v12_scratch.json'
config_name = 'config_sononet_grid_att_fs4_avg_v12.json'
#config_name = 'config_sononet_grid_attention_fs8_v3.json'
json_opts = json_file_to_pyobj('/vol/bitbucket/js3611/projects/transfer_learning/ultrasound/configs_2/{}'.format(config_name))
train_opts = json_opts.training
dir_name = os.path.join('visualisation_debug', config_name)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
os.makedirs(os.path.join(dir_name,'pos'))
os.makedirs(os.path.join(dir_name,'neg'))
# Setup the NN Model
model = get_model(json_opts.model)
if hasattr(model.net, 'classification_mode'):
model.net.classification_mode = 'attention'
if hasattr(model.net, 'deep_supervised'):
model.net.deep_supervised = False
# Setup Dataset and Augmentation
dataset_class = get_dataset(train_opts.arch_type)
dataset_path = get_dataset_path(train_opts.arch_type, json_opts.data_path)
dataset_transform = get_dataset_transformation(train_opts.arch_type, opts=json_opts.augmentation)
# Setup Data Loader
dataset = dataset_class(dataset_path, split='train', transform=dataset_transform['valid'])
data_loader = DataLoader(dataset=dataset, num_workers=1, batch_size=1, shuffle=True)
# test
for iteration, data in enumerate(data_loader, 1):
model.set_input(data[0], data[1])
cls = dataset.label_names[int(data[1])]
model.validate()
pred_class = model.pred[1]
pred_cls = dataset.label_names[int(pred_class)]
#########################################################
# Display the input image and Down_sample the input image
input_img = model.input[0,0].cpu().numpy()
#input_img = numpy.expand_dims(imresize(input_img, (fmap_size[0], fmap_size[1]), interp='bilinear'), axis=2)
input_img = numpy.expand_dims(input_img, axis=2)
# plotNNFilter(input_img, figure_id=0, colormap="gray")
plotNNFilterOverlay(input_img, numpy.zeros_like(input_img), figure_id=0, interp='bilinear',
colormap=cm.jet, title='[GT:{}|P:{}]'.format(cls, pred_cls),alpha=0)
chance = np.random.random() < 0.01 if cls == "BACKGROUND" else 1
if cls != pred_cls:
plt.savefig('{}/neg/{:03d}.png'.format(dir_name,iteration))
elif cls == pred_cls and chance:
plt.savefig('{}/pos/{:03d}.png'.format(dir_name,iteration))
#########################################################
# Compatibility Scores overlay with input
attentions = []
for i in [1,2]:
fmap = model.get_feature_maps('compatibility_score%d'%i, upscale=False)
if not fmap:
continue
# Output of the attention block
fmap_0 = fmap[0].squeeze().permute(1,2,0).cpu().numpy()
fmap_size = fmap_0.shape
# Attention coefficient (b x c x w x h x s)
attention = fmap[1].squeeze().cpu().numpy()
attention = attention[:, :]
#attention = numpy.expand_dims(resize(attention, (fmap_size[0], fmap_size[1]), mode='constant', preserve_range=True), axis=2)
attention = numpy.expand_dims(resize(attention, (input_img.shape[0], input_img.shape[1]), mode='constant', preserve_range=True), axis=2)
# this one is useless
#plotNNFilter(fmap_0, figure_id=i+3, interp='bilinear', colormap=cm.jet, title='compat. feature %d' %i)
plotNNFilterOverlay(input_img, attention, figure_id=i, interp='bilinear', colormap=cm.jet, title='[GT:{}|P:{}] compat. {}'.format(cls,pred_cls,i), alpha=0.5)
attentions.append(attention)
#plotNNFilterOverlay(input_img, attentions[0], figure_id=4, interp='bilinear', colormap=cm.jet, title='[GT:{}|P:{}] compat. (all)'.format(cls, pred_cls), alpha=0.5)
plotNNFilterOverlay(input_img, | numpy.mean(attentions,0) | numpy.mean |
import os
import PIL
import numpy as np
import scipy.sparse
import subprocess
import _pickle as cPickle
import math
import glob
from .imdb import imdb
from .imdb import ROOT_DIR
from ..utils.cython_bbox import bbox_overlaps
from ..utils.boxes_grid import get_boxes_grid
# TODO: make fast_rcnn irrelevant
# >>>> obsolete, because it depends on sth outside of this project
from ..fast_rcnn.config import cfg
from ..rpn_msr.generate_anchors import generate_anchors
# <<<< obsolete
class kitti(imdb):
def __init__(self, image_set, kitti_path=None):
imdb.__init__(self, 'kitti_' + image_set)
self._image_set = image_set
self._kitti_path = self._get_default_path() if kitti_path is None \
else kitti_path
self._data_path = os.path.join(self._kitti_path, 'data_object_image_2')
self._classes = ('__background__', 'Car', 'Pedestrian', 'Cyclist')
self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self._image_ext = '.png'
self._image_index = self._load_image_set_index_new()
# Default to roidb handler
if cfg.IS_RPN:
self._roidb_handler = self.gt_roidb
else:
self._roidb_handler = self.region_proposal_roidb
# num of subclasses
if image_set == 'train' or image_set == 'val':
self._num_subclasses = 125 + 24 + 24 + 1
prefix = 'validation'
else:
self._num_subclasses = 227 + 36 + 36 + 1
prefix = 'test'
self.config = {'top_k': 100000}
# statistics for computing recall
        self._num_boxes_all = np.zeros(self.num_classes, dtype=int)
        self._num_boxes_covered = np.zeros(self.num_classes, dtype=int)
self._num_boxes_proposal = 0
assert os.path.exists(self._kitti_path), \
'KITTI path does not exist: {}'.format(self._kitti_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self.image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
# set the prefix
if self._image_set == 'test':
prefix = 'testing/image_2'
else:
prefix = 'training/image_2'
image_path = os.path.join(self._data_path, prefix, index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
obsolete, using _load_image_set_index_new instead
Load the indexes listed in this dataset's image set file.
"""
image_set_file = os.path.join(self._kitti_path, self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.rstrip('\n') for x in f.readlines()]
return image_index
def _load_image_set_index_new(self):
"""
Load the indexes listed in this dataset's image set file.
"""
image_set_file = os.path.join(self._kitti_path, 'training/image_2/')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
image_index = os.listdir(image_set_file)
image_set_file = image_set_file + '*.png'
image_index = glob.glob(image_set_file)
return image_index
def _get_default_path(self):
"""
Return the default path where KITTI is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'KITTI')
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.SUBCLS_NAME + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print ('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_kitti_voxel_exemplar_annotation(index)
for index in self.image_index]
if cfg.IS_RPN:
# print out recall
for i in range(1, self.num_classes):
print( '{}: Total number of boxes {:d}'.format(self.classes[i], self._num_boxes_all[i]))
print ('{}: Number of boxes covered {:d}'.format(self.classes[i], self._num_boxes_covered[i]))
print ('{}: Recall {:f}'.format(self.classes[i], float(self._num_boxes_covered[i]) / float(self._num_boxes_all[i])))
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print( 'wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def _load_kitti_annotation(self, index):
"""
Load image and bounding boxes info from txt file in the KITTI format.
"""
if self._image_set == 'test':
lines = []
else:
filename = os.path.join(self._data_path, 'training', 'label_2', index + '.txt')
lines = []
with open(filename) as f:
for line in f:
line = line.replace('Van', 'Car')
words = line.split()
cls = words[0]
truncation = float(words[1])
occlusion = int(words[2])
height = float(words[7]) - float(words[5])
if cls in self._class_to_ind and truncation < 0.5 and occlusion < 3 and height > 25:
lines.append(line)
num_objs = len(lines)
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
for ix, line in enumerate(lines):
words = line.split()
cls = self._class_to_ind[words[0]]
boxes[ix, :] = [float(n) for n in words[4:8]]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
# compute overlaps between grid boxes and gt boxes in multi-scales
# rescale the gt boxes
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, boxes * scale))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
# compute grid boxes
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
# compute overlap
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
# check how many gt boxes are covered by grids
if num_objs != 0:
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in range(1, self.num_classes):
fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
index_covered = np.unique(index[fg_inds])
for i in range(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
else:
assert len(cfg.TRAIN.SCALES_BASE) == 1
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
# faster rcnn region proposal
anchors = generate_anchors()
num_anchors = anchors.shape[0]
# image size
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
# height and width of the heatmap
height = np.round((image_height * scale - 1) / 4.0 + 1)
height = np.floor((height - 1) / 2 + 1 + 0.5)
height = np.floor((height - 1) / 2 + 1 + 0.5)
width = np.round((image_width * scale - 1) / 4.0 + 1)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
# gt boxes
gt_boxes = boxes * scale
# 1. Generate proposals from bbox deltas and shifted anchors
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = num_anchors
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
# compute overlap
overlaps_grid = bbox_overlaps(all_anchors.astype(np.float), gt_boxes.astype(np.float))
# check how many gt boxes are covered by anchors
if num_objs != 0:
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in range(1, self.num_classes):
fg_inds.extend(np.where((gt_classes == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
for i in range(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[fg_inds] == i)[0])
return {'boxes' : boxes,
'gt_classes': gt_classes,
'gt_subclasses': gt_subclasses,
'gt_subclasses_flipped': gt_subclasses_flipped,
'gt_overlaps' : overlaps,
'gt_subindexes': subindexes,
'gt_subindexes_flipped': subindexes_flipped,
'flipped' : False}
def _load_kitti_voxel_exemplar_annotation(self, index):
"""
Load image and bounding boxes info from txt file in the KITTI voxel exemplar format.
"""
if self._image_set == 'train':
prefix = 'validation'
elif self._image_set == 'trainval':
prefix = 'test'
else:
return self._load_kitti_annotation(index)
filename = os.path.join(self._kitti_path, cfg.SUBCLS_NAME, prefix, index + '.txt')
assert os.path.exists(filename), \
'Path does not exist: {}'.format(filename)
# the annotation file contains flipped objects
lines = []
lines_flipped = []
with open(filename) as f:
for line in f:
words = line.split()
subcls = int(words[1])
is_flip = int(words[2])
if subcls != -1:
if is_flip == 0:
lines.append(line)
else:
lines_flipped.append(line)
num_objs = len(lines)
# store information of flipped objects
assert (num_objs == len(lines_flipped)), 'The number of flipped objects is not the same!'
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
for ix, line in enumerate(lines_flipped):
words = line.split()
subcls = int(words[1])
gt_subclasses_flipped[ix] = subcls
boxes = np.zeros((num_objs, 4), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
for ix, line in enumerate(lines):
words = line.split()
cls = self._class_to_ind[words[0]]
subcls = int(words[1])
boxes[ix, :] = [float(n) for n in words[3:7]]
gt_classes[ix] = cls
gt_subclasses[ix] = subcls
overlaps[ix, cls] = 1.0
subindexes[ix, cls] = subcls
subindexes_flipped[ix, cls] = gt_subclasses_flipped[ix]
overlaps = scipy.sparse.csr_matrix(overlaps)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
if cfg.IS_RPN:
if cfg.IS_MULTISCALE:
# compute overlaps between grid boxes and gt boxes in multi-scales
# rescale the gt boxes
boxes_all = np.zeros((0, 4), dtype=np.float32)
for scale in cfg.TRAIN.SCALES:
boxes_all = np.vstack((boxes_all, boxes * scale))
gt_classes_all = np.tile(gt_classes, len(cfg.TRAIN.SCALES))
# compute grid boxes
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
boxes_grid, _, _ = get_boxes_grid(image_height, image_width)
# compute overlap
overlaps_grid = bbox_overlaps(boxes_grid.astype(np.float), boxes_all.astype(np.float))
# check how many gt boxes are covered by grids
if num_objs != 0:
index = np.tile(range(num_objs), len(cfg.TRAIN.SCALES))
max_overlaps = overlaps_grid.max(axis = 0)
fg_inds = []
for k in range(1, self.num_classes):
fg_inds.extend(np.where((gt_classes_all == k) & (max_overlaps >= cfg.TRAIN.FG_THRESH[k-1]))[0])
index_covered = np.unique(index[fg_inds])
for i in range(self.num_classes):
self._num_boxes_all[i] += len(np.where(gt_classes == i)[0])
self._num_boxes_covered[i] += len(np.where(gt_classes[index_covered] == i)[0])
else:
assert len(cfg.TRAIN.SCALES_BASE) == 1
scale = cfg.TRAIN.SCALES_BASE[0]
feat_stride = 16
# faster rcnn region proposal
base_size = 16
ratios = [3.0, 2.0, 1.5, 1.0, 0.75, 0.5, 0.25]
scales = 2**np.arange(1, 6, 0.5)
anchors = generate_anchors(base_size, ratios, scales)
num_anchors = anchors.shape[0]
# image size
s = PIL.Image.open(self.image_path_from_index(index)).size
image_height = s[1]
image_width = s[0]
# height and width of the heatmap
height = np.round((image_height * scale - 1) / 4.0 + 1)
height = | np.floor((height - 1) / 2 + 1 + 0.5) | numpy.floor |
import numpy as np
import matplotlib.pyplot as plt
def addGaussianNoise(d, para, testMode='n'):
    # This function adds noise as follows:
# d_i = d_i with probability 1-para['rate']
# d_i = d_i + \epsilon \xi_i with probability para['rate']
# where \xi_i follow the standard normal distribution,
# para['rate'] is the corruption percentage,
# para['noise_level'] is the noise level,
#
# The output:
# d: the noisy data;
    # sig: the covariance of the added noise.
#
    # Ref: <NAME>, A variational Bayesian method to inverse problems with impulsive noise,
# Journal of Computational Physics, 231, 2012, 423-435 (Page 428, Section 4).
if testMode == 'y':
np.random.seed(1)
noise_level = para['noise_level']
len_d = len(d)
r = para['rate']
noise = | np.random.normal(0, 1, len_d) | numpy.random.normal |
import argparse
import pickle
from collections import defaultdict
from pprint import pprint
from itertools import combinations
import numpy as np
from scipy.stats import wilcoxon
if __name__ == "__main__":
np.set_printoptions(precision=2)
parser = argparse.ArgumentParser()
parser.add_argument('inputs', help='List of pickle files with an ndarray representing a confusion matrix.')
args = parser.parse_args()
table = defaultdict(lambda: defaultdict(dict))
with open(args.inputs) as g:
for l in g:
with open(l.rstrip(), 'rb') as f:
_cm = pickle.load(f)
name = l.split('/')[-1].split('.')[0].split('_')[0]
protocol = l.split('/')[-3]
corpus = l.split('/')[-2]
correct = | np.trace(_cm) | numpy.trace |
# -*- coding: utf-8 -*-
"""
This code evaluates the outputs from calibrated BusSim
@author: geomlk
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
os.chdir("/Users/minhkieu/Documents/Github/dust/Projects/ABM_DA/bussim/")
'''
Step 1: Load calibration results
'''
def load_calibrated_params_IncreaseRate(IncreaseRate):
name0 = ['./Calibration/BusSim_Model2_calibration_IncreaseRate_',str(IncreaseRate),'.pkl']
str1 = ''.join(name0)
with open(str1, 'rb') as f:
model_params, best_mean_model2,Sol_archived_mean,Sol_archived_std,PI_archived = pickle.load(f)
name0 = ['./Calibration/BusSim_Model1_calibration_IncreaseRate_',str(IncreaseRate),'.pkl']
str1 = ''.join(name0)
with open(str1, 'rb') as f:
model_params, best_mean_model1,Sol_archived_mean,Sol_archived_std,PI_archived = pickle.load(f)
return best_mean_model1,best_mean_model2
def load_calibrated_params_maxDemand(maxDemand):
name0 = ['./Calibration/BusSim_Model2_calibration_static_maxDemand_',str(maxDemand),'.pkl']
str1 = ''.join(name0)
with open(str1, 'rb') as f:
model_params, best_mean_model2,Sol_archived_mean,Sol_archived_std,PI_archived = pickle.load(f)
name0 = ['./Calibration/BusSim_Model1_calibration_static_maxDemand_',str(maxDemand),'.pkl']
str1 = ''.join(name0)
with open(str1, 'rb') as f:
model_params, best_mean_model1,Sol_archived_mean,Sol_archived_std,PI_archived = pickle.load(f)
return best_mean_model1,best_mean_model2
'''
Step 2: Load synthetic real-time data
'''
def load_actual_params_IncreaseRate(IncreaseRate):
#load up a model from a Pickle
name0 = ['./Data/Realtime_data_IncreaseRate_',str(IncreaseRate),'.pkl']
str1 = ''.join(name0)
with open(str1, 'rb') as f:
model_params,t,x,GroundTruth = pickle.load(f)
return model_params,t,x
def load_actual_params_maxDemand(maxDemand):
#load up a model from a Pickle
name0 = ['./Data/Realtime_data_static_maxDemand_',str(maxDemand),'.pkl']
str1 = ''.join(name0)
with open(str1, 'rb') as f:
model_params,t,x,GroundTruth = pickle.load(f)
return model_params,t,x
#define RMSE function
def rmse(yhat,y):
return np.sqrt(np.square( | np.subtract(yhat, y) | numpy.subtract |
import os
import numpy as np
from gym import utils, spaces
from gym.envs.mujoco import mujoco_env
def body_index(model, body_name):
return model.body_names.index(body_name)
def body_pos(model, body_name):
ind = body_index(model, body_name)
return model.body_pos[ind]
def body_quat(model, body_name):
ind = body_index(model, body_name)
return model.body_quat[ind]
def body_frame(env, body_name):
"""
Returns the rotation matrix to convert to the frame of the named body
"""
ind = body_index(env.model, body_name)
b = env.data.body_xpos[ind]
q = env.data.body_xquat[ind]
qr, qi, qj, qk = q
s = np.square(q).sum()
R = np.array([
[1 - 2 * s * (qj ** 2 + qk ** 2), 2 * s * (qi * qj - qk * qr), 2 * s * (qi * qk + qj * qr)],
[2 * s * (qi * qj + qk * qr), 1 - 2 * s * (qi ** 2 + qk ** 2), 2 * s * (qj * qk - qi * qr)],
[2 * s * (qi * qk - qj * qr), 2 * s * (qj * qk + qi * qr), 1 - 2 * s * (qi ** 2 + qj ** 2)]
])
return R
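# Illustrative sanity check (assumption, not part of the original file): for the
# identity quaternion q = (1, 0, 0, 0) the sum of squares s equals 1 and R above
# reduces to the 3x3 identity matrix, i.e. the body frame coincides with the world frame.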
class YumiReacherEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
self.high = np.array([40, 35, 30, 20, 15, 10, 10])
self.low = -self.high
self.wt = 0.0
self.we = 0.0
root_dir = os.path.dirname(__file__)
xml_path = os.path.join(root_dir, 'yumi', 'yumi.xml')
mujoco_env.MujocoEnv.__init__(self, xml_path, 1)
utils.EzPickle.__init__(self)
# Manually define this to let a be in [-1, 1]^d
self.action_space = spaces.Box(low=-np.ones(7) * 2, high=np.ones(7) * 2, dtype=np.float32)
self.init_params()
def init_params(self, wt=0.9, x=0.0, y=0.0, z=0.2):
"""
:param wt: Float in range (0, 1), weight on euclidean loss
:param x, y, z: Position of goal
"""
self.wt = wt
self.we = 1 - wt
qpos = self.init_qpos
qpos[-3:] = [x, y, z]
qvel = self.init_qvel
self.set_state(qpos, qvel)
def step(self, a):
a_real = a * self.high / 2
self.do_simulation(a_real, self.frame_skip)
reward = self._reward(a_real)
done = False
ob = self._get_obs()
return ob, reward, done, {}
def _reward(self, a):
eef = self.get_body_com('gripper_r_base')
goal = self.get_body_com('goal')
goal_distance = np.linalg.norm(eef - goal)
# This is the norm of the joint angles
# The ** 4 is to create a "flat" region around [0, 0, 0, ...]
q_norm = np.linalg.norm(self.sim.data.qpos.flat[:7]) ** 4 / 100.0
# TODO in the future
# f_desired = np.eye(3)
# f_current = body_frame(self, 'gripper_r_base')
reward = -(
            self.wt * goal_distance * 2.0 + # Scalars here are to make this part of the reward approx. [0, 1]
self.we * np.linalg.norm(a) / 40 +
q_norm
)
return reward
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[:7],
| np.clip(self.sim.data.qvel.flat[:7], -10, 10) | numpy.clip |
import bisect
import numpy as np
########################################
# Algorithms
########################################
# Compute indices of slice of sorted data which fit into the provided range
def slice_sorted(data, rng):
return [
bisect.bisect_left(data, rng[0]),
bisect.bisect_right(data, rng[1])]
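# Illustrative sketch (not part of the original module): with sorted input
#   data = [1, 2, 4, 4, 7]; slice_sorted(data, (2, 4)) -> [1, 4]
# i.e. data[1:4] == [2, 4, 4] are the values falling inside the closed range.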
# Take a list of strings, return a unique list of strings of the same length
# Non-unique strings will have their index appended at the end
# It is guaranteed that the index increments with position in the list
def string_list_pad_unique(data1D, suffix=''):
d = {}
rez = []
for elem in data1D:
if elem not in d.keys():
d[elem] = 0
rez += [elem]
else:
d[elem] += 1
rez += [elem + suffix + str(d[elem])]
return rez
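# Illustrative sketch (assumed usage, not in the original file):
#   string_list_pad_unique(["a", "b", "a", "a"], suffix="_")
#       -> ["a", "b", "a_1", "a_2"]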
########################################
# Permutation operations
########################################
# Finds permutation map A->B of elements of two arrays, which are permutations of each other
def perm_map_arr(a, b):
return np.where(b.reshape(b.size, 1) == a)[1]
# Same as perm_map_arr, but for string characters
def perm_map_str(a, b):
return perm_map_arr(np.array(list(a)), np.array(list(b)))
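# Illustrative sketch (assumption, not part of the original module): for arrays
# with unique elements the returned indices satisfy a[perm] == b, e.g.
#   perm_map_arr(np.array([10, 20, 30]), np.array([30, 10, 20])) -> array([2, 0, 1])
#   perm_map_str("abc", "cab")                                   -> array([2, 0, 1])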
########################################
# Set operations
########################################
# Returns a list only containing unique items
# Unlike Set(), order of unique items from the original list is preserved
def unique_ordered(lst):
return list(dict.fromkeys(lst))
# Returns set subtraction of s1 - s2, preserving order of s1
def unique_subtract(s1, s2):
rez = [s for s in s1 if s not in s2]
if type(s1) == list:
return rez
elif type(s1) == str:
return "".join(rez)
elif type(s1) == tuple:
return tuple(rez)
else:
raise ValueError("Unexpected Type", type(s1))
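# Illustrative sketch (assumed behaviour): the result keeps s1's order and type,
#   unique_subtract([1, 2, 3], [2]) -> [1, 3]
#   unique_subtract("spr", "sp")    -> "r"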
########################################
# Non-uniform dimension array lists
########################################
# Test if a given dimension is part of a dimension order
def assert_get_dim_idx(dimOrd, trgDim, label="TASK_NAME", canonical=False):
if trgDim in dimOrd:
return dimOrd.index(trgDim)
else:
if canonical:
dimNameDict = {
"p": "processes (aka channels)",
"s": "samples (aka times)",
"r": "repetitions (aka trials)"
}
raise ValueError(label, "requires", dimNameDict[trgDim], "dimension; have", dimOrd)
else:
raise ValueError(label, "not found", trgDim, "in", dimOrd)
########################################
# Non-uniform dimension array lists
########################################
def set_list_shapes(lst, axis=None):
if axis is None:
return list(set([elem.shape for elem in lst]))
else:
return list(set([elem.shape[axis] for elem in lst]))
def list_assert_get_uniform_shape(lst, axis=None):
if len(lst) == 0:
raise ValueError("Got empty list")
shapes = set_list_shapes(lst, axis)
if len(shapes) > 1:
raise ValueError("Expected uniform shapes for axis", axis, "; got", shapes)
return next(iter(shapes))
########################################
# Multivariate dimension operations
########################################
# Transpose data dimensions given permutation of axis labels
# If augment option is on, then extra axis of length 1 are added when missing
def numpy_transpose_byorder(data, orderSrc, orderTrg, augment=False):
if data.ndim != len(orderSrc):
raise ValueError("Incompatible data", data.shape, "and order", orderSrc)
if not augment:
if sorted(orderSrc) != sorted(orderTrg):
raise ValueError('Cannot transform', orderSrc, "to", orderTrg)
return data.transpose(perm_map_str(orderSrc, orderTrg))
else:
if not set(orderSrc).issubset(set(orderTrg)):
raise ValueError('Cannot augment', orderSrc, "to", orderTrg)
nIncr = len(orderTrg) - len(orderSrc)
newShape = data.shape + tuple([1]*nIncr)
newOrder = orderSrc + unique_subtract(orderTrg, orderSrc)
return data.reshape(newShape).transpose(perm_map_str(newOrder, orderTrg))
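# Illustrative sketch (assumption): "s", "p", "r" stand for samples, processes,
# repetitions as used elsewhere in this module.
#   x = np.zeros((10, 3))                                        # order "sp"
#   numpy_transpose_byorder(x, "sp", "ps").shape                 # -> (3, 10)
#   numpy_transpose_byorder(x, "sp", "psr", augment=True).shape  # -> (3, 10, 1)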
# Return original shape, but replace all axes that have been reduced with 1s
# So final shape looks as if it is of the same dimension as original
# Useful for broadcasting reduced arrays onto original arrays
def numpy_shape_reduced_axes(shapeOrig, reducedAxis):
if reducedAxis is None: # All axes have been reduced
return tuple([1]*len(shapeOrig))
else:
if not isinstance(reducedAxis, tuple):
reducedAxis = (reducedAxis,)
shapeNew = list(shapeOrig)
for idx in reducedAxis:
shapeNew[idx] = 1
return tuple(shapeNew)
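# Illustrative sketch (assumed usage): reshape a reduced array so it broadcasts
# back onto the original, e.g. for x of shape (4, 5, 6)
#   numpy_shape_reduced_axes(x.shape, 1)  -> (4, 1, 6)
#   x - x.mean(axis=1).reshape(numpy_shape_reduced_axes(x.shape, 1))  # broadcasts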
# Add extra dimensions of size 1 to array at given locations
def numpy_add_empty_axes(x, axes):
newShape = list(x.shape)
for axis in axes:
newShape.insert(axis, 1)
return x.reshape(tuple(newShape))
# Reshape array by merging all dimensions between l and r
def numpy_merge_dimensions(data, l, r):
shOrig = list(data.shape)
shNew = tuple(shOrig[:l] + [np.prod(shOrig[l:r])] + shOrig[r:])
return data.reshape(shNew)
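# Illustrative sketch (assumption): dimensions in [l, r) are collapsed into one,
#   numpy_merge_dimensions(np.zeros((2, 3, 4, 5)), 1, 3).shape  -> (2, 12, 5)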
# Move a dimension from one place to another
# Example1: [0,1,2,3,4,5], 3, 1 -> [0,3,1,2,4,5]
# Example2: [0,1,2,3,4,5], 1, 3 -> [0,2,3,1,4,5]
def numpy_move_dimension(data, axisOld, axisNew):
ord = list(np.arange(data.ndim))
if axisOld == axisNew:
return data
elif axisOld > axisNew:
ordNew = ord[:axisNew] + [axisOld] + ord[axisNew:axisOld] + ord[axisOld+1:]
else:
ordNew = ord[:axisOld] + ord[axisOld+1:axisNew+1] + [axisOld] + ord[axisNew+1:]
return data.transpose(ordNew)
# Specify exact indices for some axis of the array
# Returns array of smaller dimension
def numpy_take_all(a, axes, indices):
slices = tuple(indices[axes.index(i)] if i in axes else slice(None) for i in range(a.ndim))
return a[slices]
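# Illustrative sketch (assumed usage):
#   numpy_take_all(np.zeros((4, 5, 6)), axes=[1], indices=[2]).shape  -> (4, 6)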
# Take list whose values are either None or arrays of the same shape
# Figure out shape, replace None with np.nan arrays of that shape
# Convert whole thing to array
def numpy_nonelist_to_array(lst):
noneIdxs = np.array([elem is None for elem in lst]).astype(bool)
if np.all(~noneIdxs):
shapes = set_list_shapes(lst, axis=None)
if len(shapes) != 1:
raise ValueError("List should contain arrays of the same shape, have", shapes)
# All arrays present
return np.array(lst)
if np.all(noneIdxs):
raise ValueError("List only contains None values, can't figure out shape")
# For all normal arrays, check that their shape is the same
trgShape = list_assert_get_uniform_shape([elem for elem in lst if elem is not None])
baseDim = len(trgShape)
if baseDim == 0:
# have a list of scalar values
return np.array([val if val is not None else np.nan for val in lst])
else:
# Replace all None's with NAN arrays of correct shape
nonePatch = np.full(trgShape, np.nan)
return np.array([e if e is not None else nonePatch for e in lst])
# Assign each string to one key out of provided
# If no keys found, assign special key
# If more than 1 key found, raise error
def bin_data_by_keys(strLst, keys):
keysArr = np.array(keys, dtype=object)
rez = []
for s in strLst:
matchKeys = np.array([k in s for k in keys], dtype=bool)
nMatch = | np.sum(matchKeys) | numpy.sum |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 30 15:51:43 2019
@author: <NAME>
@email: <EMAIL>
This code is for DNN training
Explained in paper:
<NAME>, <NAME>, Deep Self-Supervised Hierarchical Clustering for Speaker Diarization, Interspeech, 2020
Check the main function train_with_threshold to run for different iterations
"""
import os
import sys
import numpy as np
import random
import pickle
import subprocess
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.optim as optim
from models_train_ahc import weight_initialization,Deep_Ahc_model
import torch.utils.data as dloader
from arguments import read_arguments as params
from pdb import set_trace as bp
sys.path.insert(0,'services/')
import kaldi_io
import services.agglomerative as ahc
from services.path_integral_clustering import PIC_ami,PIC_org_threshold,PIC_org, PIC_callhome, PIC_callhome_threshold, PIC_ami_threshold
sys.path.insert(0,'tools_diar/steps/libs')
# read arguments
opt = params()
#select device
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpuid
# torch.manual_seed(777) # reproducibility
loss_lamda = opt.alpha
dataset=opt.dataset
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# device = 'cpu'
print(device)
# Model defined here
def normalize(system):
# to make zero mean and unit variance
my_mean = np.mean(system)
my_std = np.std(system)
system = system-my_mean
system /= my_std
return system
def compute_affinity_loss(output,cluster,lamda):
mylist = np.arange(len(cluster))
# print('mylist:',mylist)
loss=0.0
biglamda=0.0
for k,c in enumerate(cluster):
for i in range(len(c)-1):
for j in range(i+1,len(c)):
nlist=np.delete(mylist,k,0)
# bp()
try:
ind=np.random.choice(nlist)
except:
bp()
b = cluster[ind]
# bp()
bi = i % len(b)
# min(i,abs(i-len(b)))
loss += -output[c[i],c[j]]+ lamda*output[c[i],b[bi]]+ lamda*output[c[j],b[bi]]
biglamda +=1
return loss/biglamda
def mostFrequent(arr, n):
# Insert all elements in Hash.
Hash = dict()
for i in range(n):
if arr[i] in Hash.keys():
Hash[arr[i]] += 1
else:
Hash[arr[i]] = 1
# find the max frequency
max_count = 0
res = -1
for i in Hash:
if (max_count < Hash[i]):
res = i
max_count = Hash[i]
return res
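# Illustrative sketch (assumed usage): returns the value with the highest count,
#   mostFrequent([1, 3, 3, 2, 3, 1], 6) -> 3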
class Deep_AHC:
def __init__(self,data,pldamodel,fname,reco2utt,xvecdimension,model,optimizer,n_prime,writer=None):
self.reco2utt = reco2utt
self.xvecdimension = xvecdimension
self.model = model
self.optimizer = optimizer
self.n_prime = n_prime
self.fname = fname
self.final =0
self.forcing_label = 0
self.results_dict={}
self.pldamodel = pldamodel
self.data = data
self.lamda = 0.0
self.K = 30
self.z = 0.1
def write_results_dict(self, output_file):
"""Writes the results in label file"""
f = self.fname
output_label = open(output_file+'/'+f+'.labels','w')
hypothesis = self.results_dict[f]
meeting_name = f
reco = self.reco2utt.split()[0]
utts = self.reco2utt.rstrip().split()[1:]
if reco == meeting_name:
for j,utt in enumerate(utts):
towrite = utt +' '+str(hypothesis[j])+'\n'
output_label.writelines(towrite)
output_label.close()
rttm_channel=0
segmentsfile = opt.segments+'/'+f+'.segments'
python = opt.which_python
cmd = '{} tools_diar/diarization/make_rttm.py --rttm-channel 0 {} {}/{}.labels {}/{}.rttm' .format(python,segmentsfile,output_file,f,output_file,f)
os.system(cmd)
def compute_score(self,rttm_gndfile,rttm_newfile,outpath,overlap):
fold_local='services/'
scorecode='score.py -r '
# print('--------------------------------------------------')
if not overlap:
cmd=opt.which_python +' '+ fold_local + 'dscore-master/' + scorecode + rttm_gndfile + ' --ignore_overlaps --collar 0.25 -s ' + rttm_newfile + ' > ' + outpath + '.txt'
os.system(cmd)
else:
cmd=opt.which_python + ' '+ fold_local + 'dscore-master/' + scorecode + rttm_gndfile + ' -s ' + rttm_newfile + ' > ' + outpath + '.txt'
os.system(cmd)
# print('----------------------------------------------------')
# subprocess.check_call(cmd,stderr=subprocess.STDOUT)
# print('scoring ',rttm_gndfile)
bashCommand="cat {}.txt | grep OVERALL |awk '{{print $4}}'".format(outpath)
output=subprocess.check_output(bashCommand,shell=True)
return float(output.decode('utf-8').rstrip())
# output = subprocess.check_output(['bash','-c', bashCommand])
def compute_loss(self,A,minibatch,lamda):
loss = 0.0
weight = 1
for m in minibatch:
loss += -weight*A[m[0],m[1]]+lamda*(A[m[0],m[2]]+A[m[1],m[2]])+ 1.0
# print('sum loss : ',loss)
return loss/len(minibatch)
def compute_minibatches(self,A,cluster,labels,mergeind=[],cleanind = []):
triplets = []
hard = 0
random_sample = 1
multiple = 0
for ind,k in enumerate(cluster):
neg = np.where(labels!=ind)[0]
for i,a in enumerate(k[:-1]):
for p in k[i+1:]:
Aavg = (A[a,neg]+A[p,neg])/2.0
if hard:
neg_ind = np.argmax(Aavg)
fetch_negatives = neg[neg_ind]
triplets.append([a,p,fetch_negatives])
if random_sample:
max_10 = random.randint(0,len(Aavg)-1)
max_neg = min(max_10,len(Aavg)-1)
fetch_negatives = neg[max_neg]
triplets.append([a,p,fetch_negatives])
if multiple:
max_neg = np.random.randint(1, len(Aavg), size=(10,))
#neg_indices = np.argsort(Aavg,axis=None)[::-1][max_neg-1]
fetch_negatives = neg[max_neg]
for n in fetch_negatives:
triplets.append([a,p,n])
random.shuffle(triplets)
if len(triplets)==0:
ValueError("No triplets generated!")
triplets = np.array(triplets)
N = len(triplets)
N1=0
num_batches = min(opt.N_batches,N)
N1 = N -(N % num_batches)
batchsize = int(N1/num_batches)
print('batchsize:',batchsize)
minibatches = triplets[:N1].reshape(-1,batchsize,3)
return minibatches,batchsize
def compute_minibatches_train(self,period,A,cluster,labels,overlap_ind=[],clusterlen = []):
triplets = []
random_sample = 0
multiple = 1
if len(overlap_ind)>0:
last_ind = -1
else:
last_ind = None
trainlabels = np.arange(len(labels))
anch_count_full = int(np.mean(clusterlen))
max_triplets = (anch_count_full-1)**2
pos_count = anch_count_full
print('anch_count_full:',anch_count_full)
triplets_cluster = []
triplets_cluster_size = np.zeros((len(cluster[:last_ind]),),dtype=int)
ind_count = 0
for ind,k in enumerate(cluster[:last_ind]):
# if len(k)<10:
# continue
triplets = []
train_neg = trainlabels[np.where(labels[trainlabels]!=ind)[0]]
traink = k
anch_count = min(anch_count_full,len(traink))
# bp()
possible_triplets = (anch_count-1)**2
num_negatives = int(max_triplets/possible_triplets) # find number of negatives needed to keep balance between small clusters vs large
for i,a in enumerate(traink[:anch_count-1]): # mining number of triplets based on smallest/largest cluster
for p in traink[i+1:anch_count]:
if len(train_neg)!=0:
if random_sample:
max_10 = random.randint(0,len(train_neg))
max_neg = min(max_10,len(train_neg)-1)
fetch_negatives = train_neg[max_neg]
triplets.append([a,p,fetch_negatives])
if multiple:
minsize=min(num_negatives,len(train_neg))
max_neg = np.random.randint(0, len(train_neg)-1, size=(minsize,))
#neg_indices = np.argsort(Aavg,axis=None)[::-1][max_neg-1]
fetch_negatives = train_neg[max_neg]
for n in fetch_negatives:
triplets.append([a,p,n])
else:
triplets.append([a,p,0])
triplets_cluster.append(triplets)
triplets_cluster_size[ind_count] = len(triplets)
ind_count = ind_count + 1
triplets_cluster_size = triplets_cluster_size[:ind_count]
triplets_cluster = np.array(triplets_cluster)
N1=0
batches = 1
if batches >= min(triplets_cluster_size):
batches = int(batches/2)
num_batches = min(batches,min(triplets_cluster_size))
print('num_batches:',num_batches)
minibatches = []
# bp()
for k,triplets in enumerate(triplets_cluster):
# cluster_batchsize = batchratio[k]
N = len(triplets)
random.shuffle(triplets)
N1 = N -(N % num_batches)
cluster_batchsize = int(N1/num_batches)
# N1 = int(N1/cluster_batchsize)*cluster_batchsize
print('cluster_batchsize:',cluster_batchsize)
triplets_n1 = np.array(triplets[:N1])
minibatches.append(triplets_n1.reshape(num_batches,-1,3))
print('minibatch shape:',minibatches[k].shape)
minibatches_full = []
for i in range(num_batches):
for k,triplets in enumerate(triplets_cluster):
# bp()
if k==0:
minibatches_full.append(minibatches[k][i].tolist())
else:
minibatches_full[i].extend(minibatches[k][i].tolist())
print('batchsize of batch {}: {}'.format(i,len(minibatches_full[i])))
batchsize = len(minibatches_full[0])
print('batchsize:',batchsize)
print("total triplets : ",len(minibatches_full)*batchsize)
# return minibatches_full,batchsize
return np.array(minibatches_full)
def compute_minibatches_train_valid(self,A,cluster,labels,overlap_ind=[],clusterlen = []):
triplets = []
tripletsval = []
# labels= labels[np.arange(len(labels))!=cluster[-1]]
overlap_label = len(clusterlen)-1
clean_ind = np.where(labels!=overlap_label)[0]
# labels = labels [clean_ind]
trainlabels = []
val_labels = []
train = 0.8
random_sample = 0
random_sample_val = 1
multiple = 1
if len(overlap_ind)>0:
last_ind = -1
else:
last_ind = None
for i,k in enumerate(cluster[:last_ind]):
traink = k[:int(len(k)*train)]
valk = k[int(len(k)*train):]
trainlabels.extend(traink)
val_labels.extend(valk)
trainlabels = np.array(trainlabels)
val_labels = np.array(val_labels)
anch_count_full = int(train * np.mean(clusterlen))
max_triplets = (anch_count_full-1)**2
print('anch_count_full:',anch_count_full)
triplets_cluster = []
triplets_cluster_size = np.zeros((len(cluster[:last_ind]),),dtype=int)
ind_count = 0
for ind,k in enumerate(cluster[:last_ind]):
if len(k)<10:
continue
triplets = []
train_neg = trainlabels[np.where(labels[trainlabels]!=ind)[0]]
# neg = trainlabels[neg]
# train_neg = neg[neg == np.array(trainlabels)]
traink = k[:int(len(k)*train)]
valk = k[int(len(k)*train):]
anch_count = min(anch_count_full,len(traink))
# bp()
possible_triplets = (anch_count-1)**2
num_negatives = int(max_triplets/possible_triplets) # find number of negatives needed to keep balance between small clusters vs large
for i,a in enumerate(traink[:anch_count-1]): # mining number of triplets based on smallest/largest cluster
for p in traink[i+1:anch_count]:
if len(train_neg)!=0:
if random_sample:
max_10 = random.randint(0,len(train_neg))
max_neg = min(max_10,len(train_neg)-1)
fetch_negatives = train_neg[max_neg]
triplets.append([a,p,fetch_negatives])
if multiple:
minsize=min(num_negatives,len(train_neg))
max_neg = np.random.randint(0, len(train_neg)-1, size=(minsize,))
#neg_indices = np.argsort(Aavg,axis=None)[::-1][max_neg-1]
fetch_negatives = train_neg[max_neg]
for n in fetch_negatives:
triplets.append([a,p,n])
else:
triplets.append([a,p,0])
val_neg = val_labels[ | np.where(labels[val_labels]!=ind) | numpy.where |
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import timeit
import time
import datetime
from tqdm import tqdm
def activation(input_array,function='sigmoid'):
if function =='sigmoid':
return 1/(1 + np.exp(-input_array))
np.random.seed(8888)
x1 = np.linspace(-5,5, 400)
x2 = np.linspace(-5,5, 400)
np.random.shuffle(x1)
np.random.shuffle(x2)
d = x1**2 + x2**2
# Normalize d 0.2~0.8
d_max = np.max(d)
d_min = np.min(d)
d = (d-d_min)/(d_max-d_min)*(0.8-0.2)+0.2
#---------------- Input data ------------------------------
num_in = 2
#----------------Hidden Layer 1 ---------------------
num_L1 = 10
bias_L1 = np.random.uniform(-0.5,0.5,[num_L1,1])#5 1
w_L1 = np.random.uniform(-0.5,0.5,[num_in,num_L1])#2 5
#---------------- Output -----------------------------
num_out = 1
bias_out = np.random.uniform(-0.5,0.5,[num_out,1])# 1 1
w_out = np.random.uniform(-0.5,0.5,[num_L1,num_out])# 5 1
#---------------- Parameter --------------------------
eta = 0.01
mom = 0.9
epoch = 250000
Eav_train = np.zeros([epoch])
Eav_test = np.zeros([epoch])
dw_out = temp1 = np.zeros([num_L1,num_out]) #5 1
dbias_out = temp2 = np.zeros([num_out,1])#1 1
dw_L1 = temp3 = np.zeros([num_in,num_L1])#2 5
dbias_L1 = temp4 = np.zeros([num_L1,1])# 5 1
#---------------- Training ----------------------------
t0 = timeit.default_timer()
now = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
pbar = tqdm(total =epoch)
for i in range(epoch):
#--------------- Feed Forward -------------------
e = np.zeros([300])
E_train = np.zeros([300])
for j in range(300):
#X = np.array([x1[j],x2[j]]).reshape(2,1)# 2 1
X = np.array([x1[j],x2[j]]).reshape(2,1)# 2 1
L1 = activation(np.dot(np.transpose(w_L1),X) + bias_L1,'sigmoid')#5 1
out = activation(np.dot(np.transpose(L1),w_out) + bias_out,'sigmoid')#1 1
#--------------- Back Propagation-----------------
e[j] = (d[j]-out) #1 1
E_train[j] = 0.5 * e[j]**2
locg_k = e[j] * (out*(1-out))# 1 1
temp2 = temp2 + mom * dbias_out + eta * locg_k * 1 #1 1
temp1 = temp1 + mom * dw_out + eta * locg_k * L1 #5 1
locg_j = L1*(1-L1) * locg_k * w_out# 5 1
temp4 = temp4 + mom * dbias_L1 + eta * locg_j * 1 # 5 1
temp3 = temp3 + mom * dw_L1 + eta * np.dot(X,np.transpose(locg_j))#2 5
dbias_out = temp2/300
dw_out = temp1/300
dbias_L1 = temp4/300
dw_L1 = temp3/300
temp1 = np.zeros([num_L1,num_out]) #5 1
temp2 = np.zeros([num_out,1])#1 1
temp3 = np.zeros([num_in,num_L1])#2 5
temp4 = np.zeros([num_L1,1])# 5 1
#---------- New weight --------------
bias_out = bias_out + dbias_out
w_out = w_out + dw_out
bias_L1 = bias_L1 + dbias_L1
w_L1 = w_L1 + dw_L1
#---------- Eave_train
Eav_train[i] = np.mean(E_train)
#---------- Test data loss ---------------
E_test = np.zeros([100])
for j in range(100):
X = np.array([x1[300+j],x2[300+j]]).reshape(2,1)# 2 1
L1 = activation(np.dot(np.transpose(w_L1),X) + bias_L1,'sigmoid')#5 1
out = activation(np.dot(np.transpose(L1),w_out) + bias_out,'sigmoid')#1 1
E_test = 0.5*( d[300+j] - out )**2
Eav_test[i] = np.mean(E_test)
if i % 1000==0:
pbar.update(1000)
pbar.close()
t1 =(timeit.default_timer()-t0)
print('Training time: {} min'.format((t1/60)))
#--------- Predict data --------------
y_predict = np.zeros([100])
E_predict = np.zeros([100])
for j in range(100):
X = np.array([x1[300+j],x2[300+j]]).reshape(2,1)# 2 1
L1 = activation(np.dot(np.transpose(w_L1),X) + bias_L1,'sigmoid')#5 1
out = activation(np.dot(np.transpose(L1),w_out) + bias_out,'sigmoid')#1 1
y_predict[j] = out
E_predict[j] = 0.5*( d[300+j] - out )**2
Eav_predict = | np.mean(E_predict) | numpy.mean |
from spikeextractors import RecordingExtractor
from spikeextractors.extraction_tools import check_get_traces_args
from .basepreprocessorrecording import BasePreprocessorRecordingExtractor
import numpy as np
from scipy.interpolate import interp1d
class RemoveArtifactsRecording(BasePreprocessorRecordingExtractor):
preprocessor_name = 'RemoveArtifacts'
def __init__(self, recording, triggers, ms_before=0.5, ms_after=3.0, mode='zeros', fit_sample_spacing=1.):
self._triggers = np.array(triggers)
self._ms_before = ms_before
self._ms_after = ms_after
self._mode = mode
self._fit_sample_spacing = fit_sample_spacing
BasePreprocessorRecordingExtractor.__init__(self, recording)
self._kwargs = {'recording': recording.make_serialized_dict(), 'triggers': triggers,
'ms_before': ms_before, 'ms_after': ms_after, 'mode': mode,
'fit_sample_spacing': fit_sample_spacing}
@check_get_traces_args
def get_traces(self, channel_ids=None, start_frame=None, end_frame=None, return_scaled=True):
traces = self._recording.get_traces(channel_ids=channel_ids,
start_frame=start_frame,
end_frame=end_frame,
return_scaled=return_scaled)
triggers = self._triggers[(self._triggers > start_frame) & (self._triggers < end_frame)] - start_frame
pad = [int(self._ms_before * self.get_sampling_frequency() / 1000),
int(self._ms_after * self.get_sampling_frequency() / 1000)]
traces = traces.copy()
if self._mode == 'zeros':
for trig in triggers:
if trig - pad[0] > 0 and trig + pad[1] < end_frame - start_frame:
traces[:, trig - pad[0]:trig + pad[1]] = 0
elif trig - pad[0] <= 0 and trig + pad[1] >= end_frame - start_frame:
traces = 0
elif trig - pad[0] <= 0:
traces[:, :trig + pad[1]] = 0
elif trig + pad[1] >= end_frame - start_frame:
traces[:, trig - pad[0]:] = 0
else:
sample_freq = self._recording.get_sampling_frequency()
# generate indices for evenly spaced fit points before and after gap
fit_sample_range = int(((sample_freq / 1000) * self._fit_sample_spacing * 2) + 1)
fit_sample_interval = int(self._fit_sample_spacing * (sample_freq / 1000))
fit_samples = np.array(range(0, fit_sample_range, fit_sample_interval))
rev_fit_samples = fit_sample_range - fit_samples
triggers = np.array(triggers).astype(int)
for trig in triggers:
pre_data_end_idx = trig - pad[0] - 1
post_data_start_idx = trig + pad[1] + 1
# Generate fit points from the sample points determined
pre_idx = pre_data_end_idx - rev_fit_samples + 1
post_idx = post_data_start_idx + fit_samples
# Get indices of the gap to fill
gap_idx = np.array(range(pre_data_end_idx + 1, post_data_start_idx + 0))
# Make sure we are not going out of bounds
gap_idx = gap_idx[gap_idx >= 0]
gap_idx = gap_idx[gap_idx < len(traces[0])]
# correct for out of bounds indices on both sides:
if np.max(post_idx) >= len(traces[0]):
post_idx = post_idx[post_idx < len(traces[0])]
if | np.min(pre_idx) | numpy.min |
"""
('Best hyper-parameter option: ', [0.0002296913506475621, 'relu', 0.005450020325607934, [128, 32]])
('Min validation loss: ', 0.5709372162818909)
"""
from itertools import permutations
from scipy.optimize import minimize
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
import random
import torch
import torch.nn as nn
import sys
sys.path.append('human2robot/')
from constrained_optimization import constraints, objective, piecewise_constraints, piecewise_objective
try: from execute import execGesture
except: pass
from data_processing import decompose, normalize, split, smooth
from Human import HumanInterface
from io_routines import readCSV, saveNetwork
try: from NAO import NAOInterface
except: pass
from network import Net, numpy2tensor
from setting import *
def generateHiddenLayerOptions(width_options=[32 * i for i in range(1,5)]):
options = []
for length in range(1, len(width_options)+1):
options.extend(permutations(width_options, length))
return list(map(list, options))
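# Illustrative sketch (assumption): with two candidate widths the helper returns
# every ordered hidden-layer stack of length 1 or 2, e.g.
#   generateHiddenLayerOptions([32, 64]) -> [[32], [64], [32, 64], [64, 32]]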
def choices(N, n):
token = list(range(N))
res = []
for _ in range(n):
ele = random.choice(token)
token.remove(ele)
res.append(ele)
return res
human_interface = HumanInterface.createFromBVH('dataset/BVH/human_skeletion.bvh')
try: nao_interface = NAOInterface(IP=P_NAO_IP, PORT=P_NAO_PORT)
except:
try: nao_interface = NAOInterface(IP=NAO_IP, PORT=NAO_PORT)
except: pass
fingerIndex = []
fingerJointList = ['RightHandThumb1', 'RightHandThumb2', 'RightHandThumb3',
'RightHandIndex1', 'RightHandIndex2', 'RightHandIndex3',
'RightHandMiddle1', 'RightHandMiddle2', 'RightHandMiddle3',
'RightHandRing1', 'RightHandRing2', 'RightHandRing3',
'RightHandPinky1', 'RightHandPinky2', 'RightHandPinky3',
'LeftHandThumb1', 'LeftHandThumb2', 'LeftHandThumb3',
'LeftHandIndex1', 'LeftHandIndex2', 'LeftHandIndex3',
'LeftHandMiddle1', 'LeftHandMiddle2', 'LeftHandMiddle3',
'LeftHandRing1', 'LeftHandRing2', 'LeftHandRing3',
'LeftHandPinky1', 'LeftHandPinky2', 'LeftHandPinky3',
]
for fingerJoint in fingerJointList:
index = human_interface.getStartAngleIndex(fingerJoint)
fingerIndex.extend([i for i in range(index, index+3)])
# Read dataset
talk_list = map(readCSV, talkfile)
talk = np.vstack(talk_list); talk = np.delete(talk, fingerIndex, axis=1)
talk, _ = normalize(talk); _, human_pca = decompose(talk)
human = readCSV('dataset/Human_overlap.csv'); human = np.delete(human, fingerIndex, axis=1)
human_test = readCSV('dataset/Human_test.csv'); human_test = np.delete(human_test, fingerIndex, axis=1)
nao = readCSV('dataset/NAO_overlap.csv')
nao_test = readCSV('dataset/NAO_test.csv')
n = np.size(human, 0)
if n != np.size(nao, 0):
sys.exit("Numbers of input and target are different.")
human, human_scaler = normalize(human); human = human_pca.transform(human)
nao, nao_scaler = normalize(nao)
__human_test__ = human_pca.transform(human_scaler.transform(human_test))
__nao_test__ = nao_scaler.transform(nao_test)
__human_test__ = torch.from_numpy(__human_test__).float()
__nao_test__ = torch.from_numpy(__nao_test__).float()
human_train, human_val, nao_train, nao_val = train_test_split(human, nao, test_size=0.2, random_state=1000)
human_train = torch.from_numpy(human_train).float()
human_val = torch.from_numpy(human_val).float()
nao_train = torch.from_numpy(nao_train).float()
nao_val = torch.from_numpy(nao_val).float()
keywords = ['Learning Rate', 'Activation Function', 'Weight Decay', 'Hidden Layers', 'Dropout Rate','Validation Error']
lr_range = (-4, 0)
reg_range = (-5, -1)
hl_options = generateHiddenLayerOptions()
af_options = ['relu', 'leaky_relu', 'tanh', 'sigmoid']
try:
file = open("random_search_result_with_dr_sigmoid_included.csv", 'r')
lines = file.readlines()
file.close()
if len(lines) == 0:
        file = open("random_search_result_with_dr_sigmoid_included.csv", 'w')
file.writelines(', '.join(keywords) + '\n')
else:
file = open("random_search_result_with_dr_sigmoid_included.csv", 'a')
except:
file = open("random_search_result_with_dr_sigmoid_included.csv", 'w')
file.writelines(', '.join(keywords) + '\n')
best_search = None
best_val_error = np.inf
num_search = 1000
for i in range(num_search):
lr = 10 ** random.uniform(*lr_range)
reg = 10 ** random.uniform(*reg_range)
dr = random.uniform(0, 1)
hidden_layers = random.choice(hl_options)
af = random.choice(af_options)
net = Net(n_input=np.size(human, 1), n_hidden=hidden_layers, n_output= | np.size(nao, 1) | numpy.size |
import typing
import numpy as np
from audmath.core.utils import polyval
def inverse_normal_distribution(
y: typing.Union[float, typing.Sequence[float], np.ndarray],
) -> typing.Union[float, np.ndarray]:
r"""Inverse normal distribution.
Returns the argument :math:`x`
for which the area under the Gaussian probability density function
is equal to :math:`y`.
It returns :math:`\text{nan}`
if :math:`y \notin [0, 1]`.
The area under the Gaussian probability density function is given by:
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2 / 2)\,\text{d}t
This function is a :mod:`numpy` port
of the `Cephes C code`_.
    <NAME> `implemented it in pure Python`_ under GPL-3.
The output is identical to the implementation
provided by :func:`scipy.special.ndtri`,
and :func:`scipy.stats.norm.ppf`,
and allows you
to avoid installing
and importing :mod:`scipy`.
:func:`audmath.inverse_normal_distribution`
is slower for large arrays
as the following comparison of execution times
    on a standard PC shows.
.. table::
========== ======= =======
Samples scipy audmath
========== ======= =======
10.000 0.00s 0.01s
100.000 0.00s 0.09s
1.000.000 0.02s 0.88s
10.000.000 0.25s 9.30s
========== ======= =======
.. _Cephes C code: https://github.com/jeremybarnes/cephes/blob/60f27df395b8322c2da22c83751a2366b82d50d1/cprob/ndtri.c
.. _implemented it in pure Python: https://github.com/dougthor42/PyErf/blob/cf38a2c62556cbd4927c9b3f5523f39b6a492472/pyerf/pyerf.py#L183-L287
Args:
y: input value
Returns:
inverted input
Example:
>>> inverse_normal_distribution([0.05, 0.4, 0.6, 0.95])
array([-1.64485363, -0.2533471 , 0.2533471 , 1.64485363])
""" # noqa: E501
if isinstance(y, np.ndarray):
y = y.copy()
y = np.atleast_1d(y)
x = np.zeros(y.shape)
switch_sign = np.ones(y.shape)
# Handle edge cases
idx1 = y == 0
x[idx1] = -np.Inf
idx2 = y == 1
x[idx2] = np.Inf
idx3 = y < 0
x[idx3] = np.NaN
idx4 = y > 1
x[idx4] = np.NaN
non_valid = np.array([any(i) for i in zip(idx1, idx2, idx3, idx4)])
# Return if no other values are left
if non_valid.sum() == len(x):
return _force_float(x)
switch_sign[non_valid] = 0
# Constants to avoid recalculation
ROOT_2PI = np.sqrt(2 * np.pi)
EXP_NEG2 = | np.exp(-2) | numpy.exp |
#!/usr/bin/env python
"""
By <NAME>
Geoazur
<EMAIL>
Created : Jul 22, 2016
Last modified: Jul 22, 2016
Translate finite source model in SRCMOD .fsp format to CMTSOLUTION format
"""
# Import Python Libraries
import numpy as np
import re
def fh(lon, lat, z, strike, distance):
"""
Given a start point (lon lat), bearing (degrees), and distance (m),
calculates the destination point (lon lat)
"""
theta = strike
delta = distance / 6371000.
theta = theta * np.pi / 180.
lat1 = lat * np.pi / 180.
lon1 = lon * np.pi / 180.
lat2 = np.arcsin( np.sin(lat1) * np.cos(delta) + \
np.cos(lat1) * np.sin(delta) * np.cos(theta) )
lon2 = lon1 + np.arctan2( np.sin(theta) * np.sin(delta) * | np.cos(lat1) | numpy.cos |
from datetime import timedelta
from functools import partial
import itertools
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
from zipline.pipeline.loaders.earnings_estimates import (
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import pytest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date, sids, tuples, end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples, columns=[SID_FIELD_NAME, "estimate", "knowledge_date"])
df = df.pivot_table(
columns=SID_FIELD_NAME, values="estimate", index="knowledge_date", dropna=False
)
df = df.reindex(pd.date_range(start_date, end_date))
# Index name is lost during reindex.
df.index = df.index.rename("knowledge_date")
df["at_date"] = end_date.tz_localize("utc")
df = df.set_index(["at_date", df.index.tz_localize("utc")]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp("2014-12-28", tz="utc")
END_DATE = pd.Timestamp("2015-02-04", tz="utc")
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError("make_loader")
@classmethod
def make_events(cls):
raise NotImplementedError("make_events")
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days,
self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
"s" + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(
cls.events, {column.name: val for column, val in cls.columns.items()}
)
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate1": [1.0, 2.0],
"estimate2": [3.0, 4.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def make_expected_out(cls):
raise NotImplementedError("make_expected_out")
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp("2015-01-15", tz="utc"),
end_date=pd.Timestamp("2015-01-15", tz="utc"),
)
assert_frame_equal(results.sort_index(1), self.expected_out.sort_index(1))
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-10"),
"estimate1": 1.0,
"estimate2": 3.0,
FISCAL_QUARTER_FIELD_NAME: 1.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp("2015-01-20"),
"estimate1": 2.0,
"estimate2": 4.0,
FISCAL_QUARTER_FIELD_NAME: 2.0,
FISCAL_YEAR_FIELD_NAME: 2015.0,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp("2015-01-15", tz="utc"), cls.sid0),)
),
)
dummy_df = pd.DataFrame(
{SID_FIELD_NAME: 0},
columns=[
SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
"estimate",
],
index=[0],
)
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {
c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1, bad_dataset2, good_dataset)
for c in dataset.columns
}
p = Pipeline(columns)
err_msg = (
r"Passed invalid number of quarters -[0-9],-[0-9]; "
r"must pass a number of quarters >= 0"
)
with pytest.raises(ValueError, match=err_msg):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with pytest.raises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = [
"split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof",
]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(
itertools.product(
(
NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
),
)
)
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: "event_date",
Estimates.fiscal_quarter: "fiscal_quarter",
Estimates.fiscal_year: "fiscal_year",
Estimates.estimate: "estimate",
}
with pytest.raises(ValueError):
loader(
dummy_df,
{column.name: val for column, val in columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"),
)
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp("2015-01-28", tz="utc")
q1_knowledge_dates = [
pd.Timestamp("2015-01-01"),
pd.Timestamp("2015-01-04"),
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-11"),
]
q2_knowledge_dates = [
pd.Timestamp("2015-01-14"),
pd.Timestamp("2015-01-17"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-23"),
]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
] # One day late
q2_release_dates = [
pd.Timestamp("2015-01-25"), # One day early
pd.Timestamp("2015-01-26"),
]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates + cls.q2_knowledge_dates, 4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (
q1e1 < q1e2
and q2e1 < q2e2
# All estimates are < Q2's event, so just constrain Q1
# estimates.
and q1e1 < cls.q1_release_dates[0]
and q1e2 < cls.q1_release_dates[0]
):
sid_estimates.append(
cls.create_estimates_df(q1e1, q1e2, q2e1, q2e2, sid)
)
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates + sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-13"), pd.Timestamp("2015-01-26")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-26"),
],
"estimate": [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid,
}
)
@classmethod
def create_estimates_df(cls, q1e1, q1e2, q2e1, q2e2, sid):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
"estimate": [0.1, 0.2, 0.3, 0.4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
}
)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert sid_estimates.isnull().all().all()
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[
self.get_expected_estimate(
q1_knowledge[
q1_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
q2_knowledge[
q2_knowledge[TS_FIELD_NAME] <= date.tz_localize(None)
],
date.tz_localize(None),
).set_index([[date]])
for date in sid_estimates.index
],
axis=0,
)
sid_estimates.index = all_expected.index.copy()
assert_equal(all_expected[sid_estimates.columns], sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >= comparable_date
):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self, q1_knowledge, q2_knowledge, comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (
not q2_knowledge.empty
and q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q2_knowledge.iloc[-1:]
elif (
not q1_knowledge.empty
and q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <= comparable_date
):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns, index=[comparable_date])
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
        Runs a Pipeline that calculates estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp("2015-01-01"), pd.Timestamp("2015-01-06")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-10"),
pd.Timestamp("2015-01-20"),
],
"estimate": [1.0, 2.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015],
}
)
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(
columns=[cls.columns[col] + "1" for col in cls.columns]
+ [cls.columns[col] + "2" for col in cls.columns],
index=cls.trading_days,
)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ("1", "2")
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(expected[expected_name])
else:
expected[expected_name] = expected[expected_name].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge(
[
{c.name + "1": c.latest for c in dataset1.columns},
{c.name + "2": c.latest for c in dataset2.columns},
]
)
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + "1" for col in self.columns]
q2_columns = [col.name + "2" for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(
sorted(np.array(q1_columns + q2_columns)), sorted(results.columns.values)
)
assert_equal(
self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1),
)
class NextEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp(
"2015-01-11", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp("2015-01-11", tz="UTC") : pd.Timestamp(
"2015-01-20", tz="UTC"
),
raw_name + "1",
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ["estimate", "event_date"]:
expected.loc[
pd.Timestamp("2015-01-06", tz="UTC") : pd.Timestamp(
"2015-01-10", tz="UTC"
),
col_name + "2",
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-09", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 2
expected.loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_QUARTER_FIELD_NAME + "2",
] = 3
expected.loc[
pd.Timestamp("2015-01-01", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC"),
FISCAL_YEAR_FIELD_NAME + "2",
] = 2015
return expected
class PreviousEstimateMultipleQuarters(WithEstimateMultipleQuarters, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp(
"2015-01-19", tz="UTC"
)
] = cls.events[raw_name].iloc[0]
expected[raw_name + "1"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ["estimate", "event_date"]:
expected[col_name + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = cls.events[col_name].iloc[0]
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 4
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-12", tz="UTC") : pd.Timestamp("2015-01-20", tz="UTC")
] = 2014
expected[FISCAL_QUARTER_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 1
expected[FISCAL_YEAR_FIELD_NAME + "2"].loc[
pd.Timestamp("2015-01-20", tz="UTC") :
] = 2015
return expected
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame(
{
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
]
* 2,
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-20"),
],
"estimate": [11.0, 12.0, 21.0] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6,
}
)
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError("assert_compute")
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=pd.Timestamp("2015-01-13", tz="utc"),
# last event date we have
end_date=pd.Timestamp("2015-01-14", tz="utc"),
)
class PreviousVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextVaryingNumEstimates(WithVaryingNumEstimates, ZiplineTestCase):
def assert_compute(self, estimate, today):
if today == pd.Timestamp("2015-01-13", tz="utc"):
assert_array_equal(estimate[:, 0], np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0], np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1], np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
class WithEstimateWindows(WithEstimates):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
        A dictionary mapping the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
window_test_start_date = pd.Timestamp("2015-01-05")
critical_dates = [
pd.Timestamp("2015-01-09", tz="utc"),
pd.Timestamp("2015-01-15", tz="utc"),
pd.Timestamp("2015-01-20", tz="utc"),
pd.Timestamp("2015-01-26", tz="utc"),
pd.Timestamp("2015-02-05", tz="utc"),
pd.Timestamp("2015-02-10", tz="utc"),
]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-02-10"),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp("2015-01-18"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-04-01"),
],
"estimate": [100.0, 101.0] + [200.0, 201.0] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-15"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-01-22"),
pd.Timestamp("2015-02-05"),
pd.Timestamp("2015-02-05"),
],
"estimate": [110.0, 111.0] + [310.0, 311.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10,
}
)
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-07"),
cls.window_test_start_date,
pd.Timestamp("2015-01-17"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-02-10"),
pd.Timestamp("2015-02-10"),
],
"estimate": [120.0, 121.0] + [220.0, 221.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20,
}
)
concatted = pd.concat(
[sid_0_timeline, sid_10_timeline, sid_20_timeline]
).reset_index()
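        # Shuffle the rows with a fixed seed so that the loader cannot rely on
        # the events arriving in any particular order.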
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [
sid for i in range(len(sids) - 1) for sid in range(sids[i], sids[i + 1])
] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
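        # Bind the fixed window start date and sid universe up front so the
        # expected-timeline builders below only pass per-date
        # (sid, value, timestamp) tuples.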
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids(),
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(
self, start_date, num_announcements_out
):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date)
- self.trading_days.get_loc(self.window_test_start_date)
+ 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = (
timelines[num_announcements_out]
.loc[today]
.reindex(trading_days[: today_idx + 1])
.values
)
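                # The engine only passes the trailing window_len rows to the
                # factor, so compare against that slice of the expected timeline.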
timeline_start_idx = len(today_timeline) - window_len
assert_almost_equal(estimate, today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp("2015-02-10", tz="utc"),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-20")),
],
pd.Timestamp("2015-01-21"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111, pd.Timestamp("2015-01-22")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311, pd.Timestamp("2015-02-05")),
(20, 221, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-09"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-19")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
],
pd.Timestamp("2015-01-20"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-01-22")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, 310, pd.Timestamp("2015-01-09")),
(10, 311, pd.Timestamp("2015-01-15")),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-11")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-12", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(20, 221, pd.Timestamp("2015-01-17")),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedWindows(WithEstimateWindows):
"""
    ZiplineTestCase mixin providing fixtures and a test for running a
Pipeline with an estimates loader over differently-sized windows and with
split adjustments.
"""
split_adjusted_asof_date = pd.Timestamp("2015-01-14")
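    # Estimates in the events are stored split-adjusted as of this date; the
    # loaders must un-apply splits before it and re-apply them after it.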
@classmethod
def make_events(cls):
# Add an extra sid that has a release before the split-asof-date in
# order to test that we're reversing splits correctly in the previous
# case (without an overwrite) and in the next case (with an overwrite).
sid_30 = pd.DataFrame(
{
TS_FIELD_NAME: [
cls.window_test_start_date,
pd.Timestamp("2015-01-09"),
# For Q2, we want it to start early enough
# that we can have several adjustments before
# the end of the first quarter so that we
# can test un-adjusting & readjusting with an
# overwrite.
cls.window_test_start_date,
# We want the Q2 event date to be enough past
# the split-asof-date that we can have
# several splits and can make sure that they
# are applied correctly.
pd.Timestamp("2015-01-20"),
],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-20"),
],
"estimate": [130.0, 131.0, 230.0, 231.0],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 30,
}
)
# An extra sid to test no splits before the split-adjusted-asof-date.
# We want an event before and after the split-adjusted-asof-date &
# timestamps for data points also before and after
        # split-adjusted-asof-date (but also before the split dates, so that
# we can test that splits actually get applied at the correct times).
sid_40 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-15")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [140.0, 240.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 40,
}
)
# An extra sid to test all splits before the
# split-adjusted-asof-date. All timestamps should be before that date
# so that we have cases where we un-apply and re-apply splits.
sid_50 = pd.DataFrame(
{
TS_FIELD_NAME: [pd.Timestamp("2015-01-09"), pd.Timestamp("2015-01-12")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-02-10"),
],
"estimate": [150.0, 250.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 50,
}
)
return pd.concat(
[
# Slightly hacky, but want to make sure we're using the same
# events as WithEstimateWindows.
cls.__base__.make_events(),
sid_30,
sid_40,
sid_50,
]
)
@classmethod
def make_splits_data(cls):
# For sid 0, we want to apply a series of splits before and after the
        # split-adjusted-asof-date as well as between quarters (for the
# previous case, where we won't see any values until after the event
# happens).
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (-1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 100),
"effective_date": (
pd.Timestamp("2014-01-01"), # Filter out
# Split before Q1 event & after first estimate
pd.Timestamp("2015-01-07"),
# Split before Q1 event
pd.Timestamp("2015-01-09"),
# Split before Q1 event
pd.Timestamp("2015-01-13"),
# Split before Q1 event
pd.Timestamp("2015-01-15"),
# Split before Q1 event
pd.Timestamp("2015-01-18"),
# Split after Q1 event and before Q2 event
pd.Timestamp("2015-01-30"),
# Filter out - this is after our date index
pd.Timestamp("2016-01-01"),
),
}
)
sid_10_splits = pd.DataFrame(
{
SID_FIELD_NAME: 10,
"ratio": (0.2, 0.3),
"effective_date": (
# We want a split before the first estimate and before the
# split-adjusted-asof-date but within our calendar index so
# that we can test that the split is NEVER applied.
pd.Timestamp("2015-01-07"),
# Apply a single split before Q1 event.
pd.Timestamp("2015-01-20"),
),
}
)
# We want a sid with split dates that collide with another sid (0) to
# make sure splits are correctly applied for both sids.
sid_20_splits = pd.DataFrame(
{
SID_FIELD_NAME: 20,
"ratio": (
0.4,
0.5,
0.6,
0.7,
0.8,
0.9,
),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
pd.Timestamp("2015-01-30"),
),
}
)
# This sid has event dates that are shifted back so that we can test
# cases where an event occurs before the split-asof-date.
sid_30_splits = pd.DataFrame(
{
SID_FIELD_NAME: 30,
"ratio": (8, 9, 10, 11, 12),
"effective_date": (
# Split before the event and before the
# split-asof-date.
pd.Timestamp("2015-01-07"),
# Split on date of event but before the
# split-asof-date.
pd.Timestamp("2015-01-09"),
# Split after the event, but before the
# split-asof-date.
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-15"),
pd.Timestamp("2015-01-18"),
),
}
)
# No splits for a sid before the split-adjusted-asof-date.
sid_40_splits = pd.DataFrame(
{
SID_FIELD_NAME: 40,
"ratio": (13, 14),
"effective_date": (
pd.Timestamp("2015-01-20"),
pd.Timestamp("2015-01-22"),
),
}
)
# No splits for a sid after the split-adjusted-asof-date.
sid_50_splits = pd.DataFrame(
{
SID_FIELD_NAME: 50,
"ratio": (15, 16),
"effective_date": (
pd.Timestamp("2015-01-13"),
pd.Timestamp("2015-01-14"),
),
}
)
return pd.concat(
[
sid_0_splits,
sid_10_splits,
sid_20_splits,
sid_30_splits,
sid_40_splits,
sid_50_splits,
]
)
class PreviousWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat(
[
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
# Undo all adjustments that haven't happened yet.
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-12")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11, pd.Timestamp("2015-01-09")),
(40, 140.0, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, np.NaN, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-01-21")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-22", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 111 * 0.3, pd.Timestamp("2015-01-22")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-04")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-01-20")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-01-20")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-01-09")),
(50, 150.0, pd.Timestamp("2015-01-09")),
],
end_date,
)
for end_date in pd.date_range("2015-02-05", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 201, pd.Timestamp("2015-02-10")),
(10, 311 * 0.3, pd.Timestamp("2015-02-05")),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 250.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_previous = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-09", "2015-01-19")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
],
end_date,
)
for end_date in pd.date_range("2015-01-20", "2015-02-09")
]
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 101 * 7, pd.Timestamp("2015-02-10")),
(10, np.NaN, pd.Timestamp("2015-02-05")),
(20, 121 * 0.7 * 0.8 * 0.9, pd.Timestamp("2015-02-10")),
(30, 131 * 11 * 12, pd.Timestamp("2015-01-20")),
(40, 140.0 * 13 * 14, pd.Timestamp("2015-02-10")),
(50, 150.0, pd.Timestamp("2015-02-10")),
],
pd.Timestamp("2015-02-10"),
)
]
)
return {1: oneq_previous, 2: twoq_previous}
class NextWithSplitAdjustedWindows(WithSplitAdjustedWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextSplitAdjustedEarningsEstimatesLoader(
events,
columns,
split_adjustments_loader=cls.adjustment_reader,
split_adjusted_column_names=["estimate"],
split_adjusted_asof=cls.split_adjusted_asof_date,
)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 130 * 1 / 10, cls.window_test_start_date),
(30, 131 * 1 / 10, pd.Timestamp("2015-01-09")),
(40, 140, pd.Timestamp("2015-01-09")),
(50, 150.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-09")),
],
pd.Timestamp("2015-01-09"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 1 / 4, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 5 / 3, cls.window_test_start_date),
(20, 121 * 5 / 3, pd.Timestamp("2015-01-07")),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 15 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-12"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0 * 1 / 16, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-13"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp("2015-01-07")),
(30, 230, cls.window_test_start_date),
(40, np.NaN, pd.Timestamp("2015-01-10")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-14"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5, cls.window_test_start_date),
(10, 110, pd.Timestamp("2015-01-09")),
(10, 111, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7, cls.window_test_start_date),
(20, 121 * 0.7, pd.Timestamp("2015-01-07")),
(30, 230 * 11, cls.window_test_start_date),
(40, 240, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 100 * 5 * 6, cls.window_test_start_date),
(0, 101, pd.Timestamp("2015-01-20")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 120 * 0.7 * 0.8, cls.window_test_start_date),
(20, 121 * 0.7 * 0.8, pd.Timestamp("2015-01-07")),
(30, 230 * 11 * 12, cls.window_test_start_date),
(30, 231, pd.Timestamp("2015-01-20")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-20"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-21"),
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 110 * 0.3, pd.Timestamp("2015-01-09")),
(10, 111 * 0.3, pd.Timestamp("2015-01-12")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-01-22"),
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-23", "2015-01-29")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, 310 * 0.3, pd.Timestamp("2015-01-09")),
(10, 311 * 0.3, pd.Timestamp("2015-01-15")),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-01-30", "2015-02-05")
]
),
pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
end_date,
)
for end_date in pd.date_range("2015-02-06", "2015-02-09")
]
),
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6 * 7, pd.Timestamp("2015-01-12")),
(0, 201, pd.Timestamp("2015-02-10")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8 * 0.9, cls.window_test_start_date),
(20, 221 * 0.8 * 0.9, pd.Timestamp("2015-01-17")),
(40, 240 * 13 * 14, pd.Timestamp("2015-01-15")),
(50, 250.0, pd.Timestamp("2015-01-12")),
],
pd.Timestamp("2015-02-10"),
),
]
)
twoq_next = pd.concat(
[
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, 230 * 1 / 10, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
(50, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-09"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 1 / 4, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 5 / 3, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-12"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-13", "2015-01-14")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-15", "2015-01-16")
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, 200 * 5 * 6, pd.Timestamp("2015-01-12")),
(10, np.NaN, cls.window_test_start_date),
(20, 220 * 0.7 * 0.8, cls.window_test_start_date),
(20, 221 * 0.8, pd.Timestamp("2015-01-17")),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
pd.Timestamp("2015-01-20"),
)
]
+ [
cls.create_expected_df_for_factor_compute(
[
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date),
(30, np.NaN, cls.window_test_start_date),
(40, np.NaN, cls.window_test_start_date),
],
end_date,
)
for end_date in pd.date_range("2015-01-21", "2015-02-10")
]
)
return {1: oneq_next, 2: twoq_next}
class WithSplitAdjustedMultipleEstimateColumns(WithEstimates):
"""
ZiplineTestCase mixin for having multiple estimate columns that are
split-adjusted to make sure that adjustments are applied correctly.
Attributes
----------
test_start_date : pd.Timestamp
The start date of the test.
test_end_date : pd.Timestamp
        The end date of the test.
split_adjusted_asof : pd.Timestamp
The split-adjusted-asof-date of the data used in the test, to be used
to create all loaders of test classes that subclass this mixin.
Methods
-------
make_expected_timelines_1q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range for
each column. Only for 1 quarter out.
make_expected_timelines_2q_out -> dict[pd.Timestamp -> dict[str ->
np.array]]
The expected array of results for each date of the date range. For 2
quarters out, so only for the column that is requested to be loaded
with 2 quarters out.
Tests
-----
test_adjustments_with_multiple_adjusted_columns
Tests that if you have multiple columns, we still split-adjust
correctly.
test_multiple_datasets_different_num_announcements
Tests that if you have multiple datasets that ask for a different
number of quarters out, and each asks for a different estimates column,
we still split-adjust correctly.
"""
END_DATE = pd.Timestamp("2015-02-10", tz="utc")
test_start_date = pd.Timestamp("2015-01-06", tz="utc")
test_end_date = pd.Timestamp("2015-01-12", tz="utc")
split_adjusted_asof = pd.Timestamp("2015-01-08")
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: "event_date",
MultipleColumnsEstimates.fiscal_quarter: "fiscal_quarter",
MultipleColumnsEstimates.fiscal_year: "fiscal_year",
MultipleColumnsEstimates.estimate1: "estimate1",
MultipleColumnsEstimates.estimate2: "estimate2",
}
@classmethod
def make_events(cls):
sid_0_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-09"),
pd.Timestamp("2015-01-12"),
],
"estimate1": [1100.0, 1200.0],
"estimate2": [2100.0, 2200.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
}
)
# This is just an extra sid to make sure that we apply adjustments
# correctly for multiple columns when we have multiple sids.
sid_1_events = pd.DataFrame(
{
# We only want a stale KD here so that adjustments
# will be applied.
TS_FIELD_NAME: [pd.Timestamp("2015-01-05"), pd.Timestamp("2015-01-05")],
EVENT_DATE_FIELD_NAME: [
pd.Timestamp("2015-01-08"),
pd.Timestamp("2015-01-11"),
],
"estimate1": [1110.0, 1210.0],
"estimate2": [2110.0, 2210.0],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 1,
}
)
return pd.concat([sid_0_events, sid_1_events])
@classmethod
def make_splits_data(cls):
sid_0_splits = pd.DataFrame(
{
SID_FIELD_NAME: 0,
"ratio": (0.3, 3.0),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
),
}
)
sid_1_splits = pd.DataFrame(
{
SID_FIELD_NAME: 1,
"ratio": (0.4, 4.0),
"effective_date": (
pd.Timestamp("2015-01-07"),
pd.Timestamp("2015-01-09"),
),
}
)
return pd.concat([sid_0_splits, sid_1_splits])
@classmethod
def make_expected_timelines_1q_out(cls):
return {}
@classmethod
def make_expected_timelines_2q_out(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithSplitAdjustedMultipleEstimateColumns, cls).init_class_fixtures()
cls.timelines_1q_out = cls.make_expected_timelines_1q_out()
cls.timelines_2q_out = cls.make_expected_timelines_2q_out()
def test_adjustments_with_multiple_adjusted_columns(self):
dataset = MultipleColumnsQuartersEstimates(1)
timelines = self.timelines_1q_out
window_len = 3
class SomeFactor(CustomFactor):
inputs = [dataset.estimate1, dataset.estimate2]
window_length = window_len
def compute(self, today, assets, out, estimate1, estimate2):
assert_almost_equal(estimate1, timelines[today]["estimate1"])
assert_almost_equal(estimate2, timelines[today]["estimate2"])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({"est": SomeFactor()}),
start_date=self.test_start_date,
# last event date we have
end_date=self.test_end_date,
)
def test_multiple_datasets_different_num_announcements(self):
dataset1 = MultipleColumnsQuartersEstimates(1)
dataset2 = MultipleColumnsQuartersEstimates(2)
timelines_1q_out = self.timelines_1q_out
timelines_2q_out = self.timelines_2q_out
window_len = 3
class SomeFactor1(CustomFactor):
inputs = [dataset1.estimate1]
window_length = window_len
def compute(self, today, assets, out, estimate1):
| assert_almost_equal(estimate1, timelines_1q_out[today]["estimate1"]) | numpy.testing.assert_almost_equal |
import unittest
import numpy as np
from pynif3d.utils.transforms import rotation_mat, translation_mat
class TestTransform(unittest.TestCase):
def test_translation_mat(self):
pred = translation_mat(0)
real = | np.identity(4) | numpy.identity |
#!/usr/bin/env python3
"""
deep neural network class
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
class DeepNeuralNetwork:
"""
    Deep neural network class
"""
def __init__(self, nx, layers):
"""
deep neural network constructor
"""
if type(nx) is not int:
raise TypeError("nx must be an integer")
if nx < 1:
raise ValueError("nx must be a positive integer")
if type(layers) is not list or not layers:
raise TypeError("layers must be a list of positive integers")
self.__L = len(layers)
self.__cache = {}
self.__weights = {}
for i in range(len(layers)):
if layers[i] <= 0:
raise TypeError("layers must be a list of positive integers")
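            # He initialization: standard-normal weights scaled by
            # sqrt(2 / fan_in) keep activation variance stable across layers.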
if i == 0:
self.__weights['W' + str(i + 1)
] = np.random.randn(layers[i], nx) *\
np.sqrt(2/nx)
else:
self.__weights['W' + str(i + 1)] = \
np.random.randn(layers[i], layers[i-1]) * \
| np.sqrt(2/layers[i-1]) | numpy.sqrt |
import numpy as np
from ._CFunctions import _CGetCon2020Params,_CSetCon2020Params
import ctypes as ct
def _GetCFG():
'''
Get the current config dictionary
'''
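	# One-element numpy arrays serve as output buffers that the C routine fills in place.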
eqtype = ct.c_char_p(" ".encode('utf-8'))
mui = np.zeros(1,dtype='float64')
irho = np.zeros(1,dtype='float64')
r0 = np.zeros(1,dtype='float64')
r1 = np.zeros(1,dtype='float64')
d = np.zeros(1,dtype='float64')
xt = np.zeros(1,dtype='float64')
xp = np.zeros(1,dtype='float64')
Edwards = np.zeros(1,dtype='bool')
ErrChk = np.zeros(1,dtype='bool')
CartIn = np.zeros(1,dtype='bool')
CartOut = np.zeros(1,dtype='bool')
_CGetCon2020Params(mui,irho,r0,r1,d,xt,xp,eqtype,Edwards,ErrChk,
CartIn,CartOut)
cfg = {}
cfg['mu_i'] = mui[0]
cfg['i_rho'] = irho[0]
cfg['r0'] = r0[0]
cfg['r1'] = r1[0]
cfg['d'] = d[0]
cfg['xt'] = xt[0]
cfg['xp'] = xp[0]
cfg['Edwards'] = Edwards[0]
cfg['error_check'] = ErrChk[0]
cfg['CartesianIn'] = CartIn[0]
cfg['CartesianOut'] = CartOut[0]
cfg['equation_type'] = eqtype.value.decode()
return cfg
def _SetCFG(cfg):
'''
Set the model config using a dictionary.
'''
eqtype = ct.c_char_p(cfg['equation_type'].encode('utf-8'))
mui = np.array([cfg['mu_i']],dtype='float64')
irho = np.array([cfg['i_rho']],dtype='float64')
r0 = np.array([cfg['r0']],dtype='float64')
r1 = np.array([cfg['r1']],dtype='float64')
d = np.array([cfg['d']],dtype='float64')
xt = np.array([cfg['xt']],dtype='float64')
xp = np.array([cfg['xp']],dtype='float64')
Edwards = np.array([cfg['Edwards']],dtype='bool')
ErrChk = np.array([cfg['error_check']],dtype='bool')
	CartIn = np.array([cfg['CartesianIn']],dtype='bool')
CartOut = | np.array([cfg['CartesianOut']],dtype='bool') | numpy.array |
"""Module define operation with functions
sym_elems and magn_sym_elems
sym_elems: numpy.shape[13, n_elems]
[numerator_x, numerator_y, numerator_z, denominator_xyz,
r_11, r_12, r_13,
r_21, r_22, r_23,
r_31, r_32, r_33]
magn_sym_elems: [22, n_elems]
[numerator_x, numerator_y, numerator_z, denominator_xyz,
r_11, r_12, r_13,
r_21, r_22, r_23,
r_31, r_32, r_33,
m_11, m_12, m_13,
m_21, m_22, m_23,
m_31, m_32, m_33]
Functions
---------
- form_symm_elems_by_b_i_r_ij
- calc_numerators_denominator_for_b_i
- calc_common_denominator
- calc_rational_sum
- transform_to_p1
"""
import numpy
# from cryspy.A_functions_base.function_1_strings import
def form_symm_elems_by_b_i_r_ij(b_i, r_ij):
"""
Parameters
----------
    b_i : sequence of fractions.Fraction
        Translation components (b_1, b_2, b_3) of the symmetry elements.
    r_ij : sequence of sequences of int
        The nine rotation-matrix element arrays
        (r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33).
    Returns
    -------
    sym_elems : numpy.ndarray of int, shape (13, n_elems)
        Rows are [numerator_x, numerator_y, numerator_z, denominator_xyz,
        r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33]
"""
b_num_1, b_num_2, b_num_3, b_den = calc_numerators_denominator_for_b_i(*b_i)
(r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33) = r_ij
n_1st = 13
n_2nd = len(r_11)
sym_elems = numpy.zeros(shape=(n_1st, n_2nd), dtype=int)
sym_elems[0, :] = b_num_1
sym_elems[1, :] = b_num_2
sym_elems[2, :] = b_num_3
sym_elems[3, :] = b_den
sym_elems[4, :] = numpy.array(r_11, dtype=int)
sym_elems[5, :] = numpy.array(r_12, dtype=int)
sym_elems[6, :] = numpy.array(r_13, dtype=int)
sym_elems[7, :] = | numpy.array(r_21, dtype=int) | numpy.array |
import control
import numpy as np
import scipy.linalg
def solve_riccati(A, B, Q, R):
"""
Solves discrete ARE, returns gain matrix K s.t. u = +K*x
Faster implementation than control.dlqr for systems with large n (state_dim)
"""
n = A.shape[0]
m = B.shape[1]
P = | np.zeros((n, n)) | numpy.zeros |
##################################################
#ASM# module "plotting" for package "common" #ASM#
##################################################
#TODO: Fix undo/redo comparison operations of PlotHistory
#TODO: enhance some matplotlib functions
"""
This module assists in many matplotlib related tasks, such as managing plot objects.
It automatically imports all from matplotlib.pylab as well as from numpy.
Note: If variable *plot_format* is set (in dict __main__._IP.user_ns) to
a valid matplotlib.use() backend, e.g. 'PS', that backend will be used.
Otherwise, the user will be prompted for the plot backend.
Setting plot_format=None will bypass this behavior and use the default
renderer.
"""
#_________________________________________Imports_________________________________________
import os
import sys
import re
import copy
import types
from common.log import Logger
from common import misc
import numpy
np=numpy
__module_name__=__name__
from matplotlib import pyplot,axes,colors
from matplotlib import pyplot as plt
#---- Colormaps defined inline
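# Each channel in the cdicts below is matplotlib segment data: (x, value_below,
# value_above) anchor tuples with x running from 0 to 1.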
cdict = {'red': ((0.0, 0.0, 0.0),
(0.35,0.0, 0.0),
(0.5, 1, 1),
(0.65, .9, .9),
(0.95, 0.5, .5),
(1.0, .4, .4)),
'green': ((0.0, 0.0, 0.0),
(0.35,0.0, 0.0),
(0.5, 1, 1),
(0.65,0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0, .4, .4),
(0.05, 0.5, 0.5),
(0.35, 0.9, 0.9),
(0.5, 1, 1),
(0.65,0.0, 0.0),
(1.0, 0.0, 0.0))
}
pyplot.register_cmap(name='BWR', data=cdict)
cdict = {'red': ((0.0, 0.0, 0.0),
(0.2,0.0, 0.0),
(0.5, 1, 1),
(0.8, .9, .9),
(0.95, 0.5, .5),
(1.0, .4, .4)),
'green': ((0.0, 0.0, 0.0),
(0.2,0.0, 0.0),
(0.5, 1, 1),
(0.8,0.0, 0.0),
(1.0, 0.0, 0.0)),
'blue': ((0, .4, .4),
(0.05, 0.5, 0.5),
(0.2, 0.9, 0.9),
(0.5, 1, 1),
(0.8,0.0, 0.0),
(1.0, 0.0, 0.0))
}
pyplot.register_cmap(name='BWR2', data=cdict)
cdict = {'blue': ((0.0, 0.0, 0.0),
(0.2,0.0, 0.0),
(0.5, 1, 1),
(0.8, .9, .9),
(0.95, 0.5, .5),
(1.0, .4, .4)),
'green': ((0.0, 0.0, 0.0),
(0.2,0.0, 0.0),
(0.5, 1, 1),
(0.8,0.0, 0.0),
(1.0, 0.0, 0.0)),
'red': ((0, .4, .4),
(0.05, 0.5, 0.5),
(0.2, 0.9, 0.9),
(0.5, 1, 1),
(0.8,0.0, 0.0),
(1.0, 0.0, 0.0))
}
pyplot.register_cmap(name='BWR2_r', data=cdict)
##Load all colormaps found in `common/colormaps` directory##
# The format of these files should be 4 columns: x, r, g, b
# All columns should range from 0 to 1.
cmap_dir=os.path.join(os.path.dirname(__file__),'colormaps')
for file in os.listdir(cmap_dir):
if file.endswith('.csv'):
cmap_name=re.sub('\.csv$','',file)
cmap_mat=misc.extract_array(open(os.path.join(cmap_dir,file)))
x=cmap_mat[:,0]; r=cmap_mat[:,1]; g=cmap_mat[:,2]; b=cmap_mat[:,3]
rtuples=numpy.vstack((x,r,r)).transpose().tolist()
gtuples=numpy.vstack((x,g,g)).transpose().tolist()
btuples=numpy.vstack((x,b,b)).transpose().tolist()
cdit={'red':rtuples,'green':gtuples,'blue':btuples}
pyplot.register_cmap(name=cmap_name,data=cdit)
r=r[::-1]; g=g[::-1]; b=b[::-1]
rtuples_r=numpy.vstack((x,r,r)).transpose().tolist()
gtuples_r=numpy.vstack((x,g,g)).transpose().tolist()
btuples_r=numpy.vstack((x,b,b)).transpose().tolist()
cdit_r={'red':rtuples_r,'green':gtuples_r,'blue':btuples_r}
pyplot.register_cmap(name=cmap_name+'_r',data=cdit_r)
Logger.write('Registered colormaps "%s" and "%s_r"...'%((cmap_name,)*2))
# ----- Colormaps tabulated
# New matplotlib colormaps by <NAME>, <NAME>,
# and (in the case of viridis) <NAME>.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap
cmaps = {}
for (name, data) in (('magma', _magma_data),
('inferno', _inferno_data),
('plasma', _plasma_data),
('viridis', _viridis_data)):
cmaps[name] = ListedColormap(data, name=name)
pyplot.register_cmap(name=name,cmap=cmaps[name])
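# Hedged usage sketch (not part of the original module): once the maps above are
# registered, they can be looked up by name like any built-in colormap. The helper
# name below is purely illustrative.
def _demo_registered_cmap(name='viridis'):
    """Return a few RGBA samples from one of the colormaps registered above."""
    cmap = pyplot.get_cmap(name)
    return [cmap(v) for v in (0.0, 0.5, 1.0)]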
# ----- Plotting functions
_color_index_=0
all_colors=['b','g','r','c','m','y','k','teal','gray','navy']
def next_color():
global _color_index_
color=all_colors[_color_index_%len(all_colors)]
_color_index_+=1
return color
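# Hedged usage sketch: next_color() hands out colors from a fixed cycle, which keeps
# successive curves distinguishable without passing colors explicitly.
def _demo_next_color():
    """Illustrative only: plot three lines, each picking up the next cycled color."""
    for offset in range(3):
        pyplot.plot([0, 1], [offset, offset + 1], color=next_color())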
###################################
#ASM# 2. function figure_list #ASM#
###################################
def figure_list():
"""
This function uses some internal wizardry to return a list of the current figure objects.
GLOBALS USED: matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
"""
import matplotlib._pylab_helpers
lst = []
for manager in matplotlib._pylab_helpers.Gcf.get_all_fig_managers():
lst.append(manager.canvas.figure)
return lst
def get_properties(obj,verbose='yes'):
"""
This function returns a dictionary of artist object properties and their corresponding values.
*obj: artist object
*verbose: set to 'yes' to spout error messages for properties whose values could not be obtained.
        DEFAULT: 'yes'
"""
props_to_get=[]
for attrib in dir(obj):
if attrib[0:4]=='get_': props_to_get.append(attrib.replace('get_',''))
values=[]
props_used=[]
for prop in props_to_get:
##Getp sometimes fails requiring two arguments, but these properties are not important##
try:
values.append(getp(obj,prop))
props_used.append(prop)
except TypeError:
if 'y' in verbose: print('ALERT: Couldn\'t retrieve property '+prop+'.')
return dict([(props_used[i],values[i]) for i in range(len(values))])
def set_properties(obj,prop_dict,verbose='yes'):
"""
    This function takes an object and sets its properties according to the property dictionary input.
If, for any entry in the dictionary, the property or method to set it does not exist, it will be skipped
over.
*obj: artist object
*prop_dict: a property dictionary of the sort returned by get_properties()
*verbose: set to 'yes' to spout error messages for properties which could not be set
        DEFAULT: 'yes'
"""
misc.check_vars(prop_dict,dict)
for key in list(prop_dict.keys()):
try: pyplot.setp(obj,key,prop_dict[key])
except AttributeError:
if 'y' in verbose: Logger.warning('Property "%s" could not be set.'%key)
return obj
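# Hedged usage sketch: get_properties/set_properties can be combined to copy the
# styling of one artist onto another; the line objects and chosen keys below are
# illustrative only.
def _demo_copy_line_style():
    source_line, = pyplot.plot([0, 1], [0, 1], linewidth=3, linestyle='--')
    target_line, = pyplot.plot([0, 1], [1, 0])
    style = get_properties(source_line, verbose='no')
    # Only pass on a couple of cosmetic keys; set_properties skips anything it cannot set.
    subset = {k: style[k] for k in ('linewidth', 'linestyle') if k in style}
    return set_properties(target_line, subset, verbose='no')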
def minor_ticks(nx=5,ny=5,x=True,y=True):
"""
Sets *n* minor tick marks per major tick for the x and y axes of the current figure.
*nx: integer number of minor ticks for x, DEFAULT: 5
*ny: integer number of minor ticks for y, DEFAULT: 5
*x: True/False, DEFAULT: True
*y: True/False, DEFAULT: True
"""
ax = pyplot.gca()
if x:
ax.xaxis.set_major_locator(pyplot.AutoLocator())
x_major = ax.xaxis.get_majorticklocs()
dx_minor = (x_major[-1]-x_major[0])/(len(x_major)-1)/nx
ax.xaxis.set_minor_locator(pyplot.MultipleLocator(dx_minor))
if y:
ax.yaxis.set_major_locator(pyplot.AutoLocator())
y_major = ax.yaxis.get_majorticklocs()
dy_minor = (y_major[-1]-y_major[0])/(len(y_major)-1)/ny
ax.yaxis.set_minor_locator(pyplot.MultipleLocator(dy_minor))
pyplot.plot()
return
def axes_limits(xlims=None,ylims=None,auto=False):
"""
Sets limits for the x and y axes.
*xlims: tuple of (xmin,xmax)
*ylims: tuple of (ymin,ymax)
*auto: set to True to turn on autoscaling for both axes
"""
ax=pyplot.gca()
ax.set_autoscale_on(auto)
if xlims!=None: ax.set_xlim(xlims[0],xlims[1])
if ylims!=None: ax.set_ylim(ylims[0],ylims[1])
pyplot.draw();return
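# Hedged usage sketch: the two helpers above are typically called right after
# plotting, to tidy up the current axes.
def _demo_axes_helpers():
    pyplot.plot(range(10), [v ** 2 for v in range(10)])
    minor_ticks(nx=4, ny=4)          # four minor ticks per major tick on both axes
    axes_limits(xlims=(0, 9), ylims=(0, 90))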
# FIXME: multi-row layouts still need work; axes heights are not computed correctly yet
def grid_axes(nplots, xstart=0.15, xstop=.85, spacing=0.02,
bottom=0.1, top = 0.85, widths=None, **kwargs):
"""
Generates a series of plots neighboring each other horizontally and with common
y offset and height values.
nplots - the number of plots to create
xstart - the left margin of the first plot
             DEFAULT: 0.15 <- leaves room for the y-axis label
xstop - the right margin of the last plot
             DEFAULT: 0.85 <- leaves a right-hand margin
spacing - the amount of space between plots
             DEFAULT: 0.02
bottom - the bottom margin of the row of plots
top - the top margin of the row of plots
    widths - specify the relative width of each plot. By default plots are evenly
             spaced, but if a list of weight factors is supplied the plot widths are
             scaled accordingly; the weights are normalized so each row still fills
             the allotted horizontal area.
kwargs - passed to figure.add_axes method
"""
###Check types###
input_list=(nplots,xstart,xstop,spacing,bottom,top,widths)
type_list=[(int,list)] #for nplots
type_list.extend([(int,float,list)]*5) #for xstart, xstop, spacing, bottom, top
type_list.append((list,type(None))) #for widths
type_list=tuple(type_list)
misc.check_vars(input_list,type_list,protect=[list])
###Grid bottom and top arguments equally for each row if necessary###
if type(nplots)==list: #if we want more than one row
nrows=len(nplots)
vsize=((top-bottom+nrows*spacing))/float(nrows)
the_bottom=bottom
        if type(bottom)!=list: #If user hasn't provided per-row bottom margins
bottom=[the_bottom+(vsize)*i+spacing*bool(i) for i in range(nrows)]
bottom.reverse() #top to bottom
if type(top)!=list:
top=[the_bottom+vsize*(i+1) for i in range(nrows)]
top.reverse() #top to bottom
###Make sure widths is properly formatted###
if widths!=None:
for i in range(len(widths)):
if type(widths[i])==list: #specific widths for plots in row
widths[i]=tuple(widths[i]) #turn to tuple to prevent iteration into
###Define what to do for each row###
fig=pyplot.gcf()
def row_of_axes(nplots_row,\
xstart,xstop,spacing,\
bottom,top,widths,kwargs,index):
##Check that we haven't iterated too deep##
Logger.raiseException('Provide input values for rows and columns only (e.g. lists of depth 2).',\
unless=(len(index)<2),\
exception=IndexError)
##Check format of widths##
if widths==None: widths=[1]*nplots_row
elif hasattr(widths,'__len__'): #expect a tuple
print(len(widths),nplots_row)
Logger.raiseException('When providing *widths* keyword, provide a plot width for each intended sub-plot in each row.',\
unless=(len(widths)==nplots_row),\
exception=IndexError)
else: widths=tuple(widths)*nplots_row
###Axes values###
avg_width=(xstop-xstart-spacing*(nplots_row-1))/float(nplots_row)
height=top-bottom
xpos=xstart
###Weighted widths###
weighted_widths=[]
for j in range(nplots_row):
weighted_width=avg_width*widths[j]
weighted_widths.append(weighted_width)
true_widths=[width/float(sum(weighted_widths))*(nplots_row*avg_width) \
for width in weighted_widths]
###Make new axes in row###
row_axes=[]
for j in range(nplots_row):
width=true_widths[j]
rect=[xpos, bottom, width, height]
new_axis=fig.add_axes(rect, **kwargs)
xpos+=width+spacing
row_axes.append(new_axis)
return row_axes
###Apply to all rows###
new_axes=misc.apply_to_array(row_of_axes,nplots,xstart,xstop,spacing,bottom,top,widths,kwargs,\
protect=[tuple,dict])
pyplot.plot()
return new_axes
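# Hedged usage sketch for grid_axes: a single row of three plots, with the middle
# plot twice as wide as its neighbours. Passing a list for nplots would instead
# create one row of plots per list entry.
def _demo_grid_axes():
    return grid_axes(3, widths=[1, 2, 1])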
def colorline(x, y, z=None, cmap=plt.get_cmap('copper'), \
norm=plt.Normalize(0.0, 1.0),
linewidth=3, alpha=1.0, **kwargs):
"""
http://nbviewer.ipython.org/github/dpsanders/matplotlib-examples/blob/master/colorline.ipynb
http://matplotlib.org/examples/pylab_examples/multicolored_line.html
Plot a colored line with coordinates x and y
Optionally specify colors in the array z
Optionally specify a colormap, a norm function and a line width
"""
import matplotlib.collections as mcoll
def make_segments(x, y):
"""
Create list of line segments from x and y coordinates, in the correct format
for LineCollection: an array of the form numlines x (points per line) x 2 (x
and y) array
"""
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = | np.concatenate([points[:-1], points[1:]], axis=1) | numpy.concatenate |
'''
Functions that resample the training data.
'''
import numpy as np
def validation_data(X, y, fit_kwargs, ratio=1):
""" Set validation data from the samples remaining in the pool. """
one_ind = np.where(y == 1)[0]
zero_ind = np.where(y == 0)[0]
n_one = len(one_ind)
n_zero = len(zero_ind)
n_zero_add = min(n_zero, int(ratio*n_one))
zero_add = zero_ind[np.random.choice(n_zero, n_zero_add, replace=False)]
all_ind = | np.append(one_ind, zero_add) | numpy.append |
#
# Author: <NAME>
#
# Copyright (c) 2019 Adobe Systems Incorporated. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code adapted from https://github.com/rguthrie3/BiLSTM-CRF/blob/master/model.py
# and from https://github.com/neulab/cmu-ner/blob/master/models/decoders.py
import dynet as dy
import numpy as np
class CRFDecoder:
def __init__(self, model, src_output_dim, tag_emb_dim, tag_size, constraints=None):
self.model = model
self.start_id = tag_size
self.end_id = tag_size + 1
self.tag_size = tag_size + 2
tag_size = tag_size + 2
# optional: transform the hidden space of src encodings into the tag embedding space
self.W_src2tag_readout = model.add_parameters((tag_emb_dim, src_output_dim))
self.b_src2tag_readout = model.add_parameters((tag_emb_dim))
self.b_src2tag_readout.zero()
self.W_scores_readout2tag = model.add_parameters((tag_size, tag_emb_dim))
self.b_scores_readout2tag = model.add_parameters((tag_size))
self.b_scores_readout2tag.zero()
# (to, from), trans[i] is the transition score to i
init_transition_matrix = np.random.randn(tag_size, tag_size) # from, to
# init_transition_matrix[self.start_id, :] = -1000.0
# init_transition_matrix[:, self.end_id] = -1000.0
init_transition_matrix[self.end_id, :] = -1000.0
init_transition_matrix[:, self.start_id] = -1000.0
if constraints is not None:
init_transition_matrix = self._constrained_transition_init(init_transition_matrix, constraints)
# print init_transition_matrix
self.transition_matrix = model.lookup_parameters_from_numpy(init_transition_matrix)
self.interpolation = True # args.interp_crf_score
if self.interpolation:
self.W_weight_transition = model.add_parameters((1, tag_emb_dim))
self.b_weight_transition = model.add_parameters((1))
self.b_weight_transition.zero()
def learn(self, src_enc, tgt_tags):
return self.decode_loss(src_enc, [tgt_tags])
def tag(self, src_enc):
return self.decoding(src_enc)[1]
def _constrained_transition_init(self, transition_matrix, contraints):
'''
:param transition_matrix: numpy array, (from, to)
:param contraints: [[from_indexes], [to_indexes]]
:return: newly initialized transition matrix
'''
for cons in contraints:
transition_matrix[cons[0], cons[1]] = -1000.0
return transition_matrix
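    # Hedged illustration of the `contraints` format consumed above: each entry is a
    # pair [from_indexes, to_indexes] marking forbidden transitions. For example, in a
    # BIO scheme where tag 0 is "O" and tag 2 is "I-PER", forbidding O -> I-PER would
    # be expressed as [[[0], [2]]]. The tag indices here are purely illustrative.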
def _log_sum_exp_dim_0(self, x):
# numerically stable log_sum_exp
dims = x.dim()
max_score = dy.max_dim(x, 0) # (dim_1, batch_size)
if len(dims[0]) == 1:
max_score_extend = max_score
else:
max_score_reshape = dy.reshape(max_score, (1, dims[0][1]), batch_size=dims[1])
max_score_extend = dy.concatenate([max_score_reshape] * dims[0][0])
x = x - max_score_extend
exp_x = dy.exp(x)
# (dim_1, batch_size), if no dim_1, return ((1,), batch_size)
log_sum_exp_x = dy.log(dy.mean_dim(exp_x, d=[0], b=False) * dims[0][0])
return log_sum_exp_x + max_score
def forward_alg(self, tag_scores):
''' Forward DP for CRF.
tag_scores (list of batched dy.Tensor): (tag_size, batchsize)
'''
# Be aware: if a is lookup_parameter with 2 dimension, then a[i] returns one row;
# if b = dy.parameter(a), then b[i] returns one column; which means dy.parameter(a) already transpose a
transpose_transition_score = self.transition_matrix#.expr(update=True)
# transpose_transition_score = dy.transpose(transition_score)
# alpha(t', s) = the score of sequence from t=0 to t=t' in log space
# np_init_alphas = -100.0 * np.ones((self.tag_size, batch_size))
# np_init_alphas[self.start_id, :] = 0.0
# alpha_tm1 = dy.inputTensor(np_init_alphas, batched=True)
alpha_tm1 = transpose_transition_score[self.start_id] + tag_scores[0]
# self.transition_matrix[i]: from i, column
# transpose_score[i]: to i, row
# transpose_score: to, from
for tag_score in tag_scores[1:]:
# extend for each transit <to>
alpha_tm1 = dy.concatenate_cols([alpha_tm1] * self.tag_size) # (from, to, batch_size)
# each column i of tag_score will be the repeated emission score to tag i
tag_score = dy.transpose(dy.concatenate_cols([tag_score] * self.tag_size))
alpha_t = alpha_tm1 + transpose_transition_score + tag_score
alpha_tm1 = self._log_sum_exp_dim_0(alpha_t) # (tag_size, batch_size)
terminal_alpha = self._log_sum_exp_dim_0(alpha_tm1 + self.transition_matrix[self.end_id]) # (1, batch_size)
return terminal_alpha
def score_one_sequence(self, tag_scores, tags, batch_size):
''' tags: list of tag ids at each time step '''
# print tags, batch_size
# print batch_size
# print "scoring one sentence"
tags = [[self.start_id] * batch_size] + tags # len(tag_scores) = len(tags) - 1
score = dy.inputTensor( | np.zeros(batch_size) | numpy.zeros |
from pathlib import Path
import h5py
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision.datasets.utils import download_and_extract_archive
class Traffic4Cast20(Dataset):
""" Data for the 2020 traffic4cast challenge. """
URLS = {
"https://zurich-ml-datasets.s3.amazonaws.com/traffic4cast/2020/BERLIN.tar?AWSAccessKeyId=<KEY>&Expires=1609923309&Signature=hqE1F%2FkqZmozMMLEEGJUjnYIppo%3D&mkt_tok=<KEY>":
{
'filename': "BERLIN.tar",
'md5': "e5ff2ea5bfad2c7098aa1e58e21927a8"
},
"https://zurich-ml-datasets.s3.amazonaws.com/traffic4cast/2020/MOSCOW.tar?AWSAccessKeyId=<KEY>&Expires=1609923367&Signature=MKSDRXlbu0mgpOTsh8Lg6WbOK%2FI%3D&mkt_tok=<KEY>":
{
'filename': "MOSCOW.tar",
'md5': "8101753853af80c183f3cc10f84d42f4"
},
"https://zurich-ml-datasets.s3.amazonaws.com/traffic4cast/2020/ISTANBUL.tar?AWSAccessKeyId=<KEY>&Expires=1609923358&Signature=BSbSFV0%2B%2F5VeU9d0uFFeJiGWuFg%3D&mkt_tok=<KEY>":
{
'filename': "ISTANBUL.tar",
'md5': "229730cf5e95d31d1e8494f2a57e50e9"
}
}
def __init__(self,
root: str,
train: bool = True,
city: str = None,
single_sample: bool = False,
normalised: bool = False,
time_diff: int = 0,
masking: str = None,
sparse: bool = False,
download: bool = False,
seed: int = None):
self.root = Path(root).expanduser().resolve()
self.normalised = normalised
self.time_diff = time_diff
self.mask = None
self.sparse = sparse
if download:
self.download()
city = "" if city is None else city.upper()
file_id = city if city in {"BERLIN", "ISTANBUL", "MOSCOW"} else "*"
mode = "training" if train else "validation"
data = []
for file in self.path.glob("_".join([file_id, mode]) + ".npz"):
data.append(np.load(file.with_suffix(".npz")))
self.inputs = np.concatenate([arr['ins'] for arr in data], axis=0)
self.outputs = np.concatenate([arr['outs'] for arr in data], axis=0)
if single_sample:
self.inputs = self.inputs.reshape(1, -1, self.inputs.shape[-1])
self.outputs = self.outputs.reshape(1, -1, self.outputs.shape[-1])
if masking == 'zero':
self.mask = np.zeros_like(self.inputs[0])
elif masking == 'avg' or sparse:
if single_sample:
raise NotImplementedError("no meaningful averaging for single seq (yet)")
self.mask = np.mean(self.inputs, axis=0)
if sparse:
rng = np.random.RandomState(seed)
self._indices = rng.randint(self.inputs.shape[1], size=self.inputs.shape[:1])
def __getitem__(self, index):
inputs = torch.from_numpy(self.inputs[index].astype('float32'))
outputs = torch.from_numpy(self.outputs[index].astype('float32'))
aux = torch.linspace(0., 1., 288).repeat(len(inputs) // 288).view(-1, 1)
if self.sparse:
xi = inputs[self.idx]
inputs[:] = torch.from_numpy(self.mask)
inputs[self.idx] = xi
elif self.mask is not None:
mask_val = torch.from_numpy(self.mask[len(inputs) - self.time_diff:])
inputs[len(inputs) - self.time_diff:] = mask_val
elif self.time_diff:
inputs = inputs[:len(inputs) - self.time_diff]
aux = aux[:len(inputs)]
outputs = outputs[self.time_diff:]
if self.normalised:
mu, sigma = inputs.mean(), inputs.std()
inputs = (inputs - mu) / sigma
outputs = (outputs - mu) / sigma
return inputs, aux, outputs
def __len__(self):
return len(self.inputs)
@property
def idx(self):
if self.sparse:
# 6AM, 12AM, 6PM +- 5 min
return [71, 72, 73, 143, 144, 145, 215, 216, 217]
else:
# midnight
return -1
@property
def path(self):
return self.root / "traffic4cast20"
@staticmethod
def _process_dir(path: Path):
inputs, outputs = [], []
for file in sorted(path.glob("*.h5")):
with h5py.File(file, 'r') as f:
data = np.asarray(f['array'])
volumes = data[..., :8:2] # last column: NE, NW, SE, SW
north_south = np.sum(volumes[..., [0, -1], 1:-1, :], axis=-2)
west_east = np.sum(volumes[..., 1:-1, [0, -1], :], axis=-3)
corners = volumes[..., [0, -1, 0, -1], [0, 0, -1, -1], :]
nw, sw, ne, se = np.moveaxis(corners, -2, 0)
incoming = [
np.sum(west_east[..., 0, 0::2], axis=-1) + (sw[..., 0] + nw[..., 2]) / 2, # W
np.sum(west_east[..., 1, 1::2], axis=-1) + (se[..., 1] + ne[..., 3]) / 2, # E
np.sum(north_south[..., 0, 2:], axis=-1) + (nw[..., 2] + ne[..., 3]) / 2, # N
np.sum(north_south[..., 1, :2], axis=-1) + (sw[..., 0] + se[..., 1]) / 2 # S
]
outgoing = [
np.sum(west_east[..., 0, 1::2], axis=-1) + sw[..., 1] + nw[..., 3] + (sw[..., 3] + nw[..., 1]) / 2, # W
np.sum(west_east[..., 1, 0::2], axis=-1) + se[..., 0] + ne[..., 2] + (se[..., 2] + ne[..., 0]) / 2, # E
| np.sum(north_south[..., 0, :2], axis=-1) | numpy.sum |
import unittest
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from PySeismoSoil.class_ground_motion import Ground_Motion as GM
from PySeismoSoil.class_Vs_profile import Vs_Profile
from PySeismoSoil.class_frequency_spectrum import Frequency_Spectrum
import os
from os.path import join as _join
f_dir = _join(os.path.dirname(os.path.realpath(__file__)), 'files')
class Test_Class_Ground_Motion(unittest.TestCase):
def test_loading_data__two_columns_from_file(self):
# Two columns from file
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='gal')
PGA_benchmark = 294.30 # unit: cm/s/s
PGV_benchmark = 31.46 # unit: cm/s
PGD_benchmark = 38.77 # unit: cm
tol = 1e-2
self.assertAlmostEqual(gm.pga_in_gal, PGA_benchmark, delta=tol)
self.assertAlmostEqual(gm.pgv_in_cm_s, PGV_benchmark, delta=tol)
self.assertAlmostEqual(gm.pgd_in_cm, PGD_benchmark, delta=tol)
self.assertAlmostEqual(gm.peak_Arias_Intensity, 1.524, delta=tol)
self.assertAlmostEqual(gm.rms_accel, 0.4645, delta=tol)
def test_loading_data__two_columns_from_numpy_array(self):
# Two columns from numpy array
gm = GM(np.array([[0.1, 0.2, 0.3, 0.4], [1, 2, 3, 4]]).T, unit='m/s/s')
self.assertAlmostEqual(gm.pga, 4)
def test_loading_data__one_column_from_file(self):
# One column from file
gm = GM(_join(f_dir, 'one_column_data_example.txt'), unit='g', dt=0.2)
self.assertAlmostEqual(gm.pga_in_g, 12.0)
def test_loading_data__one_column_from_numpy_array(self):
# One column from numpy array
gm = GM(np.array([1, 2, 3, 4, 5]), unit='gal', dt=0.1)
self.assertAlmostEqual(gm.pga_in_gal, 5.0)
def test_loading_data__one_column_without_specifying_dt(self):
# One column without specifying dt
error_msg = 'is needed for one-column `data`.'
with self.assertRaisesRegex(ValueError, error_msg):
gm = GM(np.array([1, 2, 3, 4, 5]), unit='gal')
def test_loading_data__test_invalid_unit_names(self):
# Test invalid unit names
with self.assertRaisesRegex(ValueError, 'Invalid `unit` name.'):
GM(np.array([1, 2, 3, 4, 5]), unit='test', dt=0.1)
with self.assertRaisesRegex(ValueError, r"use '/s/s' instead of 's\^2'"):
GM(np.array([1, 2, 3, 4, 5]), unit='m/s^2', dt=0.1)
def test_differentiation(self):
veloc = np.array([[.1, .2, .3, .4, .5, .6], [1, 3, 7, -1, -3, 5]]).T
gm = GM(veloc, unit='m', motion_type='veloc')
accel_benchmark = np.array(
[[.1, .2, .3, .4, .5, .6],
[0, 20, 40, -80, -20, 80]]
).T
self.assertTrue(np.allclose(gm.accel, accel_benchmark))
def test_integration__artificial_example(self):
gm = GM(_join(f_dir, 'two_column_data_example.txt'), unit='m/s/s')
v_bench = np.array([[0.1000, 0.1000], # from MATLAB
[0.2000, 0.3000],
[0.3000, 0.6000],
[0.4000, 1.0000],
[0.5000, 1.5000],
[0.6000, 1.7000],
[0.7000, 2.0000],
[0.8000, 2.4000],
[0.9000, 2.9000],
[1.0000, 3.5000],
[1.1000, 3.8000],
[1.2000, 4.2000],
[1.3000, 4.7000],
[1.4000, 5.3000],
[1.5000, 6.0000]])
u_bench = np.array([[0.1000, 0.0100], # from MATLAB
[0.2000, 0.0400],
[0.3000, 0.1000],
[0.4000, 0.2000],
[0.5000, 0.3500],
[0.6000, 0.5200],
[0.7000, 0.7200],
[0.8000, 0.9600],
[0.9000, 1.2500],
[1.0000, 1.6000],
[1.1000, 1.9800],
[1.2000, 2.4000],
[1.3000, 2.8700],
[1.4000, 3.4000],
[1.5000, 4.0000]])
self.assertTrue(np.allclose(gm.veloc, v_bench))
self.assertTrue(np.allclose(gm.displ, u_bench))
def test_integration__real_world_example(self):
# Note: In this test, the result by cumulative trapezoidal numerical
# integration is used as the benchmark. Since it is infeasible to
# achieve perfect "alignment" between the two time histories,
# we check the correlation coefficient instead of element-wise
# check.
veloc_ = np.genfromtxt(_join(f_dir, 'sample_accel.txt'))
gm = GM(veloc_, unit='m/s', motion_type='veloc')
displ = gm.displ[:, 1]
displ_cumtrapz = np.append(0, sp.integrate.cumtrapz(veloc_[:, 1], dx=gm.dt))
        r = np.corrcoef(displ_cumtrapz, displ)[0, 1]  # cross-correlation between the two
self.assertTrue(r >= 0.999)
def test_fourier_transform(self):
gm = GM(_join(f_dir, 'two_column_data_example.txt'), unit='m/s/s')
freq, spec = gm.get_Fourier_spectrum(real_val=False).raw_data.T
freq_bench = [
0.6667, 1.3333, 2.0000, 2.6667, 3.3333, 4.0000, 4.6667, 5.3333,
]
FS_bench = [
60.0000 + 0.0000j, -1.5000 + 7.0569j, -1.5000 + 3.3691j,
-7.5000 +10.3229j, -1.5000 + 1.3506j, -1.5000 + 0.8660j,
-7.5000 + 2.4369j, -1.5000 + 0.1577j,
]
self.assertTrue(np.allclose(freq, freq_bench, atol=0.0001, rtol=0.0))
self.assertTrue(np.allclose(spec, FS_bench, atol=0.0001, rtol=0.0))
def test_baseline_correction(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m/s/s')
corrected = gm.baseline_correct(show_fig=True)
self.assertTrue(isinstance(corrected, GM))
def test_high_pass_filter(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
hp = gm.highpass(cutoff_freq=1.0, show_fig=True)
self.assertTrue(isinstance(hp, GM))
def test_low_pass_filter(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
lp = gm.lowpass(cutoff_freq=1.0, show_fig=True)
self.assertTrue(isinstance(lp, GM))
def test_band_pass_filter(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
bp = gm.bandpass(cutoff_freq=[0.5, 8], show_fig=True)
self.assertTrue(isinstance(bp, GM))
def test_band_stop_filter(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
bs = gm.bandstop(cutoff_freq=[0.5, 8], show_fig=True)
self.assertTrue(isinstance(bs, GM))
def test_amplify_via_profile(self):
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
vs_prof = Vs_Profile(_join(f_dir, 'profile_FKSH14.txt'))
output_motion = gm.amplify(vs_prof, boundary='elastic')
self.assertTrue(isinstance(output_motion, GM))
def test_deconvolution(self):
# Assert `deconvolve()` & `amplify()` are reverse operations to each other.
gm = GM(_join(f_dir, 'sample_accel.txt'), unit='m')
vs_prof = Vs_Profile(_join(f_dir, 'profile_FKSH14.txt'))
for boundary in ['elastic', 'rigid']:
deconv_motion = gm.deconvolve(vs_prof, boundary=boundary)
output_motion = deconv_motion.amplify(vs_prof, boundary=boundary)
self.assertTrue(self.nearly_identical(gm.accel, output_motion.accel))
amplified_motion = gm.amplify(vs_prof, boundary=boundary)
output_motion = amplified_motion.deconvolve(vs_prof, boundary=boundary)
self.assertTrue(self.nearly_identical(gm.accel, output_motion.accel))
def test_plot(self):
filename = _join(f_dir, 'sample_accel.txt')
gm = GM(filename, unit='m')
fig, axes = gm.plot() # automatically generate fig/ax objects
self.assertTrue(isinstance(axes, tuple))
self.assertEqual(len(axes), 3)
self.assertEqual(axes[0].title.get_text(), os.path.split(filename)[1])
fig2 = plt.figure(figsize=(8, 8))
fig2_, axes = gm.plot(fig=fig2) # feed an external figure object
self.assertTrue(np.allclose(fig2_.get_size_inches(), (8, 8)))
def test_unit_convert(self):
data = | np.array([1, 3, 7, -2, -10, 0]) | numpy.array |
import dataclasses
import math
import json
import numpy as np
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
import geopandas as gpd
import shapely.geometry as shpg
from scipy.interpolate import interp1d
@dataclasses.dataclass
class UncClass:
_default_unc = 0.15 # 15%
mean: float
unit_name: str = ""
std_perc: float = _default_unc
def std_unit(self):
return self.mean * self.std_perc
def std_perc_100(self):
return self.std_perc * 100.
def perc_100_to_perc(self, perc_100):
if perc_100 < 1:
            raise ValueError("Input percentage needs to be at least 1")
self.std_perc = perc_100 / 100.
def unit_to_perc(self, unit_std):
self.std_perc = unit_std / self.mean
class ModelDefaults:
def __init__(self):
file_root = os.path.dirname(os.path.abspath(__file__))
# print(file_root)
defaults_file = "defaults.json"
infile = os.path.join(file_root, defaults_file)
with open(infile) as load_file:
j_data = json.load(load_file)
inputs = j_data['input_data'][0]
self.general_unc = inputs["general_unc"]
self.max_pf = inputs['pf']
self.pf_unit = "MPa"
self.density = inputs["density"]
self.density_unit = "kg/m^3"
self.hydro = inputs["hydro"]
self.hydro_unit = "MPa/km"
self.hydro_under = inputs["hydro_under"]
self.hydro_upper = inputs["hydro_upper"]
self.dip = inputs["dip"]
self.dip_unit = "deg"
az_unc = inputs["az_unc"]
self.az_unit = "deg"
self.az_unc_perc = az_unc / 360.
self.sv = (self.density * 9.81) / 1000 # MPa/km
self.max_sv = (5000 * 9.81) / 1000
self.stress_unit = "MPa/km"
self.sh_max_az = inputs["sh_max_az"]
self.sh_min_az = inputs["sh_min_az"]
self.mu = inputs["mu"]
self.mu_unit = "unitless"
self.F_mu = (math.sqrt(self.mu ** 2 + 1)) ** 2
abs_shmax = self.F_mu * (self.sv - self.hydro) + self.hydro
abs_shmin = ((self.sv - self.hydro) / self.F_mu) + self.hydro
self.shmax_r = abs_shmax
self.shmin_r = (abs_shmax - self.sv) / 2
self.shmax_ss = abs_shmax
self.shmin_ss = abs_shmin
self.shmax_n = (self.sv - abs_shmin) / 2
self.shmin_n = abs_shmin
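# Hedged usage sketch: ModelDefaults reads defaults.json and derives end-member
# horizontal stress gradients for reverse, strike-slip and normal faulting from the
# frictional limit F_mu*(Sv - Pf) + Pf. The helper below simply reports those values
# and is illustrative only (it assumes defaults.json sits next to this module).
def _demo_default_stress_bounds():
    d = ModelDefaults()
    return {'Sv': d.sv,
            'reverse': (d.shmax_r, d.shmin_r),
            'strike_slip': (d.shmax_ss, d.shmin_ss),
            'normal': (d.shmax_n, d.shmin_n)}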
class ModelInputs:
def __init__(self, input_dict):
"""
Parameters
----------
        input_dict : dict
            Dictionary of user-supplied model inputs; any keys that are missing
            fall back to the values in ModelDefaults.
"""
defaults = ModelDefaults()
if "max_pf" in input_dict.keys():
self.max_pf = input_dict["max_pf"]
else:
self.max_pf = defaults.max_pf
if "dip" in input_dict.keys():
if "dip_unc" in input_dict.keys():
self.Dip = UncClass(input_dict["dip"], defaults.dip_unit, input_dict["dip_unc"] / input_dict["dip"])
else:
self.Dip = UncClass(input_dict["dip"], defaults.dip_unit)
else:
self.Dip = UncClass(defaults.dip, defaults.dip_unit)
if "sv" in input_dict.keys():
if input_dict["sv"] > defaults.max_sv:
warnings.warn('Vertical stress gradient for density > 5000 kg/m^3. Are you sure you input a gradient?')
if "sv_unc" in input_dict.keys():
self.Sv = UncClass(input_dict["sv"], defaults.stress_unit, input_dict["sv_unc"])
else:
self.Sv = UncClass(input_dict["sv"], defaults.stress_unit)
else:
self.Sv = UncClass(defaults.sv, defaults.stress_unit)
if "hydro" in input_dict.keys():
self.SHydro = UncClass(input_dict["hydro"], defaults.stress_unit)
else:
if "pf_max" in input_dict.keys():
new_hydro = input_dict["pf_max"] / input_dict["depth"]
self.SHydro = UncClass(new_hydro, defaults.stress_unit)
else:
self.SHydro = UncClass(defaults.hydro, defaults.stress_unit)
if "hydro_under" in input_dict.keys():
self.hydro_l = self.SHydro.mean * input_dict["hydro_under"]
else:
self.hydro_l = self.SHydro.mean * defaults.hydro_under
if "hydro_upper" in input_dict.keys():
self.hydro_u = self.SHydro.mean * input_dict["hydro_upper"]
else:
self.hydro_u = self.SHydro.mean * defaults.hydro_upper
if "mu" in input_dict.keys():
if "mu_unc" in input_dict.keys():
self.Mu = UncClass(input_dict["mu"], defaults.mu_unit, input_dict["mu_unc"])
else:
                self.Mu = UncClass(input_dict["mu"], defaults.mu_unit)
else:
self.Mu = UncClass(defaults.mu, defaults.mu_unit)
if "shmax" in input_dict.keys():
shmax = float(input_dict['shmax'])
if "shmin" in input_dict.keys():
shmin = float(input_dict['shmin'])
if shmax > self.Sv.mean > shmin:
if {"shMunc", "shmiunc"} <= input_dict.keys():
self.ShMaxSS = UncClass(shmax, defaults.stress_unit, float(input_dict["shMunc"]) / shmax)
self.ShMinSS = UncClass(shmin, defaults.stress_unit, float(input_dict["shmiunc"]) / shmin)
else:
self.ShMaxSS = UncClass(shmax, defaults.stress_unit)
self.ShMinSS = UncClass(shmin, defaults.stress_unit)
self.ShMaxR = UncClass(defaults.shmax_r, defaults.stress_unit)
self.ShMinR = UncClass(defaults.shmin_r, defaults.stress_unit)
self.ShMaxN = UncClass(defaults.shmax_n, defaults.stress_unit)
self.ShMinN = UncClass(defaults.shmin_n, defaults.stress_unit)
elif shmax > shmin > self.Sv.mean:
if {"shMunc", "shmiunc"} <= input_dict.keys():
self.ShMaxR = UncClass(shmax, defaults.stress_unit, float(input_dict["shMunc"]) / shmax)
self.ShMinR = UncClass(shmin, defaults.stress_unit, float(input_dict["shmiunc"]) / shmin)
else:
self.ShMaxR = UncClass(shmax, defaults.stress_unit)
self.ShMinR = UncClass(shmin, defaults.stress_unit)
self.ShMaxSS = UncClass(defaults.shmax_ss, defaults.stress_unit)
self.ShMinSS = UncClass(defaults.shmin_ss, defaults.stress_unit)
self.ShMaxN = UncClass(defaults.shmax_n, defaults.stress_unit)
self.ShMinN = UncClass(defaults.shmin_n, defaults.stress_unit)
elif self.Sv.mean > shmax > shmin:
if {"shMunc", "shmiunc"} <= input_dict.keys():
self.ShMaxN = UncClass(shmax, defaults.stress_unit, float(input_dict["shMunc"]) / shmax)
self.ShMinN = UncClass(shmin, defaults.stress_unit, float(input_dict["shmiunc"]) / shmin)
else:
self.ShMaxN = UncClass(shmax, defaults.stress_unit)
self.ShMinN = UncClass(shmin, defaults.stress_unit)
self.ShMaxR = UncClass(defaults.shmax_r, defaults.stress_unit)
self.ShMinR = UncClass(defaults.shmin_r, defaults.stress_unit)
self.ShMaxSS = UncClass(defaults.shmax_ss, defaults.stress_unit)
self.ShMinSS = UncClass(defaults.shmin_ss, defaults.stress_unit)
else:
# print("default")
self.ShMaxR = UncClass(defaults.shmax_r, defaults.stress_unit)
self.ShMinR = UncClass(defaults.shmin_r, defaults.stress_unit)
self.ShMaxSS = UncClass(defaults.shmax_ss, defaults.stress_unit)
self.ShMinSS = UncClass(defaults.shmin_ss, defaults.stress_unit)
self.ShMaxN = UncClass(defaults.shmax_n, defaults.stress_unit)
self.ShMinN = UncClass(defaults.shmin_n, defaults.stress_unit)
if "shmaxaz" in input_dict.keys():
if "az_unc" in input_dict.keys():
self.ShMaxAz = UncClass(input_dict["shmaxaz"], defaults.az_unit, input_dict["az_unc"])
else:
self.ShMaxAz = UncClass(input_dict["shmaxaz"], defaults.az_unit, defaults.az_unc_perc)
else:
self.ShMaxAz = UncClass(defaults.sh_max_az, defaults.az_unit, defaults.az_unc_perc)
if "shminaz" in input_dict.keys():
if "az_unc" in input_dict.keys():
self.ShMinAz = UncClass(input_dict["shminaz"], defaults.az_unit, input_dict["az_unc"])
else:
self.ShMinAz = UncClass(input_dict["shminaz"], defaults.az_unit, defaults.az_unc_perc)
else:
if "shmaxaz" in input_dict.keys():
self.ShMinAz = UncClass(self.ShMaxAz.mean + 90., defaults.az_unit, defaults.az_unc_perc)
else:
self.ShMinAz = UncClass(defaults.sh_min_az, defaults.az_unit, defaults.az_unc_perc)
def plot_uncertainty(self, stress, depth):
fig, axs = plt.subplots(2, 4, sharex='none', sharey='all')
n_samples = 1000
dip = np.random.normal(self.Dip.mean, self.Dip.std_unit(), n_samples)
mu = np.random.normal(self.Mu.mean, self.Mu.std_perc, n_samples)
s_v = np.random.normal(self.Sv.mean, self.Sv.std_unit(), n_samples)
# s_hydro = np.random.normal(self.SHydro.mean, self.SHydro.std_unit(), 500)
# lower_pf = -0.04
# upper_pf = 1.18
hydro1 = self.SHydro.mean - self.hydro_l
hydro2 = self.SHydro.mean + self.hydro_u
s_hydro = (hydro2 - hydro1) * np.random.random(n_samples) + hydro1
if stress == "reverse":
sh_max = np.random.normal(self.ShMaxR.mean, self.ShMaxR.std_unit(), n_samples)
sh_min = np.random.normal(self.ShMinR.mean, self.ShMinR.std_unit(), n_samples)
elif stress == "strike-slip":
sh_max = np.random.normal(self.ShMaxSS.mean, self.ShMaxSS.std_unit(), n_samples)
sh_min = np.random.normal(self.ShMinSS.mean, self.ShMinSS.std_unit(), n_samples)
elif stress == "normal":
sh_max = np.random.normal(self.ShMaxN.mean, self.ShMaxN.std_unit(), n_samples)
sh_min = np.random.normal(self.ShMinN.mean, self.ShMinN.std_unit(), n_samples)
else:
sh_max = np.random.normal(0, 1, n_samples)
sh_min = np.random.normal(0, 1, n_samples)
warnings.warn("Stress field not properly defined.", UserWarning)
shmax_az = np.random.normal(self.ShMaxAz.mean, self.ShMaxAz.std_unit(), n_samples)
shmin_az = np.random.normal(self.ShMinAz.mean, self.ShMinAz.std_unit(), n_samples)
s_v = s_v * depth
s_hydro = s_hydro * depth
sh_max = sh_max * depth
sh_min = sh_min * depth
plot_datas = [dip, mu, s_v, s_hydro, sh_max, sh_min, shmax_az, shmin_az]
titles = ["Dip", "Mu", "Vert. Stress [MPa]", "Hydro. Pres. [MPa]", "SHMax [MPa]", "Shmin [MPa]",
"Shmax Azimuth", "Shmin Azimuth"]
i = 0
for ax1 in axs:
for ax in ax1:
data = plot_datas[i]
ax.hist(data, 50)
ax.axvline(np.median(data), color="black")
ax.set_title(titles[i])
quantiles = np.quantile(data, [0.01, 0.5, 0.99])
if titles[i] == "Mu":
quantiles = np.around(quantiles, decimals=2)
else:
quantiles = np.around(quantiles, decimals=0)
ax.set_xticks(quantiles)
i = i + 1
fig.tight_layout()
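# A minimal usage sketch (hypothetical values, not part of the original module):
# any key missing from the dict falls back to ModelDefaults, and
# plot_uncertainty expects a stress regime name plus a depth in km (so the
# MPa/km gradients become MPa).
# inputs = ModelInputs({"dip": 60., "mu": 0.6, "shmaxaz": 45.})
# inputs.plot_uncertainty("strike-slip", depth=2.5)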
class SegmentDet2dResult:
"""
"""
def __init__(self, x1, y1, x2, y2, result, metadata):
""""""
self.p1 = (x1, y1)
self.p2 = (x2, y2)
# self.pf_results = pf_results
if "line_id" in metadata:
self.line_id = metadata["line_id"]
if "seg_id" in metadata:
self.seg_id = metadata["seg_id"]
self.result = result
class MeshFaceResult:
"""
"""
def __init__(self, face_num, triangle, p1, p2, p3, pf_results):
"""
Parameters
----------
face_num
p1
p2
p3
"""
self.face_num = face_num
self.triangle = triangle
self.p1 = p1
self.p2 = p2
self.p3 = p3
# if pf_results.size == 0:
# x = np.array([0., 0., 0.])
# y = np.array([0., 0.5, 1.])
# self.ecdf = np.column_stack((x, y))
# pf_results.sort()
# n = pf_results.size
# y = np.linspace(1.0 / n, 1, n)
# self.ecdf = np.column_stack((pf_results, y))
pf1 = pf_results[:, 0]
mu1 = pf_results[:, 1]
slip_tend = pf_results[:, 2]
inds = slip_tend >= mu1
n1 = pf1.size
pf2 = pf1[inds]
n2 = pf2.size
if n2 == 0:
max_pf = np.max(pf1)
x = np.array([max_pf, max_pf, max_pf])
# x = np.empty(5000)
# x.fill(np.nan)
y = np.array([0., 0.5, 1.])
# y = np.linspace(0., 1., 5000)
self.ecdf = np.column_stack((x, y))
self.no_fail = True
elif 0 < n2 < 100:
self.no_fail = False
pf2.sort()
n2 = pf2.size
y = np.linspace(1 / n2, 1, n2)
n2_2 = 100
z = np.linspace(1 / n2_2, 1, n2_2)
pf2_interp = interp1d(y, pf2, kind='linear')
pf2_2 = pf2_interp(z)
self.ecdf = np.column_stack((pf2_2, z))
else:
self.no_fail = False
pf2.sort()
n2 = pf2.size
y = np.linspace(1 / n2, 1, n2)
self.ecdf = np.column_stack((pf2, y))
def ecdf_cutoff(self, cutoff):
"""
Parameters
----------
cutoff: float
Returns
-------
"""
# self.ecdf[:, 0] = self.ecdf[:, 0] - hydrostatic_pres
cutoff = cutoff / 100
ind_fail = (np.abs(self.ecdf[:, 1] - cutoff)).argmin()
fail_pressure = self.ecdf[ind_fail, 0]
return fail_pressure
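# A minimal usage sketch (hypothetical inputs): pf_results is assumed to be an
# (N, 3) Monte Carlo array of [pore-fluid pressure, mu, slip tendency] rows, as
# consumed by __init__ above.
# face = MeshFaceResult(0, tri, p1, p2, p3, pf_results)
# p50 = face.ecdf_cutoff(50.)   # pressure at which 50% of realisations slip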
class SegmentMC2dResult:
"""
"""
def __init__(self, x1, y1, x2, y2, pf_results, metadata):
""""""
self.p1 = (x1, y1)
self.p2 = (x2, y2)
# self.pf_results = pf_results
if "line_id" in metadata:
self.line_id = metadata["line_id"]
if "seg_id" in metadata:
self.seg_id = metadata["seg_id"]
pf1 = pf_results[:, 0]
mu1 = pf_results[:, 1]
slip_tend = pf_results[:, 2]
inds = slip_tend >= mu1
n1 = pf1.size
pf2 = pf1[inds]
n2 = pf2.size
if n2 == 0:
max_pf = np.max(pf1)
x = np.array([max_pf, max_pf, max_pf])
# x = np.empty(5000)
# x.fill(np.nan)
y = np.array([0., 0.5, 1.])
# y = np.linspace(0., 1., 5000)
self.ecdf = np.column_stack((x, y))
self.no_fail = True
elif 0 < n2 < 100:
self.no_fail = False
pf2.sort()
n2 = pf2.size
y = np.linspace(1 / n2, 1, n2)
n2_2 = 100
z = np.linspace(1 / n2_2, 1, n2_2)
from __future__ import print_function
import mxnet as mx
from mxnet import nd
import numpy as np
from d2l import mxnet as d2l
from programs.utils import draw_vertical_leg as draw_vertical_leg_new
from programs.utils import draw_rectangle_top as draw_rectangle_top_new
from programs.utils import draw_square_top as draw_square_top_new
from programs.utils import draw_circle_top as draw_circle_top_new
from programs.utils import draw_middle_rect_layer as draw_middle_rect_layer_new
from programs.utils import draw_circle_support as draw_circle_support_new
from programs.utils import draw_square_support as draw_square_support_new
from programs.utils import draw_circle_base as draw_circle_base_new
from programs.utils import draw_square_base as draw_square_base_new
from programs.utils import draw_cross_base as draw_cross_base_new
from programs.utils import draw_sideboard as draw_sideboard_new
from programs.utils import draw_horizontal_bar as draw_horizontal_bar_new
from programs.utils import draw_vertboard as draw_vertboard_new
from programs.utils import draw_locker as draw_locker_new
from programs.utils import draw_tilt_back as draw_tilt_back_new
from programs.utils import draw_chair_beam as draw_chair_beam_new
from programs.utils import draw_line as draw_line_new
from programs.utils import draw_back_support as draw_back_support_new
from programs.loop_gen import decode_loop, translate, rotate, end
def get_distance_to_center():
x = np.arange(32)
y = np.arange(32)
xx, yy = np.meshgrid(x, y)
xx = xx + 0.5
yy = yy + 0.5
d = np.sqrt(np.square(xx - int(32 / 2)) + np.square(yy - int(32 / 2)))
return d
def gather(self, dim, index):
"""
Gathers values along an axis specified by ``dim``.
For a 3-D tensor the output is specified by:
out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
Parameters
----------
dim:
The axis along which to index
index:
A tensor of indices of elements to gather
Returns
-------
Output Tensor
"""
idx_xsection_shape = index.shape[:dim] + \
index.shape[dim + 1:]
self_xsection_shape = self.shape[:dim] + self.shape[dim + 1:]
if idx_xsection_shape != self_xsection_shape:
raise ValueError("Except for dimension " + str(dim) +
", all dimensions of index and self should be the same size")
if index.dtype != np.dtype('int_'):
raise TypeError("The values of index must be integers")
data_swaped = nd.swapaxes(self, 0, dim).asnumpy()
index_swaped = nd.swapaxes(index, 0, dim).asnumpy()
#print(data_swaped,index_swaped)
#print("index_swaped\n",index_swaped,index_swaped.shape,"data_swaped\n",data_swaped,data_swaped.shape,'\n')
gathered = nd.from_numpy(np.choose(index_swaped,data_swaped)).as_in_context(d2l.try_gpu())
return nd.swapaxes(gathered, 0, dim)
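# A minimal usage sketch (assumed shapes/values, mirroring torch.gather semantics):
# x = nd.array([[1., 2.], [3., 4.]])
# idx = nd.array([[0], [1]], dtype='int64')   # index dtype must be integer
# gather(x, 1, idx)                           # -> [[1.], [4.]]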
def scatter_numpy(self, dim, index, src):
"""
Writes all values from the Tensor src into self at the indices specified in the index Tensor.
:param dim: The axis along which to index
:param index: The indices of elements to scatter
:param src: The source element(s) to scatter
:return: self
"""
if index.dtype != np.dtype('int_'):
raise TypeError("The values of index must be integers")
if self.ndim != index.ndim:
raise ValueError("Index should have the same number of dimensions as output")
if dim >= self.ndim or dim < -self.ndim:
raise IndexError("dim is out of range")
if dim < 0:
# Not sure why scatter should accept dim < 0, but that is the behavior in PyTorch's scatter
dim = self.ndim + dim
idx_xsection_shape = index.shape[:dim] + index.shape[dim + 1:]
self_xsection_shape = self.shape[:dim] + self.shape[dim + 1:]
if idx_xsection_shape != self_xsection_shape:
raise ValueError("Except for dimension " + str(dim) +
", all dimensions of index and output should be the same size")
if (index >= self.shape[dim]).any() or (index < 0).any():
raise IndexError("The values of index must be between 0 and (self.shape[dim] -1)")
def make_slice(arr, dim, i):
slc = [slice(None)] * arr.ndim
slc[dim] = i
return slc
# We use index and dim parameters to create idx
# idx is in a form that can be used as a NumPy advanced index for scattering of src param. in self
idx = [[*np.indices(idx_xsection_shape).reshape(index.ndim - 1, -1),
index[make_slice(index, dim, i)].reshape(1, -1)[0]] for i in range(index.shape[dim])]
idx = list(np.concatenate(idx, axis=1))
idx.insert(dim, idx.pop())
if not np.isscalar(src):
if index.shape[dim] > src.shape[dim]:
raise IndexError("Dimension " + str(dim) + "of index can not be bigger than that of src ")
src_xsection_shape = src.shape[:dim] + src.shape[dim + 1:]
if idx_xsection_shape != src_xsection_shape:
raise ValueError("Except for dimension " +
str(dim) + ", all dimensions of index and src should be the same size")
# src_idx is a NumPy advanced index for indexing of elements in the src
src_idx = list(idx)
src_idx.pop(dim)
src_idx.insert(dim, np.repeat(np.arange(index.shape[dim]), np.prod(idx_xsection_shape)))
self[idx] = src[src_idx]
else:
self[idx] = src
return self
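# A minimal usage sketch (assumed values) for scattering along dim=0, i.e.
# out[index[i][j], j] = src[i][j]:
# out = np.zeros((3, 3))
# index = np.array([[0, 1, 2], [2, 0, 1]])
# src = np.array([[1., 2., 3.], [4., 5., 6.]])
# scatter_numpy(out, 0, index, src)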
def to_contiguous(tensor):
if tensor.is_contiguous():
return tensor
else:
return tensor.contiguous()
def clip_gradient(optimizer, grad_clip):
for group in optimizer.param_groups:
for param in group['params']:
param.grad.data.clamp_(-grad_clip, grad_clip)
def get_last_block(pgm):
bsz = pgm.size(0)
n_block = pgm.size(1)
n_step = pgm.size(2)
pgm = pgm.copy() # not sure
if pgm.dim() == 4:
idx = nd.argmax(pgm, axis=3)
idx = idx.as_in_context(mx.cpu()) # not sure
elif pgm.dim() == 3:
idx = pgm.as_in_context(mx.cpu())
else:
raise ValueError("pgm.dim() != 2 or 3")
max_inds = []
for i in range(bsz):
j = n_block - 1
while j >= 0:
if idx[i, j, 0] == 0:
break
j = j - 1
if j == -1:
max_inds.append(0)
else:
max_inds.append(j)
return np.asarray(max_inds)
def sample_block(max_inds, include_tail=False):
sample_inds = []
for ind in max_inds:
if include_tail:
sample_inds.append(np.random.randint(0, ind + 1))
else:
sample_inds.append(np.random.randint(0, ind))
return np.asarray(sample_inds)
def get_max_step_pgm(pgm):
batch_size = pgm.size(0)
pgm = pgm.copy() # not sure
if pgm.dim() == 3:
pgm = pgm[:, 1:, :]
idx = get_class(pgm).as_in_context(mx.cpu())
elif pgm.dim() == 2:
idx = pgm[:, 1:].as_in_context(mx.cpu())
else:
raise ValueError("pgm.dim() != 2 or 3")
max_inds = []
for i in range(batch_size):
j = 0
while j < idx.shape[1]:
if idx[i, j] == 0:
break
j = j + 1
if j == 0:
raise ValueError("no programs for such sample")
max_inds.append(j)
return np.asarray(max_inds)
def get_vacancy(pgm):
batch_size = pgm.size(0)
pgm = pgm.copy() # not sure
if pgm.dim() == 3:
pgm = pgm[:, 1:, :]
idx = get_class(pgm).as_in_context(mx.cpu())
elif pgm.dim() == 2:
idx = pgm[:, 1:].as_in_context(mx.cpu())
else:
raise ValueError("pgm.dim() != 2 or 3")
vac_inds = []
for i in range(batch_size):
j = 0
while j < idx.shape[1]:
if idx[i, j] == 0:
break
j = j + 1
if j == idx.shape[1]:
j = j - 1
vac_inds.append(j)
return np.asarray(vac_inds)
def sample_ind(max_inds, include_start=False):
sample_inds = []
for ind in max_inds:
if include_start:
sample_inds.append(np.random.randint(0, ind + 1))
else:
sample_inds.append(np.random.randint(0, ind))
return np.asarray(sample_inds)
def sample_last_ind(max_inds, include_start=False):
sample_inds = []
for ind in max_inds:
if include_start:
sample_inds.append(ind)
else:
sample_inds.append(ind - 1)
return np.array(sample_inds)
def get_class(pgm):
print(pgm)
if len(pgm.shape) == 3:
idx = nd.argmax(pgm, axis=2)
elif len(pgm.shape) == 2:
idx = pgm
else:
raise IndexError("dimension of pgm is wrong")
return idx
def decode_to_shape_new(pred_pgm, pred_param):
batch_size = pred_pgm.shape[0]
idx = get_class(pred_pgm)
pgm = idx.as_in_context(mx.cpu()).asnumpy()
params = pred_param.as_in_context(mx.cpu()).asnumpy()
params = np.round(params).astype(np.int32)
data = np.zeros((batch_size, 32, 32, 32), dtype=np.uint8)
for i in range(batch_size):
for j in range(1, pgm.shape[1]):
if pgm[i, j] == 0:
continue
data[i] = render_one_step_new(data[i], pgm[i, j], params[i, j])
return data
def decode_pgm(pgm, param, loop_free=True):
"""
decode and check one single block
remove occasionally-happened illegal programs
"""
flag = 1
data_loop = []
if pgm[0] == translate:
if pgm[1] == translate:
if 1 <= pgm[2] < translate:
data_loop.append(np.hstack((pgm[0], param[0])))
data_loop.append(np.hstack((pgm[1], param[1])))
data_loop.append(np.hstack((pgm[2], param[2])))
data_loop.append(np.hstack(np.asarray([end, 0, 0, 0, 0, 0, 0, 0])))
data_loop.append(np.hstack(np.asarray([end, 0, 0, 0, 0, 0, 0, 0])))
else:
flag = 0
elif 1 <= pgm[1] < translate:
data_loop.append(np.hstack((pgm[0], param[0])))
data_loop.append(np.hstack((pgm[1], param[1])))
data_loop.append(np.hstack(np.asarray([end, 0, 0, 0, 0, 0, 0, 0])))
else:
flag = 0
elif pgm[0] == rotate:
if pgm[1] == 10:
data_loop.append(np.hstack((pgm[0], param[0])))
data_loop.append(np.hstack((pgm[1], param[1])))
data_loop.append(np.hstack(np.asarray([end, 0, 0, 0, 0, 0, 0, 0])))
elif pgm[1] == 17:
data_loop.append(np.hstack((pgm[0], param[0])))
data_loop.append(np.hstack((pgm[1], param[1])))
data_loop.append(np.hstack(np.asarray([end, 0, 0, 0, 0, 0, 0, 0])))
else:
flag = 0
elif 1 <= pgm[0] < translate:
data_loop.append(np.hstack((pgm[0], param[0])))
"""
The data is dedicated to the classification problem of post-operative life expectancy in lung cancer patients: class 1 - death within one year after surgery, class 2 - survival.
"""
import numpy as np
import pandas as pd
from dataset_peek import data_peek
def load_thoracic():
fp = "thoracic.csv"
raw_data = pd.read_csv(fp, header=None)
x = np.array(raw_data.iloc[:, 0:16])
#
# Copyright 2012 by Idiap Research Institute, http://www.idiap.ch
#
# See the file COPYING for the licence associated with this software.
#
# Author(s):
# <NAME>, December 2012
#
import numpy as np
import scipy.signal as sp
import numpy.linalg as linalg
from . import core
class Autoregression:
"""
Class containing autoregression methods; requires an order. The term
'autoregression' is treated as a single word and abbreviated to 'ar'.
"""
def __init__(self, order):
self.order = int(order)
# All of the methods below actually calculate gain squared.
def ARMatrix(a, order=10, method='matrix'):
if a.ndim > 1:
ret = np.ndarray((a.shape[0], order))
gain = np.ndarray(a.shape[0])
for f in range(a.shape[0]):
ret[f], gain[f] = ARMatrix(a[f], order, method)
return ret, gain
coef = np.zeros(order)
# Follow the matrix based method to the letter. elop contains the
# poles reversed, coef is the poles in order.
if method == 'matrix':
Y = core.Frame(a[:a.size-1], size=order, period=1, pad=False)
y = a[order:]
YY = np.dot(Y.T,Y)
Yy = np.dot(Y.T,y)
elop = np.dot(linalg.inv(YY), Yy)
for i in range(order):
coef[i] = elop[order-i-1]
gain = (np.dot(y,y) - np.dot(elop,Yy)) / y.size
# Use the autocorrelation to populate the matrices. Here, Yy runs
# in ascending index, so we get coef in order right away.
elif method == 'acmatrix':
ac = core.Autocorrelation(a)
YY = np.ndarray((order, order))
Yy = np.ndarray(order)
for i in range(order):
Yy[i] = ac[i+1] * a.size
for j in range(order):
YY[i,j] = ac[abs(i-j)] * a.size
coef = np.dot(linalg.inv(YY), Yy)
gain = (ac[0] - np.dot(coef,Yy / a.size))
else:
print("Unknown AR method")
exit(1)
return (coef, gain)
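# A minimal usage sketch (synthetic data, not part of the original module):
# t = np.arange(1024)
# a = np.sin(0.3 * t) + 0.01 * np.random.randn(1024)
# coef, gain = ARMatrix(a, order=2)                      # direct matrix method
# coef2, gain2 = ARMatrix(a, order=2, method='acmatrix') # autocorrelation route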
# Raw Levinson-Durbin recursion
def levinson(ac, order, prior=0.0):
curr = np.zeros(order)
prev = np.zeros(order)
error = ac[0] + prior
for i in range(order):
# swap current and previous coefficients
tmp = curr
curr = prev
prev = tmp
# Recurse
k = ac[i+1]
for j in range(i):
k -= prev[j] * ac[i-j]
curr[i] = k / error
error *= 1 - curr[i]**2
for j in range(i):
curr[j] = prev[j] - curr[i] * prev[i-j-1]
return curr
# Levinson-Durbin recursion to calculate reflection coefficients from
# autocorrelaton.
def ARLevinson(ac, order=10):
if ac.ndim > 1:
ret = np.ndarray((ac.shape[0], order))
gain = np.ndarray(ac.shape[0])
for f in range(ac.shape[0]):
ret[f], gain[f] = ARLevinson(ac[f], order)
return ret, gain
coef = levinson(ac, order)
gain = ac[0] - np.dot(coef, ac[1:order+1])
return coef, gain
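# A minimal usage sketch, assuming 'a' is a 1-D signal and core.Autocorrelation
# behaves as used elsewhere in this module:
# ac = core.Autocorrelation(a)
# coef, gain = ARLevinson(ac, order=10)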
# Convert ac into matrices
def ACToMatrix(ac, order):
YY = np.ndarray((order, order))
Yy = np.ndarray(order)
for i in range(order):
Yy[i] = ac[i+1] * ac.size
for j in range(order):
YY[i,j] = ac[abs(i-j)] * ac.size
return YY, Yy
# Ridge regression implementation of AR
def ARRidge(ac, order=10, ridge=0.0):
if ac.ndim > 1:
ret = np.ndarray((ac.shape[0], order))
gain = np.ndarray(ac.shape[0])
for f in range(ac.shape[0]):
ret[f], gain[f] = ARRidge(ac[f], order, ridge)
return ret, gain
coef = levinson(ac, order, ridge*ac[0])
YY, Yy = ACToMatrix(ac, order)
gain = ac[0] + np.dot(coef, (np.dot(YY, coef) - 2*Yy)) / ac.size
return coef, gain
# Lasso-like implementation of AR
def ARLasso(ac, order=10, ridge=0.0):
if ac.ndim > 1:
ret = np.ndarray((ac.shape[0], order))
gain = np.ndarray(ac.shape[0])
for f in range(ac.shape[0]):
ret[f], gain[f] = ARLasso(ac[f], order, ridge)
return ret, gain
# Convert ac into matrices
YY, Yy = ACToMatrix(ac, order)
# Initialise lasso with ridge
gain = ac[0]
A = np.zeros((order, order))
for i in range(order):
#A[i,i] = ridge*ac[0]*ac.size
A[i,i] = 0.01*ac[0]
coef = np.dot(linalg.inv(YY+A), Yy)
for i in range(10):
for j in range(order):
A[j,j] = np.sqrt(abs(coef[j]))
gain = ac[0] + np.dot(coef, (np.dot(YY, coef) - 2*Yy)) / ac.size
B = np.identity(order) * gain
X = linalg.inv(np.dot(A, np.dot(YY, A)) + ridge*B)
coef = np.dot(np.dot(A, np.dot(X, A)), Yy)
# Each iteration should reduce the L1 norm of coef
#print i, linalg.norm(coef, ord=1)
return coef, gain
# AR power spectrum
# Old version before I found scipy.signal.freqz()
#def ARSpectrum(a, g, nSpec=256, twiddle=None):
# if twiddle is None:
# # Pre-compute the "twiddle" factors; saves a lot of CPU
# twiddle = np.ndarray((nSpec,a.shape[a.ndim-1]), dtype='complex')
# for i in range(nSpec):
# for j in range(twiddle.shape[1]):
# twiddle[i,j] = np.exp(-1.j * np.pi * i * (j+1) / nSpec)
# if a.ndim > 1:
# ret = np.ndarray((a.shape[0], nSpec))
# for f in range(a.shape[0]):
# ret[f] = ARSpectrum(a[f], g[f], nSpec, twiddle)
# return ret
#
# spec = np.ndarray(nSpec)
# for i in range(nSpec):
# sm = np.dot(a,twiddle[i])
# spec[i] = g / abs(1.0 - sm)**2
# return spec
def ARSpectrum(ar, gg, nSpec=256):
"""
Wrapper around scipy.signal.freqz() that both converts ar to
filter coefficients and broadcasts over an array.
"""
ret = np.ndarray(core.newshape(ar.shape, nSpec))
for a, g, r in core.refiter([ar, gg, ret], core.newshape(ar.shape)):
numer = np.sqrt(g)
denom = -np.insert(a, 0, -1)
tmp, r[...] = np.abs(sp.freqz(numer, denom, nSpec))**2
return ret
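# A minimal usage sketch: evaluate the all-pole power spectrum of a fitted model.
# coef, gain = ARLevinson(core.Autocorrelation(a), order=10)
# spec = ARSpectrum(coef, gain, nSpec=256)   # 256-point power spectrum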
# AR cepstrum
def ARCepstrum(a, g, order=None):
if not order:
order = a.shape[-1]
if a.ndim > 1:
ret = np.ndarray((a.shape[0], order+1))
for f in range(a.shape[0]):
ret[f] = ARCepstrum(a[f], g[f], order)
return ret
cep = np.ndarray(order+1)
for i in range(order):
sum = 0
for k in range(i):
index = i-k-1
if (index < a.size):
sum += a[index] * cep[k] * (k+1)
cep[i] = sum / (i+1)
if (i < a.size):
cep[i] += a[i]
cep[order] = np.log(max(g, 1e-8))
return cep
# The opposite recursion: cepstrum to AR coeffs
def ARCepstrumToPoly(cep, order=None):
"""
Convert cepstra to AR polynomial
"""
if not order:
order = cep.shape[-1]-1
ar = np.ndarray(core.newshape(cep.shape, order))
ag = np.ndarray(core.newshape(cep.shape, 1))
for c, a, g in core.refiter([cep, ar, ag], core.newshape(cep.shape)):
for i in range(order):
sum = 0
for k in range(i):
index = i-k-1
if (index < a.size):
sum += a[index] * c[k] * (k+1)
a[i] = -sum / (i+1)
if (i < a.size):
a[i] += c[i]
g[0] = np.exp(c[order])
return ar, ag.reshape(core.newshape(cep.shape))
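# A minimal round-trip sketch: AR -> cepstrum -> AR should recover the original
# coefficients and gain up to the chosen order.
# cep = ARCepstrum(coef, gain, order=10)
# coef2, gain2 = ARCepstrumToPoly(cep, order=10)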
# AR excitation filter
def ARExcitation(a, ar, gg):
if a.ndim > 1:
ret = np.ndarray(a.shape)
for f in range(a.shape[0]):
ret[f] = ARExcitation(a[f], ar[f], gg[f])
return ret
# Reverse the coeffs; negate, and add a 1 for the current sample
c = np.append(-ar[::-1], 1)
r = np.ndarray(len(a))
g = 1.0 / np.sqrt(gg)
for i in range(len(a)):
if i < len(c):
r[i] = np.dot(a[:i+1], c[-i-1:]) * g
else:
r[i] = np.dot(a[i-len(c)+1:i+1], c) * g
return r
# AR resynthesis filter
def ARResynthesis(e, ar, gg):
if e.ndim > 1:
ret = np.ndarray(e.shape)
for f in range(e.shape[0]):
ret[f] = ARResynthesis(e[f], ar[f], gg[f])
return ret
c = ar[::-1]
r = np.ndarray(len(e))
g = np.sqrt(gg)
r[0] = e[0]*g
for i in range(1,len(e)):
if i < len(c):
r[i] = e[i]*g + np.dot(r[:i], c[-i:])
else:
r[i] = e[i]*g + np.dot(r[i-len(c):i], c)
return r
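# A minimal round-trip sketch: ARExcitation applies the inverse (whitening)
# filter and ARResynthesis applies the forward filter, so the composition
# should reproduce the input signal.
# e = ARExcitation(a, coef, gain)
# a2 = ARResynthesis(e, coef, gain)   # a2 should be close to a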
# AR resynthesis filter; assuming later overlap-add
def ARResynthesis2(e, ar, gg):
assert(e.ndim == 2) # Should iterate down to this
ret = np.ndarray(e.shape)
for f in range(len(e)):
c = ar[f,::-1]
g = np.sqrt(gg[f])
ret[f,0] = e[f,0]*g
for i in range(1,len(e[f])):
if i < len(c):
ret[f,i] = e[f,i]*g + np.dot(ret[f,:i], c[-i:])
if f >= 1:
# Complete using outputs of previous OLA frame
k = len(c) - i
j = len(e[f]) // 2
ret[f,i] += np.dot(ret[f-1,j-k:j], c[:k])
else:
ret[f,i] = e[f,i]*g + np.dot(ret[f,i-len(c):i], c)
return ret
# Sparse AR analysis assuming the excitation is distributed Laplacian.
def ARSparse(a, order=10, gamma=1.414):
if a.ndim > 1:
ret = np.ndarray((a.shape[0], order))
gain = np.ndarray(a.shape[0])
for f in range(a.shape[0]):
ret[f], gain[f] = ARSparse(a[f], order, gamma)
return ret, gain
# Initialise with the ML solution
ac = core.Autocorrelation(a)
coef, gain = ARLevinson(ac, order)
x = 1.0 / np.abs(ARExcitation(a, coef, gain)[order:])
# Follow the matrix based method to the letter. elop contains the
# poles reversed, coef is the poles in order.
for iter in range(5):
X = np.diag(np.sqrt(x)) # Actually root of inverse of X
Y = np.dot(X, core.Frame(a[:a.size-1], size=order, period=1, pad=False))
y = np.dot(X, a[order:])
YY = np.dot(Y.T,Y)
Yy = np.dot(Y.T,y)
elop = np.dot(linalg.inv(YY), Yy)
coef = elop[::-1]
xx = ARExcitation(a, coef, 1.0)[order:]**2
gain = gamma * np.dot(xx,x) / y.size
# for i in range(len(exn)):
# exn[i] = max(abs(exn[i]),1e-6)
x = 1.0 / np.sqrt(xx / gain)
return (coef, gain)
# AR analysis assuming the excitation is distributed Student-t.
def ARStudent(a, order=10, df=1.0):
if a.ndim > 1:
ret = np.ndarray((a.shape[0], order))
gain = np.ndarray(a.shape[0])
for f in range(a.shape[0]):
ret[f], gain[f] = ARStudent(a[f], order, df)
return ret, gain
# Initialise with the ML solution
ac = core.Autocorrelation(a)
coef, gain = ARLevinson(ac, order)
x = 1.0 / (ARExcitation(a, coef, gain)[order:]**2 + df)
# Follow the matrix based method to the letter. elop contains the
# poles reversed, coef is the poles in order.
for iter in range(5):
X = np.diag(np.sqrt(x)) # Actually root of inverse of X
Y = np.dot(X, core.Frame(a[:a.size-1], size=order, period=1, pad=False))
y = np.dot(X, a[order:])
YY = np.dot(Y.T,Y)
Yy = np.dot(Y.T,y)
elop = np.dot(linalg.inv(YY), Yy)
coef = elop[::-1]
xx = ARExcitation(a, coef, 1.0)[order:]**2
gain = (df+1) * np.dot(xx,x) / y.size
x = 1.0 / (xx / gain + df)
return (coef, gain)
# Solve polynomial corresponding to AR solution
def ARRoots(a):
if a.ndim > 1:
ret = np.ndarray(a.shape, dtype='complex')
for f in range(a.shape[0]):
ret[f] = ARRoots(a[f])
return ret
# The refection coeffs are negative, so insert -1 and assume that
# the whole thing can be multiplied by -1
r = np.roots(np.insert(a, 0, -1))
return r
# Tries to find the angle corresponding to the fundamental frequency
def ARAngle(a):
if a.ndim > 1:
ret_m = np.ndarray(a.shape)
ret_s = np.ndarray(a.shape)
for f in range(a.shape[0]):
ret_m[f], ret_s[f] = ARAngle(a[f])
return ret_m, ret_s
# First extract the angles of large poles above the real line
t = np.zeros(len(a))
j = 0
for i in range(len(a)):
if np.abs(a[i]) > 0.85 and np.imag(a[i]) > 0:
t[j] = np.angle(a[i])
j += 1
# Build an array of the differences between the sorted angles
t = np.sort(t[:j])
for i in range(1,j):
t[i-1] = t[i] - t[i-1]
# We need the mean and stddev of those differences
m = np.mean(t[:j-1])
s = np.std(t[:j-1])
return m, s
def ARLogLikelihoodRatio(a, order=10):
if a.ndim > 1:
ret = np.ndarray(a.shape[0])
for f in range(a.shape[0]):
ret[f] = ARLogLikelihoodRatio(a[f], order)
return ret
# Usual Gaussian
ac = core.Autocorrelation(a)
coef, gain = ARLevinson(ac, order)
exn = ARExcitation(a, coef, gain)
llGauss = - len(exn)/2 * np.log(2*np.pi) - 0.5 * np.dot(exn, exn)
# Unusual Laplacian
gamma = 1
X = np.identity(len(a)-order)
for iter in range(5):
Y = np.dot(X, core.Frame(a[:a.size-1], size=order, period=1, pad=False))
y = np.dot(X, a[order:])
YY = np.dot(Y.T,Y)
Yy = np.dot(Y.T,y)
elop = np.dot(linalg.inv(YY), Yy)
coef = elop[::-1]
gain = gamma * (np.dot(y,y) - np.dot(elop,Yy)) / y.size
exn = ARExcitation(a, coef, gain)
for i in range(len(exn)):
exn[i] = max(abs(exn[i]),1e-6)
X = np.diag(1 / np.sqrt(exn[order:]))
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'test2.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
import os
os.environ["LANG"] = 'C'
try:
os.environ['LC_NAME'] = 'en_IN:en'
except:
print("No 'LC_NAME' entry was found.")
try:
os.environ["LANGUAGE"] = 'en_IN:en'
except:
print("No 'LANGUAGE' entry was found.")
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QDialog, QApplication, QFileDialog
from PyQt5.uic import loadUi
import sys
import glob
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 12})
from matplotlib.ticker import AutoMinorLocator
import matplotlib
matplotlib.interactive(True)
import numpy as np
from time import sleep #REMOVE LATER
from datetime import datetime #REMOVE LATER
import astropy.io.fits as pyfits
from astropy.time import Time
from fermipy.gtanalysis import GTAnalysis
from fermipy.plotting import ROIPlotter
import pathlib
libpath = str(pathlib.Path(__file__).parent.resolve())+"/images/"
class Worker(QtCore.QObject):
starting = QtCore.pyqtSignal()
finished = QtCore.pyqtSignal()
progress = QtCore.pyqtSignal(int)
def run_gtsetup(self):
"""Long-running task."""
self.starting.emit()
ui.setFermipy()
self.progress.emit(0)
ui.gta.setup()
self.progress.emit(1)
ui.analysisBasics()
self.progress.emit(2)
ui.analysis_advanced()
self.progress.emit(3)
self.finished.emit()
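# A minimal threading sketch (assumed wiring; the actual runLongTask defined on
# the UI class may differ): the Worker is meant to be moved to a QThread so
# that gta.setup() does not block the GUI event loop.
# thread = QtCore.QThread()
# worker = Worker()
# worker.moveToThread(thread)
# thread.started.connect(worker.run_gtsetup)
# worker.finished.connect(thread.quit)
# thread.start()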
class Ui_mainWindow(QDialog):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.resize(870, 595)
mainWindow.setWindowOpacity(1.0)
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(670, 480, 191, 41))
self.pushButton.setObjectName("pushButton")
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setGeometry(QtCore.QRect(10, 530, 851, 20))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName("progressBar")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(520, 190, 71, 20))
self.label_2.setObjectName("label_2")
self.picture = QtWidgets.QLabel(self.centralwidget)
self.picture.setEnabled(True)
self.picture.setGeometry(QtCore.QRect(560, 210, 251, 251))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.picture.setFont(font)
self.picture.setMouseTracking(False)
self.picture.setAutoFillBackground(False)
self.picture.setText("")
self.picture.setPixmap(QtGui.QPixmap(libpath+"fermi.png"))
self.picture.setScaledContents(True)
self.picture.setWordWrap(False)
self.picture.setObjectName("picture")
self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_2.setGeometry(QtCore.QRect(300, 190, 211, 331))
self.groupBox_2.setObjectName("groupBox_2")
self.radioButton = QtWidgets.QRadioButton(self.groupBox_2)
self.radioButton.setEnabled(False)
self.radioButton.setGeometry(QtCore.QRect(40, 170, 61, 23))
self.radioButton.setChecked(True)
self.radioButton.setAutoExclusive(True)
self.radioButton.setObjectName("radioButton")
self.label_4 = QtWidgets.QLabel(self.groupBox_2)
self.label_4.setEnabled(False)
self.label_4.setGeometry(QtCore.QRect(100, 40, 121, 21))
self.label_4.setObjectName("label_4")
self.checkBox_3 = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox_3.setGeometry(QtCore.QRect(10, 150, 131, 23))
self.checkBox_3.setObjectName("checkBox_3")
self.label_9 = QtWidgets.QLabel(self.groupBox_2)
self.label_9.setEnabled(False)
self.label_9.setGeometry(QtCore.QRect(100, 70, 81, 21))
self.label_9.setObjectName("label_9")
self.label_10 = QtWidgets.QLabel(self.groupBox_2)
self.label_10.setGeometry(QtCore.QRect(100, 120, 121, 21))
self.label_10.setObjectName("label_10")
self.checkBox = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox.setGeometry(QtCore.QRect(10, 20, 131, 23))
self.checkBox.setObjectName("checkBox")
self.spinBox_3 = QtWidgets.QSpinBox(self.groupBox_2)
self.spinBox_3.setGeometry(QtCore.QRect(40, 120, 48, 26))
self.spinBox_3.setMinimum(3)
self.spinBox_3.setProperty("value", 10)
self.spinBox_3.setObjectName("spinBox_3")
self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.groupBox_2)
self.doubleSpinBox.setEnabled(False)
self.doubleSpinBox.setGeometry(QtCore.QRect(40, 215, 69, 21))
self.doubleSpinBox.setProperty("value", 1.0)
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.label_5 = QtWidgets.QLabel(self.groupBox_2)
self.label_5.setEnabled(False)
self.label_5.setGeometry(QtCore.QRect(120, 210, 81, 31))
self.label_5.setObjectName("label_5")
self.spinBox_2 = QtWidgets.QSpinBox(self.groupBox_2)
self.spinBox_2.setEnabled(False)
self.spinBox_2.setGeometry(QtCore.QRect(40, 70, 48, 26))
self.spinBox_2.setMinimum(1)
self.spinBox_2.setProperty("value", 1)
self.spinBox_2.setObjectName("spinBox_2")
self.checkBox_2 = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox_2.setGeometry(QtCore.QRect(10, 100, 131, 23))
self.checkBox_2.setChecked(True)
self.checkBox_2.setObjectName("checkBox_2")
self.spinBox = QtWidgets.QSpinBox(self.groupBox_2)
self.spinBox.setEnabled(False)
self.spinBox.setGeometry(QtCore.QRect(40, 40, 48, 26))
self.spinBox.setMinimum(3)
self.spinBox.setProperty("value", 20)
self.spinBox.setObjectName("spinBox")
self.radioButton_2 = QtWidgets.QRadioButton(self.groupBox_2)
self.radioButton_2.setEnabled(False)
self.radioButton_2.setGeometry(QtCore.QRect(40, 190, 91, 23))
self.radioButton_2.setChecked(False)
self.radioButton_2.setAutoExclusive(True)
self.radioButton_2.setObjectName("radioButton_2")
self.checkBox_6 = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox_6.setGeometry(QtCore.QRect(40, 310, 131, 23))
self.checkBox_6.setChecked(True)
self.checkBox_6.setObjectName("checkBox_6")
self.doubleSpinBox_2 = QtWidgets.QDoubleSpinBox(self.groupBox_2)
self.doubleSpinBox_2.setGeometry(QtCore.QRect(40, 280, 69, 26))
self.doubleSpinBox_2.setMinimum(0.5)
self.doubleSpinBox_2.setProperty("value", 2.0)
self.doubleSpinBox_2.setObjectName("doubleSpinBox_2")
self.checkBox_5 = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox_5.setGeometry(QtCore.QRect(10, 260, 131, 23))
self.checkBox_5.setChecked(True)
self.checkBox_5.setObjectName("checkBox_5")
self.checkBox_4 = QtWidgets.QCheckBox(self.groupBox_2)
self.checkBox_4.setGeometry(QtCore.QRect(10, 240, 131, 23))
self.checkBox_4.setObjectName("checkBox_4")
self.label_11 = QtWidgets.QLabel(self.groupBox_2)
self.label_11.setGeometry(QtCore.QRect(120, 280, 101, 21))
self.label_11.setObjectName("label_11")
self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)
self.groupBox_3.setGeometry(QtCore.QRect(10, 190, 282, 331))
self.groupBox_3.setObjectName("groupBox_3")
self.label_15 = QtWidgets.QLabel(self.groupBox_3)
self.label_15.setEnabled(False)
self.label_15.setGeometry(QtCore.QRect(30, 30, 111, 17))
self.label_15.setObjectName("label_15")
self.lineEdit_6 = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit_6.setEnabled(False)
self.lineEdit_6.setGeometry(QtCore.QRect(30, 50, 101, 21))
self.lineEdit_6.setObjectName("lineEdit_6")
self.checkBox_11 = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_11.setGeometry(QtCore.QRect(10, 150, 141, 23))
self.checkBox_11.setObjectName("checkBox_11")
self.comboBox_2 = QtWidgets.QComboBox(self.groupBox_3)
self.comboBox_2.setEnabled(False)
self.comboBox_2.setGeometry(QtCore.QRect(30, 110, 101, 25))
self.comboBox_2.setObjectName("comboBox_2")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.comboBox_2.addItem("")
self.lineEdit_9 = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit_9.setEnabled(False)
self.lineEdit_9.setGeometry(QtCore.QRect(30, 180, 101, 21))
self.lineEdit_9.setObjectName("lineEdit_9")
self.checkBox_10 = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_10.setGeometry(QtCore.QRect(10, 300, 151, 23))
self.checkBox_10.setChecked(True)
self.checkBox_10.setObjectName("checkBox_10")
self.label_21 = QtWidgets.QLabel(self.groupBox_3)
self.label_21.setGeometry(QtCore.QRect(110, 272, 141, 17))
self.label_21.setObjectName("label_21")
self.doubleSpinBox_3 = QtWidgets.QDoubleSpinBox(self.groupBox_3)
self.doubleSpinBox_3.setEnabled(True)
self.doubleSpinBox_3.setGeometry(QtCore.QRect(30, 240, 69, 21))
self.doubleSpinBox_3.setMinimum(1.0)
self.doubleSpinBox_3.setProperty("value", 4.0)
self.doubleSpinBox_3.setObjectName("doubleSpinBox_3")
self.doubleSpinBox_4 = QtWidgets.QDoubleSpinBox(self.groupBox_3)
self.doubleSpinBox_4.setEnabled(True)
self.doubleSpinBox_4.setGeometry(QtCore.QRect(30, 270, 69, 21))
self.doubleSpinBox_4.setMinimum(0.1)
self.doubleSpinBox_4.setProperty("value", 0.5)
self.doubleSpinBox_4.setObjectName("doubleSpinBox_4")
self.checkBox_8 = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_8.setGeometry(QtCore.QRect(10, 220, 221, 23))
self.checkBox_8.setChecked(True)
self.checkBox_8.setObjectName("checkBox_8")
self.label_20 = QtWidgets.QLabel(self.groupBox_3)
self.label_20.setGeometry(QtCore.QRect(110, 242, 141, 17))
self.label_20.setObjectName("label_20")
self.checkBox_12 = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_12.setGeometry(QtCore.QRect(10, 90, 131, 21))
self.checkBox_12.setObjectName("checkBox_12")
self.radioButton_5 = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_5.setGeometry(QtCore.QRect(150, 50, 81, 23))
self.radioButton_5.setChecked(True)
self.radioButton_5.setAutoExclusive(True)
self.radioButton_5.setObjectName("radioButton_5")
self.radioButton_6 = QtWidgets.QRadioButton(self.groupBox_3)
self.radioButton_6.setGeometry(QtCore.QRect(150, 70, 112, 23))
self.radioButton_6.setAutoExclusive(True)
self.radioButton_6.setObjectName("radioButton_6")
self.label_3 = QtWidgets.QLabel(self.groupBox_3)
self.label_3.setGeometry(QtCore.QRect(150, 30, 131, 17))
self.label_3.setObjectName("label_3")
self.label_19 = QtWidgets.QLabel(self.groupBox_3)
self.label_19.setEnabled(False)
self.label_19.setGeometry(QtCore.QRect(220, 100, 71, 21))
self.label_19.setObjectName("label_19")
self.checkBox_13 = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_13.setEnabled(False)
self.checkBox_13.setGeometry(QtCore.QRect(170, 130, 121, 21))
self.checkBox_13.setChecked(False)
self.checkBox_13.setObjectName("checkBox_13")
self.lineEdit_12 = QtWidgets.QLineEdit(self.groupBox_3)
self.lineEdit_12.setEnabled(False)
self.lineEdit_12.setGeometry(QtCore.QRect(170, 100, 41, 21))
self.lineEdit_12.setObjectName("lineEdit_12")
self.line_4 = QtWidgets.QFrame(self.groupBox_3)
self.line_4.setGeometry(QtCore.QRect(0, 210, 281, 16))
self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_4.setObjectName("line_4")
self.checkBox_14 = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_14.setEnabled(False)
self.checkBox_14.setGeometry(QtCore.QRect(170, 150, 121, 21))
self.checkBox_14.setChecked(False)
self.checkBox_14.setObjectName("checkBox_14")
self.checkBox_15 = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_15.setEnabled(False)
self.checkBox_15.setGeometry(QtCore.QRect(170, 170, 121, 21))
self.checkBox_15.setChecked(False)
self.checkBox_15.setObjectName("checkBox_15")
self.checkBox_16 = QtWidgets.QCheckBox(self.groupBox_3)
self.checkBox_16.setEnabled(True)
self.checkBox_16.setGeometry(QtCore.QRect(150, 190, 141, 21))
self.checkBox_16.setChecked(False)
self.checkBox_16.setObjectName("checkBox_16")
self.label_22 = QtWidgets.QLabel(self.groupBox_3)
self.label_22.setGeometry(QtCore.QRect(150, 290, 111, 17))
self.label_22.setObjectName("label_22")
self.comboBox_3 = QtWidgets.QComboBox(self.groupBox_3)
self.comboBox_3.setEnabled(True)
self.comboBox_3.setGeometry(QtCore.QRect(150, 308, 101, 20))
self.comboBox_3.setObjectName("comboBox_3")
self.comboBox_3.addItem("")
self.comboBox_3.addItem("")
self.plainTextEdit = QtWidgets.QPlainTextEdit(self.centralwidget)
self.plainTextEdit.setGeometry(QtCore.QRect(520, 210, 341, 261))
self.plainTextEdit.setObjectName("plainTextEdit")
self.toolButton_10 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_10.setEnabled(False)
self.toolButton_10.setGeometry(QtCore.QRect(650, 110, 26, 24))
self.toolButton_10.setObjectName("toolButton_10")
self.lineEdit_7 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_7.setEnabled(False)
self.lineEdit_7.setGeometry(QtCore.QRect(510, 110, 131, 25))
self.lineEdit_7.setObjectName("lineEdit_7")
self.lineEdit_4 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_4.setGeometry(QtCore.QRect(490, 60, 151, 25))
self.lineEdit_4.setObjectName("lineEdit_4")
self.toolButton_4 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_4.setGeometry(QtCore.QRect(650, 60, 26, 24))
self.toolButton_4.setObjectName("toolButton_4")
self.label_14 = QtWidgets.QLabel(self.centralwidget)
self.label_14.setGeometry(QtCore.QRect(490, 40, 151, 17))
self.label_14.setObjectName("label_14")
self.checkBox_9 = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_9.setGeometry(QtCore.QRect(490, 90, 161, 23))
self.checkBox_9.setObjectName("checkBox_9")
self.line_2 = QtWidgets.QFrame(self.centralwidget)
self.line_2.setGeometry(QtCore.QRect(670, 40, 20, 101))
self.line_2.setFrameShape(QtWidgets.QFrame.VLine)
self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_2.setObjectName("line_2")
self.line_3 = QtWidgets.QFrame(self.centralwidget)
self.line_3.setGeometry(QtCore.QRect(470, 40, 20, 101))
self.line_3.setFrameShape(QtWidgets.QFrame.VLine)
self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_3.setObjectName("line_3")
self.dateTimeEdit_2 = QtWidgets.QDateTimeEdit(self.centralwidget)
self.dateTimeEdit_2.setGeometry(QtCore.QRect(690, 110, 171, 21))
self.dateTimeEdit_2.setDateTime(QtCore.QDateTime(QtCore.QDate(2008, 10, 14), QtCore.QTime(15, 43, 0)))
self.dateTimeEdit_2.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(2008, 8, 14), QtCore.QTime(15, 43, 37)))
self.dateTimeEdit_2.setCalendarPopup(False)
self.dateTimeEdit_2.setObjectName("dateTimeEdit_2")
self.dateTimeEdit = QtWidgets.QDateTimeEdit(self.centralwidget)
self.dateTimeEdit.setGeometry(QtCore.QRect(690, 60, 171, 21))
self.dateTimeEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2008, 8, 4), QtCore.QTime(15, 43, 36)))
self.dateTimeEdit.setDate(QtCore.QDate(2008, 8, 4))
self.dateTimeEdit.setTime(QtCore.QTime(15, 43, 36))
self.dateTimeEdit.setMinimumDateTime(QtCore.QDateTime(QtCore.QDate(2008, 8, 4), QtCore.QTime(15, 43, 36)))
self.dateTimeEdit.setObjectName("dateTimeEdit")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(690, 40, 111, 17))
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setGeometry(QtCore.QRect(690, 90, 111, 17))
self.label_8.setObjectName("label_8")
self.line = QtWidgets.QFrame(self.centralwidget)
self.line.setGeometry(QtCore.QRect(290, 40, 20, 101))
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.toolButton_5 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_5.setGeometry(QtCore.QRect(610, 490, 26, 24))
self.toolButton_5.setObjectName("toolButton_5")
self.lineEdit_10 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_10.setGeometry(QtCore.QRect(520, 490, 81, 25))
self.lineEdit_10.setObjectName("lineEdit_10")
self.lineEdit_5 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_5.setGeometry(QtCore.QRect(310, 110, 131, 25))
self.lineEdit_5.setObjectName("lineEdit_5")
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(310, 60, 131, 25))
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(310, 90, 131, 20))
self.label_6.setObjectName("label_6")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(310, 40, 111, 17))
self.label.setObjectName("label")
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setGeometry(QtCore.QRect(520, 470, 131, 20))
self.label_12.setObjectName("label_12")
self.toolButton = QtWidgets.QToolButton(self.centralwidget)
self.toolButton.setGeometry(QtCore.QRect(450, 60, 26, 24))
self.toolButton.setWhatsThis("")
self.toolButton.setObjectName("toolButton")
self.toolButton_2 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_2.setGeometry(QtCore.QRect(450, 110, 26, 24))
self.toolButton_2.setObjectName("toolButton_2")
self.toolButton_3 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_3.setEnabled(False)
self.toolButton_3.setGeometry(QtCore.QRect(260, 160, 26, 24))
self.toolButton_3.setObjectName("toolButton_3")
self.label_17 = QtWidgets.QLabel(self.centralwidget)
self.label_17.setGeometry(QtCore.QRect(180, 90, 111, 17))
self.label_17.setObjectName("label_17")
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(180, 110, 111, 25))
self.lineEdit_2.setObjectName("lineEdit_2")
self.label_18 = QtWidgets.QLabel(self.centralwidget)
self.label_18.setGeometry(QtCore.QRect(40, 40, 61, 21))
self.label_18.setObjectName("label_18")
self.label_16 = QtWidgets.QLabel(self.centralwidget)
self.label_16.setGeometry(QtCore.QRect(180, 40, 81, 17))
self.label_16.setObjectName("label_16")
self.comboBox = QtWidgets.QComboBox(self.centralwidget)
self.comboBox.setGeometry(QtCore.QRect(40, 60, 71, 25))
self.comboBox.setObjectName("comboBox")
self.comboBox.addItem("")
self.comboBox.addItem("")
self.lineEdit_8 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_8.setEnabled(False)
self.lineEdit_8.setGeometry(QtCore.QRect(40, 160, 211, 25))
self.lineEdit_8.setObjectName("lineEdit_8")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(180, 60, 111, 25))
self.lineEdit.setObjectName("lineEdit")
self.radioButton_3 = QtWidgets.QRadioButton(self.centralwidget)
self.radioButton_3.setEnabled(True)
self.radioButton_3.setGeometry(QtCore.QRect(20, 20, 91, 23))
self.radioButton_3.setChecked(True)
self.radioButton_3.setAutoExclusive(True)
self.radioButton_3.setObjectName("radioButton_3")
self.radioButton_4 = QtWidgets.QRadioButton(self.centralwidget)
self.radioButton_4.setEnabled(True)
self.radioButton_4.setGeometry(QtCore.QRect(20, 140, 91, 23))
self.radioButton_4.setChecked(False)
self.radioButton_4.setAutoExclusive(True)
self.radioButton_4.setObjectName("radioButton_4")
self.label_25 = QtWidgets.QLabel(self.centralwidget)
self.label_25.setGeometry(QtCore.QRect(10, 0, 91, 21))
self.label_25.setObjectName("label_25")
self.label_23 = QtWidgets.QLabel(self.centralwidget)
self.label_23.setGeometry(QtCore.QRect(40, 90, 131, 21))
self.label_23.setObjectName("label_23")
self.comboBox_4 = QtWidgets.QComboBox(self.centralwidget)
self.comboBox_4.setGeometry(QtCore.QRect(40, 110, 71, 25))
self.comboBox_4.setObjectName("comboBox_4")
self.comboBox_4.addItem("")
self.comboBox_4.addItem("")
self.line_5 = QtWidgets.QFrame(self.centralwidget)
self.line_5.setGeometry(QtCore.QRect(160, 40, 20, 101))
self.line_5.setFrameShape(QtWidgets.QFrame.VLine)
self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_5.setObjectName("line_5")
self.lineEdit_11 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_11.setEnabled(False)
self.lineEdit_11.setGeometry(QtCore.QRect(310, 160, 131, 25))
self.lineEdit_11.setText("")
self.lineEdit_11.setObjectName("lineEdit_11")
self.toolButton_6 = QtWidgets.QToolButton(self.centralwidget)
self.toolButton_6.setEnabled(False)
self.toolButton_6.setGeometry(QtCore.QRect(450, 160, 26, 24))
self.toolButton_6.setObjectName("toolButton_6")
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setEnabled(False)
self.label_13.setGeometry(QtCore.QRect(310, 140, 131, 20))
self.label_13.setObjectName("label_13")
self.line_6 = QtWidgets.QFrame(self.centralwidget)
self.line_6.setGeometry(QtCore.QRect(140, 220, 20, 181))
self.line_6.setFrameShape(QtWidgets.QFrame.VLine)
self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_6.setObjectName("line_6")
self.line_7 = QtWidgets.QFrame(self.centralwidget)
self.line_7.setGeometry(QtCore.QRect(140, 480, 20, 41))
self.line_7.setFrameShape(QtWidgets.QFrame.VLine)
self.line_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line_7.setObjectName("line_7")
self.pushButton.raise_()
self.progressBar.raise_()
self.label_2.raise_()
self.groupBox_2.raise_()
self.groupBox_3.raise_()
self.plainTextEdit.raise_()
self.toolButton_10.raise_()
self.lineEdit_7.raise_()
self.lineEdit_4.raise_()
self.toolButton_4.raise_()
self.label_14.raise_()
self.checkBox_9.raise_()
self.line_2.raise_()
self.line_3.raise_()
self.dateTimeEdit_2.raise_()
self.dateTimeEdit.raise_()
self.label_7.raise_()
self.label_8.raise_()
self.line.raise_()
self.toolButton_5.raise_()
self.lineEdit_10.raise_()
self.lineEdit_5.raise_()
self.lineEdit_3.raise_()
self.label_6.raise_()
self.label.raise_()
self.label_12.raise_()
self.toolButton.raise_()
self.toolButton_2.raise_()
self.toolButton_3.raise_()
self.label_17.raise_()
self.lineEdit_2.raise_()
self.label_18.raise_()
self.label_16.raise_()
self.comboBox.raise_()
self.lineEdit_8.raise_()
self.lineEdit.raise_()
self.label_25.raise_()
self.picture.raise_()
self.label_23.raise_()
self.comboBox_4.raise_()
self.line_5.raise_()
self.lineEdit_11.raise_()
self.toolButton_6.raise_()
self.label_13.raise_()
self.line_6.raise_()
self.line_7.raise_()
mainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(mainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 870, 22))
self.menubar.setObjectName("menubar")
self.menuTutorial = QtWidgets.QMenu(self.menubar)
self.menuTutorial.setObjectName("menuTutorial")
self.menuCredits = QtWidgets.QMenu(self.menubar)
self.menuCredits.setObjectName("menuCredits")
mainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(mainWindow)
self.statusbar.setObjectName("statusbar")
mainWindow.setStatusBar(self.statusbar)
self.actionOpen = QtWidgets.QAction(mainWindow)
self.actionOpen.setShortcut("")
self.actionOpen.setObjectName("actionOpen")
self.actionSave = QtWidgets.QAction(mainWindow)
self.actionSave.setObjectName("actionSave")
self.actionCopy = QtWidgets.QAction(mainWindow)
self.actionCopy.setObjectName("actionCopy")
self.actionPaste = QtWidgets.QAction(mainWindow)
self.actionPaste.setObjectName("actionPaste")
self.actionOpen_Tutorial = QtWidgets.QAction(mainWindow)
self.actionOpen_Tutorial.setObjectName("actionOpen_Tutorial")
self.actionSee_credits = QtWidgets.QAction(mainWindow)
self.actionSee_credits.setObjectName("actionSee_credits")
self.actionLoad_state = QtWidgets.QAction(mainWindow)
self.actionLoad_state.setObjectName("actionLoad_state")
self.menuTutorial.addAction(self.actionOpen_Tutorial)
self.menuTutorial.addAction(self.actionLoad_state)
self.menuCredits.addAction(self.actionSee_credits)
self.menubar.addAction(self.menuTutorial.menuAction())
self.menubar.addAction(self.menuCredits.menuAction())
self.retranslateUi(mainWindow)
QtCore.QMetaObject.connectSlotsByName(mainWindow)
#Click GO!
self.pushButton.setToolTip('Click to start the analysis')
self.pushButton.clicked.connect(self.runLongTask)
#self.actionOpen_Tutorial.connect(self.popup_tutorial)
self.actionOpen_Tutorial.triggered.connect(self.popup_tutorial)
self.actionSee_credits.triggered.connect(self.popup_credits)
self.actionLoad_state.triggered.connect(self.load_GUIstate)
############ Loading main files:
self.toolButton.setCheckable(True)
self.toolButton.setToolTip('You can download the spacecraft file from https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi')
self.toolButton.clicked.connect(self.browsefiles)
self.toolButton_2.setCheckable(True)
self.toolButton_2.setToolTip('You can download the photon files from https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi')
self.toolButton_2.clicked.connect(self.browsefiles)
self.toolButton_3.setCheckable(True)
self.toolButton_3.setToolTip('Please upload your own config.yaml file')
self.toolButton_3.clicked.connect(self.browsefiles)
self.toolButton_4.setCheckable(True)
self.toolButton_4.setToolTip('You can download the background files from https://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html')
self.toolButton_4.clicked.connect(self.browsefiles)
self.toolButton_5.setCheckable(True)
self.toolButton_5.clicked.connect(self.browsefiles)
self.toolButton_6.setCheckable(True)
self.toolButton_6.setToolTip('You can download the photon files from https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi')
self.toolButton_6.clicked.connect(self.browsefiles)
self.toolButton_10.setCheckable(True)
self.toolButton_10.clicked.connect(self.browsefiles)
self.lineEdit_9.setToolTip("e.g.: 4FGL J1222.5+0414,4FGL J1219.7+0444,4FGL ...")
###### Activating/deactivating options
self.checkBox.clicked.connect(self.activate)
self.checkBox_2.clicked.connect(self.activate)
self.checkBox_3.clicked.connect(self.activate)
self.checkBox_5.clicked.connect(self.activate)
self.checkBox_8.clicked.connect(self.activate)
self.checkBox_9.clicked.connect(self.activate)
self.checkBox_11.clicked.connect(self.activate)
self.checkBox_12.clicked.connect(self.activate)
self.radioButton.clicked.connect(self.activate)
self.radioButton_2.clicked.connect(self.activate)
self.radioButton_3.clicked.connect(self.activate)
self.radioButton_4.clicked.connect(self.activate)
self.radioButton_5.clicked.connect(self.activate)
self.radioButton_6.clicked.connect(self.activate)
self.comboBox_4.activated.connect(self.activate)
self.comboBox_4.setToolTip("Is your target listed in the catalog selected above?")
self.comboBox_2.setToolTip("Select a spectral model for your target")
self.comboBox_3.setToolTip("Select the output format for the main plots (i.e. SED, light curve etc)")
self.checkBox_13.setToolTip("Check if you wish that only the normalizations can vary")
self.checkBox_14.setToolTip("Freeze the Galactic diffuse model")
self.checkBox_15.setToolTip("Freeze the isotropic diffuse model")
self.checkBox_16.setToolTip("If checked, you will freeze the spectral shape of the target")
self.checkBox_8.setToolTip("Check to look for extra sources in the ROI, i.e. sources not listed in the adopted catalog")
self.checkBox_6.setToolTip("If checked, the target will be removed from the model. If unchecked, easyFermi computes the residuals TS map")
self.label_11.setToolTip("The photon index of the test source")
def retranslateUi(self, mainWindow):
_translate = QtCore.QCoreApplication.translate
mainWindow.setWindowIcon(QtGui.QIcon(libpath+'easyFermiIcon.png'))
mainWindow.setWindowTitle(_translate("mainWindow", "easyFermi"))
self.pushButton.setText(_translate("mainWindow", "Go!"))
self.label_2.setText(_translate("mainWindow", "Log:"))
self.groupBox_2.setTitle(_translate("mainWindow", "Science:"))
self.radioButton.setText(_translate("mainWindow", "Disk"))
self.label_4.setText(_translate("mainWindow", "N⁰ of time bins"))
self.checkBox_3.setText(_translate("mainWindow", "Extension:"))
self.label_9.setText(_translate("mainWindow", "N⁰ of cores"))
self.label_10.setText(_translate("mainWindow", "N⁰ of energy bins"))
self.checkBox.setText(_translate("mainWindow", "Light curve:"))
self.label_5.setText(_translate("mainWindow", "Max. size"))
self.checkBox_2.setText(_translate("mainWindow", "SED:"))
self.radioButton_2.setText(_translate("mainWindow", "2D-Gauss"))
self.checkBox_6.setText(_translate("mainWindow", "Remove target"))
self.checkBox_5.setText(_translate("mainWindow", "TS map:"))
self.checkBox_4.setText(_translate("mainWindow", "Relocalize"))
self.label_11.setText(_translate("mainWindow", "Photon index"))
self.groupBox_3.setTitle(_translate("mainWindow", "Advanced configurations:"))
self.label_15.setText(_translate("mainWindow", "Target name/tag:"))
self.checkBox_11.setText(_translate("mainWindow", "Delete sources:"))
self.comboBox_2.setAccessibleName(_translate("mainWindow", "4FGL"))
self.comboBox_2.setAccessibleDescription(_translate("mainWindow", "4FGL"))
self.comboBox_2.setItemText(0, _translate("mainWindow", "Select..."))
self.comboBox_2.setItemText(1, _translate("mainWindow", "Power-law"))
self.comboBox_2.setItemText(2, _translate("mainWindow", "LogPar"))
self.comboBox_2.setItemText(3, _translate("mainWindow", "PLEC"))
self.lineEdit_9.setText(_translate("mainWindow", ""))
self.checkBox_10.setText(_translate("mainWindow", "Diagnostic plots"))
self.label_21.setText(_translate("mainWindow", "Minimum separation (⁰)"))
self.checkBox_8.setText(_translate("mainWindow", "Find extra sources in the ROI:"))
self.label_20.setText(_translate("mainWindow", "Minimum significance"))
self.checkBox_12.setText(_translate("mainWindow", "Change model:"))
self.radioButton_5.setText(_translate("mainWindow", "Default"))
self.radioButton_6.setText(_translate("mainWindow", "Customized"))
self.label_3.setText(_translate("mainWindow", "Free source radius:"))
self.label_19.setText(_translate("mainWindow", "Radius (⁰)"))
self.checkBox_13.setText(_translate("mainWindow", "Only norm."))
self.checkBox_14.setText(_translate("mainWindow", "Freeze Gal."))
self.checkBox_15.setText(_translate("mainWindow", "Freeze Iso."))
self.checkBox_16.setText(_translate("mainWindow", "Freeze shape targ."))
self.label_22.setText(_translate("mainWindow", "Output format:"))
self.comboBox_3.setAccessibleName(_translate("mainWindow", "4FGL"))
self.comboBox_3.setAccessibleDescription(_translate("mainWindow", "4FGL"))
self.comboBox_3.setItemText(0, _translate("mainWindow", "pdf"))
self.comboBox_3.setItemText(1, _translate("mainWindow", "png"))
self.plainTextEdit.setPlainText(_translate("mainWindow", "\n"
""))
self.toolButton_10.setText(_translate("mainWindow", "..."))
self.toolButton_4.setText(_translate("mainWindow", "..."))
self.label_14.setText(_translate("mainWindow", "Dir. of diff. emission:"))
self.checkBox_9.setText(_translate("mainWindow", "Use external ltcube:"))
self.dateTimeEdit_2.setDisplayFormat(_translate("mainWindow", "dd/MM/yyyy HH:mm:ss"))
self.dateTimeEdit.setDisplayFormat(_translate("mainWindow", "dd/MM/yyyy HH:mm:ss"))
self.label_7.setText(_translate("mainWindow", "Start:"))
self.label_8.setText(_translate("mainWindow", "Stop:"))
self.toolButton_5.setText(_translate("mainWindow", "..."))
self.lineEdit_10.setText(_translate("mainWindow", "."))
self.label_6.setText(_translate("mainWindow", "Dir. of photon files:"))
self.label.setText(_translate("mainWindow", "Spacecraft file:"))
self.label_12.setText(_translate("mainWindow", "Output directory:"))
self.toolButton.setAccessibleDescription(_translate("mainWindow", "spacecraft mission file"))
self.toolButton.setText(_translate("mainWindow", "..."))
self.toolButton_2.setText(_translate("mainWindow", "..."))
self.toolButton_3.setText(_translate("mainWindow", "..."))
self.label_17.setText(_translate("mainWindow", "<html><head/><body><p>E<span style=\" vertical-align:sub;\">min</span>, E<span style=\" vertical-align:sub;\">max</span> (MeV):</p></body></html>"))
self.lineEdit_2.setText(_translate("mainWindow", "100, 300000"))
self.label_18.setText(_translate("mainWindow", "Catalog:"))
self.label_16.setText(_translate("mainWindow", "RA, Dec (⁰):"))
self.comboBox.setAccessibleName(_translate("mainWindow", "4FGL"))
self.comboBox.setAccessibleDescription(_translate("mainWindow", "4FGL"))
self.comboBox.setItemText(0, _translate("mainWindow", "4FGL"))
self.comboBox.setItemText(1, _translate("mainWindow", "3FGL"))
self.lineEdit_8.setText(_translate("mainWindow", "Configuration file (yaml)"))
self.radioButton_3.setText(_translate("mainWindow", "Standard"))
self.radioButton_4.setText(_translate("mainWindow", "Custom"))
self.label_25.setText(_translate("mainWindow", "Config. file:"))
self.label_23.setText(_translate("mainWindow", "Target cataloged?"))
self.comboBox_4.setAccessibleName(_translate("mainWindow", "4FGL"))
self.comboBox_4.setAccessibleDescription(_translate("mainWindow", "4FGL"))
self.comboBox_4.setItemText(0, _translate("mainWindow", "Yes"))
self.comboBox_4.setItemText(1, _translate("mainWindow", "No"))
self.toolButton_6.setText(_translate("mainWindow", "..."))
self.label_13.setText(_translate("mainWindow", "Dir. of photon files:"))
self.menuTutorial.setTitle(_translate("mainWindow", "Menu"))
self.menuCredits.setTitle(_translate("mainWindow", "Credits"))
self.actionOpen.setText(_translate("mainWindow", "Open..."))
self.actionSave.setText(_translate("mainWindow", "Save"))
self.actionSave.setIconText(_translate("mainWindow", "Save"))
self.actionSave.setShortcut(_translate("mainWindow", "Ctrl+S"))
self.actionCopy.setText(_translate("mainWindow", "Copy"))
self.actionCopy.setShortcut(_translate("mainWindow", "Ctrl+C"))
self.actionPaste.setText(_translate("mainWindow", "Paste"))
self.actionPaste.setStatusTip(_translate("mainWindow", "Paste a file"))
self.actionPaste.setShortcut(_translate("mainWindow", "Ctrl+V"))
self.actionOpen_Tutorial.setText(_translate("mainWindow", "Open Tutorial"))
self.actionSee_credits.setText(_translate("mainWindow", "See credits"))
self.actionLoad_state.setText(_translate("mainWindow", "Load GUI state"))
def reportProgress(self, n):
if n == 0:
self.progressBar.setProperty("value", 5)
if self.lineEdit_9.text() != ' ':
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Deleting source(s): "+self.lineEdit_9.text()+".\n")
else:
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- No sources deleted.\n")
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Running gtselect, gtbin, etc.\n")
if (self.IsThereLtcube is None) & (self.IsThereLtcube3==0):
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- It will take up to ~"+str(int(self.Time_intervMJD*206/(30.0*60)))+" min to run the ltcube\n")
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"(Don't worry, it really takes some time...)\n")
else:
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Using precomputed ltcube.\n")
if n == 1:
self.progressBar.setProperty("value", 25)
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Setup finished.\n")
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Computing flux, spectral index, TS, etc.\n")
#self.thread.terminate()
if n == 2:
self.progressBar.setProperty("value", 50)
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+self.fitquality+"\n")
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Main results saved in Target_results.txt\n")
if self.freeradiusalert != 'ok':
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+self.freeradiusalert+"\n")
if self.checkBox_4.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Relocalizing target...\n")
if self.checkBox_5.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Computing TS map.\n")
if self.checkBox_2.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Computing SED.\n")
if self.checkBox_3.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Computing extension.\n")
if self.checkBox.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Computing light curve.\n")
if n == 3:
self.progressBar.setProperty("value", 75)
if self.checkBox_4.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- New position: RA = "+str(round(self.locRA,3))+", Dec = "+str(round(self.locDec,3))+", r_95 = "+str(round(self.locr95,3))+"\n")
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Localization results saved in "+self.sourcename+"_loc.fits\n")
if self.checkBox_5.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- TS map saved as a figure and fits files.\n")
if self.checkBox_2.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- SED data saved in "+self.sourcename+"_sed.fits\n")
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Preliminar SED shown in figure Quickplot_SED\n")
if self.checkBox_3.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Extension data saved in "+self.sourcename+"_ext.fits\n")
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Extension is shown in figure Quickplot_extension\n")
if self.checkBox.isChecked():
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- LC saved in file "+self.sourcename+"_lightcurve.fits\n")
self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- LC is shown in figure Quickplot_LC\n")
def runLongTask(self):
#Making Fermi logo transparent:
new_pix = QtGui.QPixmap(libpath+"fermi.png")
new_pix.fill(QtCore.Qt.transparent)
painter = QtGui.QPainter(new_pix)
painter.setOpacity(0.2)
painter.drawPixmap(QtCore.QPoint(), QtGui.QPixmap(libpath+"fermi.png"))
painter.end()
self.picture.setPixmap(new_pix)
self.picture.setGeometry(QtCore.QRect(560, 210, 251, 251))
self.plainTextEdit.setPlainText("")
can_we_go = self.check_for_erros()
if can_we_go:
self.thread = QtCore.QThread()
self.worker = Worker()
self.worker.moveToThread(self.thread)
# Step 5: Connect signals and slots
self.thread.started.connect(self.worker.run_gtsetup)
self.worker.finished.connect(self.thread.quit)
self.worker.finished.connect(self.worker.deleteLater)
self.thread.finished.connect(self.thread.deleteLater)
self.worker.starting.connect(self.readytogo)
self.worker.progress.connect(self.reportProgress)
# Step 6: Start the thread
self.thread.start()
# Final reset
self.thread.finished.connect( lambda: self.pushButton.setEnabled(True) )
_translate = QtCore.QCoreApplication.translate
self.thread.finished.connect( lambda: self.pushButton.setText(_translate("MainWindow", "Go!")) )
self.thread.finished.connect( lambda: self.progressBar.setProperty("value", 100) )
self.thread.finished.connect( lambda: self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Saving GUI status...\n") )
self.thread.finished.connect( self.save_GUIstate )
self.thread.finished.connect( lambda: self.plainTextEdit.setPlainText(self.plainTextEdit.toPlainText()+"- Process finished!\n") )
else:
self.popup_go()
def save_GUIstate(self):
if self.radioButton_3.isChecked():
Standard = 'Yes'
Coords = self.lineEdit.text()
Energ = self.lineEdit_2.text()
date = self.dateTimeEdit.text()
date2 = self.dateTimeEdit_2.text()
spacecraft = self.lineEdit_3.text()
diffuse = self.lineEdit_4.text()
dir_photon = self.lineEdit_5.text()
Use_external_ltcube = self.checkBox_9.isChecked()
external_ltcube = self.lineEdit_7.text()
catalog = self.comboBox.currentText()
cataloged = self.comboBox_4.currentText()
state = [Standard,Coords,Energ,date,date2,spacecraft,diffuse,dir_photon, Use_external_ltcube, external_ltcube, catalog, cataloged]
else:
Standard = 'No'
configfile = self.lineEdit_8.text()
dir_photon = self.lineEdit_11.text()
state = [Standard, configfile, dir_photon]
nickname = self.lineEdit_6.text()
change_model = self.checkBox_12.isChecked()
which_model = self.comboBox_2.currentText()
delete_sources = self.checkBox_11.isChecked()
which_sources_deleted = self.lineEdit_9.text()
Free_radius_standard = self.radioButton_5.isChecked()
Free_radius_custom = self.radioButton_6.isChecked()
free_radius = self.lineEdit_12.text()
Only_norm = self.checkBox_13.isChecked()
Freeze_Gal = self.checkBox_14.isChecked()
Freeze_Iso = self.checkBox_15.isChecked()
Freeze_targ_shape = self.checkBox_16.isChecked()
find_sources = self.checkBox_8.isChecked()
min_sig = self.doubleSpinBox_3.value()
min_sep = self.doubleSpinBox_4.value()
diagnostic = self.checkBox_10.isChecked()
output_format = self.comboBox_3.currentText()
part2 = [nickname, change_model, which_model, delete_sources, which_sources_deleted, Free_radius_standard, Free_radius_custom, free_radius, Only_norm, Freeze_Gal, Freeze_Iso, Freeze_targ_shape, find_sources, min_sig, min_sep, diagnostic, output_format]
LC = self.checkBox.isChecked()
LC_Nbins = self.spinBox.value()
LC_Ncores = self.spinBox_2.value()
SED = self.checkBox_2.isChecked()
SED_Nbins = self.spinBox_3.value()
extension = self.checkBox_3.isChecked()
Disk = self.radioButton.isChecked()
Gauss2D = self.radioButton_2.isChecked()
max_size = self.doubleSpinBox.value()
reloc = self.checkBox_4.isChecked()
TS_map = self.checkBox_5.isChecked()
test_source_index = self.doubleSpinBox_2.value()
remove_targ_from_model = self.checkBox_6.isChecked()
output = self.lineEdit_10.text()
part3 = [LC, LC_Nbins, LC_Ncores, SED, SED_Nbins, extension, Disk, Gauss2D, max_size, reloc, TS_map, test_source_index, remove_targ_from_model, output]
state = state + part2 + part3
np.save(self.OutputDir+'GUI_status.npy', state, allow_pickle=True, fix_imports=True)
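# (Added note) `state` mixes strings and booleans, so NumPy stores it as an object
# array; reloading it elsewhere therefore needs np.load(..., allow_pickle=True).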
# -*- coding:UTF-8 -*-
from sklearn.linear_model import LogisticRegression
import numpy as np
import random
"""
Function description: sigmoid function
Parameters:
inX - input data
Returns:
the sigmoid of the input
Author:
<NAME>
Blog:
http://blog.csdn.net/c406495762
Zhihu:
https://www.zhihu.com/people/Jack--Cui/
Modify:
2017-09-05
"""
def sigmoid(inX):
return 1.0 / (1 + np.exp(-inX))
"""
Function description: improved stochastic gradient ascent algorithm
Parameters:
dataMatrix - data array
classLabels - class labels
numIter - number of iterations
Returns:
weights - fitted regression coefficient array (optimal parameters)
Author:
<NAME>
Blog:
http://blog.csdn.net/c406495762
Zhihu:
https://www.zhihu.com/people/Jack--Cui/
Modify:
2017-09-05
"""
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
m,n = np.shape(dataMatrix) # size of dataMatrix: m is the number of rows, n the number of columns
weights = np.ones(n)
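# --- Added sketch: the original snippet is truncated at this point. ---
# A standard completion of the "improved" stochastic gradient ascent described in
# the docstring above (assumed, not taken from the original source): the step size
# alpha decays with every update and samples are drawn randomly without replacement.
for j in range(numIter):
    dataIndex = list(range(m))
    for i in range(m):
        alpha = 4 / (1.0 + j + i) + 0.01                      # decaying step size
        randIndex = int(random.uniform(0, len(dataIndex)))    # pick a remaining sample at random
        h = sigmoid(sum(dataMatrix[dataIndex[randIndex]] * weights))
        error = classLabels[dataIndex[randIndex]] - h
        weights = weights + alpha * error * dataMatrix[dataIndex[randIndex]]
        del(dataIndex[randIndex])                             # sample without replacement
return weights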
# -*- coding:utf-8 -*-
"""
you do not need to update a existing instance of class ConvNetValue. because when you generate a instance of class
ConvNetValue, this instance must be a instance who has the latest params of matconvnet.
so just generate a new instance from class ConvNetValue when you need it
"""
import numpy as np
import tensorflow as tf
import scipy.io
import os
class ConvNetValue():
def __init__(self, matconvnet = None, Flag=False):
if matconvnet == None:
self.name = None
# TODO: <> params are stored in these dictionaries
self.W_d = {}
self.b_d = {}
self.bn_gamma_d = {}
self.bn_moments_d = {}
self.bn_beta_d = {}
self.bn_moving_mean_d = {}
self.bn_moving_variance_d = {}
self.bnorm_adjust = False
self.bn_gamma_adj = None
self.bn_moments_adj = None
self.bn_beta_adj = None
self.bn_moving_mean_adj = None
self.bn_moving_variance_adj = None
self.num_layers = 5
self.conv_stride_n = np.array([2, 1, 1, 1, 1])
self.filtergroup_yn_n = np.array([0, 1, 0, 1, 1], dtype=bool)
self.bnorm_yn_n = np.array([1, 1, 1, 1, 0], dtype=bool)
self.relu_yn_n = np.array([1, 1, 1, 1, 0], dtype=bool)
self.trainable_n = np.array([0, 0, 0, 0, 1])
self.pool_stride_n = np.array([2, 1, 0, 0, 0])
#!/usr/bin/env python
from genericpath import exists
import numpy as np
import glob
from distort_calibration import *
from cartesian import *
from registration_3d import *
from optical_tracking import *
from em_tracking import *
from eval import *
from pathlib import Path
import argparse
import csv
from improved_em_tracking import *
from fiducials import *
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"data_dir",
type=str,
help="path to data directory",
)
parser.add_argument(
"runtype",
type=int,
help="0 for debug, 1 for unknown",
)
parser.add_argument(
"letter",
type=int,
help="index of the letter",
)
parser.add_argument(
"output_dir",
type=str,
help="path to output directory(automatically created if does not exist)",
)
parser.add_argument(
"--eval",
type=bool,
default=False,
help="Whether to evaluate or not"
)
return parser.parse_args()
def main():
args = parse_args()
runtype = args.runtype
run = args.letter
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
type = ['debug', 'unknown']
data_dir = args.data_dir
# first read in Calbody (Nd, Na, Nc)
# then readings (ND, NA, NC, nframes)
# the last output determines whether to show the result in terminal
em_pivot = em_tracking( data_dir , "pa2-", type[runtype] , letters[run] , output = 0)
optical_pivot = optical_tracking( data_dir , "pa2-", type[runtype] , letters[run] , output = 0)
#Added improved_em_tracking file instead of the original
em_pivot = improved_em_tracking(data_dir, "pa2-", type[runtype] , letters[run], output=0)[1]
# print(em_pivot)
tmp_ce = distort_calibration( data_dir , "pa2-", type[runtype] , letters[run] ,output = 0)
C_exp = tmp_ce[0]
Nc = tmp_ce[1]
Nframes = tmp_ce[2]
# print(optical_pivot.shape)
ep = np.transpose(em_pivot)
op = np.transpose(optical_pivot)
# print(em_pivot)
# print(optical_pivot)
em_rounded = np.round(em_pivot.reshape(3), decimals=2)
opt_rounded = np.round(optical_pivot.reshape(3), decimals=2)
C_exp_rounded = np.round(C_exp, decimals=2)
import pyaudio
import numpy as np
import time
import json
import scipy.signal as signal
p = pyaudio.PyAudio()
CHANNELS = 1
RATE = 44100
with open('hk1.json') as f:
h1 = json.load(f)
print(h1)
with open('hk2.json') as f:
h2 = json.load(f)
print(h2)
with open('hk3.json') as f:
h3 = json.load(f)
print(h3)
h1mian = h1.pop()
h1licz = h1.pop()
h2mian = h2.pop()
h2licz = h2.pop()
h3mian = h3.pop()
h3licz = h3.pop()
h1m3 = h1mian.pop()
h1m2 = h1mian.pop()
h1m1 = h1mian.pop()
h1li3 = h1licz.pop()
h1li2 = h1licz.pop()
h1li1 = h1licz.pop()
h2m3 = h2mian.pop()
h2m2 = h2mian.pop()
h2m1 = h2mian.pop()
h2li3 = h2licz.pop()
h2li2 = h2licz.pop()
h2li1 = h2licz.pop()
h3m3 = h3mian.pop()
h3m2 = h3mian.pop()
h3m1 = h3mian.pop()
h3li3 = h3licz.pop()
h3li2 = h3licz.pop()
h3li1 = h3licz.pop()
##h1licz = np.array([h1li1, h1li2, h1li3])
##h1mian = np.array([h1m1, h1m2, h1m3])
##
##h2licz = np.array([h2li1, h2li2, h2li3])
##h2mian = np.array([h2m1, h2m2, h2m3])
##
##h3licz = np.array([h3li1, h3li2, h3li3])
##h3mian = np.array([h3m1, h3m2, h3m3])
h1licz = np.array([h1li3, h1li2, h1li1])
h1mian = np.array([h1m3, h1m2, h1m1])
h2licz = np.array([h2li3, h2li2, h2li1])
h2mian = np.array([h2m3, h2m2, h2m1])
h3licz = np.array([h3li3, h3li2, h3li1])
h3mian = np.array([h3m3, h3m2, h3m1])
def callback(in_data, frame_count, time_info, flag):
# using Numpy to convert to array for processing
# audio_data = np.fromstring(in_data, dtype=np.float32)
# return in_data, pyaudio.paContinue
#print(in_data)
in_data_after_conversion = np.frombuffer(in_data, dtype='<f4')
#print("AFTER CONVERISON")
# print("indata")
# print(in_data_after_conversion)
filtr_h1 = signal.lfilter(h1licz, h1mian, in_data_after_conversion, axis=0)
filtr_h2 = signal.lfilter(h2licz, h2mian, filtr_h1, axis=0)
data_filtered = signal.lfilter(h3licz, h3mian, filtr_h2, axis=0)
# print("filtered")
# print(data_filtered)
return np.float32(data_filtered)
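# (Added note) PyAudio calls `callback` from its own audio thread and expects a
# (data, flag) tuple in return; a typical way to attach it (assumed, not part of
# the original snippet) is:
#   stream = p.open(format=pyaudio.paFloat32, channels=CHANNELS, rate=RATE,
#                   input=True, output=True, stream_callback=callback)
#   stream.start_stream()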
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 1 19:11:52 2017
@author: mariapanteli
"""
import pytest
import numpy as np
import scripts.map_and_average as map_and_average
def test_remove_inds():
labels = np.array(['a', 'a', 'b', 'unknown'])
features = np.array([[0, 1], [0,2], [0, 3], [0, 4]])
audiolabels = np.array(['a', 'b', 'c', 'd'])
features, labels, audiolabels = map_and_average.remove_inds(features, labels, audiolabels)
assert len(features) == 3 and len(labels) == 3 and len(audiolabels) == 3
def test_remove_inds_values():
labels = np.array(['a', 'a', 'b', 'unknown'])
features = np.array([[0, 1], [0,2], [0, 3], [0, 4]])
audiolabels = np.array(['a', 'b', 'c', 'd'])
features, labels, audiolabels = map_and_average.remove_inds(features, labels, audiolabels)
features_true = np.array([[0, 1], [0,2], [0, 3]])
assert np.array_equal(features, features_true)
def test_averageframes():
classlabels = np.array(['a', 'a', 'b', 'b', 'b'])
features = np.array([[0, 1], [0,2], [0, 1], [1, 1], [2, 1]])
audiolabels = np.array(['a', 'a', 'b', 'b', 'b'])
feat, audio, labels = map_and_average.averageframes(features, audiolabels, classlabels)
feat_true = np.array([[0, 1.5], [1, 1]])
assert np.array_equal(feat, feat_true)
def test_limit_to_n_seconds():
X = np.random.randn(10, 3)
Y = np.random.randn(10)
Yaudio = np.concatenate([np.repeat('a', 7)
#calculate the extinction between two bands
#from Rieke & Lebofsky 1985 Table 3
import argparse
parser=argparse.ArgumentParser(
prog = 'CalcMIRExtinction',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='''Calculate the MIR extinction and flux ratios between two wavelengths or bands based on Rieke & Lebofsky (1985).
Values are linearly interpolated between those in Table 3 (0.4-13 microns). The code will extrapolate out to 25 microns, but caution should be used!!!
Inputs will be rounded to 0.1 micron.
Band names must match Table 3 of Rieke & Lebofsky (1985).
Examples:
>>python CalcExtinction.py J 30 8.0
>>python CalcExtinction.py J 30 M
>>python CalcExtinction.py 8.0 2.0 J
>>python CalcExtinction.py 3.3 2.0 8.0
Required packages: numpy, scipy''',
epilog='''Author: <NAME> (<EMAIL>) - Last updated: 2020-11-20''')
parser.add_argument('ReferenceWavelength',type=str,help='Wavelength or band to use as the reference with known extinction (microns or band letter)')
parser.add_argument('ReferenceExtinction',type=float,help='Extinction (mag) in the reference band (A_ReferenceBand).')
parser.add_argument('TargetWavelength',type=str,help='Wavelength at which to calculate the extinction and flux ratio (microns or band letter).')
args=parser.parse_args()
import numpy as np
from scipy.interpolate import interp1d
#Rieke & Lebofsky 1985 Table 3
band = ['U','B','V','R','I','J','H','K','L','M','N','8.0','8.5','9.0','9.5','10.0','10.5','11.0','11.5','12.0','12.5','13.0'] #microns or band name
wl = np.array([0.365, 0.445, 0.551, 0.658, 0.806, 1.220, 1.630, 2.190, 3.450, 4.750, 10.50, 8., 8.5, 9., 9.5, 10., 10.5, 11., 11.5, 12., 12.5, 13.]) #microns
ElamV_EBV = np.array([1.64, 1.0, 0.0, -0.78, -1.60, -2.22, -2.55, -2.744, -2.91, -3.02, -2.93, -3.03, -2.96, -2.87, -2.83, -2.86, -2.87, -2.91, -2.95, -2.98, -3.00, -3.01])
Alam_Av = np.array([1.531, 1.324, 1.000, 0.748, 0.482, 0.282, 0.175, 0.112, 0.058, 0.023, 0.052, 0.020, 0.043, 0.074, 0.087, 0.083, 0.074, 0.060, 0.047, 0.037, 0.030, 0.027])
#sort the wavelengths and interpolate
idx_sort = np.argsort(wl)
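# --- Added sketch: the original script is truncated at this point. ---
# A plausible continuation (names below are assumptions, not from the original):
# interpolate A_lam/A_V over wavelength, resolve band letters to wavelengths, and
# scale by the known reference extinction.
#   alam_av = interp1d(wl[idx_sort], Alam_Av[idx_sort], fill_value='extrapolate')
#   wl_ref = wl[band.index(args.ReferenceWavelength)] if args.ReferenceWavelength in band else float(args.ReferenceWavelength)
#   wl_targ = wl[band.index(args.TargetWavelength)] if args.TargetWavelength in band else float(args.TargetWavelength)
#   A_targ = args.ReferenceExtinction * alam_av(wl_targ) / alam_av(wl_ref)
#   flux_ratio = 10 ** (-0.4 * A_targ)   # fraction of flux transmitted at the target wavelength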
"""
rotsesim.pecvel
This code runs a simulation that adds
random peculiar velocities to a simulated galaxy sample,
then finds H0 using a linear fit, attempting to mitigate
the effects of peculiar motion.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.odr import *
#- Define necessary functions for fitting and plotting
def fit_velocity(distance, h0):
"""
Linear fit of velocity vs. distance
"""
return h0 * distance
def fit_h0_disterrors(distance,disterr,velocity):
"""
Use ODR to fit h0 using distance errors
"""
model = Model(fit_velocity)
data = RealData(distance,velocity,sx=disterr)
odr = ODR(data,model,beta0=[0.])
out = odr.run()
fith0 = out.beta[0]
errh0 = out.sd_beta[0]
chi2dof = out.res_var
return fith0,errh0,chi2dof
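# (Added example, hypothetical numbers) Fitting H0 from a small mock sample with
# 5% distance errors could look like:
#   d = np.array([10.0, 50.0, 120.0])   # Mpc
#   derr = 0.05 * d
#   v = 70.0 * d                        # km/s
#   h0, h0_err, chi2dof = fit_h0_disterrors(d, derr, v)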
def fit_h0_dist_vel_errors(distance,disterr,velocity,velerr,beta=None):
"""
Use ODR to fit h0 using errors in distance and velocity
"""
if beta:
beta0 = beta
else:
beta0 = 0.
model = Model(fit_velocity)
data = RealData(distance,velocity,sx=disterr,sy=velerr)
odr = ODR(data,model,beta0=[beta0])
out = odr.run()
fith0 = out.beta[0]
errh0 = out.sd_beta[0]
chi2dof = out.res_var
yerr = out.eps
return fith0,errh0,chi2dof,yerr
def plot_h0_histogram(h0values,h0mean,h0std):
"""
Plot histogram fit h0 values
"""
nbins = int(np.max(h0values) - np.min(h0values))
plt.xlabel('H0',fontsize=20)
plt.ylabel('counts per bin',fontsize=20)
plt.title('H0 distribution (H0 mock: avg = {:0.4f}, std = {:0.4f}'.format(h0mean,h0std),fontsize=20)
plt.hist(h0values,bins=nbins)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.show()
def plot_h0errors_histogram(h0errors,h0errors_mean,h0errors_std):
"""
Plot histogram fit h0 errors
"""
nbins = int(np.max(h0errors) - np.min(h0errors))
plt.xlabel('H0 errors',fontsize=20)
plt.ylabel('counts per bin',fontsize=20)
plt.title('H0 error distribution (H0 error: avg = {:0.4f}, std = {:0.4f}'.format(h0errors_mean,h0errors_std),fontsize=20)
plt.hist(h0errors,bins=nbins)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.show()
def plot_h0_mean_iterations(h0mean):
"""
Plot progression of h0 mean after iterating
"""
plt.title('H0 average progression, final H0 = {:0.4f}'.format(h0mean[-1]),fontsize=20)
plt.xlabel('iteration',fontsize=20)
plt.ylabel('H0 mean',fontsize=20)
iteration = np.arange(len(h0mean)) + 1
plt.plot(iteration,h0mean)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.show()
def plot_h0_std_iterations(h0std):
"""
Plot progression of h0 rms after iterating
"""
plt.title('H0 rms progression, final H0 rms = {:0.4f}'.format(h0std[-1]),fontsize=20)
plt.xlabel('iteration',fontsize=20)
plt.ylabel('H0 rms',fontsize=20)
iteration = np.arange(len(h0std)) + 1
plt.plot(iteration,h0std)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.show()
def pecvel_sim(h0,distance,disterr,pecvel,ngal,nsim,iterations,plothist,plotprog,seed):
if seed is not None:
np.random.seed(seed)
seeds = np.random.randint(2**30, size=nsim)
#- Generate ngal mock galaxies for nsim simulations
dist_all = []
disterr_all = []
vel_all = []
velerr_all = []
fith0_all = []
h0err = []
totchi2 = []
for sim in range(nsim):
#- Simulate distances shifted by random amount based on measurement error
if seed:
np.random.seed(seeds[sim])
dist = np.random.uniform(distance[0],distance[1],ngal)
if seed:
np.random.seed(seeds[sim])
derr = np.random.normal(disterr[0],disterr[1],ngal)
disterror = dist*derr
vel = h0*dist
if seed:
np.random.seed(seeds[sim])
dist = dist + np.random.uniform(-np.abs(disterror),np.abs(disterror),ngal)
dist_all.append(dist)
disterr_all.append(disterror)
#- Simulate galaxy velocities including peculiar velocities
if seed:
np.random.seed(seeds[sim])
vpec = np.random.uniform(-pecvel,pecvel,ngal)
vel = vel + vpec
vel_all.append(vel)
#- Fit h0
fith0, errh0, chi2 = fit_h0_disterrors(dist,disterror,vel)
fith0_all.append(fith0)
h0err.append(errh0)
totchi2.append(chi2)
#- Save difference in velocities from fit as errors
fitvel = fith0 * dist
verr = fitvel - vel
velerr_all.append(verr)
h0mean = np.mean(fith0_all)
h0std = np.std(fith0_all)
h0errmean = np.mean(h0err)
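# (Added note) The snippet is truncated here. Judging from the signature and the
# helpers defined above, the remainder presumably computes the spread of the fitted
# values (e.g. np.std(h0err)), optionally refits over `iterations` passes with
# fit_h0_dist_vel_errors using the stored velocity residuals, and calls the
# plot_h0_* helpers when `plothist` or `plotprog` are set.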
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Part of the PsychoPy library
# Copyright (C) 2002-2018 <NAME> (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
"""Tools related to working with various color spaces.
The routines provided in the module are used to transform color coordinates
between spaces. Most of the functions here are *vectorized*, allowing for array
inputs to convert multiple color values at once.
**As of version 2021.0 of PsychoPy**, users ought to use the
:class:`~psychopy.colors.Color` class for working with color coordinate values.
"""
from __future__ import absolute_import, division, print_function
__all__ = ['srgbTF', 'rec709TF', 'cielab2rgb', 'cielch2rgb', 'dkl2rgb',
'dklCart2rgb', 'rgb2dklCart', 'hsv2rgb', 'rgb2lms', 'lms2rgb',
'rgb2hsv', 'rescaleColor']
from past.utils import old_div
import numpy
from psychopy import logging
from psychopy.tools.coordinatetools import sph2cart
def unpackColors(colors): # used internally, not exported by __all__
"""Reshape an array of color values to Nx3 format.
Many color conversion routines operate on color data in Nx3 format, where
rows are color space coordinates. 1x3 and NxNx3 input are converted to Nx3
format. The original shape and dimensions are also returned, allowing the
color values to be returned to their original format using 'reshape'.
Parameters
----------
colors : ndarray, list or tuple of floats
Nx3 or NxNx3 array of colors, last dim must be size == 3 specifying each
color coordinate.
Returns
-------
tuple
Nx3 ndarray of converted colors, original shape, original dims.
"""
# handle the various data types and shapes we might get as input
colors = numpy.asarray(colors, dtype=float)
orig_shape = colors.shape
orig_dim = colors.ndim
if orig_dim == 1 and orig_shape[0] == 3:
colors = numpy.array(colors, ndmin=2)
elif orig_dim == 2 and orig_shape[1] == 3:
pass # NOP, already in correct format
elif orig_dim == 3 and orig_shape[2] == 3:
colors = numpy.reshape(colors, (-1, 3))
else:
raise ValueError(
"Invalid input dimensions or shape for input colors.")
return colors, orig_shape, orig_dim
def rescaleColor(rgb, convertTo='signed', clip=False):
"""Rescale RGB colors.
This function can be used to convert RGB value triplets from the PsychoPy
signed color format to the normalized OpenGL format.
PsychoPy represents colors using values between -1 and 1. However, colors
are commonly represented using values between 0 and 1 when working with
OpenGL and various other contexts. This function simply rescales values to
switch between these formats.
Parameters
----------
rgb : `array_like`
1-, 2-, 3-D vector of RGB coordinates to convert. The last dimension
should be length-3 in all cases, specifying a single coordinate.
convertTo : `str`
If 'signed', this function will assume `rgb` is in OpenGL format [0:1]
and rescale them to PsychoPy's format [-1:1]. If 'unsigned', input
values are treated as OpenGL format and will be rescaled to use
PsychoPy's. Default is 'signed'.
clip : bool
Clip values to the range that can be represented on a display. This is
an optional step. Default is `False`.
Returns
-------
ndarray
Rescaled values with the same shape as `rgb`.
Notes
-----
The `convertTo` argument also accepts strings 'opengl' and 'psychopy'
as substitutes for 'signed' and 'unsigned', respectively. This might be more
explicit in some contexts.
Examples
--------
Convert a signed RGB value to unsigned format::
rgb_signed = [-1, 0, 1]
rgb_unsigned = rescaleColor(rgb_signed, convertTo='unsigned')
"""
# While pretty simple, this operation is done often enough to justify having
# its own function to avoid writing it out all the time. It also explicitly
# shows the direction of which values are being rescaled to make code more
# readable.
if convertTo == 'signed' or convertTo == 'psychopy':
rgb_out = rgb * 2 - 1 # from OpenGL to PsychoPy format
elif convertTo == 'unsigned' or convertTo == 'opengl':
rgb_out = (rgb + 1) / 2. # from PsychoPy to OpenGL
else:
raise ValueError("Invalid value for `convertTo`, can either be "
"'signed' or 'unsigned'.")
if clip:
rgb_out = numpy.clip(rgb_out, -1 if convertTo == 'signed' else 0, 1)
return rgb_out
def srgbTF(rgb, reverse=False, **kwargs):
"""Apply sRGB transfer function (or gamma) to linear RGB values.
Input values must have been transformed using a conversion matrix derived
from sRGB primaries relative to D65.
Parameters
----------
rgb : tuple, list or ndarray of floats
Nx3 or NxNx3 array of linear RGB values, last dim must be size == 3
specifying RBG values.
reverse : boolean
If True, the reverse transfer function will convert sRGB -> linear RGB.
Returns
-------
ndarray
Array of transformed colors with same shape as input.
"""
rgb, orig_shape, orig_dim = unpackColors(rgb)
# apply the sRGB TF
if not reverse:
# applies the sRGB transfer function (linear RGB -> sRGB)
to_return = numpy.where(
rgb <= 0.0031308,
rgb * 12.92,
(1.0 + 0.055) * rgb ** (1.0 / 2.4) - 0.055)
else:
# do the inverse (sRGB -> linear RGB)
to_return = numpy.where(
rgb <= 0.04045,
rgb / 12.92,
((rgb + 0.055) / 1.055) ** 2.4)
if orig_dim == 1:
to_return = to_return[0]
elif orig_dim == 3:
to_return = numpy.reshape(to_return, orig_shape)
return to_return
def rec709TF(rgb, **kwargs):
"""Apply the Rec 709 transfer function (or gamma) to linear RGB values.
This transfer function is defined in the ITU-R BT.709 (2015) recommendation
document (http://www.itu.int/rec/R-REC-BT.709-6-201506-I/en) and is
commonly used with HDTV televisions.
Parameters
----------
rgb : tuple, list or ndarray of floats
Nx3 or NxNx3 array of linear RGB values, last dim must be size == 3
specifying RBG values.
Returns
-------
ndarray
Array of transformed colors with same shape as input.
"""
rgb, orig_shape, orig_dim = unpackColors(rgb)
# applies the Rec.709 transfer function (linear RGB -> Rec.709 RGB)
# mdc - I didn't compute the inverse for this one.
to_return = numpy.where(rgb >= 0.018,
1.099 * rgb ** 0.45 - 0.099,
4.5 * rgb)
if orig_dim == 1:
to_return = to_return[0]
elif orig_dim == 3:
to_return = numpy.reshape(to_return, orig_shape)
return to_return
def cielab2rgb(lab,
whiteXYZ=None,
conversionMatrix=None,
transferFunc=None,
clip=False,
**kwargs):
"""Transform CIE L*a*b* (1976) color space coordinates to RGB tristimulus
values.
CIE L*a*b* are first transformed into CIE XYZ (1931) color space, then the
RGB conversion is applied. By default, the sRGB conversion matrix is used
with a reference D65 white point. You may specify your own RGB conversion
matrix and white point (in CIE XYZ) appropriate for your display.
Parameters
----------
lab : tuple, list or ndarray
1-, 2-, 3-D vector of CIE L*a*b* coordinates to convert. The last
dimension should be length-3 in all cases specifying a single
coordinate.
whiteXYZ : tuple, list or ndarray
1-D vector coordinate of the white point in CIE-XYZ color space. Must be
the same white point needed by the conversion matrix. The default
white point is D65 if None is specified, defined as X, Y, Z = 0.9505,
1.0000, 1.0890.
conversionMatrix : tuple, list or ndarray
3x3 conversion matrix to transform CIE-XYZ to RGB values. The default
matrix is sRGB with a D65 white point if None is specified. Note that
values must be gamma corrected to appear correctly according to the sRGB
standard.
transferFunc : pyfunc or None
Signature of the transfer function to use. If None, values are kept as
linear RGB (it's assumed your display is gamma corrected via the
hardware CLUT). The TF must be appropriate for the conversion matrix
supplied (default is sRGB). Additional arguments to 'transferFunc' can
be passed by specifying them as keyword arguments. Gamma functions that
come with PsychoPy are 'srgbTF' and 'rec709TF', see their docs for more
information.
clip : bool
Make all output values representable by the display. However, colors
outside of the display's gamut may not be valid!
Returns
-------
ndarray
Array of RGB tristimulus values.
Example
-------
Converting a CIE L*a*b* color to linear RGB::
import psychopy.tools.colorspacetools as cst
cielabColor = (53.0, -20.0, 0.0) # greenish color (L*, a*, b*)
rgbColor = cst.cielab2rgb(cielabColor)
Using a transfer function to convert to sRGB::
rgbColor = cst.cielab2rgb(cielabColor, transferFunc=cst.srgbTF)
"""
lab, orig_shape, orig_dim = unpackColors(lab)
if conversionMatrix is None:
# XYZ -> sRGB conversion matrix, assumes D65 white point
# mdc - computed using makeXYZ2RGB with sRGB primaries
conversionMatrix = numpy.asmatrix([
[3.24096994, -1.53738318, -0.49861076],
[-0.96924364, 1.8759675, 0.04155506],
[0.05563008, -0.20397696, 1.05697151]
])
if whiteXYZ is None:
# D65 white point in CIE-XYZ color space
# See: https://en.wikipedia.org/wiki/SRGB
whiteXYZ = numpy.asarray([0.9505, 1.0000, 1.0890])
L = lab[:, 0] # lightness
a = lab[:, 1] # green (-) <-> red (+)
b = lab[:, 2] # blue (-) <-> yellow (+)
wht_x, wht_y, wht_z = whiteXYZ # white point in CIE-XYZ color space
# convert Lab to CIE-XYZ color space
# uses reverse transformation found here:
# https://en.wikipedia.org/wiki/Lab_color_space
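# (Added note) Concretely, the reverse mapping evaluated below is
#   f_x = (L* + 16)/116 + a*/500,   f_y = (L* + 16)/116,   f_z = (L* + 16)/116 - b*/200
# followed by the inverse companding function
#   f^-1(t) = t**3                      if t > delta
#   f^-1(t) = 3 * delta**2 * (t - 4/29) otherwise,  with delta = 6/29
# and a final scaling by the white point (X_n, Y_n, Z_n).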
xyz_array = numpy.zeros(lab.shape)
s = (L + 16.0) / 116.0
xyz_array[:, 0] = s + (a / 500.0)
xyz_array[:, 1] = s
xyz_array[:, 2] = s - (b / 200.0)
# evaluate the inverse f-function
delta = 6.0 / 29.0
xyz_array = numpy.where(xyz_array > delta,
xyz_array ** 3.0,
(xyz_array - (4.0 / 29.0)) * (3.0 * delta ** 2.0))
# multiply in white values
xyz_array[:, 0] *= wht_x
xyz_array[:, 1] *= wht_y
xyz_array[:, 2] *= wht_z
# convert to sRGB using the specified conversion matrix
rgb_out = numpy.asarray(numpy.dot(xyz_array, conversionMatrix.T))
# apply sRGB gamma correction if requested
if transferFunc is not None:
rgb_out = transferFunc(rgb_out, **kwargs)
# clip unrepresentable colors if requested
if clip:
rgb_out = numpy.clip(rgb_out, 0.0, 1.0)
# make the output match the dimensions/shape of input
if orig_dim == 1:
rgb_out = rgb_out[0]
elif orig_dim == 3:
rgb_out = numpy.reshape(rgb_out, orig_shape)
return rescaleColor(rgb_out, convertTo='psychopy')
def cielch2rgb(lch,
whiteXYZ=None,
conversionMatrix=None,
transferFunc=None,
clip=False,
**kwargs):
"""Transform CIE L*C*h* coordinates to RGB tristimulus values.
Parameters
----------
lch : tuple, list or ndarray
1-, 2-, 3-D vector of CIE L*C*h* coordinates to convert. The last
dimension should be length-3 in all cases specifying a single
coordinate. The hue angle *h is expected in degrees.
whiteXYZ : tuple, list or ndarray
1-D vector coordinate of the white point in CIE-XYZ color space. Must be
the same white point needed by the conversion matrix. The default
white point is D65 if None is specified, defined as X, Y, Z = 0.9505,
1.0000, 1.0890
conversionMatrix : tuple, list or ndarray
3x3 conversion matrix to transform CIE-XYZ to RGB values. The default
matrix is sRGB with a D65 white point if None is specified. Note that
values must be gamma corrected to appear correctly according to the sRGB
standard.
transferFunc : pyfunc or None
Signature of the transfer function to use. If None, values are kept as
linear RGB (it's assumed your display is gamma corrected via the
hardware CLUT). The TF must be appropriate for the conversion matrix
supplied. Additional arguments to 'transferFunc' can be passed by
specifying them as keyword arguments. Gamma functions that come with
PsychoPy are 'srgbTF' and 'rec709TF', see their docs for more
information.
clip : boolean
Make all output values representable by the display. However, colors
outside of the display's gamut may not be valid!
Returns
-------
ndarray
array of RGB tristimulus values
"""
lch, orig_shape, orig_dim = unpackColors(lch)
# convert values to L*a*b*
lab = numpy.empty(lch.shape, dtype=lch.dtype)
lab[:, 0] = lch[:, 0]
lab[:, 1] = lch[:, 1] * numpy.cos(numpy.radians(lch[:, 2]))
lab[:, 2] = lch[:, 1] * numpy.sin(numpy.radians(lch[:, 2]))
import pyllusion
import numpy as np
def test_delbeouf():
delboeuf1 = pyllusion.Delboeuf(illusion_strength=1, difference=-2, size_min=0.25)
out1 = delboeuf1.to_image()
parameters1 = delboeuf1.get_parameters()
assert list(parameters1) == ['Difference', 'Size_Inner_Left', 'Size_Inner_Right',
'Size_Inner_Difference', 'Illusion', 'Illusion_Strength',
'Illusion_Type', 'Size_Outer_Left', 'Size_Outer_Right',
'Distance', 'Distance_Reference', 'Distance_Edges_Inner',
'Distance_Edges_Outer', 'Size_Inner_Smaller',
'Size_Inner_Larger', 'Size_Outer_Smaller',
'Size_Outer_Larger', 'Position_Left', 'Position_Right']
assert parameters1['Difference'] == -2
assert parameters1['Illusion_Strength'] == 1
assert parameters1['Illusion'] == 'Delboeuf'
assert out1.size == (800, 600)
delboeuf2 = pyllusion.Delboeuf(illusion_strength=1, difference=-2, size_min=0.5)
out2 = delboeuf2.to_image()
parameters2 = delboeuf2.get_parameters()
assert parameters1['Size_Inner_Smaller'] < parameters2['Size_Inner_Smaller']
assert np.mean(np.array(out1)) > np.mean(np.array(out2))
out3 = delboeuf2.to_image(width=900, height=900)
assert out3.size == (900, 900)
def test_ebbinghaus():
ebbinghaus1 = pyllusion.Ebbinghaus(illusion_strength=2, difference=3, size_min=0.25)
out1 = ebbinghaus1.to_image()
parameters1 = ebbinghaus1.get_parameters()
assert list(parameters1) == ['Difference', 'Size_Inner_Left', 'Size_Inner_Right',
'Size_Inner_Difference', 'Illusion', 'Illusion_Strength',
'Illusion_Type', 'Size_Outer_Left', 'Size_Outer_Right',
'Distance', 'Distance_Reference', 'Distance_Edges_Inner',
'Distance_Edges_Outer',
'Size_Inner_Smaller', 'Size_Inner_Larger', 'Size_Outer_Smaller',
'Size_Outer_Larger', 'Position_Outer_x_Left',
'Position_Outer_y_Left', 'Position_Outer_x_Right',
'Position_Outer_y_Right', 'Position_Left', 'Position_Right']
assert parameters1['Difference'] == 3
assert parameters1['Illusion_Strength'] == 2
assert parameters1['Illusion'] == 'Ebbinghaus'
assert out1.size == (800, 600)
ebbinghaus2 = pyllusion.Ebbinghaus(illusion_strength=2, difference=3, size_min=0.5)
out2 = ebbinghaus2.to_image()
parameters2 = ebbinghaus2.get_parameters()
assert parameters1['Size_Inner_Smaller'] < parameters2['Size_Inner_Smaller']
assert np.mean(np.array(out1)) > np.mean(np.array(out2))
out3 = ebbinghaus2.to_image(width=900, height=900)
assert out3.size == (900, 900)
def test_mullerlyer():
mullerlyer1 = pyllusion.MullerLyer(illusion_strength=30, difference=0.3, size_min=0.5)
out1 = mullerlyer1.to_image()
parameters1 = mullerlyer1.get_parameters()
assert list(parameters1) == ['Difference', 'Distance', 'Bottom_x1',
'Bottom_y1', 'Bottom_x2', 'Bottom_y2',
'Top_x1', 'Top_y1','Top_x2', 'Top_y2', 'Size_Bottom',
'Size_Top', 'Size_Larger', 'Size_Smaller', 'Distractor_TopLeft1_x1',
'Distractor_TopLeft1_y1', 'Distractor_TopLeft1_x2',
'Distractor_TopLeft1_y2',
'Distractor_TopLeft2_x1', 'Distractor_TopLeft2_y1',
'Distractor_TopLeft2_x2', 'Distractor_TopLeft2_y2',
'Distractor_TopRight1_x1', 'Distractor_TopRight1_y1',
'Distractor_TopRight1_x2', 'Distractor_TopRight1_y2',
'Distractor_TopRight2_x1', 'Distractor_TopRight2_y1',
'Distractor_TopRight2_x2', 'Distractor_TopRight2_y2',
'Distractor_BottomLeft1_x1', 'Distractor_BottomLeft1_y1',
'Distractor_BottomLeft1_x2', 'Distractor_BottomLeft1_y2',
'Distractor_BottomLeft2_x1', 'Distractor_BottomLeft2_y1',
'Distractor_BottomLeft2_x2', 'Distractor_BottomLeft2_y2',
'Distractor_BottomRight1_x1', 'Distractor_BottomRight1_y1',
'Distractor_BottomRight1_x2', 'Distractor_BottomRight1_y2',
'Distractor_BottomRight2_x1', 'Distractor_BottomRight2_y1',
'Distractor_BottomRight2_x2', 'Distractor_BottomRight2_y2',
'Illusion', 'Illusion_Strength', 'Illusion_Type', 'Distractor_Length']
assert parameters1['Difference'] == 0.3
assert parameters1['Illusion_Strength'] == 30
assert parameters1['Illusion'] == 'MullerLyer'
assert out1.size == (800, 600)
mullerlyer2 = pyllusion.MullerLyer(illusion_strength=30, difference=0.3, size_min=0.8)
out2 = mullerlyer2.to_image()
parameters2 = mullerlyer2.get_parameters()
assert parameters1['Size_Smaller'] < parameters2['Size_Smaller']
assert np.mean(np.array(out1)) > np.mean(np.array(out2))
out3 = mullerlyer2.to_image(width=900, height=900)
assert out3.size == (900, 900)
def test_ponzo():
ponzo1 = pyllusion.Ponzo(illusion_strength=20, difference=0.2, size_min=0.5)
out1 = ponzo1.to_image()
parameters1 = ponzo1.get_parameters()
assert list(parameters1) == ['Difference', 'Distance', 'Bottom_x1', 'Bottom_y1',
'Bottom_x2', 'Bottom_y2', 'Top_x1', 'Top_y1',
'Top_x2', 'Top_y2', 'Size_Bottom', 'Size_Top',
'Size_Larger', 'Size_Smaller', 'Illusion_Strength',
'Illusion_Type', 'Side_Angle', 'Side_Length',
'Left_x1', 'Left_y1', 'Left_x2', 'Left_y2',
'Right_x1', 'Right_y1', 'Right_x2', 'Right_y2', 'Illusion']
assert parameters1['Difference'] == 0.2
assert parameters1['Illusion_Strength'] == 20
assert parameters1['Illusion'] == 'Ponzo'
assert out1.size == (800, 600)
ponzo2 = pyllusion.Ponzo(illusion_strength=20, difference=0.2, size_min=0.8)
out2 = ponzo2.to_image()
parameters2 = ponzo2.get_parameters()
assert parameters1['Size_Bottom'] < parameters2['Size_Bottom']
assert np.mean(np.array(out1)) > np.mean(np.array(out2))
out3 = ponzo2.to_image(width=900, height=900)
assert out3.size == (900, 900)
def test_zollner():
zollner1 = pyllusion.Zollner(illusion_strength=60, difference=0.5, distractors_n=5)
out1 = zollner1.to_image()
parameters1 = zollner1.get_parameters()
assert list(parameters1) == ['Illusion', 'Illusion_Strength', 'Difference',
'Illusion_Type', 'Top_x1', 'Top_y1', 'Top_x2',
'Top_y2', 'Bottom_x1', 'Bottom_y1', 'Bottom_x2',
'Bottom_y2', 'Distractors_n', 'Distractors_Size', 'Distractors_Top_x1',
'Distractors_Top_y1', 'Distractors_Top_x2',
'Distractors_Top_y2', 'Distractors_Bottom_x1',
'Distractors_Bottom_y1', 'Distractors_Bottom_x2',
'Distractors_Bottom_y2', 'Distractors_Angle']
assert parameters1['Difference'] == 0.5
assert parameters1['Illusion_Strength'] == 60
assert parameters1['Illusion'] == 'Zollner'
assert out1.size == (800, 600)
zollner2 = pyllusion.Zollner(illusion_strength=60, difference=0.5, distractors_n=8)
out2 = zollner2.to_image()
parameters2 = zollner2.get_parameters()
assert parameters1['Distractors_n'] < parameters2['Distractors_n']
assert np.mean(np.array(out1)) > np.mean(np.array(out2))
out3 = zollner2.to_image(width=900, height=900)
assert out3.size == (900, 900)
def test_rodframe():
rodframe1 = pyllusion.RodFrame(illusion_strength=20, difference=10)
out1 = rodframe1.to_image()
parameters1 = rodframe1.get_parameters()
assert list(parameters1) == ['Illusion', 'Frame_Angle', 'Rod_Angle',
'Angle_Difference', 'Difference',
'Illusion_Strength', 'Illusion_Type']
assert parameters1['Difference'] == 10
assert parameters1['Illusion_Strength'] == 20
assert parameters1['Illusion'] == 'RodFrame'
assert out1.size == (800, 600)
rodframe2 = pyllusion.RodFrame(illusion_strength=20, difference=10)
out2 = rodframe2.to_image(outline=30)
parameters2 = rodframe2.get_parameters()
assert np.mean(np.array(out1)) > np.mean(np.array(out2))
out3 = rodframe2.to_image(width=900, height=900)
assert out3.size == (900, 900)
def test_verticalhorizontal():
verticalhorizontal1 = pyllusion.VerticalHorizontal(illusion_strength=90, difference=0)
out1 = verticalhorizontal1.to_image()
parameters1 = verticalhorizontal1.get_parameters()
assert list(parameters1) == ['Illusion', 'Size_Left', 'Size_Right',
'Size_Larger', 'Size_Smaller',
'Difference', 'Illusion_Strength', 'Illusion_Type',
'Left_x1', 'Left_y1', 'Left_x2', 'Left_y2',
'Left_Angle', 'Right_x1', 'Right_y1',
'Right_x2', 'Right_y2', 'Right_Angle']
assert parameters1['Difference'] == 0
assert parameters1['Illusion_Strength'] == 90
assert parameters1['Illusion'] == 'VerticalHorizontal'
assert out1.size == (800, 600)
verticalhorizontal2 = pyllusion.VerticalHorizontal(illusion_strength=90, difference=0.5)
out2 = verticalhorizontal2.to_image()
parameters2 = verticalhorizontal2.get_parameters()
assert parameters1['Difference'] < parameters2['Difference']
assert np.mean(np.array(out1)) > np.mean(np.array(out2))
import numpy as np
import tensorflow.keras.backend as K
from tensorflow.keras.layers import (Activation, Concatenate, Conv2D, Flatten,
Input, InputSpec, Layer, Reshape)
from tensorflow.keras.models import Model
from nets.vgg import VGG16
class Normalize(Layer):
def __init__(self, scale, **kwargs):
self.axis = 3
self.scale = scale
super(Normalize, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
init_gamma = self.scale * np.ones(shape)
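# --- Added sketch: the original snippet is truncated at this point. ---
# The rest of this SSD-style Normalize layer typically registers `init_gamma` as a
# trainable weight and rescales an L2-normalized input; a minimal sketch (assumed,
# not from the original file) is:
#   self.gamma = K.variable(init_gamma, name='{}_gamma'.format(self.name))
#   self.trainable_weights = [self.gamma]
#
#   def call(self, x, mask=None):
#       return K.l2_normalize(x, self.axis) * self.gamma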
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 19 13:16:25 2015
@author: hbanks
Brevity required, prudence preferred
"""
import os
import io
import glob
import errno
import copy
import json
import time
import warnings
import numpy as np
from scipy.optimize import curve_fit
import scipy.interpolate as spi
import scipy.optimize as spo
import scipy.integrate as intgt
import scipy.fftpack as fft
import scipy.special as spl
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
import itertools as itt
import multiprocessing as mp
import sys
sys.path.append('/Users/marketing/Desktop/HSG-turbo/')
import hsganalysis.QWPProcessing as qwp
from hsganalysis.QWPProcessing.extractMatrices import makeT,saveT
np.set_printoptions(linewidth=500)
# One of the main results is the HighSidebandCCD.sb_results array. These are the
# various mappings between index and real value
# I deally, this code should be converted to pandas to avoid this issue,
# but that's outside the scope of current work.
# [sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
# [ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
class sbarr(object):
SBNUM = 0
CENFREQ = 1
CENFREQERR = 2
AREA = 3
AREAERR = 4
WIDTH = 5
WIDTHERR = 6
####################
# Objects
####################
class CCD(object):
def __init__(self, fname, spectrometer_offset=None):
"""
This will read the appropriate file and make a basic CCD object. Fancier
things will be handled with the sub classes.
Creates:
self.parameters = Dictionary holding all of the information from the
data file, which comes from the JSON encoded header in the data
file
self.description = string that is the text box from data taking GUI
self.raw_data = raw data output by measurement software, wavelength vs.
data, errors. There may be text for some of the entries
corresponding to text used for Origin imports, but they
should appear as np.nan
self.ccd_data = semi-processed 1600 x 3 array of photon energy vs. data with standard error of mean at that pixel
calculated by taking multiple images. Standard error is calculated from
the data collection software
Most subclasses should make a self.proc_data, which will do whatever
processing is required to the ccd_data, such as normalizing, taking ratios,
etc.
:param fname: file name where the data is saved
:type fname: str
:param spectrometer_offset: if the spectrometer won't go where it's told, use this to correct the wavelengths (nm)
:type spectrometer_offset: float
"""
self.fname = fname
# Checking restrictions from Windows path length limits. Check if you can
# open the file:
try:
with open(fname) as f: pass
except FileNotFoundError:
# Couldn't find the file. Could be you passed the wrong one, but I'm
# finding with a large number of subfolders for polarimetry stuff,
# you end up exceeding Windows' filelength limit.
# Haven't tested on Mac or UNC moutned drives (e.g \\128.x.x.x\Sherwin\)
fname = r"\\?\\" + os.path.abspath(fname)
# Read in the JSON-formatted parameter string.
# The lines are all prepended by '#' for easy numpy importing
# so loop over all those lines
with open(fname, 'r') as f:
param_str = ''
line = f.readline()
while line[0] == '#':
### changed 09/17/18
# This line assumed there was a single '#'
# param_str += line[1:]
# while this one handles everal (because I found old files
# which had '## <text>...'
param_str += line.replace("#", "")
line = f.readline()
# Parse the JSON string
try:
self.parameters = json.loads(param_str)
except json.JSONDecodeError:
# error from _really_ old data where comments were dumped after a
# single-line json dumps
self.parameters=json.loads(param_str.splitlines()[0])
# Spec[trometer] steps are set to define the same physical data, but taken at
# different spectrometer center wavelengths. This value is used later
# for stitching these scans together
try:
self.parameters["spec_step"] = int(self.parameters["spec_step"])
except (ValueError, KeyError):
# If there isn't a spec_step parameter, or it can't be parsed as an int, default it to 0
self.parameters["spec_step"] = 0
# Slice through 3 to get rid of comments/origin info.
# Would likely be better to check np.isnan() and slicing out those nans.
# I used flipup so that the x-axis is an increasing function of frequency
self.raw_data = np.flipud(np.genfromtxt(fname, comments='#', delimiter=',')[3:])
# The camera chip is 1600 pixels wide. This line was redundant with the [3:]
# slice above and served to make sure there weren't extra stray bad lines
# hanging around.
#
# This should also be updated some day to compensate for any horizontal binning
# on the chip, or masking out points that are bad (cosmic ray making it
# through processing, room lights or monitor lines interfering with signal)
self.ccd_data = np.array(self.raw_data[:1600, :])
# Check to see if the spectrometer offset is set. This isn't specified
# during data collection. This is a value that can be appended
# when processing if it's realized the data is offset.
# This allows the offset to be specified and kept with the data file itself,
# instead of trying to do it in individual processing scripts
#
# It's allowed as a kwarg parameter in this script for trying to determine
# what the correct offset should be
if spectrometer_offset is not None or "offset" in self.parameters:
try:
self.ccd_data[:, 0] += float(self.parameters["offset"])
except:
self.ccd_data[:, 0] += spectrometer_offset
# Convert from nm to eV
# self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
self.ccd_data[:, 0] = photon_converter["nm"]["eV"](self.ccd_data[:, 0])
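# (Added example, hypothetical file name) Typical use of the base class:
#   spec = CCD("hsg_scan_0001.txt")
#   energies = spec.ccd_data[:, 0]   # photon energy axis in eV
#   counts = spec.ccd_data[:, 1]     # signal at each pixel
#   errors = spec.ccd_data[:, 2]     # standard error of the mean per pixel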
class Photoluminescence(CCD):
def __init__(self, fname):
"""
This object handles PL-type data. The only distinction from the parent class
is that the CCD data gets normalized to the exposure time to make different
exposures directly comparable.
creates:
self.proc_data = self.ccd_data divided by the exposure time
units: PL counts / second
:param fname: name of the file
:type fname: str
"""
super(Photoluminescence, self).__init__(fname)
# Create a copy of the array , and then normalize the signal and the errors
# by the exposure time
self.proc_data = np.array(self.ccd_data)
self.proc_data[:, 1] = self.proc_data[:, 1] / self.parameters['exposure']
self.proc_data[:, 2] = self.proc_data[:, 2] / self.parameters['exposure']
class Absorbance(CCD):
def __init__(self, fname):
"""
There are several ways Absorbance data can be loaded
You could try to load the abs data output from data collection directly,
which has the wavelength, raw, blank and actual absorbance data itself.
This is best way to do it.
Alternatively, you could want to load the raw transmission/reference
data, ignoring (or maybe not even having) the abs calculated
from the data collection software. If you want to do it this way,
you should pass fname as a list where the first element is the
file name for the reference data, and the second is the absorbance data
At first, it didn't really seem to make sense to let you pass just the
raw reference or raw abs data,
Creates:
self.ref_data = np array of the reference,
freq (eV) vs. reference (counts)
self.raw_data = np.array of the raw absorption spectrum,
freq (eV) vs. reference (counts)
self.proc_data = np.array of the absorption spectrum
freq (eV) vs. "absorbance" (dB)
Note, the error bars for this data haven't been defined.
:param fname: either an absorbance filename, or a length 2 list of filenames
:type fname: str
:return: None
"""
if "abs_" in fname:
super(Absorbance, self).__init__(fname)
# Separate into the separate data sets
# The raw counts of the reference data
self.ref_data = np.array(self.ccd_data[:, [0, 1]])
# Raw counts of the sample
self.raw_data = np.array(self.ccd_data[:, [0, 2]])
# The calculated absorbance data (-10*log10(raw/ref))
self.proc_data = np.array(self.ccd_data[:, [0, 3]]) # Already in dB's
else:
# Should be here if you pass the reference/trans filenames
try:
super(Absorbance, self).__init__(fname[0])
self.ref_data = np.array(self.ccd_data)
super(Absorbance, self).__init__(fname[1])
self.raw_data = np.array(self.ccd_data)
except ValueError:
# ValueError gets thrown when importing older data
# which had more headers than data columns. Enforce
# only loading first two columns to avoid numpy trying
# to parse all of the data
# See CCD.__init__ for what's going on.
self.ref_data = np.flipud(np.genfromtxt(fname[0], comments='#',
delimiter=',', usecols=(0, 1)))
self.ref_data = np.array(self.ref_data[:1600, :])
self.ref_data[:, 0] = 1239.84 / self.ref_data[:, 0]
self.raw_data = np.flipud(np.genfromtxt(fname[1], comments='#',
delimiter=',', usecols=(0, 1)))
self.raw_data = np.array(self.raw_data[:1600, :])
self.raw_data[:, 0] = 1239.84 / self.raw_data[:, 0]
except Exception as e:
print("Exception opening absorbance data,", e)
# Calculate the absorbance from the raw camera counts.
self.proc_data = np.empty_like(self.ref_data)
self.proc_data[:, 0] = self.ref_data[:, 0]
self.proc_data[:, 1] = -10*np.log10(self.raw_data[:, 1] / self.ref_data[:,
1])
def abs_per_QW(self, qw_number):
"""
:param qw_number: number of quantum wells in the sample.
:type qw_number: int
:return: None
"""
"""
This method turns the absorption to the absorbance per quantum well. Is
that how this data should be reported?
Also, I'm not sure if columns 1 and 2 are correct.
"""
temp_abs = -np.log(self.proc_data[:, 1] / self.proc_data[:, 2]) / qw_number
self.proc_data = np.hstack((self.proc_data, temp_abs))
def fft_smooth(self, cutoff, inspectPlots=False):
"""
This function removes the Fabry-Perot that affects the absorption data
creates:
self.clean = np.array of the Fourier-filtered absorption data, freq (eV) vs. absorbance (dB!)
self.parameters['fourier cutoff'] = the low pass cutoff frequency, in eV**(-1)
:param cutoff: Fourier frequency of the cut off for the low pass filter
:type cutoff: int or float
:param inspectPlots: Do you want to see the results?
:type inspectPlots: bool
:return: None
"""
# self.fixed = -np.log10(abs(self.raw_data[:, 1]) / abs(self.ref_data[:, 1]))
# self.fixed = np.nan_to_num(self.proc_data[:, 1])
# self.fixed = np.column_stack((self.raw_data[:, 0], self.fixed))
self.parameters['fourier cutoff'] = cutoff
self.clean = low_pass_filter(self.proc_data[:, 0], self.proc_data[:, 1], cutoff, inspectPlots)
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This bad boy saves the absorption spectrum that has been manipulated.
Saves 100 lines of comments.
:param file_name: The base name of the file to be saved
:type file_name: str
:param folder_str: The name of the folder where the file will be saved
:type folder_str: str
:param marker: A further label that might be the series tag or something
:type marker: str
:param index: If multiple files are being saved with the same name, include an integer to append to the end of the file
:type index: int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
self.save_name = spectra_fname
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing into Origin is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
spectra_fname = 'clean ' + spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.clean, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
# class LaserLineCCD(HighSidebandCCD):
# """
# Class for use when doing alinging/testing by sending the laser
# directly into the CCD. Modifies how "sidebands" and guess and fit,
# simply looking at the max signal.
# """
# def guess_sidebands(self, cutoff=8, verbose=False, plot=False):
# pass
class NeonNoiseAnalysis(CCD):
"""
This class is used to make handling neon calibration lines easier. It's not great.
"""
def __init__(self, fname, spectrometer_offset=None):
# print 'opening', fname
super(NeonNoiseAnalysis, self).__init__(fname, spectrometer_offset=spectrometer_offset)
self.addenda = self.parameters['addenda']
self.subtrahenda = self.parameters['subtrahenda']
self.noise_and_signal()
self.process_stuff()
def noise_and_signal(self):
"""
This bad boy calculates the standard deviation of the space between the
neon lines.
The noise regions are, in nm:
high: 784-792
low1: 795-806
low2: 815-823
low3: 831-834
the peaks are located at, in nm:
#1, weak: 793.6
#2, medium: 794.3
#3, medium: 808.2
#4, weak: 825.9
#5, strong: 830.0
"""
print('\n\n')
self.ccd_data = np.flipud(self.ccd_data)
# self.high_noise_region = np.array(self.ccd_data[30:230, :])
self.high_noise_region = np.array(self.ccd_data[80:180, :]) # for dark current measurements
self.low_noise_region1 = np.array(self.ccd_data[380:700, :])
self.low_noise_region2 = np.array(self.ccd_data[950:1200, :])
self.low_noise_region3 = np.array(self.ccd_data[1446:1546, :])
# self.high_noise = np.std(self.high_noise_region[:, 1])
self.high_noise_std = np.std(self.high_noise_region[:, 1])
self.high_noise_sig = np.mean(self.high_noise_region[:, 1])
self.low_noise1 = np.std(self.low_noise_region1[:, 1])
self.low_noise2 = np.std(self.low_noise_region2[:, 1])
self.low_noise_std = np.std(self.low_noise_region2[:, 1])
self.low_noise_sig = np.mean(self.low_noise_region2[:, 1])
self.low_noise3 = np.std(self.low_noise_region3[:, 1])
# self.noise_list = [self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3]
self.peak1 = np.array(self.ccd_data[303:323, :])
self.peak2 = np.array(self.ccd_data[319:339, :])
self.peak3 = np.array(self.ccd_data[736:746, :])
self.peak4 = np.array(self.ccd_data[1268:1288, :])
self.peak5 = np.array(self.ccd_data[1381:1421, :])
temp_max = np.argmax(self.peak1[:, 1])
self.signal1 = np.sum(self.peak1[temp_max - 1:temp_max + 2, 1])
self.error1 = np.sqrt(np.sum(self.peak1[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak2[:, 1])
self.signal2 = np.sum(self.peak2[temp_max - 1:temp_max + 2, 1])
self.error2 = np.sqrt(np.sum(self.peak2[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak3[:, 1])
self.signal3 = np.sum(self.peak3[temp_max - 1:temp_max + 2, 1])
self.error3 = np.sqrt(np.sum(self.peak3[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak4[:, 1])
self.signal4 = np.sum(self.peak4[temp_max - 1:temp_max + 2, 1])
self.error4 = np.sqrt(np.sum(self.peak4[temp_max - 1:temp_max + 2, 2] ** 2))
temp_max = np.argmax(self.peak5[:, 1])
self.signal5 = np.sum(self.peak5[temp_max - 1:temp_max + 2, 1])
self.error5 = np.sqrt(np.sum(self.peak5[temp_max - 1:temp_max + 2, 2] ** 2))
self.signal_list = [self.signal1, self.signal2, self.signal3, self.signal4, self.signal5]
self.error_list = [self.error1, self.error2, self.error3, self.error4, self.error5]
print("Signal list:", self.signal_list)
self.ccd_data = np.flipud(self.ccd_data)
def process_stuff(self):
"""
This one puts the high-noise signal/std and the low-noise signal/std into a nice horizontal array (self.results)
"""
# self.results = np.array([self.high_noise, self.low_noise1, self.signal5, self.error5])
# average = np.mean([self.low_noise1, self.low_noise2, self.low_noise3])
# self.results = np.array([self.high_noise, self.low_noise1, self.low_noise2, self.low_noise3, self.high_noise/average])
self.results = np.array([self.high_noise_sig, self.high_noise_std, self.low_noise_sig, self.low_noise_std])
def collect_noise(neon_list, param_name, folder_name, file_name, name='Signal'):
"""
This function acts like save parameter sweep.
param_name = string that we're gonna save!
"""
# param_array = None
for elem in neon_list:
print("pname: {}".format(elem.parameters[param_name]))
print("results:", elem.results)
temp = np.insert(elem.results, 0, elem.parameters[param_name])
try:
param_array = np.row_stack((param_array, temp))
except UnboundLocalError:
param_array = np.array(temp)
if len(param_array.shape) == 1:
print("I don't think you want this file")
return
# append the relative peak error
print('\n', param_array, '\n')
param_array = np.column_stack((param_array, param_array[:, 4] / param_array[:, 3]))
# append the snr
param_array = np.column_stack((param_array, param_array[:, 3] / param_array[:, 2]))
try:
param_array = param_array[param_array[:, 0].argsort()]
except:
print("param_array shape", param_array.shape)
raise
try:
os.mkdir(folder_name)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
file_name = file_name + '.txt'
origin_import1 = param_name + ",Noise,Noise,Signal,error,rel peak error,peak signal-to-noise"
# origin_import1 = param_name + ",Noise,Noise,Noise,Noise,Ratio"
origin_import2 = ",counts,counts,counts,counts,,"
# origin_import2 = ",counts,counts,counts,,"
origin_import3 = ",High noise region,Low noise region,{},{} error,{} rel error, {}".format(name, name, name, name)
# origin_import3 = ",High noise region,Low noise region 1,Low noise region 2,Low noise region 3,High/low"
header_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# print "Spec header: ", spec_header
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_name, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_name, file_name)))
class HighSidebandCCD(CCD):
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
"""
This will read the appropriate file. The header needs to be fixed to
reflect the changes to the output header from the Andor file. Because
another helper file will do the cleaning and background subtraction,
those are no longer part of this init. This also turns all wavelengths
from nm (NIR ones) or cm-1 (THz ones) into eV.
OR, if an array is thrown in there, it'll handle the array and dict
Input:
For post-processing analysis:
hsg_thing = file name of the hsg spectrum from CCD superclass
spectrometer_offset = number of nanometers the spectrometer is off by,
should be 0.0...but can be 0.2 or 1.0
For Live-software:
hsg_thing = np array of spectrum from camera
parameter_dict = equipment dict generated by software
Internal:
self.hsg_thing = the filename
self.parameters = dictionary of all the relevant experimental parameters
self.description = the description we added to the file as the data
was being taken
self.proc_data = processed data that has been converted to frequency (eV) vs counts/pulse
self.dark_stdev = this is not currently handled appropriately
self.addenda = the list of things that have been added to the file, in
form of [constant, *spectra_added]
self.subtrahenda = the list of spectra that have been subtracted from
the file. Constant subtraction is dealt with with
self.addenda
:param hsg_thing: file name for the file to be opened. OR the actually hsg np.ndarray. Fun!
:type hsg_thing: str OR np.ndarray
:param parameter_dict: If being loaded through the data acquisition GUI, throw the dict in here
:type parameter_dict: dict
:param spectrometer_offset: Number of nm the spectrometer is off by
:type spectrometer_offset: float
:return: None, technically
"""
if isinstance(hsg_thing, str):
super(HighSidebandCCD, self).__init__(hsg_thing, spectrometer_offset=spectrometer_offset)
# TODO: fix addenda bullshit
self.addenda = []
self.subtrahenda = []
elif isinstance(hsg_thing, np.ndarray):
self.parameters = parameter_dict.copy() # Probably shouldn't shoehorn this in this way
self.addenda = []
self.subtrahenda = []
self.ccd_data = np.array(hsg_thing)
self.ccd_data[:, 0] = 1239.84 / self.ccd_data[:, 0]
# This data won't have an error column, so attached a column of ones
self.ccd_data = np.column_stack((self.ccd_data, np.ones_like(self.ccd_data[:,1])))
self.ccd_data = np.flipud(self.ccd_data) # Because turning into eV switches direction
self.fname = "Live Data"
else:
raise Exception("I don't know what this file type is {}, type: {}".format(
hsg_thing, type(hsg_thing)
))
self.proc_data = np.array(self.ccd_data)
# proc_data is now a 1600 long array with [frequency (eV), signal (counts / FEL pulse), S.E. of signal mean]
# self.parameters["nir_freq"] = 1239.84 / float(self.parameters["nir_lambda"])
self.parameters["nir_freq"] = 1239.84 / float(self.parameters.get("nir_lambda", -1))
# self.parameters["thz_freq"] = 0.000123984 * float(self.parameters["fel_lambda"])
self.parameters["thz_freq"] = 0.000123984 * float(self.parameters.get("fel_lambda", -1))
# self.parameters["nir_power"] = float(self.parameters["nir_power"])
self.parameters["nir_power"] = float(self.parameters.get("nir_power", -1))
try: # This is the new way of doing things. Also, now it's power
self.parameters["thz_energy"] = float(self.parameters["pulseEnergies"]["mean"])
self.parameters["thz_energy_std"] = float(self.parameters["pulseEnergies"]["std"])
except: # This is the old way TODO: DEPRECATE THIS
self.parameters["thz_energy"] = float(self.parameters.get("fel_power", -1))
# things used in fitting/guessing
self.sb_list = np.array([])
self.sb_index = np.array([])
self.sb_dict = {}
self.sb_results = np.array([])
self.full_dict = {}
def __add__(self, other):
"""
Add together the image data from self.proc_data, or add a constant to
that np.array. It will then combine the addenda and subtrahenda lists,
as well as add the fel_pulses together. If type(other) is a CCD object,
then it will add the errors as well.
Input:
self = CCD-like object
other = int, float or CCD object
Internal:
ret.proc_data = the self.proc_data + other(.proc_data)
ret.addenda = combination of two input addenda lists
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be added, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Add a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other
ret.addenda[0] = ret.addenda[0] + other
# or add the data of two hsg_spectra together
else:
if np.isclose(ret.parameters['center_lambda'], other.parameters['center_lambda']):
ret.proc_data[:, 1] = self.proc_data[:, 1] + other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 2] ** 2 + other.proc_data[:, 2] ** 2)
ret.addenda[0] = ret.addenda[0] + other.addenda[0]
ret.addenda.extend(other.addenda[1:])
ret.subtrahenda.extend(other.subtrahenda)
ret.parameters['fel_pulses'] += other.parameters['fel_pulses']
else:
raise Exception('Source: Spectrum.__add__:\nThese are not from the same grating settings')
return ret
def __sub__(self, other):
"""
This subtracts constants or other data sets between self.proc_data. I
think it even keeps track of what data sets are in the file and how
they got there.
See how __add__ works for more information.
This raises a FutureWarning because these were designed early on and
haven't been used much.
:param other: The thing to be subtracted, it's either a int/float or a HighSidebandCCD object
:type other: int/float or HighSidebandCCD
:return: Sum of self and other
:rtype: HighSidebandCCD
"""
raise FutureWarning
ret = copy.deepcopy(self)
# Subtract a constant offset to the data
if type(other) in (int, float):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other # Need to choose a name
ret.addenda[0] = ret.addenda[0] - other
# Subtract the data of two hsg_spectra from each other
else:
if np.isclose(ret.proc_data[0, 0], other.proc_data[0, 0]):
ret.proc_data[:, 1] = self.proc_data[:, 1] - other.proc_data[:, 1]
ret.proc_data[:, 2] = np.sqrt(self.proc_data[:, 2] ** 2 + other.proc_data[:, 2] ** 2)
ret.subtrahenda.extend(other.addenda[1:])
ret.addenda.extend(other.subtrahenda)
else:
raise Exception('Source: Spectrum.__sub__:\nThese are not from the same grating settings')
return ret
def __repr__(self):
base = """
fname: {},
Series: {series},
spec_step: {spec_step},
fel_lambda: {fel_lambda},
nir_lambda: {nir_lambda}""".format(os.path.basename(self.fname),**self.parameters)
return base
__str__ = __repr__
def calc_approx_sb_order(self, test_nir_freq):
"""
This simple method will simply return a float approximating the order
of the frequency input. We need this because the CCD wavelength
calibration is not even close to perfect. And it shifts by half a nm
sometimes.
:param test_nir_freq: the frequency guess of the nth sideband
:type test_nir_freq: float
:return: The approximate order of the sideband in question
:rtype: float
"""
nir_freq = self.parameters['nir_freq']
thz_freq = self.parameters['thz_freq']
# If thz = 0, prevent error
if not thz_freq: thz_freq = 1
approx_order = (test_nir_freq - nir_freq) / thz_freq
return approx_order
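# Worked example for the estimate above (illustrative numbers only): with
# nir_freq = 1.6000 eV and thz_freq = 0.0022 eV, a peak at 1.6132 eV gives
# (1.6132 - 1.6000) / 0.0022 = 6.0, i.e. roughly the 6th-order sideband.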
def guess_sidebands(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
Update 05/24/18:
Hunter had two different loops for negative order sidebands,
then positive order sidebands. They're done pretty much identically,
so I've finally merged them into one.
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to the default cutoff of 4.5. I don't
know what it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
try:
error = np.array(self.proc_data[:, 2])
except IndexError:
# Happens on old data where spectra weren't calculated in the live
# software.
error = np.ones_like(x_axis)
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
# Find max strength sideband and it's order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
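# check_ratio is a rough SNR for the strongest peak: the (background-reduced)
# peak area divided by the scatter of the pixels surrounding it.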
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
# Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label="{} Box".format(order))
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
# get the slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose: print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index == False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index == False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index == False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
octant = len(check_y) // 8 # To be able to break down check_y into eighths
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label=order)
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
# print "\tI found", order, "at index", found_index, "at freq", last_sb
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - (2 * octant + 1) * check_ave)
error_est = np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]])) / (
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
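# Hedged sketch of how the guess feeds fit_sidebands below: self.sb_index[i]
# centers the fit window, self.sb_guess[i, 0] and [i, 1] seed the Gaussian
# center and area, and self.sb_guess[i, 2] (a relative error) times the fitted
# area later replaces the covariance-based area error. A row of sb_guess for a
# strong sideband might look like [1.6132, 740.2, 0.013] (illustrative numbers).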
def guess_sidebandsOld(self, cutoff=4.5, verbose=False, plot=False, **kwargs):
"""
05/24/18
Old code from Hunter's days (or nearly, I've already started cleaning some
stuff up). keeping it around in case I break too much stuff
Finds the locations of all the sidebands in the proc_data array to be
able to seed the fitting method. This works by finding the maximum data
value in the array and guessing what sideband it is. It creates an array
that includes this information. It will then step down, initially by one
THz frequency, then by twos after it hasn't found any odd ones. It then
goes up from the max and finds everything above in much the same way.
There is currently no rhyme or reason to a cutoff of 8. I don't know what
it should be changed to, though.
Input:
cutoff = signal-to-noise threshold to count a sideband candidate.
kwargs:
window_size: how big of a window (in pixels) to use for checking for
sidebands. Specified in half-width
default: 15
Internal:
self.sb_list = List of all of the orders the method found
self.sb_index = index of all of the peaks of the sidebands
self.sb_guess = three-part list including the frequency, amplitude and
error guesses for each sideband
"""
# TODO: this isn't commented appropriately. Will it be made more readable first?
if "cutoff" in self.parameters:
cutoff = self.parameters["cutoff"]
else:
self.parameters['cutoff for guess_sidebands'] = cutoff
if verbose:
print("=" * 15)
print()
print("Guessing CCD Sideband parameters")
print(os.path.basename(self.fname))
print("\tCutoff = {}".format(cutoff))
print()
print("=" * 15)
x_axis = np.array(self.proc_data[:, 0])
y_axis = np.array(self.proc_data[:, 1])
error = np.array(self.proc_data[:, 2])
min_sb = int(self.calc_approx_sb_order(x_axis[0])) + 1
try:
max_sb = int(self.calc_approx_sb_order(x_axis[-1]))
except ValueError:
print(x_axis)
nir_freq = self.parameters["nir_freq"]
thz_freq = self.parameters["thz_freq"]
if verbose:
print("min_sb: {} | max_sb: {}".format(min_sb, max_sb))
# Find max strength sideband and it's order
global_max = np.argmax(y_axis)
order_init = int(round(self.calc_approx_sb_order(x_axis[global_max])))
# if verbose:
# print "The global max is at index", global_max
if global_max < 15:
check_y = y_axis[:global_max + 15]
check_y = np.concatenate((np.zeros(15 - global_max), check_y))
elif global_max > 1585:
check_y = y_axis[global_max - 15:]
check_y = np.concatenate((check_y, np.zeros(global_max - 1585)))
else:
check_y = y_axis[global_max - 15:global_max + 15]
check_max_index = np.argmax(check_y)
check_max_area = np.sum(check_y[check_max_index - 2:check_max_index + 3])
check_ave = np.mean(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_stdev = np.std(check_y[[0, 1, 2, 3, 4, -1, -2, -3, -4, -5]])
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if verbose:
print(("{:^16}" * 5).format(
"global_max idx", "check_max_area", "check_ave", "check_stdev",
"check_ratio"))
print(("{:^16.5g}" * 5).format(
global_max, check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
self.sb_list = [order_init]
self.sb_index = [global_max]
sb_freq_guess = [x_axis[global_max]]
sb_amp_guess = [y_axis[global_max]]
sb_error_est = [
np.sqrt(sum([i ** 2 for i in error[global_max - 2:global_max + 3]])) / (
check_max_area - 5 * check_ave)]
else:
print("There are no sidebands in", self.fname)
raise RuntimeError
if verbose:
print("\t Looking for sidebands with f < {:.6f}".format(sb_freq_guess[0]))
last_sb = sb_freq_guess[0]
index_guess = global_max
# keep track of how many consecutive sidebands we've skipped. Sometimes one's
# noisy or something, so we want to keep looking after skipping one
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init - 1, min_sb - 1, -1):
# Check to make sure we're not looking at an odd when
# we've decided to skip them.
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb - thz_freq
if verbose:
print("I skipped", order)
continue
# Window size to look for next sideband. Needs to be order dependent
# because higher orders get wider, so we need to look at more.
# Values are arbitrary.
window_size = 0.45 + 0.0004 * order # used to be last_sb?
lo_freq_bound = last_sb - thz_freq * (
1 + window_size) # Not sure what to do about these
hi_freq_bound = last_sb - thz_freq * (1 - window_size)
if verbose:
print("\nSideband", order)
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
# Get the indices where the energies lie within the bounds for this SB
sliced_indices = \
np.where((x_axis > lo_freq_bound) & (x_axis < hi_freq_bound))[0]
start_index, end_index = sliced_indices.min(), sliced_indices.max()
# Get a slice of the y_data which is only in the region of interest
check_y = y_axis[sliced_indices]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
# Calculate the "area" of the sideband by looking at the peak value
# within the range, and the pixel above/below it
check_max_area = np.sum(check_y[check_max_index - 1:check_max_index + 2])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label="{} Box".format(order))
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
# get the slice that doesn't have the peak in it to compare statistics
check_region = np.append(check_y[:check_max_index - 1],
check_y[check_max_index + 2:])
check_ave = check_region.mean()
check_stdev = check_region.std()
# Calculate an effective SNR, where check_ave is roughly the
# background level
check_ratio = (check_max_area - 3 * check_ave) / check_stdev
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 1.5
if verbose:
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.5g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("I just found", last_sb)
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - 3 * check_ave)
error_est = np.sqrt(
sum(
[i ** 2 for i in error[found_index - 1:found_index + 2]]
)) / (check_max_area - 3 * check_ave)
if verbose:
print("My error estimate is:", error_est)
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb - thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
# Look for higher sidebands
if verbose: print("\nLooking for higher energy sidebands")
last_sb = sb_freq_guess[0]
index_guess = global_max
consecutive_null_sb = 0
consecutive_null_odd = 0
no_more_odds = False
break_condition = False
for order in range(order_init + 1, max_sb + 1):
if no_more_odds == True and order % 2 == 1:
last_sb = last_sb + thz_freq
continue
window_size = 0.45 + 0.001 * order # used to be 0.28 and 0.0004
lo_freq_bound = last_sb + thz_freq * (
1 - window_size) # Not sure what to do about these
hi_freq_bound = last_sb + thz_freq * (1 + window_size)
start_index = False
end_index = False
if verbose:
print("\nSideband", order)
# print "The low frequency bound is", lo_freq_bound
# print "The high frequency bound is", hi_freq_bound
print("\t{:.4f} < f_{} < {:.4f}".format(lo_freq_bound, order,
hi_freq_bound))
for i in range(index_guess, 1600):
if start_index == False and i == 1599:
# print "I'm all out of space, captain!"
break_condition = True
break
elif start_index == False and x_axis[i] > lo_freq_bound:
# print "start_index is", i
start_index = i
elif i == 1599:
end_index = 1599
# print "hit end of data, end_index is 1599"
elif end_index == False and x_axis[i] > hi_freq_bound:
end_index = i
# print "end_index is", i
index_guess = i
break
if break_condition:
break
check_y = y_axis[start_index:end_index]
check_max_index = np.argmax(
check_y) # This assumes that two floats won't be identical
octant = len(check_y) // 8 # To be able to break down check_y into eighths
if octant < 1:
octant = 1
check_max_area = np.sum(
check_y[check_max_index - octant - 1:check_max_index + octant + 1])
if verbose and plot:
plt.figure("CCD data")
plt.plot([lo_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([hi_freq_bound] * 2, [0, check_y[check_max_index]], 'b')
plt.plot([lo_freq_bound, hi_freq_bound], [check_y[check_max_index]] *
2, 'b', label=order)
plt.text((lo_freq_bound + hi_freq_bound) / 2, check_y[check_max_index],
order)
no_peak = (2 * len(
check_y)) // 6 # The denominator is in flux, used to be 5
# if verbose: print "\tcheck_y length", len(check_y)
check_ave = np.mean(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_stdev = np.std(np.take(check_y, np.concatenate(
(np.arange(no_peak), np.arange(-no_peak, 0)))))
check_ratio = (check_max_area - (2 * octant + 1) * check_ave) / check_stdev
if verbose:
print("\tIndices: {}->{} (d={})".format(start_index, end_index,
len(check_y)))
# print "check_y is", check_y
# print "\ncheck_max_area is", check_max_area
# print "check_ave is", check_ave
# print "check_stdev is", check_stdev
# print "check_ratio is", check_ratio
print("\t" + ("{:^14}" * 4).format(
"check_max_area", "check_ave", "check_stdev", "check_ratio"))
print("\t" + ("{:^14.6g}" * 4).format(
check_max_area, check_ave, check_stdev, check_ratio))
if order % 2 == 1: # This raises the barrier for odd sideband detection
check_ratio = check_ratio / 2
if check_ratio > cutoff:
found_index = check_max_index + start_index
self.sb_index.append(found_index)
last_sb = x_axis[found_index]
if verbose:
print("\tI'm counting this SB at index {} (f={:.4f})".format(
found_index, last_sb), end=' ')
# print "\tI found", order, "at index", found_index, "at freq", last_sb
sb_freq_guess.append(x_axis[found_index])
sb_amp_guess.append(check_max_area - (2 * octant + 1) * check_ave)
error_est = np.sqrt(sum([i ** 2 for i in error[
found_index - octant:found_index + octant]])) / (
check_max_area - (2 * octant + 1) * check_ave)
# This error is a relative error.
if verbose:
print(". Err = {:.3g}".format(error_est))
# print "\tMy error estimate is:", error_est
# print "My relative error is:", error_est / sb_amp_guess
sb_error_est.append(error_est)
self.sb_list.append(order)
consecutive_null_sb = 0
if order % 2 == 1:
consecutive_null_odd = 0
else:
# print "I could not find sideband with order", order
last_sb = last_sb + thz_freq
consecutive_null_sb += 1
if order % 2 == 1:
consecutive_null_odd += 1
if verbose:
print("\t\tI did not count this sideband")
if consecutive_null_odd == 1 and no_more_odds == False:
# print "I'm done looking for odd sidebands"
no_more_odds = True
if consecutive_null_sb == 2:
# print "I can't find any more sidebands"
break
if verbose:
print("I found these sidebands:", self.sb_list)
print('-' * 15)
print()
print()
self.sb_guess = np.array([np.asarray(sb_freq_guess), np.asarray(sb_amp_guess),
np.asarray(sb_error_est)]).T
# self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
def fit_sidebands(self, plot=False, verbose=False):
"""
This takes self.sb_guess and fits to each maxima to get the details of
each sideband. It's really ugly, but it works. The error of the
sideband area is approximated from the data, not the curve fit. All
else is from the curve fit. Which is definitely underestimating the
error, but we don't care too much about those errors (at this point).
self.sb_guess = [frequency guess, amplitude guess, relative error of amplitude] for each sideband.
Temporary stuff:
sb_fits = holder of the fitting results until all spectra have been fit
window = an integer that determines the "radius" of the fit window, proportional to thz_freq.
Attributes created:
self.sb_results = the money maker. Column order:
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
self.full_dict = a dictionary similar to sb_results, but now the keys
are the sideband orders. Column ordering is otherwise the same.
:param plot: Do you want to see the fits plotted with the data?
:type plot: bool
:param verbose: Do you want to see the details AND the initial guess fits?
:type verbose: bool
:return: None
"""
# print "Trying to fit these"
sb_fits = []
if verbose:
print("=" * 15)
print()
print("Fitting CCD Sidebands")
print(os.path.basename(self.fname))
print()
print("=" * 15)
# pretty sure you want this up here so things don't break
# when no sidebands found
self.full_dict = {}
thz_freq = self.parameters["thz_freq"]
window = 15 + int(15 * thz_freq / 0.0022) # Adjust the fit window based on the sideband spacing
# The 15's are based on empirical knowledge that for
# 540 GHz (2.23 meV), the best window size is 30 and
# that it seems like the window size should grow slowly?
for elem, peakIdx in enumerate(self.sb_index): # Have to do this because guess_sidebands
# doesn't output data in the most optimized way
if peakIdx < window:
data_temp = self.proc_data[:peakIdx + window, :]
elif (1600 - peakIdx) < window:
data_temp = self.proc_data[peakIdx - window:, :]
else:
data_temp = self.proc_data[peakIdx - window:peakIdx + window, :]
width_guess = 0.0001 + 0.000001 * self.sb_list[elem] # so the width guess gets wider as order goes up
p0 = np.array([self.sb_guess[elem, 0],
self.sb_guess[elem, 1] * width_guess,
width_guess,
0.1])
# print "Let's fit this shit!"
if verbose:
print("Fitting SB {}. Peak index: {}, {}th peak in spectra".format(
self.sb_list[elem], peakIdx, elem
))
# print "\nnumber:", elem, num
# print "data_temp:", data_temp
# print "p0:", p0
print(' '*20 +"p0 = " + np.array_str(p0, precision=4))
# plot_guess = True # This is to disable plotting the guess function
if verbose and plot:
plt.figure('CCD data')
linewidth = 3
x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *p0),
plt.gca().get_lines()[-1].get_color() + '--' # I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
, linewidth=linewidth)
except: # to prevent weird mac issues with the matplotlib things?
plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
else:
plt.plot(x_vals, gauss(x_vals, *p0), '--', linewidth=linewidth)
try:
# 11/1/16
# needed to bump maxfev up to 2k because a sideband wasn't being fit
# Fix for sb 106
# 05-23 Loren 10nm\hsg_640_Perp352seq_spectrum.txt
coeff, var_list = curve_fit(
gauss, data_temp[:, 0], data_temp[:, 1], p0=p0, maxfev = 2000)
except Exception as e:
if verbose:
print("\tThe fit failed:")
print("\t\t", e)
print("\tFitting region: {}->{}".format(peakIdx-window, peakIdx+window))
# print "I couldn't fit", elem
# print "It's sideband", num
# print "In file", self.fname
# print "because", e
# print "wanted to fit xindx", peakIdx, "+-", window
self.sb_list[elem] = None
continue # This will ensure the rest of the loop is not run without an actual fit.
coeff[1] = abs(coeff[1]) # The amplitude could be negative if the linewidth is negative
coeff[2] = abs(coeff[2]) # The linewidth shouldn't be negative
if verbose:
print("\tFit successful: ", end=' ')
print("p = " + np.array_str(coeff, precision=4))
# print "coeffs:", coeff
# print "sigma for {}: {}".format(self.sb_list[elem], coeff[2])
if 10e-4 > coeff[2] > 10e-6:
try:
sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.diag(var_list)))))
except RuntimeWarning:
sb_fits.append(np.hstack((self.sb_list[elem], coeff, np.sqrt(np.abs(np.diag(var_list))))))
# the var_list wasn't approximating the error well enough, even when using sigma and absoluteSigma
# self.sb_guess[elem, 2] is the relative error as calculated by the guess_sidebands method
# coeff[1] is the area from the fit. Therefore, the product should be the absolute error
# of the integrated area of the sideband. The other errors are still underestimated.
#
# 1/12/18 note: So it looks like what hunter did is calculate an error estimate
# for the strength/area by the quadrature sum of errors of the points in the peak
# (from like 813 in guess_sidebands:
# error_est = np.sqrt(sum([i ** 2 for i in error[found_index - 1:found_index + 2]])) / (
# Where the error is what comes from the CCD by averaging 4 spectra. As far as I can tell,
# it doesn't currently pull in the dark counts or anything like that, except maybe
# indirectly since it'll cause the variations in the peaks
sb_fits[-1][6] = self.sb_guess[elem, 2] * coeff[1]
if verbose:
print("\tRel.Err: {:.4e} | Abs.Err: {:.4e}".format(
self.sb_guess[elem, 2], coeff[1] * self.sb_guess[elem, 2]
))
print()
# print "The rel. error guess is", self.sb_guess[elem, 2]
# print "The abs. error guess is", coeff[1] * self.sb_guess[elem, 2]
# The error from self.sb_guess[elem, 2] is a relative error
if plot and verbose:
plt.figure('CCD data')
linewidth = 5
x_vals = np.linspace(data_temp[0, 0], data_temp[-1, 0], num=500)
if elem != 0:
try:
plt.plot(x_vals, gauss(x_vals, *coeff),
plt.gca().get_lines()[-1].get_color() + '--' # I don't really know. Mostly
# just looked around at what functions
# matplotlib has...
, linewidth=linewidth)
except: # to prevent weird mac issues with the matplotlib things?
plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
else:
plt.plot(x_vals, gauss(x_vals, *coeff), '--', linewidth=linewidth)
sb_fits_temp = np.asarray(sb_fits)
reorder = [0, 1, 5, 2, 6, 3, 7, 4, 8]
# Reorder the list to put the error of the i-th parameter as the i+1th.
try:
sb_fits = sb_fits_temp[:, reorder]
# if verbose: print "The abs. error guess is", sb_fits[:, 0:5]
except:
raise RuntimeError("No sidebands to fit?")
# Going to label the appropriate row with the sideband
self.sb_list = sorted(list([x for x in self.sb_list if x is not None]))
sb_names = np.vstack(self.sb_list)
# Sort by SB order
sorter = np.argsort(sb_fits[:, 0])
self.sb_results = np.array(sb_fits[sorter, :7])
if verbose:
print("\tsb_results:")
print("\t\t" + ("{:^5s}" + ("{:^12s}")*(self.sb_results.shape[1]-1)).format(
"SB", "Cen.En.", "", "Area", "", "Width",""))
for line in self.sb_results:
print('\t\t[' + ("{:^5.0f}"+ "{:<12.4g}"*(line.size-1)).format(*line) + ']')
print('-'*19)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
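# Hedged end-to-end sketch for one CCD spectrum (file and folder names are
# hypothetical):
#   spec = HighSidebandCCD("hsg_spectrum_000.txt")
#   spec.guess_sidebands(cutoff=4.5)
#   spec.fit_sidebands(plot=False)
#   print(spec.sb_results)        # [order, freq, err, area, err, width, err]
#   print(spec.full_dict[12])     # same row for sideband 12, keyed by order
#   spec.save_processing("hsg_spectrum", "Processed HSG",
#                        marker=spec.parameters.get("series", ""), index=0)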
def infer_frequencies(self, nir_units="wavenumber", thz_units="GHz", bad_points=-2):
"""
This guy tries to fit the results from fit_sidebands to a line to get the relevant frequencies
:param nir_units: What units do you want this to output?
:type nir_units: 'nm', 'wavenumber', 'eV', 'THz'
:param thz_units: What units do you want this to output for the THz?
:type thz_units: 'GHz', 'wavenumber', 'meV'
:param bad_points: How many more-positive order sidebands shall this ignore?
:type bad_points: int
:return: freqNIR, freqTHz, the frequencies in the appropriate units
"""
# force same units for in dict
freqNIR, freqTHz = calc_laser_frequencies(self, "wavenumber", "wavenumber", bad_points)
self.parameters["calculated NIR freq (cm-1)"] = "{}".format(freqNIR, nir_units)
self.parameters["calculated THz freq (cm-1)"] = "{}".format(freqTHz, freqTHz)
freqNIR, freqTHz = calc_laser_frequencies(self, nir_units, thz_units, bad_points)
return freqNIR, freqTHz
def save_processing(self, file_name, folder_str, marker='', index='', verbose=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full name of the folder the file is saved in. The folder will be created if necessary
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
ampli = np.array([temp[:, 3] / temp[:, 5]]) # But [:, 3] is already area?
# (The old name was area)
# I think it must be amplitude
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
self.parameters['addenda'] = self.addenda
self.parameters['subtrahenda'] = self.subtrahenda
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nSideband,Center energy,error,Sideband strength,error,Linewidth,error,Amplitude'
origin_import_fits += '\norder,eV,,arb. u.,,meV,,arb. u.'
origin_import_fits += "\n{},,,{},,,".format(marker, marker)
fits_header = '#' + parameter_str + origin_import_fits
# print "DEBUG: in saving", folder_str, ",", spectra_fname
np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, fit_fname), save_results, delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, spectra_fname)))
class HighSidebandCCDRaw(HighSidebandCCD):
"""
This class is meant for passing in an image file (currently supports a 2x1600)
Which it does all the processing on.
"""
def __init__(self, hsg_thing, parameter_dict=None, spectrometer_offset=None):
# let the supers do the hard work of importing the json dict and all that jazz
super(HighSidebandCCDRaw, self).__init__(hsg_thing, parameter_dict=parameter_dict, spectrometer_offset=spectrometer_offset)
self.ccd_data = np.genfromtxt(hsg_thing, delimiter=',').T
self.proc_data = np.column_stack((
self.gen_wavelengths(self.parameters["center_lambda"], self.parameters["grating"]),
np.array(self.ccd_data[:,1], dtype=float)-np.median(self.ccd_data[:,1]),
np.ones_like(self.ccd_data[:,1], dtype=float)
))
self.proc_data[:, 0] = 1239.84 / self.proc_data[:, 0]
self.proc_data = np.flipud(self.proc_data)
@staticmethod
def gen_wavelengths(center_lambda, grating):
'''
This returns a 1600 element list of wavelengths for each pixel in the EMCCD based on grating and center wavelength
grating = which grating, 1, 2, or 3
center = center wavelength in nanometers
'''
b = 0.75 # length of spectrometer, in m
k = -1.0 # order looking at
r = 16.0e-6 # distance between pixels on CCD
if grating == 1:
d = 1. / 1800000.
gamma = 0.213258508834
delta = 1.46389935365
elif grating == 2:
d = 1. / 1200000.
gamma = 0.207412628027
delta = 1.44998344749
elif grating == 3:
d = 1. / 600000.
gamma = 0.213428934011
delta = 1.34584754696
else:
print("What a dick, that's not a valid grating")
return None
center = center_lambda * 10 ** -9
wavelength_list = np.arange(-799.0, 801.0)
output = d * k ** (-1) * ((-1) * np.cos(delta + gamma + (-1) * np.arccos(
(-1 / 4) * (1 / np.cos((1 / 2) * gamma)) ** 2 * (
2 * (np.cos((1 / 2) * gamma) ** 4 * (2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (
1 / 2) + d ** (-1) * k * center * np.sin(gamma))) + np.arctan(
b ** (-1) * (r * wavelength_list + b * np.cos(delta + gamma)) * (1 / np.sin(delta + gamma)))) + (
1 + (-1 / 16) * (1 / np.cos((1 / 2) * gamma)) ** 4 * (2 * (
np.cos((1 / 2) * gamma) ** 4 * (
2 + (-1) * d ** (-2) * k ** 2 * center ** 2 + 2 * np.cos(gamma))) ** (1 / 2) + d ** (
-1) * k * center * np.sin(
gamma)) ** 2) ** (1 / 2))
output = (output + center) * 10 ** 9
return output
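# Hedged sketch for the static calibration above (values are illustrative):
#   wl = HighSidebandCCDRaw.gen_wavelengths(center_lambda=780.0, grating=2)
#   # wl is a 1600-element array (pixels -799..800) of wavelengths in nm,
#   # computed from the spectrometer geometry hard-coded above; __init__ then
#   # converts it to eV via 1239.84 / wl.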
class PMT(object):
def __init__(self, file_name):
"""
Initializes a SPEX spectrum. It'll open a file, and bring in the details
of a sideband spectrum into the object. There isn't currently any reason
to use inheritance here, but it could be extended later to include PLE or
something of the sort.
attributes:
self.parameters - dictionary of important experimental parameters
this will not necessarily be the same for each
file in the object
self.fname - the current file path
:param file_name: The name of the PMT file
:type file_name: str
:return: None
"""
# print "This started"
self.fname = file_name
# self.files_included = [file_name]
with open(file_name, 'r') as f:
param_str = ''
line = f.readline() # Needed to move past the first line, which is the sideband order. Not generally useful
line = f.readline()
while line[0] == '#':
param_str += line[1:]
line = f.readline()
self.parameters = json.loads(param_str)
class HighSidebandPMT(PMT):
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
super(HighSidebandPMT, self).__init__(
file_path) # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
if self.parameters.get("photon counted", False):
# The scale factor for photon counting to generic
# PMT data depends on... things. It's different each
# day. Unfortunately, the overlap in dynamic range between
# the two is small, and generally only one sideband
# can been seen by both methods. I don't really have
# the motivation to automatically calculate the
# appropriate factor, so this is your reminder to find
# it yourself.
import time
# assert time.strftime("%x") == "03/15/17"
assert self.parameters.get("pc ratio", -1) != -1, self.fname
raw_temp[:,3] *= self.parameters["pc ratio"]
pass
raw_temp[:, 0] = raw_temp[:, 0] / 8065.6 # turn NIR freq into eV
self.parameters["thz_freq"] = 0.000123984 * float(
self.parameters.get("fel_lambda", -1))
self.parameters["nir_freq"] = float(
self.parameters.get("nir_lambda", -1))/8065.6
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
NOTE: This means that if both aren't equally "good" (taking a second scan with higher
gain/photon counting because you didn't see it), you need to not add the file
(remove/rename the file, etc.)
I'd love to overhaul the data collection/analysis so this can be more intelligent
(effectively offload a lot of the processing (especially not saving 10 arbitrary
points to process later) onto the live software and add sideband strengths alone,
like the CCD works, but this would be a bigger change than I can seem to find
time for).
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
:param other: the new sideband data to add to the larger spectrum. Add means append, no addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
if other.initial_sb not in self.sb_list:
self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
self.sb_dict[other.initial_sb] = np.row_stack(
(self.sb_dict[other.initial_sb], other.initial_data)
)
except KeyError:
self.sb_dict[other.initial_sb] = np.array(other.initial_data)
except Exception as e:
print("THIS IS THE OTHER ERROR", e)
raise
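# Hedged sketch of stitching one PMT spectrum out of several files (paths are
# hypothetical; every later file gets folded into the first object):
#   files = sorted(glob.glob(os.path.join("PMT data", "sb_*.txt")))
#   pmt_spec = HighSidebandPMT(files[0])
#   for f in files[1:]:
#       pmt_spec.add_sideband(HighSidebandPMT(f))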
def process_sidebands(self, verbose=False, baselineCorr = False):
"""
This bad boy will clean up the garbled mess that is the object before hand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:param baselineCorr: Whether to subtract a linear baseline fit through
the two endpoints of each sideband scan
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
except:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
# temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
if baselineCorr:
x = temp[[0, -1], 0]
y = temp[[0, -1], 1]
p = np.polyfit(x, y, 1)
temp[:, 1] -= np.polyval(p, temp[:,0])
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False, cutoff=1.0, **kwargs):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
cutoff is the ratio of area/error which must be exceeded to count
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
[sb order, Freq (eV), "error" (eV), Integrated area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)]
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
if verbose:
print("="*15)
print()
print("Integrating PMT Sidebands")
print("Cutoff: {}".format(cutoff))
print(os.path.basename(self.fname))
print()
print("=" * 15)
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
# stroff = np.nan_to_num(sideband[1][[0,1,-2,1], 1]).sum()/4.
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("\torder: {}, area: {:.3g}, error: {:.3g}, ratio: {:.3f}".format(
sideband[0], area, error, area/error
))
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("\t\tarea < 0")
continue
elif area < cutoff/5 * error: # Two seems like a good cutoff?
if verbose:
print("\t\tI did not keep sideband")
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
except:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
if verbose:
print('-'*19)
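# A minimal sketch of the acceptance test above (editor's example, hypothetical
# averaged sideband block with columns [NIR energy (eV), mean signal, std. err.],
# with cutoff as passed to integrate_sidebands):
#
# sb = np.array([[1.6000, 0.1, 0.02],
#                [1.6001, 1.0, 0.05],
#                [1.6002, 0.2, 0.02]])
# area = np.trapz(sb[:, 1], sb[:, 0])
# error = np.sqrt(np.sum(sb[:, 2] ** 2)) / 8065.6        # same scaling as above
# keep = (area > 0) and (area > cutoff / 5 * error)      # mirrors the cutoff test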
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
except:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
except:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False, **kwargs):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:,0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results)
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
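# The normalization above is a plain ratio with relative errors added in
# quadrature. A minimal numeric sketch (editor's example, hypothetical values):
#
# sb_area, sb_err = 2.0e-3, 1.0e-4
# laser_area, laser_err = 5.0e-1, 1.0e-2
# norm = sb_area / laser_area
# norm_err = norm * np.sqrt((sb_err / sb_area) ** 2
#                           + (laser_err / laser_area) ** 2)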
def save_processing(self, file_name, folder_str, marker='', index='', verbose=False):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full name for the folder the file is saved in. Folder can be created
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nIndex,Center energy,error,Amplitude,error,Linewidth,error\nInt,eV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
except:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
if verbose:
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
class HighSidebandPMTOld(PMT):
"""
Old version: Replaced March 01, 2017
Class initialized by loading in data set.
Multiple copies of the same sideband were stacked as raw data and combined,
effectively causing (2) 10-pt scans to be treated the same as (1) 20pt scan.
This works well until you have photon counted pulses.
"""
def __init__(self, file_path, verbose=False):
"""
Initializes a SPEX spectrum. It'll open a single file, then read
the data from that file using .add_sideband(). The super's init will handle the parameters
and the description.
attributes:
self.parameters - dictionary of important experimental parameters, created in PMT
self.sb_dict - keys are sideband order, values are PMT data arrays
self.sb_list - sorted list of included sidebands
:param file_path: path to the current file
:type file_path: str
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return:
"""
super(HighSidebandPMTOld, self).__init__(
file_path) # Creates the json parameters dictionary
self.fname = file_path
self.parameters["files included"] = [file_path]
with open(file_path, 'r') as f:
sb_num = int(f.readline()[1:])
raw_temp = np.genfromtxt(file_path, comments='#', delimiter=',')[3:, :]
self.initial_sb = sb_num
self.initial_data = np.array(raw_temp)
self.sb_dict = {sb_num: np.array(raw_temp)}
self.sb_list = [sb_num]
def add_sideband(self, other):
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object. It handles
when you measure the same sideband twice. It assumes both are equally "good"
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could, if
you have two incomplete dictionaries
:param other: the new sideband data to add to the larger spectrum. Add means append, no addition is performed
:type other: HighSidebandPMT
:return:
"""
"""
This bad boy will add another PMT sideband object to the sideband spectrum of this object
It currently doesn't do any sort of job combining dictionaries or anything, but it definitely could
"""
self.parameters["files included"].append(other.fname)
if other.initial_sb in self.sb_list:
self.sb_list.append(other.initial_sb)
# Make things comma delimited?
try:
self.sb_dict[other.initial_sb] = np.vstack(
(self.sb_dict[other.initial_sb], other.initial_data))
except:
self.sb_dict[other.initial_sb] = np.array(other.initial_data)
def process_sidebands(self, verbose=False):
"""
This bad boy will clean up the garbled mess that is the object before hand,
including clearing out misfired shots and doing the averaging.
Affects:
self.sb_dict = Averages over sidebands
Creates:
self.sb_list = The sideband orders included in this object.
:param verbose: Flag to see the nitty gritty details.
:type verbose: bool
:return: None
"""
for sb_num, sb in list(self.sb_dict.items()):
if sb_num == 0:
fire_condition = -np.inf # This way the FEL doesn't need to be on during laser line measurement
else:
fire_condition = np.mean(sb[:, 2]) / 2 # Say FEL fired if the
# cavity dump signal is
# more than half the mean
# of the cavity dump signal
frequencies = sorted(list(set(sb[:, 0])))
temp = None
for freq in frequencies:
data_temp = np.array([])
for point in sb:
if point[0] == freq and point[2] > fire_condition:
data_temp = np.hstack((data_temp, point[3]))
try:
temp = np.vstack(
(temp, np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])))
except:
temp = np.array([freq, np.mean(data_temp),
np.std(data_temp) / np.sqrt(len(data_temp))])
temp[:, 0] = temp[:, 0] / 8065.6 # turn NIR freq into eV
temp = temp[temp[:, 0].argsort()]
self.sb_dict[sb_num] = np.array(temp)
self.sb_list = sorted(self.sb_dict.keys())
if verbose:
print("Sidebands included", self.sb_list)
def integrate_sidebands(self, verbose=False):
"""
This method will integrate the sidebands to find their strengths, and then
use a magic number to define the width, since they are currently so utterly
undersampled for fitting.
It is currently the preferred method for calculating sideband strengths.
self.fit_sidebands is probably better with better-sampled lines.
Creates:
self.sb_results = full list of integrated data. Column order is:
[sb order, Freq (eV), "error" (eV), Integrated area (arb.), area error, "Linewidth" (eV), "Linewidth error" (eV)]
self.full_dict = Dictionary where the SB order column is removed and turned into the keys. The values
are the rest of that sideband's results.
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
self.full_dict = {}
for sideband in list(self.sb_dict.items()):
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
area = np.trapz(np.nan_to_num(sideband[1][:, 1]), sideband[1][:, 0])
error = np.sqrt(np.sum(np.nan_to_num(
sideband[1][:, 2]) ** 2)) / 8065.6 # Divide by the step size?
if verbose:
print("order", sideband[0])
print("area", area)
print("error", error)
print("ratio", area / error)
details = np.array(
[sideband[0], nir_frequency, 1 / 8065.6, area, error, 2 / 8065.6,
1 / 8065.6])
if area < 0:
if verbose:
print("area less than 0", sideband[0])
continue
elif area < 1.0 * error: # Two seems like a good cutoff?
if verbose:
print("I did not keep sideband ", sideband[0])
continue
try:
self.sb_results = np.vstack((self.sb_results, details))
except:
self.sb_results = np.array(details)
self.full_dict[sideband[0]] = details[1:]
try:
self.sb_results = self.sb_results[self.sb_results[:, 0].argsort()]
except (IndexError, AttributeError):
# IndexError where there's only one sideband
# AttributeError when there aren't any (one sb which wasn't fit)
pass
def fit_sidebands(self, plot=False, verbose=False):
"""
This method will fit a gaussian to each of the sidebands provided in
the self.sb_dict and make a list just like in the EMCCD version. It
will also use the standard error of the integral of the PMT peak as the
error of the gaussian area instead of that element from the covariance
matrix. Seems more legit.
attributes:
self.sb_results: the numpy array that contains all of the fit info just
like it does in the CCD class.
self.full_dict = A dictionary version of self.sb_results
:param plot: Flag to see the results plotted
:type plot: bool
:param verbose: Flag to see the nitty gritty details
:type verbose: bool
:return: None
"""
sb_fits = {}
for sideband in list(self.sb_dict.items()):
if verbose:
print("Sideband number", sideband[0])
print("Sideband data:\n", sideband[1])
index = np.argmax(sideband[1][:, 1])
nir_frequency = sideband[1][index, 0]
peak = sideband[1][index, 1]
width_guess = 0.0001 # Yep, another magic number
p0 = [nir_frequency, peak * width_guess, width_guess, 0.00001]
if verbose:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *p0),
label="fit :{}".format(sideband[1]))
print("p0:", p0)
try:
coeff, var_list = curve_fit(gauss, sideband[1][:, 0], sideband[1][:, 1],
sigma=sideband[1][:, 2], p0=p0)
coeff[1] = abs(coeff[1])
coeff[2] = abs(coeff[2])
if verbose:
print("coeffs:", coeff)
print("stdevs:", np.sqrt(np.diag(var_list)))
print("integral", np.trapz(sideband[1][:, 1], sideband[1][:, 0]))
if np.sqrt(np.diag(var_list))[0] / coeff[
0] < 0.5: # The error on where the sideband is should be small
sb_fits[sideband[0]] = np.concatenate(
(np.array([sideband[0]]), coeff, np.sqrt(np.diag(var_list))))
# print "error then:", sb_fits[sideband[0]][6]
relative_error = np.sqrt(sum([x ** 2 for x in
sideband[1][index - 1:index + 2,
2]])) / np.sum(
sideband[1][index - 1:index + 2, 1])
if verbose:
print("relative error:", relative_error)
sb_fits[sideband[0]][6] = coeff[1] * relative_error
# print "error now:", sb_fits[sideband[0]][6]
if plot:
x_vals = np.linspace(np.amin(sideband[1][:, 0]),
np.amax(sideband[1][:, 0]), num=50)
plt.plot(x_vals, gauss(x_vals, *coeff))
# plt.plot(x_vals, gauss(x_vals, *p0))
else:
print("what happened?")
except:
print("God damn it, Leroy.\nYou couldn't fit this.")
sb_fits[sideband[0]] = None
for result in sorted(sb_fits.keys()):
try:
self.sb_results = np.vstack((self.sb_results, sb_fits[result]))
except:
self.sb_results = np.array(sb_fits[result])
self.sb_results = self.sb_results[:, [0, 1, 5, 2, 6, 3, 7, 4, 8]]
self.sb_results = self.sb_results[:, :7]
if verbose:
print("And the results, please:\n", self.sb_results)
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
def laser_line(self, verbose=False):
"""
This method is designed to scale everything in the PMT to the conversion
efficiency based on our measurement of the laser line with a fixed
attenuation.
Creates:
self.parameters['normalized?'] = Flag to specify if the laser has been
accounted for.
:return: None
"""
if 0 not in self.sb_list:
self.parameters['normalized?'] = False
return
else:
laser_index = np.where(self.sb_results[:, 0] == 0)[0][0]
if verbose:
print("sb_results", self.sb_results[laser_index, :])
print("laser_index", laser_index)
laser_strength = np.array(self.sb_results[laser_index, 3:5])
if verbose:
print("Laser_strength", laser_strength)
for sb in self.sb_results:
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
sb[4] = (sb[3] / laser_strength[0]) * np.sqrt(
(sb[4] / sb[3]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[3] = sb[3] / laser_strength[0]
if verbose:
print("\torder {}, strength {}, error {}".format(sb[0], sb[3], sb[4]))
for sb in list(self.full_dict.values()):
sb[3] = (sb[2] / laser_strength[0]) * np.sqrt(
(sb[3] / sb[2]) ** 2 + (laser_strength[1] / laser_strength[0]) ** 2)
sb[2] = sb[2] / laser_strength[0]
self.parameters['normalized?'] = True
def save_processing(self, file_name, folder_str, marker='', index=''):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
Two files:
self.proc_data = the continuous spectrum
self.sb_results = the individual sideband details
:param file_name: The base name for the saved file
:type file_name: str
:param folder_str: The full name for the folder the file is saved in. Folder can be created
:type folder_str: str
:param marker: Marker for the file, appended to file_name, often the self.parameters['series']
:type marker: str
:param index: used to keep these files from overwriting themselves when marker is the same
:type index: str or int
:return: None
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_fits.txt'
self.save_name = spectra_fname
# self.parameters["files included"] = list(self.files)
try:
parameter_str = json.dumps(self.parameters, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: PMT.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count(
'#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.\n,{:.3f},'.format(
self.parameters["fieldStrength"]["mean"])
spec_header = '#' + parameter_str + origin_import_spec
origin_import_fits = '\nCenter energy,error,Amplitude,error,Linewidth,error\neV,,arb. u.,,eV,,\n,,' # + marker
fits_header = '#' + parameter_str + origin_import_fits
for sideband in sorted(self.sb_dict.keys()):
try:
complete = np.vstack((complete, self.sb_dict[sideband]))
except:
complete = np.array(self.sb_dict[sideband])
np.savetxt(os.path.join(folder_str, spectra_fname), complete, delimiter=',',
header=spec_header, comments='', fmt='%0.6e')
try:
np.savetxt(os.path.join(folder_str, fit_fname), self.sb_results,
delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
except AttributeError:
# Catch the error that happens if you save something without files
print("warning, couldn't save fit file (no sidebands found?)")
print("Saved PMT spectrum.\nDirectory: {}".format(
os.path.join(folder_str, spectra_fname)))
class TimeTrace(PMT):
"""
This class will be able to handle time traces output by the PMT software.
"""
def __init__(self, file_path):
super(TimeTrace, self).__init__(file_path)
class FullSpectrum(object):
def __init__(self):
pass
class FullAbsorbance(FullSpectrum):
"""
I'm imagining this will sew up absorption spectra, but I'm not at all sure
how to do that at the moment.
"""
def __init__(self):
pass
class FullHighSideband(FullSpectrum):
"""
I'm imagining this class is created with a base CCD file, then gobbles up
other spectra that belong with it, then grabs the PMT object to normalize
everything, assuming that PMT object exists.
"""
def __init__(self, initial_CCD_piece):
"""
Initialize a full HSG spectrum. Starts with a single CCD image, then
adds more on to itself using stitch_hsg_dicts.
Creates:
self.fname = file name of the initial_CCD_piece
self.sb_results = The sideband details from the initializing data
self.parameters = The parameter dictionary of the initializing data. May
not have all details of spectrum pieces added later.
self.full_dict = a copy of the sb_results without the zeroth column, which
is SB order
:param initial_CCD_piece: The starting part of the spectrum, often the lowest orders seen by CCD
:type initial_CCD_piece: HighSidebandCCD
:return: None
"""
self.fname = initial_CCD_piece.fname
try:
self.sb_results = initial_CCD_piece.sb_results
except AttributeError:
print(initial_CCD_piece.full_dict)
raise
self.parameters = initial_CCD_piece.parameters
self.parameters['files_here'] = [initial_CCD_piece.fname.split('/')[-1]]
self.full_dict = {}
for sb in self.sb_results:
self.full_dict[sb[0]] = np.asarray(sb[1:])
@staticmethod
def parse_sb_array(arr):
"""
Check to make sure the first even order sideband in an array is not weaker
than the second even order. If this happens, it's likely because the SB was in
the short pass filter and isn't worth counting.
We cut it out to prevent it from interfering with calculating overlaps.
:param arr:
:return:
"""
arr = np.array(arr)
if (arr[0, sbarr.SBNUM]>0 and arr[1, sbarr.SBNUM]>0 and # make sure they're both pos
arr[0, sbarr.AREA] < arr[1, sbarr.AREA]): # and the fact the area is less
# print "REMOVING FIRST SIDEBAND FROM FULLSIDEBAND"
# print arr[0]
# print arr[1]
arr = arr[1:]
full_dict = {}
for sb in arr:
full_dict[sb[0]] = np.asarray(sb[1:])
return full_dict, arr
def add_CCD(self, ccd_object, verbose=False, force_calc=None, **kwargs):
"""
This method will be called by the stitch_hsg_results function to add another
CCD image to the spectrum.
:param ccd_object: The CCD object that will be stiched into the current FullHighSideband object
:type ccd_object: HighSidebandCCD
:return: None
"""
if self.parameters["gain"] == ccd_object.parameters["gain"]:
calc = False
else:
calc = True
if force_calc is not None:
calc = force_calc
if "need_ratio" in kwargs: #cascading it through, starting to think
# everything should be in a kwarg
calc = kwargs.pop("need_ratio")
try:
# self.full_dict = stitch_hsg_dicts(self.full_dict, ccd_object.full_dict,
# need_ratio=calc, verbose=verbose)
self.full_dict = stitch_hsg_dicts(self, ccd_object, need_ratio=calc,
verbose=verbose, **kwargs)
self.parameters['files_here'].append(ccd_object.fname.split('/')[-1])
# update sb_results, too
sb_results = [[k]+list(v) for k, v in list(self.full_dict.items())]
sb_results = np.array(sb_results)
self.sb_results = sb_results[sb_results[:,0].argsort()]
except AttributeError:
print('Error, not enough sidebands to fit here! {}, {}, {}, {}'.format(
self.parameters["series"], self.parameters["spec_step"],
ccd_object.parameters["series"], ccd_object.parameters["spec_step"]
))
def add_PMT(self, pmt_object, verbose=True):
"""
This method will be called by the stitch_hsg_results function to add the PMT
data to the spectrum.
"""
# print "I'm adding PMT once"
# self.full_dict = stitch_hsg_dicts(pmt_object.full_dict, self.full_dict,
# need_ratio=True, verbose=False)
self.full_dict = stitch_hsg_dicts(pmt_object, self,
need_ratio=True, verbose=verbose)
# if verbose:
# self.full_dict, ratio = self.full_dict
# print "I'm done adding PMT data"
self.parameters['files_here'].append(pmt_object.parameters['files included'])
self.make_results_array()
# if verbose:
# return ratio
def make_results_array(self):
"""
The idea behind this method is to create the sb_results array from the
finished full_dict dictionary.
"""
self.sb_results = None
# print "I'm making the results array:", sorted(self.full_dict.keys())
for sb in sorted(self.full_dict.keys()):
# print "Going to add this", sb
try:
self.sb_results = np.vstack((self.sb_results, np.hstack((sb, self.full_dict[sb]))))
except ValueError:
# print "It didn't exist yet!"
self.sb_results = np.hstack((sb, self.full_dict[sb]))
# print "and I made this array:", self.sb_results[:, 0]
def save_processing(self, file_name, folder_str, marker='', index='', verbose=False):
"""
This will save all of the self.proc_data and the results from the
fitting of this individual file.
Format:
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
Inputs:
file_name = the beginning of the file name to be saved
folder_str = the location of the folder where the file will be saved,
will create the folder, if necessary.
marker = I...I don't know what this was originally for
index = used to keep these files from overwriting themselves when in a
list
Outputs:
One file containing self.sb_results (with an appended amplitude column); the proc_data save is currently commented out
"""
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
temp = np.array(self.sb_results)
ampli = np.array([temp[:, 3] / temp[:, 5]]) # I'm pretty sure this is
# amplitude, not area
temp[:, 5:7] = temp[:, 5:7] * 1000 # For meV linewidths
if verbose:
print("sb_results", self.sb_results.shape)
print("ampli", ampli.shape)
save_results = np.hstack((temp, ampli.T))
# spectra_fname = file_name + '_' + marker + '_' + str(index) + '.txt'
fit_fname = file_name + '_' + marker + '_' + str(index) + '_full.txt'
# self.save_name = spectra_fname
# self.parameters['addenda'] = self.addenda
# self.parameters['subtrahenda'] = self.subtrahenda
try:
# PMT files add unnecessary number of lines, dump it into one line
# by casting it to a string.
reduced = self.parameters.copy()
reduced["files_here"] = str(reduced["files_here"])
parameter_str = json.dumps(reduced, sort_keys=True, indent=4, separators=(',', ': '))
except Exception as e:
print(e)
print("Source: EMCCD_image.save_images\nJSON FAILED")
print("Here is the dictionary that broke JSON:\n", self.parameters)
return
parameter_str = parameter_str.replace('\n', '\n#')
num_lines = parameter_str.count('#') # Make the number of lines constant so importing is easier
# for num in range(99 - num_lines): parameter_str += '\n#'
parameter_str += '\n#' * (99 - num_lines)
# origin_import_spec = '\nNIR frequency,Signal,Standard error\neV,arb. u.,arb. u.'
# spec_header = '#' + parameter_str + '\n#' + self.description[:-2] + origin_import_spec
origin_import_fits = '\nSideband,Center energy,error,Sideband strength,error,Linewidth,error,Amplitude'+\
'\norder,eV,,arb. u.,,meV,,arb. u.\n' + ','.join([marker]*8)
fits_header = '#' + parameter_str + origin_import_fits
# np.savetxt(os.path.join(folder_str, spectra_fname), self.proc_data, delimiter=',',
# header=spec_header, comments='', fmt='%f')
np.savetxt(os.path.join(folder_str, fit_fname), save_results, delimiter=',',
header=fits_header, comments='', fmt='%0.6e')
if verbose:
print("Save image.\nDirectory: {}".format(os.path.join(folder_str, fit_fname)))
class TheoryMatrix(object):
def __init__(self,ThzField,Thzomega,nir_wl,dephase,peakSplit,temp=60):
'''
This class is designed to handle everything for creating theory
matrices and comparing them to experiment.
Init defines some constants that are used throughout the calculation
and puts somethings in proper units.
Parameters:
:ThzField: Give in kV/cm.
:Thzomega: Give in Ghz.
:nir_wl: Give in nanometers.
:dephase: Dephasing, give in meV.
Should roughly be the width of absorption peaks
:detune: Detuning, give in meV.
Difference between NIR excitation and band gap
:temp: Temperature, give in K
'''
self.F = ThzField * 10**5
self.Thz_w = Thzomega * 10**9 *2*np.pi
self.nir_wl = nir_wl * 10**(-9)
self.nir_ph = .0012398/self.nir_wl #NIR PHOTON ENERGY
self.detune = 1.52 - self.nir_ph
self.peakSplit = peakSplit*1.602*10**(-22)
self.dephase = dephase*1.602*10**(-22)
self.n_ref = 0
self.iterations = 0
self.max_iter = 0
self.hbar = 1.055*10**(-34) # hbar in Js
self.temp = temp
self.kb = 8.617*10**(-5) # Boltzmann constant in eV/K
self.temp_ev = self.temp*self.kb
def mu_generator(self,gamma1,gamma2,phi,beta):
'''
Given gamma1 and gamma2 produces mu+- according to
mu+- = electron mass/(mc^-1+gamma1 -+ 2*gamma2)
Note that this formula is only accurate for THz and NIR
polarized along [010]. The general form requires gamma3 as well
Parameters:
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
Returns: mu_p, mu_m effective mass of of mu plus/minus
'''
theta = phi + np.pi/4
emass = 9.109*10**(-31) # bare electron mass in kg
m_cond = 0.0665 # Effective mass of conduction band
mu_p = emass/( 1/m_cond + gamma1 - gamma2*np.sqrt(3*np.sin(2*theta)**2+1+3*np.cos(2*theta)**2*beta**2) ) # Calculates mu_plus
mu_m = emass/( 1/m_cond + gamma1 + gamma2*np.sqrt(3*np.sin(2*theta)**2+1+3*np.cos(2*theta)**2*beta**2) ) # Calculates mu_minus
return mu_p,mu_m
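# A minimal sketch of mu_generator (editor's example; the constructor values
# below are hypothetical, not from a particular data set). With the textbook
# Luttinger parameters gamma1 = 6.85, gamma2 = 2.1, THz along [010] (phi = 0)
# and the quoted beta = 1.42, the square root evaluates to 2 and the reduced
# masses come out around 0.06 m_e (plus) and 0.04 m_e (minus):
#
# tm = TheoryMatrix(ThzField=10, Thzomega=450, nir_wl=780,
#                   dephase=3, peakSplit=0)
# mu_p, mu_m = tm.mu_generator(6.85, 2.1, 0.0, 1.42)
# print(mu_p / 9.109e-31, mu_m / 9.109e-31)  # in units of the bare electron mass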
def alpha_value(self,x):
'''
alpha parameter given by Qile's notes on two band model for a given x
Parameters:
:x: the argument of the calculation. Give in radians
Returns:
:alpha_val: the alpha parameter given in Qile's notes
'''
alpha_val = np.cos(x/2) - np.sin(x/2)/(x/2)
# This does the calculation. Pretty straightforward
return alpha_val
def gamma_value(self,x):
'''
gamma parameter given by Qile's notes on two band model
Parameters:
:x: Argument of the calculation. Give in radians
Returns:
:gamma_val: the gamma parameter given in Qile's notes
'''
gamma_val = np.sin(x/2)/(x/2)
# does the calculation
return gamma_val
def Up(self,mu):
'''
Calculates the ponderomotive energy
Ponderomotive energy given by
U = e^2*F_THz^2/(4*mu*w_THz^2)
Parameters:
:F: Thz field. Give in V/m
:mu: effective mass. Give in kg
:w: omega, the THz frequency. Give in angular frequency.
Returns:
:u: The ponderomotive energy
'''
F = self.F
w = self.Thz_w
echarge = 1.602*10**(-19) # electron charge in Coulombs
u = echarge**(2)*F**(2)/(4*mu*w**2) # calculates the ponderomotive energy
return u
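# A rough worked example of the ponderomotive energy (editor's sketch with
# hypothetical numbers): for F = 10 kV/cm = 1e6 V/m, f_THz = 450 GHz and
# mu = 0.05 * 9.109e-31 kg,
#
#   U = e^2 F^2 / (4 mu w^2)
#     ~ (1.6e-19 * 1e6)^2 / (4 * 4.55e-32 * (2*pi*4.5e11)^2)
#     ~ 1.8e-20 J ~ 0.1 eV,
#
# which is far larger than the ~1.9 meV THz photon energy at 450 GHz, i.e. the
# strong-field (non-perturbative) regime this model is built for.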
def phonon_dephase(self,n):
'''
Step function that will compare the energy gained by the sideband to the
energy of the phonon (36.6meV). If the energy is less than the phonon,
return zero. If it's more return the scattering rate as determined by
Yu and Cordana Eq 5.51
This really should be treated as a full integral, but whatever
'''
thz_omega = self.Thz_w
hbar = self.hbar
thz_ev = n*hbar*thz_omega/(1.602*10**-19) # converts to eV
phonon_ev = 36.6*10**(-3) # phonon energy in eV
emass = 9.109*10**(-31) # bare electron mass in kg
m_cond = 0.0665 # Effective mass of conduction band
m_eff = emass*m_cond
phonon_n = 1/(np.exp(phonon_ev/self.temp_ev)-1)
if thz_ev<phonon_ev:
# print('No phonon for order',n)
return 0
else:
W0 = 7.7*10**12 # characteristic rate
rate_frac = phonon_n*np.sqrt((thz_ev+phonon_ev)/thz_ev)+(
phonon_n+1)*np.sqrt((thz_ev-phonon_ev)/thz_ev)+(
phonon_ev/thz_ev)*(-phonon_n*np.arcsinh(np.sqrt(
phonon_ev/thz_ev))+(phonon_n+1)*np.arcsinh(np.sqrt(
(thz_ev-phonon_ev)/thz_ev)))
# Got this from Yu and Cardona's book
fullW = W0*rate_frac
return fullW
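# Quick sanity check of the threshold above (editor's sketch): at a hypothetical
# f_THz = 450 GHz one THz photon carries ~1.86 meV, so the 36.6 meV LO phonon
# channel only opens for sideband orders with n * hbar * w_THz > 36.6 meV:
#
# n_threshold = 36.6e-3 / 1.86e-3   # ~ 19.7, so roughly n >= 20 at 450 GHz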
def integrand(self,x,mu,n):
'''
Calculate the integrand to integrate A_n+- in two_band_model pdf eqn 13.
Given in the new doc pdf from Qile as I_d^(2n)
Parameters:
:x: Argument of integrand equal to omega*t. This is the variable integrated
over.
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:w: Frequency of THz in radians.
:F: Thz field in V/m
:mu: reduced mass give in kg
:n: Order of the sideband
Returns:
:result: The value of the integrand for a given x value
'''
hbar = self.hbar
F = self.F
w = self.Thz_w
dephase = self.dephase
detune = self.detune
pn_dephase = self.phonon_dephase(n)
exp_arg = (-dephase*x/(hbar*w)-pn_dephase*x/w + 1j*x*self.Up(mu)/(hbar*w)*(self.gamma_value(x)**2-1)+1j*n*x/2-1j*detune*x/(hbar*w))
# Argument of the exponential part of the integrand
bessel_arg = x*self.Up(mu)*self.alpha_value(x)*self.gamma_value(x)/(hbar*w)
# Argument of the bessel function
bessel = spl.jv(n/2,bessel_arg)
# calculates the J_n(bessel_arg) bessel function
result = np.exp(exp_arg)*bessel/x
# This is the integrand for a given x
return result
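# scipy's quad only integrates real-valued functions, so elsewhere in this
# class the complex integrand is split into real and imaginary parts which are
# integrated separately and recombined. A minimal sketch of that pattern
# (editor's example; assumes tm is a TheoryMatrix and mu, n, cutoff exist):
#
# re_part = intgt.quad(lambda x: np.real(tm.integrand(x, mu, n)),
#                      0, cutoff, limit=1000000)[0]
# im_part = intgt.quad(lambda x: np.imag(tm.integrand(x, mu, n)),
#                      0, cutoff, limit=1000000)[0]
# full_integral = re_part + 1j * im_part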
def Qintegrand(self,x,mu,n):
'''
Calculate the integrand in the expression for Q, with the simplification
that the canonical momentum is zero upon exciton pair creation.
Parameters:
:x: integration variable of dimensionless units. Equal to omega*tau
:dephase: dephasing rate of the electron hole pair as it is accelerated by
the THz field
:w: Frequency of THz in radians
:F: THz field in V/m
:mu: the effective reduced mass of the electron-hole pair
:n: Order of the sideband
'''
hbar = self.hbar
F = self.F
w = self.Thz_w
dephase = self.dephase
pn_detune = self.phonon_dephase(n)
c0 = 2*(x-np.sin(x))
a = 3*np.sin(2*x)-4*np.sin(w*x)-2*w*x*np.cos(2*x)
b = -3*np.cos(2*w*x)-4*np.cos(x)+2*w*x*np.sin(2*x)+1
c1 = np.sign(a)*np.sqrt(a**2+b**2)
phi = np.arctan2(a,b)
exp_arg = -dephase*x/w-1j*pn_detune*x/w + 1j*(self.Up(mu)*x)/(hbar*w**2)*c0 -1j*n*phi
bessel_arg = self.Up(mu)/(hbar*w)*c1
bessel = spl.jv(n,bessel_arg)
result = np.exp(exp_arg)*bessel*(-1)**(n/2)
return result
def scale_J_n_T(self,Jraw,Jxx,observedSidebands,crystalAngle,saveFileName,
index, save_results=True, scale_to_i=True):
'''
This function takes the raw J from fan_n_Tmat or findJ and scales it with
Jxx found from scaling sideband strengths with the laser line/PMT
In regular processing we actually find all the matrices normalized to Jxx
Now can scale to a given sideband order.
This is to allow comparison between the measured sideband powers,
normalized by the PMT, to the evaluated Path Integral from the two band
model. By normalizing the measured values and integrals to a given
sideband index, we can remove the physical constants from the evaluation.
:param Jraw: set of matrices from findJ
:param Jxx: sb_results from PMT and CCD data
:param observedSidebands: np array of observed sidebands. Data will be
cropped such that these sidebands are included in everything.
:param crystalAngle: (Float) Angle of the sample from the 010 crystal face
:param save_results: Boolean controls if things are saved to txt files.
Currently saves scaled J and T
:param index: the sideband index to which we want to normalize.
:param saveFileName: Str of what you want to call the text files to be saved.
:param scale_to_i: Boolean that controls to normalize to the ith sideband
True -> Scale to ith | False -> scale to laser line
returns: scaledJ, scaledT matrices scaled by Jxx strengths
'''
# Initialize the array for scaling
Jxx_scales = np.array([ ])
self.n_ref = index
if scale_to_i:
for idx in np.arange(len(Jxx[:,0])):
if Jxx[idx,0] == index:
scale_to = Jxx[idx,3]
print('scale to:',scale_to)
# sets the scale_to to be Jxx for the ith sideband
else:
scale_to = 1 # just makes this 1 if you don't want to scale to i
scaledJ = Jraw # initialize the scaled J matrix
for idx in np.arange(len(Jxx[:,0])):
if Jxx[idx,0] in observedSidebands:
Jxx_scales = np.append(Jxx_scales,Jxx[idx,3]/scale_to)
print('Scaling sb order',Jxx[idx,0])
# Creates scaling factor
for idx in np.arange(len(Jxx_scales)):
scaledJ[:,:,idx] = Jraw[:,:,idx]*Jxx_scales[idx]
# For each sideband scales Jraw by Jxx_scales
scaledT = makeT(scaledJ,crystalAngle)
# Makes scaledT from our new scaledJ
if save_results:
saveT(scaledJ, observedSidebands, "{}_scaledJMatrix.txt".format(saveFileName))
saveT(scaledT, observedSidebands, "{}_scaledTMatrix.txt".format(saveFileName))
# Saves the matrices
return scaledJ, scaledT
def Q_normalized_integrals(self,gamma1,gamma2,n,phi,beta):
'''
Returns Q_n^{HH}/Q_n^{LH} == Integrand_n^{HH}/Integrand_n^{LH}
Unlike the normalized integrals used in early 2020 analysis, these integrals are of a
given Fourier component's intensity from either the HH or LH band, and thus there is no
prefactor related to the energy of the given sideband photon
Parameters:
:dephase: dephasing rate passed to intiallized TMAtrix object
:w: the frequency of the THz field, in GHz
:F: THz field strength in V/m
:gamma1: Gamma1 parameter from Luttinger Hamiltonian
:gamma2: Gamma2 parameter from Luttinger Hamiltonian
:n: Order of the sideband for this integral
:phi: [100] to THz orientation, passed from the cost function (in radians)
:beta: experimentally measured g3/g2 ratio
Returns: QRatio, the ratio of Q_n^{HH}/Q_n^{LH}
'''
mu_p,mu_m = self.mu_generator(gamma1,gamma2,phi,beta)
w = self.Thz_w
hbar = self.hbar
detune = self.detune
U_pp = self.Up(mu_p)
U_pm = self.Up(mu_m)
int_cutoff_HH = ((n*hbar*w-detune)/(8*U_pp))**(1/4)
int_cutoff_LH = ((n*hbar*w-detune)/(8*U_pm))**(1/4)
# Because the integral is complex, the real and imaginary parts have to be
# integrated separately.
re_Q_HH = intgt.quad(lambda x: np.real(self.Qintegrand(x,mu_p,n)),
0,int_cutoff_HH)[0]
re_Q_LH = intgt.quad(lambda x: np.real(self.Qintegrand(x,mu_m,n)),
0,int_cutoff_LH)[0]
im_Q_HH = intgt.quad(lambda x: np.imag(self.Qintegrand(x,mu_p,n)),
0,int_cutoff_HH)[0]
im_Q_LH = intgt.quad(lambda x: np.imag(self.Qintegrand(x,mu_m,n)),
0,int_cutoff_LH)[0]
# Combine the real and imaginary to have the full integral
QRatioRe = re_Q_HH/re_Q_LH
QRatioIm = im_Q_HH/im_Q_LH
return QRatioRe, QRatioIm
def normalized_integrals(self,gamma1,gamma2,n,n_ref,phi,beta):
'''
Returns the plus and minus eta for a given sideband order, normalized
to order n_ref (should probably be 10?). This whole calculation relies
on calculating the ratio of these quantities to get rid of some troubling
constants. So you need a reference integral.
eta(n)+- =
(w_nir + 2*n*w_thz)^2/(w_nir + 2*n_ref*w_thz)^2 *
(mu_+-/mu_ref)^2 * (int(n)+-)^2/(int(n_ref)+)^2
This takes gamma1 and gamma2 and gives the effective mass via mu_generator.
It then calculates the normalized integrals for both mu's and gives eta,
which is the integrals squared with some prefactors.
Then you feed this into a cost function that varies gamma1 and gamma2.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency in GHz of fel. DO NOT give in angular form, the code
does that for you.
:F: THz field strength
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n: Order of sideband for this integral
:n_ref: Order of the reference integral which everything will be divided by
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
Returns: eta_p, eta_m the values of the eta parameter normalized to the
appropriate sideband order for plus and minus values of mu.
'''
mu_p,mu_m = self.mu_generator(gamma1,gamma2,phi,beta)
# gets the plus/minus effective mass
omega_thz = self.Thz_w # FEL frequency
omega_nir = 2.998*10**8/(self.nir_wl) *2*np.pi
# NIR frequency, takes nm (wavelength) and gives angular Hz
Field = self.F # THz field
hbar = self.hbar
dephase = self.dephase
int_cutoff = hbar*omega_thz/dephase*10
# This cuts off the integral when x* dephase/hbaromega = 10
# Therefore the values of the integrand will be reduced by a value
# of e^(-10) which is about 4.5*10^(-5)
re_int_ref = intgt.quad(lambda x: np.real(self.integrand(
x,mu_p,n_ref)),0,int_cutoff,limit = 1000000)[0]
re_int_p = intgt.quad(lambda x: np.real(self.integrand(
x,mu_p,n)),0,int_cutoff,limit = 1000000)[0]
re_int_m = intgt.quad(lambda x: np.real(self.integrand(
x,mu_m,n)),0,int_cutoff,limit = 1000000)[0]
# Ok so these integrands are complex valued, but the intgt.quad integration
# does not work with that. So we split the integral up into two parts,
# real and imaginary parts. These lines calculate the real part for the
# reference, plus, and minus integrals.
# The integrals currently are limited to 1,000,000 subdivisions. No clue if that's
# a good amount or what. We could potentially make this simpler by doing
# a trapezoidal rule.
# We define the lambda function here to set all the values of the integrand
# function we want except for the variable of integration x
im_int_ref = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_p,n_ref)),0,int_cutoff,limit = 1000000)[0]
im_int_p = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_p,n)),0,int_cutoff,limit = 1000000)[0]
im_int_m = intgt.quad(lambda x: np.imag(self.integrand(
x,mu_m,n)),0,int_cutoff,limit = 1000000)[0]
# Same as above but these are the imaginary parts of the integrals.
int_ref = re_int_ref + 1j*im_int_ref
int_p = re_int_p + 1j*im_int_p
int_m = re_int_m + 1j*im_int_m
# All the king's horses and all the king's men putting together our integrals
# again. :)
prefactor = ((omega_nir +2*n*omega_thz)**2)/((omega_nir +2*n_ref*omega_thz)**2)
# This prefactor is the ratio of energy of the nth sideband to the reference
m_pre = (mu_m/mu_p)**2
# There is a term of mu/mu_ref in the eta expression. For the plus integral
# that ratio is 1 (mu_p is the reference mass), so it only multiplies eta_m.
eta_p = prefactor*(np.abs(int_p)**2)/(np.abs(int_ref)**2)
eta_m = prefactor*m_pre*(np.abs(int_m)**2)/(np.abs(int_ref)**2)
# Putting everthing together in one tasty little result
return eta_p,eta_m
def cost_func(self,gamma1,gamma2,observedSidebands,n_ref,Jexp,phi,beta,gc_fname,eta_folder):
'''
This will sum up a cost function that takes the difference between
the theory generated eta's and experimental scaled matrices
eta+/eta+_ref = |Jxx|^2
eta-/eta+_ref = |Jyy-Jxx/4|^2/|3/4|^2
The cost function is given as
Sqrt(|eta+(theory)-eta+(experiment)|^2 + |eta-(theory)-eta-(experiment)|^2)
Where the J elements have been scaled to the n_ref sideband (Jxx_nref)
This is designed to run over and over again as you try different
gamma values. On my (Joe) lab computer a single run takes ~300-400 sec.
The function keeps track of values by writing a file with iteration,
gamma1, gamma2, and cost for each run. This lets you keep track of the
results as you run.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength in kV/cm
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n_ref: Order of the reference integral which everything will be divided by
:Jexp: Scaled experimental Jones matrices in xy basis that will be compared
to the theoretical values. Pass in the not flattened way.
:phi: [100] to THz orientation, passed from the data array
:beta: experimentally measured g3/g2 ratio
:gc_fname: File name for the gammas and cost results
:eta_folder: Folder name for the eta lists to go in
:i: iteration, for parallel processing output purposes
Returns:
:costs: Cumulative cost function for that run
:i: iteration, for parallel processing output purposes
:eta_list: list of etas for each sideband order of the form
sb order | eta_plus theory | eta_plus experiment | eta_minus theory | eta_minus experiment
.
.
.
'''
costs = 0 # initialize the costs for this run
t_start = time.time() # keeps track of the time the run started.
eta_list = np.array([0,0,0,0,0])
dephase = self.dephase
lambda_nir = self.nir_wl
omega_nir = 2.998*10**8/(self.nir_wl) *2*np.pi
w_thz = self.Thz_w
F = self.F
for idx in np.arange(len(observedSidebands)):
n = observedSidebands[idx]
eta_p,eta_m = self.normalized_integrals(gamma1,gamma2,n,n_ref,phi,beta)
# calculates eta from the normalized_integrals function
prefactor = ((omega_nir +2*n*w_thz)**2)/((omega_nir +2*n_ref*w_thz)**2)
#Have to hard code the index of the 16th order sideband (8,10,12,14,16)
exp_p = prefactor*np.abs(Jexp[0,0,idx])**2
exp_m = prefactor*np.abs(Jexp[1,1,idx]-(1/4)*Jexp[0,0,idx])**2*(9/16)
# calculates the experimental plus and minus values
# 1/9/20 added prefactor to these bad boys
costs += np.sqrt(np.abs((exp_p-eta_p)/(exp_p))**2 + np.abs((exp_m-eta_m)/(exp_m))**2)
# Adds the cost function for this sideband to the overall cost function
# 1/8/20 Changed cost function to be the difference of the ratio of the two etas
# 01/30/20 Changed cost function to be relative difference of eta_pm
this_etas = np.array([n,eta_p,exp_p,eta_m,exp_m])
eta_list = np.vstack((eta_list,this_etas))
self.iterations += 1
# Ups the iterations counter
g1rnd = round(gamma1,3)
g2rnd = round(gamma2,3)
costs_rnd = round(costs,5)
# Round gamma1,gamma2,costs to remove float rounding bullshit
g_n_c = str(self.iterations)+','+str(g1rnd)+','+str(g2rnd)+','+str(costs)+'\n'
# String version of iteration, gamma1, gamma2, cost with a new line
gc_file = open(gc_fname,'a') #opens the gamma/cost file in append mode
gc_file.write(g_n_c) # writes the new line to the file
gc_file.close() # closes the file
etas_header = "#\n"*95
etas_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
etas_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
etas_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
etas_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
etas_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
etas_header += 'sb order, eta_plus theory, eta_plus experiment, eta_minus theory, eta_minus experiment \n'
etas_header += 'unitless, unitless, unitless, unitless, unitless \n'
# Creates an Origin-friendly header for the etas
# eta_fname = 'eta_g1_' + str(g1rnd) + '_g2_' + str(g2rnd) + r'.txt'
eta_fname = f'eta_g1_{g1rnd}_g2_{g2rnd}.txt'
eta_path = os.path.join(eta_folder,eta_fname)
#creates the file for this run of etas
eta_list = eta_list[1:,:]
np.savetxt(eta_path,eta_list, delimiter = ',',
header = etas_header, comments = '') #save the etas for these gammas
t_taken = round(time.time()-t_start,5) # calculates time taken for this run
print(" ")
print("---------------------------------------------------------------------")
print(" ")
print(f'Iteration number {self.iterations} / {self.max_iter} done')
print('for gamma1, gamma2 = ',g1rnd,g2rnd)
print('Cost function is = ',costs_rnd)
print('This calculation took ',t_taken,' seconds')
print(" ")
print("---------------------------------------------------------------------")
print(" ")
# These print statements help you keep track of what's going on as this
# goes on and on and on.
return costs
def Q_cost_func(self,gamma1,gamma2,Gamma_Sidebands,Texp,crystalAngles,
beta,gc_fname,Q_folder,ThetaSweep = True):
'''
This compares the T matrix components measured by experiment to the
theoretical Q ratios calculated from the two band model.
'''
costs = 0 # Initialize the costs
imcost = 0
recost = 0
t_start = time.time()
Q_list = np.array([0,0,0,0,0])
if ThetaSweep:
for idx in np.arange(len(crystalAngles)):
n = Gamma_Sidebands
phi = float(crystalAngles[idx])
phi_rad = phi*np.pi/180
theta = phi_rad + np.pi/4
#Calculate the Theoretical Q Ratio
QRatioRe, QRatioIm = self.Q_normalized_integrals(gamma1,gamma2,n,phi_rad,beta)
QRatio = QRatioRe + 1j*QRatioIm
#Prefactor for experimental T Matrix algebra
PHI = 5/(3*(np.sin(2*theta) - 1j*beta*np.cos(2*theta)))
THETA = 1/(np.sin(2*theta)-1j*beta*np.cos(2*theta))
ExpQ = (Texp[idx,0,0]+PHI*Texp[idx,0,1])/(Texp[idx,0,0]-THETA*Texp[idx,0,1])
costs += np.abs((ExpQ - QRatio)/QRatio)
imcost += np.abs((np.imag(ExpQ)-QRatioIm)/QRatioIm)
recost += np.abs((np.real(ExpQ)-QRatioRe)/QRatioRe)
this_Qs = np.array([phi,np.real(ExpQ),np.imag(ExpQ),QRatioRe,QRatioIm])
Q_list = np.vstack((Q_list,this_Qs))
else:
for idx in np.arange(len(Gamma_Sidebands)):
n = Gamma_Sidebands[idx]
phi = float(crystalAngles)
phi_rad = phi*np.pi/180
theta = phi_rad + np.pi/4
#Calculate the Theoretical Q Ratio
QRatioRe, QRatioIm = self.Q_normalized_integrals(gamma1,gamma2,n,phi_rad,beta)
QRatio = QRatioRe + 1j*QRatioIm
#Prefactor for experimental T Matrix algebra
PHI = 5/(3*(np.sin(2*theta) - 1j*beta*np.cos(2*theta)))
THETA = 1/(np.sin(2*theta)-1j*beta*np.cos(2*theta))
ExpQ = (Texp[0,0,idx]+PHI*Texp[0,1,idx])/(Texp[0,0,idx]-THETA*Texp[0,1,idx])
costs += np.abs((ExpQ - QRatio)/QRatio)
imcost += np.abs((np.imag(ExpQ)-QRatioIm)/QRatioIm)
recost += np.abs((np.real(ExpQ)-QRatioRe)/QRatioRe)
this_Qs = np.array([n,np.real(ExpQ),np.imag(ExpQ),QRatioRe,QRatioIm])
Q_list = np.vstack((Q_list,this_Qs))
self.iterations += 1
g1rnd = round(gamma1,3)
g2rnd = round(gamma2,3)
costs_rnd = round(costs,5)
imcost_rnd = round(imcost,5)
recost_rnd = round(recost,5)
g_n_c = str(self.iterations) + ',' + str(g1rnd) + ',' + str(g2rnd) + ',' + str(costs) + ',' + str(imcost) + ',' + str(recost) + '\n'
gc_file = open(gc_fname,'a')
gc_file.write(g_n_c)
gc_file.close()
# Origin Header
Q_header = "#\n"*94
Q_header += f'# Crystal Angle: {phi} Deg \n'
Q_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
Q_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
Q_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
Q_header += f'# THz Frequency {self.Thz_w/(10**9 *2*np.pi)} GHz \n'
Q_header += f'# NIR Wavelength {self.nir_wl/(10**(-9))} nm \n'
Q_header += 'Crystal Angles, QRatio Experiment Real, Imaginary, QRatio Theory Real, Imaginary\n'
Q_header += 'Degrees, unitless, unitless, unitless, unitless \n'
#Eta File Name
Q_fname = f'Q_g1_{g1rnd}_g2_{g2rnd}.txt'
Q_path = os.path.join(Q_folder,Q_fname)
Q_list = Q_list[1:,:]
np.savetxt(Q_path,Q_list, delimiter = ',',
header = Q_header, comments = '')
t_taken = round(time.time() - t_start,5)
print(" ")
print("---------------------------------------------------------------------")
print(" ")
print(f'Iteration number {self.iterations} / {self.max_iter} done')
print('for gamma1, gamma2 = ',g1rnd,g2rnd)
print('Cost function is = ',costs_rnd)
print('Imaginary Cost function is =',imcost_rnd)
print('Real Cost function is =',recost_rnd)
print('This calculation took ',t_taken,' seconds')
print(" ")
print("---------------------------------------------------------------------")
print(" ")
return costs,imcost,recost
def gamma_sweep(self,gamma1_array,gamma2_array,observedSidebands,n_ref,
Jexp,crystalAngle,gc_fname,eta_folder,save_results = True):
'''
This function calculates the integrals and cost function for an array of
gamma1 and gamma2. You can pass any array of gamma1 and gamma2 values and
this will return the costs for all those values. Lets you avoid the
weirdness of fitting algorithms.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n: Order of sideband for this integral
:n_ref: Order of the reference integral which everything will be divided by
:observedSidebands: List or array of observed sidebands. The code will
loop over sidebands in this array.
:Jexp: Scaled experimental Jones matrices in xy basis that will be compared
to the theoretical values. Pass in the not flattened way.
:gc_fname: File name for the gammas and cost functions, include .txt
:eta_folder: Folder name for the eta lists to go in
Returns: gamma_cost_array of form
gamma1 | gamma2 | cost |
. . .
. . .
. . .
This is just running cost_func over and over again essentially.
'''
dephase = self.dephase
lambda_nir = self.nir_wl
w_thz = self.Thz_w
F = self.F
phi = crystalAngle
self.max_iter = len(gamma1_array)*len(gamma2_array)
self.iterations = 0
gamma_cost_array = np.array([0,0,0])
# Initialize the gamma cost array
gammas_costs = np.array([])
# This is just for initializing the gamma costs file
gammacosts_header = "#\n"*95
gammacosts_header += f'# Dephasing: {self.dephase/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
gammacosts_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
gammacosts_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
gammacosts_header += 'Iteration, Gamma1, Gamma2, Cost Function \n'
gammacosts_header += 'unitless, unitless, unitless, unitless \n'
# Creates an Origin-friendly header for gamma costs
np.savetxt(gc_fname, gammas_costs, delimiter = ',',
header = gammacosts_header, comments = '')
# create the gamma cost file
data = [gamma1_array,gamma2_array]
for gamma1 in gamma1_array:
for gamma2 in gamma2_array:
cost = self.cost_func(gamma1,gamma2,observedSidebands,
n_ref,Jexp, phi, 1.42, gc_fname,eta_folder)
this_costngamma = np.array([gamma1,gamma2,cost])
gamma_cost_array = np.vstack((gamma_cost_array,this_costngamma))
# calculates the cost for each gamma1/2 and adds the gamma1, gamma2,
# and cost to the overall array.
# gamma_cost_array = gamma_cost_final[1:,:]
# if save_results:
# sweepcosts_header = "#\n"*100
# sweepcosts_header += 'Gamma1, Gamma2, Cost Function \n'
# sweepcosts_header += 'unitless, unitless, unitless \n'
#
# sweep_name = 'sweep_costs_' + gc_fname
# np.savetxt(sweep_name,gamma_cost_array,delimiter = ',',
# header = sweepcosts_header, comments = '')
# Ok so right now I think I am going to get rid of saving this file
# since it has the same information as the file that is saved in
# cost_func but that file is updated every interation where this
# one only works at the end. So if the program gets interrupted
# the other one will still give you some information.
return gamma_cost_array
def gamma_th_sweep(self,gamma1_array,gamma2_array,n,crystalAngles,
Texp,gc_fname,Q_folder,ThetaSweep = True, save_results = True):
'''
This function calculates the integrals and cost function for an array of
gamma1 and gamma2. You can pass any array of gamma1 and gamma2 values and
this will return the costs for all those values. This lets you avoid the
weirdness of fitting algorithms.
Parameters:
:dephase: dephasing rate. Should be a few meV, ~the width of the exciton
absorption peak (according to Qile). Should be float
:lambda_nir: wavelength of NIR in nm
:w_thz: frequency of fel
:F: THz field strength
:gamma1: Gamma1 parameter in the luttinger hamiltonian.
Textbook value of 6.85
:gamma2: Gamma2 parameter in the luttinger hamiltonian.
Textbook value of 2.1
:n: Order of sideband for this integral
:n_ref: Order of the reference integral which everything will be divided by
:observedSidebands: List or array of observed sidebands. The code will
loop over sidebands in this array.
:Jexp: Scaled experimental Jones matrices in xy basis that will be compared
to the theoretical values. Pass in the not flattened way.
:gc_fname: File name for the gammas and cost functions, include .txt
:eta_folder: Folder name for the eta lists to go in
Returns: gamma_cost_array of form
gamma1 | gamma2 | cost |
. . .
. . .
. . .
This is just running cost_func over and over again essentially.
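Example (illustrative sketch; ``spec`` stands for an instance of this class and
the angles, Texp, and file/folder names are hypothetical placeholders):
    g1s = np.linspace(5.0, 8.0, 7)
    g2s = np.linspace(1.5, 3.0, 7)
    costs = spec.gamma_th_sweep(g1s, g2s, n=10, crystalAngles=angles,
        Texp=Texp, gc_fname='gamma_costs.txt', Q_folder='Q_lists')
    # costs[1:, :] has columns [gamma1, gamma2, cost, imaginary cost, real cost]
    # (the first row is just the zero initializer)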
'''
#Hard Coding the experimental g3/g2 factor
beta = 1.42
self.iterations = 0
self.max_iter = len(gamma1_array)*len(gamma2_array)
gamma_cost_array = np.array([0,0,0,0,0])
# Initialize the gamma cost array
gammas_costs = np.array([])
# This is just for initializing the gamma costs file
gammacosts_header = "#\n"*95
gammacosts_header += f'# Detuning: {self.detune/(1.602*10**(-22))} eV \n'
gammacosts_header += f'# Field Strength: {self.F/(10**5)} kV/cm \n'
gammacosts_header += f'# THz Frequency: {self.Thz_w/(10**9 * 2*np.pi)} GHz \n'
gammacosts_header += f'# NIR Wavelength: {self.nir_wl/(10**(-9))} nm \n'
gammacosts_header += 'Iteration, Gamma1, Gamma2, Cost Function, Imaginary, Real \n'
gammacosts_header += 'unitless, unitless, unitless, unitless, unitless, unitless \n'
# Creates an Origin-friendly header for the gamma costs file
np.savetxt(gc_fname, gammas_costs, delimiter = ',',
header = gammacosts_header, comments = '')
# create the gamma cost file
for gamma1 in gamma1_array:
for gamma2 in gamma2_array:
cost,imcost,recost = self.Q_cost_func(gamma1,gamma2,n,
Texp,crystalAngles,beta,gc_fname,Q_folder,ThetaSweep)
this_costngamma = np.array([gamma1,gamma2,cost,imcost,recost])
gamma_cost_array = np.vstack((gamma_cost_array,this_costngamma))
# calculates the cost for each gamma1/2 and adds the gamma1, gamma2,
# and cost to the overall array.
return gamma_cost_array
####################
# Fitting functions
####################
def gauss(x, *p):
"""
Gaussian fit function.
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, y offset] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
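Example (illustrative sketch; x_data/y_data stand for your spectrum arrays and
assumes curve_fit from scipy.optimize, as used elsewhere in this module):
    p0 = [1.60, 100., 5e-4, 0.]  # guesses for [mean, area, width, y offset]
    popt, pcov = curve_fit(gauss, x_data, y_data, p0=p0)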
"""
mu, A, sigma, y0 = p
return (A / sigma) * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)) + y0
def lingauss(x, *p):
"""
Gaussian fit function with a linear offset
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant offset of background, slope of background] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
"""
mu, A, sigma, y0, m = p
return (A / sigma) * np.exp(-(x - mu) ** 2 / (2. * sigma ** 2)) + y0 + m * x
def lorentzian(x, *p):
"""
Lorentzian fit with constant offset
:param x: The independent variable
:type x: np.array, or int or float
:param p: [mean, area, width (HWHM), constant offset of background] to be unpacked
:type p: list of floats or ints
:return: Depends on x, returns another np.array or float or int
:rtype: type(x)
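Example (illustrative sketch, same pattern as the Gaussian fit above; x_data
and y_data are placeholders for your spectrum arrays):
    p0 = [1.60, 100., 5e-4, 0.]  # guesses for [mean, area, HWHM, y offset]
    popt, pcov = curve_fit(lorentzian, x_data, y_data, p0=p0)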
"""
mu, A, gamma, y0 = p
return (A / np.pi) * (gamma / ((x - mu) ** 2 + gamma ** 2)) + y0
def background(x, *p):
"""
Arbitrary pink-noise model background data for absorbance FFT
for the intention of replacing a peak in the FFT
with the background
:param x: The independent variable
:type x: np.array, or int or float
:param p: [proportionality factor, exponent of power law]
:type p: list of floats or ints
:return: Depends on x
:rtype: type(x)
"""
a, b = p
return a * (1 / x) ** b
def gaussWithBackground(x, *p):
"""
Gaussian with pink-noise background function
:param x: independent variable
:type x: np.array, or int or float
:param p: [mean, area, width, constant background, proportionality of power law, exponent of power law]
:type p: list of floats or ints
:return: Depends on x
:rtype: type(x)
"""
pGauss = p[:4]
a, b = p[4:]
return gauss(x, *pGauss) + background(x, a, b)
####################
# Collection functions
####################
def hsg_combine_spectra(spectra_list, verbose = False, **kwargs):
"""
This function is all about smooshing different parts of the same hsg
spectrum together. It takes a list of HighSidebandCCD spectra and turns the
zeroth spec_step into a FullHighSideband object. It then uses the function
stitch_hsg_dicts over and over again for the smooshing.
Input:
spectra_list = list of HighSidebandCCD objects that have sideband spectra
larger than the spectrometer can see.
Returns:
good_list = A list of FullHighSideband objects that have been combined as
much as can be.
:param spectra_list: randomly-ordered list of HSG spectra, some of which can be stitched together
:type spectra_list: List of HighSidebandCCD objects
kwargs get passed on to add_CCD when spectra are stitched together
:return: fully combined list of full hsg spectra. No PMT business yet.
:rtype: list of FullHighSideband
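Example (illustrative sketch; ``spec_list`` is assumed to be a list of
already-processed HighSidebandCCD objects):
    full_spectra = hsg_combine_spectra(spec_list, verbose=False)
    for full in full_spectra:
        print(full.parameters["series"], full.sb_results.shape)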
"""
good_list = []
spectra_list = spectra_list.copy()
spectra_list.sort(key=lambda x: x.parameters["spec_step"])
# keep a dict for each series' spec step
# This allows you to combine spectra whose spec steps
# change by values other than 1 (2, if you skip, or 0.5 if you
# decide to insert things, or arbitary strings)
spec_steps = {}
for elem in spectra_list:
# if verbose:
# print "Spec_step is", elem.parameters["spec_step"]
current_steps = spec_steps.get(elem.parameters["series"], [])
current_steps.append(elem.parameters["spec_step"])
spec_steps[elem.parameters["series"]] = current_steps
if verbose:
print("I found these spec steps for each series:")
print("\n\t".join("{}: {}".format(*ii) for ii in spec_steps.items()))
# sort the list of spec steps
for series in spec_steps:
spec_steps[series].sort()
same_freq = lambda x,y: x.parameters["fel_lambda"] == y.parameters["fel_lambda"]
for index in range(len(spectra_list)):
try:
temp = spectra_list.pop(0)
if verbose:
print("\nStarting with this guy", temp, "\n")
except:
break
good_list.append(FullHighSideband(temp))
counter = 1
temp_list = list(spectra_list)
for piece in temp_list:
if verbose:
print("\tchecking this spec_step", piece.parameters["spec_step"], end=' ')
print(", the counter is", counter)
if not same_freq(piece, temp):
if verbose:
print("\t\tnot the same fel frequencies ({} vs {})".format(piece.parameters["fel_lambda"], temp.parameters["fel_lambda"]))
continue
if temp.parameters["series"] == piece.parameters["series"]:
if piece.parameters["spec_step"] == spec_steps[temp.parameters["series"]][counter]:
if verbose:
print("I found this one", piece)
counter += 1
good_list[-1].add_CCD(piece, verbose=verbose, **kwargs)
spectra_list.remove(piece)
else:
print("\t\tNot the right spec step?", type(piece.parameters["spec_step"]))
else:
if verbose:
print("\t\tNot the same series ({} vs {}".format(
piece.parameters["series"],temp.parameters["series"]))
good_list[-1].make_results_array()
return good_list
def hsg_combine_spectra_arb_param(spectra_list, param_name="series", verbose = False):
"""
This function is all about smooshing different parts of the same hsg
spectrum together. It takes a list of HighSidebandCCD spectra and turns the
zeroth spec_step into a FullHighSideband object. It then uses the function
stitch_hsg_dicts over and over again for the smooshing.
This is different from hsg_combine_spectra in that you pass which
criterion distinguishes the files to be the "same". Since it can be any arbitrary
value, things won't be exactly the same (field strength will never be identical
between images). It will start with the first (lowest) spec step, then compare
combinations of spectra from the later spec steps; whichever combination has the
smallest overall spread in the chosen parameter gets stitched together.
Input:
spectra_list = list of HighSidebandCCD objects that have sideband spectra
larger than the spectrometer can see.
Returns:
good_list = A list of FullHighSideband objects that have been combined as
much as can be.
:param spectra_list: randomly-ordered list of HSG spectra, some of which can be stitched together
:type spectra_list: list of HighSidebandCCD
:return: fully combined list of full hsg spectra. No PMT business yet.
:rtype: list of FullHighSideband
"""
if not spectra_list:
raise RuntimeError("Passed an empty spectra list!")
if isinstance(param_name, list):
# if you pass two things because the param you want
# is in a dict (e.g. field strength has mean/std)
# do it that way
param_name_list = list(param_name)
paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
param_name = param_name[0]
elif isinstance(spectra_list[0].parameters[param_name], dict):
paramGetter = lambda x: x.parameters[param_name]["mean"]
else:
paramGetter = lambda x: x.parameters[param_name]
good_list = []
spectra_list.sort(key=lambda x: x.parameters["spec_step"])
# keep a dict for each spec step.
spec_steps = {}
for elem in spectra_list:
if verbose:
print("Spec_step is", elem.parameters["spec_step"])
current_steps = spec_steps.get(elem.parameters["spec_step"], [])
current_steps.append(elem)
spec_steps[elem.parameters["spec_step"]] = current_steps
# Next, loop over all of the elements. For each element, if it has not
# already been added to a spectra, look at all of the combinations from
# other spec steps to figure out which has the smallest overall deviation
# to make a new full spectrum
good_list = []
already_added = set()
for elem in spectra_list:
if elem in already_added: continue
already_added.add(elem)
good_list.append(FullHighSideband(elem))
other_spec_steps = [v for k, v in list(spec_steps.items()) if
k != good_list[-1].parameters["spec_step"]]
min_distance = np.inf
cur_value = paramGetter(good_list[-1])
best_match = None
for comb in itt.product(*other_spec_steps):
new_values = list(map(paramGetter, comb))
all_values = new_values + [cur_value]
if np.std(all_values) < min_distance:
min_distance = np.std(all_values)
best_match = list(comb)
if best_match is None:
raise RuntimeError("No matches found. Empty lists passed?")
best_values = list(map(paramGetter, best_match))
for spec in best_match:
print("Adding new spec step\n\tStarted with spec={},series={}".format(
good_list[-1].parameters["spec_step"],good_list[-1].parameters["series"]
))
print("\tAdding with spec={},series={}\n".format(
spec.parameters["spec_step"],
spec.parameters["series"]
))
print("\n\nfirst SBs:\n", good_list[-1].sb_results)
print("\n\nsecond SBs:\n", spec.sb_results)
good_list[-1].add_CCD(spec, True)
print("\n\nEnding SBs:\n", good_list[-1].sb_results)
already_added.add(spec)
best_match.append(good_list[-1])
best_values.append(cur_value)
new_value = np.mean(best_values)
new_std = np.std(best_values)
if isinstance(good_list[-1].parameters[param_name], dict):
best_values = np.array([x.parameters[param_name]["mean"] for x in best_match])
best_std = np.array([x.parameters[param_name]["std"] for x in best_match])
new_value = np.average(best_values, weights = best_std)
new_std = np.sqrt(np.average((best_values-new_value)**2, weights=best_std))
good_list[-1].parameters[param_name] = {
"mean": new_value,
"std": new_std
}
return good_list
def pmt_sorter(folder_path, plot_individual = True):
"""
This function will be fed a folder with a bunch of PMT data files in it.
The folder should contain a bunch of spectra with at least one sideband in
them, each differing by the series entry in the parameters dictionary.
This function will return a list of HighSidebandPMT objects.
:param folder_path: Path to a folder containing a bunch of PMT data, can be
part of a parameter sweep
:type folder_path: str
:param plot_individual: Whether to plot each sideband itself
:return: A list of all the possible hsg pmt spectra, organized by series tag
:rtype: list of HighSidebandPMT
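Example (illustrative sketch; the folder path is hypothetical):
    pmt_spectra = pmt_sorter('Data/PMT/power_sweep', plot_individual=False)
    # each element is a HighSidebandPMT whose sidebands have been collected
    # by series and already run through process_sidebands()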
"""
file_list = glob.glob(os.path.join(folder_path, '*[0-9].txt'))
pmt_list = []
plot_sb = lambda x: None
if plot_individual:
plt.figure("PMT data")
def plot_sb(spec):
spec = copy.deepcopy(spec)
spec.process_sidebands()
elem = spec.sb_dict[spec.initial_sb]
plt.errorbar(elem[:, 0], elem[:, 1], elem[:, 2],
marker='o',
label="{} {}, {}.{} ".format(
spec.parameters["series"], spec.initial_sb,
spec.parameters["pm_hv"],
't' if spec.parameters.get("photon counted", False) else 'f')
)
for sb_file in file_list:
temp = HighSidebandPMT(sb_file)
plot_sb(temp)
try:
for pmt_spectrum in pmt_list: # pmt_spectrum is a pmt object
if temp.parameters['series'] == pmt_spectrum.parameters['series']:
pmt_spectrum.add_sideband(temp)
break
else: # this will execute IF the break was NOT called
pmt_list.append(temp)
except:
pmt_list.append(temp)
# for sb_file in file_list:
# with open(sb_file,'rU') as f:
# param_str = ''
# line = f.readline()
# line = f.readline()
# while line[0] == '#':
# param_str += line[1:]
# line = f.readline()
#
# parameters = json.loads(param_str)
# try:
# for pmt_spectrum in pmt_list: # pmt_spectrum is a pmt object?
# if parameters['series'] == pmt_spectrum.parameters['series']:
# pmt_spectrum.add_sideband(sb_file)
# break
# else: # this will execute IF the break was NOT called
# pmt_list.append(HighSidebandPMT(sb_file))
# except:
# pmt_list.append(HighSidebandPMT(sb_file))
for pmt_spectrum in pmt_list:
pmt_spectrum.process_sidebands()
return pmt_list
def stitch_abs_results(main, new):
raise NotImplementedError
def hsg_combine_qwp_sweep(path, loadNorm = True, save = False, verbose=False,
skipOdds = True):
"""
Given a path to data taken from rotating the QWP (doing polarimetry),
process the data (fit peaks), and parse it into a matrix of sb strength vs
QWP angle vs sb number.
By default, saves the file into "Processed QWP Dependence"
Return should be passed directly into fitting
-1 | SB1 | SB1 | SB2 | SB2 | ... | ... | SBn | SBn |
angle1 | SB Strength | SB err | SB Strength | SB Err |
angle2 | ... | . |
.
.
.
:param path: Path to load
:param loadNorm: if true, load the normalized data
:param save: Save the processed file or not
:param verbose:
:param skipOdds: Passed on to save sweep; determine whether or not to save
odd orders. Generally, odds are artifacts and I don't want
them messing up the data, so default to True.
:return:
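Example (illustrative sketch; the path is hypothetical):
    laserParams, sbData = hsg_combine_qwp_sweep('Data/QWP sweep', save=False)
    sbFits, sbFitsDict = proc_n_fit_qwp_data(sbData, laserParams=laserParams,
                                             vertAnaDir=True)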
"""
def getData(fname):
"""
Helper function for loading the data and getting the header information for incident NIR stuff
:param fname:
:return:
"""
if isinstance(fname, str):
if loadNorm:
ending = "_norm.txt"
else:
ending = "_snip.txt"
header = ''
with open(os.path.join("Processed QWP Dependence", fname + ending)) as fh:
ln = fh.readline()
while ln[0] == '#':
header += ln[1:]
ln = fh.readline()
data = np.genfromtxt(os.path.join("Processed QWP Dependence", fname + ending),
delimiter=',', dtype=str)
if isinstance(fname, io.BytesIO):
header = b''
ln = fname.readline()
while ln.decode()[0] == '#':
header += ln[1:]
ln = fname.readline()
fname.seek(0)
data = np.genfromtxt(fname,
delimiter=',', dtype=str)
header = json.loads(header)
return data, float(header["lAlpha"]), float(header["lGamma"]), float(header["nir"]), float(header["thz"])
######### End getData
try:
sbData, lAlpha, lGamma, nir, thz = getData(path)
except:
# Do the processing on all the files
specs = proc_n_plotCCD(path, keep_empties=True, verbose=verbose)
for sp in specs:
try:
sp.parameters["series"] = round(float(sp.parameters["rotatorAngle"]), 2)
except KeyError:
# Old style of formatting
sp.parameters["series"] = round(float(sp.parameters["detectorHWP"]), 2)
specs = hsg_combine_spectra(specs, ignore_weaker_lowers=False)
if not save:
# If you don't want to save them, set everything up for doing Bytes objects
# to replacing saving files
full, snip, norm = io.BytesIO(), io.BytesIO(), io.BytesIO()
if "nir_pola" not in specs[0].parameters:
# in the olden days. Force them. Hopefully making them outside of ±360
# makes it obvious
specs[0].parameters["nir_pola"] = 361
specs[0].parameters["nir_polg"] = 361
keyName = "rotatorAngle"
if keyName not in specs[0].parameters:
# from back before I changed the name
keyName = "detectorHWP"
save_parameter_sweep(specs, [full, snip, norm], None,
keyName, "deg", wanted_indices=[3, 4],
header_dict={
"lAlpha": specs[0].parameters["nir_pola"],
"lGamma": specs[0].parameters["nir_polg"],
"nir": specs[0].parameters["nir_lambda"],
"thz": specs[0].parameters["fel_lambda"], },
only_even=skipOdds)
if loadNorm:
sbData, lAlpha, lGamma, nir, thz = getData(norm)
else:
sbData, lAlpha, lGamma, nir, thz = getData(snip)
else:
save_parameter_sweep(specs, os.path.basename(path), "Processed QWP Dependence",
"rotatorAngle", "deg", wanted_indices=[3, 4],
header_dict={
"lAlpha": specs[0].parameters["nir_pola"],
"lGamma": specs[0].parameters["nir_polg"],
"nir": specs[0].parameters["nir_lambda"],
"thz": specs[0].parameters["fel_lambda"], },
only_even=skipOdds)
sbData, lAlpha, lGamma, nir, thz = getData(os.path.basename(path))
laserParams = {
"lAlpha": lAlpha,
"lGamma": lGamma,
"nir": nir,
"thz": thz
}
# get which sidebands were found in this data set
# first two rows are origin header, second is sideband number
# (and empty strings, which is why the "if ii" below, to prevent
# ValueErrors on int('').
foundSidebands = np.array(sorted([float(ii) for ii in set(sbData[2]) if ii]))
# Remove first 3 rows, which are strings for origin header, and cast it to floats
sbData = sbData[3:].astype(float)
# double the sb numbers (to account for sb strength/error) and add a dummy
# number so the array is the same shape
foundSidebands = np.insert(foundSidebands, range(len(foundSidebands)), foundSidebands)
foundSidebands = np.insert(foundSidebands, 0, -1)
return laserParams, np.row_stack((foundSidebands, sbData))
def makeCurve(eta, isVertical):
"""
:param eta: QWP retardance at the wavelength
:return:
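:param isVertical: True if the analyzer is vertical, False if horizontal
Returns a function analyzerCurve(angle, S0, S1, S2, S3) meant to be handed to
curve_fit, as is done in proc_n_fit_qwp_data. Illustrative sketch (angles and
sbStrengths are placeholders for measured data):
    fitCurve = makeCurve(0.25, True)  # quarter-wave retardance, vertical analyzer
    p, pcov = curve_fit(fitCurve, angles, sbStrengths, p0=[1, 1, 0, 0])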
"""
cosd = lambda x: np.cos(x * np.pi / 180)
sind = lambda x: np.sin(x * np.pi / 180)
eta = eta * 2 * np.pi
if isVertical:
# vertical polarizer
def analyzerCurve(x, *S):
S0, S1, S2, S3 = S
return S0-S1/2*(1+np.cos(eta)) \
+ S3*np.sin(eta)*sind(2*x) \
+ S1/2*(np.cos(eta)-1)*cosd(4*x) \
+ S2/2*(np.cos(eta)-1)*sind(4*x)
else:
# horizontal polarizer
def analyzerCurve(x, *S):
S0, S1, S2, S3 = S
return S0+S1/2*(1+np.cos(eta)) \
- S3*np.sin(eta)*sind(2*x) \
+ S1/2*(1-np.cos(eta))*cosd(4*x) \
+ S2/2*(1-np.cos(eta))*sind(4*x)
return analyzerCurve
def proc_n_fit_qwp_data(data, laserParams = dict(), wantedSBs = None, vertAnaDir = True, plot=False,
save = False, plotRaw = lambda sbidx, sbnum: False, series = '', eta=None, fourier = True,
**kwargs):
"""
Fit a set of sideband data vs QWP angle to get the stoke's parameters
:param data: data in the form of the return of hsg_combine_qwp_sweep
:param laserParams: dictionary of the parameters of the laser, the angles and frequencies. See function for
expected keys. I don't think the errors are used (except for plotting?), or the wavelengths (but
left in for potential future use (wavelength dependent stuff?))
:param wantedSBs: List of the wanted sidebands to fit out.
:param vertAnaDir: direction of the analyzer. True if vertical, false if horizontal.
:param plot: True/False to plot alpha/gamma/dop. Alternatively, a list of "a", "g", "d" to only plot selected ones
:param save: filename to save the files. Accepts BytesIO
:param plotRaw: callable that takes an index of the sb and sb number, returns true to plot the raw curve
:param series: a string to be put in the header for the origin files
:param eta: a function to call to calculate the desired retardance. Input will be the SB order.
:param fourier: Will use Fourier analysis instead of a fit function if True
if saveStokes is in kwargs and False, it will not save the stokes parameters, since I rarely actually use them.
:return:
"""
defaultLaserParams = {
"lAlpha": 90,
"ldAlpha": 0.2,
"lGamma": 0.0,
"ldGamma": 0.2,
"lDOP": 1,
"ldDOP": 0.02,
"nir": 765.7155,
"thz": 21.1
}
defaultLaserParams.update(laserParams)
lAlpha, ldAlpha, lGamma, ldGamma, lDOP, ldDOP = defaultLaserParams["lAlpha"], \
defaultLaserParams["ldAlpha"], \
defaultLaserParams["lGamma"], \
defaultLaserParams["ldGamma"], \
defaultLaserParams["lDOP"], \
defaultLaserParams["ldDOP"]
allSbData = data
angles = allSbData[1:, 0]
# angles += -5
# print("="*20)
# print("\n"*3)
# print(" WARNING")
# print("\n"*3)
# print("ANGLES HAVE BEEN MANUALLY OFFEST IN proc_n_fit_qwp_data")
# print("\n"*3)
# print("="*20)
allSbData = allSbData[:, 1:] # trim out the angles
if wantedSBs is None:
# set to get rid of duplicates, 1: to get rid of the -1 used for
# getting arrays the right shape
wantedSBs = set(allSbData[0, 1:])
if eta is None:
"""
It might be easier for the end user to do this by passing eta(wavelength) instead of eta(sborder),
but then this function would need to carry around wavelengths, which is extra work. It could convert
between NIR/THz wavelengths to SB order, but it's currently unclear whether you'd rather use what the WS6
claims, or what the sidebands say, and you'd probably want to take the extra step to ensure the SB fit rseults
if using the spectromter wavelengths. In general, if you have a function as etal(wavelength), you'd probably
want to pass this as
eta = lambda x: etal(1239.84/(nirEv + x*THzEv))
assuming nirEv/THzEv are the photon energies of the NIR/THz.
"""
eta = lambda x: 0.25
# allow passing a flag to ignore odds. I think I generally do, so set it to
# default to True
skipOdds = kwargs.get("skip_odds", True)
# Make an array to keep all of the sideband information.
# Start it off by keeping the NIR information (makes for easier plotting into origin)
sbFits = [[0] + [-1] * 8 + [lAlpha, ldAlpha, lGamma, ldGamma, lDOP, ldDOP]]
# Also, for convenience, keep a dictionary of the information.
# This is when I feel like someone should look at porting this over to pandas
sbFitsDict = {}
sbFitsDict["S0"] = [[0, -1, -1]]
sbFitsDict["S1"] = [[0, -1, -1]]
sbFitsDict["S2"] = [[0, -1, -1]]
sbFitsDict["S3"] = [[0, -1, -1]]
sbFitsDict["alpha"] = [[0, lAlpha, ldAlpha]]
sbFitsDict["gamma"] = [[0, lGamma, ldGamma]]
sbFitsDict["DOP"] = [[0, lDOP, ldDOP]]
# Iterate over all sb data. Skip by 2 because error bars are included
for sbIdx in range(0, allSbData.shape[1], 2):
sbNum = allSbData[0, sbIdx]
if sbNum not in wantedSBs: continue
if skipOdds and sbNum%2: continue
# if verbose:
# print("\tlooking at sideband", sbNum)
sbData = allSbData[1:, sbIdx]
sbDataErr = allSbData[1:, sbIdx + 1]
if fourier:
# We want to do Fourier Analysis
# I've hard coded the maximum expected variance from QWP retardance to be
# 5 degrees (converted to radians bc of small angle approximation).
# Not sure how to deal with the fact that this method leaves no variance
# for the S3 parameter.
f0 = 0
f2 = 0
f4 = 0
df0 = 0
df2 = 0
df4 = 0
for k in range(0,16,1):
f0 = f0 + allSbData[k+1,sbIdx]
f2 = f2 + allSbData[k+1,sbIdx]*np.exp(-1j*np.pi*k/4)
f4 = f4 + allSbData[k+1,sbIdx]*np.exp(-1j*np.pi*k/2)
df0 = df0 + allSbData[k+1, sbIdx+1]
df2 = df2 + allSbData[k+1,sbIdx+1]*np.exp(-1j*np.pi*k/4)
df4 = df4 + allSbData[k+1,sbIdx+1]*np.exp(-1j*np.pi*k/2)
phi = 5*2*np.pi/180
# Generate the Stokes parameters from the Fourier Components
S0 = (f0 - 2*f4.real)/(np.pi)
S1 = 4*f4.real/(np.pi)
S2 = -4*f4.imag/(np.pi)
S3 = 2*f2.imag/(np.pi)
# For the Error Propagation, I say phi = 0 and dPhi = 2*phi (value set above)
d0 = np.sqrt(df0**2+2*(4*f4.real**2*phi**2+df4.real**2*(1+phi)**2*(1-1*phi)**2)/(1+phi)**4)/(2*np.pi)
d1 = np.sqrt((f4.real**2*phi**2+df4.real**2*phi**2)/(1+phi)**4)/(np.pi)
d2 = np.sqrt((f4.imag**2*phi**2+df4.imag**2*phi**2)/(1+phi)**4)/(np.pi)
d3 = 2*df2.imag/np.pi
# Calculate the alpha, gamma, DOP and errors from Stokes parameters
thisAlpha = np.arctan2(S2, S1) / 2 * 180. / np.pi
thisAlphaError = np.sqrt(d2 ** 2 * S1 ** 2 + d1 ** 2 * S2 ** 2) / (S1 ** 2 + S2 ** 2) * 180./np.pi
thisGamma = np.arctan2(S3, np.sqrt(S1 ** 2 + S2 ** 2)) / 2 * 180. / np.pi
thisGammaError = np.sqrt((d3 ** 2 * (S1 ** 2 + S2 ** 2) ** 2 + (d1 ** 2 * S1 ** 2 + d2 ** 2 * S2 ** 2) * S3 ** 2) / (
(S1 ** 2 + S2 ** 2) * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2)) *180. /np.pi
thisDOP = np.sqrt(S1 ** 2 + S2 ** 2 + S3 ** 2) / S0
thisDOPerror = np.sqrt(((d1 ** 2 * S0 ** 2 * S1 ** 2 + d0 ** 2 * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2 + S0 ** 2 * (
d2 ** 2 * S2 ** 2 + d3 ** 2 * S3 ** 2)) / (S0 ** 4 * (S1 ** 2 + S2 ** 2 + S3 ** 2))))
# Append The stokes parameters and errors to the dictionary output.
sbFitsDict["S0"].append([sbNum, S0, d0])
sbFitsDict["S1"].append([sbNum, S1, d1])
sbFitsDict["S2"].append([sbNum, S2, d2])
sbFitsDict["S3"].append([sbNum, S3, d3])
sbFitsDict["alpha"].append([sbNum, thisAlpha, thisAlphaError])
sbFitsDict["gamma"].append([sbNum, thisGamma, thisGammaError])
sbFitsDict["DOP"].append([sbNum, thisDOP, thisDOPerror])
toAppend = [sbNum, S0, d0, S1, d1, S2, d2, S3, d3, thisAlpha, thisAlphaError, thisGamma, thisGammaError, thisDOP, thisDOPerror]
sbFits.append(toAppend)
# Otherwise we will do the normal fit
else:
# try:
# p0 = sbFits[-1][1:8:2]
# except:
# p0 = [1, 1, 0, 0]
p0 = [1, 1, 0, 0]
etan = eta(sbNum)
try:
p, pcov = curve_fit(makeCurve(etan, vertAnaDir), angles, sbData, p0=p0)
except ValueError:
# This is getting tossed around, especially when looking at noisy data,
# especially with the laser line, and it's fitting erroneous values.
# Ideally, I should be cutting this out and not even returning them,
# but that's immediately causing
p = np.nan*np.array(p0)
pcov = np.eye(len(p))
if plot and plotRaw(sbIdx, sbNum):
# pg.figure("{}: sb {}".format(dataName, sbNum))
plt.figure("All Curves")
plt.errorbar(angles, sbData, sbDataErr, fmt='o-', label=f"{series}, {sbNum}")
# plt.plot(angles, sbData,'o-', label="Data")
fineAngles = np.linspace(angles.min(), angles.max(), 300)
# plt.plot(fineAngles,
# makeCurve(eta, "V" in dataName)(fineAngles, *p0), name="p0")
plt.plot(fineAngles,
makeCurve(etan, vertAnaDir)(fineAngles, *p))
# plt.show()
plt.ylim(0, 1)
plt.xlim(0, 360)
plt.ylabel("Normalized Intensity")
plt.xlabel("QWP Angle (θ)")
print(f"\t{series} {sbNum}, p={p}")
# get the errors
d = np.sqrt(np.diag(pcov))
thisData = [sbNum] + list(p) + list(d)
d0, d1, d2, d3 = d
S0, S1, S2, S3 = p
# reorder so errors are after values
thisData = [thisData[i] for i in [0, 1, 5, 2, 6, 3, 7, 4, 8]]
sbFitsDict["S0"].append([sbNum, S0, d0])
sbFitsDict["S1"].append([sbNum, S1, d1])
sbFitsDict["S2"].append([sbNum, S2, d2])
sbFitsDict["S3"].append([sbNum, S3, d3])
# append alpha value
thisData.append(np.arctan2(S2, S1) / 2 * 180. / np.pi)
# append alpha error
variance = (d2 ** 2 * S1 ** 2 + d1 ** 2 * S2 ** 2) / (S1 ** 2 + S2 ** 2) ** 2
thisData.append(np.sqrt(variance) * 180. / np.pi)
sbFitsDict["alpha"].append([sbNum, thisData[-2], thisData[-1]])
# append gamma value
thisData.append(np.arctan2(S3, np.sqrt(S1 ** 2 + S2 ** 2)) / 2 * 180. / np.pi)
# append gamma error
variance = (d3 ** 2 * (S1 ** 2 + S2 ** 2) ** 2 + (d1 ** 2 * S1 ** 2 + d2 ** 2 * S2 ** 2) * S3 ** 2) / (
(S1 ** 2 + S2 ** 2) * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2)
thisData.append(np.sqrt(variance) * 180. / np.pi)
sbFitsDict["gamma"].append([sbNum, thisData[-2], thisData[-1]])
# append degree of polarization
thisData.append(np.sqrt(S1 ** 2 + S2 ** 2 + S3 ** 2) / S0)
variance = ((d1 ** 2 * S0 ** 2 * S1 ** 2 + d0 ** 2 * (S1 ** 2 + S2 ** 2 + S3 ** 2) ** 2 + S0 ** 2 * (
d2 ** 2 * S2 ** 2 + d3 ** 2 * S3 ** 2)) / (S0 ** 4 * (S1 ** 2 + S2 ** 2 + S3 ** 2)))
thisData.append(np.sqrt(variance))
sbFitsDict["DOP"].append([sbNum, thisData[-2], thisData[-1]])
sbFits.append(thisData)
sbFits = np.array(sbFits)
sbFitsDict = {k: np.array(v) for k, v in sbFitsDict.items()}
# This chunk used to insert the "alpha deviation", the difference between the angles and the
# nir. I don't think I use this anymore, so stop saving it
# origin_header = 'Sideband,S0,S0 err,S1,S1 err,S2,S2 err,S3,S3 err,alpha,alpha deviation,alpha err,gamma,gamma err,DOP,DOP err\n'
# origin_header += 'Order,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,deg,deg,deg,deg,deg,arb.u.,arb.u.\n'
# origin_header += 'Sideband,{},{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(*["{}".format(series)] * 15)
# sbFits = np.array(sbFits)
# sbFits = np.insert(sbFits, 10, sbFits[:, 9] - lAlpha, axis=1)
# sbFits = sbFits[sbFits[:, 0].argsort()]
origin_header = "#\n"*100 # to fit all other files for easy origin importing
origin_header += 'Sideband,S0,S0 err,S1,S1 err,S2,S2 err,S3,S3 err,alpha,alpha err,gamma,gamma err,DOP,DOP err\n'
origin_header += 'Order,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,arb.u,deg,deg,deg,deg,arb.u.,arb.u.\n'
origin_header += 'Sideband,{},{},{},{},{},{},{},{},{},{},{},{},{},{}'.format(*["{}".format(series)] * 14)
sbFits = sbFits[sbFits[:, 0].argsort()]
if isinstance(save, str):
sbFitsSave = sbFits
if not kwargs.get("saveStokes", True):
headerlines = origin_header.splitlines()
ln, units, coms = headerlines[-3:]
ln = ','.join([ln.split(',')[0]] + ln.split(',')[9:])
units = ','.join([units.split(',')[0]] + units.split(',')[9:])
coms = ','.join([coms.split(',')[0]] + coms.split(',')[9:])
headerlines[-3:] = ln, units, coms
# remove them from the save data
origin_header = '\n'.join(headerlines)
sbFitsSave = np.delete(sbFits, range(1, 9), axis=1)
if not os.path.exists(os.path.dirname(save)):
os.mkdir(os.path.dirname(save))
np.savetxt(save, np.array(sbFitsSave), delimiter=',', header=origin_header,
comments='', fmt='%.6e')
# print("a = {:.2f} ± {:.2f}".format(sbFits[1, 9], sbFits[1, 10]))
# print("g = {:.2f} ± {:.2f}".format(sbFits[1, 11], sbFits[1, 12]))
if plot:
plt.figure("alpha")
plt.errorbar(sbFitsDict["alpha"][:, 0],
sbFitsDict["alpha"][:, 1],
sbFitsDict["alpha"][:, 2],
fmt='o-', label=series
)
plt.figure("gamma")
plt.errorbar(sbFitsDict["gamma"][:, 0],
sbFitsDict["gamma"][:, 1],
sbFitsDict["gamma"][:, 2],
fmt='o-', label=series
)
return sbFits, sbFitsDict
####################
# Helper functions
####################
def fvb_crr(raw_array, offset=0, medianRatio=1, noiseCoeff=5, debugging=False):
"""
Remove cosmic rays from a sequence of identical exposures
:param raw_array: The array to be cleaned. Successive spectra should
be the columns (i.e. 1600 x n) of the raw_array
:param offset: baseline to add to raw_array.
Not used, but here if it's needed in the future
:param medianRatio: Multiplier to the median when deciding a cutoff
:param noiseCoeff: Multiplier to the noise on the median
May need changing for noisy data
:return:
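Example (illustrative sketch; ``raw`` is assumed to be a (pixels x exposures)
array of repeated, nominally identical spectra):
    cleaned = fvb_crr(raw, medianRatio=1, noiseCoeff=5)
    spectrum = cleaned.sum(axis=1)  # combine the cleaned exposures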
"""
d = np.array(raw_array)
med = ndimage.filters.median_filter(d, size=(1, d.shape[1]), mode='wrap')
med = np.median(d, axis=1).reshape(d.shape[0], 1)
if debugging:
print("shape of median filter:", med.shape)
meanMedian = med.mean(axis=1)
# meanMedian = med.copy()
if debugging:
print("shape of meaned median filter:", meanMedian.shape)
# Construct a cutoff for each pixel. It was kind of guess and
# check
cutoff = meanMedian * medianRatio + noiseCoeff * np.std(meanMedian[-100:])
if debugging:
print("shape of cutoff criteria:", cutoff.shape)
import pyqtgraph as pg
winlist = []
app = pg.QtGui.QApplication([])
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Raw Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate(d.T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(np.sum(d, axis=1), pen=pg.mkPen('w', width=3))
win.show()
winlist.append(win)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("Median Image")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage(med.T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(med, axis=1) / d.shape[1])
win2.show()
winlist.append(win2)
win2 = pg.GraphicsLayoutWidget()
win2.setWindowTitle("d-m")
p1 = win2.addPlot()
img = pg.ImageItem()
img.setImage((d - med).T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win2.addItem(hist)
win2.nextRow()
p2 = win2.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.addLegend()
for i, v in enumerate((d - med).T):
p2.plot(v, pen=(i, d.shape[1]), name=str(i))
p2.plot(cutoff, pen=pg.mkPen('w', width=3))
win2.show()
winlist.append(win2)
# Find the bad pixel positions
# Note the [:, None] - needed to cast the correct shapes
badPixs = np.argwhere((d - med) > (cutoff.reshape(len(cutoff), 1)))
for pix in badPixs:
# get the other pixels in the row which aren't the cosmic
if debugging:
print("cleaning pixel", pix)
p = d[pix[0], [i for i in range(d.shape[1]) if not i == pix[1]]]
if debugging:
print("\tRemaining pixels in row are", p)
# Replace the cosmic by the average of the others
# Could get hairy if more than one cosmic per row.
# Maybe when doing many exposures?
d[pix[0], pix[1]] = np.mean(p)
if debugging:
win = pg.GraphicsLayoutWidget()
win.setWindowTitle("Clean Image")
p1 = win.addPlot()
img = pg.ImageItem()
img.setImage(d.copy().T)
p1.addItem(img)
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
p2.plot(np.sum(d, axis=1))
win.show()
winlist.append(win)
app.exec_()
return np.array(d)
def stitchData(dataList, plot=False):
"""
Attempt to stitch together absorbance data. Will translate the second data set
to minimize leastsq between the two data sets.
:param dataList: Iterable of the data sets to be fit. Currently
it only takes the first two elements of the list, but should be fairly
straightforward to recursively handle a list > 2. Shifts the second
data set to overlap the first
elements of dataList can be either np.arrays or Absorbance class,
where it will take the proc_data itself
:param plot: bool whether or not you want the fit iterations to be plotted
(for debugging)
:return: a, a (2,) np.array of the shift
"""
# Data coercion, make sure we know what we're working with
first = dataList[0]
if isinstance(first, Absorbance):
first = first.proc_data
second = dataList[1]
if isinstance(second, Absorbance):
second = second.proc_data
if plot:
# Keep a reference to whatever plot is open at call-time
# Useful if the calling script has plots before and after, as
# omitting this will cause future plots to be added to figures here
firstFig = plt.gcf()
plt.figure("Stitcher")
# Plot the raw input data
plt.plot(*first.T)
plt.plot(*second.T)
# Algorithm is set up such that the "second" data set spans a
# higher domain than the first. Need to enforce this, and remember it
# so the correct shift is applied
flipped = False
if max(first[:, 0]) > max(second[:, 0]):
flipped = True
first, second = second, first
def stitch_hsg_dicts(full_obj, new_obj, need_ratio=False, verbose=False, ratios=[1,1],
override_ratio = False, ignore_weaker_lowers = True):
"""
This helper function takes a FullHighSideband and a sideband
object, either CCD or PMT and smushes the new sb_results into the full_dict.
The first input doesn't change, so if there's a PMT set of data involved, it
should be in the full variable to keep the laser normalization intact.
This function almost certainly does not work for stitching many negative orders
in its current state
11/14/16
--------
This function has been updated to take the CCD objects themselves to be more
intelligent about stitching. Consider two scans, (a) spec step 0 with 1 gain, spec
step 2 with 110 gain and (b) spec step 0 with 50 gain and spec step 1 with 110 gain.
The old version would always take spec step 0 to scale to, so while comparisons
between spec step 0 and 1 for either case is valid, comparison between (a) and (b)
were not, since they were scaled to different gain parameters. This new code will
check what the gain values are and scale to the 110 data set, if present. This seems
valid because we currently always have a 110 gain exposure for higher order
sidebands.
The exception is if the laser is present (sideband 0), as that is an absolute
measure to which all else should be related.
TODO: run some test cases to test this.
06/11/18
--------
That sometimes was breaking if there were only 3-4 sidebands to fit with poor
SNR. I've added the override_ratio to be passed to set a specific ratio to scale
by. From data on 06/03/18, the 50gain to 110gain is a ~3.6 ratio. I haven't done
a clean way of specifying which data set it should be scaled to. Right now,
it leaves the laser line data, or the 110 gain data alone.
Inputs:
full = full_dict from FullHighSideband, or HighSidebandPMT. It's important
that it contains lower orders than the new_dict.
new_dict = another full_dict.
need_ratio = If gain or other parameters aren't equal and must resort to
calculating the ratio instead of the measurements being equivalent.
Changing integration time still means N photons made M counts,
but changing gain or using PMT or whatever does affect things.
ratios: Will update with the values to the ratios needed to scale the data.
ratios[0] is the ratio for the "full_obj"
ratios[1] is the ratio for the "new_obj"
one of them will be one, one will be the appropriate scale, since one of
them is unscaled. This is strictly speaking an output
override_ratio: Pass a float to specify the ratio that should be used.
ignore_weaker_lowers: Sometimes, a SB is in the short pass filter so a lower
order is weaker than the next highest. If True, causes script to ignore all
sidebands which are weaker and lower order.
Returns:
full = extended version of the input full. Overlapping sidebands are
averaged because that makes sense?
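Illustrative usage (names hypothetical; normally this is invoked for you when
spec steps are stitched together in hsg_combine_spectra rather than called directly):
    combined_dict = stitch_hsg_dicts(full_spec, new_spec, need_ratio=True,
                                     verbose=False)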
"""
if isinstance(full_obj, dict) and isinstance(new_obj, dict):
return stitch_hsg_dicts_old(full_obj, new_obj, need_ratio, verbose)
if verbose:
print("=" * 15)
print()
print("Stitching HSG dicts")
print()
print("=" * 15)
# remove potentially offensive SBs, i.e. a 6th order SB being in the SPF for more
# data, but being meaningless to pull intensity information from.
# Note: this might not be the best if you get to higher order stitches where it's
# possible that the sidebands might not be monotonic (from noise?)
if ignore_weaker_lowers:
full_obj.full_dict, full_obj.sb_results = FullHighSideband.parse_sb_array(full_obj.sb_results)
new_obj.new_dict, new_obj.sb_results = FullHighSideband.parse_sb_array(new_obj.sb_results)
# was fucking around with references and causing updates to arrays when it shouldn't
# be
full = copy.deepcopy(full_obj.full_dict)
new_dict = copy.deepcopy(new_obj.full_dict)
# Force a rescaling if you've passed a specified parameter
# if isinstance(override_ratio, float):
# need_ratio = True
# Do some testing to see which dict should be scaled to the other
# I honestly forget why I prioritized the PMT first like this. But the third
# check looks to make a gain 110 prioritize non-110, unless the non-110 includes
# a laser line
scaleTo = ""
if need_ratio:
if isinstance(new_obj, HighSidebandPMT):
scaleTo = "new"
elif isinstance(full_obj, HighSidebandPMT):
scaleTo = "full"
elif new_obj.parameters["gain"] == 110 and full_obj.parameters["gain"] != 110 \
and 0 not in full:
scaleTo = "new"
else:
scaleTo = "full"
if verbose:
print("\tI'm adding these sidebands", new_obj.sb_results[:,0])
print("\t With these:", sorted(full.keys()))
overlap = [] # The list that hold which orders are in both dictionaries
missing = [] # How to deal with sidebands that are missing from full but in new.
for new_sb in new_obj.sb_results[:,0]:
full_sbs = sorted(full.keys())
if new_sb in full_sbs:
overlap.append(new_sb)
elif new_sb not in full_sbs and new_sb < full_sbs[-1]:
# This probably doesn't work with bunches of negative orders
missing.append(new_sb)
if verbose:
print("\t ( overlap:", overlap, ")")
print("\t ( missing:", missing, ")")
# This if-else clause handles how to average together overlapping sidebands
# which are seen in both spectra,
if need_ratio:
# Calculate the appropriate ratio to multiply the new sidebands by.
# I'm not entirely sure what to do with the error of this guy.
ratio_list = []
try:
new_starter = overlap[-1]
if verbose:
print("\n\tadding these ratios,", end=' ')
if len(overlap) > 2:
overlap = [x for x in overlap if (x % 2 == 0)
]# and (x != min(overlap) and (x != max(overlap)))]
if scaleTo == "new":
if verbose:
print("scaling to new :")
for sb in overlap:
ratio_list.append(new_dict[sb][2]/full[sb][2])
if verbose:
print("\t\t{:2.0f}: {:.3e}/{:.3e} ~ {:.3e},".format(sb, new_dict[sb][2],
full[sb][2], ratio_list[-1]))
# new_ratio = 1 06/11/18 Not sure what these were used for
ratio = np.mean(ratio_list)
else:
if verbose:
print("scaling to full:")
for sb in overlap:
ratio_list.append(full[sb][2] / new_dict[sb][2])
if verbose:
print("\t\t{:2.0f}: {:.3e}/{:.3e} ~ {:.3e},".format(sb, full[sb][2],
new_dict[sb][2], ratio_list[-1]))
# new_ratio = np.mean(ratio_list) 06/11/18 Not sure what these were used for
ratio = np.mean(ratio_list)
# Maybe not the best way to do it, performance wise, since you still
# iterate through the list, even though you'll override it.
if isinstance(override_ratio, float):
ratio = override_ratio
if verbose:
print("overriding calculated ratio with user inputted")
error = np.std(ratio_list) / np.sqrt(len(ratio_list))
except IndexError:
# If there's no overlap (which you shouldn't let happen), hardcode a ratio
# and error. I looked at all the ratios for the overlaps from 6/15/16
# (540ghz para) to get the rough average. Hopefully they hold for all data.
if not overlap:
ratio = 0.1695
error = 0.02
# no overlap, so make sure it grabs all the sidebands
new_starter = min(new_dict.keys())
else:
raise
if verbose:
# print "Ratio list\n\t", ("{:.3g}, "*len(ratio_list))[:-2].format(*ratio_list)
# print "Overlap \n\t", [round(ii, 3) for ii in overlap]
print("\t Ratio: {:.3g} +- {:.3g} ({:.2f}%)\n".format(ratio, error, error/ratio*100))
# Adding the new sidebands to the full set and moving errors around.
# I don't know exactly what to do about the other aspects of the sidebands
# besides the strength and its error.
if scaleTo == "full":
ratios[1] = ratio
for sb in overlap:
if verbose:
print("For SB {:02d}, original strength is {:.3g} +- {:.3g} ({:.3f}%)".format(int(sb), new_dict[sb][2], new_dict[sb][3],
new_dict[sb][3]/new_dict[sb][2]*100
))
new_dict[sb][3] = ratio * new_dict[sb][2] * np.sqrt((error / ratio) ** 2 + (new_dict[sb][3] / new_dict[sb][2]) ** 2)
new_dict[sb][2] = ratio * new_dict[sb][2]
if verbose:
print("\t\t scaled\t\t\t\t{:.3g} +- {:.3g} ({:.3f}%)".format(new_dict[sb][2],
new_dict[sb][3],
new_dict[sb][3]/new_dict[sb][2]*100))
print("\t\t full\t\t\t\t\t{:.3g} +- {:.3g} ({:.3f}%)".format(full[sb][2],
full[sb][3],
full[sb][3]/full[sb][2]*100))
sb_error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (
new_dict[sb][3] ** 2)) / (full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = sb_error
if verbose:
print("\t\t replaced with \t\t{:.3g} +- {:.3g} ({:.3f}%)".format(full[sb][2],
full[sb][3],
full[sb][3]/full[sb][2]*100))
print()
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (
new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
else:
ratios[0] = ratio
for sb in overlap:
full[sb][3] = ratio * full[sb][2] * np.sqrt((error / ratio) ** 2 + (full[sb][3] / full[sb][2]) ** 2)
full[sb][2] = ratio * full[sb][2]
sberror = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (
new_dict[sb][3] ** 2)) / (full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = sberror
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (
new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
else: # not needing a new ratio
try:
new_starter = overlap[-1] # This grabs the sideband order where only the new dictionary has
# sideband information. It's not clear why it necessarily has to be
# at this line.
overlap = [x for x in overlap if (x % 2 == 0)
] # and (x != min(overlap) and (x != max(overlap)))]
# This cuts out the lowest order sideband in the overlap for mysterious reasons
for sb in overlap: # This for loop average two data points weighted by their relative errors
if verbose:
print("The sideband", sb)
print("Old value", full[sb][4] * 1000)
print("Add value", new_dict[sb][4] * 1000)
try:
error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (new_dict[sb][3] ** 2)) / (
full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = error
except RuntimeWarning:
raise IOError()
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
if verbose:
print("New value", lw_avg * 1000)
except:
new_starter = 0 # I think this makes things work when there's no overlap
if verbose:
print("appending new elements. new_starter={}".format(new_starter))
for sb in [x for x in list(new_dict.keys()) if ((x > new_starter) or (x in missing))]:
full[sb] = new_dict[sb]
if scaleTo == "full":
full[sb][2] = ratio * full[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (ratio * full[sb][3] / full[sb][2]) ** 2)
if scaleTo == "new":
for sb in set(full.keys()) - set(sorted(new_dict.keys())[:]):
full[sb][2] *= ratio
# TODO: I think this is an invalid error
# propagation (since ratio has error associated with it)
full[sb][3] *= ratio
if verbose:
print("I made this dictionary", sorted(full.keys()))
print('-'*19)
return full
return full, ratio #the fuck? Why was this here?
return full
def stitch_hsg_dicts_old(full, new_dict, need_ratio=False, verbose=False):
"""
This helper function takes a FullHighSideband.full_dict attribute and a sideband
object, either CCD or PMT and smushes the new sb_results into the full_dict.
The first input doesn't change, so if there's a PMT set of data involved, it
should be in the full variable to keep the laser normalization intact.
This function almost certainly does not work for stitching many negative orders
in its current state
11/14/16
--------
The original function has been updated to take the full object (instead of
the dicts alone) to better handle calculating ratios when stitching. This is called
once things have been parsed in the original function (or legacy code where dicts
are passed instead of the object)
Inputs:
full = full_dict from FullHighSideband, or HighSidebandPMT. It's important
that it contains lower orders than the new_dict.
new_dict = another full_dict.
need_ratio = If gain or other parameters aren't equal and must resort to
calculating the ratio instead of the measurements being equivalent.
Changing integration time still means N photons made M counts,
but changing gain or using PMT or whatever does affect things.
Returns:
full = extended version of the input full. Overlapping sidebands are
averaged because that makes sense?
"""
if verbose:
print("I'm adding these sidebands in old stitcher", sorted(new_dict.keys()))
overlap = [] # The list that hold which orders are in both dictionaries
missing = [] # How to deal with sidebands that are missing from full but in new.
for new_sb in sorted(new_dict.keys()):
full_sbs = sorted(full.keys())
if new_sb in full_sbs:
overlap.append(new_sb)
elif new_sb not in full_sbs and new_sb < full_sbs[-1]:
# This probably doesn't work with bunches of negative orders
missing.append(new_sb)
if verbose:
print("overlap:", overlap)
print("missing:", missing)
# This if-else clause handles how to average together overlapping sidebands
# which are seen in both spectra,
if need_ratio:
# Calculate the appropriate ratio to multiply the new sidebands by.
# I'm not entirely sure what to do with the error of this guy.
ratio_list = []
#print '\n1979\nfull[2]', full[0][2]
try:
new_starter = overlap[-1]
if len(overlap) > 2:
overlap = [x for x in overlap if (x % 2 == 0)
]#and (x != min(overlap) and (x != max(overlap)))]
for sb in overlap:
ratio_list.append(full[sb][2] / new_dict[sb][2])
ratio = np.mean(ratio_list)
# print
# print '-'*15
# print "ratio for {}: {}".format()
error = np.std(ratio_list) / np.sqrt(len(ratio_list))
except IndexError:
# If there's no overlap (which you shouldn't let happen),
# hardcode a ratio and error.
# I looked at all the ratios for the overlaps from 6/15/16
# (540ghz para) to get the rough average. Hopefully they hold
# for all data.
if not overlap:
ratio = 0.1695
error = 0.02
# no overlap, so make sure it grabs
# all the sidebands
new_starter = min(new_dict.keys())
else:
raise
if verbose:
print("Ratio list","\n", [round(ii, 3) for ii in ratio_list])
print("Overlap ","\n", [round(ii, 3) for ii in overlap])
print("Ratio", ratio)
print("Error", error)
#print '\n2118\nfull[2]', full[0][2]
# Adding the new sidebands to the full set and moving errors around.
# I don't know exactly what to do about the other aspects of the sidebands
# besides the strength and its error.
for sb in overlap:
full[sb][2] = ratio * new_dict[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (new_dict[sb][3] / new_dict[sb][2]) ** 2)
#print '\n2125\nfull[2]', full[0][3]
# Now for linewidths
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error
#print '\n2132\nfull[2]', full[0][2]
else:
try:
new_starter = overlap[-1] # This grabs the sideband order where only the new dictionary has
# sideband information. It's not clear why it necessarily has to be
# at this line.
overlap = [x for x in overlap if (x % 2 == 0) and (x != min(overlap) and (x != max(overlap)))]
# This cuts out the lowest order sideband in the overlap for mysterious reasons
for sb in overlap: # This for loop average two data points weighted by their relative errors
if verbose:
print("The sideband", sb)
print("Old value", full[sb][4] * 1000)
print("Add value", new_dict[sb][4] * 1000)
error = np.sqrt(full[sb][3] ** (-2) + new_dict[sb][3] ** (-2)) ** (-1)
avg = (full[sb][2] / (full[sb][3] ** 2) + new_dict[sb][2] / (new_dict[sb][3] ** 2)) / (
full[sb][3] ** (-2) + new_dict[sb][3] ** (-2))
full[sb][2] = avg
full[sb][3] = error
lw_error = np.sqrt(full[sb][5] ** (-2) + new_dict[sb][5] ** (-2)) ** (-1)
lw_avg = (full[sb][4] / (full[sb][5] ** 2) + new_dict[sb][4] / (new_dict[sb][5] ** 2)) / (
full[sb][5] ** (-2) + new_dict[sb][5] ** (-2))
full[sb][4] = lw_avg
full[sb][5] = lw_error # This may not be the exactly right way to calculate the error
if verbose:
print("New value", lw_avg * 1000)
except:
new_starter = 0 # I think this makes things work when there's no overlap
if verbose:
print("appending new elements. new_starter={}".format(new_starter))
# This loop will add the sidebands which were only seen in the second step
for sb in [x for x in list(new_dict.keys()) if ((x >= new_starter) or (x in missing))]:
full[sb] = new_dict[sb]
if need_ratio:
full[sb][2] = ratio * full[sb][2]
full[sb][3] = full[sb][2] * np.sqrt((error / ratio) ** 2 + (ratio * full[sb][3] / full[sb][2]) ** 2)
#print '\n2164\nfull[2]', full[0][2]
if verbose:
print("I made this dictionary", sorted(full.keys()))
return full
def save_parameter_sweep_no_sb(spectrum_list, file_name, folder_str, param_name, unit,
verbose=False):
"""
This function will take a fully processed list of spectrum objects and
slice Spectrum.sb_fits appropriately to get an output like:
"Parameter" | SB1 freq | err | SB1 amp | error | SB1 linewidth | error | SB2...| SBn...|
param1 | . |
param2 | . |
.
.
.
Currently I'm thinking fuck the offset y0
After constructing this large matrix, it will save it somewhere.
"""
spectrum_list.sort(key=lambda x: x.parameters[param_name])
included_spectra = dict()
param_array = None
sb_included = []
for spec in spectrum_list:
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
included_spectra[spec.fname.split('/')[-1]] = spec.parameters[param_name]
# If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
if verbose:
# print "full name:", spectrum_list[0].fname
print("included names:", included_spectra)
print("sb_included:", sb_included)
for spec in spectrum_list:
temp_dict = {} # This is different from full_dict in that the list has the
# sideband order as the zeroth element.
if verbose:
print("the sb_results:", spec.sb_results)
if spec.sb_results.ndim == 1: continue
for index in range(len(spec.sb_results[:, 0])):
if verbose:
print("my array slice:", spec.sb_results[index, :])
temp_dict[int(round(spec.sb_results[index, 0]))] = np.array(
spec.sb_results[index, 1:])
if verbose:
print(temp_dict)
for sb in sb_included:
blank = np.zeros(6)
# print "checking sideband order:", sb
# print "blank", blank
if sb not in temp_dict:
# print "\nNeed to add sideband order:", sb
temp_dict[sb] = blank
try: # Why is this try-except here?
spec_data = np.array([float(spec.parameters[param_name])])
except:
spec_data = np.array([float(spec.parameters[param_name][:2])])
for key in sorted(temp_dict.keys()):
# print "I am going to hstack this:", temp_dict[key]
spec_data = np.hstack((spec_data, temp_dict[key]))
try:
param_array = np.vstack((param_array, spec_data))
except:
param_array = np.array(spec_data)
if verbose:
print("The shape of the param_array is:", param_array.shape)
# print "The param_array itself is:", param_array
'''
param_array_norm = np.array(param_array).T # python iterates over rows
for elem in [x for x in xrange(len(param_array_norm)) if (x-1)%7 == 3]:
temp_max = np.max(param_array_norm[elem])
param_array_norm[elem] = param_array_norm[elem] / temp_max
param_array_norm[elem + 1] = param_array_norm[elem + 1] / temp_max
'''
snipped_array = param_array[:, 0]
norm_array = param_array[:, 0]
if verbose:
print("Snipped_array is", snipped_array)
for ii in range(len(param_array.T)):
if (ii - 1) % 6 == 0:
if verbose:
print("param_array shape", param_array[:, ii])
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
norm_array = np.vstack((norm_array, param_array[:, ii]))
elif (ii - 1) % 6 == 2:
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
temp_max = np.max(param_array[:, ii])
norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
elif (ii - 1) % 6 == 3:
snipped_array = np.vstack((snipped_array, param_array[:, ii]))
norm_array = np.vstack((norm_array, param_array[:, ii] / temp_max))
snipped_array = snipped_array.T
norm_array = norm_array.T
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
norm_name = file_name + '_norm.txt'
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += "Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",eV,,arb. u.,,meV,"
origin_import3 += ",{0},,{0},,{0},".format(order)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += ",Frequency,Sideband strength,error"
origin_import2 += ",eV,arb. u.,"
origin_import3 += ",{0},{0},".format(order)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, norm_name), norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(
os.path.join(folder_str, file_name)))
def save_parameter_sweep(spectrum_list, file_name, folder_str, param_name, unit,
wanted_indices = [1, 3, 4], skip_empties = False, verbose=False,
header_dict = {}, only_even=False):
"""
This function will take a fully processed list of spectrum objects and
slice Spectrum.sb_fits appropriately to get an output like:
"Parameter" | SB1 freq | err | SB1 amp | error | SB1 linewidth | error | SB2...| SBn...|
param1 | . |
param2 | . |
.
.
.
Currently I'm thinking fuck the offset y0
After constructing this large matrix, it will save it somewhere.
Thus function has been update to pass a list of indices to slice for the return
values
skip_empties: If False, will add a row of zeroes for the parameter even if no sidebands
are found. If True, will not add a line for that parameter
only_even: don't include odd orders in the saved sweep
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
"""
if isinstance(param_name, list):
# if you pass two things because the param you want
# is in a dict (e.g. field strength has mean/std)
# do it that way
param_name_list = list(param_name) # keep reference to old one
paramGetter = lambda x: x.parameters[param_name_list[0]][param_name_list[1]]
# Keep the name for labeling things later on
param_name = param_name[0]
else:
paramGetter = lambda x: x.parameters[param_name]
# Sort all of the spectra based on the desired key
spectrum_list.sort(key=paramGetter)
# keep track of which file name corresponds to which parameter which gets put in
included_spectra = dict()
# The big array which will be stacked up to keep all of the sideband details vs desired parameter
param_array = None
# list of which sidebands are seen throughout.
sb_included = []
# how many parameters (area, strength, linewidth, pos, etc.) are there?
# Here incase software changes and more things are kept in
# sb results. Needed to handle how to slice the arrays
try:
num_params = spectrum_list[0].sb_results.shape[1]
except IndexError:
# There's a file with only 1 sb and it happens to be first
# in the list.
num_params = spectrum_list[0].sb_results.shape[0]
except AttributeError:
# The first file has no sidebands, so just hardcode it, as stated below.
num_params=0
# Rarely, there's an issue where I'm doing some testing and there's a set
# where the first file has no sidebands in it, so the above thing returns 0
# It seems really silly to do a bunch of testing to try and correct for that, so
# I'm going to hardcode the number of parameters.
if num_params == 0:
num_params = 7
# loop through all of them once to figure out which sidebands are seen in all spectra
for spec in spectrum_list:
try:
# use sets to keep track of only unique sidebands
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
except AttributeError:
print("No full dict?", spec.fname)
print(spec.sb_list)
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
included_spectra[spec.fname.split('/')[-1]] = paramGetter(spec)
if only_even:
sb_included = [ii for ii in sb_included if not ii%2]
if verbose:
print("included names:", included_spectra)
print("sb_included:", sb_included)
for spec in spectrum_list:
        # Flag tracking whether no sidebands were found. Used to skip
# issues when trying to index on empty arrays
noSidebands = False
if verbose:
print("the sb_results:", spec.sb_results)
# if no sidebands were found, skip this one
try:
# TODO: (08/14/18) the .ndim==1 isn't the correct check, since it fails
# when looking at the laser line. Need to test this with a real
# empty data set, vs data set with 1 sb
#
#
# (08/28/18) I'm not sure what the "not spec" is trying to handle
# spec.sb_results is None occurs when _no_ sidebands were fit
# spec.sb_results.ndim == 1 happens when only one sideband is found
if not spec or spec.sb_results is None or spec.sb_results.ndim == 1:
if spec.sb_results is None:
                    # Flag that no sidebands were found
noSidebands = True
elif spec.sb_results[0] == 0:
                    # Cast it to 2d to allow slicing later on. Not sure why this is
                    # only done if the laser line is the one found.
spec.sb_results = np.atleast_2d(spec.sb_results)
elif skip_empties:
continue
else:
noSidebands = True
except (AttributeError, TypeError):
# continue
raise
# Make an sb_results of all zeroes where we'll fill
# in the sideband info we found
new_spec = np.zeros((len(sb_included), num_params))
if not noSidebands:
sb_results = spec.sb_results.copy()
saw_sbs = sb_results[:, 0]
found_sb = sorted(list(set(sb_included) & set(saw_sbs)))
found_idx = [sb_included.index(ii) for ii in found_sb]
try:
new_spec[:, 0] = sb_included
except:
print("new_spec", new_spec)
raise
try:
if only_even:
new_spec[found_idx, :] = sb_results[sb_results[:,0]%2==0]
else:
new_spec[found_idx, :] = sb_results
except ValueError:
print(spec.fname)
print("included:", sb_included)
print("found:", found_sb, found_idx)
print(new_spec.shape, sb_results.shape)
print(sb_results)
print(new_spec)
raise
spec_data = np.insert(new_spec.flatten(), 0, float(paramGetter(spec)))
try:
param_array = np.row_stack((param_array, spec_data))
except:
param_array = np.array(spec_data)
if param_array.ndim == 1: # if you only pass one spectra
param_array = param_array[None, :] # recast it to 2D for slicing
# the indices we want from the param array from the passed argument
snip = wanted_indices
N = len(sb_included)
# run it out across all of the points across the param_array
snipped_indices = [0] + list(
1+np.array(snip * N) + num_params * np.array(sorted(list(range(N)) * len(snip))))
snipped_array = param_array[:, snipped_indices]
norm_array = snipped_array.copy()
# normalize the area if it's requested
if 3 in snip:
num_snip = len(snip)
strength_idx = snip.index(3)
if 4 in snip:
#normalize error first if it was requested
idx = snip.index(4)
norm_array[:, 1 + idx + np.arange(N) * num_snip] /= norm_array[:,1 + strength_idx + np.arange(N) * num_snip].max(axis=0)
strength_idx = snip.index(3)
norm_array[:, 1+strength_idx+np.arange(N)*num_snip]/=norm_array[:, 1+strength_idx+np.arange(N)*num_snip].max(axis=0)
try:
os.mkdir(folder_str)
except TypeError:
pass # if you pass None as folder_str (for using byteIO)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
included_spectra.update(header_dict)
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4,
separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
# this will make the header chunk for the full, un-sliced data set
# TODO: fix naming so you aren't looping twice
### 1/9/18 This isn't needed, right? Why isn't it deleted?
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
for order in sb_included:
origin_import1 += ",sideband,Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",order,eV,eV,arb. u.,arb.u.,meV,meV"
origin_import3 += ",,{0},,{0},,{0},".format(order)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# This little chunk will make a chunk block of header strings for the sliced
# data set which can be looped over
origin_import1 = param_name
origin_import2 = unit
origin_import3 = ""
wanted_titles = ["Sideband", "Frequency", "error", "Sideband strength","error","Linewidth","error"]
wanted_units = ["order", "eV", "eV", "arb. u.", "arb. u.", "eV", "eV"]
wanted_comments = ["", "{0}", "", "{0}", "", "{0}", ""]
wanted_titles = ",".join([wanted_titles[ii] for ii in wanted_indices])
wanted_units = ",".join([wanted_units[ii] for ii in wanted_indices])
wanted_comments = ",".join([wanted_comments[ii] for ii in wanted_indices])
for order in sb_included:
origin_import1 += ","+wanted_titles
origin_import2 += ","+wanted_units
origin_import3 += ","+wanted_comments.format(order)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
if isinstance(file_name, list):
if isinstance(file_name[0], io.BytesIO):
np.savetxt(file_name[0], param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(file_name[1], snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(file_name[2], norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
# Need to reset the file position if you want to read them immediately
# Is it better to do that here, or assume you'll do it later?
# I'm gonna assume here, because I can't currently think of a time when I'd want
# to be at the end of the file
[ii.seek(0) for ii in file_name]
if verbose:
print("Saved the file to bytes objects")
else:
if file_name:
norm_name = file_name + '_norm.txt'
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, norm_name), norm_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_str, file_name)))
else:
if verbose:
print("Didn't save")
return sb_included, param_array, snipped_array, norm_array
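# A minimal usage sketch for save_parameter_sweep (kept as a comment so it
# doesn't run on import). The spectrum list, folder, and parameter key below
# are hypothetical examples, not values from any particular data set:
#
#     specs = [...]  # fully processed spectrum objects with .sb_results
#     sbs, full, snip, norm = save_parameter_sweep(
#         specs, "sweep", "Processed Data", "fel_power", "W")
#
# Passing file_name=False (or an empty string) skips writing to disk and just
# returns (sb_included, param_array, snipped_array, norm_array).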
def save_parameter_sweep_vs_sideband(spectrum_list, file_name,
folder_str, param_name, unit, verbose=False,
wanted_indices = [1, 3, 4]):
"""
Similar to save_parameter_sweep, but the data[:,0] column is sideband number instead of
series, and each set of columns correspond to a series step. Pretty much compiles
all of the fit parameters from the files that are already saved and puts it into
one file to keep from polluting the Origin folder
:param spectrum_list:
:param file_name:
:param folder_str:
:param param_name:
:param unit:
:param verbose:
sb number is automatically prepended, so do not include in slicing list
[sb number, Freq (eV), Freq error (eV), Gauss area (arb.), Area error, Gauss linewidth (eV), Linewidth error (eV)]
[ 0 , 1 , 2, , 3 , 4 , 5 , 6 ]
:return:
"""
spectrum_list.sort(key=lambda x: x.parameters[param_name])
included_spectra = dict()
param_array = None
sb_included = []
# what parameters were included (for headers)
params = sorted([x.parameters[param_name] for x in spectrum_list])
for spec in spectrum_list:
sb_included = sorted(list(set(sb_included + list(spec.full_dict.keys()))))
included_spectra[spec.fname.split('/')[-1]] = spec.parameters[param_name]
        # If these are from summed spectra, then only the first file name
# from that sum will show up here, which should be fine?
if verbose:
# print "full name:", spectrum_list[0].fname
print("included names:", included_spectra)
print("sb_included:", sb_included)
param_array = np.array(sb_included)
for spec in spectrum_list:
temp_dict = spec.full_dict.copy()
#prevent breaking if no sidebands in spectrum
if not temp_dict:
if verbose:
print("No sidebands here? {}, {}".format(spec.parameters["series"],
spec.parameters["spec_step"]))
continue
if verbose:
print(temp_dict)
# matrix for holding all of the sb information
# for a given spectrum
spec_matrix = None
for sb in sb_included:
blank = np.zeros(6)
# print "checking sideband order:", sb
# print "blank", blank
sb_data = temp_dict.get(sb, blank)
try:
spec_matrix = np.row_stack((spec_matrix, sb_data))
except:
spec_matrix = sb_data
param_array = np.column_stack((param_array, spec_matrix))
# the indices we want from the param array
# 1- freq, 3-area, 4-area error
snip = wanted_indices
N = len(spectrum_list)
# run it out across all of the points across the param_array
snipped_indices = [0] + list( np.array(snip*N) + 6*np.array(sorted(list(range(N))*len(snip))) )
snipped_array = param_array[:, snipped_indices]
try:
os.mkdir(folder_str)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
snip_name = file_name + '_snip.txt'
file_name = file_name + '.txt'
try:
included_spectra_str = json.dumps(included_spectra, sort_keys=True, indent=4, separators=(',', ': '))
except:
print("Source: save_parameter_sweep\nJSON FAILED")
return
included_spectra_str = included_spectra_str.replace('\n', '\n#')
included_spectra_str += '\n#' * (99 - included_spectra_str.count('\n'))
origin_import1 = "Sideband"
origin_import2 = "Order"
origin_import3 = "SB"
for param in params:
origin_import1 += ",Frequency,error,Sideband strength,error,Linewidth,error"
origin_import2 += ",eV,,arb. u.,,meV,"
origin_import3 += ",{0},,{0},,{0},".format(param)
origin_total = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
# This little chunk will make a chunk block of header strings for the sliced
# data set which can be looped over
origin_import1 = "Sideband"
origin_import2 = "Order"
origin_import3 = "SB"
wanted_titles = ["Sideband", "Frequency", "error", "Sideband strength", "error",
"Linewidth", "error"]
wanted_units = ["order", "eV", "eV", "arb. u.", "arb. u.", "eV", "eV"]
wanted_comments = ["", "{0}", "", "{0}", "", "{0}", ""]
wanted_titles = ",".join([wanted_titles[ii] for ii in wanted_indices])
wanted_units = ",".join([wanted_units[ii] for ii in wanted_indices])
wanted_comments = ",".join([wanted_comments[ii] for ii in wanted_indices])
for param in params:
origin_import1 += "," + wanted_titles
origin_import2 += "," + wanted_units
origin_import3 += "," + wanted_comments.format(param)
origin_snip = origin_import1 + "\n" + origin_import2 + "\n" + origin_import3
header_total = '#' + included_spectra_str + '\n' + origin_total
header_snip = '#' + included_spectra_str + '\n' + origin_snip
# print "Spec header: ", spec_header
if verbose:
print("the param_array is:", param_array)
if file_name: # allow passing false (or empty string) to prevent saving
np.savetxt(os.path.join(folder_str, file_name), param_array, delimiter=',',
header=header_total, comments='', fmt='%0.6e')
np.savetxt(os.path.join(folder_str, snip_name), snipped_array, delimiter=',',
header=header_snip, comments='', fmt='%0.6e')
if verbose:
print("Saved the file.\nDirectory: {}".format(os.path.join(folder_str, file_name)))
return None
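# save_parameter_sweep_vs_sideband takes the same arguments, but transposes
# the layout: rows are sideband orders and each parameter step adds a block
# of columns. A hedged sketch with made-up names:
#
#     save_parameter_sweep_vs_sideband(specs, "sweep_by_sb", "Processed Data",
#                                      "fel_power", "W")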
def stitchData(dataList, plot=False):
"""
Attempt to stitch together absorbance data. Will translate the second data set
to minimize leastsq between the two data sets.
:param dataList: Iterable of the data sets to be fit. Currently
it only takes the first two elements of the list, but should be fairly
straightforward to recursivly handle a list>2. Shifts the second
data set to overlap the first
elements of dataList can be either np.arrays or Absorbance class,
where it will take the proc_data itself
:param plot: bool whether or not you want the fit iterations to be plotted
(for debugging)
:return: a, a (2,) np.array of the shift
"""
    # Data coercion, make sure we know what we're working with
first = dataList[0]
if isinstance(first, Absorbance):
first = first.proc_data
second = dataList[1]
if isinstance(second, Absorbance):
second = second.proc_data
if plot:
# Keep a reference to whatever plot is open at call-time
# Useful if the calling script has plots before and after, as
# omitting this will cause future plots to be added to figures here
firstFig = plt.gcf()
plt.figure("Stitcher")
# Plot the raw input data
plt.plot(*first.T)
plt.plot(*second.T)
    # The algorithm is set up such that the "second" data set spans the
    # higher x-domain than the first. Need to enforce this, and remember it
# so the correct shift is applied
flipped = False
if max(first[:, 0]) > max(second[:, 0]):
flipped = True
first, second = second, first
def fitter(p, shiftable, immutable):
        # Designed to shift the "shiftable" data set onto the "immutable" one
# Get the shifts
dx = p[0]
dy = p[1]
# Don't want pass-by-reference nonsense, recast our own refs
shiftable = np.array(shiftable)
immutable = np.array(immutable)
# Shift the data set
shiftable[:, 1] += dy
shiftable[:, 0] += dx
# Create an interpolator. We want a
# direct comparision for subtracting the two functions
# Different spec grating positions have different wavelengths
# so they're not directly comparable.
shiftF = spi.interp1d(*shiftable.T)
# Find the bounds of where the two data sets overlap
overlap = (min(shiftable[:, 0]), max(immutable[:, 0]))
print("overlap", overlap)
# Determine the indices of the immutable function
# where it overlaps. argwhere returns 2-d thing,
# requiring the [0] at the end of each call
fOlIdx = (min(np.argwhere(immutable[:, 0] >= overlap[0]))[0],
max(np.argwhere(immutable[:, 0] <= overlap[1]))[0])
print("fOlIdx", fOlIdx)
# Get the interpolated values of the shiftable function at the same
# x-coordinates as the immutable case
newShift = shiftF(immutable[fOlIdx[0]:fOlIdx[1], 0])
if plot:
plt.plot(*immutable[fOlIdx[0]:fOlIdx[1], :].T, marker='o', label="imm", markersize=10)
plt.plot(immutable[fOlIdx[0]:fOlIdx[1], 0], newShift, marker='o', label="shift")
imm = immutable[fOlIdx[0]:fOlIdx[1], 1]
shift = newShift
return imm - shift
a, _, _, msg, err = spo.leastsq(fitter, [0.0001, 0.01 * max(first[:, 1])], args=(second, first), full_output=1)
# print "a", a
if plot:
# Revert back to the original figure, as per top comments
plt.figure(firstFig.number)
# Need to invert the shift if we flipped which
# model we're supposed to move
if flipped: a *= -1
return a
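# Hedged example of stitchData with two synthetic, overlapping Nx2 data sets;
# per the docstring, the returned (dx, dy) is the shift that moves the second
# data set onto the first:
#
#     x1 = np.linspace(0, 10, 200); x2 = np.linspace(8, 18, 200)
#     shift = stitchData([np.column_stack((x1, np.sin(x1))),
#                         np.column_stack((x2, np.sin(x2) + 0.3))])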
def integrateData(data, t1, t2, ave=False):
"""
Integrate a discrete data set for a
    given time period. Sums the data between
    the given bounds and multiplies by dt. Optional
    argument to divide by T = t2-t1 for calculating
averages.
data = 2D array. data[:,0] = t, data[:,1] = y
t1 = start of integration
t2 = end of integration
if data is a NxM, with M>=3, it will take the
third column to be the errors of the points,
and return the error as the quadrature sum
"""
t = data[:, 0]
y = data[:, 1]
    if data.shape[1] >= 3:
errors = data[:, 2]
else:
errors = np.ones_like(y) * np.nan
gt = set(np.where(t > t1)[0])
lt = set(np.where(t < t2)[0])
# find the intersection of the sets
vals = list(gt & lt)
# Calculate the average
tot = np.sum(y[vals])
error = np.sqrt(np.sum(errors[vals] ** 2))
# Multiply by sampling
tot *= (t[1] - t[0])
error *= (t[1] - t[0])
if ave:
# Normalize by total width if you want an average
tot /= (t2 - t1)
        error /= (t2 - t1)
if not np.isnan(error):
return tot, error
return tot
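# Quick hedged check of integrateData: integrating y = 1 over t in (0, 1)
# sampled at dt = 0.01 gives ~1.0 (sum * dt); ave=True divides by T = t2 - t1:
#
#     t = np.linspace(0, 1, 101)
#     d = np.column_stack((t, np.ones_like(t)))
#     area = integrateData(d, 0.0, 1.0)            # ~1.0
#     mean = integrateData(d, 0.0, 1.0, ave=True)  # ~1.0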
def fourier_prep(x_vals, y_vals, num=None):
"""
This function will take a Nx2 array with unevenly spaced x-values and make
them evenly spaced for use in fft-related things.
And remove nans!
"""
y_vals = handle_nans(y_vals)
spline = spi.interp1d(x_vals, y_vals,
kind='linear') # for some reason kind='quadratic' doesn't work? returns all nans
if num is None:
num = len(x_vals)
even_x = np.linspace(x_vals[0], x_vals[-1], num=num)
even_y = spline(even_x)
# even_y = handle_nans(even_y)
return even_x, even_y
def handle_nans(y_vals):
"""
This function removes nans and replaces them with linearly interpolated
values. It requires that the array maps from equally spaced x-values.
Taken from Stack Overflow: "Interpolate NaN values in a numpy array"
"""
nan_idx = np.isnan(y_vals)
my_lambda = lambda x: x.nonzero()[0] # Returns the indices where Trues reside
y_vals[nan_idx] = np.interp(my_lambda(nan_idx), my_lambda(~nan_idx), y_vals[~nan_idx])
return y_vals
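# Hedged example of handle_nans / fourier_prep: NaNs are replaced by linear
# interpolation, then the data is resampled onto an evenly spaced grid:
#
#     y = np.array([1.0, np.nan, 3.0, 4.0])
#     handle_nans(y)           # -> array([1., 2., 3., 4.]) (modifies y in place)
#     ex, ey = fourier_prep(np.array([0., 1., 3., 4.]), y)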
def calc_laser_frequencies(spec, nir_units="eV", thz_units="eV",
bad_points=-2, inspect_plots=False):
"""
Calculate the NIR and FEL frequency for a spectrum
:param spec: HSGCCD object to fit
:type spec: HighSidebandCCD
:param nir_units: str of desired units.
Options: wavenumber, eV, meV, THz, GHz, nm
:param thz_units: str of desired units.
Options: wavenumber, eV, meV, THz, GHz, nm
:param bad_points: How many bad points which shouldn't be used
to calculate the frequencies (generally because the last
few points are noisy and unreliable)
:return: <NIR freq>, <THz freq>
"""
if not hasattr(spec, "sb_results"):
spec.guess_sidebands()
spec.fit_sidebands()
sidebands = spec.sb_results[:, 0]
locations = spec.sb_results[:, 1]
errors = spec.sb_results[:, 2]
try:
p = np.polyfit(sidebands[1:bad_points],
# This is 1 because the peak picker function was calling the 10th order the 9th
locations[1:bad_points], deg=1)
except TypeError:
# if there aren't enough sidebands to fit, give -1
p = [-1, -1]
NIRfreq = p[1]
THzfreq = p[0]
if inspect_plots:
plt.figure("Frequency Fit")
plt.errorbar(sidebands, locations, errors, marker='o')
plt.errorbar(sidebands[:bad_points], locations[:bad_points],
errors[:bad_points], marker='o')
plt.plot(sidebands, np.polyval(p, sidebands))
converter = {
"eV": lambda x: x,
"meV": lambda x: 1000. * x,
"wavenumber": lambda x: 8065.6 * x,
"THz": lambda x: 241.80060 * x,
"GHz": lambda x: 241.80060 * 1e3 * x,
"nm": lambda x: 1239.83 / x
}
freqNIR = converter.get(nir_units, converter["eV"])(NIRfreq)
freqTHz = converter.get(thz_units, converter["eV"])(THzfreq)
return freqNIR, freqTHz
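# Hedged usage of calc_laser_frequencies; `spec` is a processed spectrum
# object (it will be fit automatically if it has no sb_results yet):
#
#     nir_eV, thz_eV = calc_laser_frequencies(spec)
#     nir_nm, thz_THz = calc_laser_frequencies(spec, nir_units="nm",
#                                              thz_units="THz")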
def get_data_and_header(fname, returnOrigin = False):
"""
Given a file to a raw data file, returns the data
and the json decoded header.
Can choose to return the origin header as well
:param fname: Filename to open
:return: data, header (dict)
"""
with open(fname) as fh:
line = fh.readline()
header_string = ''
while line[0]=='#':
header_string += line[1:]
line = fh.readline()
# image files don't have an origin header
if not "Images" in fname:
oh = line
# last readline in loop removes first line in Origin Header
# strip the remaining two
oh += fh.readline()
oh += fh.readline()[:-1] #remove final \n
# data = np.genfromtxt(fh, delimiter=',')
data = np.genfromtxt(fname, delimiter=',')
header = json.loads(header_string)
if returnOrigin:
return data, header, oh
return data, header
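# Hedged example of get_data_and_header (the path is made up):
#
#     data, params = get_data_and_header("Processed Data/hsg_0001.txt")
#     data, params, origin_header = get_data_and_header(
#         "Processed Data/hsg_0001.txt", returnOrigin=True)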
def natural_glob(*args):
# glob/python sort alphabetically, so 1, 10, 11, .., 2, 21,
    # but I sometimes want "natural" sorting: 1, 2, 3, ..., 10, 11, 12, ..., 20, 21, 22, ...
# There's tons of stack overflows, so I grabbed one of them. I put it in here
# because I use it all the damned time. I also almost always use it when
# glob.glob'ing, so just internally do it that way
#
# This is taken from
# https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside
import re
def atoi(text):
try:
return int(text)
except ValueError:
return text
# return int(text) if text.isdigit() else text
def natural_keys(text):
'''
alist.sort(key=natural_keys) sorts in human order
http://nedbatchelder.com/blog/200712/human_sorting.html
(See Toothy's implementation in the comments)
'''
        return [atoi(c) for c in re.split(r'(-?\d+)', text)]
return sorted(glob.glob(os.path.join(*args)), key=natural_keys)
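# Illustration of the "natural" ordering natural_glob enforces: plain sorted()
# would give ['f1.txt', 'f10.txt', 'f2.txt'], whereas the natural_keys sort
# yields ['f1.txt', 'f2.txt', 'f10.txt']. Typical (hypothetical) call:
#
#     files = natural_glob("Processed Data", "*.txt")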
def convertTime(timeStr):
"""
The data file headers have the timestamp of data collection. Sometimes you want to
convert that to numbers for data's sake, but I constantly forget the functions
to convert it from the time-stamp string. So here you go
:param timeStr: the time as a string from the data file
:return: int of the time since the epoch
"""
import time
return time.mktime(time.strptime(timeStr, "%x %X%p"))
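# Hedged example of convertTime; the string must match the locale-dependent
# "%x %X%p" pattern that the data-file headers use:
#
#     convertTime("01/31/18 03:45:12PM")   # -> seconds since the epoch (float)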
# photonConverter[A][B](x):
# convert x from A to B.
photon_converter = {
"nm": {"nm": lambda x: x, "eV": lambda x:1239.84/x, "wavenumber": lambda x: 10000000./x},
"eV": {"nm": lambda x: 1239.84/x, "eV": lambda x: x, "wavenumber":lambda x: 8065.56 * x},
"wavenumber": {"nm": lambda x: 10000000./x, "eV": lambda x: x/8065.56, "wavenumber": lambda x: x}
}
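# Hedged example: converting an 800 nm photon with the table above,
#
#     photon_converter["nm"]["eV"](800)          # ~1.55 eV
#     photon_converter["nm"]["wavenumber"](800)  # 12500 cm^-1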
####################
# Smoothing functions
####################
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.
The Savitzky-Golay filter removes high frequency noise from data.
It has the advantage of preserving the original shape and
features of the signal better than other types of filtering
approaches, such as moving averages techniques.
Parameters
----------
y : array_like, shape (N,)
the values of the time history of the signal.
window_size : int
the length of the window. Must be an odd integer number.
order : int
the order of the polynomial used in the filtering.
Must be less then `window_size` - 1.
deriv: int
the order of the derivative to compute (default = 0 means only smoothing)
Returns
-------
ys : ndarray, shape (N)
the smoothed signal (or it's n-th derivative).
Notes
-----
The Savitzky-Golay is a type of low-pass filter, particularly
suited for smoothing noisy data. The main idea behind this
approach is to make for each point a least-square fit with a
polynomial of high order over a odd-sized window centered at
the point.
Examples
--------
t = np.linspace(-4, 4, 500)
y = np.exp( -t**2 ) + np.random.normal(0, 0.05, t.shape)
ysg = savitzky_golay(y, window_size=31, order=4)
import matplotlib.pyplot as plt
plt.plot(t, y, label='Noisy signal')
plt.plot(t, np.exp(-t**2), 'k', lw=1.5, label='Original signal')
plt.plot(t, ysg, 'r', label='Filtered signal')
plt.legend()
plt.show()
References
----------
.. [1] <NAME>, <NAME>, Smoothing and Differentiation of
Data by Simplified Least Squares Procedures. Analytical
Chemistry, 1964, 36 (8), pp 1627-1639.
.. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
W.H. Press, <NAME>, <NAME>, <NAME>
Cambridge University Press ISBN-13: 9780521880688
source:
http://scipy.github.io/old-wiki/pages/Cookbook/SavitzkyGolay
"""
import numpy as np
from math import factorial
try:
        window_size = np.abs(int(window_size))
        order = np.abs(int(order))
except ValueError as msg:
raise ValueError("window_size and order have to be of type int")
if window_size % 2 != 1 or window_size < 1:
raise TypeError("window_size size must be a positive odd number")
if window_size < order + 2:
raise TypeError("window_size is too small for the polynomials order")
order_range = list(range(order + 1))
half_window = (window_size - 1) // 2
# precompute coefficients
    b = np.array([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * factorial(deriv)
# pad the signal at the extremes with
# values taken from the signal itself
firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
y = np.concatenate((firstvals, y, lastvals))
return np.convolve(m[::-1], y, mode='valid')
def fft_filter(data, cutoffFrequency=1520, inspectPlots=False, tryFitting=False, freqSigma=50, ftol=1e-4,
isInteractive=False):
"""
Performs an FFT, then fits a peak in frequency around the
input with the input width.
If only data is given, it will cut off all frequencies above the default value.
inspectPlots = True will plot the FFT and the filtering at each step, as well as the results
tryFitting = True will try to fit the peak in frequency space centered at the cutoffFrequency
and with a width of freqSigma, using the background function above. Will replace
the peak with the background function. Feature not very well tested
isInteractive: Will pop up interactive windows to move the cutoff frequency and view the
FFT in real time. Requires pyqtgraph and PyQt4 installed (pyqt4 is standard with
anaconda/winpython, but pyqtgraph is not)
"""
# Make a copy so we can return the same thing
retData = np.array(data)
x = np.array(retData[:, 0])
y = np.array(retData[:, -1])
    # Lets you play with zero padding.
zeroPadding = len(x)
N = len(x)
if isInteractive:
try:
import pyqtgraph as pg
from PyQt5 import QtCore, QtWidgets
except:
raise ImportError("Cannot do interactive plotting without pyqtgraph installed")
        # Need to make some basic classes for signals and slots to make things simple
class FFTWin(pg.PlotWindow):
sigCutoffChanged = QtCore.pyqtSignal(object)
sigClosed = QtCore.pyqtSignal()
def __init__(self, x, y):
super(FFTWin, self).__init__()
# Plot the log of the data,
# it breaks text boxes to do semilogy
self.plotItem.plot(x, np.log10(y), pen='k')
# The line for picking the cutoff
# Connect signals so the textbox updates and the
                # realspace window can recalculate the FFT
self.line = pg.InfiniteLine(cutoffFrequency, movable=True)
self.line.sigPositionChanged.connect(lambda x: self.sigCutoffChanged.emit(x.value()))
self.line.sigPositionChanged.connect(self.updateText)
self.addItem(self.line)
# Set up the textbox so user knows the frequency
# If this ends up being useful, may need
# a way to set the cutoff manually
self.text = pg.TextItem("{:.4f}".format(cutoffFrequency))
self.addItem(self.text)
self.text.setPos(min(x), max(np.log10(y)))
# Cheap magic to get the close event
# of the main window. Need to keep a reference
# to the old function so that we can call it
# to properly clean up afterwards
self.oldCloseEvent = self.win.closeEvent
self.win.closeEvent = self.closeEvent
def updateText(self, val):
self.text.setText("{:.4f}".format(val.value()))
def closeEvent(self, ev):
# Just emit that we've been closed and
# pass it along to the window closer
self.sigClosed.emit()
self.oldCloseEvent(ev)
class RealWin(pg.PlotWindow):
sigClosed = QtCore.pyqtSignal()
def __init__(self, data, fftWin):
super(RealWin, self).__init__()
# To connect signals from it
self.fftWin = fftWin
self.data = data
# Start off with the FFT given by the original
# inputted cutoff
self.updatePlot(cutoffFrequency)
# See above comments
self.oldClose = self.win.closeEvent
self.win.closeEvent = self.closeEvent
fftWin.sigCutoffChanged.connect(self.updatePlot)
# Close self if other window is closed
fftWin.sigClosed.connect(self.win.close)
def updatePlot(self, val):
self.plotItem.clear()
self.plotItem.plot(*self.data.T, pen=pg.mkPen('k', width=3))
# Recursion! Call this same function to do the FFT
newData = fft_filter(self.data, cutoffFrequency=val)
self.plotItem.plot(*newData.T, pen=pg.mkPen('r', width=3))
def closeEvent(self, ev):
self.sigClosed.emit()
try:
self.fftWin.win.close()
except:
pass
self.oldClose(ev)
k = fft.fftfreq(zeroPadding, x[1] - x[0])
Y = fft.fft(y, n=zeroPadding)
# Make the windows
fftWin = FFTWin(k, np.abs(Y))
realWin = RealWin(np.array(retData), fftWin)
realWin.show()
# Need to pause the program until the frequency is selected
# Done with this qeventloop.
loop = QtCore.QEventLoop()
realWin.sigClosed.connect(loop.exit)
loop.exec_()
# Return with the desired output value
return fft_filter(retData, fftWin.line.value())
if inspectPlots:
plt.figure("Real Space")
plt.plot(x, y, label="Input Data")
    # Replicate origin directly
# http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
# "rotate" the data set so it ends at 0,
# enforcing a periodicity in the data. Otherwise
# oscillatory artifacts result at the ends
onePerc = int(0.01 * N)
x1 = np.mean(x[:onePerc])
x2 = np.mean(x[-onePerc:])
y1 = np.mean(y[:onePerc])
y2 = np.mean(y[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x + b
y -= flattenLine
if inspectPlots:
plt.plot(x, y, label="Rotated Data")
# Perform the FFT and find the appropriate frequency spacing
k = fft.fftfreq(zeroPadding, x[1] - x[0])
Y = fft.fft(y, n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(k, np.abs(Y), label="Raw FFT")
if tryFitting:
try:
# take +/- 4 sigma points around peak to fit to
sl = np.abs(k - cutoffFrequency).argmin() + np.array([-1, 1]) * 10 * freqSigma / np.abs(k[0] - k[1])
sl = slice(*[int(j) for j in sl])
p0 = [cutoffFrequency,
                  np.abs(Y)[sl].max() * freqSigma,  # estimate the height based on the max in the set
freqSigma,
0.14, 2e3, 1.1] # magic test numbers, they fit the background well
if inspectPlots:
plt.semilogy(k[sl], gaussWithBackground(k[sl], *p0), label="Peak with initial values")
p, _ = curve_fit(gaussWithBackground, k[sl], np.abs(Y)[sl], p0=p0, ftol=ftol)
if inspectPlots:
plt.semilogy(k[sl], gaussWithBackground(k[sl], *p), label="Fitted Peak")
# Want to remove data within 5 sigma ( arb value... )
st = int(p[0] - 5 * p[2])
en = int(p[0] + 5 * p[2])
# Find get the indices to remove.
refitRangeIdx = np.argwhere((k > st) & (k < en))
refitRangeIdxNeg = np.argwhere((k < -st) & (k > -en))
            # Replace the data with the background
# Note: abuses the symmetry of the FFT of a real function
# to get the negative side of the data
Y[refitRangeIdx] = background(k[refitRangeIdx], *p[-2:])
Y[refitRangeIdxNeg] = background(k[refitRangeIdx], *p[-2:])[::-1]
except:
print("ERROR: Trouble fitting the peak in frequency space.\n\t Defaulting to cutting off")
# Assume cutoffFrequency was the peak, not the actual cutoff
# Leaving it alone means half the peak would remain and the data
# wouldn't really be smoothed
cutoffFrequency -= 5 * freqSigma
# Reset this so the next part gets called
tryFitting = False
# "if not" instead of "else" because if the above
# fitting fails, we can default to the sharp cutoff
if not tryFitting:
# Define where to remove the data
st = cutoffFrequency
en = int(max(k)) + 1
# Find the indices to remove the data
refitRangeIdx = np.argwhere((k > st) & (k < en))
refitRangeIdxNeg = np.argwhere((k < -st) & (k > -en))
# Kill it all after the cutoff
Y[refitRangeIdx] = 0
Y[refitRangeIdxNeg] = 0
smoothIdx = np.argwhere((-st < k) & (k < st))
smoothr = -1. / cutoffFrequency ** 2 * k[smoothIdx] ** 2 + 1
Y[smoothIdx] *= smoothr
if inspectPlots:
plt.plot(k, np.abs(Y), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
# invert the FFT
y = fft.ifft(Y, n=zeroPadding)
# unshift the data
y += flattenLine
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y = np.abs(y)[:len(x)]
if inspectPlots:
plt.figure("Real Space")
print(x.size, y.size)
plt.plot(x, y, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
retData[:, 0] = x
retData[:, -1] = y
return retData
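# Hedged usage sketch for fft_filter on an array whose first column is the
# abscissa and whose last column is the signal:
#
#     smoothed = fft_filter(raw_data, cutoffFrequency=1520)
#     smoothed = fft_filter(raw_data, inspectPlots=True)  # eyeball the FFT
#
# With isInteractive=True (requires pyqtgraph), a window lets you drag the
# cutoff by hand and the filtered result is returned when it is closed.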
def low_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate origin directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
This uses a 50th order Butterworth filter.
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
# print "zero padding", zeroPadding # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
# print abs(y_fourier[-10:])
butterworth = np.sqrt(1 / (1 + (x_fourier / cutoff) ** 100))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
# print "y_fourier", len(y_fourier)
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
# print x_vals.size, y_vals.size
plt.plot(x_vals, y_vals, linewidth=3, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
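# low_pass_filter above and the high/band-pass filters below share a calling
# convention; a hedged sketch (cutoff is in the Fourier-conjugate units of
# x_vals):
#
#     lp = low_pass_filter(x, y, cutoff=1500, inspectPlots=False)
#     hp = high_pass_filter(x, y, cutoff=15, inspectPlots=False)
#     bp = band_pass_filter(x, y, cutoff=(15, 1500), inspectPlots=False)
#
# band_pass_filter expects a two-element cutoff: (low edge, high edge).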
def high_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate origin directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
This uses a 50th order Butterworth filter.
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
print("zero padding", zeroPadding) # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
print(abs(y_fourier[-10:]))
butterworth = 1 - np.sqrt(1 / (1 + (x_fourier / cutoff) ** 50))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
print("y_fourier", len(y_fourier))
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
y_vals = np.abs(y_vals)
if inspectPlots:
plt.figure("Real Space")
print(x_vals.size, y_vals.size)
plt.plot(x_vals, y_vals, label="Smoothed Data")
a = plt.legend()
a.draggable(True)
return np.column_stack((x_vals, y_vals))
def band_pass_filter(x_vals, y_vals, cutoff, inspectPlots=True):
"""
    Replicate origin directly
http://www.originlab.com/doc/Origin-Help/Smooth-Algorithm
"rotate" the data set so it ends at 0,
enforcing a periodicity in the data. Otherwise
oscillatory artifacts result at the ends
This uses a 50th order Butterworth filter.
"""
x_vals, y_vals = fourier_prep(x_vals, y_vals)
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Non-nan Data")
zeroPadding = len(x_vals)
print("zero padding", zeroPadding) # This needs to be this way because truncation is bad and actually zero padding
N = len(x_vals)
onePerc = int(0.01 * N)
x1 = np.mean(x_vals[:onePerc])
x2 = np.mean(x_vals[-onePerc:])
y1 = np.mean(y_vals[:onePerc])
y2 = np.mean(y_vals[-onePerc:])
m = (y1 - y2) / (x1 - x2)
b = y1 - m * x1
flattenLine = m * x_vals + b
y_vals -= flattenLine
if inspectPlots:
plt.figure("Real Space")
plt.plot(x_vals, y_vals, label="Rotated Data")
# even_data = np.column_stack((x_vals, y_vals))
# Perform the FFT and find the appropriate frequency spacing
x_fourier = fft.fftfreq(zeroPadding, x_vals[1] - x_vals[0])
y_fourier = fft.fft(y_vals) # , n=zeroPadding)
if inspectPlots:
plt.figure("Frequency Space")
plt.semilogy(x_fourier, np.abs(y_fourier), label="Raw FFT")
# Define where to remove the data
band_start = cutoff
band_end = int(max(abs(x_fourier))) + 1
'''
# Find the indices to remove the data
refitRangeIdx = np.argwhere((x_fourier > band_start) & (x_fourier <= band_end))
refitRangeIdxNeg = np.argwhere((x_fourier < -band_start) & (x_fourier >= -band_end))
#print "x_fourier", x_fourier[795:804]
#print "max(x_fourier)", max(x_fourier)
#print "refitRangeIdxNeg", refitRangeIdxNeg[:-400]
# Kill it all after the cutoff
y_fourier[refitRangeIdx] = 0
y_fourier[refitRangeIdxNeg] = 0
# This section does a square filter on the remaining code.
smoothIdx = np.argwhere((-band_start < x_fourier) & (x_fourier < band_start))
smoothr = -1 / band_start**2 * x_fourier[smoothIdx]**2 + 1
y_fourier[smoothIdx] *= smoothr
'''
print(abs(y_fourier[-10:]))
butterworth = 1 - np.sqrt(1 / (1 + (x_fourier / cutoff[0]) ** 50))
butterworth *= np.sqrt(1 / (1 + (x_fourier / cutoff[1]) ** 50))
y_fourier *= butterworth
if inspectPlots:
plt.plot(x_fourier, np.abs(y_fourier), label="FFT with removed parts")
a = plt.legend()
a.draggable(True)
print("y_fourier", len(y_fourier))
# invert the FFT
y_vals = fft.ifft(y_fourier, n=zeroPadding)
# using fft, not rfft, so data may have some
# complex parts. But we can assume they'll be negligible and
# remove them
# ( Safer to use np.real, not np.abs? )
# Need the [:len] to remove zero-padded stuff
y_vals = y_vals[:len(x_vals)]
# unshift the data
y_vals += flattenLine
    y_vals = np.abs(y_vals)
    if inspectPlots:
        plt.figure("Real Space")
        plt.plot(x_vals, y_vals, label="Smoothed Data")
        a = plt.legend()
        a.draggable(True)
    return np.column_stack((x_vals, y_vals))
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 15 22:38:18 2020
@author: alankar
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import interpolate
from matplotlib.lines import Line2D
import pickle
#Constants
kB = 1.3807e-16 #Boltzman's Constant in CGS
mp = 1.6726231e-24 #Mass of a Proton in CGS
GAMMA = 5./3 #Specific Heat Ratio for an Ideal Gas
fig = plt.figure(figsize=(30,30))
CHI = np.linspace(1.0,1000, 100000)
M1=0.5
M2=1.0
M3=1.5
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
#Problem Constants
mu = 0.672442
Tcl = 1.e4 #K
ncl = 0.1 # particles per cm^3
T_hot = CHI*Tcl
LAMBDA_HOT= LAMBDA(T_hot) #erg cm3 s-1 #LAMBDA at T_hot #GET IT FROM COOLTABLE.DAT
Tmix= np.sqrt(Tcl*T_hot) #K
LAMBDA_MIX = LAMBDA(Tmix) #erg cm3 s-1 #LAMBDA at T_mix #GET IT FROM COOLTABLE.DAT
ALPHA = 1.
n_hot=ncl/CHI
#Normalized Quantities
Tcl_4 = Tcl/1e4 #K
P3 = (ncl*Tcl)/1e3 #cm-3 K
CHI_100=(CHI)/100
LAMBDA_HOT_N23 = LAMBDA_HOT/1e-23 #erg cm3 s-1
LAMBDA_MIX_N21_4 = LAMBDA_MIX/(10**-21.4) #erg cm3 s-1
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
R1= (2 * (Tcl_4**(5/2)) * M1 * CHI_100 )/(P3*LAMBDA_MIX_N21_4*ALPHA)
R2= (2 * (Tcl_4**(5/2)) * M2 * CHI_100 )/(P3*LAMBDA_MIX_N21_4*ALPHA)
R3= (2 * (Tcl_4**(5/2)) * M3 * CHI_100 )/(P3*LAMBDA_MIX_N21_4*ALPHA)
pc=3.098e18
tcc1= (np.sqrt(CHI)*R1*pc)/(M1*cs_hot)
tcc2= (np.sqrt(CHI)*R2*pc)/(M2*cs_hot)
tcc3= (np.sqrt(CHI)*R3*pc)/(M3*cs_hot)
f1=0.9*((2*R1*(n_hot/0.01))**0.3)*((M1*(cs_hot/1.e7))**0.6)
f2=0.9*((2*R2*(n_hot/0.01))**0.3)*((M2*(cs_hot/1.e7))**0.6)
f3=0.9*((2*R3*(n_hot/0.01))**0.3)*((M3*(cs_hot/1.e7))**0.6)
t_life_pred1=10*tcc1*f1
t_life_pred2=10*tcc2*f2
t_life_pred3=10*tcc3*f3
t_cool_hot=((1/(GAMMA-1))*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y1=np.log10(t_life_pred1/Myr)
Y2=np.log10(t_life_pred2/Myr)
Y3=np.log10(t_life_pred3/Myr)
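# (For reference: tcc = sqrt(chi) * R * pc / (M * cs_hot) is the cloud-crushing
#  time, f is the fitted scaling factor defined above, and t_life_pred =
#  10 * tcc * f is the predicted cloud lifetime that gets plotted against the
#  hot-phase cooling time t_cool_hot.)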
plt.plot(X,Y1,label='Gronke-Oh Criterion for $\mathrm{\mathcal{M}=0.5}$',linewidth=4.5)
plt.plot(X,Y2,label='Gronke-Oh Criterion for $\mathrm{\mathcal{M}=1.0}$',linewidth=4.5, color='red')
plt.plot(X,Y3,label='Gronke-Oh Criterion for $\mathrm{\mathcal{M}=1.5}$',linewidth=4.5, color='green')
############################################
data1=np.loadtxt('Li_pt_dest.dat')
X1=data1[:,0]
Y1=data1[:,1]
plt.plot(X1,Y1,'o', color='gray', markersize=30, label='Li Destroyed Clouds',alpha=0.5)
data1=np.loadtxt('Li_pt_grth.dat')
X1=data1[:,0]
Y1=data1[:,1]
plt.plot(X1,Y1,'^', color='gray', markersize=30, label='Li Growing Clouds', alpha=0.5)
#######################################################
############################################
M=0.5
R= [10.36,3.49]
pc=3.098e18
T_hot=1.e6
n_hot=0.001
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (10*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:blue', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,label=r'Growing Clouds in Our Simulations for $\mathrm{\mathcal{M}=0.5}$',fillstyle=filling, **marker_style)
#######################################################
M=1.0
R= [14.0,5.47]
pc=3.098e18
T_hot=1.e6
n_hot=0.001
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (10*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:red', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,label=r'Growing Clouds in Our Simulations for $\mathrm{\mathcal{M}=1.0}$',fillstyle=filling, **marker_style)
#############################################################
M=1.5
R= [17.0,7.16]
pc=3.098e18
T_hot=1.e6
n_hot=0.001
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (10*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:green', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,label=r'Growing Clouds in Our Simulations for $\mathrm{\mathcal{M}=1.5}$',fillstyle=filling, **marker_style)
#######################################################
M=0.5
R=[23.92,124.06]
pc=3.098e18
T_hot=3.e6
n_hot=0.1/300
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (17.32*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:blue', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,fillstyle=filling, **marker_style)
##############################################################
M=1.0
R=[37.64,169.02]
pc=3.098e18
T_hot=3.e6
n_hot=0.1/300
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (17.32*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:red', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,fillstyle=filling, **marker_style)
#############################################################
M=1.5
R=[49.01,202.45]
pc=3.098e18
T_hot=3.e6
n_hot=0.1/300
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (17.32*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:green', linestyle='None', marker='^',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,fillstyle=filling, **marker_style)
#######################################################
M=1.0
R= [1.0,0.5]
pc=3.098e18
T_hot=1.e6
n_hot=0.001
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))
tcc= (10*np.asarray(R)*pc)/(M*cs_hot)
f=0.9*((2*np.asarray(R)*(n_hot/0.01))**0.3)*((M*(cs_hot/1.e7))**0.6)
t_life_pred=10*tcc*f
t_cool_hot=(1.5*kB*T_hot)/(n_hot*LAMBDA_HOT)
Myr=365*24*60*60*1.e6
X=np.log10(t_cool_hot/Myr)
Y=np.log10(t_life_pred/Myr)
X,Y=np.meshgrid(X,Y)
marker_style = dict(color='tab:red', linestyle='None', marker='o',
markersize=30, markerfacecoloralt='tab:red', markeredgewidth=5)
filling = Line2D.fillStyles[-1]
plt.plot(X,Y,label=r'Destroyed Clouds in Our Simulations for $\mathrm{\mathcal{M}=1.0}$',fillstyle=filling, **marker_style)
#######################################################3
#######################################################
M=1.0
R= [2.8,1.5]
pc=3.098e18
T_hot=3.e6
n_hot=0.1/300
cooling = np.loadtxt('cooltable.dat') #solar metallicity
LAMBDA = interpolate.interp1d(cooling[:,0], cooling[:,1])
LAMBDA_HOT=LAMBDA(T_hot)
cs_hot=np.sqrt((GAMMA*kB*T_hot)/(mu*mp))