prompt
stringlengths 15
655k
| completion
stringlengths 3
32.4k
| api
stringlengths 8
52
|
---|---|---|
import sqlite3
import numpy as np
import Helpers
conn = sqlite3.connect('../data/SandP500.sqlite3')
all_tickers = Helpers.get_all_tickers(conn)
cursor = conn.cursor()
prices_at_start = np.array([])
prices_at_end = np.array([])
for ticker in all_tickers:
cursor.execute("SELECT closing_price "
"FROM historical_prices "
f"WHERE ticker is '{ticker}'"
"AND date is '2013-02-08'")
all_rows = cursor.fetchall()
if len(all_rows) == 0:
continue
print(ticker)
price_at_start = all_rows[0]
prices_at_start =
|
np.append(prices_at_start, price_at_start)
|
numpy.append
|
import numpy as np
from ..visualization import Viewer
from ..utils import Subject, Observer, deprecated, matrices, NList
import copy
from numba import njit, int64, float64
from numba.types import ListType as LT
@njit(int64[:](LT(LT(int64))), cache=True)
def _valence(adj_x2y):
valences = np.zeros(len(adj_x2y), dtype=np.int64)
for idx, row in enumerate(adj_x2y):
valences[idx] = len(row)
return valences
class Clipping(object):
class __Flip(object):
def __init__(self):
self.x = False
self.y = False
self.z = False
def __init__(self):
self.min_x = None
self.max_x = None
self.min_y = None
self.max_y = None
self.min_z = None
self.max_z = None
self.flip = self.__Flip()
super(Clipping, self).__init__()
def __repr__(self):
return ("Clipping:\n" +
f"min_x: {self.mini_x} \tmax_x: {self.max_x} \t{('flipped' if self.flip.x else '')}\n" +
f"min_y: {self.min_y} \tmax_y: {self.max_y} \t{('flipped' if self.flip.y else '')}\n" +
f"min_z: {self.min_z} \tmax_z: {self.max_z} \t{('flipped' if self.flip.z else '')}\n")
class AbstractMesh(Observer, Subject):
"""
This class represents a generic mesh. It must be extended by a specific mesh class. It stores all the information
shared among the different kind of supported meshes.
"""
def __init__(self):
self.__boundary_needs_update = True
self.__boundary_cached = None
self.__finished_loading = False
self._dont_update = False
self.__poly_size = None
self.vertices = None #npArray (Nx3)
self.__edges = None #npArray (Nx2)
self.__polys = None #npArray (NxM)
self.labels = None # npArray (Nx1)
self.uvcoords = None
self.coor = [] #Mappatura indici coordinate uv per faccia
self.texture = None
self.material = {}
self.smoothness = False
self.__adj_vtx2vtx = None
self.__adj_vtx2edge = None
self.__adj_vtx2poly = None #npArray (NxM)
self.__adj_edge2vtx = None
self.__adj_edge2edge = None
self.__adj_edge2poly = None
self.__adj_poly2vtx = None
self.__adj_poly2edge = None
self.__adj_poly2poly = None
self.__bounding_box = None #npArray (2x3)
self.__simplex_centroids = None #npArray (Nx1)
self.__clipping = Clipping()
self.__visible_polys = None
self.simplex_metrics = dict() #dictionary[propertyName : ((min, max), npArray (Nx1))]
self.__filename = ''
Observer.__init__(self)
Subject.__init__(self)
# ==================== METHODS ==================== #
def __setattr__(self, key, value):
self.__dict__[key] = value
if key[0] != "_" and self.__finished_loading:
self.update()
def copy(self):
new = type(self)()
for key in self.__dict__.keys():
if "observer" not in key and ("adj" not in key or "poly2poly" in key):
setattr(new, key, copy.deepcopy(getattr(self, key)))
return new
def update(self):
"""
Update the mesh manually when the Viewer is set as not reactive.
"""
self.__boundary_needs_update = True
self.__update_bounding_box()
if (not self._dont_update):
self._notify()
def show(self, width = 700, height = 700, mesh_color = None, reactive = False):
"""
Show the mesh within the current cell. It is possible to manipulate the mesh through the UI.
Parameters:
UI (bool): Show or not show the graphic user interface of the viewer
width (int): The width of the canvas
height (int): The height of the canvas
Return:
Viewer: The viewer object
"""
view = Viewer(self, width = width, height = height, reactive=reactive)
view.show()
return view
def set_clipping(self, min_x = None, max_x = None,
min_y = None, max_y = None,
min_z = None, max_z = None,
flip_x = None, flip_y = None, flip_z = None):
"""
clipping the mesh along x, y and z axes. It doesn't affect the geometry of the mesh.
Parameters:
min_x (float): The minimum value of x
max_x (float): The maximum value of x
min_y (float): The minimum value of y
max_y (float): The maximum value of y
min_z (float): The minimum value of z
max_z (float): The maximum value of z
"""
if min_x is not None:
self.__clipping.min_x = min_x
if max_x is not None:
self.__clipping.max_x = max_x
if min_y is not None:
self.__clipping.min_y = min_y
if max_y is not None:
self.__clipping.max_y = max_y
if min_z is not None:
self.__clipping.min_z = min_z
if max_z is not None:
self.__clipping.max_z = max_z
if flip_x is not None:
self.__clipping.flip.x = flip_x
if flip_y is not None:
self.__clipping.flip.y = flip_y
if flip_z is not None:
self.__clipping.flip.z = flip_z
self.__boundary_needs_update = True
self.update()
def reset_clipping(self):
"""
Set the clippings to the bounding box in order to show the whole mesh.
"""
self.set_clipping(min_x = self.bbox[0,0], max_x = self.bbox[1,0],
min_y = self.bbox[0,1], max_y = self.bbox[1,1],
min_z = self.bbox[0,2], max_z = self.bbox[1,2])
self.__boundary_needs_update = True
self.update()
def load_from_file(filename):
raise NotImplementedError('This method must be implemented in the subclasses')
def __compute_adjacencies(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def save_file(self, filename):
raise NotImplementedError('This method must be implemented in the subclasses')
def get_metric(self, property_name, id_element):
"""
Get a specific metric element from the dictionary of metrics 'simplex_metrics'.
Parameters:
property_name (string): The name of the wanted metric
id_element (int): The index of a specific element of the metric
Returns:
object: The specific metric element. The return type depends on the metric
"""
return self.simplex_metrics[property_name][id_element]
@property
def clipping(self):
"""
Return the clipping region of the current mesh.
"""
return self.__clipping
@property
def visible_polys(self):
return self.__visible_polys
def __compute_metrics(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def as_triangles_flat(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def as_edges_flat(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def _as_threejs_colors(self):
raise NotImplementedError('This method must be implemented in the subclasses')
def boundary(self):
"""
Compute the boundary of the current mesh. It only returns the faces that are inside the clipping
"""
min_x = self.clipping.min_x
max_x = self.clipping.max_x
min_y = self.clipping.min_y
max_y = self.clipping.max_y
min_z = self.clipping.min_z
max_z = self.clipping.max_z
flip_x = self.clipping.flip.x
flip_y = self.clipping.flip.y
flip_z = self.clipping.flip.z
centroids = np.array(self.poly_centroids)
x_range = np.logical_xor(flip_x,((centroids)[:,0] >= min_x) & (centroids[:,0] <= max_x))
y_range = np.logical_xor(flip_y,((centroids[:,1] >= min_y) & (centroids[:,1] <= max_y)))
z_range =
|
np.logical_xor(flip_z,((centroids[:,2] >= min_z) & (centroids[:,2] <= max_z)))
|
numpy.logical_xor
|
import os, math
import _pickle as pickle
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from sklearn import preprocessing
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--data-folder', default='data', help='Parent dir of the dataset')
parser.add_argument('--file-name', default='electricity.csv', help='Directory containing data.csv')
parser.add_argument('--pickle-name', default='electricity.pkl', help='Directory containing data.csv')
parser.add_argument('--horizon', type=int, default=24, help='Forecast horizon. Default=24')
parser.add_argument('--test', action='store_true', help='whenever to use test set only.')
parser.add_argument('--hop', action='store_true', help='Whether to use test set for validation') # default=False
if __name__ == '__main__':
args = parser.parse_args()
### load the data
dir_path = args.data_folder # './data'
file_name = args.file_name
if file_name=='electricity.csv':
train_start = '2012-01-01 00:00:00'
if args.test:
train_end = '2013-10-19 23:00:00'
test_start = '2014-05-20 00:00:00' #need additional 7 days as given info
test_end = '2014-12-31 23:00:00'
elif args.hop:
train_end = '2012-04-30 23:00:00'
test_start = '2012-04-24 00:00:00'
test_end = '2012-05-31 23:00:00'
else:
train_end = '2013-10-19 23:00:00'
test_start = '2013-10-20 00:00:00' #need additional 7 days as given info
test_end = '2014-12-31 23:00:00'
elif 'europe_power_system' in file_name:
train_start = '2015-01-01 00:00:00'
if args.test:
train_end = '2017-01-15 23:00:00'
test_start = '2017-06-17 00:00:00' #need additional 7 days as given info
test_end = '2017-11-30 23:00:00'
elif args.hop:
train_end = '2015-04-30 23:00:00'
test_start = '2015-04-24 00:00:00' #need additional 7 days as given info
test_end = '2015-05-31 23:00:00'
else:
train_end = '2017-01-15 23:00:00'
test_start = '2017-01-16 00:00:00' #need additional 7 days as given info
test_end = '2017-11-30 23:00:00'
df = pd.read_csv(os.path.join(dir_path, file_name), sep=",", index_col=0, parse_dates=True, decimal='.')
df = df.reset_index()
df = df.drop([df.columns[0]], axis=1).transpose()
dt = df.rename(columns=df.iloc[0]).values #.drop(df.index[0])
## The date range
date_list = pd.date_range(start=train_start, end=test_end)
date_list = pd.to_datetime(date_list)
yr = int(date_list.year[0])
hour_list = []
for nDate in date_list:
for nHour in range(24):
tmp_timestamp = nDate+timedelta(hours=nHour)
hour_list.append(tmp_timestamp)
hour_list = np.array(hour_list)
#print('hour_list', hour_list.shape[0])
#print('dt.shape[0]', dt.shape[0])
station_index = list(range(dt.shape[0]))
#if args.horizon ==36:
# sliding_window_dis = 24;
#else:
# sliding_window_dis = args.horizon;
#print('sliding_window_dis: ', sliding_window_dis)
sliding_window_dis = args.horizon; # 24;
input_len = 168;
output_len = args.horizon; #24;
sample_len = input_len + output_len; #192; #168+24
coef = args.horizon/24;
total_n = int((len(date_list) - 8)/coef) #800; ## The total days
test_n = int(len(pd.date_range(start=test_start, end=test_end))/coef) #7 ## The testing days, day of the last 7 days
train_n = total_n - test_n ## The training days
#print('train_n', train_n)
#print('test_n', test_n)
trainX_list = [];trainX2_list = [];trainY_list = [];trainY2_list = []
testX_list = [];testX2_list = [];testY_list = [];testY2_list = []
#for station in station_index:
for station in station_index:
print('Station', station)
sub_series = dt[station,1:].astype('float32')
sub_index = np.array(range(dt.shape[1]-1))-np.min(np.where(sub_series>0))
trainX = np.zeros(shape=(train_n, input_len)) ## The input series
trainY = np.zeros(shape=(train_n, output_len)) ## The output series
testX =
|
np.zeros(shape=(test_n, input_len))
|
numpy.zeros
|
import numpy as np
import sys, os
if __name__== "__main__":
# read samples mesh gids
smgids = np.loadtxt("sample_mesh_gids.dat", dtype=int)
print(smgids)
# read full velo
fv = np.loadtxt("./full/velo.txt")
# read full velo
fullJ = np.loadtxt("./full/jacobian.txt")
# read sample mesh velo
sv = np.loadtxt("velo.txt")
# read sample mesh jac
sjac = np.loadtxt("jacobian.txt")
maskedVelo = []
maskedJacob= []
for i in smgids:
maskedVelo.append(fv[i])
maskedJacob.append(fullJ[i,:])
maskedVelo = np.array(maskedVelo)
maskedJacob = np.array(maskedJacob)
assert(np.allclose(maskedVelo.shape, sv.shape))
assert(np.isnan(sv).all() == False)
assert(np.isnan(fv).all() == False)
assert(np.allclose(sv, maskedVelo,rtol=1e-8, atol=1e-10))
assert(
|
np.allclose(maskedJacob.shape, sjac.shape)
|
numpy.allclose
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module for tools used in vaspy
"""
import bz2
from itertools import zip_longest
import os
import re
import numpy as np
from typing import List, Iterable, Sequence, Tuple, Union, IO, Any, Optional
def open_by_suffix(filename: str) -> IO[str]:
"""Open file."""
if os.path.splitext(filename)[1] == ".bz2":
thefile = bz2.open(filename, mode="rt")
else:
thefile = open(filename, mode="rt")
return thefile
def each_slice(
iterable: Iterable, n: int, fillvalue: Optional[float] = None
) -> Iterable[Any]:
"""each_slice(iterable, n[, fillvalue]) => iterator
make new iterator object which get n item from [iterable] at once.
"""
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
_RERANGE = re.compile(r"(\d+)-(\d+)")
_RESINGLE = re.compile(r"\d+")
def atom_selection_to_list(
input_str: str, number: bool = True
) -> List[Union[int, str]]:
"""Return list of ordered "String" represents the number.
Parameters
----------
input_str: str
range of the atoms. the numbers deliminated by "-" or ","
Returns
--------
list
ordered "String" represents the number.
Example
--------
>>> atom_selection_to_list("1-5,8,8,9-15,10", False)
['1', '10', '11', '12', '13', '14', '15', '2', '3', '4', '5', '8', '9']
>>> atom_selection_to_list("1-5,8,8,9-15,10")
[1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13, 14, 15]
"""
array = input_str.split(",")
output = set()
for each in array:
if re.search(_RERANGE, each):
start, stop = re.findall(_RERANGE, each)[0]
# Version safety
output |= set(str(i) for i in range(int(start), int(stop) + 1))
elif re.search(_RESINGLE, each):
output.add(each)
if number:
return sorted(int(i) for i in output)
return sorted(list(output))
def atomtypes_atomnums_to_atoms(
atomtypes: Iterable[str], atomnums: Iterable[int]
) -> Tuple[str, ...]:
"""Return list representation for atom in use.
Parameters
------------
atomtypes: list
atom names
atomnums: list
atom numbers
Examples
--------
>>> test_nums = [2, 3, 2, 1]
>>> test_elements = ['Si', 'Ag', 'H', 'Si']
>>> atomtypes_atomnums_to_atoms(test_elements, test_nums)
('Si', 'Si', 'Ag', 'Ag', 'Ag', 'H', 'H', 'Si')
"""
atoms = []
for elem, nums in zip(atomtypes, atomnums):
for _ in range(nums):
atoms.append(elem)
return tuple(atoms)
def atoms_to_atomtypes_atomnums(atoms: List[str]) -> Tuple[List[str], List[int]]:
r"""Return atomnums and atomtypes list.
Returns
--------
atomnums
list of number of atoms
atomtypes
list of atomnames
Examples
--------
>>> test = ['Si', 'Si', 'Ag', 'Ag', 'Ag', 'H', 'H', 'Si']
>>> atoms_to_atomtypes_atomnums(test)
(['Si', 'Ag', 'H', 'Si'], [2, 3, 2, 1])
"""
thelast = ""
atomnums: List[int] = []
atomtypes: List[str] = []
while atoms:
atom = atoms.pop(0)
if thelast == atom:
atomnums[-1] = atomnums[-1] + 1
else:
atomnums.append(1)
atomtypes.append(atom)
thelast = atom
return atomtypes, atomnums
def cuboid(crystal_axes: Union[Sequence[List[float]], np.ndarray]) -> np.ndarray:
"""Return the coordinates for cuboid that includes tetrahedron represented by vectors.
Parameters
------------
vectors: array-like.
Three vectors for tetrahedron. (Crystal axis a,b,c)
Return
"""
a = np.array(crystal_axes[0])
b = np.array(crystal_axes[1])
c =
|
np.array(crystal_axes[2])
|
numpy.array
|
__author__ = 'Mario'
import numpy as np
from scipy.stats import norm
class EuropeanLookback():
def __init__(self, strike, expiry, spot, sigma, rate, dividend, M, flag, N=100, Vbar=.12, alpha=.69):
# Instantiate variables
self.strike = float(strike)
self.expiry = float(expiry)
self.spot = float(spot)
self.sigma = float(sigma)
self.sigma2 = sigma2 = float(sigma)*float(sigma)
self.rate = float(rate)
self.dividend = float(dividend)
self.alpha = float(alpha)
self.dt = float(expiry)/float(N)
self.Vbar = Vbar
self.xi = xi = .025
self.N = N
self.M = int(M)
self.beta1 = -.88
self.beta2 = -.42
self.beta3 = -.0003
self.alphadt = self.alpha*self.dt
self.xisdt = self.xi*np.sqrt(self.dt)
self.erddt = np.exp((self.rate-self.dividend)*self.dt)
self.egam1 = np.exp(2*(self.rate-self.dividend)*self.dt)
self.egam2 = -2*self.erddt + 1
self.eveg1 = np.exp(-self.alpha*self.dt)
self.eveg2 = self.Vbar - self.Vbar*self.eveg1
self.VectorizedMonteCarlo(float(spot), float(rate), float(sigma),
float(expiry), int(N), int(M), float(strike),
float(sigma2), flag)
def VectorizedMonteCarlo(self, spot, rate, sigma, expiry, N, M, strike, sigma2, flag):
# Initialize the matrices
newdt = float(expiry)/float(N) # Get the dt for the Weiner process
dW = np.sqrt(newdt)*np.random.normal(0,1,(M,N-1)) # Create the brownian motion
W = np.cumsum(dW, axis=1) # Set up the Weiner Process as a Matrix
time = np.linspace(0, expiry, N) # Set the discrete time space
tempA = np.zeros((M,1)) # Create an initial zero vector for the first column
#This is the Random aspects and the stochastic volatility
Wnew = np.c_[tempA,W] # Append the Weiner matrix to the zeros vector
Vt = self.sigma2
Vtn = np.abs(Vt + self.alphadt*(self.Vbar - Vt) + self.xisdt*np.sqrt(Vt)*Wnew)
tt = np.tile(np.array(time),(M,1)) # Create a matrix of time x M so we have time for every iteration
self.tau = expiry-1
### Calculate the lookback option ###
assetpath1 = np.array(spot*np.exp((rate-.5*Vtn)*tt+np.sqrt(Vtn)*Wnew)) #European standard Antithetic1
assetpath2 = np.array(spot*np.exp((rate-.5*Vtn)*tt+
|
np.sqrt(Vtn)
|
numpy.sqrt
|
import unittest
from scipy.stats import gaussian_kde
from scipy.linalg import cholesky
import numpy as np
from pyapprox.bayesian_inference.laplace import *
from pyapprox.density import NormalDensity, ObsDataDensity
from pyapprox.utilities import get_low_rank_matrix
from pyapprox.randomized_svd import randomized_svd, MatVecOperator, \
adjust_sign_svd
from pyapprox.tests.test_density import helper_gradient
from pyapprox.multivariate_gaussian import MultivariateGaussian,\
CholeskySqrtCovarianceOperator, CovarianceOperator, get_operator_diagonal
from pyapprox.models.wrappers import evaluate_1darray_function_on_2d_array
class QuadraticMisfitModel(object):
def __init__(self,num_vars,rank,num_qoi,
obs=None,noise_covariance=None,Amatrix=None):
self.num_vars = num_vars
self.rank=rank
self.num_qoi=num_qoi
if Amatrix is None:
self.Amatrix = get_low_rank_matrix(num_qoi,num_vars,rank)
else:
self.Amatrix=Amatrix
if obs is None:
self.obs = np.zeros(num_qoi)
else:
self.obs=obs
if noise_covariance is None:
self.noise_covariance = np.eye(num_qoi)
else:
self.noise_covariance=noise_covariance
self.noise_covariance_inv = np.linalg.inv(self.noise_covariance)
def value(self,sample):
assert sample.ndim==1
residual = np.dot(self.Amatrix,sample)-self.obs
return np.asarray(
[0.5*np.dot(residual.T,np.dot(self.noise_covariance_inv,residual))])
def gradient(self,sample):
assert sample.ndim==1
grad = np.dot(self.Amatrix.T,np.dot(self.noise_covariance_inv,
np.dot(self.Amatrix,sample)-self.obs))
return grad
def gradient_set(self,samples):
assert samples.ndim==2
num_vars, num_samples = samples.shape
gradients = np.empty((num_vars,num_samples),dtype=float)
for i in range(num_samples):
gradients[:,i] = self.gradient(samples[:,i])
return gradients
def hessian(self,sample):
assert sample.ndim==1 or sample.shape[1]==1
return np.dot(
np.dot(self.Amatrix.T,self.noise_covariance_inv),self.Amatrix)
def __call__(self,samples,opts=dict()):
eval_type=opts.get('eval_type','value')
if eval_type=='value':
return evaluate_1darray_function_on_2d_array(
self.value,samples,opts)
elif eval_type=='value_grad':
vals = evaluate_1darray_function_on_2d_array(
self.value,samples,opts)
return np.hstack((vals,self.gradient_set(samples).T))
elif eval_type=='grad':
return self.gradient_set(samples).T
else:
raise Exception('%s is not a valid eval_type'%eval_type)
class LogUnormalizedPosterior(object):
def __init__(self, misfit, misfit_gradient, prior_pdf, prior_log_pdf,
prior_log_pdf_gradient):
"""
Initialize the object.
Parameters
----------
"""
self.misfit = misfit
self.misfit_gradient = misfit_gradient
self.prior_pdf = prior_pdf
self.prior_log_pdf = prior_log_pdf
self.prior_log_pdf_gradient = prior_log_pdf_gradient
def gradient(self,samples):
"""
Evaluate the gradient of the logarithm of the unnormalized posterior
likelihood(x)*posterior(x)
at a sample x
Parameters
----------
samples : (num_vars,num_samples) vector
The location at which to evalute the unnormalized posterior
Returns
-------
val : (1x1) vector
The logarithm of the unnormalized posterior
"""
if samples.ndim==1:
samples=samples[:,np.newaxis]
grad = -self.misfit_gradient(samples) + \
self.prior_log_pdf_gradient(samples)
return grad
def __call__(self,samples,opts=dict()):
"""
Evaluate the logarithm of the unnormalized posterior
likelihood(x)*posterior(x)
at samples x
Parameters
----------
sampels : np.ndarray (num_vars, num_samples)
The samples at which to evalute the unnormalized posterior
Returns
-------
values : np.ndarray (num_samples,1)
The logarithm of the unnormalized posterior
"""
if samples.ndim==1:
samples=samples[:,np.newaxis]
eval_type = opts.get('eval_type','value')
if eval_type=='value':
values = -self.misfit(samples)+self.prior_log_pdf(samples)
assert values.ndim==2
elif eval_type=='grad':
values = self.gradient(samples).T
elif eval_type=='value_grad':
values = -self.misfit(samples)+self.prior.log_pdf(samples)
grad = self.gradient(samples)
values = np.hstack((values,grad))
else:
raise Exception()
return values
def assert_ndarray_allclose(matrix1,matrix2,atol=1e-8,rtol=1e-5,msg=None):
"""
A more useful function for testing equivalence of numpy arrays.
Print norms used by np.allclose function to determine equivalence.
Matrix1 is considered the truth
"""
if not np.allclose(matrix1,matrix2,atol=atol,rtol=rtol):
if msg is not None:
print(msg)
diff = np.absolute(matrix1-matrix2)
abs_error = diff.max()
rel_error = (diff/np.absolute(matrix1)).max()
print('abs error:', abs_error)
print('rel error:', rel_error)
print('atol:', atol)
print('rtol:', rtol)
print('matrix1 shape',matrix1.shape)
print('matrix2 shape',matrix2.shape)
assert False, 'matrices are not equivalent'
def setup_quadratic_misfit_problem(prior,rank,noise_sigma2=1):
# Define observations
num_qoi = 2*rank
#assert num_qoi>=rank
noise_covariance = np.eye(num_qoi)*noise_sigma2
noise_covariance_inv = np.linalg.inv(noise_covariance)
# In high dimensions computing cholesky factor is too expensive.
# That is why we use PDE based operator
noise_covariance_chol_factor = np.linalg.cholesky(noise_covariance)
truth_sample = prior.generate_samples(1)[:,0]
num_vars = truth_sample.shape[0]
Amatrix = get_low_rank_matrix(num_qoi,num_vars,rank)
noise = np.dot(noise_covariance_chol_factor,
np.random.normal(0.,noise_sigma2,num_qoi))
obs = np.dot(Amatrix,truth_sample)+noise
# Define mistit model
misfit_model = QuadraticMisfitModel(num_vars,rank,num_qoi,Amatrix)
return misfit_model, noise_covariance_inv, obs
def posterior_covariance_helper(prior, rank, comparison_tol,
test_sampling=False, plot=False):
"""
Test that the Laplace posterior approximation can be obtained using
the action of the sqrt prior covariance computed using a PDE solve
Parameters
----------
prior : MultivariateGaussian object
The model which must be able to compute the action of the sqrt of the
prior covariance (and its tranpose) on a set of vectors
rank : integer
The rank of the linear model used to generate the observations
comparision_tol :
tolerances for each of the internal comparisons. This allows different
accuracy for PDE based operators
"""
# Define prior sqrt covariance and covariance operators
L_op = prior.sqrt_covariance_operator
# Extract prior information required for computing exact posterior
# mean and covariance
num_vars = prior.num_vars()
prior_mean = np.zeros((num_vars),float)
L = L_op(np.eye(num_vars),False)
L_T = L_op(np.eye(num_vars),True)
assert_ndarray_allclose(L.T,L_T,rtol=comparison_tol,atol=0,
msg='Comparing prior sqrt and transpose')
prior_covariance = np.dot(L,L_T)
prior_pointwise_variance = prior.pointwise_variance()
assert_ndarray_allclose(
np.diag(prior_covariance), prior_pointwise_variance, rtol=1e-14,
atol=0,msg='Comparing prior pointwise variance')
misfit_model, noise_covariance_inv, obs = setup_quadratic_misfit_problem(
prior,rank,noise_sigma2=1)
# Get analytical mean and covariance
prior_hessian = np.linalg.inv(prior_covariance)
exact_laplace_mean, exact_laplace_covariance = \
laplace_posterior_approximation_for_linear_models(
misfit_model.Amatrix, prior.mean, prior_hessian,
noise_covariance_inv, obs)
# Define prior conditioned misfit operator
sample = np.zeros(num_vars)
misfit_hessian_operator = MisfitHessianVecOperator(
misfit_model, sample, fd_eps=None)
LHL_op = PriorConditionedHessianMatVecOperator(
L_op, misfit_hessian_operator)
# For testing purposes build entire L*H*L matrix using operator
# and compare to result based upon explicit matrix mutiplication
LHL_op = LHL_op.apply(np.eye(num_vars),transpose=False)
H = misfit_model.hessian(sample)
assert np.allclose(H,np.dot(np.dot(
misfit_model.Amatrix.T,noise_covariance_inv),misfit_model.Amatrix))
LHL_mat = np.dot(L_T,np.dot(H,L))
assert_ndarray_allclose(LHL_mat, LHL_op, rtol=comparison_tol,
msg='Comparing prior matrix and operator based LHL')
# Test singular values obtained by randomized svd using operator
# are the same as those obtained using singular decomposition
Utrue,Strue,Vtrue = np.linalg.svd(LHL_mat)
Utrue, Vtrue = adjust_sign_svd(Utrue,Vtrue)
standard_svd_opts = {
'num_singular_values':rank, 'num_extra_samples':10}
svd_opts={'single_pass':True, 'standard_opts':standard_svd_opts}
L_post_op = get_laplace_covariance_sqrt_operator(
L_op, misfit_hessian_operator, svd_opts, weights=None,
min_singular_value=0.0)
#print np.max((Strue[:rank]-L_post_op.e_r)/Strue[0])
max_error = np.max(Strue[:rank]-L_post_op.e_r)
assert max_error/Strue[0]<comparison_tol, max_error/Strue[0]
assert_ndarray_allclose(Vtrue.T[:,:rank],L_post_op.V_r,rtol=1e-6,
msg='Comparing eigenvectors')
L_post_op.V_r=Vtrue.T[:,:rank]
# Test posterior sqrt covariance operator transpose is the same as
# explicit matrix transpose of matrix obtained by prior sqrt
# covariance operator
L_post = L_post_op.apply(np.eye(num_vars),transpose=False)
L_post_T = L_post_op.apply(np.eye(num_vars),transpose=True)
assert_ndarray_allclose(L_post.T,L_post_T,rtol=comparison_tol,
msg='Comparing posterior sqrt and transpose')
# Test posterior covariance operator produced matrix is the same
# as the exact posterior covariance obtained using analytical formula
if rank==num_vars:
# this test only makes sense if entire set of directions is found
# if low rank approx is used then this will ofcourse induce errors
post_covariance = np.dot(L_post,L_post_T)
assert_ndarray_allclose(
exact_laplace_covariance,post_covariance,rtol=comparison_tol,
atol=0.,
msg='Comparing matrix and operator based posterior covariance')
# Test pointwise covariance of posterior
post_pointwise_variance, prior_pointwise_variance=\
get_pointwise_laplace_variance_using_prior_variance(
prior, L_post_op, prior_pointwise_variance)
assert_ndarray_allclose(
np.diag(exact_laplace_covariance),post_pointwise_variance,
rtol=comparison_tol,atol=0.,msg='Comparing pointwise variance')
if not test_sampling:
return
num_samples = int(2e5)
posterior_samples = sample_from_laplace_posterior(
exact_laplace_mean, L_post_op, num_vars, num_samples, weights=None)
assert_ndarray_allclose(
exact_laplace_covariance,np.cov(posterior_samples),
atol=1e-2*exact_laplace_covariance.max(),rtol=0.,
msg='Comparing posterior samples covariance')
assert_ndarray_allclose(
exact_laplace_mean.squeeze(),
np.mean(posterior_samples,axis=1),atol=2e-2,rtol=0.,
msg='Comparing posterior samples mean')
if plot:
# plot marginals of posterior using orginal ordering
from pyapprox.visualization import plot_multiple_2d_gaussian_slices
texfilename= 'slices.tex'
plot_multiple_2d_gaussian_slices(
exact_laplace_mean[:10], np.diag(exact_laplace_covariance)[:10],
texfilename, reference_gaussian_data=(0.,1.),show=False)
# plot marginals of posterior in rotated coordinates
# from most to least important.
# The following is not feasiable in practice as we cannot compute
# entire covariance matrix in full space. But we have
# C_r = V_r*L*V_r*D*V_r.T*L.T*V_r.T
# is we compute matrix products from right to left we only have to
# compute at most (d x r) matrices. And if only want first 20 say
# variances then can apply C_r to vectors e_i i=1,...,20
# then we need at most (dx20 matrices)
texfilename= 'rotated-slices.tex'
V_r= L_post_op.V_r
plot_multiple_2d_gaussian_slices(
np.dot(V_r.T,exact_laplace_mean[:10]),
np.diag(np.dot(V_r.T,np.dot(exact_laplace_covariance,V_r)))[:10],
texfilename, reference_gaussian_data=(0.,1.),show=True)
class TestLaplace(unittest.TestCase):
def setUp( self ):
np.random.seed(2)
@unittest.skip(reason="only shows how to plot")
def test_plot_multiple_2d_gaussian_slices(self):
from pyapprox.visualization import plot_multiple_2d_gaussian_slices
mean=np.array([0,1,-1])
covariance = np.diag(np.array([1,0.5,0.025]))
texfilename= 'slices.tex'
plot_multiple_2d_gaussian_slices(
mean[:10], np.diag(covariance)[:10],texfilename,
reference_gaussian_data=(0.,1.),show=False)
import glob, os
filenames = glob.glob(texfilename[:-4]+'*')
for filename in filenames:
os.remove(filename)
def test_operator_diagonal(self):
num_vars = 4; eval_concurrency=2
randn = np.random.normal(0.,1.,(num_vars,num_vars))
prior_covariance = np.dot(randn.T,randn)
sqrt_covar_op = CholeskySqrtCovarianceOperator(
prior_covariance,eval_concurrency)
covariance_operator=CovarianceOperator(sqrt_covar_op)
diagonal = get_operator_diagonal(
covariance_operator, num_vars, eval_concurrency, transpose=None)
assert np.allclose(diagonal,np.diag(prior_covariance))
def test_posterior_dense_matrix_covariance_operator(self):
num_vars = 121; rank = 10; eval_concurrency=20
#randn = np.random.normal(0.,1.,(num_vars,num_vars))
#prior_covariance = np.dot(randn.T,randn)
prior_covariance = np.eye(num_vars)
prior_sqrt_covariance_op = CholeskySqrtCovarianceOperator(
prior_covariance,eval_concurrency)
prior = MultivariateGaussian(prior_sqrt_covariance_op)
comparison_tol = 6e-7
posterior_covariance_helper(
prior, rank, comparison_tol,test_sampling=True)
def test_log_unnormalized_posterior(self):
num_dims = 4; rank = 3; num_qoi=3
obs = np.random.normal(0.,1.,(num_qoi))
prior_mean = np.zeros((num_dims),float)
prior_covariance = np.eye(num_dims)*0.25
prior_covariance_chol_factor = np.linalg.cholesky(prior_covariance)
noise_covariance = np.eye(num_qoi)*0.1
noise_covariance_inv = np.linalg.inv(noise_covariance)
misfit_model = QuadraticMisfitModel(
num_dims, rank, num_qoi, obs, noise_covariance=noise_covariance)
prior_density = NormalDensity(prior_mean,covariance=prior_covariance)
objective = LogUnormalizedPosterior(
misfit_model,misfit_model.gradient_set,prior_density.pdf,
prior_density.log_pdf,prior_density.log_pdf_gradient)
samples = prior_density.generate_samples(2)
exact_log_unnormalized_posterior_vals = np.log(
np.exp(-misfit_model(samples))*
prior_density.pdf(samples))
log_unnormalized_posterior_vals = objective(samples)
assert np.allclose(exact_log_unnormalized_posterior_vals,
log_unnormalized_posterior_vals)
exact_log_unnormalized_posterior_grads = \
-misfit_model.gradient_set(samples)+\
prior_density.log_pdf_gradient(samples)
log_unnormalized_posterior_grads = objective(
samples,{'eval_type':'grad'})
assert np.allclose(exact_log_unnormalized_posterior_grads.T,
log_unnormalized_posterior_grads)
def test_get_map_point(self):
num_dims = 4; rank = 3; num_qoi=3
obs = np.random.normal(0.,1.,(num_qoi))
prior_mean = np.zeros((num_dims),float)
prior_covariance = np.eye(num_dims)*0.25
prior_covariance_chol_factor = np.linalg.cholesky(prior_covariance)
noise_covariance = np.eye(num_qoi)*0.1
noise_covariance_inv = np.linalg.inv(noise_covariance)
misfit_model = QuadraticMisfitModel(
num_dims, rank, num_qoi, obs, noise_covariance=noise_covariance)
# exact map point should be mean of Gaussian posterior
prior_hessian = np.linalg.inv(prior_covariance)
exact_map_point = \
laplace_posterior_approximation_for_linear_models(
misfit_model.Amatrix,prior_mean,prior_hessian,
noise_covariance_inv,obs)[0]
prior_density = NormalDensity(prior_mean,covariance=prior_covariance)
objective = LogUnormalizedPosterior(
misfit_model,misfit_model.gradient_set,prior_density.pdf,
prior_density.log_pdf,prior_density.log_pdf_gradient)
initial_point = prior_mean
map_point, obj_min = find_map_point(objective,initial_point)
assert np.allclose(exact_map_point.squeeze(), map_point)
assert np.allclose(
objective.gradient(map_point),objective.gradient(exact_map_point))
assert np.allclose(objective.gradient(map_point),np.zeros(num_dims))
def test_push_forward_gaussian_though_linear_model(self):
num_qoi = 1
num_dims = 2
A = np.random.normal(0.,1.,(num_qoi,num_dims))
b = np.random.normal(0.,1.,(num_qoi))
mean = np.ones((num_dims),float)
covariance = 0.1*np.eye(num_dims)
covariance_chol_factor = cholesky(covariance)
push_forward_mean, push_forward_covariance =\
push_forward_gaussian_though_linear_model(A,b,mean,covariance)
# Generate samples from original density and push forward through model
# and approximate density using KDE
num_samples = 1000000
samples = dot(covariance_chol_factor,
np.random.normal(0.,1.,(num_dims,num_samples)))+\
np.tile(mean.reshape(num_dims,1),num_samples)
push_forward_samples = dot(A,samples)+b
kde_density = ObsDataDensity(push_forward_samples)
push_forward_density = NormalDensity(
push_forward_mean,covariance=push_forward_covariance)
test_samples = np.linspace(
push_forward_samples.min(),
push_forward_samples.max(),100).reshape(1,100)
kde_values = kde_density.pdf(test_samples)
normal_values = push_forward_density.pdf(test_samples)
assert np.linalg.norm(kde_values-normal_values[:,0])<4e-2
#plt = kde_density.plot_density(1000,show=False)
#import pylab
#pylab.setp(plt, linewidth=2, color='r')
#push_forward_density.plot_density(100,show=True)
def test_quadratic_misfit_model(self):
num_dims = 10; rank = 3; num_qoi=3
obs = np.random.normal(0.,1.,(num_qoi))
model = QuadraticMisfitModel(num_dims,rank,num_qoi,obs)
sample = np.random.normal(0.,1.,(num_dims))
helper_gradient(model.value,model.gradient,sample)
def test_neg_log_posterior(self):
num_dims = 10; rank = 3; num_qoi=3
obs = np.random.normal(0.,1.,(num_qoi))
noise_covariance = np.eye(num_qoi)*0.1
misfit_model=QuadraticMisfitModel(
num_dims,rank,num_qoi,obs,noise_covariance=noise_covariance)
prior_mean = np.ones((num_dims),float)
prior_covariance = np.eye(num_dims)*0.25
prior_density = NormalDensity(prior_mean,covariance=prior_covariance)
objective = LogUnormalizedPosterior(
misfit_model,misfit_model.gradient_set,prior_density.pdf,
prior_density.log_pdf,prior_density.log_pdf_gradient)
sample = np.random.normal(0.,1.,(num_dims))
helper_gradient(misfit_model.value,misfit_model.gradient,sample)
def test_directional_derivative_using_finite_difference(self):
num_dims = 10; rank = 3; num_qoi=3
model = QuadraticMisfitModel(num_dims,rank,num_qoi)
directions = np.random.normal(0.,1.,(num_dims,2))
directions /= np.linalg.norm(directions,axis=0)
# derivatives of function values
sample = np.random.normal(0.,1.,(num_dims,1))
opts = {'eval_type':'value_grad'}
result = model(sample,opts)[0,:]
# result is num_samples x num_qoi. There is only one sample so take
# first row of result above
value_at_sample = result[0:1]# must be a vector
gradient = result[1:]
#gradient = model.gradient(sample)
assert np.allclose(
|
np.dot(gradient,directions)
|
numpy.dot
|
"""Class for playing and annotating video sources in Python using Tkinter."""
import json
import logging
import pathlib
import datetime
import tkinter
import tkinter.filedialog
import numpy as np
import cv2
import PIL.Image
import PIL.ImageTk
logger = logging.getLogger("VideoPyer")
logging.basicConfig(level=logging.INFO)
# Delay should be changed with caution
# Tkinter event loop gets flooded with delays < 60 ms
DELAY = 60
# Default colour options
BKG_COLOUR = "#3E4149"
COLOUR_MAP = {"blue": "#749CE2", "pink": "#E274CF", "green": "#8CE274"}
class VideoPyer: # pylint: disable=too-many-instance-attributes
"""Play, pause and record position of mouse clicks on videos."""
def __init__(self, window: tkinter.Tk, title: str) -> None:
"""Set up video frame and menus of GUI, variables and logging.
Args:
window (tkinter.Tk): Main instance of tkinter.Tk.
title (str): Title of Tk window.
"""
self.window = window
self.window.title(title)
self.window.configure(background=BKG_COLOUR)
# Frame that will contain the video
video_frame = tkinter.Frame(self.window)
video_frame.pack(side=tkinter.TOP, pady=5)
self.canvas = tkinter.Canvas(video_frame, bg=BKG_COLOUR)
# Log position of double click on canvas to record salient 'point'
self.canvas.bind("<Double-1>", self.log_point)
# Log head direction arrow drawn on press and release of click
self.canvas.bind("<Button-1>", self.log_click)
self.canvas.bind("<ButtonRelease-1>", self.draw_line)
self.arrow_start_x, self.arrow_start_y = None, None # Store start pos of click
# Remove a selected tk object on backspace
self.canvas.bind("<BackSpace>", self.remove_tk_object)
self.selected_tk_object = None # Current object user selects
# Rotate head direction arrow with Up or Down keys
self.canvas.bind("<KeyPress>", self.rotate)
self.canvas.focus_set() # Enable listen to key presses by default
self.canvas.pack()
# Frame that will display the menu buttons
menu_frame = tkinter.Frame(self.window)
menu_frame.pack(side=tkinter.BOTTOM, pady=5)
# Button to select video
self.btn_select = tkinter.Button(
menu_frame,
text="Select video",
width=10,
command=self.select_and_open_source,
highlightbackground=BKG_COLOUR,
)
self.btn_select.grid(row=0, column=0)
# Button to begin play
self.btn_play = tkinter.Button(
menu_frame,
text="Play",
width=8,
command=self.resume_video,
highlightbackground=BKG_COLOUR,
state="disabled",
)
self.btn_play.grid(row=0, column=1)
# Button to pause
self.pause = False
self.btn_pause = tkinter.Button(
menu_frame,
text="Pause",
width=8,
command=self.pause_video,
highlightbackground=BKG_COLOUR,
state="disabled",
)
self.btn_pause.grid(row=0, column=2)
# Mini menu to select marker colour for salient 'points'
colours = list(COLOUR_MAP.keys())
var = tkinter.StringVar(video_frame)
var.set(colours[0])
self.marker_colour = colours[0]
opt_colour = tkinter.OptionMenu(
video_frame,
var,
*colours,
command=self.set_colour,
)
opt_colour.config(bg=BKG_COLOUR, width=8)
opt_colour.place(x=3, y=3)
# Set up some variables for logging (points and arrows are logged independently)
self.annotation_logs = dict()
self.tkid_to_idx = dict()
self.arrow_head_x, self.arrow_head_y = 0, 0
self.frame_counter, self.mouse_x, self.mouse_y = 0, 0, 0
self.arrows_log_keys = [
"frame_counter",
"arrow_start_x",
"arrow_start_y",
"arrow_head_x",
"arrow_head_y",
"marker_colour",
]
self.points_log_keys = ["frame_counter", "mouse_x", "mouse_y", "marker_colour"]
self.filename = None # File currently loaded
self.vid = None # OpenCV capture instance
self.img = None # Holds current frame of video
self.window.mainloop()
def set_colour(self, value: str) -> None:
"""Set colour of visible marker for double mouse clicks."""
self.marker_colour = value
def shrink(self, c_id: int, x: int, y: int, radius: int) -> None:
"""Shrink a Tk circle object over time before finalling removing it.
Args:
c_id (int): Integer ID of circle/oval object from Tk.
x (int): X coord for circle centre.
y (int): Y coord for circle centre.
radius (int): Circle radius.
"""
if radius > 0.0:
radius -= 0.5
self.canvas.coords(c_id, x - radius, y - radius, x + radius, y + radius)
self.canvas.after(100, self.shrink, c_id, x, y, radius)
else:
self.canvas.delete(c_id) # Remove circle entirely
def log_point(self, event: tkinter.Event) -> None:
"""Log the (x,y) coords of double mouse click during video and the frame number.
Coordinates are given from top left of canvas. A fading marker becomes visible."""
logger.info(
"Point (%d,%d). Frame %d. Colour %s.",
event.x,
event.y,
self.frame_counter,
self.marker_colour,
)
self.mouse_x, self.mouse_y = event.x, event.y
self.arrow_start_x, self.arrow_start_y = (event.x, event.y) # Potential arrow
radius = 8
c_id = self.canvas.create_oval(
self.mouse_x - radius,
self.mouse_y - radius,
self.mouse_x + radius,
self.mouse_y + radius,
fill=COLOUR_MAP[self.marker_colour],
)
self.shrink(c_id, self.mouse_x, self.mouse_y, radius) # Shrink circle over time
# Add relevant keys to logs for current file
for key in self.points_log_keys:
self.annotation_logs[self.filename]["points"].setdefault(key, []).append(
getattr(self, key)
)
def log_click(self, event: tkinter.Event) -> None:
"""Log (x,y) coords of mouse click during video. Check if user is clicking on
existing line object to get ready for further commands (e.g. remove, rotate)."""
self.arrow_start_x, self.arrow_start_y = event.x, event.y
self.selected_tk_object = self.canvas.find_withtag("current")[
0
] # Top most object under mouse
def draw_line(self, event: tkinter.Event) -> None:
"""Draw a line between on coords on press and release of click and log.
The frame number recorded will be that at the time on release of click."""
self.arrow_head_x, self.arrow_head_y = event.x, event.y
# Only draw intentional arrows (i.e. not just a result from regular clicks)
if (
np.linalg.norm(
np.array([self.arrow_start_x, self.arrow_start_y])
- np.array([self.arrow_head_x, self.arrow_head_y])
)
> 20
):
l_id = self.canvas.create_line(
self.arrow_head_x,
self.arrow_head_y,
self.arrow_start_x,
self.arrow_start_y,
fill="yellow",
arrow="first",
)
logger.info(
"Arrow %d (%d,%d) -> (%d, %d). Frame %d. Colour %s.",
l_id,
self.arrow_start_x,
self.arrow_start_y,
self.arrow_head_x,
self.arrow_head_y,
self.frame_counter,
self.marker_colour,
)
# Add arrow coordinates to logs
for key in self.arrows_log_keys:
self.annotation_logs[self.filename]["arrows"].setdefault(
key, []
).append(getattr(self, key))
# Maintain standard indexing starting from 0
self.tkid_to_idx[l_id] = (
len(self.annotation_logs[self.filename]["arrows"]["arrow_start_x"]) - 1
)
self.arrow_start_x, self.arrow_start_y = None, None
def remove_tk_object(self, event: tkinter.Event) -> None:
"""Remove the tk object that is currently selected from the canvas and logs
(only head direction arrows are currently removeable from logs)."""
if self.selected_tk_object:
self.canvas.delete(self.selected_tk_object)
logger.info("Object w/ id %d removed from canvas.", self.selected_tk_object)
# Remove object from our logs
remove_idx = self.tkid_to_idx.get(self.selected_tk_object)
if remove_idx is not None: # Else not a line object and thus not logged
# Remove the object's recorded annotations for all keys
for key in self.arrows_log_keys:
self.annotation_logs[self.filename]["arrows"].setdefault(key, [])
del self.annotation_logs[self.filename]["arrows"][key][remove_idx]
# Decrement the indices larger than the object just removed
for k in self.tkid_to_idx:
if k > self.selected_tk_object:
self.tkid_to_idx[k] -= 1
del self.tkid_to_idx[self.selected_tk_object]
self.selected_tk_object = None
else:
logger.info("No object selected to remove via %s.", event.keysym)
def rotate(self, event: tkinter.Event) -> None:
"""Rotate the selected object by 1 degree (increment or decrement depending
on Up or Down key press). Currently only head direction arrows can be rotated."""
if (
self.selected_tk_object
and self.canvas.type(self.selected_tk_object) == "line"
):
# Calculate angle between arrow and 0 radians East
x0, y0, x1, y1 = self.canvas.coords(self.selected_tk_object)
vec = np.array([x0 - x1, y0 - y1])
unit_vec = vec / np.linalg.norm(vec)
theta = np.arctan2(unit_vec[1], unit_vec[0]) # np.arctan2 takes (y, x)
# Increment or decrement angle
if event.keysym == "Up":
theta += np.deg2rad(1)
elif event.keysym == "Down":
theta -= np.deg2rad(1)
# Rotate arrow around it's origin
radius = np.linalg.norm(np.array([x0, y0]) -
|
np.array([x1, y1])
|
numpy.array
|
from DNN.hans_on_feedforward_neural_network import Feedforward_neural_network
import numpy as np
Net = Feedforward_neural_network()
#--------------------------多元回归实验-----------------------------
# ---------------------------准备数据-------------------------------
#-------------------------------------------------------------------
# 20 维到 3 维的转换
X_data = np.random.uniform(0, 100, size=(1000, 20))
W = np.random.random(size=(20, 3))
Y_data = np.dot(X_data, W)
# 给标签加上高斯白噪声,使之成为非线性关系
Y_data = Y_data +
|
np.random.normal(0, 10, size=Y_data.shape)
|
numpy.random.normal
|
#Contains MeldCohort and MeldSubject classes
from contextlib import contextmanager
from meld_classifier.paths import (
DEMOGRAPHIC_FEATURES_FILE,
CORTEX_LABEL_FILE,
SURFACE_FILE,
DEFAULT_HDF5_FILE_ROOT,
BOUNDARY_ZONE_FILE,
NVERT,
BASE_PATH,
)
import pandas as pd
import numpy as np
import nibabel as nb
import os
import h5py
import glob
import logging
import meld_classifier.mesh_tools as mt
import scipy
class MeldCohort:
"""Class to define cohort-level parameters such as subject ids, mesh"""
def __init__(self, hdf5_file_root=DEFAULT_HDF5_FILE_ROOT, dataset=None, data_dir=BASE_PATH):
self.data_dir = data_dir
self.hdf5_file_root = hdf5_file_root
self.dataset = dataset
self.log = logging.getLogger(__name__)
# class properties (readonly attributes):
# full_feature_list: list of features available in this cohort
self._full_feature_list = None
# surface information known to MeldCohort
# cortex_label: information about which nodes are cortex
self._cortex_label = None
self._cortex_mask = None
# coords: spherical 2D coordinates
self._coords = None
# surf: inflated mesh, surface vertices and triangles
self._surf = None
# surf_partial: partially inflated mesh, surface vertices and triangles
self._surf_partial = None
# surf_area: surface area for each triangle
self._surf_area = None
# adj_mat: sparse adjacency matrix for all vertices
self._adj_mat = None
# lobes: labels for cortical lobes
self._lobes = None
# neighbours: list of neighbours for each vertex
self._neighbours = None
@property
def full_feature_list(self):
"""list of features available in this cohort"""
if self._full_feature_list is None:
self._full_feature_list = []
subject_ids = self.get_subject_ids()
# get union of all features from subjects in this cohort
features = set()
for subj in subject_ids:
features = features.union(MeldSubject(subj, self).get_feature_list().copy())
self._full_feature_list = sorted(list(features))
self.log.info(f"full_feature_list: {self._full_feature_list}")
return self._full_feature_list
@property
def cortex_label(self):
if self._cortex_label is None:
p = os.path.join(self.data_dir, CORTEX_LABEL_FILE)
self._cortex_label = np.sort(nb.freesurfer.io.read_label(p))
return self._cortex_label
@property
def cortex_mask(self):
if self._cortex_mask is None:
self._cortex_mask = np.zeros(NVERT, dtype=bool)
self._cortex_mask[self.cortex_label] = True
return self._cortex_mask
@property
def surf_area(self):
if self._surf_area is None:
p = os.path.join(self.data_dir, "fsaverage_sym/surf/lh.area")
self._surf_area = nb.freesurfer.read_morph_data(p)
return self._surf_area
@property
def surf(self):
"""inflated surface, dict with 'faces' and 'coords'"""
if self._surf is None:
p = os.path.join(self.data_dir, "fsaverage_sym", "surf", "lh.inflated")
self._surf = mt.load_mesh_geometry(p)
return self._surf
@property
def surf_partial(self):
"""partially inflated surface, dict with 'faces' and 'coords'"""
if self._surf_partial is None:
p = os.path.join(self.data_dir, "fsaverage_sym", "surf", "lh.partial_inflated")
vertices, faces = nb.freesurfer.io.read_geometry(p)
self._surf_partial = {"faces": faces, "coords": vertices}
return self._surf_partial
@property
def adj_mat(self):
if self._adj_mat is None:
all_edges = np.vstack(
[self.surf["faces"][:, :2], self.surf["faces"][:, 1:3], self.surf["faces"][:, [2, 0]]]
)
self._adj_mat = scipy.sparse.coo_matrix(
(np.ones(len(all_edges), np.uint8), (all_edges[:, 0], all_edges[:, 1])),
shape=(len(self.surf["coords"]), len(self.surf["coords"])),
).tocsr()
return self._adj_mat
@property
def neighbours(self):
if self._neighbours is None:
self._neighbours = mt.get_neighbours_from_tris(self.surf["faces"])
return self._neighbours
@property
def lobes(self):
if self._lobes is None:
p = os.path.join(self.data_dir, "fsaverage_sym/label/lh.lobes.annot")
self._lobes = nb.freesurfer.read_annot(p)
return self._lobes
@property
def coords(self):
if self._coords is None:
surf = mt.load_mesh_geometry(os.path.join(self.data_dir, SURFACE_FILE))
# spherical 2D coordinates. ignore radius
# spherical_coords = mt.spherical_np(surf["coords"])[:, 1:]
# surf_coords_norm = (surf['coords']-np.min(surf['coords'],axis=0))/(np.max(surf['coords'],axis=0)-np.min(surf['coords'],axis=0))
# norm_coords = (spherical_coords - np.min(spherical_coords, axis=0)) / (
# np.max(spherical_coords, axis=0) - np.min(spherical_coords, axis=0)
# )
# round to have around 1500 unique coordinates
# rounded_norm_coords = np.round(norm_coords * 5, 1) / 5
self._coords = surf["coords"] #rounded_norm_coords
return self._coords
def read_subject_ids_from_dataset(self):
"""Read subject ids from the dataset csv file.
Returns subject_ids, trainval_ids, test_ids"""
assert self.dataset is not None, "please set a valid dataset csv file"
df = pd.read_csv(os.path.join(self.data_dir, self.dataset))
subject_ids = list(df.subject_id)
trainval_ids = list(df[df.split == "trainval"].subject_id)
test_ids = list(df[df.split == "test"].subject_id)
return subject_ids, trainval_ids, test_ids
def get_sites(self):
"""get all valid site codes that exist on this system"""
sites = []
for f in glob.glob(os.path.join(self.data_dir, "MELD_*")):
if os.path.isdir(f):
sites.append(f.split("_")[-1])
return sites
@contextmanager
def _site_hdf5(self, site_code, group, write=False, hdf5_file_root=None):
"""
Hdf5 file handle for specified site_code and group (patient or control).
This function is to be used in a context block as follows:
```
with cohort._site_hdf5('H1', 'patient') as f:
# read information from f
pass
# f is automatically closed outside of the `with` block
```
Args:
site_code: hospital site code, e.g. 'H1'
group: 'patient' or 'control'
write (optional): flag to open hdf5 file with writing permissions, or to create
the hdf5 if it does not exist.
Yields: a pointer to the opened hdf5 file.
"""
if hdf5_file_root is None:
hdf5_file_root = self.hdf5_file_root
p = os.path.join(self.data_dir, f"MELD_{site_code}", hdf5_file_root.format(site_code=site_code, group=group))
# open existing file or create new one
if os.path.isfile(p) and not write:
f = h5py.File(p, "r")
elif os.path.isfile(p) and write:
f = h5py.File(p, "r+")
elif not os.path.isfile(p) and write:
f = h5py.File(p, "a")
else:
f = None
try:
yield f
finally:
if f is not None:
f.close()
def get_subject_ids(self, **kwargs):
"""Output list of subject_ids.
List can be filtered by sites (given as list of site_codes, e.g. 'H2'),
groups (patient / control / both), features (subject_features_to_exclude),
Sites are given as a list of site_codes (e.g. 'H2').
Optionally filter subjects by group (patient or control).
If self.dataset is not none, restrict subjects to subjects in dataset csv file.
subject_features_to_exclude: exclude subjects that dont have this feature
Args:
site_codes (list of str): hospital site codes, e.g. ['H1'].
group (str): 'patient', 'control', or 'both'.
subject_features_to_exclude (list of str): exclude subjects that dont have this feature
subject_features_to_include (list of str): exclude subjects that have this feature
scanners (list of str): list of scanners to include
lesional_only (bool): filter out lesion negative patients
Returns:
subject_ids: the list of subject ids
"""
# parse kwargs:
# get groups
if kwargs.get("group", "both") == "both":
groups = ["patient", "control"]
else:
groups = [kwargs.get("group", "both")]
# get sites
site_codes = kwargs.get("site_codes", self.get_sites())
if isinstance(site_codes, str):
site_codes = [site_codes]
# get scanners
scanners = kwargs.get("scanners", ["3T", "15T"])
if not isinstance(scanners, list):
scanners = [scanners]
lesional_only = kwargs.get("lesional_only", True)
subject_features_to_exclude = kwargs.get("subject_features_to_exclude", [""])
subject_features_to_include = kwargs.get("subject_features_to_include", [""])
# get subjects for specified groups and sites
subject_ids = []
for site_code in site_codes:
for group in groups:
with self._site_hdf5(site_code, group) as f:
if f is None:
continue
cur_scanners = f[site_code].keys()
for scanner in cur_scanners:
subject_ids += list(f[os.path.join(site_code, scanner, group)].keys())
self.log.info(f"total number of subjects: {len(subject_ids)}")
# restrict to ids in dataset (if specified)
if self.dataset is not None:
subjects_in_dataset, _, _ = self.read_subject_ids_from_dataset()
subject_ids = list(np.array(subject_ids)[np.in1d(subject_ids, subjects_in_dataset)])
self.log.info(
f"total number of subjects after restricting to subjects from {self.dataset}: {len(subject_ids)}"
)
# get list of features that is used to filter subjects
# e.g. use this to filter subjects without FLAIR features
_, required_subject_features = self._filter_features(
subject_features_to_exclude,
return_excluded=True,
)
self.log.debug("selecting subjects that have features: {}".format(required_subject_features))
# get list of features that determine whether to exclude subjects
# e.g. use this to filter subjects with FLAIR features
_, undesired_subject_features = self._filter_features(
subject_features_to_include,
return_excluded=True,
)
self.log.debug("selecting subjects that don't have features: {}".format(undesired_subject_features))
# filter ids by scanner, features and whether they have lesions.
filtered_subject_ids = []
for subject_id in subject_ids:
subj = MeldSubject(subject_id, self)
# check scanner
if subj.scanner not in scanners:
continue
# check required features
if not subj.has_features(required_subject_features):
continue
# check undesired features
if subj.has_features(undesired_subject_features) and len(undesired_subject_features) > 0:
continue
# check lesion mask presence
if lesional_only and subj.is_patient and not subj.has_lesion():
continue
# subject has passed all filters, add to list
filtered_subject_ids.append(subject_id)
self.log.info(
f"total number after filtering by scanner {scanners}, features, lesional_only {lesional_only}: {len(filtered_subject_ids)}"
)
return filtered_subject_ids
def get_features(self, features_to_exclude=[""]):
"""
get filtered list of features.
"""
# get list of all features that we want to train models on
# if a subject does not have a feature, 0 is returned for this feature during dataset creation
features = self._filter_features(features_to_exclude=features_to_exclude)
self.log.debug("features that will be loaded in train/test datasets: {}".format(features))
return features
def _filter_features(self, features_to_exclude, return_excluded=False):
"""Return a list of features, with features_to_exclude removed.
Args:
features_to_exclude (list of str): list of features that should be excluded,
NB 'FLAIR' will exclude all FLAIR features but all other features must be exact matches
return_excluded (bool): if True, return list of excluded features.
Returns:
tuple:
features: the list of features with appropriate features excluded.
excluded_features: list of all excluded features. Only returned, if return_exluded is specified.
"""
all_features = self.full_feature_list.copy()
excludable_features = []
filtered_features = self.full_feature_list.copy()
for feature in self.full_feature_list.copy():
for exclude in features_to_exclude:
if exclude == "":
pass
elif exclude == "FLAIR":
if exclude in feature:
filtered_features.remove(feature)
excludable_features.append(feature)
elif feature == exclude:
if exclude in self.full_feature_list: # only remove if still in list
filtered_features.remove(feature)
excludable_features.append(feature)
if return_excluded:
return filtered_features, excludable_features
else:
return filtered_features
def split_hemispheres(self, input_data):
"""
split vector of cortex-masked data back into 2 full overlays,
including zeros for medial wall
Returns:
hemisphere_data: dictionary with keys "left" and "right".
"""
# make sure that input_data has expected format
assert len(input_data) == 2 * len(self.cortex_label)
# split data in two hemispheres
hemisphere_data = {}
for i, hemi in enumerate(["left", "right"]):
feature_data = np.zeros((NVERT,) + input_data.shape[1:])
feature_data[self.cortex_label] = input_data[i * len(self.cortex_label) : (i + 1) * len(self.cortex_label)]
hemisphere_data[hemi] = feature_data
return hemisphere_data
class MeldSubject:
"""
individual patient from meld cohort, can read subject data and other info
"""
def __init__(self, subject_id, cohort):
self.subject_id = subject_id
self.cohort = cohort
self.log = logging.getLogger(__name__)
# unseeded rng for generating random numbers
self.rng = np.random.default_rng()
@property
def scanner(self):
_, site_code, scanner, group, ID = self.subject_id.split("_")
return scanner
@property
def group(self):
_, site_code, scanner, group, ID = self.subject_id.split("_")
if group == "FCD":
group = "patient"
elif group == "C":
group = "control"
else:
print(
f"Error: incorrect naming scheme used for {self.subject_id}. Unable to determine if patient or control."
)
return group
@property
def site_code(self):
_, site_code, scanner, group, ID = self.subject_id.split("_")
return site_code
def surf_dir_path(self, hemi):
"""return path to features dir (surf_dir)"""
return os.path.join(self.site_code, self.scanner, self.group, self.subject_id, hemi)
@property
def is_patient(self):
return self.group == "patient"
@property
def has_flair(self):
return "FLAIR" in " ".join(self.get_feature_list())
def has_lesion(self):
return self.get_lesion_hemisphere() in ["lh", "rh"]
def get_lesion_hemisphere(self):
"""
return 'lh', 'rh', or None
"""
if not self.is_patient:
return None
with self.cohort._site_hdf5(self.site_code, self.group) as f:
surf_dir_lh = f.require_group(self.surf_dir_path("lh"))
if ".on_lh.lesion.mgh" in surf_dir_lh.keys():
return "lh"
surf_dir_rh = f.require_group(self.surf_dir_path("rh"))
if ".on_lh.lesion.mgh" in surf_dir_rh.keys():
return "rh"
return None
def has_features(self, features):
missing_features = np.setdiff1d(features, self.get_feature_list())
return len(missing_features) == 0
def get_feature_list(self, hemi="lh"):
"""Outputs a list of the features a participant has for each hemisphere"""
with self.cohort._site_hdf5(self.site_code, self.group) as f:
keys = list(f[self.surf_dir_path(hemi)].keys())
# remove lesion and boundaries from list of features
if ".on_lh.lesion.mgh" in keys:
keys.remove(".on_lh.lesion.mgh")
if ".on_lh.boundary_zone.mgh" in keys:
keys.remove(".on_lh.boundary_zone.mgh")
return keys
def get_demographic_features(
self, feature_names, csv_file=DEMOGRAPHIC_FEATURES_FILE, normalize=False, default=None
):
"""
Read demographic features from csv file. Features are given as (partial) column titles
Args:
feature_names: list of partial column titles of features that should be returned
csv_path: csv file containing demographics information.
can be raw participants file or qc-ed values.
"{site_code}" is replaced with current site_code.
normalize: implemented for "Age of Onset" and "Duration"
default: default value to be used when subject does not exist.
Either "random" (which will choose a random value from the current
demographics feature column) or any other value which will be used
as default value.
Returns:
list of features, matching structure of feature_names
"""
csv_path = os.path.join(self.cohort.data_dir, csv_file)
return_single = False
if isinstance(feature_names, str):
return_single = True
feature_names = [feature_names]
df = pd.read_csv(csv_path, header=0, encoding="latin")
# get index column
id_col = None
for col in df.keys():
if "ID" in col:
id_col = col
# ensure that found an index column
if id_col is None:
self.log.warning("No ID column found in file, please check the csv file")
return None
df = df.set_index(id_col)
# find desired demographic features
features = []
for desired_name in feature_names:
matched_name = None
for col in df.keys():
if desired_name in col:
if matched_name is not None:
# already found another matching col
self.log.warning(
f"Multiple columns matching {desired_name} found ({matched_name}, {col}), please make search more specific"
)
return None
matched_name = col
# ensure that found necessary data
if matched_name is None:
if "urfer" in desired_name:
matched_name = "Freesurfer_nul"
else:
self.log.warning(f"Unable to find column matching {desired_name}, please double check for typos")
return None
# read feature
# if subject does not exists, add None
if self.subject_id in df.index:
if matched_name == "Freesurfer_nul":
feature = "5.3"
else:
feature = df.loc[self.subject_id][matched_name]
if normalize:
if matched_name == "Age of onset":
feature = np.log(feature + 1)
feature = feature / df[matched_name].max()
elif matched_name == "Duration":
feature = (feature - df[matched_name].min()) / (df[matched_name].max() - df[matched_name].min())
else:
self.log.info(f"demographic feature normalisation not implemented for feature {matched_name}")
elif default == "random":
# unseeded rng for generating random numbers
rng = np.random.default_rng()
feature = np.clip(np.random.normal(0, 0.1) + rng.choice(df[matched_name]), 0, 1)
else:
feature = default
features.append(feature)
if return_single:
return features[0]
return features
def load_feature_values(self, feature, hemi="lh"):
"""
Load and return values of specified feature.
"""
feature_values = np.zeros(NVERT, dtype=np.float32)
# read data from hdf5
with self.cohort._site_hdf5(self.site_code, self.group) as f:
surf_dir = f[self.surf_dir_path(hemi)]
if feature in surf_dir.keys():
feature_values[:] = surf_dir[feature][:]
else:
self.log.debug(f"missing feature: {feature} set to zero")
return feature_values
def load_feature_lesion_data(self, features, hemi="lh", features_to_ignore=[]):
"""
Load all patient's data into memory
Args:
features: list of features to be loaded
hemi: 'lh' or 'rh'
features_to_ignore: list of features that should be replaced with 0 upon loading
Returns:
feature_data, label
"""
# load all features
feature_values = []
for feature in features:
if feature in features_to_ignore:
# append zeros for features_to_ignore
feature_values.append(np.zeros(NVERT, dtype=np.float32))
else:
# read feature_values
feature_values.append(self.load_feature_values(feature, hemi=hemi))
feature_values = np.stack(feature_values, axis=-1)
# load lesion data
lesion_values = np.ceil(self.load_feature_values(".on_lh.lesion.mgh", hemi=hemi)).astype(int)
return feature_values, lesion_values
def load_boundary_zone(self, max_distance=40, feat_name=".on_lh.boundary_zone.mgh"):
"""
load and return boundary zone mask
max_distance - distance from lesion mask to extend boundary zone in mm
30 for training exclusion, 20 for sensitivity testing
"""
cortex_mask = self.cohort.cortex_mask
boundary_zones = np.zeros(2 * sum(cortex_mask)).astype(float)
hemi = self.get_lesion_hemisphere()
for k, h in enumerate(["lh", "rh"]):
if hemi == h:
bz = self.load_feature_values(feat_name, hemi=hemi)
if max_distance is not None:
bz = bz < max_distance
boundary_zones[k * sum(cortex_mask) : (k + 1) * sum(cortex_mask)] = bz[cortex_mask]
else:
bz = np.zeros(len(cortex_mask))
boundary_zones[k * sum(cortex_mask) : (k + 1) * sum(cortex_mask)] = bz[cortex_mask]
return boundary_zones
def get_histology(self):
"""
get histological classification from cleaned up demographics files
"""
histology = self.get_demographic_features("Histo")
return histology
# TODO write test
def write_feature_values(self, feature, feature_values, hemis=["lh", "rh"], hdf5_file=None, hdf5_file_root=None):
"""
write feature to subject's hdf5.
Args:
feature: name of the feature
feature_values: feature values to be written to the hdf5
hemis: hemispheres that should be written. If only one hemisphere is given,
it is assumed that all values given with feature_values belong to this hemisphere.
hdf5_file: uses self.cohort._site_hdf5 by default, but another filename can be specified,
e.g. to write predicted lesions to another hdf5
hdf5_file_root: optional to specify a different root from baseline, if writing to a new file
"""
# check that feature_values have expected length
if hdf5_file_root is None:
hdf5_file_root = self.cohort.hdf5_file_root
assert len(feature_values) == sum(self.cohort.cortex_mask) * len(hemis)
n_vert_cortex = sum(self.cohort.cortex_mask)
# open hdf5 file
if hdf5_file is not None:
if not os.path.isfile(hdf5_file):
hdf5_file_context = h5py.File(hdf5_file, "a")
else:
hdf5_file_context = h5py.File(hdf5_file, "r+")
else:
hdf5_file_context = self.cohort._site_hdf5(
self.site_code, self.group, write=True, hdf5_file_root=hdf5_file_root
)
with hdf5_file_context as f:
for i, hemi in enumerate(hemis):
group = f.require_group(self.surf_dir_path(hemi))
hemi_data = np.zeros(NVERT)
hemi_data[self.cohort.cortex_mask] = feature_values[i * n_vert_cortex : (i + 1) * n_vert_cortex]
dset = group.require_dataset(
feature, shape=(NVERT,), dtype="float32", compression="gzip", compression_opts=9
)
dset[:] = hemi_data
def delete(self, f, feat):
print("delete")
del f[feat]
def get_lesion_area(self):
"""
calculate lesion area as the proportion of the hemisphere that is lesion.
Returns:
lesion_area, lesion_hemisphere, lesion_lobe
"""
hemi = self.get_lesion_hemisphere()
lobes_i, _, lobes_labels = self.cohort.lobes
if hemi is not None:
lesion = self.load_feature_values(".on_lh.lesion.mgh", hemi=hemi).astype(bool)
total_area = np.sum(self.cohort.surf_area[self.cohort.cortex_mask])
lesion_area =
|
np.sum(self.cohort.surf_area[lesion])
|
numpy.sum
|
import numpy as np
import math
import os
def load_obj(dire):
fin = open(dire,'r')
lines = fin.readlines()
fin.close()
vertices = []
triangles = []
for i in range(len(lines)):
line = lines[i].split()
if len(line)==0:
continue
if line[0] == 'v':
x = float(line[1])
y = float(line[2])
z = float(line[3])
vertices.append([x,y,z])
if line[0] == 'f':
x = int(line[1].split("/")[0])
y = int(line[2].split("/")[0])
z = int(line[3].split("/")[0])
triangles.append([x-1,y-1,z-1])
vertices = np.array(vertices, np.float32)
#remove isolated points
triangles_ =
|
np.array(triangles, np.int32)
|
numpy.array
|
# Licensed under an MIT open source license - see LICENSE
"""
SCOUSE - Semi-automated multi-COmponent Universal Spectral-line fitting Engine
Copyright (c) 2016-2018 <NAME>
CONTACT: <EMAIL>
"""
import numpy as np
import sys
import warnings
import pyspeckit
import matplotlib.pyplot as plt
import itertools
import time
from astropy import log
from astropy import units as u
from astropy.utils.console import ProgressBar
from .indiv_spec_description import *
from .parallel_map import *
from .saa_description import add_indiv_spectra, clean_up, merge_models
from .solution_description import fit, print_fit_information
from .verbose_output import print_to_terminal
def initialise_indiv_spectra(scouseobject, verbose=False, njobs=1):
"""
Here, the individual spectra are primed ready for fitting. We create a new
object for each spectrum and they are contained within a dictionary which
can be located within the relavent SAA.
Parameters
----------
scouseobject : Instance of the scousepy class
verbose : bool (optional)
verbose output
njobs : number (optional)
number of cores used for the computation - prep spec is parallelised
"""
# Cycle through potentially multiple wsaa values
for i in range(len(scouseobject.wsaa)):
# Get the relavent SAA dictionary
saa_dict = scouseobject.saa_dict[i]
# initialise the progress bar
if verbose:
count=0
progress_bar = print_to_terminal(stage='s3', step='init',
length=len(saa_dict.keys()),
var=scouseobject.wsaa[i])
for _key in saa_dict.keys():
prep_spec(_key, saa_dict, njobs, scouseobject)
if verbose:
progress_bar.update()
if verbose:
print("")
def prep_spec(_key, saa_dict, njobs, scouseobject):
"""
Prepares the spectra for automated fitting
Parameters
----------
_key : number
key for SAA dictionary entry - used to select the correct SAA
saa_dict : dictionary
dictionary of spectral averaging areas
njobs : number
number of cores used for the computation - prep spec is parallelised
scouseobject : Instance of the scousepy class
"""
# get the relavent SAA
SAA = saa_dict[_key]
# Initialise indiv spectra
indiv_spectra = {}
# We only care about the SAA's that are to be fit at this stage
if SAA.to_be_fit:
if np.size(SAA.indices_flat) != 0.0:
# Parallel
if njobs > 1:
args = [scouseobject, SAA]
inputs = [[k] + args for k in range(len(SAA.indices_flat))]
# Send to parallel_map
indiv_spec = parallel_map(get_indiv_spec,inputs,numcores=njobs)
# flatten the output from parallel map
merged_spec = [spec for spec in indiv_spec if spec is not None]
merged_spec = np.asarray(merged_spec)
for k in range(len(SAA.indices_flat)):
# Add the spectra to the dict
key = SAA.indices_flat[k]
indiv_spectra[key] = merged_spec[k]
else:
for k in range(len(SAA.indices_flat)):
key = SAA.indices_flat[k]
args = [scouseobject, SAA]
inputs = [[k] + args]
inputs = inputs[0]
indiv_spec = get_indiv_spec(inputs)
indiv_spectra[key] = indiv_spec
# add the spectra to the spectral averaging areas
add_indiv_spectra(SAA, indiv_spectra)
def get_indiv_spec(inputs):
"""
Returns a spectrum
Parameters
----------
inputs : list
list containing inputs to parallel map - contains the index of the
relavent spectrum, the scouseobject, and the SAA
"""
idx, scouseobject, SAA = inputs
# get the coordinates of the pixel based on the flattened index
_coords = np.unravel_index(SAA.indices_flat[idx],scouseobject.cube.shape[1:])
# create a pyspeckit spectrum
indiv_spec = spectrum(_coords, \
scouseobject.cube[:,_coords[0], _coords[1]].value, \
idx=SAA.indices_flat[idx], \
scouse=scouseobject)
return indiv_spec
def fit_indiv_spectra(scouseobject, saa_dict, wsaa, njobs=1,
spatial=False, verbose=False, stage=3):
"""
Automated fitting procedure for individual spectra
Parameters
----------
scouseobject : Instance of the scousepy class
saa_dict : dictionary
dictionary of spectral averaging areas
wsaa : number
width of the SAA
njobs : number (optional)
number of cores used for the computation - prep spec is parallelised
spatial : bool (optional)
not implemented yet
verbose : bool (optional)
verbose output
stage : number (optional)
indicates whether the fitting is being performed during stage 3 or 6
"""
if verbose:
if stage == 3:
progress_bar = print_to_terminal(stage='s3', step='fitting',
length=len(saa_dict.keys()),
var=wsaa)
else:
progress_bar = print_to_terminal(stage='s6', step='fitting',
length=len(saa_dict.keys()),
var=wsaa)
for _key in saa_dict.keys():
fitting_spec(_key, scouseobject, saa_dict, wsaa, njobs, spatial)
if verbose:
progress_bar.update()
if verbose:
print("")
def fitting_spec(_key, scouseobject, saa_dict, wsaa, njobs, spatial):
"""
The automated fitting process followed by scouse
Parameters
----------
_key : number
key for SAA dictionary entry - used to select the correct SAA
scouseobject : Instance of the scousepy class
saa_dict : dictionary
dictionary of spectral averaging areas
wsaa : number
width of the SAA
njobs : number
number of cores used for the computation - prep spec is parallelised
spatial : bool
not implemented yet
"""
# get the relavent SAA
SAA = saa_dict[_key]
# We only care about those locations we have SAA fits for.
if SAA.to_be_fit:
# Shhh
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
# Generate a template spectrum
template_spectrum = generate_template_spectrum(scouseobject)
log.setLevel(old_log)
# Get the SAA model solution
parent_model = SAA.model
# Parallel
if njobs > 1:
if np.size(SAA.indices_flat) != 0.0:
args = [scouseobject, SAA, parent_model, template_spectrum]
inputs = [[k] + args for k in range(len(SAA.indices_flat))]
# Send to parallel_map
bfs = parallel_map(fit_a_spectrum, inputs, numcores=njobs)
merged_bfs = [core_bf for core_bf in bfs if core_bf is not None]
merged_bfs = np.asarray(merged_bfs)
for k in range(len(SAA.indices_flat)):
# Add the models to the spectra
key = SAA.indices_flat[k]
add_model_parent(SAA.indiv_spectra[key], merged_bfs[k,0])
add_model_dud(SAA.indiv_spectra[key], merged_bfs[k,1])
else:
# If njobs = 1 just cycle through
for k in range(len(SAA.indices_flat)):
key = SAA.indices_flat[k]
args = [scouseobject, SAA, parent_model, template_spectrum]
inputs = [[k] + args]
inputs = inputs[0]
bfs = fit_a_spectrum(inputs)
add_model_parent(SAA.indiv_spectra[key], bfs[0])
add_model_dud(SAA.indiv_spectra[key], bfs[1])
def generate_template_spectrum(scouseobject):
"""
Generate a template spectrum to be passed to the fitter. This will contain
some basic information that will be updated during the fitting process. This
is implemented because the parallelised fitting replaces the spectrum in
memory and things...break
Parameters
----------
scouseobject : Instance of the scousepy class
"""
x=scouseobject.xtrim
y=scouseobject.saa_dict[0][0].ytrim
rms=scouseobject.saa_dict[0][0].rms
return pyspeckit.Spectrum(data=y,
error=np.ones(len(y))*rms,
xarr=x,
doplot=False,
unit=scouseobject.cube.header['BUNIT'],
xarrkwargs={'unit':'km/s',
'refX': scouseobject.cube.wcs.wcs.restfrq*u.Hz,
'velocity_convention': 'radio',
},
verbose=False
)
def get_flux(scouseobject, indiv_spec):
"""
Returns flux for a given spectrum
Parameters
----------
scouseobject : Instance of the scousepy class
indiv_spec : Instance of the fit class
the spectrum to be fit, produced by prep spec
"""
y=scouseobject.cube[:,indiv_spec.coordinates[0],indiv_spec.coordinates[1]]
y=y[scouseobject.trimids]
return y
def get_spec(scouseobject, indiv_spec, template_spectrum):
"""
Here we update the template with values corresponding to the spectrum
we want to fit
Parameters
----------
scouseobject : Instance of the scousepy class
indiv_spec : pyspeckit spectrum
the spectrum to be fit, produced by prep spec
template_spectrum : pyspeckit spectrum
dummy spectrum to be updated
"""
y = get_flux(scouseobject, indiv_spec)
rms=indiv_spec.rms
template_spectrum.data = u.Quantity(y).value
template_spectrum.error = u.Quantity(np.ones(len(y))*rms).value
template_spectrum.specfit.spectofit = u.Quantity(y).value
template_spectrum.specfit.errspec = u.Quantity(np.ones(len(y))*rms).value
return template_spectrum
def fit_a_spectrum(inputs):
"""
Process used for fitting spectra. Returns a best-fit solution and a dud for
every spectrum.
Parameters
----------
inputs : list
list containing inputs to parallel map - contains the spectrum index,
the scouseobject, SAA, the best-fitting model solution to the SAA, and
the template spectrum
"""
idx, scouseobject, SAA, parent_model, template_spectrum = inputs
key = SAA.indices_flat[idx]
spec=None
# Shhh
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
# update the template
spec = get_spec(scouseobject, SAA.indiv_spectra[key], template_spectrum)
log.setLevel(old_log)
# begin the fitting process
bf = fitting_process_parent(scouseobject, SAA, key, spec, parent_model)
# if the result is a zero component fit, create a dud spectrum
if bf.ncomps == 0.0:
dud = bf
else:
dud = fitting_process_duds(scouseobject, SAA, key, spec)
return [bf, dud]
def fitting_process_parent(scouseobject, SAA, key, spec, parent_model):
"""
Pyspeckit fitting of an individual spectrum using the parent SAA model
Parameters
----------
scouseobject : Instance of the scousepy class
SAA : Instance of the saa class
scousepy spectral averaging area
key : number
index of the individual spectrum
spec : pyspeckit spectrum
the spectrum to fit
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
"""
# Check the model
happy = False
initfit = True
fit_dud = False
while not happy:
if np.all(np.isfinite(np.array(spec.flux))):
if initfit:
guesses = np.asarray(parent_model.params)
if np.sum(guesses) != 0.0:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
old_log = log.level
log.setLevel('ERROR')
spec.specfit(interactive=False, \
clear_all_connections=True,\
xmin=scouseobject.ppv_vol[0], \
xmax=scouseobject.ppv_vol[1], \
fittype = scouseobject.fittype, \
guesses = guesses,\
verbose=False,\
use_lmfit=True)
log.setLevel(old_log)
modparnames = spec.specfit.fitter.parnames
modncomps = spec.specfit.npeaks
modparams = spec.specfit.modelpars
moderrors = spec.specfit.modelerrs
modrms = spec.error[0]
_inputs = [modparnames, [modncomps], modparams, moderrors, [modrms]]
happy, guesses = check_spec(scouseobject, parent_model, _inputs, happy)
initfit = False
else:
# If no satisfactory model can be found - fit a dud!
fit_dud=True
happy = True
else:
# If no satisfactory model can be found - fit a dud!
fit_dud = True
happy = True
if fit_dud:
bf = fitting_process_duds(scouseobject, SAA, key, spec)
else:
bf = fit(spec, idx=key, scouse=scouseobject)
return bf
def fitting_process_duds(scouseobject, SAA, key, spec):
"""
Fitting duds
Parameters
----------
scouseobject : Instance of the scousepy class
SAA : Instance of the saa class
scousepy spectral averaging area
key : number
index of the individual spectrum
spec : pyspeckit spectrum
the spectrum to fit
"""
bf = fit(spec, idx=key, scouse=scouseobject, fit_dud=True,\
noise=SAA.indiv_spectra[key].rms, \
duddata=np.array(spec.flux))
return bf
def check_spec(scouseobject, parent_model, inputs, happy):
"""
This routine controls the fit quality.
Here we are going to check the output spectrum against user-defined
tolerance levels described in Henshaw et al. 2016 and against the SAA fit.
Parameters
----------
scouseobject : Instance of the scousepy class
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
inputs : list
contains various information about the model (see fitting_process_parent)
happy : bool
fitting stops when happy = True
"""
guesses = np.asarray(inputs[2])
condition_passed = np.zeros(3, dtype='bool')
condition_passed, guesses = check_rms(scouseobject, inputs, guesses,
condition_passed)
if condition_passed[0]:
condition_passed, guesses = check_dispersion(scouseobject, inputs,
parent_model, guesses,
condition_passed)
if (condition_passed[0]) and (condition_passed[1]):
condition_passed, guesses = check_velocity(scouseobject, inputs,
parent_model, guesses,
condition_passed)
if np.all(condition_passed):
if (inputs[1][0] == 1):
happy = True
else:
happy, guesses = check_distinct(scouseobject, inputs,
parent_model, guesses,
happy)
return happy, guesses
def unpack_inputs(inputs):
"""
Unpacks the input list
Parameters:
-----------
inputs : list
contains various information about the model (see fitting_process_parent)
"""
parnames = [pname.lower() for pname in inputs[0]]
nparams = np.size(parnames)
ncomponents = inputs[1][0]
params = inputs[2]
errors = inputs[3]
rms = inputs[4][0]
return parnames, nparams, ncomponents, params, errors, rms
def get_index(parnames, namelist):
"""
Searches for a particular parname in a list and returns the index of where
that parname appears
Parameters
----------
parnames : list
list of strings containing the names of the parameters in the pyspeckit
fit. This will vary depending on the input model so keep as general as
possibleself
namelist : list
list of various names used by pyspeckit for parameters in the model
"""
foundname = [pname in namelist for pname in parnames]
foundname = np.array(foundname)
idx = np.where(foundname==True)[0]
return np.asscalar(idx[0])
def check_rms(scouseobject, inputs, guesses, condition_passed):
"""
Check the rms of the best-fitting model components
Parameters
----------
scouseobject : Instance of the scousepy class
inputs : list
contains various information about the model (see fitting_process_parent)
guesses : array like
array or list of guesses to be fed to pyspeckit in case refitting is
required
condition_passed : list
boolean list indicating which quality control steps have been satisfied
Notes
-----
I'm comparing one of the parameters in _peaknames against the rms value.
This isn't strictly correct for models other than Gaussian, since e.g. Tex
isn't equivalent to the amplitude of the model component. However, in the
absence of anything else to compare, I will leave this for now and think of
something better.
"""
parnames, nparams, ncomponents, params, errors, rms = unpack_inputs(inputs)
# Find where the peak is located in the parameter array
namelist = ['tex', 'amp', 'amplitude', 'peak', 'tant', 'tmb']
idx = get_index(parnames, namelist)
# Now check all components to see if they are above the rms threshold
for i in range(int(ncomponents)):
if (params[int((i*nparams)+idx)] < rms*scouseobject.tolerances[0]): # or \
#(params[int((i*nparams)+idx)] < errors[int((i*nparams)+idx)]*scouseobject.tolerances[0]):
# set to zero
guesses[int((i*nparams)):int((i*nparams)+nparams)] = 0.0
violating_comps = (guesses==0.0)
if np.any(violating_comps):
condition_passed[0]=False
else:
condition_passed[0]=True
guesses = guesses[(guesses != 0.0)]
return condition_passed, guesses
def check_dispersion(scouseobject,inputs,parent_model,guesses,condition_passed):
"""
Check the fwhm of the best-fitting model components
Parameters
----------
scouseobject : Instance of the scousepy class
inputs : list
contains various information about the model (see fitting_process_parent)
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
guesses : array like
array or list of guesses to be fed to pyspeckit in case refitting is
required
condition_passed : list
boolean list indicating which quality control steps have been satisfied
"""
fwhmconv = 2.*np.sqrt(2.*np.log(2.))
parnames, nparams, ncomponents, params, errors, rms = unpack_inputs(inputs)
# Find where the velocity dispersion is located in the parameter array
namelist = ['dispersion', 'width', 'fwhm']
idx = get_index(parnames, namelist)
for i in range(int(ncomponents)):
# Find the closest matching component in the parent SAA model
diff = find_closest_match(i, nparams, ncomponents, params, parent_model)
idmin = np.where(diff == np.min(diff))[0]
idmin = idmin[0]
# Work out the relative change in velocity dispersion
relchange = params[int((i*nparams)+idx)]/parent_model.params[int((idmin*nparams)+idx)]
if relchange < 1.:
relchange = 1./relchange
# Does this satisfy the criteria
if (params[int((i*nparams)+idx)]*fwhmconv < scouseobject.cube.header['CDELT3']*scouseobject.tolerances[1]) or \
(relchange > scouseobject.tolerances[2]):
# set to zero
guesses[int((i*nparams)):int((i*nparams)+nparams)] = 0.0
violating_comps = (guesses==0.0)
if np.any(violating_comps):
condition_passed[1]=False
else:
condition_passed[1]=True
guesses = guesses[(guesses != 0.0)]
return condition_passed, guesses
def check_velocity(scouseobject,inputs,parent_model,guesses,condition_passed):
"""
Check the centroid velocity of the best-fitting model components
Parameters
----------
scouseobject : Instance of the scousepy class
inputs : list
contains various information about the model (see fitting_process_parent)
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
guesses : array like
array or list of guesses to be fed to pyspeckit in case refitting is
required
condition_passed : list
boolean list indicating which quality control steps have been satisfied
"""
parnames, nparams, ncomponents, params, errors, rms = unpack_inputs(inputs)
# Find where the peak is located in the parameter array
namelist = ['velocity', 'shift', 'centroid', 'center']
idxv = get_index(parnames, namelist)
# Find where the velocity dispersion is located in the parameter array
namelist = ['dispersion', 'width', 'fwhm']
idxd = get_index(parnames, namelist)
for i in range(int(ncomponents)):
# Find the closest matching component in the parent SAA model
diff = find_closest_match(i, nparams, ncomponents, params, parent_model)
idmin = np.where(diff == np.min(diff))[0]
idmin = idmin[0]
# Limits for tolerance
lower_lim = parent_model.params[int((idmin*nparams)+idxv)]-(scouseobject.tolerances[3]*parent_model.params[int((idmin*nparams)+idxd)])
upper_lim = parent_model.params[int((idmin*nparams)+idxv)]+(scouseobject.tolerances[3]*parent_model.params[int((idmin*nparams)+idxd)])
# Does this satisfy the criteria
if (params[(i*nparams)+idxv] < lower_lim) or \
(params[(i*nparams)+idxv] > upper_lim):
# set to zero
guesses[int((i*nparams)):int((i*nparams)+nparams)] = 0.0
violating_comps = (guesses==0.0)
if np.any(violating_comps):
condition_passed[2]=False
else:
condition_passed[2]=True
guesses = guesses[(guesses != 0.0)]
return condition_passed, guesses
def check_distinct(scouseobject,inputs,parent_model,guesses,happy):
"""
Check to see if component pairs can be distinguished in velocity
Parameters
----------
scouseobject : Instance of the scousepy class
inputs : list
contains various information about the model (see fitting_process_parent)
parent_model : instance of the fit class
best-fitting model solution to the parent SAA
guesses : array like
array or list of guesses to be fed to pyspeckit in case refitting is
required
condition_passed : list
boolean list indicating which quality control steps have been satisfied
"""
parnames, nparams, ncomponents, params, errors, rms = unpack_inputs(inputs)
# Find where the peak is located in the parameter array
namelist = ['tex', 'amp', 'amplitude', 'peak', 'tant', 'tmb']
idxp = get_index(parnames, namelist)
# Find where the peak is located in the parameter array
namelist = ['velocity', 'shift', 'centroid', 'center']
idxv = get_index(parnames, namelist)
# Find where the velocity dispersion is located in the parameter array
namelist = ['dispersion', 'width', 'fwhm']
idxd = get_index(parnames, namelist)
fwhmconv = 2.*np.sqrt(2.*np.log(2.))
intlist = [params[int((i*nparams)+idxp)] for i in range(int(ncomponents))]
velolist = [params[int((i*nparams)+idxv)] for i in range(int(ncomponents))]
displist = [params[int((i*nparams)+idxd)] for i in range(int(ncomponents))]
diff = np.zeros(int(ncomponents))
validvs = np.ones(int(ncomponents))
for i in range(int(ncomponents)):
if validvs[i] != 0.0:
# Calculate the velocity difference between all components
for j in range(int(ncomponents)):
diff[j] = abs(velolist[i]-velolist[j])
diff[(diff==0.0)] = np.nan
# Find the minimum difference (i.e. the adjacent component)
idmin = np.where(diff==np.nanmin(diff))[0]
idmin = idmin[0]
adjacent_intensity = intlist[idmin]
adjacent_velocity = velolist[idmin]
adjacent_dispersion = displist[idmin]
# Get the separation between each component and its neighbour
sep =
|
np.abs(velolist[i] - adjacent_velocity)
|
numpy.abs
|
import math
import numpy as np
from scipy import signal
def gaussian_pdf_1d(mu, sigma, length):
'''Generate one dimension Gaussian distribution
- input mu: the mean of pdf
- input sigma: the standard derivation of pdf
- input length: the size of pdf
- output: a row vector represents one dimension Gaussian distribution
'''
# create an array
half_len = length / 2
if np.remainder(length, 2) == 0:
ax = np.arange(-half_len, half_len, 1)
else:
ax = np.arange(-half_len, half_len + 1, 1)
ax = ax.reshape([-1, ax.size])
denominator = sigma * np.sqrt(2 * np.pi)
nominator = np.exp( -np.square(ax - mu) / (2 * sigma * sigma) )
return nominator / denominator
def gaussian_pdf_2d(mu, sigma, row, col):
'''Generate two dimensional Gaussian distribution
- input mu: the mean of pdf
- input sigma: the standard derivation of pdf
- input row: length in row axis
- input column: length in column axis
- output: a 2D matrix represents two dimensional Gaussian distribution
'''
# create row vector as 1D Gaussian pdf
g_row = gaussian_pdf_1d(mu, sigma, row)
# create column vector as 1D Gaussian pdf
g_col = gaussian_pdf_1d(mu, sigma, col).transpose()
return signal.convolve2d(g_row, g_col, mode='full')
def get_derivatives(gray, sigma=0.4):
'''Compute gradient information of the input grayscale image
- Input gray: H x W matrix as image
- Output mag: H x W matrix represents the magnitude of derivatives
- Output magx: H x W matrix represents the magnitude of derivatives along x-axis
- Output magy: H x W matrix represents the magnitude of derivatives along y-axis
- Output ori: H x W matrix represents the orientation of derivatives
'''
mu = 0
sigma = sigma # 0.4, less sigma, more blurred edge
Ga = gaussian_pdf_2d(mu, sigma, 5, 5)
# Filter
dx = np.array([[1, 0, -1]]) # Horizontal
dy = np.array([[1], [0], [-1]]) # Vertical
#dx = np.array([[1, -1]]) # Horizontal
#dy = np.array([[1],[-1]]) # Vertical
# Convolution of image
#Gx = np.convolve(Ga, dx, 'same')
#Gy = np.convolve(Ga, dy, 'same')
#lx = np.convolve(I_gray, Gx, 'same')
#ly = np.convolve(I_gray, Gy, 'same')
Gx = signal.convolve2d(Ga, dx, mode='same', boundary='fill')
Gy = signal.convolve2d(Ga, dy, mode='same', boundary='fill')
lx = signal.convolve2d(gray, Gx, mode='same', boundary='fill')
ly = signal.convolve2d(gray, Gy, mode='same', boundary='fill')
# Magnitude
mag = np.sqrt(lx*lx+ly*ly)
# Angle
angle =
|
np.arctan(ly/lx)
|
numpy.arctan
|
"""
desisim.spec_qa.redshifts
=========================
Module to run high_level QA on a given DESI run
Written by JXP on 3 Sep 2015
"""
from __future__ import print_function, absolute_import, division
import matplotlib
# matplotlib.use('Agg')
import numpy as np
import sys, os, pdb, glob
from matplotlib import pyplot as plt
import matplotlib.gridspec as gridspec
from astropy.io import fits
from astropy.table import Table, vstack, hstack, MaskedColumn, join
try:
from scipy import constants
C_LIGHT = constants.c/1000.0
except TypeError: # This can happen during documentation builds.
C_LIGHT = 299792458.0/1000.0
import desispec.io
from .utils import elg_flux_lim, get_sty_otype, catastrophic_dv, match_otype
from desiutil.log import get_logger, DEBUG
def calc_dz(simz_tab):
'''Calcualte deltaz/(1+z) for a given simz_tab
'''
dz = (simz_tab['Z']-simz_tab['TRUEZ'])/(1+simz_tab['TRUEZ'])
#
return dz
def calc_dzsig(simz_tab):
'''Calcualte deltaz/sig(z) for a given simz_tab
'''
dzsig = (simz_tab['Z']-simz_tab['TRUEZ'])/simz_tab['ZERR']
#
return dzsig
def calc_obj_stats(simz_tab, objtype):
"""Calculate redshift statistics for a given objtype
Parameters
----------
simz_tab : Table
TODO: document this
objtype : str
Object type, e.g. 'ELG', 'LRG'
Returns
-------
stat_dict : dict
Survey results for a given object type
"""
# zstats
ngood, nfail, nmiss, nlost = zstats(simz_tab, objtype=objtype, count=True, survey=True)
ntot = ngood+nfail+nmiss+nlost
# Grab the masks
objtype_mask, z_mask, survey_mask, dv_mask, zwarn_mask = criteria(simz_tab, objtype=objtype)
# Init
stat_dict = {} #dict(OBJTYPE=objtype)
# N targets (irrespective of the Survey)
stat_dict['N_TARG'] = ntot
# Number of objects with Redshift Analysis
stat_dict['N_zA'] = np.count_nonzero(z_mask & objtype_mask)
# Redshift measured (includes catastrophics)
# For ELGs, cut on OII_Flux too
stat_dict['N_SURVEY'] = np.count_nonzero(survey_mask & objtype_mask & z_mask)
# Catastrophic failures in the survey
stat_dict['N_CAT'] = nfail
if stat_dict['N_SURVEY'] > 0:
stat_dict['CAT_RATE'] = float(nfail)/stat_dict['N_SURVEY']
else:
stat_dict['CAT_RATE'] = 0
# Good redshifts in the survey
stat_dict['N_GOODZ'] = ngood
# Redshift with ZWARN=0 in the survey
stat_dict['N_ZWARN0'] = ngood+nfail
# Survey Efficiency
if stat_dict['N_SURVEY'] > 0:
stat_dict['EFF'] = float(ngood)/float(stat_dict['N_SURVEY'])
else:
stat_dict['EFF'] = 1.
# Purity
if stat_dict['N_ZWARN0'] > 0:
stat_dict['PURITY'] = float(ngood)/float(stat_dict['N_ZWARN0'])
else:
stat_dict['PURITY'] = 1.
# delta z
gdz_tab = slice_simz(simz_tab, objtype=objtype, survey=True, goodz=True, all_zwarn0=True, z_analy=True)
dz = calc_dz(gdz_tab)
if len(dz) == 0:
dz = np.zeros(1)
not_nan = np.isfinite(dz)
stat_dict['MEAN_DZ'] = float(np.mean(dz[not_nan]))
stat_dict['MEDIAN_DZ'] = float(np.median(dz[not_nan]))
stat_dict['RMS_DZ'] = float(np.std(dz[not_nan]))
# Return
return stat_dict
def spectype_confusion(simz_tab, zb_tab=None):
""" Generate a Confusion Matrix for spectral types
See the Confusion_matrix_spectypes Notebook in docs/nb for an example
Parameters
----------
simz_tab : Table
Truth table; may be input from truth.fits
zb_tab : Table (optional)
zcatalog/zbest table; may be input from zcatalog-mini.fits
If provided, used to match the simz_tab to the zbest quantities
Returns
-------
simz_tab : astropy.Table
Merged table of simpsec data
results : dict
Nested dict.
First key is the TRUESPECTYPE
Second key is the SPECTYPE
e.g. results['QSO']['QSO'] reports the number of True QSO classified as QSO
results['QSO']['Galaxy'] reports the number of True QSO classified as Galaxy
"""
# Process simz_tab as need be
if zb_tab is not None:
match_truth_z(simz_tab, zb_tab, mini_read=True)
# Cut down to those processed with the Redshift fitter
measured_z = simz_tab['ZWARN'].mask == False
cut_simz = simz_tab[measured_z]
# Strip those columns
strip_ttypes = np.char.rstrip(cut_simz['TRUESPECTYPE'])
strip_stypes = np.char.rstrip(cut_simz['SPECTYPE'])
# All TRUE, SPEC types
ttypes = np.unique(strip_ttypes)
stypes = np.unique(strip_stypes)
# Init
results = {}
for ttype in ttypes:
results[ttype] = {}
# Fill
for ttype in ttypes:
itrue = strip_ttypes == ttype
# Init correct answer in case there are none
results[ttype][ttype] = 0
# import pdb; pdb.set_trace()
for stype in stypes:
results[ttype][stype] = np.sum(strip_stypes[itrue] == stype)
# Return
return results
def find_zbest_files(fibermap_data):
from desimodel.footprint import radec2pix
# Init
zbest_files = []
# Search for zbest files with healpy
ra_targ = fibermap_data['TARGET_RA'].data
dec_targ = fibermap_data['TARGET_DEC'].data
# Getting some NAN in RA/DEC
good = np.isfinite(ra_targ) & np.isfinite(dec_targ)
pixels = radec2pix(64, ra_targ[good], dec_targ[good])
uni_pixels = np.unique(pixels)
for uni_pix in uni_pixels:
zbest_files.append(desispec.io.findfile('zbest', groupname=uni_pix, nside=64))
# Return
return zbest_files
def load_z(fibermap_files, zbest_files=None, outfil=None):
'''Load input and output redshift values for a set of exposures
Parameters
----------
fibermap_files: list
List of fibermap files; None of these should be calibration..
zbest_files: list, optional
List of zbest output files
Slurped from fibermap info if not provided
outfil: str, optional
Output file for the table
Returns
-------
simz_tab: astropy.Table
Merged table of simpsec data
zb_tab: astropy.Table
Merged table of zbest output
'''
# imports
log = get_logger()
# Init
if zbest_files is None:
flag_load_zbest = True
zbest_files = []
else:
flag_load_zbest = False
# Load up fibermap and simspec tables
fbm_tabs = []
sps_tabs = []
for fibermap_file in fibermap_files:
# zbest?
if flag_load_zbest:
fibermap_data = desispec.io.read_fibermap(fibermap_file)
zbest_files += find_zbest_files(fibermap_data)
log.info('Reading: {:s}'.format(fibermap_file))
# Load simspec (for fibermap too!)
simspec_file = fibermap_file.replace('fibermap','simspec')
sps_hdu = fits.open(simspec_file)
# Make Tables
fbm_tabs.append(Table(sps_hdu['FIBERMAP'].data,masked=True))
truth = Table(sps_hdu['TRUTH'].data,masked=True)
if 'TRUTH_ELG' in sps_hdu:
truth_elg = Table(sps_hdu['TRUTH_ELG'].data)
truth = join(truth, truth_elg['TARGETID', 'OIIFLUX'],
keys='TARGETID', join_type='left')
else:
truth['OIIFLUX'] = 0.0
sps_tabs.append(truth)
sps_hdu.close()
# Stack + Sort
fbm_tab = vstack(fbm_tabs)
sps_tab = vstack(sps_tabs)
del fbm_tabs, sps_tabs
fbm_tab.sort('TARGETID')
sps_tab.sort('TARGETID')
# Add the version number header keywords from fibermap_files[0]
hdr = fits.getheader(fibermap_files[0].replace('fibermap', 'simspec'))
for key, value in sorted(hdr.items()):
if key.startswith('DEPNAM') or key.startswith('DEPVER'):
fbm_tab.meta[key] = value
# Drop to unique
univ, uni_idx = np.unique(np.array(fbm_tab['TARGETID']),return_index=True)
fbm_tab = fbm_tab[uni_idx]
sps_tab = sps_tab[uni_idx]
# Combine
assert np.all(fbm_tab['TARGETID'] == sps_tab['TARGETID'])
keep_colnames = list()
for colname in sps_tab.colnames:
if colname not in fbm_tab.colnames:
keep_colnames.append(colname)
simz_tab = hstack([fbm_tab,sps_tab[keep_colnames]],join_type='exact')
# Cleanup some names
#simz_tab.rename_column('OBJTYPE_1', 'OBJTYPE')
#simz_tab.rename_column('OBJTYPE_2', 'TRUETYPE')
# Update QSO naming
qsol = np.where( match_otype(simz_tab, 'QSO') & (simz_tab['TRUEZ'] >= 2.1))[0]
simz_tab['TEMPLATETYPE'][qsol] = 'QSO_L'
qsot = np.where( match_otype(simz_tab, 'QSO') & (simz_tab['TRUEZ'] < 2.1))[0]
simz_tab['TEMPLATETYPE'][qsot] = 'QSO_T'
# Load up zbest files
zb_tabs = []
for zbest_file in zbest_files:
try:
zb_hdu = fits.open(zbest_file)
except FileNotFoundError:
log.error("zbest file {} not found".format(zbest_file))
else:
zb_tabs.append(Table(zb_hdu[1].data))
# Stack
zb_tab = vstack(zb_tabs)
univ, uni_idx = np.unique(np.array(zb_tab['TARGETID']),return_index=True)
zb_tab = zb_tab[uni_idx]
# Return
return simz_tab, zb_tab
def match_truth_z(simz_tab, zb_tab, mini_read=False, outfil=None):
""" Match truth and zbest tables
:param simz_tab: astropy.Table; Either generated from load_z() or read from disk via 'truth.fits'
:param zb_tab: astropy.Table; Either generated from load_z() or read from disk via 'zcatalog-mini.fits'
:param mini_read: bool, optional; Tables were read from the summary tables written to disk
:param outfil: str, optional
:return: simz_tab: modified in place
"""
nsim = len(simz_tab)
# Match up
sim_id = np.array(simz_tab['TARGETID'])
z_id = np.array(zb_tab['TARGETID'])
inz = np.in1d(z_id,sim_id,assume_unique=True)
ins = np.in1d(sim_id,z_id,assume_unique=True)
z_idx = np.arange(z_id.shape[0])[inz]
sim_idx = np.arange(sim_id.shape[0])[ins]
assert np.array_equal(sim_id[sim_idx],z_id[z_idx])
# Fill up
ztags = ['Z','ZERR','ZWARN','SPECTYPE']
# This is for truth and zcat tables read from disk as opposed to the fibermap files
if mini_read:
ztags += ['DESI_TARGET']
# And clean up the QSO names
stypes = np.char.rstrip(simz_tab['TEMPLATETYPE'])
qsol = np.where((stypes == 'QSO') & (simz_tab['TRUEZ'] >= 2.1))[0]
simz_tab['TEMPLATETYPE'][qsol] = 'QSO_L'
qsot = np.where((stypes == 'QSO') & (simz_tab['TRUEZ'] < 2.1))[0]
simz_tab['TEMPLATETYPE'][qsot] = 'QSO_T'
# Generate the new columns
new_clms = []
mask = np.array([True]*nsim)
mask[sim_idx] = False
for kk,ztag in enumerate(ztags):
# Generate a MaskedColumn
new_clm = MaskedColumn([zb_tab[ztag][z_idx[0]]]*nsim, name=ztag, mask=mask)
#name=new_tags[kk], mask=mask)
# Fill
new_clm[sim_idx] = zb_tab[ztag][z_idx]
# Append
new_clms.append(new_clm)
# Add columns
simz_tab.add_columns(new_clms)
# Write?
if outfil is not None:
simz_tab.write(outfil,overwrite=True)
return
def obj_requirements(zstats, objtype):
"""Assess where a given objtype passes the requirements
Requirements from Doc 318 (August 2014)
Parameters
----------
zstats : Object
This parameter is not documented.
objtype : str
Object type, e.g. 'ELG', 'LRG'
Returns
-------
dict
Pass/fail dict
"""
log = get_logger()
pf_dict = {}
#
all_dict=dict(ELG={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.90},
LRG={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},
BGS={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},
MWS={'RMS_DZ':0.0005, 'MEAN_DZ': 0.0002, 'CAT_RATE': 0.05, 'EFF': 0.95},
QSO_T={'RMS_DZ':0.0025, 'MEAN_DZ': 0.0004, 'CAT_RATE': 0.05, 'EFF': 0.90},
QSO_L={'RMS_DZ':0.0025, 'CAT_RATE': 0.02, 'EFF': 0.90})
req_dict = all_dict[objtype]
tst_fail = ''
passf = str('PASS')
for key in req_dict:
ipassf = str('PASS')
if key in ['EFF']: # Greater than requirement
if zstats[key] < req_dict[key]:
ipassf = str('FAIL')
tst_fail = tst_fail+key+'-'
log.warning('{:s} failed requirement {:s}: {} < {}'.format(objtype, key, zstats[key], req_dict[key]))
else:
log.debug('{:s} passed requirement {:s}: {} >= {}'.format(objtype, key, zstats[key], req_dict[key]))
else:
if zstats[key] > req_dict[key]:
ipassf = str('FAIL')
tst_fail = tst_fail+key+'-'
log.warning('{:s} failed requirement {:s}: {} > {}'.format(objtype, key, zstats[key], req_dict[key]))
else:
log.debug('{:s} passed requirement {:s}: {} <= {}'.format(objtype, key, zstats[key], req_dict[key]))
# Update
pf_dict[key] = ipassf
if ipassf == str('FAIL'):
passf = str('FAIL')
if passf == str('FAIL'):
tst_fail = tst_fail[:-1]
# log.warning('OBJ={:s} failed tests {:s}'.format(objtype,tst_fail))
#
#pf_dict['FINAL'] = passf
return pf_dict, passf
def zstats(simz_tab, objtype=None, dvlimit=None, count=False, survey=False):
""" Perform statistics on the input truth+z table
good = Satisfies dv criteria and ZWARN==0
fail = Fails dv criteria with ZWARN==0 (catastrophic failures)
miss = Satisfies dv criteria but ZWARN!=0 (missed opportunities)
lost = Fails dv criteria and ZWARN!=0 (lost, but at least we knew it)
Args:
simz_tab:
objtype:
dvlimit: float, optional -- Over-rides object specific dv limits
count: bool, optional
survey: bool, optional -- Restrict to targets meeting the Survey criteria (e.g. ELG flux)
Returns:
if count=True: just the raw counts of each category :: ngood, nfail, nmiss, nlost
else: percentile of each relative to ntot, and ntot
"""
# Grab the masks
objtype_mask, z_mask, survey_mask, dv_mask, zwarn_mask = criteria(
simz_tab, dvlimit=dvlimit, objtype=objtype)
# Score-card
good = zwarn_mask & dv_mask & objtype_mask & z_mask
cat = zwarn_mask & (~dv_mask) & objtype_mask & z_mask
miss = (~zwarn_mask) & dv_mask & objtype_mask & z_mask
lost = (~zwarn_mask) & (~dv_mask) & objtype_mask & z_mask
# Restrict to the Survey design?
tot_msk = objtype_mask & z_mask
if survey:
good &= survey_mask
cat &= survey_mask
miss &= survey_mask
lost &= survey_mask
tot_msk &= survey_mask
#
ngood = np.count_nonzero(good)
nfail = np.count_nonzero(cat)
nmiss = np.count_nonzero(miss)
nlost = np.count_nonzero(lost)
ntot = np.count_nonzero(tot_msk)
# Check
assert(ntot == ngood+nfail+nmiss+nlost)
# Return
if count:
return ngood, nfail, nmiss, nlost
elif ntot == 0:
return (np.nan, np.nan, np.nan, np.nan, 0)
else:
return 100*ngood/ntot, 100*nfail/ntot, 100*nmiss/ntot, 100*nlost/ntot, ntot
def criteria(simz_tab, objtype=None, dvlimit=None):
"""Analyze the input table for various criteria
Parameters
----------
simz_tab : Table
objtype : str, optional -- Restrict analysis to a specific object type
Returns
-------
objtype_mask : ndarray
Match to input objtype (if any given)
z_mask : ndarray
Analyzed by the redshift analysis software
survey_mask : ndarray
Part of the DESI survey (not filler)
dv_mask : ndarray
Satisfies the dv criterion; Either specific to each objtype
or using an input dvlimit
zwarn_mask : ndarray
ZWARN=0
"""
# Init
nrow = len(simz_tab)
stypes = np.char.rstrip(simz_tab['TEMPLATETYPE'].astype(str))
# Object type
if objtype is None:
objtype_mask = np.array([True]*nrow)
else:
if objtype in ['STAR', 'WD', 'QSO']:
objtype_mask = stypes == objtype
else:
objtype_mask = match_otype(simz_tab, objtype) # Use DESI_TARGET when possible
# Redshift analysis
z_mask = simz_tab['Z'].mask == False # Not masked in Table
# Survey
survey_mask = (simz_tab['Z'].mask == False)
elg = np.where(match_otype(simz_tab, 'ELG') & survey_mask)[0]
if len(elg) > 0:
elg_mask = elg_flux_lim(simz_tab['TRUEZ'][elg],
simz_tab['OIIFLUX'][elg])
# Update
survey_mask[elg[~elg_mask]] = False
# zwarn -- Masked array
zwarn_mask = np.array([False]*nrow)
idx = np.where((simz_tab['ZWARN'] == 0) & (simz_tab['ZWARN'].mask == False))[0]
zwarn_mask[idx] = True
# Catastrophic/Good (This gets a bit more messy...)
dv_mask = np.array([True]*nrow)
for obj in np.unique(stypes):
if obj in ['ELG','LRG','QSO_L','QSO_T', 'BGS', 'MWS']: # Use DESI_TARGET when possible
omask = np.where(match_otype(simz_tab, obj))[0] # & (simz_tab['ZWARN']==0))[0]
else:
omask = np.where(stypes == obj)[0]
if dvlimit is None:
try:
dv = catastrophic_dv(obj) # km/s
except:
dv = 1000.
else:
dv = dvlimit
dz = calc_dz(simz_tab[omask]) # dz/1+z
cat = np.where(np.abs(dz)*C_LIGHT > dv)[0]
dv_mask[omask[cat]] = False
# Return
return objtype_mask, z_mask, survey_mask, dv_mask, zwarn_mask
def slice_simz(simz_tab, objtype=None, z_analy=False, survey=False,
catastrophic=False, goodz=False, all_zwarn0=False, **kwargs):
"""Slice input simz_tab in one of many ways
Parameters
----------
z_analy : bool, optional
redshift analysis required?
all_zwarn0 : bool, optional
Ignores catastrophic failures in the slicing to return
all sources with ZWARN==0
survey : bool, optional
Only include objects that satisfy the Survey requirements
e.g. ELGs with sufficient OII_flux
catastrophic : bool, optional
Restrict to catastropic failures
goodz : bool, optional
Restrict to good redshifts
all_zwarn0 : bool, optional
Restrict to ZWARN=0 cases
**kwargs : passed to criteria
Returns
-------
simz_table : Table cut by input parameters
"""
# Grab the masks
objtype_mask, z_mask, survey_mask, dv_mask, zwarn_mask = criteria(
simz_tab, objtype=objtype, **kwargs)
# Slice me
final_mask = objtype_mask
if z_analy:
final_mask &= z_mask
if survey:
final_mask &= survey_mask
if catastrophic:
final_mask &= (~dv_mask)
final_mask &= zwarn_mask # Must also have ZWARN=0
if goodz:
final_mask &= dv_mask
final_mask &= zwarn_mask
if all_zwarn0:
final_mask &= zwarn_mask
# Return
return simz_tab[final_mask]
def obj_fig(simz_tab, objtype, summ_stats, outfile=None):
"""Generate QA plot for a given object type
"""
from astropy.stats import sigma_clip
logs = get_logger()
gdz_tab = slice_simz(simz_tab,objtype=objtype, survey=True,goodz=True, all_zwarn0=True)
if objtype == 'ELG':
allgd_tab = slice_simz(simz_tab,objtype=objtype, survey=False,goodz=True, all_zwarn0=True)
if len(gdz_tab) <= 1:
logs.info("Not enough objects of type {:s} for QA".format(objtype))
return
# Plot
sty_otype = get_sty_otype()
fig = plt.figure(figsize=(8, 6.0))
gs = gridspec.GridSpec(2,2)
# Title
fig.suptitle('{:s}: Summary'.format(sty_otype[objtype]['lbl']),
fontsize='large')
# Offset
for kk in range(4):
yoff = 0.
ax= plt.subplot(gs[kk])
if kk == 0:
yval = calc_dzsig(gdz_tab)
ylbl = (r'$(z_{\rm red}-z_{\rm true}) / \sigma(z)$')
ylim = 5.
# Stats with clipping
clip_y = sigma_clip(yval, sigma=5.)
rms = np.std(clip_y)
redchi2 = np.sum(clip_y**2)/np.sum(~clip_y.mask)
#
xtxt = 0.05
ytxt = 1.0
for req_tst in ['EFF','CAT_RATE']:
ytxt -= 0.12
if summ_stats[objtype]['REQ_INDIV'][req_tst] == 'FAIL':
tcolor='red'
else:
tcolor='green'
ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format(req_tst,
summ_stats[objtype][req_tst]), color=tcolor,
transform=ax.transAxes, ha='left', fontsize='small')
# Additional
ytxt -= 0.12
ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format('RMS:', rms),
color='black', transform=ax.transAxes, ha='left', fontsize='small')
ytxt -= 0.12
ax.text(xtxt, ytxt, '{:s}: {:.3f}'.format(r'$\chi^2_\nu$:',
redchi2), color='black', transform=ax.transAxes,
ha='left', fontsize='small')
else:
yval = calc_dz(gdz_tab)
if kk == 1:
ylbl = (r'$(z_{\rm red}-z_{\rm true}) / (1+z)$')
else:
ylbl = r'$\delta v_{\rm red-true}$ [km/s]'
ylim = max(5.*summ_stats[objtype]['RMS_DZ'],1e-5)
if (np.median(summ_stats[objtype]['MEDIAN_DZ']) >
summ_stats[objtype]['RMS_DZ']):
yoff = summ_stats[objtype]['MEDIAN_DZ']
if kk==1:
# Stats
xtxt = 0.05
ytxt = 1.0
dx = ((ylim/2.)//0.0001 +1)*0.0001
ax.xaxis.set_major_locator(plt.MultipleLocator(dx))
for stat in ['RMS_DZ','MEAN_DZ', 'MEDIAN_DZ']:
ytxt -= 0.12
try:
pfail = summ_stats[objtype]['REQ_INDIV'][stat]
except KeyError:
tcolor='black'
else:
if pfail == 'FAIL':
tcolor='red'
else:
tcolor='green'
ax.text(xtxt, ytxt, '{:s}: {:.5f}'.format(stat,
summ_stats[objtype][stat]), color=tcolor,
transform=ax.transAxes, ha='left', fontsize='small')
# Histogram
if kk < 2:
binsz = ylim/10.
#i0, i1 = int( np.min(yval) / binsz) - 1, int( np.max(yval) / binsz) + 1
i0, i1 = int(-ylim/binsz) - 1, int( ylim/ binsz) + 1
rng = tuple( binsz*np.array([i0,i1]) )
nbin = i1-i0
# Histogram
hist, edges = np.histogram(yval, range=rng, bins=nbin)
xhist = (edges[1:] + edges[:-1])/2.
#ax.hist(xhist, color='black', bins=edges, weights=hist)#, histtype='step')
ax.hist(xhist, color=sty_otype[objtype]['color'], bins=edges, weights=hist)#, histtype='step')
ax.set_xlabel(ylbl)
ax.set_xlim(-ylim, ylim)
else:
if kk == 2:
lbl = r'$z_{\rm true}$'
xval = gdz_tab['TRUEZ']
xmin,xmax=np.min(xval),np.max(xval)
dx = np.maximum(1,(xmax-xmin)//0.5)*0.1
ax.xaxis.set_major_locator(plt.MultipleLocator(dx))
#xmin,xmax=0.6,1.65
elif kk == 3:
if objtype == 'ELG':
lbl = r'[OII] Flux ($10^{-16}$)'
#xval = gdz_tab['OIIFLUX']*1e16
xval = allgd_tab['OIIFLUX']*1e16
yval = calc_dz(allgd_tab)
# Avoid NAN
gdy = np.isfinite(yval)
xval = xval[gdy]
yval = yval[gdy]
xmin,xmax=0.5,20
ax.set_xscale("log", nonposx='clip')
elif objtype == 'QSO':
lbl = 'g (Mag)'
xval = 22.5 - 2.5 * np.log10(gdz_tab['FLUX_G'])
xmin,xmax=np.min(xval),np.max(xval)
else:
lbl = 'r (Mag)'
xval = 22.5 - 2.5 * np.log10(gdz_tab['FLUX_R'])
xmin,xmax=np.min(xval),
|
np.max(xval)
|
numpy.max
|
"""
fastspecfit.continuum
=====================
Methods and tools for continuum-fitting.
"""
import pdb # for debugging
import os, time
import numpy as np
import astropy.units as u
from fastspecfit.util import C_LIGHT
from desiutil.log import get_logger
log = get_logger()
def _fnnls_continuum(myargs):
"""Multiprocessing wrapper."""
return fnnls_continuum(*myargs)
def fnnls_continuum(ZZ, xx, flux=None, ivar=None, modelflux=None,
support=None, get_chi2=False, jvendrow=False):
"""Fit a continuum using fNNLS. This function is a simple wrapper on fnnls; see
the ContinuumFit.fnnls_continuum method for documentation.
Mapping between mikeiovine fnnls(AtA, Aty) and jvendrow fnnls(Z, x) inputs:
Z [mxn] --> A [mxn]
x [mx1] --> y [mx1]
And mikeiovine wants:
A^T * A
A^T * y
AtA = A.T.dot(A)
Aty = A.T.dot(y)
"""
if jvendrow:
from fnnls import fnnls
if support is None:
support = np.zeros(0, dtype=int)
try:
warn, coeff, _ = fnnls(ZZ, xx)#, P_initial=support)
except:
log.warning('fnnls failed to converge.')
warn, coeff = True, np.zeros(modelflux.shape[1])
else:
#from fastnnls import fnnls
from fastspecfit.fnnls import fnnls
AtA = ZZ.T.dot(ZZ)
Aty = ZZ.T.dot(xx)
coeff = fnnls(AtA, Aty)
warn = False
#if warn:
# print('WARNING: fnnls did not converge after 5 iterations.')
if get_chi2:
chi2 = np.sum(ivar * (flux - modelflux.dot(coeff))**2)
chi2 /= np.sum(ivar > 0) # reduced chi2
return warn, coeff, chi2
else:
return warn, coeff
class ContinuumTools(object):
def __init__(self, metallicity='Z0.0190', minwave=None, maxwave=6e4, seed=1):
"""Tools for dealing with stellar continua..
"""
import fitsio
from astropy.cosmology import FlatLambdaCDM
from astropy.table import Table, Column
from speclite import filters
from desiutil.dust import SFDMap
from fastspecfit.emlines import read_emlines
from fastspecfit.io import FASTSPECFIT_TEMPLATES_NERSC, DUST_DIR_NERSC
self.cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
# pre-compute the luminosity distance on a grid
#self.redshift_ref = np.arange(0.0, 5.0, 0.05)
#self.dlum_ref = self.cosmo.luminosity_distance(self.redshift_ref).to(u.pc).value
self.fluxnorm = 1e17 # normalization factor for the spectra
self.massnorm = 1e10 # stellar mass normalization factor for the SSPs [Msun]
self.metallicity = metallicity
self.Z = float(metallicity[1:])
self.library = 'CKC14z'
self.isochrone = 'Padova' # would be nice to get MIST in here
self.imf = 'Kroupa'
# dust maps
mapdir = os.path.join(os.environ.get('DUST_DIR', DUST_DIR_NERSC), 'maps')
self.SFDMap = SFDMap(scaling=1.0, mapdir=mapdir)
#self.SFDMap = SFDMap(scaling=0.86, mapdir=mapdir) # SF11 recalibration of the SFD maps
self.RV = 3.1
self.dustslope = 0.7
# SSPs
templates_dir = os.environ.get('FASTSPECFIT_TEMPLATES', FASTSPECFIT_TEMPLATES_NERSC)
self.sspfile = os.path.join(templates_dir, 'SSP_{}_{}_{}_{}.fits'.format(
self.isochrone, self.library, self.imf, self.metallicity))
if not os.path.isfile(self.sspfile):
log.warning('SSP templates file not found {}'.format(self.sspfile))
raise IOError
log.info('Reading {}'.format(self.sspfile))
wave, wavehdr = fitsio.read(self.sspfile, ext='WAVE', header=True)
flux = fitsio.read(self.sspfile, ext='FLUX')
sspinfo = Table(fitsio.read(self.sspfile, ext='METADATA'))
# Trim the wavelengths and select the number/ages of the templates.
# https://www.sdss.org/dr14/spectro/galaxy_mpajhu
if minwave is None:
minwave = np.min(wave)
keep = np.where((wave >= minwave) * (wave <= maxwave))[0]
sspwave = wave[keep]
myages = np.array([0.005, 0.025, 0.1, 0.2, 0.6, 0.9, 1.4, 2.5, 5, 10.0, 13.0])*1e9
iage = np.array([np.argmin(np.abs(sspinfo['age']-myage)) for myage in myages])
sspflux = flux[:, iage][keep, :] # flux[keep, ::5]
sspinfo = sspinfo[iage]
nage = len(sspinfo)
npix = len(sspwave)
self.pixkms = wavehdr['PIXSZBLU'] # pixel size [km/s]
# add AGN templates here?
if False:
# https://www.aanda.org/articles/aa/pdf/2017/08/aa30378-16.pdf
# F_nu \propto \nu^(-alpha) or F_lambda \propto \lambda^(alpha-2)
self.agn_lambda0 = 4020.0 # [Angstrom]a
self.agn_alpha = [0.5, 1.0, 1.5, 2.0]
nagn = len(self.agn_alpha)
agnflux = np.zeros((npix, nagn), 'f4')
#import matplotlib.pyplot as plt
for ii, alpha in enumerate(self.agn_alpha):
agnflux[:, ii] = sspwave**(alpha-2) / self.agn_lambda0**(alpha-2)
#plt.plot(sspwave, agnflux[:, ii])
#plt.xlim(3000, 9000) ; plt.ylim(0.1, 2.2) ; plt.savefig('junk.png')
sspflux = np.vstack((agnflux.T, sspflux.T)).T
sspinfo = Table(np.hstack([sspinfo[:nagn], sspinfo]))
sspinfo.add_column(Column(name='agn_alpha', length=nagn+nage, dtype='f4'))
sspinfo['age'][:nagn] = 0.0
sspinfo['mstar'][:nagn] = 0.0
sspinfo['lbol'][:nagn] = 0.0
sspinfo['agn_alpha'][:nagn] = self.agn_alpha
nage = len(sspinfo)
self.sspwave = sspwave
self.sspflux = sspflux # no dust, no velocity broadening [npix,nage]
self.sspinfo = sspinfo
self.nage = nage
self.npix = npix
# emission lines
self.linetable = read_emlines()
self.linemask_sigma_narrow = 200.0 # [km/s]
self.linemask_sigma_balmer = 1000.0 # [km/s]
self.linemask_sigma_broad = 2000.0 # [km/s]
# photometry
self.bands = np.array(['g', 'r', 'z', 'W1', 'W2'])
self.synth_bands = np.array(['g', 'r', 'z']) # for synthesized photometry
self.fiber_bands = np.array(['g', 'r', 'z']) # for fiber fluxes
self.decam = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z')
self.bassmzls = filters.load_filters('BASS-g', 'BASS-r', 'MzLS-z')
self.decamwise = filters.load_filters('decam2014-g', 'decam2014-r', 'decam2014-z',
'wise2010-W1', 'wise2010-W2')
self.bassmzlswise = filters.load_filters('BASS-g', 'BASS-r', 'MzLS-z',
'wise2010-W1', 'wise2010-W2')
self.bands_to_fit = np.ones(len(self.bands), bool)
self.bands_to_fit[self.bands == 'W2'] = False # drop W2
# rest-frame filters
self.absmag_bands = ['U', 'B', 'V', 'sdss_u', 'sdss_g', 'sdss_r', 'sdss_i', 'sdss_z', 'W1']
self.absmag_filters = filters.load_filters('bessell-U', 'bessell-B', 'bessell-V',
'sdss2010-u', 'sdss2010-g', 'sdss2010-r',
'sdss2010-i', 'sdss2010-z', 'wise2010-W1')
self.absmag_bandshift = np.array([0.0, 0.0, 0.0,
0.0, 0.0, 0.0,
0.0, 0.0, 0.0])
#self.absmag_bandshift = np.array([0.0, 0.0, 0.0,
# 0.1, 0.1, 0.1,
# 0.1, 0.1, 0.0])
self.min_uncertainty = np.array([0.01, 0.01, 0.01, 0.02, 0.02])
# used in one place...
#self.rand = np.random.RandomState(seed=seed)
@staticmethod
def get_dn4000(wave, flam, flam_ivar=None, redshift=None, rest=True):
"""Compute DN(4000) and, optionally, the inverse variance.
Parameters
----------
wave
flam
flam_ivar
redshift
rest
Returns
-------
Notes
-----
If `rest`=``False`` then `redshift` input is required.
"""
from fastspecfit.util import ivar2var
dn4000, dn4000_ivar = 0.0, 0.0
if rest:
flam2fnu = wave**2 / (C_LIGHT * 1e5) # [erg/s/cm2/A-->erg/s/cm2/Hz, rest]
else:
wave = np.copy(wave)
wave /= (1 + redshift) # [Angstrom]
flam2fnu = (1 + redshift) * wave**2 / (C_LIGHT * 1e5) # [erg/s/cm2/A-->erg/s/cm2/Hz, rest]
if flam_ivar is None:
goodmask = np.ones(len(flam), bool) # True is good
else:
goodmask = flam_ivar > 0
indxblu = np.where((wave >= 3850.) * (wave <= 3950.) * goodmask)[0]
indxred = np.where((wave >= 4000.) * (wave <= 4100.) * goodmask)[0]
if len(indxblu) < 5 or len(indxred) < 5:
return dn4000, dn4000_ivar
blufactor, redfactor = 3950.0 - 3850.0, 4100.0 - 4000.0
deltawave = np.gradient(wave) # should be constant...
fnu = flam * flam2fnu # [erg/s/cm2/Hz]
numer = blufactor * np.sum(deltawave[indxred] * fnu[indxred])
denom = redfactor * np.sum(deltawave[indxblu] * fnu[indxblu])
if denom == 0.0:
log.warning('DN(4000) is ill-defined!')
return dn4000, dn4000_ivar
dn4000 = numer / denom
if flam_ivar is not None:
fnu_ivar = flam_ivar / flam2fnu**2
fnu_var, _ = ivar2var(fnu_ivar)
numer_var = blufactor**2 * np.sum(deltawave[indxred] * fnu_var[indxred])
denom_var = redfactor**2 * np.sum(deltawave[indxblu] * fnu_var[indxblu])
dn4000_var = (numer_var + numer**2 * denom_var) / denom**2
if dn4000_var <= 0:
log.warning('DN(4000) variance is ill-defined!')
dn4000_ivar = 0.0
else:
dn4000_ivar = 1.0 / dn4000_var
return dn4000, dn4000_ivar
@staticmethod
def parse_photometry(bands, maggies, lambda_eff, ivarmaggies=None,
nanomaggies=True, nsigma=1.0, min_uncertainty=None):
"""Parse input (nano)maggies to various outputs and pack into a table.
Parameters
----------
flam - 10-17 erg/s/cm2/A
fnu - 10-17 erg/s/cm2/Hz
abmag - AB mag
nanomaggies - input maggies are actually 1e-9 maggies
nsigma - magnitude limit
Returns
-------
phot - photometric table
Notes
-----
"""
from astropy.table import Table, Column
shp = maggies.shape
if maggies.ndim == 1:
nband, ngal = shp[0], 1
else:
nband, ngal = shp[0], shp[1]
phot = Table()
phot.add_column(Column(name='band', data=bands))
phot.add_column(Column(name='lambda_eff', length=nband, dtype='f4'))
phot.add_column(Column(name='nanomaggies', length=nband, shape=(ngal, ), dtype='f4'))
phot.add_column(Column(name='nanomaggies_ivar', length=nband, shape=(ngal, ), dtype='f4'))
phot.add_column(Column(name='flam', length=nband, shape=(ngal, ), dtype='f8')) # note f8!
phot.add_column(Column(name='flam_ivar', length=nband, shape=(ngal, ), dtype='f8'))
phot.add_column(Column(name='abmag', length=nband, shape=(ngal, ), dtype='f4'))
phot.add_column(Column(name='abmag_ivar', length=nband, shape=(ngal, ), dtype='f4'))
#phot.add_column(Column(name='abmag_err', length=nband, shape=(ngal, ), dtype='f4'))
phot.add_column(Column(name='abmag_brighterr', length=nband, shape=(ngal, ), dtype='f4'))
phot.add_column(Column(name='abmag_fainterr', length=nband, shape=(ngal, ), dtype='f4'))
phot.add_column(Column(name='abmag_limit', length=nband, shape=(ngal, ), dtype='f4'))
if ivarmaggies is None:
ivarmaggies = np.zeros_like(maggies)
if min_uncertainty is None:
min_uncertainty = np.zeros_like(maggies)
## Gaia-only targets all have grz=-99 fluxes (we now cut these out in
## io.DESISpectra.find_specfiles)
#if np.all(maggies==-99):
# log.warning('Gaia-only targets not supported.')
# raise ValueError
phot['lambda_eff'] = lambda_eff#.astype('f4')
if nanomaggies:
phot['nanomaggies'] = maggies#.astype('f4')
phot['nanomaggies_ivar'] = ivarmaggies#.astype('f4')
else:
phot['nanomaggies'] = (maggies * 1e9)#.astype('f4')
phot['nanomaggies_ivar'] = (ivarmaggies * 1e-18)#.astype('f4')
if nanomaggies:
nanofactor = 1e-9 # [nanomaggies-->maggies]
else:
nanofactor = 1.0
#pdb.set_trace()
#factor=(2.5/alog(10.))
#err=factor/sqrt(maggies_ivar[k,igood])/maggies[k,igood]
#err2=err^2+minerrors[k]^2
#maggies_ivar[k,igood]=factor^2/(maggies[k,igood]^2*err2)
factor = nanofactor * 10**(-0.4 * 48.6) * C_LIGHT * 1e13 / lambda_eff**2 # [maggies-->erg/s/cm2/A]
if ngal > 1:
factor = factor[:, None] # broadcast for the models
phot['flam'] = (maggies * factor)
phot['flam_ivar'] = (ivarmaggies / factor**2)
# deal with measurements
good = np.where(maggies > 0)[0]
if len(good) > 0:
if maggies.ndim > 1:
igood, jgood = np.unravel_index(good, maggies.shape)
goodmaggies = maggies[igood, jgood]
else:
igood, jgood = good, [0]
goodmaggies = maggies[igood]
phot['abmag'][igood, jgood] = (-2.5 * np.log10(nanofactor * goodmaggies))#.astype('f4')
# deal with the uncertainties
snr = maggies * np.sqrt(ivarmaggies)
good = np.where(snr > nsigma)[0]
upper = np.where((ivarmaggies > 0) * (snr <= nsigma))[0]
if maggies.ndim > 1:
if len(upper) > 0:
iupper, jupper = np.unravel_index(upper, maggies.shape)
abmag_limit = +2.5 * np.log10(np.sqrt(ivarmaggies[iupper, jupper]) / nsigma) # note "+" instead of 1/ivarmaggies
igood, jgood = np.unravel_index(good, maggies.shape)
maggies = maggies[igood, jgood]
ivarmaggies = ivarmaggies[igood, jgood]
errmaggies = 1 / np.sqrt(ivarmaggies)
#fracerr = 1 / snr[igood, jgood]
else:
if len(upper) > 0:
iupper, jupper = upper, [0]
abmag_limit = +2.5 * np.log10(np.sqrt(ivarmaggies[iupper]) / nsigma)
igood, jgood = good, [0]
maggies = maggies[igood]
ivarmaggies = ivarmaggies[igood]
errmaggies = 1 / np.sqrt(ivarmaggies)
#fracerr = 1 / snr[igood]
# significant detections
if len(good) > 0:
phot['abmag_brighterr'][igood, jgood] = errmaggies / (0.4 * np.log(10) * (maggies+errmaggies))#.astype('f4') # bright end (flux upper limit)
phot['abmag_fainterr'][igood, jgood] = errmaggies / (0.4 * np.log(10) * (maggies-errmaggies))#.astype('f4') # faint end (flux lower limit)
#phot['abmag_loerr'][igood, jgood] = +2.5 * np.log10(1 + fracerr) # bright end (flux upper limit)
#phot['abmag_uperr'][igood, jgood] = +2.5 * np.log10(1 - fracerr) # faint end (flux lower limit)
#test = 2.5 * np.log(np.exp(1)) * fracerr # symmetric in magnitude (approx)
# approximate the uncertainty as being symmetric in magnitude
phot['abmag_ivar'][igood, jgood] = (ivarmaggies * (maggies * 0.4 * np.log(10))**2)#.astype('f4')
if len(upper) > 0:
phot['abmag_limit'][iupper, jupper] = abmag_limit#.astype('f4')
return phot
def convolve_vdisp(self, sspflux, vdisp):
"""Convolve by the velocity dispersion.
Parameters
----------
sspflux
vdisp
Returns
-------
Notes
-----
"""
from scipy.ndimage import gaussian_filter1d
if vdisp <= 0.0:
return sspflux
sigma = vdisp / self.pixkms # [pixels]
smoothflux = gaussian_filter1d(sspflux, sigma=sigma, axis=0)
return smoothflux
def dust_attenuation(self, wave, AV):
"""Compute the dust attenuation curve A(lambda)/A(V) from Charlot & Fall 2000.
ToDo: add a UV bump and IGM attenuation!
https://gitlab.lam.fr/cigale/cigale/-/blob/master/pcigale/sed_modules/dustatt_powerlaw.py#L42
"""
return 10**(-0.4 * AV * (wave / 5500.0)**(-self.dustslope))
def build_linemask(self, wave, flux, ivar, redshift=0.0):
"""Generate a mask which identifies pixels potentially affected by emission
lines.
wave - observed-frame wavelength array
"""
def _estimate_linesigma(zlinewaves, sigma, label='Broad-line', junkplot=None):
"""Estimate the velocity width from potentially strong, isolated lines; somewhat
fragile!
"""
from scipy.optimize import curve_fit
linesigma, snr = 0.0, 0.0
inrange = (zlinewaves > np.min(wave)) * (zlinewaves < np.max(wave))
if np.sum(inrange) > 0:
stackdvel, stackflux, stackivar = [], [], []
for zlinewave in zlinewaves[inrange]:
I = ((wave >= (zlinewave - 2*sigma * zlinewave / C_LIGHT)) *
(wave <= (zlinewave + 2*sigma * zlinewave / C_LIGHT)))
if np.max(flux[I]) > 0:
stackdvel.append((wave[I] - zlinewave) / zlinewave * C_LIGHT)
stackflux.append(flux[I] / np.max(flux[I]))
stackivar.append(ivar[I] * np.max(flux[I])**2)
if len(stackflux) > 0:
stackdvel = np.hstack(stackdvel)
stackflux = np.hstack(stackflux)
stackivar = np.hstack(stackivar)
onegauss = lambda x, amp, sigma, cont: amp * np.exp(-0.5 * x**2 / sigma**2) + cont
noneg = stackivar > 0
if np.sum(noneg) > 10:
#snr = np.median(stackflux*np.sqrt(stackivar))
stacksigma = 1 / np.sqrt(stackivar[noneg])
try:
popt, _ = curve_fit(onegauss, xdata=stackdvel[noneg], ydata=stackflux[noneg],
sigma=stacksigma, p0=[1.0, sigma, np.median(stackflux)])
if popt[0] > 0 and popt[1] > 0:
linesigma = popt[1]
snr = popt[0] / np.std(stackflux[noneg])
else:
popt = None
except RuntimeError:
popt = None
if junkplot:
import matplotlib.pyplot as plt
plt.clf()
plt.plot(stackdvel, stackflux)
if popt is not None:
plt.plot(stackdvel, onegauss(stackdvel, *popt))
plt.savefig(junkplot)
log.debug('{} masking sigma={:.3f} and S/N={:.3f}'.format(label, linesigma, snr))
return linesigma, snr
# Lya, SiIV doublet, CIV doublet, CIII], MgII doublet
zlinewaves = np.array([1215.670, 1398.2625, 1549.4795, 1908.734, 2799.941]) * (1 + redshift)
linesigma_broad, broad_snr = _estimate_linesigma(
zlinewaves, self.linemask_sigma_broad, label='Broad-line')#, junkplot='cosmo-www/tmp/junk-broad.png')
if (linesigma_broad < 300) or (linesigma_broad > 2500) or (broad_snr < 3):
linesigma_broad = self.linemask_sigma_broad
# [OII] doublet, [OIII] 4959,5007
zlinewaves = np.array([3728.483, 4960.295, 5008.239]) * (1 + redshift)
linesigma_narrow, narrow_snr = _estimate_linesigma(
zlinewaves, self.linemask_sigma_narrow, label='Narrow-line')#, junkplot='cosmo-www/tmp/junk-narrow.png')
if (linesigma_narrow < 50) or (linesigma_narrow > 250) or (narrow_snr < 3):
linesigma_narrow = self.linemask_sigma_narrow
# Hbeta, Halpha
zlinewaves = np.array([4862.683, 6564.613]) * (1 + redshift)
linesigma_balmer, narrow_balmer = _estimate_linesigma(
zlinewaves, self.linemask_sigma_balmer, label='Balmer-line')#, junkplot='desi-users/ioannis/tmp2/junk-balmer.png')
if (linesigma_balmer < 50) or (linesigma_balmer > 2500) or (narrow_balmer < 3):
linesigma_balmer = self.linemask_sigma_balmer
# now build the mask
linemask = np.zeros_like(wave, bool) # False = unaffected by emission line
linenames = np.hstack(('Lya', self.linetable['name'])) # include Lyman-alpha
zlinewaves = np.hstack((1215.0, self.linetable['restwave'])) * (1 + redshift)
isbroads = np.hstack((True, self.linetable['isbroad']))
isbalmers = np.hstack((False, self.linetable['isbalmer']))
inrange = (zlinewaves > np.min(wave)) * (zlinewaves < np.max(wave))
# Index I for building the line-mask; J for estimating the local
# continuum (to be used in self.smooth_residuals).
linepix, contpix, linename = [], [], []
for _linename, zlinewave, isbroad, isbalmer in zip(linenames[inrange], zlinewaves[inrange],
isbroads[inrange], isbalmers[inrange]):
if isbroad:
sigma = linesigma_broad
elif isbalmer:
sigma = linesigma_balmer
else:
sigma = linesigma_narrow
sigma *= zlinewave / C_LIGHT # [km/s --> Angstrom]
I = (wave >= (zlinewave - 3*sigma)) * (wave <= (zlinewave + 3*sigma))
#if np.sum(I) > 0:
# linename.append(_linename)
# linepix.append(I)
# linemask[I] = True # True = affected by line
Jblu = (wave > (zlinewave - 5*sigma)) * (wave < (zlinewave - 3*sigma)) * (linemask == False)
Jred = (wave < (zlinewave + 5*sigma)) * (wave > (zlinewave + 3*sigma)) * (linemask == False)
J = np.logical_or(Jblu, Jred)
#if '4686' in _linename:
# pdb.set_trace()
if np.sum(I) > 0 and np.sum(J) > 0:
linename.append(_linename)
linepix.append(I)
contpix.append(J)
linemask[I] = True # True = affected by line
#pdb.set_trace()
#for _linename, zlinewave, isbroad, isbalmer in zip(linenames[inrange], zlinewaves[inrange],
# isbroads[inrange], isbalmers[inrange]):
# if isbroad:
# sigma = linesigma_broad
# elif isbalmer:
# sigma = linesigma_balmer
# else:
# sigma = linesigma_narrow
# sigma *= zlinewave / C_LIGHT # [km/s --> Angstrom]
# Jblu = (wave > (zlinewave - 6*sigma)) * (wave < (zlinewave - 4*sigma)) * (linemask == False)
# Jred = (wave < (zlinewave + 6*sigma)) * (wave > (zlinewave + 4*sigma)) * (linemask == False)
#
# if '4686' in _linename:
# pdb.set_trace()
#
# J = np.logical_or(Jblu, Jred)
# if np.sum(J) > 0:
# contpix.append(J)
linemask_dict = {'linemask': linemask, 'linename': linename, 'linepix': linepix, 'contpix': contpix}
return linemask_dict
def smooth_and_resample(self, sspflux, sspwave, specwave=None, specres=None):
"""Given a single template, apply the resolution matrix and resample in
wavelength.
Parameters
----------
sspflux : :class:`numpy.ndarray` [npix]
Input (model) spectrum.
sspwave : :class:`numpy.ndarray` [npix]
Wavelength array corresponding to `sspflux`.
specwave : :class:`numpy.ndarray` [noutpix], optional, defaults to None
Desired output wavelength array, usually that of the object being fitted.
specres : :class:`desispec.resolution.Resolution`, optional, defaults to None
Resolution matrix.
vdisp : :class:`float`, optional, defaults to None
Velocity dispersion broadening factor [km/s].
pixkms : :class:`float`, optional, defaults to None
Pixel size of input spectra [km/s].
Returns
-------
:class:`numpy.ndarray` [noutpix]
Smoothed and resampled flux at the new resolution and wavelength sampling.
Notes
-----
This function stands by itself rather than being in a class because we call
it with multiprocessing, below.
"""
from redrock.rebin import trapz_rebin
if specwave is None:
resampflux = sspflux
else:
trim = (sspwave > (specwave.min()-10.0)) * (sspwave < (specwave.max()+10.0))
resampflux = trapz_rebin(sspwave[trim], sspflux[trim], specwave)
#try:
# resampflux = trapz_rebin(sspwave[trim], sspflux[trim], specwave)
#except:
# pdb.set_trace()
if specres is None:
smoothflux = resampflux
else:
smoothflux = specres.dot(resampflux)
return smoothflux # [noutpix]
def SSP2data(self, _sspflux, _sspwave, redshift=0.0, AV=None, vdisp=None,
cameras=['b', 'r', 'z'], specwave=None, specres=None, coeff=None,
south=True, synthphot=True):
"""Workhorse routine to turn input SSPs into spectra that can be compared to
real data.
Redshift, apply the resolution matrix, and resample in wavelength.
Parameters
----------
redshift
specwave
specres
south
synthphot - synthesize photometry?
Returns
-------
Vector or 3-element list of [npix, nmodel] spectra.
Notes
-----
This method does none or more of the following:
- redshifting
- wavelength resampling
- apply dust reddening
- apply velocity dispersion broadening
- apply the resolution matrix
- synthesize photometry
It also naturally handles SSPs which have been precomputed on a grid of
reddening or velocity dispersion (and therefore have an additional
dimension). However, if the input grid is 3D, it is reshaped to be 2D
but then it isn't reshaped back because of the way the photometry table
is organized (bug or feature?).
"""
# Are we dealing with a 2D grid [npix,nage] or a 3D grid
# [npix,nage,nAV] or [npix,nage,nvdisp]?
sspflux = _sspflux.copy() # why?!?
sspwave = _sspwave.copy() # why?!?
ndim = sspflux.ndim
if ndim == 2:
npix, nage = sspflux.shape
nmodel = nage
elif ndim == 3:
npix, nage, nprop = sspflux.shape
nmodel = nage*nprop
sspflux = sspflux.reshape(npix, nmodel)
else:
log.fatal('Input SSPs have an unrecognized number of dimensions, {}'.format(ndim))
raise ValueError
#t0 = time.time()
##sspflux = sspflux.copy().reshape(npix, nmodel)
#log.info('Copying the data took: {:.2f} sec'.format(time.time()-t0))
# apply reddening
if AV:
atten = self.dust_attenuation(sspwave, AV)
sspflux *= atten[:, np.newaxis]
## broaden for velocity dispersion
#if vdisp:
# sspflux = self.convolve_vdisp(sspflux, vdisp)
# Apply the redshift factor. The models are normalized to 10 pc, so
# apply the luminosity distance factor here. Also normalize to a nominal
# stellar mass.
#t0 = time.time()
if redshift:
zsspwave = sspwave * (1.0 + redshift)
dfactor = (10.0 / self.cosmo.luminosity_distance(redshift).to(u.pc).value)**2
#dfactor = (10.0 / np.interp(redshift, self.redshift_ref, self.dlum_ref))**2
factor = (self.fluxnorm * self.massnorm * dfactor / (1.0 + redshift))[np.newaxis, np.newaxis]
zsspflux = sspflux * factor
else:
zsspwave = sspwave.copy()
zsspflux = self.fluxnorm * self.massnorm * sspflux
#log.info('Cosmology calculations took: {:.2f} sec'.format(time.time()-t0))
# Optionally synthesize photometry. We assume that velocity broadening,
# if any, won't impact the measured photometry.
sspphot = None
if synthphot:
if south:
filters = self.decamwise
else:
filters = self.bassmzlswise
effwave = filters.effective_wavelengths.value
if ((specwave is None and specres is None and coeff is None) or
(specwave is not None and specres is not None)):
#t0 = time.time()
maggies = filters.get_ab_maggies(zsspflux, zsspwave, axis=0) # speclite.filters wants an [nmodel,npix] array
maggies = np.vstack(maggies.as_array().tolist()).T
maggies /= self.fluxnorm * self.massnorm
sspphot = self.parse_photometry(self.bands, maggies, effwave, nanomaggies=False)
#log.info('Synthesizing photometry took: {:.2f} sec'.format(time.time()-t0))
# Are we returning per-camera spectra or a single model? Handle that here.
#t0 = time.time()
if specwave is None and specres is None:
datasspflux = []
for imodel in np.arange(nmodel):
datasspflux.append(self.smooth_and_resample(zsspflux[:, imodel], zsspwave))
datasspflux = np.vstack(datasspflux).T
if vdisp:
datasspflux = self.convolve_vdisp(datasspflux, vdisp)
# optionally compute the best-fitting model
if coeff is not None:
datasspflux = datasspflux.dot(coeff)
if synthphot:
maggies = filters.get_ab_maggies(datasspflux, zsspwave, axis=0)
maggies = np.array(maggies.as_array().tolist()[0])
maggies /= self.fluxnorm * self.massnorm
sspphot = self.parse_photometry(self.bands, maggies, effwave, nanomaggies=False)
else:
# loop over cameras
datasspflux = []
for icamera in np.arange(len(cameras)): # iterate on cameras
_datasspflux = []
for imodel in np.arange(nmodel):
_datasspflux.append(self.smooth_and_resample(
zsspflux[:, imodel], zsspwave, specwave=specwave[icamera],
specres=specres[icamera]))
_datasspflux = np.vstack(_datasspflux).T
if vdisp:
_datasspflux = self.convolve_vdisp(_datasspflux, vdisp)
if coeff is not None:
_datasspflux = _datasspflux.dot(coeff)
datasspflux.append(_datasspflux)
#if icamera == 1:
# pdb.set_trace()
#log.info('Resampling took: {:.2f} sec'.format(time.time()-t0))
return datasspflux, sspphot # vector or 3-element list of [npix,nmodel] spectra
def smooth_residuals(self, residuals, wave, specivar, linemask,
linepix, contpix, seed=1, binwave=0.8*120):#binwave=0.8*160):
"""Derive a median-smoothed correction to the continuum fit in order to pick up
any unmodeled flux, before emission-line fitting.
Parameters
----------
Returns
-------
Notes
-----
There are a few different algorithms in here, but the default one is to
do a very simple median-smoothing on the camera-stacked
(percamera=False) spectrum, which prevents any crazy discontinuities
between the cameras.
https://github.com/moustakas/moustakas-projects/blob/master/ages/ppxf/ages_gandalf_specfit.pro#L138-L145
"""
#from scipy.stats import sigmaclip
from scipy.ndimage import median_filter
from fastspecfit.util import ivar2var
rand = np.random.RandomState(seed=seed)
#def robust_median(wave, flux, ivar, binwave):
# from scipy import ptp
# from scipy.stats import sigmaclip
# #from scipy.signal import savgol_filter
#
# smoothflux = np.zeros_like(flux)
# minwave, maxwave = np.min(wave), np.max(wave)
# nbin = int(ptp(wave) / binwave)
# wavebins = np.linspace(minwave, maxwave, nbin)
# idx = np.digitize(wave, wavebins)
# for kk in np.arange(nbin):
# I = (idx == kk)
# J = I * (ivar > 0)
# #print(kk, np.sum(I), np.sum(J))
# if np.sum(J) > 10:
# clipflux, _, _ = sigmaclip(flux[J], low=5.0, high=5.0)
# smoothflux[I] = np.median(clipflux)
# #print(kk, np.sum(J), np.sum(I), np.median(wave[I]), smoothflux[I][0])
# return smoothflux
medbin = np.int(binwave * 2)
#print(binwave, medbin)
# Replace pixels potentially affected by lines by the ivar-weighted mean
# of the surrounding (local) continuum.
residuals_clean = []
for icam in np.arange(len(residuals)): # iterate over cameras
#var, I = ivar2var(specivar[icam])
ivar = specivar[icam]
_residuals = residuals[icam].copy()
for _linepix, _contpix in zip(linepix[icam], contpix[icam]):
I = ivar[_contpix] > 0
if np.sum(I) > 0:
norm = np.sum(ivar[_contpix][I])
mn = np.sum(ivar[_contpix][I] * _residuals[_contpix][I]) / norm # weighted mean
sig = np.sqrt(np.sum(ivar[_contpix][I] * (_residuals[_contpix][I] - mn)**2) / norm) # weighted standard deviation
#sig = np.sqrt(np.sum(ivar[I][_contpix]**2 * (_residuals[I][_contpix] - mn)**2) / norm**2) # weighted error in the mean
#_residuals[_linepix] = np.median(residuals[icam][_contpix])
#_residuals[_linepix] = (self.rand.normal(size=len(_linepix)) * np.std(residuals[icam][_contpix])) + np.median(residuals[icam][_contpix])
#clipflux, _, _ = sigmaclip(residuals[icam][_contpix], low=3.0, high=3.0)
#if icam == 1:
# import matplotlib.pyplot as plt
# plt.clf()
# plt.scatter(wave[icam][_linepix], _residuals[_linepix], color='blue', s=1)
# plt.scatter(wave[icam][_contpix], _residuals[_contpix], color='green', s=1)
# plt.axhline(y=mn, color='k')
# plt.axhline(y=mn+sig, ls='--', color='k')
# plt.axhline(y=mn-sig, ls='--', color='k')
# plt.plot(wave[icam][_linepix], (rand.normal(size=len(_linepix)) * sig) + mn, color='orange', alpha=0.5)
# plt.savefig('desi-users/ioannis/tmp2/junk.png')
# pdb.set_trace()
_residuals[_linepix] = (rand.normal(size=len(_linepix)) * sig) + mn
#_residuals[_linepix] = (rand.normal(size=len(_linepix)) * np.std(clipflux)) + np.median(clipflux)
residuals_clean.append(_residuals)
residuals_clean = np.hstack(residuals_clean)
smooth1 = median_filter(residuals_clean, medbin, mode='nearest')
smooth_continuum = median_filter(smooth1, medbin//2, mode='nearest')
#if percamera:
# smooth_continuum = []
# for icam in np.arange(len(specflux)): # iterate over cameras
# residuals = specflux[icam] - continuummodel[icam]
# if False:
# smooth1 = robust_median(specwave[icam], residuals, specivar[icam], binwave)
# #smooth1 = robust_median(specwave[icam], residuals, specivar[icam] * linemask[icam], binwave)
# smooth2 = median_filter(smooth1, medbin, mode='nearest')
# smooth_continuum.append(smooth2)
# else:
# # Fragile...replace the line-affected pixels with noise to make the
# # smoothing better-behaved.
# pix_nolines = linemask[icam] # unaffected by a line = True
# pix_emlines = np.logical_not(pix_nolines) # affected by line = True
# residuals[pix_emlines] = (self.rand.normal(np.count_nonzero(pix_emlines)) *
# np.std(residuals[pix_nolines]) +
# np.median(residuals[pix_nolines]))
# smooth1 = median_filter(residuals, medbin, mode='nearest')
# #smooth2 = savgol_filter(smooth1, 151, 2)
# smooth2 = median_filter(smooth1, medbin//2, mode='nearest')
# smooth_continuum.append(smooth2)
#else:
# pix_nolines = linemask # unaffected by a line = True
# pix_emlines = np.logical_not(pix_nolines) # affected by line = True
# residuals[pix_emlines] = (self.rand.normal(size=np.sum(pix_emlines)) *
# np.std(residuals[pix_nolines]) + np.median(residuals[pix_nolines]))
# smooth1 = median_filter(residuals, medbin, mode='nearest')
# smooth_continuum = median_filter(smooth1, medbin//2, mode='nearest')
return smooth_continuum
class ContinuumFit(ContinuumTools):
def __init__(self, metallicity='Z0.0190', minwave=None, maxwave=6e4):
"""Class to model a galaxy stellar continuum.
Parameters
----------
metallicity : :class:`str`, optional, defaults to `Z0.0190`.
Stellar metallicity of the SSPs. Currently fixed at solar
metallicity, Z=0.0190.
minwave : :class:`float`, optional, defaults to None
Minimum SSP wavelength to read into memory. If ``None``, the minimum
available wavelength is used (around 100 Angstrom).
maxwave : :class:`float`, optional, defaults to 6e4
Maximum SSP wavelength to read into memory.
Notes
-----
Need to document all the attributes.
Plans for improvement (largely in self.fnnls_continuum).
- Update the continuum redshift using cross-correlation.
- Don't draw reddening from a flat distribution (try gamma or a custom
distribution of the form x**2*np.exp(-2*x/scale).
"""
super(ContinuumFit, self).__init__(metallicity=metallicity, minwave=minwave, maxwave=maxwave)
# Initialize the velocity dispersion and reddening parameters. Make sure
# the nominal values are in the grid.
vdispmin, vdispmax, dvdisp, vdisp_nominal = (100.0, 350.0, 20.0, 150.0)
#vdispmin, vdispmax, dvdisp, vdisp_nominal = (0.0, 0.0, 30.0, 150.0)
nvdisp = np.int(np.ceil((vdispmax - vdispmin) / dvdisp))
if nvdisp == 0:
nvdisp = 1
vdisp = np.linspace(vdispmin, vdispmax, nvdisp)#.astype('f4') # [km/s]
if not vdisp_nominal in vdisp:
vdisp = np.sort(np.hstack((vdisp, vdisp_nominal)))
self.vdisp = vdisp
self.vdisp_nominal = vdisp_nominal
self.nvdisp = len(vdisp)
#AVmin, AVmax, dAV, AV_nominal = (0.0, 0.0, 0.1, 0.0)
AVmin, AVmax, dAV, AV_nominal = (0.0, 1.5, 0.1, 0.0)
nAV = np.int(np.ceil((AVmax - AVmin) / dAV))
if nAV == 0:
nAV = 1
AV = np.linspace(AVmin, AVmax, nAV)#.astype('f4')
assert(AV[0] == 0.0) # minimum value has to be zero (assumed in fnnls_continuum)
if not AV_nominal in AV:
AV = np.sort(np.hstack((AV, AV_nominal)))
self.AV = AV
self.AV_nominal = AV_nominal
self.nAV = len(AV)
# Next, precompute a grid of spectra convolved to the nominal velocity
# dispersion with reddening applied. This isn't quite right redward of
# ~1 micron where the pixel size changes, but fix that later.
sspflux_dustvdisp = []
for AV in self.AV:
atten = self.dust_attenuation(self.sspwave, AV)
_sspflux_dustvdisp = self.convolve_vdisp(self.sspflux * atten[:, np.newaxis], self.vdisp_nominal)
sspflux_dustvdisp.append(_sspflux_dustvdisp)
# nominal velocity broadening on a grid of A(V) [npix,nage,nAV]
self.sspflux_dustvdisp = np.stack(sspflux_dustvdisp, axis=-1) # [npix,nage,nAV]
# Do a throw-away trapezoidal resampling so we can compile the numba
# code when instantiating this class.
#from redrock.rebin import trapz_rebin
#t0 = time.time()
#_ = trapz_rebin(np.arange(4), np.ones(4), np.arange(2)+1)
#print('Initial rebin ', time.time() - t0)
def init_spec_output(self, nobj=1):
"""Initialize the output data table for this class.
"""
from astropy.table import Table, Column
nssp_coeff = len(self.sspinfo)
out = Table()
out.add_column(Column(name='CONTINUUM_Z', length=nobj, dtype='f8')) # redshift
out.add_column(Column(name='CONTINUUM_COEFF', length=nobj, shape=(nssp_coeff,), dtype='f8'))
out.add_column(Column(name='CONTINUUM_CHI2', length=nobj, dtype='f4')) # reduced chi2
#out.add_column(Column(name='CONTINUUM_DOF', length=nobj, dtype=np.int32))
out.add_column(Column(name='CONTINUUM_AGE', length=nobj, dtype='f4', unit=u.Gyr))
out.add_column(Column(name='CONTINUUM_AV', length=nobj, dtype='f4', unit=u.mag))
out.add_column(Column(name='CONTINUUM_AV_IVAR', length=nobj, dtype='f4', unit=1/u.mag**2))
out.add_column(Column(name='CONTINUUM_VDISP', length=nobj, dtype='f4', unit=u.kilometer/u.second))
out.add_column(Column(name='CONTINUUM_VDISP_IVAR', length=nobj, dtype='f4', unit=u.second**2/u.kilometer**2))
for cam in ['B', 'R', 'Z']:
out.add_column(Column(name='CONTINUUM_SNR_{}'.format(cam), length=nobj, dtype='f4')) # median S/N in each camera
#out.add_column(Column(name='CONTINUUM_SNR', length=nobj, shape=(3,), dtype='f4')) # median S/N in each camera
# maximum correction to the median-smoothed continuum
for cam in ['B', 'R', 'Z']:
out.add_column(Column(name='CONTINUUM_SMOOTHCORR_{}'.format(cam), length=nobj, dtype='f4'))
out['CONTINUUM_AV'] = self.AV_nominal
out['CONTINUUM_VDISP'] = self.vdisp_nominal
if False:
# continuum fit with *no* dust reddening (to be used as a diagnostic
# tool to identify potential calibration issues).
out.add_column(Column(name='CONTINUUM_NODUST_COEFF', length=nobj, shape=(nssp_coeff,), dtype='f8'))
out.add_column(Column(name='CONTINUUM_NODUST_CHI2', length=nobj, dtype='f4')) # reduced chi2
#out.add_column(Column(name='CONTINUUM_NODUST_AGE', length=nobj, dtype='f4', unit=u.Gyr))
out.add_column(Column(name='DN4000', length=nobj, dtype='f4'))
out.add_column(Column(name='DN4000_IVAR', length=nobj, dtype='f4'))
out.add_column(Column(name='DN4000_MODEL', length=nobj, dtype='f4'))
return out
def init_phot_output(self, nobj=1):
"""Initialize the photometric output data table.
"""
from astropy.table import Table, Column
nssp_coeff = len(self.sspinfo)
out = Table()
#out.add_column(Column(name='CONTINUUM_Z', length=nobj, dtype='f8')) # redshift
out.add_column(Column(name='CONTINUUM_COEFF', length=nobj, shape=(nssp_coeff,), dtype='f8'))
out.add_column(Column(name='CONTINUUM_CHI2', length=nobj, dtype='f4')) # reduced chi2
#out.add_column(Column(name='CONTINUUM_DOF', length=nobj, dtype=np.int32))
out.add_column(Column(name='CONTINUUM_AGE', length=nobj, dtype='f4', unit=u.Gyr))
out.add_column(Column(name='CONTINUUM_AV', length=nobj, dtype='f4', unit=u.mag))
out.add_column(Column(name='CONTINUUM_AV_IVAR', length=nobj, dtype='f4', unit=1/u.mag**2))
out.add_column(Column(name='DN4000_MODEL', length=nobj, dtype='f4'))
# observed-frame photometry synthesized from the best-fitting continuum model fit
for band in self.bands:
out.add_column(Column(name='FLUX_SYNTH_MODEL_{}'.format(band.upper()), length=nobj, dtype='f4', unit=u.nanomaggy))
if False:
for band in self.fiber_bands:
out.add_column(Column(name='FIBERTOTFLUX_{}'.format(band.upper()), length=nobj, dtype='f4', unit=u.nanomaggy)) # observed-frame fiber photometry
#out.add_column(Column(name='FIBERTOTFLUX_IVAR_{}'.format(band.upper()), length=nobj, dtype='f4', unit=1/u.nanomaggy**2))
for band in self.bands:
out.add_column(Column(name='FLUX_{}'.format(band.upper()), length=nobj, dtype='f4', unit=u.nanomaggy)) # observed-frame photometry
out.add_column(Column(name='FLUX_IVAR_{}'.format(band.upper()), length=nobj, dtype='f4', unit=1/u.nanomaggy**2))
for band in self.absmag_bands:
out.add_column(Column(name='KCORR_{}'.format(band.upper()), length=nobj, dtype='f4', unit=u.mag))
out.add_column(Column(name='ABSMAG_{}'.format(band.upper()), length=nobj, dtype='f4', unit=u.mag)) # absolute magnitudes
out.add_column(Column(name='ABSMAG_IVAR_{}'.format(band.upper()), length=nobj, dtype='f4', unit=1/u.mag**2))
return out
def get_meanage(self, coeff):
"""Compute the light-weighted age, given a set of coefficients.
"""
nage = len(coeff)
age = self.sspinfo['age'][0:nage] # account for age of the universe trimming
if np.count_nonzero(coeff > 0) == 0:
log.warning('Coefficients are all zero!')
meanage = -1.0
#raise ValueError
else:
meanage = np.sum(coeff * age) / np.sum(coeff) / 1e9 # [Gyr]
return meanage
def younger_than_universe(self, redshift):
"""Return the indices of the SSPs younger than the age of the universe at the
given redshift.
"""
return np.where(self.sspinfo['age'] <= self.cosmo.age(redshift).to(u.year).value)[0]
def kcorr_and_absmag(self, data, continuum, coeff, snrmin=2.0):
"""Computer K-corrections and absolute magnitudes.
# To get the absolute r-band magnitude we would do:
# M_r = m_X_obs + 2.5*log10(r_synth_rest/X_synth_obs)
# where X is the redshifted bandpass
"""
redshift = data['zredrock']
band_shift = self.absmag_bandshift
bands_to_fit = self.bands_to_fit
if data['photsys'] == 'S':
filters_in = self.decamwise
else:
filters_in = self.bassmzlswise
filters_out = self.absmag_filters
nout = len(filters_out)
# shift the bandpasses blueward by a factor of 1+band_shift
lambda_in = filters_in.effective_wavelengths.value
lambda_out = filters_out.effective_wavelengths.value / (1 + band_shift)
# redshifted wavelength array and distance modulus
zsspwave = self.sspwave * (1 + redshift)
dmod = self.cosmo.distmod(redshift).value
maggies = data['phot']['nanomaggies'].data * 1e-9
ivarmaggies = (data['phot']['nanomaggies_ivar'].data / 1e-9**2) * bands_to_fit
# input bandpasses, observed frame; maggies and bestmaggies should be
# very close.
bestmaggies = filters_in.get_ab_maggies(continuum / self.fluxnorm, zsspwave)
bestmaggies = np.array(bestmaggies.as_array().tolist()[0])
# output bandpasses, rest frame -- need to shift the filter curves
# blueward by a factor of 1+band_shift!
synth_outmaggies_rest = filters_out.get_ab_maggies(continuum * (1 + redshift) / self.fluxnorm, self.sspwave)
synth_outmaggies_rest = np.array(synth_outmaggies_rest.as_array().tolist()[0])
# output bandpasses, observed frame
synth_outmaggies_obs = filters_out.get_ab_maggies(continuum / self.fluxnorm, zsspwave)
synth_outmaggies_obs = np.array(synth_outmaggies_obs.as_array().tolist()[0])
absmag = np.zeros(nout, dtype='f4')
ivarabsmag = np.zeros(nout, dtype='f4')
kcorr = np.zeros(nout, dtype='f4')
for jj in np.arange(nout):
lambdadist = np.abs(lambda_in / (1 + redshift) - lambda_out[jj])
# K-correct from the nearest "good" bandpass (to minimizes the K-correction)
#oband = np.argmin(lambdadist)
#oband = np.argmin(lambdadist + (ivarmaggies == 0)*1e10)
oband = np.argmin(lambdadist + (maggies*np.sqrt(ivarmaggies) < snrmin)*1e10)
kcorr[jj] = + 2.5 * np.log10(synth_outmaggies_rest[jj] / bestmaggies[oband])
# m_R = M_Q + DM(z) + K_QR(z) or
# M_Q = m_R - DM(z) - K_QR(z)
if maggies[oband] * np.sqrt(ivarmaggies[oband]) > snrmin:
#if (maggies[oband] > 0) and (ivarmaggies[oband]) > 0:
absmag[jj] = -2.5 * np.log10(maggies[oband]) - dmod - kcorr[jj]
ivarabsmag[jj] = maggies[oband]**2 * ivarmaggies[oband] * (0.4 * np.log(10.))**2
else:
# if we use synthesized photometry then ivarabsmag is zero
# (which should never happen?)
absmag[jj] = -2.5 * np.log10(synth_outmaggies_rest[jj]) - dmod
# get the stellar mass
if False:
nage = len(coeff)
dfactor = (10.0 / self.cosmo.luminosity_distance(redshift).to(u.pc).value)**2
mstar = self.sspinfo['mstar'][:nage].dot(coeff) * self.massnorm * self.fluxnorm * dfactor / (1+redshift)
# From Taylor+11, eq 8
#https://researchportal.port.ac.uk/ws/files/328938/MNRAS_2011_Taylor_1587_620.pdf
#mstar = 1.15 + 0.7*(absmag[1]-absmag[3]) - 0.4*absmag[3]
return kcorr, absmag, ivarabsmag, bestmaggies
def _fnnls_parallel(self, modelflux, flux, ivar, xparam=None, debug=False):
"""Wrapper on fnnls to set up the multiprocessing. Works with both spectroscopic
and photometric input and with both 2D and 3D model spectra.
To be documented.
"""
from redrock import fitz
if xparam is not None:
nn = len(xparam)
ww = np.sqrt(ivar)
xx = flux * ww
# If xparam is None (equivalent to modelflux having just two
# dimensions, [npix,nage]), assume we are just finding the
# coefficients at some best-fitting value...
#if modelflux.ndim == 2:
if xparam is None:
ZZ = modelflux * ww[:, np.newaxis]
warn, coeff, chi2 = fnnls_continuum(ZZ, xx, flux=flux, ivar=ivar,
modelflux=modelflux, get_chi2=True)
if np.any(warn):
print('WARNING: fnnls did not converge after 10 iterations.')
return coeff, chi2
# ...otherwise multiprocess over the xparam (e.g., AV or vdisp)
# dimension.
ZZ = modelflux * ww[:, np.newaxis, np.newaxis] # reshape into [npix/nband,nage,nAV/nvdisp]
fitargs = [(ZZ[:, :, ii], xx, flux, ivar, modelflux[:, :, ii], None, True) for ii in np.arange(nn)]
rr = [fnnls_continuum(*_fitargs) for _fitargs in fitargs]
warn, _, chi2grid = list(zip(*rr)) # unpack
if np.any(warn):
vals = ','.join(['{:.1f}'.format(xp) for xp in xparam[np.where(warn)[0]]])
log.warning('fnnls did not converge after 10 iterations for parameter value(s) {}.'.format(vals))
chi2grid = np.array(chi2grid)
try:
imin = fitz.find_minima(chi2grid)[0]
xbest, xerr, chi2min, warn = fitz.minfit(xparam[imin-1:imin+2], chi2grid[imin-1:imin+2])
except:
print('Problem here!', chi2grid)
imin, xbest, xerr, chi2min, warn = 0, 0.0, 0.0, 0.0, 1
#if np.all(chi2grid == 0):
# imin, xbest, xerr, chi2min, warn = 0, 0.0, 0.0, 0.0, 1
#else:
if warn == 0:
xivar = 1.0 / xerr**2
else:
chi2min = 1e6
xivar = 0.0
if debug:
import matplotlib.pyplot as plt
plt.clf()
plt.scatter(xparam, chi2grid)
plt.scatter(xparam[imin-1:imin+2], chi2grid[imin-1:imin+2], color='red')
#plt.plot(xx, np.polyval([aa, bb, cc], xx), ls='--')
plt.axvline(x=xbest, color='k')
if xivar > 0:
plt.axhline(y=chi2min, color='k')
plt.yscale('log')
plt.savefig('qa-chi2min.png')
return chi2min, xbest, xivar
def continuum_fastphot(self, data):
"""Fit the broad photometry.
Parameters
----------
data : :class:`dict`
Dictionary of input spectroscopy (plus ancillary data) populated by
`unpack_one_spectrum`.
Returns
-------
:class:`astropy.table.Table`
Table with all the continuum-fitting results with columns documented
in `init_phot_output`.
Notes
-----
See
https://github.com/jvendrow/fnnls
https://github.com/mikeiovine/fast-nnls
for the fNNLS algorithm(s).
"""
# Initialize the output table; see init_fastspecfit for the data model.
result = self.init_phot_output()
redshift = data['zredrock']
#result['CONTINUUM_Z'] = redshift
# Prepare the reddened and unreddened SSP templates. Note that we ignore
# templates which are older than the age of the universe at the galaxy
# redshift.
agekeep = self.younger_than_universe(redshift)
t0 = time.time()
zsspflux_dustvdisp, zsspphot_dustvdisp = self.SSP2data(
self.sspflux_dustvdisp[:, agekeep, :], self.sspwave, # [npix,nage,nAV]
redshift=redshift, specwave=None, specres=None,
south=data['photsys'] == 'S')
log.info('Preparing the models took {:.2f} sec'.format(time.time()-t0))
objflam = data['phot']['flam'].data * self.fluxnorm
objflamivar = (data['phot']['flam_ivar'].data / self.fluxnorm**2) * self.bands_to_fit
assert(np.all(objflamivar >= 0))
if np.all(objflamivar == 0): # can happen for secondary targets
log.info('All photometry is masked or not available!')
AVbest, AVivar = self.AV_nominal, 0.0
nage = self.nage
coeff = np.zeros(self.nage)
continuummodel = np.zeros(len(self.sspwave))
else:
zsspflam_dustvdisp = zsspphot_dustvdisp['flam'].data * self.fluxnorm * self.massnorm # [nband,nage*nAV]
inodust = np.asscalar(np.where(self.AV == 0)[0]) # should always be index 0
npix, nmodel = zsspflux_dustvdisp.shape
nage = nmodel // self.nAV # accounts for age-of-the-universe constraint (!=self.nage)
zsspflam_dustvdisp = zsspflam_dustvdisp.reshape(len(self.bands), nage, self.nAV) # [nband,nage,nAV]
t0 = time.time()
AVchi2min, AVbest, AVivar = self._fnnls_parallel(zsspflam_dustvdisp, objflam,
objflamivar, xparam=self.AV)
log.info('Fitting the photometry took: {:.2f} sec'.format(time.time()-t0))
if AVivar > 0:
log.info('Best-fitting photometric A(V)={:.4f}+/-{:.4f} with chi2={:.3f}'.format(
AVbest, 1/np.sqrt(AVivar), AVchi2min))
else:
AVbest = self.AV_nominal
log.info('Finding photometric A(V) failed; adopting A(V)={:.4f}'.format(self.AV_nominal))
# Get the final set of coefficients and chi2 at the best-fitting
# reddening and nominal velocity dispersion.
bestsspflux, bestphot = self.SSP2data(self.sspflux_dustvdisp[:, agekeep, inodust], # equivalent to calling with self.sspflux[:, agekeep]
self.sspwave, AV=AVbest, redshift=redshift,
south=data['photsys'] == 'S')
coeff, chi2min = self._fnnls_parallel(bestphot['flam'].data*self.massnorm*self.fluxnorm,
objflam, objflamivar) # bestphot['flam'] is [nband, nage]
continuummodel = bestsspflux.dot(coeff)
# Compute DN4000, K-corrections, and rest-frame quantities.
if np.count_nonzero(coeff > 0) == 0:
log.warning('Continuum coefficients are all zero!')
chi2min, dn4000, meanage = 1e6, -1.0, -1.0
kcorr = np.zeros(len(self.absmag_bands))
absmag = np.zeros(len(self.absmag_bands))-99.0
ivarabsmag = np.zeros(len(self.absmag_bands))
synth_bestmaggies = np.zeros(len(self.bands))
else:
dn4000, _ = self.get_dn4000(self.sspwave, continuummodel, rest=True)
meanage = self.get_meanage(coeff)
kcorr, absmag, ivarabsmag, synth_bestmaggies = self.kcorr_and_absmag(data, continuummodel, coeff)
log.info('Photometric DN(4000)={:.3f}, Age={:.2f} Gyr, Mr={:.2f} mag'.format(
dn4000, meanage, absmag[1]))
# Pack it up and return.
result['CONTINUUM_COEFF'][0][:nage] = coeff
result['CONTINUUM_CHI2'][0] = chi2min
result['CONTINUUM_AGE'][0] = meanage
result['CONTINUUM_AV'][0] = AVbest
result['CONTINUUM_AV_IVAR'][0] = AVivar
result['DN4000_MODEL'][0] = dn4000
if False:
for iband, band in enumerate(self.fiber_bands):
result['FIBERTOTFLUX_{}'.format(band.upper())] = data['fiberphot']['nanomaggies'][iband]
#result['FIBERTOTFLUX_IVAR_{}'.format(band.upper())] = data['fiberphot']['nanomaggies_ivar'][iband]
for iband, band in enumerate(self.bands):
result['FLUX_{}'.format(band.upper())] = data['phot']['nanomaggies'][iband]
result['FLUX_IVAR_{}'.format(band.upper())] = data['phot']['nanomaggies_ivar'][iband]
for iband, band in enumerate(self.absmag_bands):
result['KCORR_{}'.format(band.upper())] = kcorr[iband]
result['ABSMAG_{}'.format(band.upper())] = absmag[iband]
result['ABSMAG_IVAR_{}'.format(band.upper())] = ivarabsmag[iband]
for iband, band in enumerate(self.bands):
result['FLUX_SYNTH_MODEL_{}'.format(band.upper())] = synth_bestmaggies[iband]
return result, continuummodel
def continuum_specfit(self, data, solve_vdisp=False):
"""Fit the stellar continuum of a single spectrum using fast non-negative
least-squares fitting (fNNLS).
Parameters
----------
data : :class:`dict`
Dictionary of input spectroscopy (plus ancillary data) populated by
`unpack_one_spectrum`.
solve_vdisp : :class:`bool`, optional, defaults to False
Solve for the velocity dispersion.
Returns
-------
:class:`astropy.table.Table`
Table with all the continuum-fitting results with columns documented
in `init_fastspecfit`.
Notes
-----
ToDo:
- Use cross-correlation to update the redrock redshift.
- Need to mask more emission lines than we fit (e.g., Mg II).
"""
# Initialize the output table; see init_fastspecfit for the data model.
result = self.init_spec_output()
redshift = data['zredrock']
result['CONTINUUM_Z'] = redshift
for icam, cam in enumerate(data['cameras']):
result['CONTINUUM_SNR_{}'.format(cam.upper())] = data['snr'][icam]
# Prepare the reddened and unreddened SSP templates. Note that we ignore
# templates which are older than the age of the universe at the galaxy
# redshift.
agekeep = self.younger_than_universe(redshift)
t0 = time.time()
zsspflux_dustvdisp, _ = self.SSP2data(
self.sspflux_dustvdisp[:, agekeep, :], self.sspwave, # [npix,nage,nAV]
redshift=redshift, specwave=data['wave'], specres=data['res'],
cameras=data['cameras'], synthphot=False)
log.info('Preparing the models took {:.2f} sec'.format(time.time()-t0))
# Combine all three cameras; we will unpack them to build the
# best-fitting model (per-camera) below.
npixpercamera = [len(gw) for gw in data['wave']]
npixpercam = np.hstack([0, npixpercamera])
specwave = np.hstack(data['wave'])
specflux = np.hstack(data['flux'])
specivar = np.hstack(data['ivar']) * np.logical_not(np.hstack(data['linemask'])) # mask emission lines
zsspflux_dustvdisp = np.concatenate(zsspflux_dustvdisp, axis=0) # [npix,nage*nAV]
assert(np.all(specivar >= 0))
inodust = np.asscalar(np.where(self.AV == 0)[0]) # should always be index 0
npix, nmodel = zsspflux_dustvdisp.shape
nage = nmodel // self.nAV # accounts for age-of-the-universe constraint (!=self.nage)
zsspflux_dustvdisp = zsspflux_dustvdisp.reshape(npix, nage, self.nAV) # [npix,nage,nAV]
if False:
# Fit the spectra with *no* dust reddening so we can identify potential
# calibration issues (again, at the nominal velocity dispersion).
t0 = time.time()
coeff, chi2min = self._fnnls_parallel(zsspflux_dustvdisp[:, :, inodust],
specflux, specivar)
log.info('No-dust model fit has chi2={:.3f} and took {:.2f} sec'.format(
chi2min, time.time()-t0))
result['CONTINUUM_NODUST_COEFF'][0][0:nage] = coeff
result['CONTINUUM_NODUST_CHI2'] = chi2min
# Fit the spectra for reddening using the models convolved to the
# nominal velocity dispersion and then fit for velocity dispersion.
t0 = time.time()
AVchi2min, AVbest, AVivar = self._fnnls_parallel(zsspflux_dustvdisp, specflux, specivar,
xparam=self.AV, debug=False)
log.info('Fitting for the reddening took: {:.2f} sec'.format(time.time()-t0))
if AVivar > 0:
log.info('Best-fitting spectroscopic A(V)={:.4f}+/-{:.4f} with chi2={:.3f}'.format(
AVbest, 1/np.sqrt(AVivar), AVchi2min))
else:
AVbest = self.AV_nominal
log.info('Finding spectroscopic A(V) failed; adopting A(V)={:.4f}'.format(
self.AV_nominal))
# Optionally build out the model spectra on our grid of velocity
# dispersion and then solve.
if solve_vdisp:
t0 = time.time()
zsspflux_vdisp = []
for vdisp in self.vdisp:
_zsspflux_vdisp, _ = self.SSP2data(self.sspflux[:, agekeep], self.sspwave,
specwave=data['wave'], specres=data['res'],
AV=AVbest, vdisp=vdisp, redshift=redshift,
cameras=data['cameras'], synthphot=False)
_zsspflux_vdisp = np.concatenate(_zsspflux_vdisp, axis=0)
zsspflux_vdisp.append(_zsspflux_vdisp)
zsspflux_vdisp = np.stack(zsspflux_vdisp, axis=-1) # [npix,nage,nvdisp] at best A(V)
vdispchi2min, vdispbest, vdispivar = self._fnnls_parallel(zsspflux_vdisp, specflux, specivar,
xparam=self.vdisp, debug=False)
log.info('Fitting for the velocity dispersion took: {:.2f} sec'.format(time.time()-t0))
if vdispivar > 0:
log.info('Best-fitting vdisp={:.2f}+/-{:.2f} km/s with chi2={:.3f}'.format(
vdispbest, 1/np.sqrt(vdispivar), vdispchi2min))
else:
vdispbest = self.vdisp_nominal
log.info('Finding vdisp failed; adopting vdisp={:.2f} km/s'.format(self.vdisp_nominal))
else:
vdispbest, vdispivar = self.vdisp_nominal, 0.0
# Get the final set of coefficients and chi2 at the best-fitting
# reddening and velocity dispersion.
bestsspflux, bestphot = self.SSP2data(self.sspflux[:, agekeep], self.sspwave,
specwave=data['wave'], specres=data['res'],
AV=AVbest, vdisp=vdispbest, redshift=redshift,
cameras=data['cameras'], south=data['photsys'] == 'S')
bestsspflux = np.concatenate(bestsspflux, axis=0)
coeff, chi2min = self._fnnls_parallel(bestsspflux, specflux, specivar)
# Get the mean age and DN(4000).
bestfit = bestsspflux.dot(coeff)
meanage = self.get_meanage(coeff)
dn4000_model, _ = self.get_dn4000(specwave, bestfit, redshift=redshift, rest=False)
dn4000, dn4000_ivar = self.get_dn4000(specwave, specflux, specivar, redshift=redshift, rest=False)
log.info('Spectroscopic DN(4000)={:.3f}, Age={:.2f} Gyr'.format(dn4000, meanage))
# Do a quick median-smoothing of the stellar continuum-subtracted
# residuals, to help with the emission-line fitting.
if False:
log.warning('Skipping smoothing residuals!')
_smooth_continuum = np.zeros_like(bestfit)
else:
_residuals = specflux - bestfit
residuals = [_residuals[np.sum(npixpercam[:icam+1]):np.sum(npixpercam[:icam+2])] for icam in np.arange(len(data['cameras']))]
_smooth_continuum = self.smooth_residuals(
residuals, data['wave'], data['ivar'],
data['linemask'], data['linepix'], data['contpix'])
#_smooth_continuum = self.smooth_residuals(
# bestfit, specwave, specflux, np.hstack(data['ivar']),
# np.hstack(data['linemask']))
#smooth_continuum = self.smooth_residuals(
# bestfit, data['coadd_wave'], data['coadd_flux'],
# data['coadd_ivar'], data['coadd_linemask'],
# npixpercam)
# Unpack the continuum into individual cameras.
continuummodel = []
smooth_continuum = []
for icam in np.arange(len(data['cameras'])): # iterate over cameras
ipix = np.sum(npixpercam[:icam+1])
jpix = np.sum(npixpercam[:icam+2])
continuummodel.append(bestfit[ipix:jpix])
smooth_continuum.append(_smooth_continuum[ipix:jpix])
## Like above, but with per-camera smoothing.
#smooth_continuum = self.smooth_residuals(
# continuummodel, data['wave'], data['flux'],
# data['ivar'], data['linemask'], percamera=False)
if False:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2, 1)
for icam in np.arange(len(data['cameras'])): # iterate over cameras
resid = data['flux'][icam]-continuummodel[icam]
ax[0].plot(data['wave'][icam], resid)
ax[1].plot(data['wave'][icam], resid-smooth_continuum[icam])
for icam in np.arange(len(data['cameras'])): # iterate over cameras
resid = data['flux'][icam]-continuummodel[icam]
pix_emlines = np.logical_not(data['linemask'][icam]) # affected by line = True
ax[0].scatter(data['wave'][icam][pix_emlines], resid[pix_emlines], s=30, color='red')
ax[0].plot(data['wave'][icam], smooth_continuum[icam], color='k', alpha=0.7, lw=2)
plt.savefig('junk.png')
# Pack it in and return.
result['CONTINUUM_COEFF'][0][0:nage] = coeff
result['CONTINUUM_CHI2'][0] = chi2min
result['CONTINUUM_AV'][0] = AVbest
result['CONTINUUM_AV_IVAR'][0] = AVivar
result['CONTINUUM_VDISP'][0] = vdispbest
result['CONTINUUM_VDISP_IVAR'][0] = vdispivar
result['CONTINUUM_AGE'] = meanage
result['DN4000'][0] = dn4000
result['DN4000_IVAR'][0] = dn4000_ivar
result['DN4000_MODEL'][0] = dn4000_model
for icam, cam in enumerate(data['cameras']):
nonzero = continuummodel[icam] != 0
#nonzero = np.abs(continuummodel[icam]) > 1e-5
if np.sum(nonzero) > 0:
corr = np.median(smooth_continuum[icam][nonzero] / continuummodel[icam][nonzero])
result['CONTINUUM_SMOOTHCORR_{}'.format(cam.upper())] = corr * 100 # [%]
log.info('Smooth continuum correction: b={:.3f}%, r={:.3f}%, z={:.3f}%'.format(
result['CONTINUUM_SMOOTHCORR_B'][0], result['CONTINUUM_SMOOTHCORR_R'][0],
result['CONTINUUM_SMOOTHCORR_Z'][0]))
return result, continuummodel, smooth_continuum
def qa_fastphot(self, data, fastphot, metadata, coadd_type='healpix',
outdir=None, outprefix=None):
"""QA of the best-fitting continuum.
"""
import matplotlib.pyplot as plt
from matplotlib import colors
import matplotlib.ticker as ticker
import seaborn as sns
from fastspecfit.util import ivar2var
sns.set(context='talk', style='ticks', font_scale=1.2)#, rc=rc)
col1 = [colors.to_hex(col) for col in ['skyblue', 'darkseagreen', 'tomato']]
col2 = [colors.to_hex(col) for col in ['navy', 'forestgreen', 'firebrick']]
ymin, ymax = 1e6, -1e6
redshift = metadata['Z']
if metadata['PHOTSYS'] == 'S':
filters = self.decam
allfilters = self.decamwise
else:
filters = self.bassmzls
allfilters = self.bassmzlswise
if outdir is None:
outdir = '.'
if outprefix is None:
outprefix = 'fastphot'
if coadd_type == 'healpix':
title = 'Survey/Program/HealPix: {}/{}/{}, TargetID: {}'.format(
metadata['SURVEY'], metadata['FAPRGRM'], metadata['HPXPIXEL'], metadata['TARGETID'])
pngfile = os.path.join(outdir, '{}-{}-{}-{}-{}.png'.format(
outprefix, metadata['SURVEY'], metadata['FAPRGRM'], metadata['HPXPIXEL'], metadata['TARGETID']))
elif coadd_type == 'cumulative':
title = 'Tile/thruNight: {}/{}, TargetID/Fiber: {}/{}'.format(
metadata['TILEID'], metadata['THRUNIGHT'], metadata['TARGETID'], metadata['FIBER'])
pngfile = os.path.join(outdir, '{}-{}-{}-{}.png'.format(
outprefix, metadata['TILEID'], coadd_type, metadata['TARGETID']))
elif coadd_type == 'pernight':
title = 'Tile/Night: {}/{}, TargetID/Fiber: {}/{}'.format(
metadata['TILEID'], metadata['NIGHT'], metadata['TARGETID'],
metadata['FIBER'])
pngfile = os.path.join(outdir, '{}-{}-{}-{}.png'.format(
outprefix, metadata['TILEID'], metadata['NIGHT'], metadata['TARGETID']))
elif coadd_type == 'perexp':
title = 'Tile/Night/Expid: {}/{}/{}, TargetID/Fiber: {}/{}'.format(
metadata['TILEID'], metadata['NIGHT'], metadata['EXPID'],
metadata['TARGETID'], metadata['FIBER'])
pngfile = os.path.join(outdir, '{}-{}-{}-{}-{}.png'.format(
outprefix, metadata['TILEID'], metadata['NIGHT'],
metadata['EXPID'], metadata['TARGETID']))
else:
pass
# rebuild the best-fitting photometric model fit
continuum_phot, _ = self.SSP2data(self.sspflux, self.sspwave, redshift=redshift,
AV=fastphot['CONTINUUM_AV'],
coeff=fastphot['CONTINUUM_COEFF'] * self.massnorm,
synthphot=False)
continuum_wave_phot = self.sspwave * (1 + redshift)
wavemin, wavemax = 0.2, 6.0
indx = np.where((continuum_wave_phot/1e4 > wavemin) * (continuum_wave_phot/1e4 < wavemax))[0]
phot = self.parse_photometry(self.bands,
maggies=np.array([metadata['FLUX_{}'.format(band.upper())] for band in self.bands]),
ivarmaggies=np.array([metadata['FLUX_IVAR_{}'.format(band.upper())] for band in self.bands]),
lambda_eff=allfilters.effective_wavelengths.value)
fiberphot = self.parse_photometry(self.fiber_bands,
maggies=np.array([metadata['FIBERTOTFLUX_{}'.format(band.upper())] for band in self.fiber_bands]),
lambda_eff=filters.effective_wavelengths.value)
#fibertotphot = self.parse_photometry(self.fiber_bands,
# maggies=np.array([metadata['FIBERTOTFLUX_{}'.format(band.upper())] for band in self.fiber_bands]),
# lambda_eff=filters.effective_wavelengths.value)
#if specfit:
# synthphot = self.parse_photometry(self.synth_bands,
# maggies=np.array([specfit['FLUX_SYNTH_{}'.format(band.upper())] for band in self.synth_bands]),
# lambda_eff=filters.effective_wavelengths.value)
# synthmodelphot = self.parse_photometry(self.synth_bands,
# maggies=np.array([specfit['FLUX_SYNTH_MODEL_{}'.format(band.upper())] for band in self.synth_bands]),
# lambda_eff=filters.effective_wavelengths.value)
#else:
# synthphot, synthmodelphot = None, None
fig, ax = plt.subplots(figsize=(12, 8))
if np.any(continuum_phot <= 0):
log.warning('Best-fitting photometric continuum is all zeros or negative!')
continuum_phot_abmag = continuum_phot*0 + np.median(fiberphot['abmag'])
else:
factor = 10**(0.4 * 48.6) * continuum_wave_phot**2 / (C_LIGHT * 1e13) / self.fluxnorm / self.massnorm # [erg/s/cm2/A --> maggies]
continuum_phot_abmag = -2.5*np.log10(continuum_phot * factor)
ax.plot(continuum_wave_phot[indx] / 1e4, continuum_phot_abmag[indx], color='gray', zorder=1)
# we have to set the limits *before* we call errorbar, below!
dm = 0.75
good = phot['abmag_ivar'] > 0
if np.sum(good) > 0:
ymin = np.max((np.nanmax(phot['abmag'][good]), np.nanmax(continuum_phot_abmag[indx]))) + dm
ymax = np.min((np.nanmin(phot['abmag'][good]),
|
np.nanmin(continuum_phot_abmag[indx])
|
numpy.nanmin
|
"""
Module for calculating metrics from CO2, usually as a baseline to compare other gases.
Author: <NAME> (UK)
Adapted by <NAME>
"""
import numpy as np
from fair.constants import molwt
from fair.constants.general import M_ATMOS
from fair.forcing.ghg import meinshausen
from fair.defaults.thermal import q, d
def ch4_analytical(H, co2=409.85, ch4=1866.3275, n2o=332.091, ch4_ra=-0.14, f_ch4_o3=0.29, f_ch4_h2o=-0.1, d=d, q=q, alpha_ch4=11.8):
re = meinshausen(np.array([co2, ch4+1, n2o]),
|
np.array([co2, ch4, n2o])
|
numpy.array
|
"""Run chemical evolution model."""
from __future__ import print_function, division, absolute_import
import os
from os.path import join
import copy
import traceback
import time
import numpy as np
import pandas as pd
import utils
def integrate_power_law(exponent, bins=None):
"""Integrate a power law distribution.
Args:
exponent (float): power law exponent.
bins (array): stellar mass bins. Defaults to None.
"""
if exponent == 0.:
integral = bins[1:] - bins[:-1]
elif exponent == -1.:
integral = np.log(bins[1:]) - np.log(bins[:-1])
else:
integral = (1. / exponent) * (bins[1:]**(exponent)
- bins[:-1]**(exponent))
return integral
def integrate_multi_power_law(bins, exponents, breaks, mass_bins,
norm_factor):
"""Integrate over each section of multi-slope power law distribution.
Args:
bins (array): stellar mass bins.
exponents (array): slope of power law.
breaks (array): stellar masses of breaks in multi-slope power law.
norm_factor (array): normalization factor of integrals.
"""
if check_multi_slope_compatibility(exponents, breaks, mass_bins):
integral = np.zeros(len(bins) - 1)
for i in range(len(exponents)):
if i == 0:
if len(breaks) > 0:
ind = np.where(np.around(bins, decimals=5) <= breaks[0])[0]
else:
ind = np.arange(len(bins), dtype=int)
elif i != len(exponents) - 1:
ind = np.where((bins >= breaks[i - 1])
& (bins <= breaks[i]))[0]
else:
ind = np.where(bins >= breaks[-1])[0]
ind_int = ind[:-1]
integral[ind_int] = (integrate_power_law(exponents[i],
bins[ind]) *
norm_factor[i])
return integral
def check_multi_slope_compatibility(exponents, breaks, mass_bins):
"""Check if the parameters of multi-slope power law are allowed.
Args:
exponents (array): Power law exponents.
breaks (array): Stellar masses of breaks in multi-slope power law.
mass_bins (array): Stellar mass bins.
Returns:
bool
"""
for b in breaks:
if np.around(b, decimals=5) not in \
np.around(mass_bins, decimals=5):
raise ValueError('Breaks in power law IMF must be located '
+ 'at the edge of a mass bin.')
if (len(exponents) > 1) and (len(exponents) - len(breaks) != 1):
raise ValueError('Number of power law IMF slopes must be exactly '
+ 'ONE greater than the number of breaks in the '
+ 'power law.')
return True
def lifetime_int(m):
"""Compute the lifetime of an intermediate mass star (M=0.6--6.6 Msun).
Args:
m (array): stellar mass.
Returns:
array: stellar lifetimes.
"""
return 10.**((1.338 - np.sqrt(1.790 - 0.2232 * (7.764 - np.log10(m))))
/ 0.1116 - 9.) * 1000.
def lifetime_high(m):
"""Compute the lifetime of a high mass star (M > 6.6 Msun)
Args:
m (array): stellar mass.
Returns:
array: stellar lifetimes.
"""
return (1.2 * m**(-1.85) + 0.003) * 1000.
def invert_lifetime(t):
"""Compute stellar masses given lifetimes (valid for <50 Gyr).
Args:
t (array): lifetime in Myr.
Returns:
array: stellar masses.
"""
m = np.zeros(len(t))
ind_int = np.where((t >= 40.) & (t < 50000))
ind_high = np.where((t > 1.) & (t < 40.))
m[ind_int] = invert_lifetime_int(t[ind_int])
m[ind_high] = invert_lifetime_high(t[ind_high])
return m
def invert_lifetime_int(t):
"""Compute stellar masses given lifetimes (valid for 40 Myr-50 Gyr).
Args:
t (array): lifetime in Myr.
Returns:
array: stellar masses.
"""
return 10.**(7.764 - (1.790 - (0.3336 - 0.1116
* np.log10(t / 1000.))**2.) / 0.2232)
def invert_lifetime_high(t):
"""Compute stellar masses given lifetimes (valid for <40 Myr).
Args:
t (array): lifetime in Myr.
Returns:
array: stellar masses.
"""
return (((t / 1000.) - 0.003) / 1.2)**(-1. / 1.85)
def random_poisson(x):
'''Draw a number from a Poisson distribution. Used for determining
the number of actual stars to form given an expected value.
np.random.poisson cannot handle numbers larger than 2147483647
(~2.14e9) because it uses the C long type. For numbers larger than
this value, round the expected (statistical) value to the nearest
integer. Since 2e9 is a large number, the Poisson fluctuations would
have been relatively small anyway.
This function is intended to replace this line of code:
self.Nstar[i] = np.random.poisson(self.Nstar_stat[i])
'''
try:
y = np.random.poisson(x)
except ValueError:
y = np.zeros(len(x), dtype=np.int64)
for i, item in enumerate(x):
try:
y[i] = np.random.poisson(item)
except ValueError:
y[i] = np.round(item)
return y
class ChemEvol:
"""Run chemical evolution model."""
def __init__(self, mass_bins, radius=10., time_tot=12000., dt=30.,
imf='kroupa', imf_alpha=None, imf_mass_breaks=None,
sim_id=None):
"""Initialize model.
Args:
mass_bins (array): Stellar mass bins [Msun].
radius (float): Radius of zone [kpc]. Only invoked if N_kslaw not
equal to 1. Defaults to 10.
time_tot (float): length of simulation [Myr]. Defaults to 12000.
dt (float): length of time step [Myr]. Defaults to 30.
imf (str): Stellar initial mass function. Defaults to 'kroupa'.
imf_alpha (array): Power law slopes of user-defined stellar
initial mass function. Must set imf to 'power_law'. Defaults
to None.
imf_mass_breaks (array): Mass breaks between different power law
slopes of user-defined stellar initial mass function. Must set
imf to 'power_law'. Defaults to None.
sim_id (str): simulation ID number.
"""
path_flexce = join(os.path.abspath(os.path.dirname(__file__)), '')
self.path_yldgen = join(path_flexce, 'data', 'yields', 'general', '')
self.sim_id = 'box' + sim_id
self.mass_bins = mass_bins
self.n_bins = len(self.mass_bins) - 1
self.n_bins_high = len(np.where(self.mass_bins >= 8)[0]) - 1
self.n_bins_low = len(np.where(self.mass_bins < 8)[0])
self.radius = radius # kpc
self.area = self.radius**2. * np.pi * 1e6 # pc^2
self.timesteps(time_tot, dt)
self.imf = imf
self.select_imf(imf, imf_alpha, imf_mass_breaks)
self.stellar_lifetimes()
self.frac_evolve()
def timesteps(self, time_tot, dt):
"""Set time steps.
Args:
time_tot (float): Length of simulation in Myr.
dt (float): Size of time step in Myr.
"""
self.time_tot = time_tot
self.dt = dt
self.t = np.arange(0., self.time_tot + 1., self.dt)
self.n_steps = int(self.time_tot / self.dt + 1.)
def select_imf(self, imf, imf_alpha, imf_mass_breaks):
"""Choose IMF or input user-defined power-law IMF.
Args:
imf (str): Stellar initial mass function to use.
imf_alpha (array): Power law slopes of user-defined stellar
initial mass function. Must set imf to 'power_law'.
imf_mass_breaks (array): Mass breaks between different power law
slopes of user-defined stellar initial mass function. Must set
imf to 'power_law'.
"""
if imf == 'power_law':
self.powerlaw_imf(imf_alpha, imf_mass_breaks)
elif imf == 'salpeter':
self.salpeter()
elif imf == 'kroupa':
self.kroupa()
elif imf == 'bell':
self.bell()
else:
raise ValueError('Use valid IMF type: "kroupa", "salpeter",'
+ '"bell", or "power_law".')
self.mass_per_bin()
def powerlaw_imf(self, alpha, mass_breaks):
"""Single or multiple slope power law IMF.
Args:
alpha (array): Power law slopes of user-defined stellar initial
mass function.
mass_breaks (array): Mass breaks between different power law
slopes of user-defined stellar initial mass function.
"""
self.alpha = np.atleast_1d(np.array(alpha))
if mass_breaks is None:
mass_breaks = []
self.mass_breaks = np.atleast_1d(np.array(mass_breaks))
self.imf_setup()
def salpeter(self):
"""Set slope and mass breaks of Salpeter (1955) IMF."""
self.alpha = np.array([2.35])
self.mass_breaks = np.array([])
self.imf_setup()
def kroupa(self):
"""Set slope and mass breaks of Kroupa (2001) IMF."""
self.alpha = np.array([1.3, 2.3])
self.mass_breaks = np.array([0.5])
self.imf_setup()
def bell(self):
"""Set slope and mass breaks of Bell IMF.
See Bell & <NAME> (2001) (see Figure 4) and Bell et al. (2003) IMF.
"""
self.alpha = np.array([1., 2.35])
self.mass_breaks = np.array([0.6])
self.imf_setup()
def imf_setup(self):
"""Create reduced exponentials for IMF integration."""
self.alpha1 = self.alpha - 1.
self.alpha2 = self.alpha - 2.
def mass_per_bin(self):
"""Calculate mass fraction that goes into each stellar mass bin."""
# Normalize phi(m) for continuity between different IMF slopes
try:
norm_factor = self.normalize_imf()
except IndexError:
print(traceback.print_exc())
raise ValueError('Number of power law IMF slopes must be '
+ 'exactly ONE greater than the number of breaks '
+ 'in the power law.')
self.mass_int = integrate_multi_power_law(self.mass_bins,
self.alpha2 * -1.,
self.mass_breaks,
self.mass_bins,
norm_factor)
self.num_int = integrate_multi_power_law(self.mass_bins,
self.alpha1 * -1.,
self.mass_breaks,
self.mass_bins,
norm_factor)
self.mass_ave = self.mass_int / self.num_int
a = 1. / np.sum(self.mass_int)
self.mass_frac = a * self.mass_int
# as a function of timestep
self.mass_bins2 = invert_lifetime(self.t)
self.mass_bins2[0] = self.mass_bins[-1]
self.mass_int2 = integrate_multi_power_law(self.mass_bins2,
self.alpha2 * -1.,
self.mass_breaks,
self.mass_bins,
norm_factor * -1.)
self.num_int2 = integrate_multi_power_law(self.mass_bins2,
self.alpha1 * -1.,
self.mass_breaks,
self.mass_bins,
norm_factor * -1.)
self.mass_ave2 = self.mass_int2 / self.num_int2
self.mass_frac2 = a * self.mass_int2
def normalize_imf(self):
"""Normalize stellar initial mass function."""
norm_factor = np.ones(len(self.alpha))
if len(self.mass_breaks) > 0:
for i in range(1, len(self.alpha)):
norm_factor[i] = self.mass_breaks[i - 1]**(-self.alpha[i - 1]) / \
self.mass_breaks[i - 1]**(-self.alpha[i])
return norm_factor
def stellar_lifetimes(self):
"""Stellar lifetimes adopted from Padovani & Matteucci (1993).
See Romano et al. (2005) for motivation.
"""
self.tau_m = 160000. * np.ones(self.n_bins) # [Myr]
ind_mint = np.where((self.mass_ave > 0.6) & (self.mass_ave <= 6.6))[0]
ind_mhigh = np.where(self.mass_ave > 6.6)[0]
self.tau_m[ind_mint] = lifetime_int(self.mass_ave[ind_mint])
self.tau_m[ind_mhigh] = lifetime_high(self.mass_ave[ind_mhigh])
def frac_evolve(self):
"""Compute fraction of stars born in a given timestep will evolve.
Figure out which mass bins will have at least some stars evolving in
a given timestep (ind_ev) and what fraction of the stars in that mass
bin will evolve (frac_ev).
"""
self.ind_ev = []
self.frac_ev = []
# lowest mass star that would evolve in a timestep
m_ev = invert_lifetime(self.t)
m_ev[0] = self.mass_bins[-1]
# integrate the IMF in each mass bin
mass_int_tmp = np.zeros(self.n_bins)
norm_factor = self.normalize_imf()
for j in range(self.n_bins):
mbin = self.mass_bins[j:j + 2]
mass_int_tmp[j] = integrate_multi_power_law(mbin,
self.alpha2 * -1,
self.mass_breaks,
self.mass_bins,
norm_factor)
# figure out which mass bins will have at least some stars evolving in
# a given timestep (ind_ev) and what fraction of the stars in that mass
# bin will evolve (frac_ev)
for i in range(self.n_steps - 1):
indtmp = []
fractmp = []
for j in range(self.n_bins):
# mass bin that spans the top end of the mass range that will
# evolve in this timestep
if (m_ev[i] >= self.mass_bins[j]) and \
(m_ev[i] < self.mass_bins[j + 1]):
indtmp.append(j)
mlow_tmp = np.maximum(self.mass_bins[j], m_ev[i + 1])
mbin_tmp = np.array([mlow_tmp, m_ev[i]])
mass_int2_tmp = integrate_multi_power_law(mbin_tmp,
self.alpha2 * -1,
self.mass_breaks,
self.mass_bins,
norm_factor)
fractmp.append(mass_int2_tmp / mass_int_tmp[j])
# mass bins fully contained within the mass range that will
# evolve in this timestep
elif ((self.mass_bins[j] > m_ev[i + 1])
and (self.mass_bins[j] < m_ev[i])):
indtmp.append(j)
mbin_tmp = self.mass_bins[j:j + 2]
mass_int2_tmp = integrate_multi_power_law(mbin_tmp,
self.alpha2 * -1,
self.mass_breaks,
self.mass_bins,
norm_factor)
fractmp.append(mass_int2_tmp / mass_int_tmp[j])
# mass bin that spans bottom top end of the mass range that
# will evolve in this timestep
elif ((m_ev[i + 1] > self.mass_bins[j])
and (m_ev[i + 1] < self.mass_bins[j + 1])):
indtmp.append(j)
mbin_tmp = np.array([m_ev[i + 1], self.mass_bins[j + 1]])
mass_int2_tmp = integrate_multi_power_law(mbin_tmp,
self.alpha2 * -1,
self.mass_breaks,
self.mass_bins,
norm_factor)
fractmp.append(mass_int2_tmp / mass_int_tmp[j])
indtmp = np.array(indtmp)
self.ind_ev.append(indtmp)
fractmp = np.array(fractmp)[:, 0]
self.frac_ev.append(fractmp)
self.frac_ev_tot = np.zeros(self.n_bins)
for j in range(self.n_steps - 1):
self.frac_ev_tot[self.ind_ev[j]] += self.frac_ev[j]
def snia_dtd(self, func='exponential', kwargs=None):
"""Set SNIa delay time distribution.
Args:
func (str): functional form of DTD. Defaults to 'exponential'.
kwargs (dict): keyword arguments to pass to individual DTD
functions. Defaults to None.
"""
kwargs = utils.none_to_empty_dict(kwargs)
self.snia_param = dict(func=func, k=kwargs)
self.snia_dtd_func = func
try:
if func == 'exponential':
self.snia_dtd_exp(**kwargs)
elif func == 'power_law':
self.snia_dtd_powerlaw(**kwargs)
elif func == 'prompt_delayed':
self.snia_dtd_prompt_delayed(**kwargs)
elif func == 'single_degenerate':
self.snia_dtd_single_degenerate(**kwargs)
except TypeError:
print(traceback.print_exc())
print('\nValid keywords:\n')
print('exponential: timescale, min_snia_time, snia_fraction\n')
print('power_law: min_snia_time, nia_per_mstar, slope\n')
print('prompt_delayed: A, B, min_snia_time\n')
print('single_degenerate: no keywords\n')
def snia_dtd_exp(self, min_snia_time=150., timescale=1500.,
snia_fraction=0.078):
"""Implement exponential SNIa delay time distribution.
If we adopt the SNIa prescription of <NAME> (2009a)
and a Salpeter IMF, 7.8% of the white dwarf mass formed form stars with
initial mass between 3.2-8.0 Msun in a stellar population explodes as a
SNIa (once we adjust to a mass range between 3.2-8.0 Msun instead of
7.5% of the white dwarf mass that forms from stars of initial mass
between 3.2-8.5 Msun). For a Kroupa (2001) IMF, 5.5% of the white
dwarf mass will explode as SNIa.
Args:
min_snia_time (float): Minimum delay time for SNIa in Myr.
Defaults to 150.
timescale (float): exponential decay timescale of delay time
distribution in Myr. Defaults to 1500.
snia_fraction (float): fraction of white dwarf mass formed from
stars with initial mass M=3.2-8.0 Msun that will explode in
SNIa (see extended description). Defaults to 0.078.
"""
self.snia_fraction = snia_fraction
self.min_snia_time = min_snia_time
self.snia_timescale = timescale
self.dMwd = self.dt / self.snia_timescale
def snia_dtd_powerlaw(self, min_snia_time=40., nia_per_mstar=2.2e-3,
slope=-1.):
"""Implement power-law SNIa delay time distribution.
Args:
min_snia_time (float): Minimum delay time for SNIa in Myr.
Defaults to 150.
nia_per_mstar (float): number of SNIa per stellar mass formed
that explode within 10 Gyr. Defaults to 2.2e-3.
slope (float): power law slope. Defaults to -1.
"""
self.min_snia_time = min_snia_time
ind_min = np.where(self.t >= min_snia_time)
ind10000 = np.where(self.t <= 10000.)
ria = np.zeros(len(self.t))
ria[ind_min] = self.t[ind_min]**slope
norm = nia_per_mstar / ria[ind10000].sum()
self.ria = ria * norm
def snia_dtd_prompt_delayed(self, A=4.4e-8, B=2.6e3, min_snia_time=40.):
"""Implement prompt plus delayed SNIa delay time distribution.
Args:
A (float): coefficient connected to stellar mass of galaxy
(see extended description). Defaults to 4.4e-8.
B (float): Defaults to 2.6e3.
min_snia_time (float): Minimum delay time for SNIa in Myr.
Defaults to 150.
Scannapieco & Bildstein (2005) prompt + delayed components to SNIa
rate Equation 1\:
N_Ia / (100 yr)^-1 = A [Mstar / 10^10 Msun] +
B [SFR / (10^10 Msun Gyr^-1)]
A = 4.4e-2 (errors: +1.6e-2 -1.4e-2)
B = 2.6 (errors: +/-1.1)
In units useful for flexCE\:
N_Ia per timestep = {4.4e-8 [Mstar / Msun] +
2.6e3 [SFR / (Msun yr^-1)]} * (len of timestep in Myr)
see also Mannucci et al. (2005)
"""
self.prob_delay = A
self.prob_prompt = B
self.min_snia_time = min_snia_time
return
def snia_dtd_single_degenerate(self, A=5e-4, gam=2., eps=1.,
normalize=False, nia_per_mstar=1.54e-3):
'''SNIa DTD for the single degenerate scenario (SDS).
Solve for the SNIa rate (ria) according to Greggio (2005). The minimum
primary mass is either (1) the mass of the secondary, (2) the mass
required to form a carbon-oxygen white dwarf (2 Msun), or (3) the mass
needed such that the WD mass plus the envelope of the secondary
(accreted at an efficiency [eps]) equals the Chandrasekhar limit (1.4
Msun).
'''
t2 = np.arange(29., self.t[-1] + 1., 1.) # time in 1 Myr intervals
m2 = invert_lifetime(t2)
# mass_int2_tmp = self.integrate_multi_power_law(
# m2, self.alpha2 * -1, self.mass_breaks, self.mass_bins,
# self.normalize_imf() * -1)
# num_int2_tmp = self.integrate_multi_power_law(
# m2, self.alpha1 * -1, self.mass_breaks, self.mass_bins,
# self.normalize_imf() * -1)
# mass_ave2 = mass_int2_tmp / num_int2_tmp
# a = 1. / self.mass_int.sum()
# a2 = 1. / np.sum(mass_int2_tmp)
# calculate the envelope mass of the secondary
m2ca = 0.3 * np.ones(len(t2))
m2cb = 0.3 + 0.1 * (m2 - 2.)
m2cc = 0.5 + 0.15 * (m2 - 4.)
m2c = np.max((m2ca, m2cb, m2cc), axis=0) # secondary core mass
m2e = m2 - m2c # secondary envelope mass
mwdn = 1.4 - (eps * m2e) # minimum WD mass
m1na = 2. * np.ones(len(t2))
m1nb = 2. + 10. * (mwdn - 0.6)
# min prim. mass set by min CO WD mass
m1n = np.max((m1na, m1nb), axis=0) # min prim. mass
m1low1 = invert_lifetime(t2)
m1low = np.max((m1low1, m1n), axis=0) # min primary mass
m1up = 8.
k_alpha = self.num_int.sum() / self.mass_int.sum()
nm2 = np.zeros(len(m1low))
for i in range(len(self.alpha)):
if i == 0:
if len(self.mass_breaks) > 0:
ind = np.where(np.around(m1low, decimals=5)
<= self.mass_breaks[0])[0]
else:
ind = np.arange(len(m1low), dtype=int)
ind_int = ind[:-1]
elif i != len(self.alpha) - 1:
ind = np.where((m1low >= self.mass_breaks[i - 1])
& (m1low <= self.mass_breaks[i]))[0]
ind_int = ind[:-1]
else:
ind = np.where(m1low >= self.mass_breaks[-1])[0]
ind_int = ind
nm2[ind_int] = ((m2[ind_int]**-self.alpha[i])
* ((m2[ind_int] / m1low[ind_int])**(self.alpha[i] + gam) -
(m2[ind_int] / m1up)**(self.alpha[i] + gam)))
# from Greggio (2005): t**-1.44 approximates log(dm/dt) = log(m) -
# log(t), which works for either the Padovani & Matteucci (1993) or the
# Greggio (2005)/Girardi et al. (2000) stellar lifetimes
dm2dt = 10.**4.28 * t2**1.44
fia2 = nm2 / dm2dt
fia = fia2 / fia2.sum()
ria1 = k_alpha * A * fia
ind_tbin = np.where(t2 % self.dt == 0.)[0]
self.ria = np.zeros(self.n_steps - 1)
self.ria[0] = ria1[:ind_tbin[0]].sum()
for i in range(1, self.n_steps - 1):
self.ria[i] = ria1[ind_tbin[i - 1]:ind_tbin[i]].sum()
if normalize:
ind10000 = np.where(self.t <= 10000.)
self.ria = self.ria / self.ria[ind10000].sum() * nia_per_mstar
def snia_ev(self, tstep, snia_mass, mstar_tot, sfr):
'''Calculate the expected number of SNIa of a stellar population from
a previous timestep. The delay time distribution can be\:
1. exponential
2. empirical t^-1 power law
3. empirical two component model with a prompt [~SFR] component and
a delayed component [~Mstar]).
4. theoretical DTD based on the single degenerate scenario
Mannucci et al. (2005) find that the Rate SNIa / Rate SNII =
0.35 +/- 0.08 in young stellar populations. Maoz et al. (2011) find
that the time-integrated Rate SNII / Rate SNIa from a stellar
population is about 5:1.
snia_mass: mass of an individual SNIa
min_snia_time: the minimum delay time from the birth of a stellar
population
'''
if self.snia_dtd_func == 'exponential':
ind_min_t = (tstep
- np.ceil(self.min_snia_time / self.dt).astype(int))
if ind_min_t > 0:
Nia_stat = np.sum(self.Mwd_Ia[:ind_min_t + 1] * self.dMwd
/ snia_mass)
self.Mwd_Ia[:ind_min_t + 1] *= 1. - self.dMwd
else:
Nia_stat = 0.
elif self.snia_dtd_func == 'power_law':
Nia_stat = np.sum(self.ria[:tstep]
* np.sum(self.mstar[1:tstep + 1], axis=1)[::-1])
elif self.snia_dtd_func == 'prompt_delayed':
ind = tstep - np.ceil(self.min_snia_time / self.dt)
if ind < 0.:
Nia_prompt = 0.
else:
Nia_prompt = sfr[ind] * self.prob_prompt
Nia_stat = (Nia_prompt + (mstar_tot * self.prob_delay)) * self.dt
elif self.snia_dtd_func == 'single_degenerate':
Nia_stat = np.sum(self.ria[:tstep]
* np.sum(self.mstar[1:tstep + 1], axis=1)[::-1])
return Nia_stat
def inflow_rx(self, func='double_exp', mgas_init=0., k=None,
inflow_rate=None, inflow_ab_pattern='bbns',
inflow_metallicity=1.):
'''
inflow_rate [=] Msun/Myr
func: double_exp, exp, te-t, constant_mgas, or custom
double_exp: M1, b1, M2, b2 (see Eq. 6 in Schoenrich & Binney 2009) with
b1 & b2 in Myr
exp: M1, b1 with b1 in Myr
te-t: M1, b1 with b1 in Myr
constant_mgas: inflow_rate will be dynamically defined in evolve_box
custom: user-defined inflow rate
inflow_comp: alpha enhanced
'''
self.inflow_param = dict(
mgas_init=mgas_init, func=func, k=k, ab_pattern=inflow_ab_pattern,
metallicity=inflow_metallicity)
self.mgas_init = mgas_init
self.inflow_func = func
if func == 'double_exp':
self.inflow_rate = ((k['M1'] / k['b1']) * np.exp(-self.t / k['b1'])
+ (k['M2'] / k['b2']) * np.exp(-self.t / k['b2']))
elif func == 'exp':
self.inflow_rate = (k['M1'] / k['b1']) * np.exp(-self.t / k['b1'])
elif func == 'te-t':
self.inflow_rate = ((k['M1'] / k['b1']) * (self.t / k['b1'])
* np.exp(-self.t / k['b1']))
elif func == 'constant_mgas':
self.inflow_rate = np.zeros(self.n_steps)
elif func == 'custom':
self.inflow_rate = inflow_rate
else:
print('\nValid inflow functions: "double_exp", "exp", "te-t",'
+ ' "constant_mgas", and "custom\n')
self.inflow_ab_pattern = inflow_ab_pattern
self.inflow_metallicity = inflow_metallicity
def inflow_composition(self, yields, tstep):
'''Compute the mass fraction of each element in the inflowing gas.
"bbns": Big Bang Nucleosynthesis abundance pattern
"alpha_enhanced": abundance pattern of a simulation before SNIa
"scaled_solar": solar abundance pattern
"recycled": abundance pattern of last timestep
scaling factor is relative to solar (i.e., solar = 1)
Set hydrogen mass fraction to 0.75 and helium mass fraction to 0.25 -
the mass fraction of metals. You need a hydrogen to helium mass
fraction ratio of ~3 to avoid negative absolute yields of hydrogen. (I
had originally set things up to match the hydrogen/helium ratio of the
ISM but that ran away to negative hydrogen masses).
'''
scaling_factor = self.inflow_metallicity # relative to solar
ind_h = np.where(yields.sym == 'H1')
ind_he = np.where(yields.sym == 'He4')
ind_metal = np.where(yields.sym_mass > 4.)
if self.inflow_ab_pattern == 'bbns':
inflow = yields.bbmf
return inflow
elif self.inflow_ab_pattern == 'alpha_enhanced':
inftmp = pd.read_csv(self.path_yldgen + 'Z_0.1-Zsun_alpha_enhanced.txt',
skiprows=6, header=None)
inflow_init = np.array(inftmp).T
scaling_factor *= 10.
elif self.inflow_ab_pattern == 'scaled_solar':
inflow_init = copy.deepcopy(yields.solar_mfrac)
elif self.inflow_ab_pattern == 'recycled':
ind = tstep - 1
inflow_init = self.mgas_iso[ind] / self.mgas_iso[ind].sum()
scaling_factor = (0.02 * scaling_factor
/ inflow_init[ind_metal].sum())
else:
print('\nValid inflow compositions: "bbns", "alpha_enhanced",'
+ ' "scaled_solar", and "recycled"\n')
inflow = np.zeros(yields.n_sym)
inflow[ind_metal] = inflow_init[ind_metal] * scaling_factor
tmp = inflow.sum()
# Set H & He mass fraction to 0.75 & 0.25 - Z, respectively.
inflow[ind_h] = 0.75
inflow[ind_he] = 0.25 - tmp
return inflow
def outflow_rx(self, outflow_source='ism', eta_outflow=1.,
variable_eta=None, feject=0.15):
'''outflow_source = "ism" (ambient ISM is ejected in the wind; standard
Mdot_wind = eta * SFR treatment) or "stellar_ejecta" (the yields from
SNII, SNIa, and AGB stars makes up the wind; from Schoenrich & Binney
2009). '''
self.outflow_param = dict(outflow_source=outflow_source,
eta_outflow=eta_outflow,
variable_eta=variable_eta, feject=feject)
self.outflow_source = outflow_source
self.variable_eta = variable_eta
if outflow_source == 'ism':
if self.variable_eta is not None:
self.eta_outflow = self.variable_eta
else:
self.eta_outflow = eta_outflow
self.feject = 0.
elif outflow_source == 'stellar_ejecta':
self.feject = feject
self.eta_outflow = 0.
else:
print('\nValid outflow sources: "ism" and "stellar_ejecta"\n')
def outflow_calc(self, timestep, sfr, snii, agb, snia):
if self.outflow_source == 'ism':
if self.variable_eta is not None:
return self.eta_outflow[timestep] * sfr
else:
return self.eta_outflow * sfr
elif self.outflow_source == 'stellar_ejecta':
return self.feject * (snii + agb + snia)
else:
print('\nValid outflow sources: "ism" and "stellar_ejecta"\n')
def warmgasres_rx(self, mwarmgas_init=0., fdirect=0.01, tcool=1200.,
warmgas=True):
'''Parameters that control gas flow into and out of the warm gas
reservoir.
<NAME> (2009) fiducial values:
mwarmgas_init = 5e8 Msun
fdirect = 0.01 (feject=0.15 for R < 3.5 kpc and 0.04 for R > 3.5 kpc)
tcool = 1200 Myr
fwarm = 1 - fdirect - feject
'''
self.warmgas_on = warmgas
if warmgas:
filename = 'warmgas_abundance_pattern.txt'
tmp = pd.read_csv(self.path_yldgen + filename,
delim_whitespace=True, skiprows=10,
names=['el', 'ab'])
self.warmgas_ab_pattern =
|
np.array(tmp['ab'])
|
numpy.array
|
import os
import sys
import h5py
import torch
import numpy as np
import importlib
import random
import shutil
from PIL import Image
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(BASE_DIR, '../utils'))
from colors import colors
colors = np.array(colors, dtype=np.float32)
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from subprocess import call
def force_mkdir(folder):
if os.path.exists(folder):
shutil.rmtree(folder)
os.mkdir(folder)
def printout(flog, strout):
print(strout)
if flog is not None:
flog.write(strout + '\n')
def optimizer_to_device(optimizer, device):
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.to(device)
def get_model_module(model_version):
importlib.invalidate_caches()
return importlib.import_module('models.' + model_version)
def collate_feats(b):
return list(zip(*b))
def collate_feats_pass(b):
return b
def collate_feats_with_none(b):
b = filter (lambda x:x is not None, b)
return list(zip(*b))
def worker_init_fn(worker_id):
""" The function is designed for pytorch multi-process dataloader.
Note that we use the pytorch random generator to generate a base_seed.
Please try to be consistent.
References:
https://pytorch.org/docs/stable/notes/faq.html#dataloader-workers-random-seed
"""
base_seed = torch.IntTensor(1).random_().item()
#print(worker_id, base_seed)
np.random.seed(base_seed + worker_id)
def viz_mask(ids):
return colors[ids]
def draw_dot(img, xy):
out = np.array(img, dtype=np.uint8)
x, y = xy[0], xy[1]
neighbors = np.array([[0, 0, 0, 1, 1, 1, -1, -1, 1], \
[0, 1, -1, 0, 1, -1, 0, 1, -1]], dtype=np.int32)
for i in range(neighbors.shape[1]):
nx = x + neighbors[0, i]
ny = y + neighbors[1, i]
if nx >= 0 and nx < img.shape[0] and ny >= 0 and ny < img.shape[1]:
out[nx, ny, 0] = 0
out[nx, ny, 1] = 0
out[nx, ny, 2] = 255
return out
def print_true_false(d):
d = int(d)
if d > 0.5:
return 'True'
return 'False'
def img_resize(data):
data = np.array(data, dtype=np.float32)
mini, maxi = np.min(data), np.max(data)
data -= mini
data /= maxi - mini
data = np.array(Image.fromarray((data*255).astype(np.uint8)).resize((224, 224)), dtype=np.float32) / 255
data *= maxi - mini
data += mini
return data
def export_pts(out, v):
with open(out, 'w') as fout:
for i in range(v.shape[0]):
fout.write('%f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2]))
def export_label(out, l):
with open(out, 'w') as fout:
for i in range(l.shape[0]):
fout.write('%f\n' % (l[i]))
def export_pts_label(out, v, l):
with open(out, 'w') as fout:
for i in range(l.shape[0]):
fout.write('%f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], l[i]))
def render_pts_label_png(out, v, l):
export_pts(out+'.pts', v)
export_label(out+'.label', l)
export_pts_label(out+'.feats', v, l)
cmd = 'RenderShape %s.pts -f %s.feats %s.png 448 448 -v 1,0,0,-5,0,0,0,0,1 >> /dev/null' % (out, out, out)
call(cmd, shell=True)
def export_pts_color_obj(out, v, c):
with open(out+'.obj', 'w') as fout:
for i in range(v.shape[0]):
fout.write('v %f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], c[i, 0], c[i, 1], c[i, 2]))
def export_pts_color_pts(out, v, c):
with open(out+'.pts', 'w') as fout:
for i in range(v.shape[0]):
fout.write('%f %f %f %f %f %f\n' % (v[i, 0], v[i, 1], v[i, 2], c[i, 0], c[i, 1], c[i, 2]))
def load_checkpoint(models, model_names, dirname, epoch=None, optimizers=None, optimizer_names=None, strict=True):
if len(models) != len(model_names) or (optimizers is not None and len(optimizers) != len(optimizer_names)):
raise ValueError('Number of models, model names, or optimizers does not match.')
for model, model_name in zip(models, model_names):
filename = f'net_{model_name}.pth'
if epoch is not None:
filename = f'{epoch}_' + filename
model.load_state_dict(torch.load(os.path.join(dirname, filename)), strict=strict)
start_epoch = 0
if optimizers is not None:
filename = os.path.join(dirname, 'checkpt.pth')
if epoch is not None:
filename = f'{epoch}_' + filename
if os.path.exists(filename):
checkpt = torch.load(filename)
start_epoch = checkpt['epoch']
for opt, optimizer_name in zip(optimizers, optimizer_names):
opt.load_state_dict(checkpt[f'opt_{optimizer_name}'])
print(f'resuming from checkpoint {filename}')
else:
response = input(f'Checkpoint {filename} not found for resuming, refine saved models instead? (y/n) ')
if response != 'y':
sys.exit()
return start_epoch
def get_global_position_from_camera(camera, depth, x, y):
"""
This function is provided only to show how to convert camera observation to world space coordinates.
It can be removed if not needed.
camera: an camera agent
depth: the depth obsrevation
x, y: the horizontal, vertical index for a pixel, you would access the images by image[y, x]
"""
cm = camera.get_metadata()
proj, model = cm['projection_matrix'], cm['model_matrix']
print('proj:', proj)
print('model:', model)
w, h = cm['width'], cm['height']
# get 0 to 1 coordinate for (x, y) coordinates
xf, yf = (x + 0.5) / w, 1 - (y + 0.5) / h
# get 0 to 1 depth value at (x,y)
zf = depth[int(y), int(x)]
# get the -1 to 1 (x,y,z) coordinate
ndc = np.array([xf, yf, zf, 1]) * 2 - 1
# transform from image space to view space
v = np.linalg.inv(proj) @ ndc
v /= v[3]
# transform from view space to world space
v = model @ v
return v
def rot2so3(rotation):
assert rotation.shape == (3, 3)
if np.isclose(rotation.trace(), 3):
return np.zeros(3), 1
if np.isclose(rotation.trace(), -1):
raise RuntimeError
theta = np.arccos((rotation.trace() - 1) / 2)
omega = 1 / 2 /
|
np.sin(theta)
|
numpy.sin
|
import numpy as np
def gradient_descent(x, y):
m_curr = b_curr = 0
iterations = 1250
n = len(x)
learning_rate = 0.08
for i in range(iterations):
y_predicted = m_curr * x + b_curr
cost = (1 / n) * sum([val ** 2 for val in (y - y_predicted)])
md = -(2 / n) * sum(x * (y - y_predicted))
bd = -(2 / n) * sum(y - y_predicted)
m_curr = m_curr - learning_rate * md
b_curr = b_curr - learning_rate * bd
print("m {}, b {}, cost {} iteration {}".format(m_curr, b_curr, cost, i))
x1 =
|
np.array([1, 2, 3, 4, 5])
|
numpy.array
|
import os
import tqdm
import shutil
import argparse
import setproctitle
import pandas as pd
import numpy as np
from skimage import measure
from skimage.io import imsave
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from ast import literal_eval
import SimpleITK as sitk
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from utils.utils import get_metrics, draw_results
from dataloaders import make_data_loader
from models.attention_d2unet import AttD2UNet
from models.denseatt import DenseAtt
def get_args():
parser = argparse.ArgumentParser()
# general config
parser.add_argument('--gpu', default='1', type=str)
parser.add_argument('--ngpu', type=int, default=1)
# dataset config
parser.add_argument('--dataset', type=str, default='abus3d')
parser.add_argument('--fold', type=str, default='0')
parser.add_argument('--batch_size', type=int, default=1)
# network config
parser.add_argument('--arch', default='attention_d2unet', type=str, choices=('dense161', 'dense121', 'dense201', 'unet', 'resunet', 'sdnet', 'auto_attention'))
parser.add_argument('--in_channels', default=1, type=int)
parser.add_argument('--num_classes', default=2, type=int)
parser.add_argument('--num_init_features', default=64, type=int)
parser.add_argument('--growth_rate', default=48, type=int)
parser.add_argument('--bn_size', default=4, type=int)
parser.add_argument('--block_config', default='[6, 12, 24, 16]', type=str)
# resume and save
parser.add_argument('--resume', type=str)
parser.add_argument('--save', default=None, type=str) # save to resume path if None
parser.add_argument('--save_image', action='store_true')
args = parser.parse_args()
return args
def main():
# --- init args ---
args = get_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
# --- building network ---
if args.arch == 'attention_d2unet':
model = AttD2UNet()
elif args.arch == 'dense_att':
model = DenseAtt()
else:
raise(NotImplementedError('model {} not implement'.format(args.arch)))
model = model.cuda()
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_pre = checkpoint['best_pre']
model.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
# --- saving path ---
if 'best' in args.resume:
file_name = 'model_best_' + str(checkpoint['epoch'])
elif 'check' in args.resume:
file_name = 'checkpoint_{}_result'.format(checkpoint['epoch'])
if args.save is not None:
save_path = os.path.join(args.save, file_name)
csv_path = os.save
else:
save_path = os.path.join(os.path.dirname(args.resume), file_name)
csv_path = os.path.dirname(args.resume)
setproctitle.setproctitle('...')
if args.save_image:
# image path
args.save_image_path = save_path + '/image'
if os.path.exists(args.save_image_path):
shutil.rmtree(args.save_image_path)
os.makedirs(args.save_image_path, exist_ok=True)
# label path
args.save_pred_path = save_path + '/pred'
if os.path.exists(args.save_pred_path):
shutil.rmtree(args.save_pred_path)
os.makedirs(args.save_pred_path, exist_ok=True)
args.save_path = save_path
print('=> saving images in :', save_path)
else:
print('we don\'t save any images!')
# xlsx path
csv_file_name = file_name + '.xlsx'
args.csv_file_name = os.path.join(csv_path, csv_file_name)
print('=> saving csv in :', args.csv_file_name)
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
raise(RuntimeError('resume is None!'))
# --- preparing dataset
kwargs = {'num_workers': 1, 'pin_memory': True}
_, _, test_loader = make_data_loader(args, **kwargs)
# --- testing ---
test(args, test_loader, model)
def test(args, loader, model):
model.eval()
dsc_list = []
jc_list = []
hd_list = []
hd95_list = []
asd_list = []
precision_list = []
recall_list = []
vs_list = []
filename_list = []
with torch.no_grad():
for sample in tqdm.tqdm(loader):
image, label, file_name = sample['image'], sample['target'], sample['file_name']
image = image.cuda()
if args.arch == 'sdnet':
_, _, _, _, _, pred, _, _, _, _ = model(image, label, 'val')
else:
pred = model(image)
pred = F.softmax(pred, dim=1)
pred = pred.max(1)[1]
image = image[0][0].cpu().numpy()
image = (image + 0.5) * 0.5
image = image.astype(np.float)
label = label[0].cpu().numpy()
label = label.astype(np.float)
pred = pred[0].cpu().numpy()
pred = pred.astype(np.float)
# get metrics
metrics = get_metrics(pred, label, voxelspacing=(0.5, 0.5, 0.5))
dsc_list.append(metrics['dsc'])
jc_list.append(metrics['jc'])
hd_list.append(metrics['hd'])
hd95_list.append(metrics['hd95'])
asd_list.append(metrics['asd'])
precision_list.append(metrics['precision'])
recall_list.append(metrics['recall'])
vs_list.append(metrics['vs'])
filename_list.append(file_name)
if args.save_image:
save_name = os.path.join(args.save_path, file_name[0][:-4])
if not os.path.exists(save_name):
os.makedirs(save_name)
img = sitk.GetImageFromArray(image)
sitk.WriteImage(img, save_name + '/' + "img.nii.gz")
img = sitk.GetImageFromArray(label)
sitk.WriteImage(img, save_name + '/' + "gt.nii.gz")
img = sitk.GetImageFromArray(pred)
sitk.WriteImage(img, save_name + '/' + "pred.nii.gz")
df = pd.DataFrame()
df['filename'] = filename_list
df['dsc'] =
|
np.array(dsc_list)
|
numpy.array
|
"""
Author: <NAME> <<EMAIL>>.
References:
-
-
-
"""
import os
import networkx as nx
import numpy as np
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.metrics import f1_score, accuracy_score
# For the plot functions.
_Z_ORDER_V = 10
_Z_ORDER_SE = _Z_ORDER_V - 1
_Z_ORDER_SSV = _Z_ORDER_V + 1
_IS_NOT_FITTED_MSG = 'The model must be fitted before this call.'
def _calculate_dist_matrix(X):
(nsamples, ndim) = X.shape
dist_matrix = -np.ones(shape=(nsamples, nsamples), dtype=np.float64)
# Calculate distance matrix.
for i in range(nsamples):
xi = X[i, :].reshape(1, ndim)
dist_matrix[i, :] = dist_matrix[:, i] = np.linalg.norm(xi - X, axis=1, ord=2) ** 4
dist_matrix[i, i] = np.inf
return dist_matrix
class GabrielGraph(nx.Graph):
"""
Gabriel Graph (GG)
"""
def __init__(self, X, Y=None):
super().__init__()
self._build(X, Y)
def _build(self, X, Y):
(nsamples, _) = X.shape
# Calculate distance matrix.
self.dist_matrix = _calculate_dist_matrix(X)
## Gabriel Graph definition.
# Nodes
nodes = np.arange(nsamples)
if Y is not None and hasattr(Y, '__iter__'):
[self.add_node(node, y=c) for (node, c) in zip(nodes, Y)]
else:
self.add_nodes_from(nodes)
# Adjacency
for vi in range(nsamples - 1):
for vj in range(vi + 1, nsamples):
if self.is_continguous(vi, vj):
self.add_edge(vi, vj)
def _rebuild(self, Y=None):
self.clear()
## Gabriel Graph definition.
# Nodes
nsamples = self.dist_matrix.shape[0]
nodes = np.arange(nsamples)
if Y is not None and hasattr(Y, '__iter__'):
[self.add_node(node, y=c) for (node, c) in zip(nodes, Y)]
else:
self.add_nodes_from(nodes)
# Adjacency
for vi in range(nsamples - 1):
for vj in range(vi + 1, nsamples):
if self.is_continguous(vi, vj):
self.add_edge(vi, vj)
def delete_nodes(self, nodes, Y=None):
""" Remove the vertices in `nodes` from the graph. """
mask = np.ones(shape=len(self.nodes), dtype=bool)
mask[nodes] = False
dist_matrix = self.dist_matrix[mask, :]
self.dist_matrix = dist_matrix[:, mask]
if Y is not None:
Y = Y[mask]
self._rebuild(Y)
return mask
def is_continguous(self, vi, vj):
""" Eval if the samples `vi` and `vj` are adjacent. """
dvivj = self.dist_matrix[vi, vj]
dvivjdvk = np.min(self.dist_matrix[vi, :] + self.dist_matrix[vj, :])
return dvivj <= dvivjdvk
@staticmethod
def plot(gg, X, points_color=None, point_scale=10**2, edge_color='k', edge_width=.5):
""" Plot the graph. """
from matplotlib import pyplot as plt
if X.ndim != 2:
raise ValueError("The visualization it's only in 2d data.")
fig = plt.gcf() # Get current figure or create it.
ax = fig.add_subplot(111)
for (i, j) in gg.edges:
ax.plot([X[i, 0], X[j, 0]], [X[i, 1], X[j, 1]],
color=edge_color, lw=edge_width, zorder=_Z_ORDER_SE)
ax.scatter(*X.T, s=point_scale, zorder=_Z_ORDER_V,
c='lightgray' if points_color is None else points_color,
linewidths=0.5,
edgecolors='k')
return ax
class GGClassifier:
"""
Gabriel Graph Classifier (GGC)
"""
def __init__(self, se_deep=-1):
self._middle_points = None
self._w = None
self._bias = None
self._ssv = None
self._se = None
self._gg = None
self._labels = None
self._is_fitted = False
self._metrics = None
self._se_deep = se_deep
@property
def ssv(self):
""" Structural Support Vectors (SSV). """
if self._ssv is None:
self._se, self._ssv = self._find_se()
return self._ssv
@property
def se(self):
""" Support Edges. """
if self._se is None:
self._se, self._ssv = self._find_se()
return self._se
@property
def middle_points(self):
""" Middle points of each SE. """
return self._middle_points
@property
def bias(self):
""" Hyperplanes's bias. """
return self._bias
@property
def w(self):
""" Hyperplanes's inclination. """
return self._w
@property
def gg(self):
""" Gabriel Graph. """
return self._gg
def _find_se(self):
""" Select the SEs based on the criteria defined in `se_deep`. """
if self._gg is None:
raise Exception(_IS_NOT_FITTED_MSG)
se = []
ssv = []
if self._se_deep <= 0:
for (vi, vj) in self._gg.edges:
node_vi, node_vj = self._gg.nodes[vi], self._gg.nodes[vj]
yi, yj = node_vi['y'], node_vj['y']
if yi != yj:
if yi == self._labels[0]: # Ensures order (negative class first).
se.append((vi, vj))
else:
se.append((vj, vi))
ssv.append(vi)
ssv.append(vj)
elif self._se_deep > 0:
for (vi, vj) in self._gg.edges:
node_vi, node_vj = self._gg.nodes[vi], self._gg.nodes[vj]
yi, yj = node_vi['y'], node_vj['y']
if yi != yj:
def walk(y_target, v_origin, v_new, v_previous, v_current, current_deep):
""" 'Walk' on adjacency vertices in a recursive way. """
v_previous.append(v_current)
adjs = np.array(list(self._gg.adj[v_current].keys()))
if len(adjs) > 0 and current_deep >= 0 and current_deep < self._se_deep:
adjs = adjs[np.argsort(self._gg.dist_matrix[v_current, adjs])]
v_next = None
for v in adjs:
if v not in v_previous:
node = self._gg.nodes[v]
if node['y'] == y_target:
v_new = v
v_next = v
break
if v_next is not None:
return walk(y_target, v_current, v_new, v_previous, v_next, current_deep + 1)
# print(current_deep)
if v_next is None:
return walk(y_target, v_origin, v_new, v_previous, v_origin, current_deep - 1)
return v_new
adivi = walk(yi, vi, vi, [], vi, 0)
adjvj = walk(yj, vj, vj, [], vj, 0)
if yi == self._labels[0]: # Ensures order (negative class first).
se.append((adivi, adjvj))
else:
se.append((adjvj, adivi))
ssv.append(adivi)
ssv.append(adjvj)
return np.array(se), np.unique(ssv)
def fit(self, X, Y, remove_noise=True):
""" Fit the model. """
y = np.unique(Y)
if y.size != 2:
raise NotImplementedError("This implementation it's only for binary classification.")
# Remove duplicated samples.
idxuniq = np.unique([np.nonzero(np.equal(x, X).all(axis=1))[0][0] for x in X])
X = X[idxuniq, :]
Y = Y[idxuniq]
self._gg = GabrielGraph(X, Y)
self._labels = y
if remove_noise:
self.noise_nodes, X, Y = self.filter_noise(X, Y, return_new_XY=True)
self._middle_points, self._w, self._bias = self._calculate_model_params(X)
self._is_fitted = True
ypred = self.predict(X)
self._metrics = {'f1': f1_score(Y, ypred), 'acc': accuracy_score(Y, ypred)}
return self, X, Y
def predict(self, X):
""" Assign a class to samples in `X`. """
if not self._is_fitted:
raise Exception(_IS_NOT_FITTED_MSG)
nsamples = X.shape[0]
labels =
|
np.zeros(shape=nsamples, dtype=np.int8)
|
numpy.zeros
|
"""
File: examples/distribution/binomial_distribution.py
Author: <NAME>
Date: Oct 15 2019
Description: Example of using the BinomialDistribution class.
"""
import os, time
import numpy as np
import matplotlib.pyplot as pl
from distpy import BinomialDistribution
sample_size = int(1e5)
distribution = BinomialDistribution(0.4, 10)
hdf5_file_name = 'TEST_DELETE_THIS.hdf5'
distribution.save(hdf5_file_name)
try:
assert distribution == BinomialDistribution.load(hdf5_file_name)
except:
os.remove(hdf5_file_name)
raise
else:
os.remove(hdf5_file_name)
assert distribution.numparams == 1
t0 = time.time()
sample = distribution.draw(sample_size)
print(('It took {0:.5f} s to draw {1} points from a binomial ' +\
'distribution.').format(time.time() - t0, sample_size))
print('Sample mean was {0:.3g}, while expected mean was {1:.3g}.'.format(\
np.mean(sample), distribution.mean))
print(('Sample standard deviation was {0:.3g}, while expected standard ' +\
'deviation was {1:.3g}.').format(
|
np.std(sample)
|
numpy.std
|
import numpy as np
def Fourier_shear(image,shear_factor,axis=[-1,-2],fftshifted=False):
"""Accomplishes the following affine transformation to an image:
[x'] = [ 1 shear_factor] [x]
[y'] [ 0 1 ] [y]
via Fourier transform based methods.
Parameters
----------
image -- Array like object containing image to be shear
transformed
shear_factor -- Amount to shear image by
axis -- List like object specifying axes to apply shear
operation to. The axis[0] will be sheared relative
to axis[1]
fftshifted -- True if the object is already fft shifted, ie the
zeroth coordinate is in the top left hand corner
"""
#If array is not a complex numpy array, make it so
complex = np.iscomplexobj(image)
if(not complex): image = np.asarray(image,dtype=np.csingle)
#Pad output image to fit transformed array
padding = np.zeros([len(image.shape),2],dtype=np.int)
padding[axis[0],:] = np.ceil(shear_factor*image.shape[axis[1]]).astype(np.int)//2
print(image.shape,shear_factor*image.shape[axis[1]])
# print(shear_factor*image.shape[axis[0]],image.shape[axis[1]],image.shape[axis[0]])
image_ = np.pad(image,padding,mode='constant')
#Routine assumes that the origin is the top-left hand pixel
if not fftshifted: image_ = np.fft.fftshift(image_,axes =axis)
#Get shape of image
Y,X = [image_.shape[i] for i in axis]
print(Y,X)
qy = np.fft.fftfreq(Y)
qx = np.fft.fftfreq(X)
#Fourier space shear operator
a = np.exp(-2*np.pi*1j*qy[:,np.newaxis]*qx[np.newaxis,:]*X*shear_factor)
#This function is designed to shear arbitrary axes
#TODO test this functionality
#This next part shapes the Fourier space shear
#operator for appropriate broadcasting to the
#input arary
ashape = ()
ndim = len(image_.shape)
for idim,dim in enumerate(image_.shape):
if(idim == axis[0]%ndim or idim == axis[1]%ndim): ashape += (dim,)
else: ashape +=(1,)
if(axis[0]>axis[1]):
a = a.T.reshape(ashape)
else:
a = a.reshape(ashape)
# fig,ax=plt.subplots()
# ax.imshow(a.real)
# plt.show()
#Apply shear operator
image_ = np.fft.ifft(a*np.fft.fft(image_,axis=axis[0]),axis=axis[0])
#Inverse FFT shift
if not fftshifted: image_ = np.fft.ifftshift(image_,axes =axis)
#Return same type as input
if complex:
return image_
else:
return np.real(image_)
def Fourier_rotate(image,theta,fftshifted=False,outsize='minimum'):
"""Performs a Fourier rotation of array image counterclockwise by angle
theta in radians. Outsize can be 'original', 'minimum', 'double'
Example
----------------------------------
from skimage import data
image = np.sum(np.asarray(data.astronaut()),axis=2)
y,x = image.shape
# image = np.pad(image,(y,x),'constant')
fig,axes = plt.subplots(nrows=4,ncols=4,figsize=(16,16),squeeze=False)
for i in range(16):
image = np.sum(np.asarray(data.astronaut()),axis=2)
# image = np.pad(image,(y,x),'constant')
image = Fourier_rotate(image,np.deg2rad(360/16*(i)-45),outsize='original')
axes[i//4,i%4].matshow(image)
axes[i//4,i%4].set_axis_off()
axes[i//4,i%4].set_title('{0} degrees'.format(360/16*(i)-45))
plt.show()
"""
#If array is not complex, make it so
complex = np.iscomplexobj(image)
if(not complex): image = np.asarray(image,dtype=np.csingle)
#Fourier rotation only works for an angle less than 45, use np.rot90 to get
#correct quadrant
quadrant = np.round(theta/(np.pi/2))
image = np.rot90(image,quadrant,axes=(-1,-2))
iy,ix = image.shape[-2:]
#Pad array by factor 2
padding = np.zeros([len(image.shape),2],dtype=np.int)
padding[-2,:] = iy//2
padding[-1,:] = ix//2
image = np.pad(image,padding,mode='constant')
#Routine assumes that the origin is the top-left hand pixel
if not fftshifted: image = np.fft.fftshift(image,axes =[-2,-1])
#...and then Fourier rotation to desired angle within that quadrant
fftrot = theta - quadrant*(np.pi/2)
#Get shape of image
Y,X = image.shape[-2:]
qy = np.fft.fftfreq(Y)
qx = np.fft.fftfreq(X)
#Fourier space y shear operator
a = np.exp(-2*np.pi*1j*qy[:,np.newaxis]*qx[np.newaxis,:]*Y*np.tan(fftrot/2))
#Fourier space x shear operator
b = np.exp( 2*np.pi*1j*qx[np.newaxis,:]*qy[:,np.newaxis]*X*np.sin(fftrot))
#X shear
image = np.fft.ifft(a*np.fft.fft(image,axis=-1),axis=-1)
#Y shear
image = np.fft.ifft(b*np.fft.fft(image,axis=-2),axis=-2)
#X shear again
image = np.fft.ifft(a*np.fft.fft(image,axis=-1),axis=-1)
#Reset array coordinates to that of input
if not fftshifted: image = np.fft.ifftshift(image,axes =[-2,-1])
crop = tuple([slice(0, i) for i in image.shape[:-2]])
#Crop array to requested output size
if outsize == 'original':
crop += (slice(iy//2,-iy//2-iy%2),)
crop += (slice(ix//2,-ix//2-ix%2),)
image = image[crop]
elif outsize == 'minimum':
#Work output array size
c, s = np.cos(theta), np.sin(theta)
rot_matrix = np.array([[c, s],
[-s, c]])
# Compute transformed input bounds
out_bounds = rot_matrix @ [[0, 0,ix, ix],
[0,iy, 0, iy]]
# Compute the shape of the transformed input plane
out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
crop += (slice((Y-out_plane_shape[0])//2,(Y+out_plane_shape[0])//2),)
crop += (slice((X-out_plane_shape[1])//2,(X+out_plane_shape[1])//2),)
image = image[crop]
#Return same type as input
if complex:
return image
else:
return np.real(image)
def fourier_interpolate_2d(ain,shapeout,norm=True):
'''ain - input numpy array, npiy_ number of y pixels in
interpolated image, npix_ number of x pixels in interpolated
image. Perfoms a fourier interpolation on array ain.'''
#Import required FFT functions
from numpy.fft import fftshift,fft2,ifft2
#Make input complex
aout = np.zeros(shapeout,dtype=np.complex)
#Get input dimensions
npiyin,npixin = np.shape(ain)
npiyout,npixout = shapeout
#Construct input and output fft grids
qyin,qxin,qyout,qxout = [(np.fft.fftfreq(x,1/x ) ).astype(np.int)
for x in [npiyin,npixin,npiyout,npixout]]
#Get maximum and minimum common reciprocal space coordinates
minqy,maxqy = [max(np.amin(qyin),np.amin(qyout)),min(np.amax(qyin),np.amax(qyout))]
minqx,maxqx = [max(np.amin(qxin),np.amin(qxout)),min(np.amax(qxin),np.amax(qxout))]
#Make 2d grids
qqxout,qqyout =
|
np.meshgrid(qxout,qyout)
|
numpy.meshgrid
|
import numpy as np
import scipy.sparse
from numpy import sin, cos, tan
import sys
import slepc4py
slepc4py.init(sys.argv)
from petsc4py import PETSc
from slepc4py import SLEPc
opts = PETSc.Options()
import pickle as pkl
class Model():
def __init__(self, model_variables, model_parameters, physical_constants):
self.model_variables = model_variables
self.model_parameters = model_parameters
self.physical_constants = physical_constants
for key in model_parameters:
exec('self.'+str(key)+' = model_parameters[\''+str(key)+'\']')
for key in physical_constants:
exec('self.'+str(key)+' = physical_constants[\''+str(key)+'\']')
self.calculate_nondimensional_parameters()
self.set_up_grid(self.R, self.h)
def set_up_grid(self, R, h):
"""
Creates the r and theta coordinate vectors
inputs:
R: radius of outer core in m
h: layer thickness in m
outputs: None
"""
self.R = R
self.h = h
self.Size_var = self.Nk*self.Nl
self.SizeM = len(self.model_variables)*self.Size_var
self.rmin = (R-h)/self.r_star
self.rmax = R/self.r_star
self.dr = (self.rmax-self.rmin)/(self.Nk)
ones = np.ones((self.Nk,self.Nl))
self.r = (ones.T*np.linspace(self.rmin+self.dr/2., self.rmax-self.dr/2.,num=self.Nk)).T # r value at center of each cell
self.rp = (ones.T*np.linspace(self.rmin+self.dr, self.rmax, num=self.Nk)).T # r value at plus border (top) of cell
self.rm = (ones.T*np.linspace(self.rmin, self.rmax-self.dr, num=self.Nk)).T # r value at minus border (bottom) of cell
self.dth = np.pi/(self.Nl)
self.th = ones*np.linspace(self.dth/2., np.pi-self.dth/2., num=self.Nl) # theta value at center of cell
self.thp = ones*np.linspace(self.dth, np.pi, num=self.Nl) # theta value at plus border (top) of cell
self.thm = ones*np.linspace(0,np.pi-self.dth, num=self.Nl)
return None
def calculate_nondimensional_parameters(self):
'''
Calculates the non-dimensional parameters in model from the physical
constants.
'''
self.t_star = 1/self.Omega # seconds
self.r_star = self.R # meters
self.P_star = self.rho*self.r_star**2/self.t_star**2
self.B_star = (self.eta*self.mu_0*self.rho/self.t_star)**0.5
self.u_star = self.r_star/self.t_star
self.E = self.nu*self.t_star/self.r_star**2
self.Pm = self.nu/self.eta
return None
def set_Br(self, BrT):
''' Sets the background phi magnetic field in Tesla
BrT = Br values for each cell in Tesla'''
if isinstance(BrT, (float, int)):
self.BrT = np.ones((self.Nk, self.Nl))*BrT
self.Br = self.BrT/self.B_star
elif isinstance(BrT, np.ndarray) and BrT.shape == (self.Nk, self.Nl):
self.BrT = BrT
self.Br = self.BrT/self.B_star
else:
raise TypeError("BrT must either be an int, float, or np.ndarray of correct size")
def set_Bth(self, BthT):
''' Sets the background phi magnetic field in Tesla
BthT = Bth values for each cell in Tesla'''
if isinstance(BthT, (float, int)):
self.BthT = np.ones((self.Nk, self.Nl))*BthT
self.Bth = self.BthT/self.B_star
elif isinstance(BthT, np.ndarray) and BthT.shape == (self.Nk, self.Nl) :
self.BthT = BthT
self.Bth = self.BthT/self.B_star
else:
raise TypeError("BthT must either be an int, float, or np.ndarray of correct size")
def set_Bph(self, BphT):
''' Sets the background phi magnetic field in Tesla
BphT = Bph values for each cell in Tesla'''
if isinstance(BphT, (float, int)):
self.BphT = np.ones((self.Nk, self.Nl))*BphT
self.Bph = self.BphT/self.B_star
elif isinstance(BphT, np.ndarray) and BphT.shape ==(self.Nk, self.Nl):
self.BphT = BphT
self.Bph = self.BphT/self.B_star
else:
raise TypeError("BphT must either be an int, float, or np.ndarray of correct size")
def set_Br_dipole(self, Bd, const=0):
''' Sets the background magnetic field to a dipole field with
Bd = dipole constant in Tesla '''
self.Bd = Bd
self.BrT = 2*cos(self.th)*Bd + const
self.Br = self.BrT/self.B_star
self.set_Bth(0.0)
self.set_Bph(0.0)
return None
def set_B_dipole(self, Bd, const=0):
''' Sets the background magnetic field to a dipole field with
Bd = dipole constant in Tesla '''
self.Bd = Bd
self.BrT = 2*cos(self.th)*Bd + const
self.Br = self.BrT/self.B_star
self.BthT = sin(self.th)*Bd + const
self.Bth = self.BthT/self.B_star
self.set_Bph(0.0)
return None
def set_B_abs_dipole(self, Bd, const=0):
''' Sets the background magnetic Br and Bth field to the absolute value of a
dipole field with Bd = dipole constant in Tesla '''
self.Bd = Bd
self.BrT = 2*abs(cos(self.th))*Bd + const
self.Br = self.BrT/self.B_star
self.BthT = abs(sin(self.th))*Bd + const
self.Bth = self.BthT/self.B_star
self.set_Bph(0.0)
return None
def set_B_dipole_absrsymth(self, Bd, const=0):
''' Sets the background magnetic Br and Bth field to the absolute value of a
dipole field with Bd = dipole constant in Tesla '''
self.Bd = Bd
self.BrT = 2*abs(cos(self.th))*Bd + const
self.Br = self.BrT/self.B_star
self.BthT = sin(self.th)*Bd + const
self.Bth = self.BthT/self.B_star
self.set_Bph(0.0)
return None
def set_Br_abs_dipole(self, Bd, const=0, noise=None, N=10000):
''' Sets the background Br magnetic field the absolute value of a
dipole with Bd = dipole constant in Tesla.
optionally, can offset the dipole by a constant with const or add numerical noise with noise '''
if noise:
from scipy.special import erf
def folded_mean(mu, s):
return s*(2/np.pi)**0.5*np.exp(-mu**2/(2*s**2)) - mu*erf(-mu/(2*s**2)**0.5)
self.Bd = Bd
Bdip = 2*Bd*np.abs(np.cos(self.th))
Bdip_noise = np.zeros_like(Bdip)
for (i,B) in enumerate(Bdip):
Bdip_noise[i] = folded_mean(Bdip[i], noise)
self.BrT = np.ones((self.Nk, self.Nl))*Bdip_noise
self.Br = self.BrT/self.B_star
else:
self.Bd = Bd
self.BrT = 2*abs(cos(self.th))*Bd + const
self.Br = self.BrT/self.B_star
self.set_Bth(0.0)
self.set_Bph(0.0)
return None
def set_Br_sinfunc(self, Bmin, Bmax, sin_exp=2.5):
self.BrT = ((1-sin(self.th)**sin_exp)*(Bmax-Bmin)+Bmin)
self.Br = self.BrT/self.B_star
self.set_Bth(0.0)
self.set_Bph(0.0)
return None
def set_B_by_type(self, B_type, Bd=0.0, Br=0.0, Bth=0.0, Bph=0.0, const=0.0, Bmin=0.0, Bmax=0.0, sin_exp=2.5, noise=0.0):
''' Sets the background magnetic field to given type.
B_type choices:
* dipole : Br, Bth dipole; specify scalar dipole constant Bd (T)
* abs_dipole : absolute value of dipole in Br and Bth, specify scalar Bd (T)
* dipole_Br : Br dipole, Bth=0; specify scalar dipole constant Bd (T)
* abs_dipole_Br : absolute value of dipole in Br, specify scalar Bd (T)
* constant_Br : constant Br, Bth=0; specify scalar Br (T)
* set : specify array Br, Bth, and Bph values in (T)
* dipole_absrsymth : absolute value of dipole in Br, symmetric in Bth, specify scalar Bd (T)
'''
if B_type == 'dipole':
self.set_B_dipole(Bd, const=const)
elif B_type == 'dipoleBr':
self.set_Br_dipole(Bd, const=const)
elif B_type == 'constantBr':
self.set_Br(Br*np.ones((self.Nk, self.Nl)))
self.set_Bth(0.0)
self.set_Bph(0.0)
elif B_type == 'set':
self.set_Br(Br)
self.set_Bth(Bth)
self.set_Bph(Bph)
elif B_type == 'absDipoleBr':
self.set_Br_abs_dipole(Bd, const=const, noise=noise)
elif B_type == 'absDipole':
self.set_B_abs_dipole(Bd, const=const)
elif B_type == 'dipoleAbsRSymTh':
self.set_B_dipole_absrsymth(Bd, const=const)
elif B_type == 'sinfuncBr':
self.set_Br_sinfunc(Bmin, Bmax, sin_exp=sin_exp)
else:
raise ValueError('B_type not valid')
def set_CC_skin_depth(self, period):
''' sets the magnetic skin depth for conducting core BC
inputs:
period = period of oscillation in years
returns:
delta_C = skin depth in (m)
'''
self.delta_C = np.sqrt(2*self.eta/(2*np.pi/(period*365.25*24*3600)))
self.physical_constants['delta_C'] = self.delta_C
return self.delta_C
def set_Uphi(self, Uphi):
'''Sets the background velocity field in m/s'''
if isinstance(Uphi, (float, int)):
self.Uphi = np.ones((self.Nk, self.Nl))*Uphi
elif isinstance(Uphi, np.ndarray):
self.Uphi = Uphi
else:
raise TypeError("The value passed for Uphi must be either an int, float, or np.ndarray")
self.U0 = self.Uphi*self.r_star/self.t_star
return None
def set_buoyancy(self, drho_dr):
'''Sets the buoyancy structure of the layer'''
self.omega_g = np.sqrt(-self.g/self.rho*drho_dr)
self.N = self.omega_g**2*self.t_star**2
def set_buoy_by_type(self, buoy_type, buoy_ratio):
self.omega_g0 = buoy_ratio*self.Omega
if buoy_type == 'constant':
self.omega_g = np.ones((self.Nk, self.Nl))*self.omega_g0
elif buoy_type == 'linear':
self.omega_g = (np.ones((self.Nk, self.Nl)).T*np.linspace(0, self.omega_g0, self.Nk)).T
self.N = self.omega_g**2*self.t_star**2
def get_index(self, k, l, var):
'''
Takes coordinates for a point, gives back index in matrix.
inputs:
k: k grid value from 0 to K-1
l: l grid value from 0 to L-1
var: variable name in model_variables
outputs:
index of location in matrix
'''
Nk = self.Nk
Nl = self.Nl
SizeM = self.SizeM
Size_var = self.Size_var
if (var not in self.model_variables):
raise RuntimeError('variable not in model_variables')
elif not (l >= 0 and l <= Nl-1):
raise RuntimeError('l index out of bounds')
elif not (k >= 0 and k <= Nk-1):
raise RuntimeError('k index out of bounds')
return Size_var*self.model_variables.index(var) + k + l*Nk
def get_variable(self, vector, var):
'''
Takes a flat vector and a variable name, returns the variable in a
np.matrix
inputs:
vector: flat vector array with len == SizeM
var: str of variable name in model
outputs:
variable in np.array
'''
Nk = self.Nk
Nl = self.Nl
if (var not in self.model_variables):
raise RuntimeError('variable not in model_variables')
elif len(vector) != self.SizeM:
raise RuntimeError('vector given is not correct length in this \
model')
else:
var_start = self.get_index(0, 0, var)
var_end = self.get_index(Nk-1, Nl-1, var)+1
variable = np.array(np.reshape(vector[var_start:var_end], (Nk, Nl), 'F'))
return variable
def create_vector(self, variables):
'''
Takes a set of variables and creates a vector out of
them.
inputs:
variables: list of (Nk x Nl) matrices or vectors for each model
variable
outputs:
vector of size (SizeM x 1)
'''
Nk = self.Nk
Nl = self.Nl
vector = np.array([1])
# Check Inputs:
if len(variables) != len(self.model_variables):
raise RuntimeError('Incorrect number of variable vectors passed')
for var in variables:
vector = np.vstack((vector, np.reshape(var, (Nk*Nl, 1))))
return np.array(vector[1:])
def add_gov_equation(self, name, variable):
setattr(self, name, GovEquation(self, variable))
def setup_SLEPc(self, nev=10, Target=None, Which='TARGET_MAGNITUDE'):
self.EPS = SLEPc.EPS().create()
self.EPS.setDimensions(10, PETSc.DECIDE)
self.EPS.setOperators(self.A_SLEPc, self.M_SLEPc)
self.EPS.setProblemType(SLEPc.EPS.ProblemType.PGNHEP)
self.EPS.setTarget(Target)
self.EPS.setWhichEigenpairs(eval('self.EPS.Which.'+Which))
self.EPS.setFromOptions()
self.ST = self.EPS.getST()
self.ST.setType(SLEPc.ST.Type.SINVERT)
return self.EPS
def solve_SLEPc(self, Target=None):
self.EPS.solve()
conv = self.EPS.getConverged()
vs, ws = PETSc.Mat.getVecs(self.A_SLEPc)
vals = []
vecs = []
for ind in range(conv):
vals.append(self.EPS.getEigenpair(ind, ws))
vecs.append(ws.getArray())
return vals, vecs
def save_mat_PETSc(self, filename, mat, type='Binary'):
''' Saves a Matrix in PETSc format '''
if type == 'Binary':
viewer = PETSc.Viewer().createBinary(filename, 'w')
elif type == 'ASCII':
viewer = PETSc.Viewer().createASCII(filename, 'w')
viewer(mat)
def load_mat_PETSc(self, filename, type='Binary'):
''' Loads and returns a Matrix stored in PETSc format '''
if type == 'Binary':
viewer = PETSc.Viewer().createBinary(filename, 'r')
elif type == 'ASCII':
viewer = PETSc.Viewer().createASCII(filename, 'r')
return PETSc.Mat().load(viewer)
def save_vec_PETSc(self, filename, vec, type='Binary'):
''' Saves a vector in PETSc format '''
if type == 'Binary':
viewer = PETSc.Viewer().createBinary(filename, 'w')
elif type == 'ASCII':
viewer = PETSc.Viewer().createASCII(filename, 'w')
viewer(vec)
def load_vec_PETSc(self, filename, type='Binary'):
''' Loads and returns a vector stored in PETSc format '''
if type == 'Binary':
viewer = PETSc.Viewer().createBinary(filename, 'r')
elif type == 'ASCII':
viewer = PETSc.Viewer().createASCII(filename, 'r')
return PETSc.Mat().load(viewer)
def save_model(self, filename):
''' Saves the model structure without the computed A and M matrices'''
try:
self.A
except:
pass
else:
A = self.A
del self.A
try:
self.M
except:
pass
else:
M = self.M
del self.M
pkl.dump(self, open(filename, 'wb'))
try:
A
except:
pass
else:
self.A = A
try:
M
except:
pass
else:
self.M = M
def make_d2Mat(self):
self.d2_rows = []
self.d2_cols = []
self.d2_vals = []
for var in self.model_variables:
self.add_gov_equation('d2_'+var, var)
exec('self.d2_'+var+'.add_d2_bd0(\''+var+'\','+str(self.m)+')')
exec('self.d2_rows = self.d2_'+var+'.rows')
exec('self.d2_cols = self.d2_'+var+'.cols')
exec('self.d2_vals = self.d2_'+var+'.vals')
self.d2Mat = coo_matrix((self.d2_vals, (self.d2_rows, self.d2_cols)),
shape=(self.SizeM, self.SizeM))
return self.d2Mat
def make_dthMat(self):
self.dth_rows = []
self.dth_cols = []
self.dth_vals = []
for var in self.model_variables:
self.add_gov_equation('dth_'+var, var)
exec('self.dth_'+var+'.add_dth(\''+var+'\','+str(self.m)+')')
exec('self.dth_rows += self.dth_'+var+'.rows')
exec('self.dth_cols += self.dth_'+var+'.cols')
exec('self.dth_vals += self.dth_'+var+'.vals')
self.dthMat = coo_matrix((self.dth_vals, (self.dth_rows, self.dth_cols)),
shape=(self.SizeM, self.SizeM))
return self.dthMat
def make_dphMat(self):
self.dph_rows = []
self.dph_cols = []
self.dph_vals = []
for var in self.model_variables:
self.add_gov_equation('dth_'+var, var)
exec('self.dph_'+var+'.add_dth(\''+var+'\','+str(self.m)+')')
exec('self.dph_rows += self.dth_'+var+'.rows')
exec('self.dph_cols += self.dth_'+var+'.cols')
exec('self.dph_vals += self.dth_'+var+'.vals')
self.dthMat = coo_matrix((self.dph_vals, (self.dph_rows, self.dph_cols)),
shape=(self.SizeM, self.SizeM))
return self.dphMat
def make_Bobs(self):
BrobsT = 2*np.ones((self.Nk, self.Nl))*cos(self.th)
self.Brobs = BrobsT/self.B_star
gradBrobsT = -2*np.ones((self.Nk, self.Nl))*sin(self.th)/self.R
self.gradBrobs = gradBrobsT/self.B_star*self.r_star
self.add_gov_equation('Bobs', self.model_variables[0])
self.Bobs.add_term('uth', self.gradBrobs)
self.Bobs.add_dth('uth', C= self.Brobs)
self.Bobs.add_dph('uph', C= self.Brobs)
self.BobsMat = coo_matrix((self.Bobs.vals, (self.Bobs.rows, self.Bobs.cols)),
shape=(self.SizeM, self.SizeM))
return self.BobsMat
def make_operators(self):
"""
:return:
"""
dr = self.dr
r = self.r
rp = self.rp
rm = self.rm
dth = self.dth
th = self.th
thm = self.thm
thp = self.thp
Nk = self.Nk
Nl = self.Nl
m = self.m
delta_C = self.delta_C/self.r_star
E = self.E
Pm = self.Pm
# ddr
self.ddr_kp1 = rp**2/(2*r**2*dr)
self.ddr_km1 = -rm**2/(2*r**2*dr)
self.ddr = 1/r
self.ddr_kp1_b0 = np.array(self.ddr_kp1)
self.ddr_km1_b0 = np.array(self.ddr_km1)
self.ddr_b0 = np.array(self.ddr)
self.ddr_kp1_b0[-1,:] = np.zeros(Nl)
self.ddr_b0[-1,:] = -rm[-1,:]**2/(2*r[-1,:]**2*dr)
self.ddr_km1_b0[0,:] = np.zeros(Nl)
self.ddr_b0[0,:] = rp[0,:]**2/(2*r[0,:]**2*dr)
self.ddr_kp1_bd0 = np.array(self.ddr_kp1)
self.ddr_km1_bd0 = np.array(self.ddr_km1)
self.ddr_bd0 = np.array(self.ddr)
self.ddr_kp1_bd0[-1,:] = np.zeros(Nl)
self.ddr_bd0[-1,:] = (2*rp[-1,:]**2 -rm[-1,:]**2)/(2*r[-1,:]**2*dr)
self.ddr_km1_bd0[0,:] = np.zeros(Nl)
self.ddr_bd0[0,:] = (rp[0,:]**2 - 2*rm[0,:]**2)/(2*r[0,:]**2*dr)
# ddr for Conducting core boundary conditions
self.ddr_kp1_ccb0 = np.array(self.ddr_kp1_b0)
self.ddr_kp1_ccb0[0,:] = rp[0,:]**2/(r[0,:]**2*2*dr)
self.ddr_km1_ccb0 = np.array(self.ddr_km1_b0)
self.ddr_km1_ccb0[0,:] = np.zeros(Nl)
self.ddr_ccb0 = np.array(self.ddr_b0)
self.ddr_ccb0[0,:] = rp[0,:]**2/(r[0,:]**2*2*dr)
self.ddr_u_ccb0 = -rm[0,:]**2/(r[0,:]**2*dr)
# ddth
self.ddth_lp1 = sin(thp)/(2*r*sin(th)*dth)
self.ddth_lm1 = -sin(thm)/(2*r*sin(th)*dth)
self.ddth = (sin(thp)-sin(thm))/(2*r*sin(th)*dth)
# ddph
self.ddph = 1j*m/(r*sin(th))
# drP
self.drP_kp1 = rp**2/(2*dr*r**2)
self.drP_km1 = -rm**2/(2*dr*r**2)
self.drP_lp1 = -sin(thp)/(4*r*sin(th))
self.drP_lm1 = -sin(thm)/(4*r*sin(th))
self.drP = -(sin(thp)+sin(thm))/(4*r*sin(th))
self.drP_kp1[-1,:] = np.zeros(Nl)
self.drP[-1,:] = rp[-1,:]**2/(2*dr*r[-1,:]**2) \
- (sin(thp[-1,:]) + sin(thm[-1,:]))/(4*r[-1,:]*sin(th[-1,:]))
self.drP_km1[0,:] = np.zeros(Nl)
self.drP[0,:] = -rm[0,:]**2/(2*dr*r[0,:]**2) \
- (sin(thp[0,:]) + sin(thm[0,:]))/(4*r[0,:]*sin(th[0,:]))
# dthP
self.dthP_lp1 = sin(thp)/(2*r*sin(th)*dth)
self.dthP_lm1 = -sin(thm)/(2*r*sin(th)*dth)
self.dthP = (sin(thp)-sin(thm))/(2*r*sin(th)*dth) - cos(th)/(r*sin(th))
# dphP
self.dphP = 1j*m/(r*sin(th))
# Laplacian
self.d2_kp1 = (rp/(r*dr))**2
self.d2_km1 = (rm/(r*dr))**2
self.d2_lp1 = sin(thp)/(sin(th)*(r*dth)**2)
self.d2_lm1 = sin(thm)/(sin(th)*(r*dth)**2)
self.d2 = -((rp**2+rm**2)/(r*dr)**2 + (sin(thp) + sin(thm))/(sin(th)*(r*dth)**2) + (m/(r*sin(th)))**2)
# Laplacian for B.C. var = 0
self.d2_kp1_b0 = np.array(self.d2_kp1)
self.d2_km1_b0 = np.array(self.d2_km1)
self.d2_lp1_b0 = self.d2_lp1
self.d2_lm1_b0 = self.d2_lm1
self.d2_b0 = np.array(self.d2)
self.d2_kp1_b0[-1,:] = np.zeros(Nl)
self.d2_b0[-1,:] = (-((2*rp**2+rm**2)/(r*dr)**2 + (sin(thp) + sin(thm))/(sin(th)*(r*dth)**2) + (m/(r*
|
sin(th)
|
numpy.sin
|
#!/usr/bin/env python3
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Training/decoding definition for the speech recognition task."""
import copy
import json
import logging
import math
import os
import sys
from chainer import reporter as reporter_module
from chainer import training
from chainer.training import extensions
from chainer.training.updater import StandardUpdater
import numpy as np
from tensorboardX import SummaryWriter
import torch
from torch.nn.parallel import data_parallel
from espnet.asr.asr_utils import adadelta_eps_decay
from espnet.asr.asr_utils import add_results_to_json
from espnet.asr.asr_utils import CompareValueTrigger
from espnet.asr.asr_utils import format_mulenc_args
from espnet.asr.asr_utils import get_model_conf
from espnet.asr.asr_utils import plot_spectrogram
from espnet.asr.asr_utils import restore_snapshot
from espnet.asr.asr_utils import snapshot_object
from espnet.asr.asr_utils import torch_load
from espnet.asr.asr_utils import torch_resume
from espnet.asr.asr_utils import torch_snapshot
from espnet.asr.pytorch_backend.asr_init_blstm import freeze_modules
from espnet.asr.pytorch_backend.asr_init_blstm import load_trained_model
from espnet.asr.pytorch_backend.asr_init_blstm import load_trained_modules
import espnet.lm.pytorch_backend.extlm as extlm_pytorch
from espnet.nets.asr_interface import ASRInterface
from espnet.nets.beam_search_transducer import BeamSearchTransducer
from espnet.nets.pytorch_backend.e2e_asr import pad_list
import espnet.nets.pytorch_backend.lm.default as lm_pytorch
from espnet.nets.pytorch_backend.streaming.segment import SegmentStreamingE2E
from espnet.nets.pytorch_backend.streaming.window import WindowStreamingE2E
from espnet.transform.spectrogram import IStft
from espnet.transform.transformation import Transformation
from espnet.utils.cli_writers import file_writer_helper
from espnet.utils.dataset import ChainerDataLoader
from espnet.utils.dataset import TransformDataset
from espnet.utils.deterministic_utils import set_deterministic_pytorch
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.io_utils import LoadInputsAndTargets
from espnet.utils.training.batchfy import make_batchset
from espnet.utils.training.evaluator import BaseEvaluator
from espnet.utils.training.iterators import ShufflingEnabler
from espnet.utils.training.tensorboard_logger import TensorboardLogger
from espnet.utils.training.train_utils import check_early_stop
from espnet.utils.training.train_utils import set_early_stop
import matplotlib
matplotlib.use("Agg")
if sys.version_info[0] == 2:
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest as zip_longest
def _recursive_to(xs, device):
if torch.is_tensor(xs):
return xs.to(device)
if isinstance(xs, tuple):
return tuple(_recursive_to(x, device) for x in xs)
return xs
class CustomEvaluator(BaseEvaluator):
"""Custom Evaluator for Pytorch.
Args:
model (torch.nn.Module): The model to evaluate.
iterator (chainer.dataset.Iterator) : The train iterator.
target (link | dict[str, link]) :Link object or a dictionary of
links to evaluate. If this is just a link object, the link is
registered by the name ``'main'``.
device (torch.device): The device used.
ngpu (int): The number of GPUs.
"""
def __init__(self, model, iterator, target, device, ngpu=None):
super(CustomEvaluator, self).__init__(iterator, target)
self.model = model
self.device = device
if ngpu is not None:
self.ngpu = ngpu
elif device.type == "cpu":
self.ngpu = 0
else:
self.ngpu = 1
# The core part of the update routine can be customized by overriding
def evaluate(self):
"""Main evaluate routine for CustomEvaluator."""
iterator = self._iterators["main"]
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, "reset"):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
summary = reporter_module.DictSummary()
self.model.eval()
with torch.no_grad():
for batch in it:
x = _recursive_to(batch, self.device)
observation = {}
with reporter_module.report_scope(observation):
# read scp files
# x: original json with loaded features
# will be converted to chainer variable later
if self.ngpu == 0:
self.model(*x)
else:
# apex does not support torch.nn.DataParallel
data_parallel(self.model, x, range(self.ngpu))
summary.add(observation)
self.model.train()
return summary.compute_mean()
class CustomUpdater(StandardUpdater):
"""Custom Updater for Pytorch.
Args:
model (torch.nn.Module): The model to update.
grad_clip_threshold (float): The gradient clipping value to use.
train_iter (chainer.dataset.Iterator): The training iterator.
optimizer (torch.optim.optimizer): The training optimizer.
device (torch.device): The device to use.
ngpu (int): The number of gpus to use.
use_apex (bool): The flag to use Apex in backprop.
"""
def __init__(
self,
model,
grad_clip_threshold,
train_iter,
optimizer,
device,
ngpu,
grad_noise=False,
accum_grad=1,
use_apex=False,
):
super(CustomUpdater, self).__init__(train_iter, optimizer)
self.model = model
self.grad_clip_threshold = grad_clip_threshold
self.device = device
self.ngpu = ngpu
self.accum_grad = accum_grad
self.forward_count = 0
self.grad_noise = grad_noise
self.iteration = 0
self.use_apex = use_apex
# The core part of the update routine can be customized by overriding.
def update_core(self):
"""Main update routine of the CustomUpdater."""
# When we pass one iterator and optimizer to StandardUpdater.__init__,
# they are automatically named 'main'.
train_iter = self.get_iterator("main")
optimizer = self.get_optimizer("main")
epoch = train_iter.epoch
# Get the next batch (a list of json files)
batch = train_iter.next()
# self.iteration += 1 # Increase may result in early report,
# which is done in other place automatically.
x = _recursive_to(batch, self.device)
is_new_epoch = train_iter.epoch != epoch
# When the last minibatch in the current epoch is given,
# gradient accumulation is turned off in order to evaluate the model
# on the validation set in every epoch.
# see details in https://github.com/espnet/espnet/pull/1388
# Compute the loss at this time step and accumulate it
if self.ngpu == 0:
loss = self.model(*x).mean() / self.accum_grad
else:
# apex does not support torch.nn.DataParallel
loss = (
data_parallel(self.model, x, range(self.ngpu)).mean() / self.accum_grad
)
if self.use_apex:
from apex import amp
# NOTE: for a compatibility with noam optimizer
opt = optimizer.optimizer if hasattr(optimizer, "optimizer") else optimizer
with amp.scale_loss(loss, opt) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# gradient noise injection
if self.grad_noise:
from espnet.asr.asr_utils import add_gradient_noise
add_gradient_noise(
self.model, self.iteration, duration=100, eta=1.0, scale_factor=0.55
)
# update parameters
self.forward_count += 1
if not is_new_epoch and self.forward_count != self.accum_grad:
return
self.forward_count = 0
# compute the gradient norm to check if it is normal or not
grad_norm = torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.grad_clip_threshold
)
logging.info("grad norm={}".format(grad_norm))
if math.isnan(grad_norm):
logging.warning("grad norm is nan. Do not update model.")
else:
optimizer.step()
optimizer.zero_grad()
def update(self):
self.update_core()
# #iterations with accum_grad > 1
# Ref.: https://github.com/espnet/espnet/issues/777
if self.forward_count == 0:
self.iteration += 1
class CustomConverter(object):
"""Custom batch converter for Pytorch.
Args:
subsampling_factor (int): The subsampling factor.
dtype (torch.dtype): Data type to convert.
"""
def __init__(self, subsampling_factor=1, dtype=torch.float32):
"""Construct a CustomConverter object."""
self.subsampling_factor = subsampling_factor
self.ignore_id = -1
self.dtype = dtype
def __call__(self, batch, device=torch.device("cpu")):
"""Transform a batch and send it to a device.
Args:
batch (list): The batch to transform.
device (torch.device): The device to send to.
Returns:
tuple(torch.Tensor, torch.Tensor, torch.Tensor)
"""
# batch should be located in list
assert len(batch) == 1
xs, ys = batch[0]
# perform subsampling
if self.subsampling_factor > 1:
xs = [x[:: self.subsampling_factor, :] for x in xs]
# get batch of lengths of input sequences
ilens = np.array([x.shape[0] for x in xs])
# perform padding and convert to tensor
# currently only support real number
if xs[0].dtype.kind == "c":
xs_pad_real = pad_list(
[torch.from_numpy(x.real).float() for x in xs], 0
).to(device, dtype=self.dtype)
xs_pad_imag = pad_list(
[torch.from_numpy(x.imag).float() for x in xs], 0
).to(device, dtype=self.dtype)
# Note(kamo):
# {'real': ..., 'imag': ...} will be changed to ComplexTensor in E2E.
# Don't create ComplexTensor and give it E2E here
# because torch.nn.DataParellel can't handle it.
xs_pad = {"real": xs_pad_real, "imag": xs_pad_imag}
else:
xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0).to(
device, dtype=self.dtype
)
ilens = torch.from_numpy(ilens).to(device)
# NOTE: this is for multi-output (e.g., speech translation)
ys_pad = pad_list(
[
torch.from_numpy(
np.array(y[0][:]) if isinstance(y, tuple) else y
).long()
for y in ys
],
self.ignore_id,
).to(device)
return xs_pad, ilens, ys_pad
class CustomConverterMulEnc(object):
"""Custom batch converter for Pytorch in multi-encoder case.
Args:
subsampling_factors (list): List of subsampling factors for each encoder.
dtype (torch.dtype): Data type to convert.
"""
def __init__(self, subsamping_factors=[1, 1], dtype=torch.float32):
"""Initialize the converter."""
self.subsamping_factors = subsamping_factors
self.ignore_id = -1
self.dtype = dtype
self.num_encs = len(subsamping_factors)
def __call__(self, batch, device=torch.device("cpu")):
"""Transform a batch and send it to a device.
Args:
batch (list): The batch to transform.
device (torch.device): The device to send to.
Returns:
tuple( list(torch.Tensor), list(torch.Tensor), torch.Tensor)
"""
# batch should be located in list
assert len(batch) == 1
xs_list = batch[0][: self.num_encs]
ys = batch[0][-1]
# perform subsampling
if np.sum(self.subsamping_factors) > self.num_encs:
xs_list = [
[x[:: self.subsampling_factors[i], :] for x in xs_list[i]]
for i in range(self.num_encs)
]
# get batch of lengths of input sequences
ilens_list = [
|
np.array([x.shape[0] for x in xs_list[i]])
|
numpy.array
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SeedEditor for organ segmentation
Example:
$ seed_editor_qp.py -f head.mat
"""
from loguru import logger
# try:
# QString = unicode
# except NameError:
# Python 3
# QString = str
QString = str
# import unittest
from optparse import OptionParser
from scipy.io import loadmat
import numpy as np
import sys
from scipy.spatial import Delaunay
import PyQt5
from PyQt5.QtCore import Qt, QSize, pyqtSignal
try:
pass
except ImportError:
# we are using Python3 so QString is not defined
QString = type("")
from PyQt5.QtGui import QImage, QPixmap, QPainter, qRgba, QIcon
from PyQt5.QtWidgets import (
QDialog,
QApplication,
QSlider,
QPushButton,
QLabel,
QComboBox,
QStatusBar,
QHBoxLayout,
QVBoxLayout,
QFrame,
QSizePolicy,
)
from PyQt5 import QtCore, QtGui, QtWidgets
import math
# BGRA order
GRAY_COLORTABLE = np.array([[ii, ii, ii, 255] for ii in range(256)], dtype=np.uint8)
SEEDS_COLORTABLE = np.array(
[
[(15 + ii * 41) % 256, (47 + ii * 117) % 256, (11 + ii * -31) % 256, 220]
for ii in range(256)
],
dtype=np.uint8,
)
SEEDS_COLORTABLE[:4] = np.array(
[[0, 255, 0, 220], [64, 0, 255, 220], [0, 200, 128, 220], [64, 128, 200, 220]],
dtype=np.uint8,
)
# In future something like this...
# CONTOURS_COLORS = np.array([[
# (15 + ii * 41) % 256,
# (47 + ii * 117) % 256,
# (11 + ii * -31) % 256
# ] for ii in range(256)],
# dtype=np.uint8)
CONTOURS_COLORS = {
1: [64, 255, 0],
2: [255, 0, 64],
3: [0, 64, 255],
4: [255, 64, 0],
5: [64, 0, 255],
6: [0, 255, 64],
7: [0, 128, 192],
8: [128, 0, 192],
9: [128, 192, 0],
10: [0, 192, 128],
11: [192, 128, 0],
12: [192, 0, 128],
13: [128, 0, 0],
14: [0, 128, 0],
15: [0, 0, 128],
16: [64, 128, 128],
17: [128, 128, 64],
18: [128, 64, 128],
19: [128, 255, 0],
20: [128, 255, 128],
21: [128, 128, 255],
22: [128, 255, 128],
23: [64, 255, 128],
24: [0, 255, 128],
25: [128, 255, 255],
26: [64, 0, 0],
27: [0, 0, 64],
28: [0, 64, 64],
29: [64, 128, 0],
30: [192, 128, 64],
31: [64, 0, 128],
32: [128, 128, 64],
33: [0, 128, 64],
34: [128, 0, 64],
35: [64, 64, 0],
36: [0, 64, 0],
37: [0, 64, 64],
38: [128, 0, 0],
39: [128, 255, 0],
40: [0, 0, 128],
41: [0, 128, 128],
42: [64, 128, 0],
43: [64, 128, 0],
44: [128, 0, 128],
45: [128, 64, 128],
46: [128, 128, 64],
47: [128, 64, 128],
48: [64, 64, 128],
49: [0, 64, 128],
50: [128, 64, 64],
51: [255, 64, 64],
52: [64, 64, 255],
53: [64, 255, 255],
54: [255, 128, 64],
55: [255, 128, 255],
56: [255, 64, 128],
57: [128, 128, 255],
58: [64, 128, 255],
59: [128, 64, 255],
60: [255, 255, 64],
61: [64, 255, 64],
62: [64, 255, 255],
63: [128, 64, 64],
64: [128, 64, 64],
65: [64, 64, 128],
66: [64, 128, 128],
67: [255, 128, 64],
68: [255, 128, 64],
69: [128, 64, 128],
70: [128, 255, 128],
71: [128, 128, 255],
72: [128, 255, 128],
73: [255, 255, 128],
74: [64, 255, 128],
75: [128, 255, 255],
76: [64, 255, 255],
77: [255, 255, 64],
78: [255, 64, 64],
79: [64, 128, 255],
80: [64, 128, 64],
81: [64, 255, 128],
82: [128, 128, 64],
83: [255, 128, 64],
84: [128, 255, 64],
85: [64, 64, 255],
86: [255, 64, 255],
87: [255, 64, 64],
88: [128, 255, 255],
89: [128, 255, 64],
90: [255, 255, 128],
91: [255, 128, 128],
92: [64, 128, 255],
93: [64, 128, 255],
94: [128, 255, 128],
95: [128, 64, 128],
96: [128, 128, 64],
97: [128, 64, 128],
98: [64, 64, 128],
99: [255, 64, 128],
100: [128, 64, 64],
}
CONTOURS_COLORTABLE = np.zeros((256, 4), dtype=np.uint8)
CONTOURS_COLORTABLE[:, :3] = 255
CONTOURLINES_COLORTABLE = np.zeros((256, 2, 4), dtype=np.uint8)
CONTOURLINES_COLORTABLE[:, :, :3] = 255
for ii, jj in CONTOURS_COLORS.items():
key = ii - 1
CONTOURS_COLORTABLE[key, :3] = jj
CONTOURS_COLORTABLE[key, 3] = 64
CONTOURLINES_COLORTABLE[key, 0, :3] = jj
CONTOURLINES_COLORTABLE[key, 0, 3] = 16
CONTOURLINES_COLORTABLE[key, 1, :3] = jj
CONTOURLINES_COLORTABLE[key, 1, 3] = 255
VIEW_TABLE = {"axial": (2, 1, 0), "sagittal": (1, 0, 2), "coronal": (2, 0, 1)}
DRAW_MASK = [
(np.array([[1]], dtype=np.int8), "small pen"),
(
np.array(
[
[0, 1, 1, 1, 0],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 1, 1, 0],
],
dtype=np.int8,
),
"middle pen",
),
(
np.array(
[
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
],
dtype=np.int8,
),
"large pen",
),
]
BOX_BUTTONS_SEED = {
Qt.LeftButton: 1,
Qt.RightButton: 2,
# Qt.MiddleButton: 3, # it is not possible because of delete mode for segmentation
}
BOX_BUTTONS_SEED_SHIFT_OFFSET = 2
BOX_BUTTONS_DRAW = {Qt.LeftButton: 1, Qt.RightButton: 0}
NEI_TAB = [[-1, -1], [0, -1], [1, -1], [-1, 0], [1, 0], [-1, 1], [0, 1], [1, 1]]
def erase_reg(arr, p, val=0):
from scipy.ndimage.measurements import label
labs, num = label(arr)
aval = labs[p]
idxs = np.where(labs == aval)
arr[idxs] = val
class SliceBox(QLabel):
"""
Widget for marking reagions of interest in DICOM slices.
"""
focus_slider = pyqtSignal()
def __init__(self, sliceSize, grid, mode="seeds", seeds_colortable=None, contours_colortable=None, contourlines_colortable=None):
"""
Initialize SliceBox.
Parameters
----------
sliceSize : tuple of int
Size of slice matrix.
grid : tuple of float
Pixel size:
imageSize = (grid1 * sliceSize1, grid2 * sliceSize2)
mode : str
Editor mode.
"""
QLabel.__init__(self)
self.drawing = False
self.modified = False
self.seed_mark = None
self.last_position = None
self.imagesize = QSize(int(sliceSize[0] * grid[0]), int(sliceSize[1] * grid[1]))
self.grid = grid
self.slice_size = sliceSize
self.ctslice_rgba = None
self.cw = {"c": 1.0, "w": 1.0}
self.seeds = None
self.contours = None
self.contours_old = None
self.mask_points = None
self.erase_region_button = None
self.erase_fun = None
self.erase_mode = "inside"
self.contour_mode = "fill"
self.scroll_fun = None
if mode == "draw":
self.box_buttons = BOX_BUTTONS_DRAW
self.mode_draw = True
else:
self.box_buttons = BOX_BUTTONS_SEED
self.mode_draw = False
if seeds_colortable is None:
self.seeds_colortable = CONTOURS_COLORTABLE if self.mode_draw else SEEDS_COLORTABLE
else:
self.seeds_colortable = seeds_colortable
self.contourlines_colortable = contourlines_colortable if contourlines_colortable else CONTOURLINES_COLORTABLE
self.contours_colortable = contours_colortable if contours_colortable else CONTOURS_COLORTABLE
self.image = QImage(self.imagesize, QImage.Format_RGB32)
self.setPixmap(QPixmap.fromImage(self.image))
self.setScaledContents(True)
def paintEvent(self, event):
painter = QPainter(self)
painter.drawImage(event.rect(), self.image)
painter.end()
def drawSeedMark(self, x, y):
if sys.version_info.major == 2:
xx = self.mask_points[0] + x # .astype(np.int)
yy = self.mask_points[1] + y # .astype(np.int)
else:
xx = self.mask_points[0] + x + 0.5 # .astype(np.int)
yy = self.mask_points[1] + y + 0.5 # .astype(np.int)
idx = np.arange(len(xx))
idx[np.where(xx < 0)] = -1
idx[np.where(xx >= self.slice_size[0])] = -1
idx[np.where(yy < 0)] = -1
idx[np.where(yy >= self.slice_size[1])] = -1
ii = idx[np.where(idx >= 0)]
xx_ii = xx[ii] # .round().astype(np.int)
yy_ii = yy[ii] # .round().astype(np.int)
linear_index = (yy_ii * self.slice_size[0] + xx_ii).round().astype(np.int)
self.seeds[linear_index] = self.seed_mark
def drawLine(self, p0, p1):
"""
Draw line to slice image and seed matrix.
Parameters
----------
p0 : tuple of int
Line star point.
p1 : tuple of int
Line end point.
"""
x0, y0 = p0
x1, y1 = p1
dx = np.abs(x1 - x0)
dy = np.abs(y1 - y0)
if x0 < x1:
sx = 1
else:
sx = -1
if y0 < y1:
sy = 1
else:
sy = -1
err = dx - dy
while True:
self.drawSeedMark(x0, y0)
if x0 == x1 and y0 == y1:
break
e2 = 2 * err
if e2 > -dy:
err = err - dy
x0 = x0 + sx
if e2 < dx:
err = err + dx
y0 = y0 + sy
def drawSeeds(self, pos):
"""
:param pos: list of two indexes with mouse position
:return:
"""
if (
pos[0] < 0
or pos[0] >= self.slice_size[0]
or pos[1] < 0
or pos[1] >= self.slice_size[1]
):
return
self.drawLine(self.last_position, pos)
self.updateSlice()
self.modified = True
self.last_position = pos
self.update()
def get_contours(self, img, sl):
idxs = sl.nonzero()[0]
keys = np.unique(sl[idxs])
for ii in keys:
if ii == 0:
continue
aux = np.zeros_like(sl)
idxsi = np.where(sl == ii)[0]
aux[idxsi] = 1
cnt = self.gen_contours(aux)
self.composeRgba(img, cnt, self.contourlines_colortable[ii - 1, ...])
def gen_contours(self, sl):
sls = sl.reshape(self.slice_size, order="F")
cnt = sls.copy()
chunk = np.zeros((cnt.shape[1] + 2,), dtype=np.int8)
for irow, row in enumerate(sls):
chunk[1:-1] = row
chdiff = np.diff(chunk)
idx1 = np.where(chdiff > 0)[0]
if idx1.shape[0] > 0:
idx2 = np.where(chdiff < 0)[0]
if idx2.shape[0] > 0:
cnt[irow, idx1] = 2
cnt[irow, idx2 - 1] = 2
chunk = np.zeros((cnt.shape[0] + 2,), dtype=np.int8)
for icol, col in enumerate(sls.T):
chunk[1:-1] = col
chdiff = np.diff(chunk)
idx1 = np.where(chdiff > 0)[0]
if idx1.shape[0] > 0:
idx2 = np.where(chdiff < 0)[0]
if idx2.shape[0] > 0:
cnt[idx1, icol] = 2
cnt[idx2 - 1, icol] = 2
return cnt.ravel(order="F")
def composeRgba(self, bg, fg, cmap):
# TODO here is bug. Should be used nearest neighboor instead of linear interpolation
idxs = fg.nonzero()[0]
if idxs.shape[0] > 0:
fg_rgb = cmap[fg[idxs] - 1]
af = fg_rgb[..., 3].astype(np.uint32)
rgbf = fg_rgb[..., :3].astype(np.uint32)
rgbb = bg[idxs, :3].astype(np.uint32)
rgbx = ((rgbf.T * af).T + (rgbb.T * (255 - af)).T) / 255
bg[idxs, :3] = rgbx.astype(np.uint8)
def overRgba(self, bg, fg, cmap):
idxs = fg.nonzero()[0]
bg[idxs] = cmap[fg[idxs] - 1]
def updateSlice(self):
if self.ctslice_rgba is None:
return
img = self.ctslice_rgba.copy()
if self.seeds is not None:
if self.mode_draw:
if self.contour_mode == "fill":
self.composeRgba(img, self.seeds, self.seeds_colortable)
elif self.contour_mode == "contours":
self.get_contours(img, self.seeds)
else:
# self.overRgba(img, self.seeds, self.seeds_colortable)
self.composeRgba(img, self.seeds, self.seeds_colortable)
if self.contours is not None:
if self.contour_mode == "fill":
self.composeRgba(img, self.contours, self.contours_colortable)
elif self.contour_mode == "contours":
self.get_contours(img, self.contours)
image = QImage(
img.flatten(), self.slice_size[0], self.slice_size[1], QImage.Format_ARGB32
).scaled(self.imagesize)
painter = QPainter(self.image)
painter.drawImage(0, 0, image)
painter.end()
self.update()
def getSliceRGBA(self, ctslice):
if self.cw["w"] > 0:
mul = 255.0 / float(self.cw["w"])
else:
mul = 0
lb = self.cw["c"] - self.cw["w"] / 2
aux = (ctslice.ravel(order="F") - lb) * mul
idxs = np.where(aux < 0)[0]
aux[idxs] = 0
idxs = np.where(aux > 255)[0]
aux[idxs] = 255
return aux.astype(np.uint8)
def updateSliceCW(self, ctslice=None):
if ctslice is not None:
self.ctslice_rgba = GRAY_COLORTABLE[self.getSliceRGBA(ctslice)]
self.updateSlice()
def setSlice(self, ctslice=None, seeds=None, contours=None):
if ctslice is not None:
self.ctslice_rgba = GRAY_COLORTABLE[self.getSliceRGBA(ctslice)]
if seeds is not None:
self.seeds = seeds.ravel(order="F")
else:
self.seeds = None
if contours is not None:
self.contours = contours.ravel(order="F")
else:
self.contours = None
self.updateSlice()
def getSliceSeeds(self):
if self.modified:
self.modified = False
return self.seeds.reshape(self.slice_size, order="F")
else:
return None
def gridPosition(self, pos):
return (int(pos.x() / self.grid[0]), int(pos.y() / self.grid[1]))
# mouse events
def _setSeedMark(self, button):
modifiers = PyQt5.QtWidgets.QApplication.keyboardModifiers()
shift_offset = 0
if modifiers == PyQt5.QtCore.Qt.ShiftModifier:
shift_offset = BOX_BUTTONS_SEED_SHIFT_OFFSET
elif modifiers == PyQt5.QtCore.Qt.ControlModifier:
# this make seed_mark = 0 when left button is pressed
shift_offset = -1
# print('Control+Click')
# elif modifiers == (QtCore.Qt.ControlModifier |
# QtCore.Qt.ShiftModifier):
# print('Control+Shift+Click')
# this means
# 0 - Ctrl + LMB
# 1 - LMB
# 2 - RMB
# 3 - Shift + LMB
# 4 - Shift + RMB
self.seed_mark = self.box_buttons[button()] + shift_offset
if self.seed_mark == 1:
parent = self.parent()
self.seed_mark = parent.seeds_slab[parent.textFocusedSeedLabel]
def _get_intensity(self, grid_position):
lp = grid_position
actual_slice = self.parent().img_aview[..., int(self.parent().actual_slice)]
xx_ii, yy_ii = lp
# linear_index = np.round(yy_ii * self.slice_size[0] + xx_ii).astype(np.int)
intensity = actual_slice[xx_ii, yy_ii]
return intensity
def _get_seed_label(self, grid_position):
lp = grid_position
xx_ii, yy_ii = lp
linear_index = np.round(yy_ii * self.slice_size[0] + xx_ii).astype(np.int)
picked_seed_value = self.seeds[linear_index]
return picked_seed_value
def _get_segmentation_label(self, grid_position):
lp = grid_position
xx_ii, yy_ii = lp
linear_index = np.round(yy_ii * self.slice_size[0] + xx_ii).astype(np.int)
if self.contours is None:
picked_seed_value = None
else:
picked_seed_value = self.contours[linear_index]
return picked_seed_value
def _pick_up_seed_label(self, grid_position):
picked_seed_value = self._get_seed_label(grid_position)
parent = self.parent().change_focus_seed_label(picked_seed_value)
# picked_seed_value = self.seeds.reshape(self.slice_size)[lp]
def _pick_up_segmentation_label(self, grid_position):
picked_seed_value = self._get_segmentation_label(grid_position)
if picked_seed_value is not None:
parent = self.parent().change_focus_segmentation_label(picked_seed_value)
def mousePressEvent(self, event):
self.make_last_click_status(self.gridPosition(event.pos()))
if event.button() in self.box_buttons:
#
self.drawing = True
self._setSeedMark(event.button)
modifiers = PyQt5.QtWidgets.QApplication.keyboardModifiers()
self.last_position = self.gridPosition(event.pos())
if modifiers == PyQt5.QtCore.Qt.ControlModifier:
if event.button() == Qt.RightButton:
# pickup seed
self.drawing = False
self._pick_up_seed_label(self.last_position)
elif modifiers == PyQt5.QtCore.Qt.AltModifier:
if event.button() == Qt.RightButton:
# pickup seed
self.drawing = False
self._pick_up_segmentation_label(self.last_position)
# fir
elif event.button() == Qt.MiddleButton:
self.drawing = False
self.erase_region_button = True
def mouseMoveEvent(self, event):
if self.drawing:
self.drawSeeds(self.gridPosition(event.pos()))
def mouseReleaseEvent(self, event):
if (event.button() in self.box_buttons) and self.drawing:
self.drawSeeds(self.gridPosition(event.pos()))
self.drawing = False
elif event.button() == Qt.MiddleButton and self.erase_region_button == True:
self.eraseRegion(self.gridPosition(event.pos()), self.erase_mode)
self.erase_region_button == False
def make_last_click_status(self, grid_pos):
slicen = self.parent().actual_slice
intensity = self._get_intensity(grid_pos)
seed_label = self._get_seed_label(grid_pos)
segm_label = self._get_segmentation_label(grid_pos)
self.parent().last_click_label.setText(
"{}, {}, {}\n{}, {}, {}".format(
slicen, grid_pos[0], grid_pos[1], intensity, seed_label, segm_label
)
)
def resizeSlice(self, new_slice_size=None, new_grid=None):
logger.debug("resizeSlice " + str(new_slice_size) + str(new_grid))
# print("new slice size" , str(new_slice_size), str(new_grid),
# str(self.slice_size), str(self.grid)
# )
if new_slice_size is not None:
self.slice_size = new_slice_size
if new_grid is not None:
self.grid = new_grid
self.imagesize = QSize(
int(self.slice_size[0] * self.grid[0]),
int(self.slice_size[1] * self.grid[1]),
)
self.image = QImage(self.imagesize, QImage.Format_RGB32)
self.setPixmap(QPixmap.fromImage(self.image))
def resizeEvent(self, event):
# print("self.grid ", self.grid)
new_height = self.height()
new_grid = new_height / float(self.slice_size[1])
mul = new_grid / self.grid[1]
self.grid = np.array(self.grid) * mul
# print("self.grid new", self.grid)
self.resizeSlice()
self.updateSlice()
def leaveEvent(self, event):
self.drawing = False
def enterEvent(self, event):
self.drawing = False
self.focus_slider.emit()
def setMaskPoints(self, mask):
self.mask_points = mask
def getCW(self):
return self.cw
def setCW(self, val, key):
self.cw[key] = val
def eraseRegion(self, pos, mode):
if self.erase_fun is not None:
self.erase_fun(pos, mode)
self.updateSlice()
def setEraseFun(self, fun):
self.erase_fun = fun
def setScrollFun(self, fun):
self.scroll_fun = fun
def wheelEvent(self, event):
d = event.angleDelta().y()
absd = abs(d)
if absd > 0:
nd = d / absd
if self.scroll_fun is not None:
self.scroll_fun(-nd)
# TODO do widget
# class QTSeedEditorWidget(QWidget):
class QTSeedEditor(QDialog):
"""
DICOM viewer.
"""
def __init__(
self,
img,
viewPositions=None,
seeds=None,
contours=None,
mode="seed",
modeFun=None,
voxelSize=None,
volume_unit="mm3",
button_text=None,
button_callback=None,
appmenu_text=None,
seed_labels=None,
slab=None,
init_brush_index=1,
seeds_colortable=None,
contours_colortable=None,
contourlines_colortable=None,
unit="mm",
):
"""
Initiate Editor
Parameters
----------
:param img: array
DICOM data matrix.
:param actualSlice : int
Index of actual slice.
:param seeds : array
Seeds, user defined regions of interest.
:param contours : array
Computed segmentation.
:param mode : str
Editor modes:
'seed' - seed editor
'crop' - manual crop
'draw' - drawing
'mask' - mask region
:param modeFun : fun
Mode function invoked by user button.
:param voxelSize : tuple of float
voxel size [mm]
:param volume_unit : allow select output volume in mililiters or mm3
[mm, ml]
:param appmenu_text: text which is displayed in the right toolbar
:param button_callback: callback function used when button is clicked. Implemented in
"mask" mode. If none, default mask function is used.
:param button_text: text on the button. Implemented for "mask" mode. If None, default text
is used.
:param seed_labels: dictionary with text key and int value
:param slab: dictionary with text key and int value
:param seeds_colortable: ndarray with dtype=np.uint8 and shape (256, 4) [BGRA]
:param contours_colortable: ndarray with dtype=np.uint8 and shape (256, 4) [BGRA]
:param contourlines_colortable: ndarray with dtype=np.uint8 and shape (256, 2, 4) [BGRA]
"""
QDialog.__init__(self)
if voxelSize is None:
voxelSize = [1, 1, 1]
self.BACKGROUND_NOMODEL_SEED_LABEL = 4
self.FOREGROUND_NOMODEL_SEED_LABEL = 3
self.mode = mode
self.mode_fun = modeFun
# self.actual_view = "axial"
self.actual_view = list(VIEW_TABLE.keys())[0]
self.act_transposition = VIEW_TABLE[self.actual_view]
self.img = img
self.img_aview = self.img.transpose(self.act_transposition)
self.volume_unit = volume_unit
self.last_view_position = {}
for jj, ii in enumerate(VIEW_TABLE.keys()):
if viewPositions is None:
viewpos = img.shape[VIEW_TABLE[ii][-1]] / 2
else:
viewpos = viewPositions[jj]
self.last_view_position[ii] = img.shape[VIEW_TABLE[ii][-1]] - viewpos - 1
self.actual_slice = int(self.last_view_position[self.actual_view])
# set contours
self.set_contours(contours)
# masked data - has information about which data were removed
# 1 == enabled, 0 == deleted
# How to return:
# editorDialog.exec_()
# masked_data = editorDialog.masked
self.masked = np.ones(self.img.shape, np.int8)
self.set_voxelsize(voxelSize)
if seeds is None:
seeds = np.zeros(self.img.shape, np.int8)
self.set_seeds(seeds)
self.seeds_modified = False
self.set_labels(seed_labels)
self.set_slab(slab)
self.unit = unit
self.initUI(
self.img_aview.shape,
self.voxel_scale[np.array(self.act_transposition)],
600,
mode,
button_text=button_text,
button_callback=button_callback,
appmenu_text=appmenu_text,
init_brush_index=init_brush_index,
seeds_colortable=seeds_colortable,
contours_colortable=contours_colortable,
contourlines_colortable=contourlines_colortable,
unit=self.unit
)
if mode == "draw":
self.seeds_orig = self.seeds.copy()
self.slice_box.setEraseFun(self.eraseVolume)
# set view window values C/W
lb = np.min(img)
self.img_min_val = lb
ub = np.max(img)
dul = np.double(ub) - np.double(lb)
self.cw_range = {"c": [lb, ub], "w": [1, dul]}
self.slider_cw["c"].setRange(lb, ub)
self.slider_cw["w"].setRange(1, dul)
self.changeC(lb + dul / 2)
self.changeW(dul)
self.offset = np.zeros((3,), dtype=np.int16)
self.plugins = []
# set what labels will be deleted by 'delete seeds' button
def set_seeds(self, seeds):
self.seeds = seeds
self.seeds_aview = self.seeds.transpose(self.act_transposition)
def set_contours(self, contours):
self.contours = contours
if self.contours is None:
self.contours_aview = None
else:
self.contours_aview = self.contours.transpose(self.act_transposition)
def set_voxelsize(self, voxelSize):
self.voxel_size = np.squeeze(np.asarray(voxelSize))
self.voxel_scale = self.voxel_size / float(np.min(self.voxel_size))
self.voxel_volume = np.prod(voxelSize)
@staticmethod
def get_line(mode="h"):
line = QFrame()
if mode == "h":
line.setFrameStyle(QFrame.HLine)
elif mode == "v":
line.setFrameStyle(QFrame.VLine)
line.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Expanding)
return line
def __prepare_mgrid(self, shape, vscale, max_width, max_height):
grid = max_height / float(shape[1] * vscale[1])
mgrid1 = (grid * vscale[0], grid * vscale[1])
expected_im_size = shape[:-1] * vscale[:-1] * mgrid1
if expected_im_size[0] > max_width:
grid = max_width / float(shape[0] * vscale[0])
mgrid0 = (grid * vscale[0], grid * vscale[1])
mgrid = mgrid0
else:
mgrid = mgrid1
expected_im_size = shape[:-1] * vscale[:-1] * mgrid
return mgrid
def initUI(
self,
shape,
vscale,
height=600,
mode="seed",
button_text=None,
button_callback=None,
appmenu_text=None,
init_brush_index=1,
seeds_colortable=None,
contours_colortable=None,
contourlines_colortable=None,
unit=None
):
"""
Initialize UI.
Parameters
----------
shape : (int, int, int)
Shape of data matrix.
vscale : (float, float, float)
Voxel scaling.
height : int
Maximal slice height in pixels.
mode : str
Editor mode.
"""
# picture
mgrid2 = self.__prepare_mgrid(shape, vscale, max_width=1000, max_height=height)
grid = height / float(shape[1] * vscale[1])
mgrid = (grid * vscale[0], grid * vscale[1])
self.slice_box = SliceBox(shape[:-1], mgrid2, mode,
seeds_colortable, contours_colortable, contourlines_colortable
)
self.slice_box.setScrollFun(self.scrollSlices)
self.slice_box.focus_slider.connect(self.focusSliceSlider)
# sliders
self.allow_select_slice = True
self.n_slices = shape[2]
self.slider = QSlider(Qt.Vertical)
self.slider.label = QLabel()
self.slider.label.setText("Slice: %d / %d" % (self.actual_slice, self.n_slices))
self.slider.setRange(1, self.n_slices)
self.slider.valueChanged.connect(self.sliderSelectSlice)
self.slider.setValue(self.actual_slice)
self.slider_cw = {}
self.slider_cw["c"] = QSlider(Qt.Horizontal)
self.slider_cw["c"].valueChanged.connect(self.changeC)
self.slider_cw["c"].label = QLabel()
self.slider_cw["w"] = QSlider(Qt.Horizontal)
self.slider_cw["w"].valueChanged.connect(self.changeW)
self.slider_cw["w"].label = QLabel()
self.view_label = QLabel("View size: %d x %d" % self.img_aview.shape[:-1])
if unit is None:
unit = self.unit
self.voxel_label = QLabel(
f"%.2f x %.2f x %.2f [{unit}]"
% tuple(self.voxel_size[np.array(self.act_transposition)])
)
self.voxel_label.setToolTip(
"Voxel size[mm]:\n %.4f x %.4f x %.4f"
% tuple(self.voxel_size[np.array(self.act_transposition)])
)
self.last_click_label = QLabel("")
self.last_click_label.setToolTip(
"Position index\nIntensity, Seed label, Segmentation label"
)
combo_view_options = list(VIEW_TABLE)
combo_view = QComboBox(self)
combo_view.activated[str].connect(self.setView)
combo_view.addItems(combo_view_options)
# buttons
self.btn_quit = QPushButton("Return", self)
self.btn_quit.clicked.connect(self.quit)
self.combo_dmask = QComboBox(self)
self.combo_dmask.setToolTip("Change brush size (B)")
# self.combo_dmask.activated.connect(self.changeMask)
self.combo_dmask.currentIndexChanged.connect(self.changeMask)
self.mask_points_tab, aux = self.init_draw_mask(DRAW_MASK, mgrid)
for icon, label in aux:
self.combo_dmask.addItem(icon, label)
self.slice_box.setMaskPoints(
self.mask_points_tab[self.combo_dmask.currentIndex()]
)
# Set middle pencil as default (<NAME>)
self.combo_dmask.setCurrentIndex(init_brush_index)
# -----mjirik---end------
self.status_bar = QStatusBar()
self.seeds_copy = None
vopts = []
vmenu = []
appmenu = []
if mode == "seed" and self.mode_fun is not None:
btn_recalc = QPushButton("Recalculate", self)
btn_recalc.clicked.connect(self.recalculate)
self.btn_save = QPushButton("Advanced seeds", self)
self.btn_save.setToolTip(
"Save/Load seeds for later use and use advanced seed drawing methods"
)
self.btn_save.clicked.connect(self.saveload_seeds)
btn_convex = QPushButton("Convex", self)
btn_convex.clicked.connect(self.updateMaskRegion_btn)
btn_s2b = QPushButton("Seg. to bckgr.", self)
btn_s2b.clicked.connect(self.seg_to_background_seeds)
btn_s2f = QPushButton("Seg. to forgr.", self)
btn_s2f.clicked.connect(self.seg_to_foreground_seeds)
appmenu.append(
QLabel(
"<b>Segmentation mode</b><br><br><br>"
+ "Select the region of interest<br>"
+ "using the mouse buttons:<br><br>"
+ " <i>left</i> - inner region<br>"
+ " <i>right</i> - outer region<br><br>"
)
)
appmenu.append(btn_recalc)
appmenu.append(self.btn_save)
appmenu.append(btn_convex)
appmenu.append(btn_s2f)
appmenu.append(btn_s2b)
appmenu.append(QLabel())
self.volume_label = QLabel("Volume:\n unknown")
appmenu.append(self.volume_label)
if mode == "seed" or mode == "crop" or mode == "mask" or mode == "draw":
# segmentation label
combo_segmentation_label_options = list(
self.slab.keys()
) # ['all', '1', '2', '3', '4']
csl_tooltip = "Used for drawing with LMB or to delete labels"
self.combo_segmentation_label = QComboBox(self)
self.combo_segmentation_label.setToolTip(csl_tooltip)
self.combo_segmentation_label.addItems(combo_segmentation_label_options)
# self.__focus_seed_label_changed_by_gui(combo_segmentation_label_options[self.combo_segmentation_label.currentIndex()])
# self.combo_seed_label.currentIndexChanged[str].connect(self.__focus_seed_label_changed_by_gui)
combo_segmentation_label_label = QLabel("Segmentation label:")
combo_segmentation_label_label.setToolTip(csl_tooltip)
# combo_seeds_label.setTooltip(csl_tooltip)
vmenu.append(combo_segmentation_label_label)
vmenu.append(self.combo_segmentation_label)
# seed labels
combo_seed_label_options = list(
self.seeds_slab.keys()
) # ['all', '1', '2', '3', '4']
csl_tooltip = "Used for drawing with LMB or to delete labels"
self.combo_seed_label = QComboBox(self)
self.combo_seed_label.setToolTip(csl_tooltip)
self.combo_seed_label.addItems(combo_seed_label_options)
self.__focus_seed_label_changed_by_gui(
combo_seed_label_options[self.combo_seed_label.currentIndex()]
)
self.combo_seed_label.currentIndexChanged[str].connect(
self.__focus_seed_label_changed_by_gui
)
# vopts.append(QLabel('Label to delete:'))
# vopts.append(combo_seed_label)
combo_seeds_label_label = QLabel("Seed label")
combo_seeds_label_label.setToolTip(csl_tooltip)
# combo_seeds_label.setTooltip(csl_tooltip)
vmenu.append(combo_seeds_label_label)
vmenu.append(self.combo_seed_label)
btn_del = QPushButton("Del Slice Seeds", self)
btn_del.clicked.connect(self.deleteSliceSeeds)
vmenu.append(None)
vmenu.append(btn_del)
btn_del = QPushButton("Del All Seeds", self)
btn_del.clicked.connect(self.deleteSeedsInAllImage)
vmenu.append(None)
vmenu.append(btn_del)
combo_contour_options = ["fill", "contours", "off"]
combo_contour = QComboBox(self)
combo_contour.activated[str].connect(self.changeContourMode)
combo_contour.addItems(combo_contour_options)
self.changeContourMode(combo_contour_options[combo_contour.currentIndex()])
vopts.append(QLabel("Selection mode:"))
vopts.append(combo_contour)
if mode == "mask":
if button_text is None:
button_text = "Mask region"
if button_callback is None:
button_callback = self.maskRegion
btn_recalc_mask = QPushButton("Recalculate mask", self)
btn_recalc_mask.clicked.connect(self.updateMaskRegion_btn)
btn_all = QPushButton("Select all", self)
btn_all.clicked.connect(self.maskSelectAll)
btn_reset = QPushButton("Reset selection", self)
btn_reset.clicked.connect(self.resetSelection)
btn_reset_seads = QPushButton("Reset seads", self)
btn_reset_seads.clicked.connect(self.resetSeads)
btn_add = QPushButton("Add selection", self)
btn_add.clicked.connect(self.maskAddSelection)
btn_rem = QPushButton("Remove selection", self)
btn_rem.clicked.connect(self.maskRemoveSelection)
btn_mask = QPushButton(button_text, self)
btn_mask.clicked.connect(button_callback)
appmenu.append(
QLabel(
"<b>Mask mode</b><br><br><br>"
+ "Select the region to mask<br>"
+ "using the left mouse button<br><br>"
)
)
appmenu.append(self.get_line("h"))
appmenu.append(btn_recalc_mask)
appmenu.append(btn_all)
appmenu.append(btn_reset)
appmenu.append(btn_reset_seads)
appmenu.append(self.get_line("h"))
appmenu.append(btn_add)
appmenu.append(btn_rem)
appmenu.append(self.get_line("h"))
appmenu.append(btn_mask)
appmenu.append(self.get_line("h"))
self.mask_qhull = None
if mode == "crop":
btn_crop = QPushButton("Crop", self)
btn_crop.clicked.connect(self.crop)
appmenu.append(
QLabel(
"<b>Crop mode</b><br><br><br>"
+ "Select the crop region<br>"
+ "using the left mouse button<br><br>"
)
)
appmenu.append(btn_crop)
if mode == "draw":
appmenu.append(
QLabel(
"<b>Manual segmentation<br> mode</b><br><br><br>"
+ "Mark the region of interest<br>"
+ "using the mouse buttons:<br><br>"
+ " <i>left</i> - draw<br>"
+ " <i>right</i> - erase<br>"
+ " <i>middle</i> - vol. erase<br><br>"
)
)
btn_reset = QPushButton("Reset", self)
btn_reset.clicked.connect(self.resetSliceDraw)
vmenu.append(None)
vmenu.append(btn_reset)
combo_erase_options = ["inside", "outside"]
combo_erase = QComboBox(self)
combo_erase.activated[str].connect(self.changeEraseMode)
combo_erase.addItems(combo_erase_options)
self.changeEraseMode(combo_erase_options[combo_erase.currentIndex()])
vopts.append(QLabel("Volume erase mode:"))
vopts.append(combo_erase)
if appmenu_text is not None:
appmenu.append(QLabel(appmenu_text))
hbox = QHBoxLayout()
vbox = QVBoxLayout()
vbox_left = QVBoxLayout()
self.vbox_app = QVBoxLayout()
self.vbox_plugins = QVBoxLayout()
hbox.addWidget(self.slice_box)
hbox.addWidget(self.slider)
vbox_left.addWidget(self.slider.label)
vbox_left.addWidget(self.view_label)
vbox_left.addWidget(self.voxel_label)
# vbox_left.addWidget(QLabel())
vbox_left.addWidget(self.last_click_label)
vbox_left.addWidget(QLabel())
vbox_left.addWidget(QLabel("View plane:"))
vbox_left.addWidget(combo_view)
vbox_left.addWidget(self.get_line())
vbox_left.addWidget(self.slider_cw["c"].label)
vbox_left.addWidget(self.slider_cw["c"])
vbox_left.addWidget(self.slider_cw["w"].label)
vbox_left.addWidget(self.slider_cw["w"])
vbox_left.addWidget(self.get_line())
vbox_left.addWidget(QLabel("Drawing brush:"))
vbox_left.addWidget(self.combo_dmask)
for ii in vopts:
vbox_left.addWidget(ii)
for ii in vmenu:
if ii is None:
vbox_left.addStretch(1)
else:
vbox_left.addWidget(ii)
for ii in appmenu:
if ii is None:
self.vbox_app.addStretch(1)
else:
self.vbox_app.addWidget(ii)
self.vbox_app.addLayout(self.vbox_plugins)
self.vbox_app.addStretch(1)
self.vbox_app.addWidget(self.btn_quit)
hbox.addLayout(vbox_left)
hbox.addWidget(self.get_line("v"))
hbox.addLayout(self.vbox_app)
vbox.addLayout(hbox)
vbox.addWidget(self.status_bar)
self.my_layout = vbox
self.setLayout(vbox)
self.setWindowTitle("Segmentation Editor")
self.__init_keyboard_shortucuts()
self.show()
def __init_keyboard_shortucuts(self):
QtWidgets.QShortcut(
QtGui.QKeySequence(QtCore.Qt.Key_B), self
).activated.connect(self.__key_change_brush)
QtWidgets.QShortcut(
QtGui.QKeySequence(QtCore.Qt.Key_L), self
).activated.connect(self.__key_change_label)
def __key_change_brush(self):
idx = self.combo_dmask.currentIndex()
if idx < len(self.combo_dmask) - 1:
self.combo_dmask.setCurrentIndex(idx + 1)
else:
self.combo_dmask.setCurrentIndex(0)
def __key_change_label(self):
idx = self.combo_seed_label.currentIndex()
if idx < len(self.combo_seed_label) - 1:
self.combo_seed_label.setCurrentIndex(idx + 1)
else:
self.combo_seed_label.setCurrentIndex(0)
def addPlugin(self, plugin):
"""
Add QTSeedEditorWidget
:param plugin:
:return:
"""
self.plugins.append(plugin)
plugin.setData(self.img, self.contours, self.seeds, self.voxel_size)
self.vbox_plugins.addWidget(plugin)
plugin.setRunCallback(self._update_from_plugin)
plugin.setGetDataFromParentCallback(self._get_data)
plugin.setShowStatusCallback(self.showStatus)
plugin.updateUI()
def _update_from_plugin(self, widget, data3d, segmentation, seeds, voxelsize_mm):
if widget is not None:
self.set_image(data3d)
# self.img = data3d
if segmentation is not None:
self.set_contours(segmentation)
# self.contours = segmentation
if seeds is not None:
self.set_seeds(seeds)
if voxelsize_mm is not None:
self.set_voxelsize(voxelsize_mm)
# self.updateMaskRegion()
self.updateSlice()
def _get_data(self):
self.saveSliceSeeds()
return self.img, self.contours, self.seeds, self.voxel_size
def showStatus(self, msg):
self.status_bar.showMessage(QString(msg))
QApplication.processEvents()
def init_draw_mask(self, draw_mask, grid):
mask_points = []
mask_iconlabel = []
for mask, label in draw_mask:
w, h = mask.shape
xx, yy = mask.nonzero()
mask_points.append((xx - w / 2, yy - h / 2))
img = QImage(w, h, QImage.Format_ARGB32)
img.fill(qRgba(255, 255, 255, 0))
for ii in range(xx.shape[0]):
img.setPixel(xx[ii], yy[ii], qRgba(0, 0, 0, 255))
img = img.scaled(QSize(w * grid[0], h * grid[1]))
icon = QIcon(QPixmap.fromImage(img))
mask_iconlabel.append((icon, label))
return mask_points, mask_iconlabel
def saveSliceSeeds(self):
aux = self.slice_box.getSliceSeeds()
if aux is not None:
self.seeds_aview[..., self.actual_slice] = aux
self.seeds_modified = True
else:
self.seeds_modified = False
def updateSlice(self):
self.selectSlice(self.actual_slice)
def updateMaskRegion_btn(self):
self.saveSliceSeeds()
self.updateMaskRegion()
def updateMaskRegion(self):
crp = self.getCropBounds(return_nzs=True)
if crp is not None:
off, cri, nzs = crp
if nzs[0].shape[0] <= 5:
self.showStatus("Not enough points (need >= 5)!")
else:
if not hasattr(self, "contours_old"):
self.contours_old = None
points = np.transpose(nzs)
hull = Delaunay(points)
X, Y, Z = np.mgrid[cri[0], cri[1], cri[2]]
grid = np.vstack([X.ravel(), Y.ravel(), Z.ravel()]).T
simplex = hull.find_simplex(grid)
fill = grid[simplex >= 0, :]
fill = (fill[:, 0], fill[:, 1], fill[:, 2])
if self.contours is None or self.contours_old is None:
self.contours = np.zeros(self.img.shape, np.int8)
self.contours_old = self.contours.copy()
else:
self.contours[self.contours != 2] = 0
self.contours[fill] = 1
self.contours_aview = self.contours.transpose(self.act_transposition)
self.selectSlice(self.actual_slice)
def maskRegion(self):
self.masked[self.contours == 0] = 0
self.img[self.contours != 2] = self.img_min_val
self.contours.fill(0)
self.contours_old = self.contours.copy()
self.seeds.fill(0)
self.selectSlice(self.actual_slice)
def maskAddSelection(self):
self.updateMaskRegion()
if self.contours is None:
return
self.contours[self.contours == 1] = 2
self.contours_old = self.contours.copy()
self.seeds.fill(0)
self.selectSlice(self.actual_slice)
def maskRemoveSelection(self):
self.updateMaskRegion()
if self.contours is None:
return
self.contours[self.contours == 1] = 0
self.contours_old = self.contours.copy()
self.seeds.fill(0)
self.selectSlice(self.actual_slice)
def maskSelectAll(self):
self.updateMaskRegion()
self.seeds[0][0][0] = 1
self.seeds[0][0][-1] = 1
self.seeds[0][-1][0] = 1
self.seeds[0][-1][-1] = 1
self.seeds[-1][0][0] = 1
self.seeds[-1][0][-1] = 1
self.seeds[-1][-1][0] = 1
self.seeds[-1][-1][-1] = 1
self.updateMaskRegion()
self.selectSlice(self.actual_slice)
def resetSelection(self):
self.updateMaskRegion()
if self.contours is None:
return
self.contours.fill(0)
self.contours_old = self.contours.copy()
self.seeds.fill(0)
self.selectSlice(self.actual_slice)
def resetSeads(self):
self.seeds.fill(0)
if self.contours is not None:
self.contours = self.contours_old.copy()
self.contours_aview = self.contours.transpose(self.act_transposition)
self.updateMaskRegion()
self.selectSlice(self.actual_slice)
def updateCropBounds(self):
crp = self.getCropBounds()
if crp is not None:
_, cri = crp
self.contours = np.zeros(self.img.shape, np.int8)
self.contours[cri].fill(1)
self.contours_aview = self.contours.transpose(self.act_transposition)
def focusSliceSlider(self):
self.slider.setFocus(True)
def sliderSelectSlice(self, value):
self.selectSlice(self.n_slices - value)
def scrollSlices(self, inc):
if abs(inc) > 0:
new = self.actual_slice + inc
self.selectSlice(new)
def selectSlice(self, value, force=False):
if not (self.allow_select_slice):
return
if (value < 0) or (value >= self.n_slices):
return
if (value != self.actual_slice) or force:
self.saveSliceSeeds()
if self.seeds_modified:
if self.mode == "crop":
self.updateCropBounds()
elif self.mode == "mask":
self.updateMaskRegion()
if self.contours is None:
contours = None
else:
contours = self.contours_aview[..., int(value)]
slider_val = self.n_slices - value
self.slider.setValue(slider_val)
self.slider.label.setText("Slice: %d / %d" % (slider_val, self.n_slices))
self.slice_box.setSlice(
self.img_aview[..., int(value)], self.seeds_aview[..., int(value)], contours
)
self.actual_slice = int(value)
def _set_plugins_seeds(self):
# sometimes during init the seeds are None
# this is the way how to update the actual value
for plugin in self.plugins:
plugin.seeds = self.seeds
def _set_plugins_segmentation(self):
# sometimes during init the seeds are None
# this is the way how to update the actual value
for plugin in self.plugins:
plugin.segmentation = self.contours
def setSeeds(self, seeds):
self.seeds = seeds
self._set_plugins_seeds()
def getSeeds(self):
return self.seeds
def getImg(self):
return self.img
def getOffset(self):
return self.offset * self.voxel_size
def getSeedsVal(self, label):
return self.img[self.seeds == label]
def getContours(self):
return self.contours
def setContours(self, contours):
"""
store segmentation
:param contours: segmentation
:return: Nothing
"""
"""
:param contours:
:return:
"""
self.contours = contours
self.contours_aview = self.contours.transpose(self.act_transposition)
self.selectSlice(self.actual_slice)
def changeCW(self, value, key):
rg = self.cw_range[key]
if (value < rg[0]) or (value > rg[1]):
return
if value != self.slice_box.getCW()[key]:
self.slider_cw[key].setValue(value)
self.slider_cw[key].label.setText("%s: %d" % (key.upper(), value))
self.slice_box.setCW(value, key)
self.slice_box.updateSliceCW(self.img_aview[..., int(self.actual_slice)])
def changeC(self, value):
self.changeCW(value, "c")
def changeW(self, value):
self.changeCW(value, "w")
def setView(self, value):
self.last_view_position[self.actual_view] = int(self.actual_slice)
# save seeds
self.saveSliceSeeds()
if self.seeds_modified:
if self.mode == "crop":
self.updateCropBounds()
elif self.mode == "mask":
self.updateMaskRegion()
key = str(value)
self.actual_view = key
self.actual_slice = int(self.last_view_position[key])
self.act_transposition = VIEW_TABLE[key]
self.img_aview = self.img.transpose(self.act_transposition)
self.seeds_aview = self.seeds.transpose(self.act_transposition)
if self.contours is not None:
self.contours_aview = self.contours.transpose(self.act_transposition)
contours = self.contours_aview[..., int(self.actual_slice)]
else:
contours = None
vscale = self.voxel_scale[np.array(self.act_transposition)]
height = self.slice_box.height()
grid = height / float(self.img_aview.shape[1] * vscale[1])
# width = (self.img_aview.shape[0] * vscale[0])[0]
# if width > 800:
# height = 400
# grid = height / float(self.img_aview.shape[1] * vscale[1])
mgrid = (grid * vscale[0], grid * vscale[1])
self.slice_box.resizeSlice(
new_slice_size=self.img_aview.shape[:-1], new_grid=mgrid
)
self.slice_box.setSlice(
self.img_aview[..., int(self.actual_slice)],
self.seeds_aview[..., self.actual_slice],
contours,
)
self.allow_select_slice = False
self.n_slices = self.img_aview.shape[2]
slider_val = self.n_slices - self.actual_slice
self.slider.setRange(1, self.n_slices)
self.slider.setValue(slider_val)
self.allow_select_slice = True
self.slider.label.setText("Slice: %d / %d" % (slider_val, self.n_slices))
self.view_label.setText("View size: %d x %d" % self.img_aview.shape[:-1])
self.adjustSize()
self.adjustSize()
def changeMask(self, val):
self.slice_box.setMaskPoints(self.mask_points_tab[val])
def changeContourMode(self, val):
self.slice_box.contour_mode = str(val)
self.slice_box.updateSlice()
def changeEraseMode(self, val):
self.slice_box.erase_mode = str(val)
def eraseVolume(self, pos, mode):
self.showStatus("Processing...")
xyz = np.array(pos + (self.actual_slice,))
p = np.zeros_like(xyz)
p[np.array(self.act_transposition)] = xyz
p = tuple(p)
if self.seeds[p] > 0:
if mode == "inside":
erase_reg(self.seeds, p, val=0)
elif mode == "outside":
erase_reg(self.seeds, p, val=-1)
idxs = np.where(self.seeds < 0)
self.seeds.fill(0)
self.seeds[idxs] = 1
if self.contours is None:
contours = None
else:
contours = self.contours_aview[..., self.actual_slice]
self.slice_box.setSlice(
self.img_aview[..., self.actual_slice],
self.seeds_aview[..., self.actual_slice],
contours,
)
self.showStatus("Done")
def set_image(self, img):
prev_shape = self.img_aview.shape
self.img = img
self.img_aview = self.img.transpose(self.act_transposition)
self.contours = None
self.contours_aview = None
self.seeds = np.zeros(self.img.shape, np.int8)
self.seeds_aview = self.seeds.transpose(self.act_transposition)
self.seeds_modified = False
self.n_slices = self.img_aview.shape[2]
if np.array_equal(self.img_aview.shape, prev_shape):
# do not reset actual slice position
pass
else:
for ii in VIEW_TABLE.keys():
self.last_view_position[ii] = 0
self.actual_slice = int(0)
vscale = self.voxel_scale[np.array(self.act_transposition)]
height = self.slice_box.height()
grid = height / float(self.img_aview.shape[1] * vscale[1])
mgrid = (grid * vscale[0], grid * vscale[1])
self.slice_box.resizeSlice(
new_slice_size=self.img_aview.shape[:-1], new_grid=mgrid
)
self.slice_box.setSlice(
self.img_aview[..., self.actual_slice],
self.seeds_aview[..., self.actual_slice],
None,
)
self.allow_select_slice = False
self.slider.setValue(self.actual_slice + 1)
self.slider.setRange(1, self.n_slices)
self.allow_select_slice = True
self.slider.label.setText(
"Slice: %d / %d" % (self.actual_slice + 1, self.n_slices)
)
self.view_label.setText("View size: %d x %d" % self.img_aview.shape[:-1])
self.selectSlice(self.actual_slice)
# self.slice_box.updateSlice()
def getCropBounds(self, return_nzs=False, flat=False):
nzs = self.seeds.nonzero()
cri = []
flag = True
for ii in range(3):
if nzs[ii].shape[0] == 0:
flag = False
break
smin, smax = np.min(nzs[ii]), np.max(nzs[ii])
if not (flat):
if smin == smax:
flag = False
break
cri.append((smin, smax))
if flag:
cri = np.array(cri)
out = []
offset = []
for jj, ii in enumerate(cri):
out.append(slice(ii[0], ii[1] + 1))
offset.append(ii[0])
if return_nzs:
return np.array(offset), tuple(out), nzs
else:
return np.array(offset), tuple(out)
else:
return None
def crop(self):
self.showStatus("Processing...")
crp = self.getCropBounds()
if crp is not None:
offset, cri = crp
crop = self.img[cri]
self.img = np.ascontiguousarray(crop)
self.offset += offset
self.showStatus("Done")
else:
self.showStatus("Region not selected!")
self.set_image(self.img)
def seg_to_background_seeds(self, event):
self.saveSliceSeeds()
self.seeds[self.seeds < 3] = 0
from PyQt5.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
self.seeds[
(self.contours == 1) & (self.seeds < 3)
] = self.BACKGROUND_NOMODEL_SEED_LABEL
self.contours[...] = 0
self.updateSlice()
def seg_to_foreground_seeds(self, event):
self.saveSliceSeeds()
self.seeds[self.seeds < 3] = 0
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
self.seeds[
(self.contours == 1) & (self.seeds < 3)
] = self.FOREGROUND_NOMODEL_SEED_LABEL
self.contours[...] = 0
self.updateSlice()
def saveload_seeds(self, event):
if self.seeds_copy is None:
self.seeds_copy = self.seeds.copy()
self.seeds[...] = 0
if self.contours is not None:
self.contours[:] = 0
# print "save"
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
self.btn_save.setText("Simple seeds")
else:
# from PyQt4.QtCore import pyqtRemoveInputHook
# pyqtRemoveInputHook()
# import ipdb; ipdb.set_trace()
self.seeds[self.seeds_copy > 0] = self.seeds_copy[self.seeds_copy > 0]
self.seeds_copy = None
self.btn_save.setText("Advanced seeds")
self.updateSlice()
def recalculate(self, event):
self.saveSliceSeeds()
if np.abs(np.min(self.seeds) -
|
np.max(self.seeds)
|
numpy.max
|
import cv2
import numpy as np
import os
from objects.bbox import BBox
from objects.image import Image
class BBoxList:
def __init__(self):
self.data = []
def __len__(self):
return len(self.data)
def reduce_to_classes(self, class_list):
new_data = []
for d in self.data:
if d.class_name in class_list:
new_data.append(d)
self.data = new_data
def append(self, bbox: BBox):
self.data.append(bbox)
def __getitem__(self, index):
return self.data[index]
def draw_on(self, img: Image, line_thickness:int=1, font_scale:int=0.3, line_height=10, pretty=True)->Image:
img = img.to_rgb()
line_type = cv2.LINE_AA if pretty else cv2.LINE_4
for bbox in self.data:
x1,y1,x2,y2 = bbox.to_int()
cv2.rectangle(img,(x1,y1),(x2,y2), (0,255,0), line_thickness, line_type)
cv2.putText(img, f"{bbox.class_name}, {bbox.score:.2f}" , (x1+2,y1+line_height), cv2.FONT_HERSHEY_SIMPLEX , font_scale, (235,255,235), line_thickness, line_type)
return Image.from_rgb_array(img)
def __repr__(self):
result = ""
for i in self.data:
result += str(i) + os.linesep
return result
class BBoxListFrames:
def __init__(self):
self.data = []
def __len__(self):
return len(self.data)
def append(self, bbox_list: BBoxList):
self.data.append(bbox_list)
def reduce_to_classes(self, class_list):
for d in self.data:
d.reduce_to_classes(class_list)
def __getitem__(self, index):
return self.data[index]
def __repr__(self):
counts = []
for bbox_list in self.data:
counts.append(len(bbox_list))
result = f"BBoxListFrames with {len(counts)} frames with average of {
|
np.mean(counts)
|
numpy.mean
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 15:12:49 2016
@author: uzivatel
"""
import numpy as np
import timeit
from multiprocessing import Pool, cpu_count
from functools import partial
from sys import platform
import scipy
from copy import deepcopy
from ..qch_functions import overlap_STO, dipole_STO, quadrupole_STO, norm_GTO
from ..positioningTools import fill_basis_transf_matrix
from .general import Coordinate
# nepocitat self.exct_spec[exct_i]['coeff_mat']=M_mat pokud neni nutne - zdrzuje
def _ao_over_line(Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,indx):
'''
This Function calculates row in overlap matrix
'''
# print('Start of parallel calculation witj process id:',os.getpid())
NAO=len(Coor_lin)
AO_over_row=np.zeros(NAO)
for ii in range(indx,NAO):
AO_over_row[ii]=overlap_STO(Coor_lin[indx],Coor_lin[ii],np.array(Coeffs_lin[indx]),np.array(Coeffs_lin[ii]),np.array(Exp_lin[indx]),np.array(Exp_lin[ii]),np.array(Orient_lin[indx]),np.array(Orient_lin[ii]))
# print(os.getpid())
return AO_over_row
def _dip_over_line(Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,indx):
'''
This Function calculates row in dipole matrix
'''
NAO=len(Coor_lin)
Dip_over_row=np.zeros((NAO,3))
for ii in range(indx,NAO):
Dip_over_row[ii,:]=dipole_STO(Coor_lin[indx],Coor_lin[ii],np.array(Coeffs_lin[indx]),np.array(Coeffs_lin[ii]),np.array(Exp_lin[indx]),np.array(Exp_lin[ii]),np.array(Orient_lin[indx]),np.array(Orient_lin[ii]))
return Dip_over_row
def _quad_over_line(Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,only_triangle,indx):
'''
This Function calculates row in quadrupole matrix
'''
NAO=len(Coor_lin)
Quad_over_row=np.zeros((NAO,6))
if only_triangle:
start=indx
else:
start=0
for ii in range(start,NAO):
Rik=Coor_lin[ii]-Coor_lin[indx]
R0=np.zeros(3)
Quad_over_row[ii,:]=quadrupole_STO(R0,Rik,np.array(Coeffs_lin[indx]),np.array(Coeffs_lin[ii]),np.array(Exp_lin[indx]),np.array(Exp_lin[ii]),np.array(Orient_lin[indx]),np.array(Orient_lin[ii]))
return Quad_over_row
class Atom:
''' Class containing atom type and index(position in structure)
type : string
Atom type e.g. 'C' or 'H'...
indx : integer
Position of atom in structure class. !Starting from 0!
'''
def __init__(self,typ,indx):
self.type=typ
self.indx=int(indx)
class AO:
''' Class containing all information about atomic orbitals
name : string
Name of the atomic basis e.g. 6-31G*,..
coeff : list of numpy.arrays of real
For every atomic orbital contains expansion coefficients of STO orbital
into GTO (more expained in Notes)
exp : list of numpy.arrays of real
For every atomic orbital contains exponents for GTO obitals in expansion
of STO (more explaind in Notes)
coor : Coordinate class - position units managed
Information about center of every atomic orbital. Units are coor.units.
Dimension of coor.value is (dimension Norbitals x 3)
type : list of string and integer (dimension Norbitals x 2)
Orbital types for every orbital (for example ``'s', 'p', 'd', '5d', 'f', '7f'``)
atom : list of Atom class (dimesion Norbitals)
For every orbital there is a list with atom information. (atom[i].indx
index of the atom in structure class, atom[i].type string with atom type)
nao : integer
Number of orbitals
nao_orient : integer
number of atomic orbitals with orientation (for 's' type orbital there
is only one orientation, for 'p' orbital there are 3 orientaions - x,y
and z and so on for oter orbitals) = total number of atomic orbital
basis functions
orient : list (dimension Norbitals)
For every atomic orbital type there is a list with possible atomic orbital
spatial orientations (e.g. one possible orientation could be for f orbital
[2,0,1] which correspond to X^2Z spatial orinetation or for d orbital [0,1,1]
correspond to YZ spatial orientation)
indx_orient : list (dimension Norbitals_orient)
For every spatialy oriented orbital there is a list with a number of atomic
orbitals to which this orientation corresponds, at the fisrt position and
orientation of the orbital at the second position. (e.g [2, [0, 1, 0]]
which means orientation in y direction of third orbital (numbering from 0)
which is p type orbital (summ of all numbers in orient. is 1))
overlap : numpy.array of real (dimension N_AO_orient x N_AO_orient)
Overlap matrix between AO: overlap[i,j]=<AO_i|AO_j>
dipole : dictionary
* **dipole['Dip_X']** = numpy.array of real (dimension N_AO_orient x N_AO_orient)
with dipole x coordinate in AO basis: dipole['Dip_X'][i,j]=<AO_i|x|AO_j>
* **dipole['Dip_Y']** = numpy.array of real (dimension N_AO_orient x N_AO_orient)
with dipole y coordinate in AO basis: dipole['Dip_Y'][i,j]=<AO_i|y|AO_j>
* **dipole['Dip_Z']** = numpy.array of real (dimension N_AO_orient x N_AO_orient)
with dipole z coordinate in AO basis: dipole['Dip_Z'][i,j]=<AO_i|z|AO_j>
grid : list of numpy arrays of float (dimension Nao x Grid_Nx x Grid_Ny x Grid_Nz)
Slater orbital basis evaluated on given grid
quadrupole : numpy.array of real (dimension 6 x N_AO_orient x N_AO_orient)
quadrupole components in AO basis:
* quadrupole[0,:,:] = quadrupole xx matrix <AO_i|x^2|AO_j>
* quadrupole[1,:,:] = quadrupole xy matrix <AO_i|xy|AO_j>
* quadrupole[2,:,:] = quadrupole xz matrix <AO_i|xz|AO_j>
* quadrupole[3,:,:] = quadrupole yy matrix <AO_i|yy|AO_j>
* quadrupole[4,:,:] = quadrupole yz matrix <AO_i|yz|AO_j>
* quadrupole[5,:,:] = quadrupole zz matrix <AO_i|zz|AO_j>
Functions
-----------
add_orbital :
Add atomic orbital including expansion coefficients into gaussian orbitals
coordinates, type, atom on which is centered.
rotate :
Rotate the atomic orbitals and all aditional quantities by specified
angles in radians in positive direction.
rotate_1 :
Inverse totation to rotate
move :
Moves the atomic orbitals and all aditional quantities along specified
vector
copy :
Create 1 to 1 copy of the atomic orbitals with all classes and types.
get_overlap :
Calculate overlap matrix between atomic orbitals
import_multiwfn_overlap :
Imports the overlap matrix from multiwfn output
get_dipole_matrix :
Calculate dipoles between each pair of atomic orbitals and outputs it
as matrix
get_quadrupole :
Calculate quadrupole moments between each pair of atomic orbitals and
outputs it as matrix
get_slater_ao_grid :
Evaluate slater atomic orbital on given grid
get_all_slater_grid
Evalueate all slater orbitals on given grid (create slater orbital basis
on given grid)
Notes
----------
Expansion of STO (slater orbital) into GTO (gaussian orbital) bassis
is defined as: STO=Sum(coef*GTO(r,exp)*NormGTO(r,exp)) where r is center of
the orbital (position of the corresponding atom)
'''
def __init__(self):
self.name='AO-basis'
self.coeff=[]
self.exp=[]
self.coor=None
self.type=[]
self.atom=[]
self.nao=0
self.nao_orient=0
self.orient=[]
self.indx_orient=[]
self.init=False
self.overlap=None
self.dipole=None
self.quadrupole=None
self.grid=None
def add_orbital(self,coeffs,exps,coor,orbit_type,atom_indx,atom_type):
""" Adds atomic orbital including all needed informations
Parameters
----------
coeffs : numpy array or list of floats
Expansion coefficients of the slater atomic orbital into gaussian
atomic orbitals
exps : numpy array or list of floats
Exponents of gaussian orbitals in expansion of the slater atomic
orbital.
coor : Coordinate class
Centers of atomic orbitals (position units managed)
orbit_type : string
Type of the orbital e.g. 's','d','5d'...
atom_indx : integer
index of atom on which orbital is centered
atom_type : string
Atom type on which orbital is cenetered e.g. 'C','H',...
"""
if type(atom_type)==list or type(atom_type)==np.ndarray:
if not self.init:
if type(coeffs)==np.ndarray:
self.coeff=list(coeffs) # it should be list of numpy arrays
elif type(coeffs)==list:
self.coeff=coeffs.copy()
else:
raise IOError('Imput expansion coefficients of AO should be list of numpy arrays or numpy array')
if type(exps)==np.ndarray:
self.exp=list(exps) # it should be list of numpy arrays
elif type(coeffs)==list:
self.exp=exps.copy()
else:
raise IOError('Imput expansion coefficients of AO should be list of numpy arrays or numpy array')
self.coor=Coordinate(coor) # assuming that we all arbital coordinates will be imputed in Bohrs
self.type=orbit_type
if type(atom_indx)==list:
for ii in range(len(atom_indx)):
self.atom.append(Atom(atom_type[ii],atom_indx[ii]))
else:
self.atom.append(Atom(atom_type,atom_indx))
self.nao=len(orbit_type)
for ii in range(len(self.type)):
orient=l_orient(self.type[ii])
self.orient.append(orient)
self.nao_orient+=len(orient)
for jj in range(len(orient)):
self.indx_orient.append([ii,orient[jj]])
self.init=True
else:
self.coor.add_coor(coor)
for ii in range(len(orbit_type)):
self.coeff.append(np.array(coeffs[ii],dtype='f8'))
self.exp.append(np.array(exps[ii],dtype='f8'))
self.type.append(orbit_type[ii])
self.atom.append(Atom(atom_type[ii],int(atom_indx[ii])))
self.nao+=1
orient=l_orient(orbit_type[ii])
self.orient.append(orient)
self.nao_orient+=len(orient)
for jj in range(len(orient)):
self.indx_orient.append([self.nao-1,orient[jj]])
else:
if not self.init:
self.coor=Coordinate(coor)
else:
self.coor.add_coor(coor)
self.coeff.append(np.array(coeffs,dtype='f8'))
self.exp.append(np.array(exps,dtype='f8'))
self.type.append(orbit_type)
self.atom.append(Atom(atom_type,atom_indx))
self.nao+=1
orient=l_orient(orbit_type[0])
self.orient.append(orient)
self.nao_orient+=len(orient)
for jj in range(len(orient)):
self.indx_orient.append([self.nao-1,orient[jj]])
self.init=True
def get_overlap(self,nt=0,verbose=False):
""" Calculate overlap matrix between atomic orbitals
Parameters
----------
nt : integer (optional init = 0)
Specifies how many cores should be used for the calculation.
Secial cases:
``nt=0`` all available cores are used for the calculation.
``nt=1`` serial calculation is performed.
verbose : logical (optional init = False)
If ``True`` information about time needed for overlap calculation
will be printed
Notes
---------
Overlap matrix is stored in:\n
**self.overlap** \n
as numpy array of float dimension (Nao_orient x Nao_orient)
"""
# Toto by chtelo urcite zefektivnit pomoci np
if (platform=='cygwin' or platform=="linux" or platform == "linux2") and nt!=1 and nt>=0:
typ='paralell'
elif platform=='win32' or nt==1:
typ='seriall'
else:
typ='seriall_old'
if typ=='seriall' or typ=='paralell':
SS=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
start_time = timeit.default_timer()
''' Convert all imput parameters into matrix which has dimension Nao_orient '''
# prepare imput
Coor_lin=np.zeros((self.nao_orient,3))
Coeffs_lin=[]
Exp_lin=[]
Orient_lin=[]
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
Coor_lin[counter1]=self.coor._value[ii]
Coeffs_lin.append(self.coeff[ii])
Exp_lin.append(self.exp[ii])
Orient_lin.append(self.orient[ii][jj])
counter1+=1
ao_over_line_partial = partial(_ao_over_line, Coor_lin,Coeffs_lin,Exp_lin,Orient_lin)
# Only parameter of this function is number of row whih is calculated
elif typ=='seriall_old':
SS=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
counter1=0
start_time = timeit.default_timer()
percent=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
counter2=0
for kk in range(self.nao):
for ll in range(len(self.orient[kk])):
if counter1>=counter2:
SS[counter1,counter2] = overlap_STO(self.coor._value[ii],self.coor._value[kk],self.coeff[ii],self.coeff[kk],self.exp[ii],self.exp[kk],self.orient[ii][jj],self.orient[kk][ll])
counter2 += 1
counter1 += 1
elapsed = timeit.default_timer() - start_time
if elapsed>60.0:
if percent!=(counter1*10//self.nao_orient)*10:
if percent==0:
print('Overlap matrix calculation progres:')
percent=(counter1*10//self.nao_orient)*10
print(percent,'% ',sep="",end="")
if verbose:
print('Elapsed time for serial overlap matrix allocation:',elapsed)
for ii in range(self.nao_orient):
for jj in range(ii+1,self.nao_orient):
SS[ii,jj]=SS[jj,ii]
if verbose:
print(' ')
self.overlap=np.copy(SS)
if typ=='paralell':
''' Parallel part '''
# print('Prepairing parallel calculation')
if nt>0:
pool = Pool(processes=nt)
else:
pool = Pool(processes=cpu_count())
index_list=range(self.nao_orient)
SS= np.array(pool.map(ao_over_line_partial,index_list))
pool.close() # ATTENTION HERE
pool.join()
elif typ=='seriall':
index_list=range(self.nao_orient)
SS=np.zeros((self.nao_orient,self.nao_orient))
''' Seriall part '''
for ii in range(self.nao_orient):
SS[ii,:]=ao_over_line_partial(index_list[ii])
''' Fill the lower triangle of overlap matrix'''
for ii in range(self.nao_orient):
for jj in range(ii):
SS[ii,jj]=SS[jj,ii]
elapsed = timeit.default_timer() - start_time
if verbose:
if typ=='paralell':
print('Elapsed time for parallel overlap matrix allocation:',elapsed)
elif typ=='seriall':
print('Elapsed time for seriall overlap matrix allocation:',elapsed)
self.overlap=np.copy(SS)
# TODO: include multiwfn script for generation of overlap matrix
def import_multiwfn_overlap(self,filename):
""" Import overlap matrix from multiwfn output
Parameters
----------
filename : string
Name of the input file including the path if needed. (output file
from multiwfn calculation)
Notes
---------
Overlap matrix is stored in:\n
**self.overlap** \n
as numpy array of float dimension (Nao_orient x Nao_orient)
"""
fid = open(filename,'r') # Open the file
flines = fid.readlines() # Read the WHOLE file into RAM
fid.close()
counter=0
Norb=self.nao_orient
SS_inp=np.zeros((Norb,Norb))
for jj in range(Norb//5+1):
for ii in range(5*jj,Norb+1):
if ii!=5*jj:
line = flines[counter]
thisline = line.split()
for kk in range(5):
if kk+5*jj+1<=ii:
if 'D' in thisline[kk+1]:
SS_inp[ii-1,kk+5*jj]=float(thisline[kk+1].replace('D', 'e'))
else:
SS_inp[ii-1,kk+5*jj]=float(thisline[kk+1][:-4]+'e'+thisline[kk+1][-4:])
#print(thisline[kk+1],SS_inp[ii-1,kk+5*jj])
#print(5*jj,ii-1,flines[counter])
counter+=1
for ii in range(Norb):
for jj in range(ii+1,Norb):
SS_inp[ii,jj]=SS_inp[jj,ii]
self.overlap=SS_inp
def get_dipole_matrix(self,nt=0,verbose=False):
""" Calculate dipole matrix between atomic orbitals
Parameters
----------
nt : integer (optional init = 0)
Specifies how many cores should be used for the calculation.
Secial cases:
``nt=0`` all available cores are used for the calculation.
``nt=1`` serial calculation is performed.
verbose : logical (optional init = False)
If ``True`` information about time needed for overlap calculation
will be printed
Notes
---------
Dipole matrix is stored in:\n
**self.dipole** \n
as dictionary of numpy arrays of float dimension (Nao_orient x Nao_orient).
Dictionary has 3 keys: 'Dip_X', 'Dip_Y', 'Dip_Z'. More information can
be found in class documentation
"""
# select platform
if (platform=='cygwin' or platform=="linux" or platform == "linux2") and nt!=1 and nt>=0:
typ='paralell'
elif platform=='win32' or nt==1:
typ='seriall'
else:
typ='seriall_old'
start_time = timeit.default_timer()
SS_dipX=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
SS_dipY=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
SS_dipZ=np.zeros((self.nao_orient,self.nao_orient),dtype='f8')
if typ=='seriall' or typ=='paralell':
''' Convert all imput parameters into matrix which has dimension Nao_orient '''
# prepare imput
Coor_lin=np.zeros((self.nao_orient,3))
Coeffs_lin=[]
Exp_lin=[]
Orient_lin=[]
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
Coor_lin[counter1]=self.coor._value[ii]
Coeffs_lin.append(self.coeff[ii])
Exp_lin.append(self.exp[ii])
Orient_lin.append(self.orient[ii][jj])
counter1+=1
dip_over_line_partial = partial(_dip_over_line, Coor_lin,Coeffs_lin,Exp_lin,Orient_lin)
# Only parameter of this function is number of row whih is calculated
else:
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
counter2=0
for kk in range(self.nao):
for ll in range(len(self.orient[kk])):
if counter1>=counter2:
dipole=dipole_STO(self.coor._value[ii],self.coor._value[kk],self.coeff[ii],self.coeff[kk],self.exp[ii],self.exp[kk],self.orient[ii][jj],self.orient[kk][ll])
SS_dipX[counter1,counter2] = dipole[0]
SS_dipY[counter1,counter2] = dipole[1]
SS_dipZ[counter1,counter2] = dipole[2]
counter2 += 1
counter1 += 1
for ii in range(self.nao_orient):
for jj in range(ii+1,self.nao_orient):
SS_dipX[ii,jj]=SS_dipX[jj,ii]
SS_dipY[ii,jj]=SS_dipY[jj,ii]
SS_dipZ[ii,jj]=SS_dipZ[jj,ii]
elapsed = timeit.default_timer() - start_time
if verbose:
print('Elapsed time slater dipole matrix allocation', elapsed)
self.dipole={}
self.dipole['Dip_X']=np.copy(SS_dipX)
self.dipole['Dip_Y']=np.copy(SS_dipY)
self.dipole['Dip_Z']=np.copy(SS_dipZ)
if typ=='paralell':
''' Parallel part '''
# print('Prepairing parallel calculation')
if nt>0:
pool = Pool(processes=nt)
else:
pool = Pool(processes=cpu_count())
index_list=range(self.nao_orient)
DipMat= np.array(pool.map(dip_over_line_partial,index_list))
pool.close() # ATTENTION HERE
pool.join()
elif typ=='seriall':
index_list=range(self.nao_orient)
DipMat=np.zeros((self.nao_orient,self.nao_orient,3))
''' Seriall part '''
for ii in range(self.nao_orient):
DipMat[ii,:]=dip_over_line_partial(index_list[ii])
if typ=='seriall' or typ=='paralell':
''' Fill the lower triangle of overlap matrix'''
for ii in range(self.nao_orient):
for jj in range(ii):
DipMat[ii,jj,:]=DipMat[jj,ii,:]
elapsed = timeit.default_timer() - start_time
if verbose:
if typ=='paralell':
print('Elapsed time for parallel slater dipole matrix allocation:',elapsed)
elif typ=='seriall':
print('Elapsed time for seriall slater dipole matrix allocation:',elapsed)
if typ=='seriall' or typ=='paralell':
self.dipole={}
self.dipole['Dip_X']=np.zeros((self.nao_orient,self.nao_orient))
self.dipole['Dip_Y']=np.zeros((self.nao_orient,self.nao_orient))
self.dipole['Dip_Z']=np.zeros((self.nao_orient,self.nao_orient))
self.dipole['Dip_X'][:,:]=np.copy(DipMat[:,:,0])
self.dipole['Dip_Y'][:,:]=np.copy(DipMat[:,:,1])
self.dipole['Dip_Z'][:,:]=np.copy(DipMat[:,:,2])
def get_quadrupole(self,nt=0,verbose=False):
""" Calculate quadrupole matrix between atomic orbitals
Parameters
----------
nt : integer (optional init = 0)
Specifies how many cores should be used for the calculation.
Secial cases:
``nt=0`` all available cores are used for the calculation.
``nt=1`` serial calculation is performed.
verbose : logical (optional init = False)
If ``True`` information about time needed for overlap calculation
will be printed
Notes
---------
Dipole matrix is stored in:\n
**self.quadrupole** \n
as numpy array of float dimension (6 x Nao_orient x Nao_orient) and
ordering of quadrupole moments is: xx, xy, xz, yy, yz, zz \n \n
quadrupoles are defined as ``Qij(mu,nu) = \int{AO_mu(r+R_mu)*ri*rj*AO_nu(r+R_mu)}``\n
The AO_mu is shifted to zero making the quadrupoles independent to coordinate shifts
"""
QuadMat=np.zeros((6,self.nao_orient,self.nao_orient),dtype='f8')
start_time = timeit.default_timer()
do_faster=False
if (self.dipole is not None) and (self.overlap is not None):
do_faster=True
# choose platform for calculation
if (platform=='cygwin' or platform=="linux" or platform == "linux2") and nt!=1 and nt>=0:
typ='paralell'
elif platform=='win32' or nt==1:
typ='seriall'
else:
typ='seriall_old'
if typ=='seriall' or typ=='paralell':
''' Convert all imput parameters into matrix which has dimension Nao_orient '''
# prepare imput
Coor_lin=np.zeros((self.nao_orient,3))
Coeffs_lin=[]
Exp_lin=[]
Orient_lin=[]
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
Coor_lin[counter1]=self.coor._value[ii]
Coeffs_lin.append(self.coeff[ii])
Exp_lin.append(self.exp[ii])
Orient_lin.append(self.orient[ii][jj])
counter1+=1
quad_over_line_partial = partial(_quad_over_line, Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,do_faster)
# Only parameter of this function is number of row whih is calculated
else:
counter1=0
for ii in range(self.nao):
for jj in range(len(self.orient[ii])):
counter2=0
for kk in range(self.nao):
for ll in range(len(self.orient[kk])):
Rik=self.coor._value[kk]-self.coor._value[ii]
R0=np.zeros(3)
QuadMat[:,counter1,counter2]=quadrupole_STO(R0,Rik,self.coeff[ii],self.coeff[kk],self.exp[ii],self.exp[kk],self.orient[ii][jj],self.orient[kk][ll])
counter2 += 1
counter1 += 1
elapsed = timeit.default_timer() - start_time
print('Elapsed time for slater quadrupole matrix allocation:',elapsed)
if typ=='paralell':
''' Parallel part '''
# print('Prepairing parallel calculation')
if nt>0:
pool = Pool(processes=nt)
else:
pool = Pool(processes=cpu_count())
index_list=range(self.nao_orient)
QuadMat_tmp= np.array(pool.map(quad_over_line_partial,index_list))
pool.close() # ATTENTION HERE
pool.join()
elif typ=='seriall':
index_list=range(self.nao_orient)
''' Seriall part '''
for ii in range(self.nao_orient):
QuadMat[:,ii,:]=np.swapaxes(quad_over_line_partial(index_list[ii]),0,1)
''' Fill the lower triangle of overlap matrix'''
if typ=='seriall' and do_faster:
for ii in range(self.nao_orient):
for jj in range(ii):
counter=0
Rji=self.coor._value[self.indx_orient[ii][0]]-self.coor._value[self.indx_orient[jj][0]]
Rj=self.coor._value[self.indx_orient[jj][0]]
Dji=np.array([self.dipole['Dip_X'][jj,ii],self.dipole['Dip_Y'][jj,ii],self.dipole['Dip_Z'][jj,ii]])
SSji=self.overlap[jj,ii]
for kk in range(3):
for ll in range(kk,3):
QuadMat[counter,ii,jj]=QuadMat[counter,jj,ii]-Rji[kk]*Dji[ll]-Rji[ll]*Dji[kk]+(Rj[kk]*Rji[ll]+Rj[ll]*Rji[kk]+Rji[kk]*Rji[ll])*SSji
counter+=1
if typ=='paralell' and do_faster:
for ii in range(self.nao_orient):
for jj in range(ii):
counter=0
Rji=self.coor._value[self.indx_orient[ii][0]]-self.coor._value[self.indx_orient[jj][0]]
Rj=self.coor._value[self.indx_orient[jj][0]]
Dji=np.array([self.dipole['Dip_X'][jj,ii],self.dipole['Dip_Y'][jj,ii],self.dipole['Dip_Z'][jj,ii]])
SSji=self.overlap[jj,ii]
for kk in range(3):
for ll in range(kk,3):
QuadMat_tmp[ii,jj,counter]=QuadMat_tmp[jj,ii,counter]-Rji[kk]*Dji[ll]-Rji[ll]*Dji[kk]+(Rj[kk]*Rji[ll]+Rj[ll]*Rji[kk]+Rji[kk]*Rji[ll])*SSji
counter+=1
QuadMat=np.copy(np.swapaxes(QuadMat_tmp,0,2))
QuadMat=np.copy(np.swapaxes(QuadMat,1,2))
elapsed = timeit.default_timer() - start_time
if verbose:
if typ=='paralell':
print('Elapsed time for parallel slater quadrupole matrix allocation:',elapsed)
elif typ=='seriall':
print('Elapsed time for seriall slater quadrupoele matrix allocation:',elapsed)
self.quadrupole=np.copy(QuadMat)
# def get_quadrupole_old(self,nt=0,verbose=False):
# """ Calculate quadrupole matrix between atomic orbitals
#
# Parameters
# ----------
# nt : integer (optional init = 0)
# Specifies how many cores should be used for the calculation.
# Secial cases:
# ``nt=0`` all available cores are used for the calculation.
# ``nt=1`` serial calculation is performed.
# verbose : logical (optional init = False)
# If ``True`` information about time needed for overlap calculation
# will be printed
#
# Notes
# ---------
# Dipole matrix is stored in:\n
# **self.quadrupole** \n
# as numpy array of float dimension (6 x Nao_orient x Nao_orient) and
# ordering of quadrupole moments is: xx, xy, xz, yy, yz, zz
#
# """
#
# QuadMat=np.zeros((6,self.nao_orient,self.nao_orient),dtype='f8')
# start_time = timeit.default_timer()
#
# do_faster=False
# if (self.dipole is not None) and (self.overlap is not None):
# do_faster=True
#
# # choose platform for calculation
# if (platform=='cygwin' or platform=="linux" or platform == "linux2") and nt!=1 and nt>=0:
# typ='paralell'
# elif platform=='win32' or nt==1:
# typ='seriall'
# else:
# typ='seriall_old'
#
# if typ=='seriall' or typ=='paralell':
# ''' Convert all imput parameters into matrix which has dimension Nao_orient '''
# # prepare imput
# Coor_lin=np.zeros((self.nao_orient,3))
# Coeffs_lin=[]
# Exp_lin=[]
# Orient_lin=[]
# counter1=0
# for ii in range(self.nao):
# for jj in range(len(self.orient[ii])):
# Coor_lin[counter1]=self.coor._value[ii]
# Coeffs_lin.append(self.coeff[ii])
# Exp_lin.append(self.exp[ii])
# Orient_lin.append(self.orient[ii][jj])
# counter1+=1
#
# quad_over_line_partial = partial(_quad_over_line, Coor_lin,Coeffs_lin,Exp_lin,Orient_lin,do_faster)
# # Only parameter of this function is number of row whih is calculated
# else:
# counter1=0
# for ii in range(self.nao):
# for jj in range(len(self.orient[ii])):
# counter2=0
# for kk in range(self.nao):
# for ll in range(len(self.orient[kk])):
# Rik=self.coor._value[kk]-self.coor._value[ii]
# R0=np.zeros(3)
# QuadMat[:,counter1,counter2]=quadrupole_STO(R0,Rik,self.coeff[ii],self.coeff[kk],self.exp[ii],self.exp[kk],self.orient[ii][jj],self.orient[kk][ll])
# counter2 += 1
# counter1 += 1
#
# elapsed = timeit.default_timer() - start_time
# print('Elapsed time for slater quadrupole matrix allocation:',elapsed)
#
# if typ=='paralell':
# ''' Parallel part '''
## print('Prepairing parallel calculation')
# if nt>0:
# pool = Pool(processes=nt)
# else:
# pool = Pool(processes=cpu_count())
# index_list=range(self.nao_orient)
# QuadMat_tmp= np.array(pool.map(quad_over_line_partial,index_list))
# pool.close() # ATTENTION HERE
# pool.join()
# elif typ=='seriall':
# index_list=range(self.nao_orient)
# ''' Seriall part '''
# for ii in range(self.nao_orient):
# QuadMat[:,ii,:]=np.swapaxes(quad_over_line_partial(index_list[ii]),0,1)
#
# ''' Fill the lower triangle of overlap matrix'''
# if typ=='seriall' and do_faster:
# for ii in range(self.nao_orient):
# for jj in range(ii):
# counter=0
# Rji=self.coor._value[self.indx_orient[ii][0]]-self.coor._value[self.indx_orient[jj][0]]
# Rj=self.coor._value[self.indx_orient[jj][0]]
# Dji=np.array([self.dipole['Dip_X'][jj,ii],self.dipole['Dip_Y'][jj,ii],self.dipole['Dip_Z'][jj,ii]])
# SSji=self.overlap[jj,ii]
# for kk in range(3):
# for ll in range(kk,3):
# QuadMat[counter,ii,jj]=QuadMat[counter,jj,ii]-Rji[kk]*Dji[ll]-Rji[ll]*Dji[kk]+(Rj[kk]*Rji[ll]+Rj[ll]*Rji[kk]+Rji[kk]*Rji[ll])*SSji
# counter+=1
# if typ=='paralell' and do_faster:
# for ii in range(self.nao_orient):
# for jj in range(ii):
# counter=0
# Rji=self.coor._value[self.indx_orient[ii][0]]-self.coor._value[self.indx_orient[jj][0]]
# Rj=self.coor._value[self.indx_orient[jj][0]]
# Dji=np.array([self.dipole['Dip_X'][jj,ii],self.dipole['Dip_Y'][jj,ii],self.dipole['Dip_Z'][jj,ii]])
# SSji=self.overlap[jj,ii]
# for kk in range(3):
# for ll in range(kk,3):
# QuadMat_tmp[ii,jj,counter]=QuadMat_tmp[jj,ii,counter]-Rji[kk]*Dji[ll]-Rji[ll]*Dji[kk]+(Rj[kk]*Rji[ll]+Rj[ll]*Rji[kk]+Rji[kk]*Rji[ll])*SSji
# counter+=1
# QuadMat=np.copy(np.swapaxes(QuadMat_tmp,0,2))
# QuadMat=np.copy(np.swapaxes(QuadMat,1,2))
#
# elapsed = timeit.default_timer() - start_time
#
# if verbose:
# if typ=='paralell':
# print('Elapsed time for parallel slater quadrupole matrix allocation:',elapsed)
# elif typ=='seriall':
# print('Elapsed time for seriall slater quadrupoele matrix allocation:',elapsed)
#
# self.quadrupole=np.copy(QuadMat)
def get_slater_ao_grid(self,grid,indx,keep_grid=False,new_grid=True): # Jediny je spravne se spravnou normalizaci
""" Evaluate single slater orbital on given grid
Parameters
----------
grid : Grid class
Information about grid on which slater atomic orbital is evaluated.
indx :
Index of atomic orbital whic is evaluated (position in indx_orient)
keep_grid : logical (optional init = False)
If ``True`` local grid (dependent on orbital center) is kept as
global internal variable in order to avoid recalculation of the
grid for calculation of more orientations of the same orbital.
new_grid : logical (optional init = True)
If ``True`` local grid (dependent on orbital center) is recalculated
and the old one is overwriten. It is needed if local grid for orbital
with different center was previously saved.
Returns
---------
slater_ao_tmp : numpy array of float (dimension Grid_Nx x Grid_Ny x Grid_Nz)
Values of slater orbital on grid points defined by grid.
"""
slater_ao_tmp=np.zeros(np.shape(grid.X))
ii=self.indx_orient[indx][0]
# print(indx,'/',mol.ao_spec['Nao_orient'])
if new_grid:
global X_grid_loc,Y_grid_loc,Z_grid_loc,RR_grid_loc
X_grid_loc=np.add(grid.X,-self.coor._value[ii][0]) # posunu grid vzdy tak abych dostal centrum orbitalu do nuly.
Y_grid_loc=np.add(grid.Y,-self.coor._value[ii][1])
Z_grid_loc=np.add(grid.Z,-self.coor._value[ii][2])
RR_grid_loc=np.square(X_grid_loc)+np.square(Y_grid_loc)+np.square(Z_grid_loc)
# Vytvorena souradnicova sit a vsechny souradnice rozlozeny na grid
# Pro kazdou orientaci pouziji ruzny slateruv atomovy orbital kvuli ruzne normalizaci gaussovskych orbitalu
# Vypocet slaterova orbitalu na gridu pro AO=ii a orientaci ao_comb[jj+index]
if self.type[ii][0] in ['s','p','d','f','5d']:
slater_ao=np.zeros(np.shape(grid.X))
for kk in range(len(self.coeff[ii])):
coef=self.coeff[ii][kk]
exp=self.exp[ii][kk]
r_ao= self.coor._value[ii]
norm=norm_GTO(r_ao,exp,self.indx_orient[indx][1])
c=coef*norm
slater_ao += np.multiply(c,np.exp(np.multiply(-exp,RR_grid_loc)))
else:
raise IOError('Supported orbitals are so far only s,p,d,5d,f orbitals')
#slateruv orbital vytvoren
if self.type[ii][0] in ['s','p','d','f']:
m=self.indx_orient[indx][1][0]
n=self.indx_orient[indx][1][1]
o=self.indx_orient[indx][1][2]
slater_ao_tmp=np.copy(slater_ao)
if m!=0:
slater_ao_tmp=np.multiply(np.power(X_grid_loc,m),slater_ao_tmp)
if n!=0:
slater_ao_tmp=np.multiply(np.power(Y_grid_loc,n),slater_ao_tmp)
if o!=0:
slater_ao_tmp=np.multiply(np.power(Z_grid_loc,o),slater_ao_tmp)
elif self.type[ii][0]=='5d':
orient=self.indx_orient[indx][1]
if orient[0]==(-2):
## 3Z^2-R^2
slater_ao_tmp=np.copy(slater_ao)
slater_ao_tmp=np.multiply(3,np.multiply(
|
np.power(Z_grid_loc,2)
|
numpy.power
|
# -*- coding: utf-8 -*-
from datetime import datetime
from io import StringIO
import re
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, option_context
from pandas.util import testing as tm
import pandas.io.formats.format as fmt
lorem_ipsum = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod"
" tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim"
" veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex"
" ea commodo consequat. Duis aute irure dolor in reprehenderit in"
" voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur"
" sint occaecat cupidatat non proident, sunt in culpa qui officia"
" deserunt mollit anim id est laborum.")
def expected_html(datapath, name):
"""
Read HTML file from formats data directory.
Parameters
----------
datapath : pytest fixture
The datapath fixture injected into a test by pytest.
name : str
The name of the HTML file without the suffix.
Returns
-------
str : contents of HTML file.
"""
filename = '.'.join([name, 'html'])
filepath = datapath('io', 'formats', 'data', 'html', filename)
with open(filepath, encoding='utf-8') as f:
html = f.read()
return html.rstrip()
@pytest.fixture(params=['mixed', 'empty'])
def biggie_df_fixture(request):
"""Fixture for a big mixed Dataframe and an empty Dataframe"""
if request.param == 'mixed':
df = DataFrame({'A': np.random.randn(200),
'B': tm.makeStringIndex(200)},
index=lrange(200))
df.loc[:20, 'A'] = np.nan
df.loc[:20, 'B'] = np.nan
return df
elif request.param == 'empty':
df = DataFrame(index=np.arange(200))
return df
@pytest.fixture(params=fmt._VALID_JUSTIFY_PARAMETERS)
def justify(request):
return request.param
@pytest.mark.parametrize('col_space', [30, 50])
def test_to_html_with_col_space(col_space):
df = DataFrame(np.random.random(size=(1, 3)))
# check that col_space affects HTML generation
# and be very brittle about it.
result = df.to_html(col_space=col_space)
hdrs = [x for x in result.split(r"\n") if re.search(r"<th[>\s]", x)]
assert len(hdrs) > 0
for h in hdrs:
assert "min-width" in h
assert str(col_space) in h
def test_to_html_with_empty_string_label():
# GH 3547, to_html regards empty string labels as repeated labels
data = {'c1': ['a', 'b'], 'c2': ['a', ''], 'data': [1, 2]}
df = DataFrame(data).set_index(['c1', 'c2'])
result = df.to_html()
assert "rowspan" not in result
@pytest.mark.parametrize('df,expected', [
(DataFrame({'\u03c3': np.arange(10.)}), 'unicode_1'),
(DataFrame({'A': ['\u03c3']}), 'unicode_2')
])
def test_to_html_unicode(df, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html()
assert result == expected
def test_to_html_decimal(datapath):
# GH 12031
df = DataFrame({'A': [6.0, 3.1, 2.2]})
result = df.to_html(decimal=',')
expected = expected_html(datapath, 'gh12031_expected_output')
assert result == expected
@pytest.mark.parametrize('kwargs,string,expected', [
(dict(), "<type 'str'>", 'escaped'),
(dict(escape=False), "<b>bold</b>", 'escape_disabled')
])
def test_to_html_escaped(kwargs, string, expected, datapath):
a = 'str<ing1 &'
b = 'stri>ng2 &'
test_dict = {'co<l1': {a: string,
b: string},
'co>l2': {a: string,
b: string}}
result = DataFrame(test_dict).to_html(**kwargs)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('index_is_named', [True, False])
def test_to_html_multiindex_index_false(index_is_named, datapath):
# GH 8452
df = DataFrame({
'a': range(2),
'b': range(3, 5),
'c': range(5, 7),
'd': range(3, 5)
})
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
if index_is_named:
df.index = Index(df.index.values, name='idx')
result = df.to_html(index=False)
expected = expected_html(datapath, 'gh8452_expected_output')
assert result == expected
@pytest.mark.parametrize('multi_sparse,expected', [
(False, 'multiindex_sparsify_false_multi_sparse_1'),
(False, 'multiindex_sparsify_false_multi_sparse_2'),
(True, 'multiindex_sparsify_1'),
(True, 'multiindex_sparsify_2')
])
def test_to_html_multiindex_sparsify(multi_sparse, expected, datapath):
index = MultiIndex.from_arrays([[0, 0, 1, 1], [0, 1, 0, 1]],
names=['foo', None])
df = DataFrame([[0, 1], [2, 3], [4, 5], [6, 7]], index=index)
if expected.endswith('2'):
df.columns = index[::2]
with option_context('display.multi_sparse', multi_sparse):
result = df.to_html()
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('max_rows,expected', [
(60, 'gh14882_expected_output_1'),
# Test that ... appears in a middle level
(56, 'gh14882_expected_output_2')
])
def test_to_html_multiindex_odd_even_truncate(max_rows, expected, datapath):
# GH 14882 - Issue on truncation with odd length DataFrame
index = MultiIndex.from_product([[100, 200, 300],
[10, 20, 30],
[1, 2, 3, 4, 5, 6, 7]],
names=['a', 'b', 'c'])
df = DataFrame({'n': range(len(index))}, index=index)
result = df.to_html(max_rows=max_rows)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('df,formatters,expected', [
(DataFrame(
[[0, 1], [2, 3], [4, 5], [6, 7]],
columns=['foo', None], index=lrange(4)),
{'__index__': lambda x: 'abcd' [x]},
'index_formatter'),
(DataFrame(
{'months': [datetime(2016, 1, 1), datetime(2016, 2, 2)]}),
{'months': lambda x: x.strftime('%Y-%m')},
'datetime64_monthformatter'),
(DataFrame({'hod': pd.to_datetime(['10:10:10.100', '12:12:12.120'],
format='%H:%M:%S.%f')}),
{'hod': lambda x: x.strftime('%H:%M')},
'datetime64_hourformatter')
])
def test_to_html_formatters(df, formatters, expected, datapath):
expected = expected_html(datapath, expected)
result = df.to_html(formatters=formatters)
assert result == expected
def test_to_html_regression_GH6098():
df = DataFrame({
'clé1': ['a', 'a', 'b', 'b', 'a'],
'clé2': ['1er', '2ème', '1er', '2ème', '1er'],
'données1': np.random.randn(5),
'données2': np.random.randn(5)})
# it works
df.pivot_table(index=['clé1'], columns=['clé2'])._repr_html_()
def test_to_html_truncate(datapath):
index = pd.date_range(start='20010101', freq='D', periods=20)
df = DataFrame(index=index, columns=range(20))
result = df.to_html(max_rows=8, max_cols=4)
expected = expected_html(datapath, 'truncate')
assert result == expected
@pytest.mark.parametrize('sparsify,expected', [
(True, 'truncate_multi_index'),
(False, 'truncate_multi_index_sparse_off')
])
def test_to_html_truncate_multi_index(sparsify, expected, datapath):
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
df = DataFrame(index=arrays, columns=arrays)
result = df.to_html(max_rows=7, max_cols=7, sparsify=sparsify)
expected = expected_html(datapath, expected)
assert result == expected
@pytest.mark.parametrize('option,result,expected', [
(None, lambda df: df.to_html(), '1'),
(None, lambda df: df.to_html(border=0), '0'),
(0, lambda df: df.to_html(), '0'),
(0, lambda df: df._repr_html_(), '0'),
])
def test_to_html_border(option, result, expected):
df = DataFrame({'A': [1, 2]})
if option is None:
result = result(df)
else:
with option_context('display.html.border', option):
result = result(df)
expected = 'border="{}"'.format(expected)
assert expected in result
def test_display_option_warning():
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.options.html.border
@pytest.mark.parametrize('biggie_df_fixture', ['mixed'], indirect=True)
def test_to_html(biggie_df_fixture):
# TODO: split this test
df = biggie_df_fixture
s = df.to_html()
buf = StringIO()
retval = df.to_html(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
df.to_html(columns=['B', 'A'], col_space=17)
df.to_html(columns=['B', 'A'],
formatters={'A': lambda x: '{x:.1f}'.format(x=x)})
df.to_html(columns=['B', 'A'], float_format=str)
df.to_html(columns=['B', 'A'], col_space=12, float_format=str)
@pytest.mark.parametrize('biggie_df_fixture', ['empty'], indirect=True)
def test_to_html_empty_dataframe(biggie_df_fixture):
df = biggie_df_fixture
df.to_html()
def test_to_html_filename(biggie_df_fixture, tmpdir):
df = biggie_df_fixture
expected = df.to_html()
path = tmpdir.join('test.html')
df.to_html(path)
result = path.read()
assert result == expected
def test_to_html_with_no_bold():
df = DataFrame({'x': np.random.randn(5)})
html = df.to_html(bold_rows=False)
result = html[html.find("</thead>")]
assert '<strong' not in result
def test_to_html_columns_arg():
df = DataFrame(tm.getSeriesData())
result = df.to_html(columns=['A'])
assert '<th>B</th>' not in result
@pytest.mark.parametrize('columns,justify,expected', [
(MultiIndex.from_tuples(
list(zip(np.arange(2).repeat(2), np.mod(lrange(4), 2))),
names=['CL0', 'CL1']),
'left',
'multiindex_1'),
(MultiIndex.from_tuples(
list(zip(range(4), np.mod(lrange(4), 2)))),
'right',
'multiindex_2')
])
def test_to_html_multiindex(columns, justify, expected, datapath):
df = DataFrame([list('abcd'), list('efgh')], columns=columns)
result = df.to_html(justify=justify)
expected = expected_html(datapath, expected)
assert result == expected
def test_to_html_justify(justify, datapath):
df = DataFrame({'A': [6, 30000, 2],
'B': [1, 2, 70000],
'C': [223442, 0, 1]},
columns=['A', 'B', 'C'])
result = df.to_html(justify=justify)
expected = expected_html(datapath, 'justify').format(justify=justify)
assert result == expected
@pytest.mark.parametrize("justify", ["super-right", "small-left",
"noinherit", "tiny", "pandas"])
def test_to_html_invalid_justify(justify):
# GH 17527
df = DataFrame()
msg = "Invalid value for justify parameter"
with pytest.raises(ValueError, match=msg):
df.to_html(justify=justify)
def test_to_html_index(datapath):
# TODO: split this test
index = ['foo', 'bar', 'baz']
df = DataFrame({'A': [1, 2, 3],
'B': [1.2, 3.4, 5.6],
'C': ['one', 'two', np.nan]},
columns=['A', 'B', 'C'],
index=index)
expected_with_index = expected_html(datapath, 'index_1')
assert df.to_html() == expected_with_index
expected_without_index = expected_html(datapath, 'index_2')
result = df.to_html(index=False)
for i in index:
assert i not in result
assert result == expected_without_index
df.index = Index(['foo', 'bar', 'baz'], name='idx')
expected_with_index = expected_html(datapath, 'index_3')
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
tuples = [('foo', 'car'), ('foo', 'bike'), ('bar', 'car')]
df.index = MultiIndex.from_tuples(tuples)
expected_with_index = expected_html(datapath, 'index_4')
assert df.to_html() == expected_with_index
result = df.to_html(index=False)
for i in ['foo', 'bar', 'car', 'bike']:
assert i not in result
# must be the same result as normal index
assert result == expected_without_index
df.index = MultiIndex.from_tuples(tuples, names=['idx1', 'idx2'])
expected_with_index = expected_html(datapath, 'index_5')
assert df.to_html() == expected_with_index
assert df.to_html(index=False) == expected_without_index
@pytest.mark.parametrize('classes', [
"sortable draggable",
["sortable", "draggable"]
])
def test_to_html_with_classes(classes, datapath):
df = DataFrame()
expected = expected_html(datapath, 'with_classes')
result = df.to_html(classes=classes)
assert result == expected
def test_to_html_no_index_max_rows(datapath):
# GH 14998
df = DataFrame({"A": [1, 2, 3, 4]})
result = df.to_html(index=False, max_rows=1)
expected = expected_html(datapath, 'gh14998_expected_output')
assert result == expected
def test_to_html_multiindex_max_cols(datapath):
# GH 6131
index = MultiIndex(levels=[['ba', 'bb', 'bc'], ['ca', 'cb', 'cc']],
codes=[[0, 1, 2], [0, 1, 2]],
names=['b', 'c'])
columns = MultiIndex(levels=[['d'], ['aa', 'ab', 'ac']],
codes=[[0, 0, 0], [0, 1, 2]],
names=[None, 'a'])
data = np.array(
[[1., np.nan, np.nan], [np.nan, 2., np.nan], [np.nan, np.nan, 3.]])
df = DataFrame(data, index, columns)
result = df.to_html(max_cols=2)
expected = expected_html(datapath, 'gh6131_expected_output')
assert result == expected
def test_to_html_multi_indexes_index_false(datapath):
# GH 22579
df = DataFrame({'a': range(10), 'b': range(10, 20), 'c': range(10, 20),
'd': range(10, 20)})
df.columns = MultiIndex.from_product([['a', 'b'], ['c', 'd']])
df.index = MultiIndex.from_product([['a', 'b'],
['c', 'd', 'e', 'f', 'g']])
result = df.to_html(index=False)
expected = expected_html(datapath, 'gh22579_expected_output')
assert result == expected
@pytest.mark.parametrize('index_names', [True, False])
@pytest.mark.parametrize('header', [True, False])
@pytest.mark.parametrize('index', [True, False])
@pytest.mark.parametrize('column_index, column_type', [
(Index([0, 1]), 'unnamed_standard'),
(Index([0, 1], name='columns.name'), 'named_standard'),
(MultiIndex.from_product([['a'], ['b', 'c']]), 'unnamed_multi'),
(MultiIndex.from_product(
[['a'], ['b', 'c']], names=['columns.name.0',
'columns.name.1']), 'named_multi')
])
@pytest.mark.parametrize('row_index, row_type', [
(Index([0, 1]), 'unnamed_standard'),
(Index([0, 1], name='index.name'), 'named_standard'),
(MultiIndex.from_product([['a'], ['b', 'c']]), 'unnamed_multi'),
(MultiIndex.from_product(
[['a'], ['b', 'c']], names=['index.name.0',
'index.name.1']), 'named_multi')
])
def test_to_html_basic_alignment(
datapath, row_index, row_type, column_index, column_type,
index, header, index_names):
# GH 22747, GH 22579
df = DataFrame(
|
np.zeros((2, 2), dtype=int)
|
numpy.zeros
|
import pandas as pd
import numpy as np
from ..dataModel.dataProcessing import DataContainer, myDataset
import matplotlib.pyplot as plt
from torch import nn
import torch as tc
from sklearn.metrics import *
from tqdm import tqdm
from torch.nn import functional as F
from ..utils import one_hot_embedding, window_padding
import pickle as pkl
import copy
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, WeightedRandomSampler
import warnings
myseed = 45237552
np.random.seed(120)
tc.manual_seed(myseed)
# Model Container Class Definition
class ModelContainer():
def __init__(self, **kwargs):
if "name" in kwargs:
self.name = kwargs["name"]
if "device" in kwargs:
self.device = kwargs["device"]
else:
self.device = tc.device("cuda:0" if tc.cuda.is_available else "cpu")
print(f"Model container using {self.device}")
if "optimizer" in kwargs:
self.optimizer = kwargs["optimizer"]
else:
self.optimizer = "adam"
if "data" in kwargs:
assert type(kwargs["data"]) == DataContainer
self.myData = kwargs["data"]
def printModel(self):
print(self)
def show_feature_importance(self, columns, flight_id=0,
adjust_proba_window=False,
# remove_constant=False,
class_interest = None,
show_largest_change= False,
plot_save = None, LIMIT=None,
plot = True,figsize= (10,10)):
"""
To be used only when a precursor subclass was created
Parameters
----------
columns : list of columns names
flight_id : int, optional
id of flight of interest. The default is 0 (only one flight).
figsize : set, optional
width and length of canvas. The default is (30,30).
remove_constant : bool, optional
removes features that had a constant value. The default is False
Returns
-------
None.
"""
if "cuda" in self.device:
proba_time = self.proba_time.cpu().detach().numpy()
precursor_proba = self.precursor_proba.cpu().detach().numpy()
else:
proba_time = self.proba_time.detach().numpy()
precursor_proba = self.precursor_proba.detach().numpy()
if adjust_proba_window:
raise NotImplementedError
# proba_time = window_padding(proba_time[flight_id,:].flatten(), self.x_train.shape[1])
else:
if (class_interest is None) or (self.n_classes==1):
proba_time = proba_time[flight_id, :]
else:
if (len(proba_time.shape) ==3) and (class_interest is not None):
proba_time = proba_time[flight_id, :, class_interest]
elif (len(proba_time.shape) ==3) and (class_interest is None):
raise ValueError("class_interest is not set")
time_step_masks = np.where(proba_time > self.threshold)[0] # proba_time.shape= N,T | [True True False False True True]
diff_time_step_masks = np.diff(time_step_masks) # [0, -1, 0, 1, 0]
where_jump = np.where(diff_time_step_masks > 1)[0] +1 # 3 + 1 = np.array([4])
# self.first_flag_index = []
self.multiple_precursors = [False if where_jump.shape[0] == 0 else True][0]
# Search where precursor proba > threshold and obtain feature precursor score
if where_jump.shape[0] == 0: # if empty then only precursor
time_step_mask = time_step_masks
self.first_flag_index = time_step_mask[0]
temp = precursor_proba[flight_id, time_step_mask, :] # precursor_proba.shape = N, T, D
if show_largest_change:
precursor_proba_val_to_plot = np.zeros(temp.shape[-1])
for feature in range(temp.shape[-1]):
precursor_proba_val_to_plot[feature] = abs(temp[0, feature]- temp[-1, feature])
else:
precursor_proba_val_to_plot = np.average(abs(temp-0.5), axis=0)
sorted_avg = list(np.argsort(precursor_proba_val_to_plot)[::-1])
# if remove_constant:
# for i in range(precursor_proba.shape[-1]):
# if np.std(precursor_proba[flight_id, time_step_mask, i]) <= 1e-4:
# sorted_avg.remove(i)
if plot:
plt.figure(figsize=figsize)
if LIMIT is None:
plt.barh(range(len(sorted_avg)), precursor_proba_val_to_plot[sorted_avg][::-1])
plt.yticks(range(len(sorted_avg)), columns[sorted_avg][::-1])
else:
# print(columns[sorted_avg][::-1])
plt.barh(range(LIMIT), precursor_proba_val_to_plot[sorted_avg][::-1][-LIMIT:])
plt.yticks(range(LIMIT), columns[sorted_avg][::-1][-LIMIT:])
plt.grid(True)
if plot_save is not None:
# plt.rcParams['font.size'] = '20'
plt.savefig(plot_save, dpi=600)
plt.show()
self.sorted_features = columns[sorted_avg].values
self.sorted_features_values = precursor_proba_val_to_plot[sorted_avg]
self.list_sorted_features = np.nan
self.list_sorted_features_values = np.nan
else:
split_time_masks = np.split(time_step_masks, where_jump) # create itereable for each interval of time np.split([True True False False True True], array([4]) = [array([True, True, False, False], [True, True)]
self.list_sorted_features = []
self.list_sorted_features_values = []
for e, time_step_mask in enumerate(split_time_masks):
self.first_flag_index = time_step_mask[0]
temp = precursor_proba[flight_id, time_step_mask, :]
if show_largest_change:
precursor_proba_val_to_plot = np.zeros(temp.shape[-1])
for feature in range(temp.shape[-1]):
precursor_proba_val_to_plot[feature] = abs(temp[0, feature]- temp[-1, feature])
else:
precursor_proba_val_to_plot = np.average(abs(temp-0.5), axis=0)
sorted_avg = list(np.argsort(precursor_proba_val_to_plot)[::-1])
# if remove_constant:
# for i in range(precursor_proba.shape[-1]):
# if np.std(precursor_proba[flight_id, time_step_mask, i]) <= 1e-4:
# sorted_avg.remove(i)
self.list_sorted_features.append(columns[sorted_avg].values)
self.list_sorted_features_values.append(precursor_proba_val_to_plot[sorted_avg])
self.sorted_features_values = np.nan
self.sorted_features = np.nan
if plot:
plt.figure(figsize=figsize)
if LIMIT is None:
plt.barh(range(len(sorted_avg)), precursor_proba_val_to_plot[sorted_avg][::-1])
plt.yticks(range(len(sorted_avg)), columns[sorted_avg][::-1])
else:
plt.barh(range(LIMIT), precursor_proba_val_to_plot[sorted_avg][::-1][-LIMIT:])
plt.yticks(range(LIMIT), columns[sorted_avg][::-1][-LIMIT:])
plt.grid(True)
if plot_save is not None:
# plt.rcParams['font.size'] = '20'
plt.savefig(plot_save.replace(".png", f"_{e}.png"), dpi=600)
plt.show()
def save(self, filename):
"""
Save model as a .pt file
:param filename: str
Location of directory and name of the file to be saved
:return:
"""
if "pt" not in filename:
filename = filename + ".pt"
with open(filename, "wb") as f:
tc.save(self, f)
print(f"Model Saved! (path: {filename})")
def count_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
def plot_feature_effects(self, full_length, columns,
flight_id=0, save_path=None,
class_interest = None,
show_precursor_range=False,
rescaling_factor = 0, **kw):
# Initializations
ticks_on = kw.get("ticks_on", True)
counter = 0
width = 4*5.5
if "cuda" in self.device:
proba_time = self.proba_time.cpu().detach().numpy()
precursor_proba = self.precursor_proba.cpu().detach().numpy()
else:
proba_time = self.proba_time.detach().numpy()
precursor_proba = self.precursor_proba.detach().numpy()
if rescaling_factor < 0:
precursor_proba = precursor_proba + rescaling_factor
elif rescaling_factor > 0:
precursor_proba = abs(precursor_proba-rescaling_factor)
else:
precursor_proba = precursor_proba - rescaling_factor
n_features = precursor_proba.shape[2]
if class_interest is not None:
grey_area_class_plot_idx = class_interest
else:
grey_area_class_plot_idx = 0
# if len(proba_time.shape) == 3:
# proba_time = proba_time[flight_id, :, class_interest]
height = 3.5* int(n_features/4+1)
fig, ax1 = plt.subplots(int(n_features/4+1), 4, figsize=(width, height))
fig.tight_layout(pad=6.5)
for i in range(0, int(n_features-4)+1):
for j in range(0, 4):
if i == 0 and j == 0:
if len(proba_time.shape) < 3:
num_loops = 1
else:
num_loops = proba_time.shape[-1]
for class_idx in range(num_loops):
tmp_proba_time = proba_time[flight_id,:] if len(proba_time.shape) < 3 else proba_time[flight_id,:, class_idx]
if proba_time.shape[1] != full_length:
# pad values
r_proba_time = self.window_padding(tmp_proba_time.flatten(),
full_length)
else:
r_proba_time = tmp_proba_time.flatten()
if (show_precursor_range) and (grey_area_class_plot_idx==class_idx):
mask_idx = np.where(r_proba_time > self.threshold)[0]
diff_time_step_masks = np.diff(mask_idx)
where_jump = np.where(diff_time_step_masks > 1)[0] +1
if class_idx == 0:
ax1[i,j].plot(r_proba_time, "r")
else:
ax1[i, j].plot(r_proba_time)
ax1[i,j].set_ylabel("Probability")
ax1[i,j].set_title("Precursor Score")
ax1[i,j].grid(True)
ax1[i,j].set_xlabel("Distance to event")
ax1[i,j].set_yticks(np.arange(0, 1.1, 0.1))
if ticks_on:
x = np.arange(20 , -0.25, -0.25)
ax1[i,j].set_xticks(range(0, full_length, 10))
if (show_precursor_range) and (grey_area_class_plot_idx==class_idx):
if where_jump.shape[0] == 0:
ax1[i,j].axvspan(mask_idx[0], mask_idx[-1], alpha=0.3, color='grey')
else:
mask_idxs = mask_idx
split_time_masks = np.split(mask_idxs, where_jump)
for mask_idx in split_time_masks:
ax1[i,j].axvspan(mask_idx[0], mask_idx[-1], alpha=0.3, color='grey')
if ticks_on:
ax1[i,j].set_xticklabels(x[::10])
continue
if counter == n_features:
break
# In the case the window used does not match the flight length
if precursor_proba.shape[1] != full_length:
# pad values
precursor_value = self.window_padding(precursor_proba[flight_id,
:, counter].flatten(),
full_length)
else:
precursor_value = precursor_proba[flight_id,
:, counter].flatten()
ax1[i,j].plot(precursor_value, "r")
ax1[i,j].set_title(f"Feature: {columns[counter]}")
ax1[i,j].set_xlabel("Distance Remaining")
ax1[i,j].set_ylabel("Probabilities")
ax1[i,j].grid(True)
ax1[i,j].set_yticks(np.arange(0, 1.1, 0.1))
if ticks_on:
x = np.arange(20 , -0.25, -0.25)
ax1[i,j].set_xticks(range(0, full_length, 10))
ax1[i,j].set_xticklabels(x[::10])
if show_precursor_range:
if where_jump.shape[0] == 0:
ax1[i,j].axvspan(mask_idx[0], mask_idx[-1], alpha=0.3, color='grey')
else:
for mask_idx in split_time_masks:
ax1[i,j].axvspan(mask_idx[0], mask_idx[-1], alpha=0.3, color='grey')
counter += 1
if save_path is not None:
text = save_path+"//precursor_proba.pdf"
plt.savefig(text, dpi=600)
def window_padding(self, data_to_pad, full_length, method="interp"):
"""
Used to adjust the size of the windows created by CNN outputs
Parameters
----------
data_to_pad : numpy array
1D array containing the data to pad.
full_length : int
final length to be used.
method : str, optional
methodology to use "interp" or "steps". The default is "interp".
Returns
-------
numpy array
1D array of requested length (full_length).
"""
# seq_len = int(round(full_length/data_to_pad.shape[0]))
if method == "interp":
x = np.linspace(0, full_length-1, full_length)
xp = np.linspace(0, full_length-1, data_to_pad.shape[0]) #might need to be -n instead of 1
fp = data_to_pad
return np.interp(x=x, xp=xp, fp=fp)
elif method =="steps":
temp = np.zeros((full_length, 1))
seq_len = int(round(full_length/data_to_pad.shape[0]))
for n, m in enumerate(range(0, temp.shape[0], seq_len)):
try:
temp[m:m+seq_len] = data_to_pad[n]
except:
temp[m:m+seq_len] = data_to_pad[-1]
return temp
def train_Precursor_binary(self, clf, X_train, y_train,
X_val=None, y_val=None, l2=0,
num_epochs=200, learning_rate=0.01, verbose=0,
model_out_cpu = True, **kw):
# Convert to parameters CUDA Tensors
clf = clf.to(self.device)
self.n_epochs = num_epochs
print_every_epochs = kw.pop("print_every_epochs", 10)
n_important_features = kw.pop("n_important_features", None)
alpha = kw.pop("alpha", 1)
# Binary cross entropy loss, learning rate and l2 regularization
weight = kw.pop("class_weight", None)
if weight is not None:
weight = tc.Tensor(weight).to(self.device)
if len(np.unique(y_train)) <= 2:
criterion = tc.nn.BCELoss(weight=weight)
self.task = "binary"
else:
raise NotImplementedError
# criterion = tc.nn.CrossEntropyLoss(weight=weight)
# self.task = "multiclass"
# self.n_classes = len(np.unique(y_train))
print("Classification task: {}".format(self.task))
if self.optimizer == "adam":
optimizer = tc.optim.Adam(clf.parameters(),
lr=learning_rate, weight_decay=l2)
else:
optimizer = tc.optim.SGD(clf.parameters(),
lr=learning_rate, weight_decay=l2)
# Init loss history and balanced accuracy
hist = np.zeros(num_epochs)
val_hist = np.zeros(num_epochs)
b_acc = np.zeros(num_epochs)
val_b_acc = np.zeros(num_epochs)
f1 = np.zeros(num_epochs)
val_f1 = np.zeros(num_epochs)
# Conversion to tensors
if not tc.is_tensor(X_train):
X_train = tc.Tensor(X_train)
if not tc.is_tensor(y_train):
y_train = tc.Tensor(y_train.flatten())
if self.task == "multiclass":
y_train = y_train.type(tc.int64)
if X_val is not None:
if not tc.is_tensor(X_val):
X_val = tc.Tensor(X_val)
if not tc.is_tensor(y_val):
y_val = tc.Tensor(y_val)
data_val = myDataset(X_val, y_val)
if self.batch_size is None:
self.batch_size = X_train.size(0)
warnings.warn("Setting the batch size = full training set could overwhelm GPU memory")
data_train = myDataset(X_train, y_train)
if self.use_stratified_batch_size is False:
print("Mini-batch strategy: Random sampling")
dataloader_train = DataLoader(data_train, batch_size=self.batch_size, shuffle=True)
else:
print("Mini-batch strategy: Stratified")
# get class counts
weights = []
for label in tc.unique(y_train):
count = len(tc.where(y_train == label)[0])
weights.append(1 / count)
weights = tc.tensor(weights).to(self.device)
samples_weights = weights[y_train.type(tc.LongTensor).to(self.device)]
sampler = WeightedRandomSampler(samples_weights, len(samples_weights), replacement=True)
dataloader_train = DataLoader(data_train, batch_size=self.batch_size, sampler=sampler)
if X_val is not None:
dataloader_val = DataLoader(data_val, batch_size=self.batch_size, shuffle=False)
# Train the model
try:
for epoch in tqdm(range(num_epochs)):
batch_acc = []
batch_val_acc = []
batch_f1 = []
batch_val_f1 = []
# last_it = [x for x,_ in enumerate(range(0, X_train.size(0), self.batch_size))][-2]
for iteration, (batch_x, batch_y) in enumerate(dataloader_train):
batch_x, batch_y = batch_x.to(self.device), batch_y.to(self.device)
optimizer.zero_grad()
if (epoch == 0) and iteration == 0:
for c in tc.unique(y_train):
print(f"Proportion Class {c}: {batch_y[batch_y==c].shape[0]/len(batch_y)}")
outputs = clf(batch_x)
# obtain the loss
if n_important_features is None:
loss = criterion(outputs.flatten(), batch_y.view(-1).flatten())
else:
loss = criterion(outputs.flatten(), batch_y.view(-1).flatten()) + \
alpha * self.precursor_proba_loss(clf.precursor_proba, n_important_features, batch_y)
hist[epoch] = loss.item()
if self.task == "binary":
if "cuda" in self.device:
temp_outpouts = (outputs.cpu().detach().numpy() > self.threshold).astype(int)
y_batch = batch_y.view(-1).cpu().detach().numpy()
b_acc[epoch] = balanced_accuracy_score(y_batch,
temp_outpouts)
else:
temp_outpouts = (outputs.detach().numpy() > self.threshold).astype(int)
y_batch = batch_y.view(-1).detach().numpy()
b_acc[epoch] = balanced_accuracy_score(y_batch,
temp_outpouts)
batch_acc.append(b_acc[epoch])
batch_f1.append(f1_score(y_batch, temp_outpouts, average='binary'))
# Backprop and perform Adam optimisation
loss.backward()
optimizer.step()
if X_val is not None:
with tc.no_grad():
mini_loss = []
for batch_X_val, batch_y_val in dataloader_val:
batch_X_val, batch_y_val = batch_X_val.to(self.device), batch_y_val.to(self.device)
self.valYhat = clf(batch_X_val)
if n_important_features is None:
val_loss = criterion(self.valYhat, batch_y_val.flatten())
else:
val_loss = criterion(self.valYhat, batch_y_val.flatten()) + \
alpha * self.precursor_proba_loss(clf.precursor_proba, n_important_features, batch_y_val)
mini_loss.append(val_loss.item())
if self.task == "binary":
if "cuda" in self.device:
temp_out_y = ( self.valYhat.cpu().detach().numpy() > self.threshold).astype(int)
y_val_batch = batch_y_val.view(-1).cpu().detach().numpy()
val_b_acc[epoch] =balanced_accuracy_score( y_val_batch ,
temp_out_y)
else:
temp_out_y = ( self.valYhat.detach().numpy() > self.threshold).astype(int)
y_val_batch = batch_y_val.view(-1).detach().numpy()
val_b_acc[epoch] =balanced_accuracy_score( y_val_batch,
temp_out_y)
batch_val_acc.append(val_b_acc[epoch])
batch_val_f1.append(f1_score( y_val_batch, temp_out_y, average='binary'))
val_hist[epoch] =
|
np.mean(mini_loss)
|
numpy.mean
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 11 August, 2018
Testing suite for Network class
@author: <NAME>
@email: <EMAIL>
@date: 11 August, 2018
@modified: 16 february, 2021
"""
import unittest
import numpy as np
from topopy import Flow, Network
import os
infolder = "data/in"
outfolder = "data/out"
class NetworkClassTest(unittest.TestCase):
def test_empty_network(self):
net = Network()
# Test PRaster properties
self.assertEqual(net.get_cellsize(), (1.0, -1.0))
self.assertEqual(net.get_dims(), (1, 1))
self.assertEqual(net.get_size(), (1, 1))
self.assertEqual(net.get_extent(), (0.0, 1.0, 0.0, 1.0))
self.assertEqual(net.get_geotransform(), (0.0, 1.0, 0.0, 1.0, 0.0, -1.0))
self.assertEqual(net.get_ncells(), 1)
self.assertEqual(net.get_projection(), "")
# Test PRaster functions
self.assertEqual(net.cell_2_ind(0, 0), .0)
self.assertEqual(net.cell_2_xy(0, 0), (0.5, 0.5))
self.assertEqual(net.xy_2_cell(1, 1), (0, 1))
self.assertEqual(net.ind_2_cell(0), (0, 0))
# Test saving functions
path = outfolder + "/net_delete.dat"
net.save(path)
self.assertEqual(os.path.exists(path), True)
if os.path.exists(path):
os.remove(path)
path = outfolder + "/points_delete.txt"
net.export_to_points(path)
self.assertEqual(os.path.exists(path), True)
if os.path.exists(path):
os.remove(path)
path = outfolder +"/shp_delete.shp"
net.export_to_shp(path)
self.assertEqual(os.path.exists(path), True)
if os.path.exists(path):
os.remove(path)
path = outfolder +"/chi_delete.shp"
net.get_chi_shapefile(path, 0)
self.assertEqual(os.path.exists(path), True)
if os.path.exists(path):
os.remove(path)
# Test other functions
self.assertEqual(np.array_equal(net.get_streams(False), np.array([1]).reshape(1, 1)), True)
self.assertEqual(np.array_equal(net.get_stream_segments(False), np.array([0]).reshape(1, 1)), True)
self.assertEqual(np.array_equal(net.get_stream_orders("strahler", False), np.array([1]).reshape(1, 1)), True)
self.assertEqual(np.array_equal(net.get_stream_orders('shreeve', False), np.array([1]).reshape(1, 1)), True)
self.assertEqual(np.array_equal(net.get_stream_orders('dinosaur', False), np.array([1]).reshape(1, 1)), True)
self.assertEqual(np.array_equal(net.get_stream_poi("heads", "XY"), np.array([]).reshape(0, 2)), True)
self.assertEqual(np.array_equal(net.get_stream_poi("heads", "CELL"), np.array([]).reshape(0, 2)), True)
self.assertEqual(np.array_equal(net.get_stream_poi("heads", "IND"),
|
np.array([])
|
numpy.array
|
'''
Helper functions for the puzzle.py
'''
import random
from simpleimage import SimpleImage
import math
import numpy as np
def create_solution(num_pieces, seed = 2000):
'''
Takes a number of pieces as input
Returns the original order and a random order of pieces as a dictionary with
{orignal_position: {'random_position': random_position}}
Number is by column then row like in
((0, 1, 2)
(3, 4, 5)
(6, 7, 8))
>>> create_solution(4)
{0: {'random_position': 2}, 1: {'random_position': 1}, 2: {'random_position': 0}, 3: {'random_position': 3}}
>>> create_solution(2)
{0: {'random_position': 0}, 1: {'random_position': 1}}
'''
correct_order = [i for i in range(num_pieces)]
random_order = correct_order.copy()
random.Random(seed).shuffle(random_order)
temp = [(correct_order[i], random_order[i]) for i in range(0, len(correct_order))]
solution = {}
for a, b in temp:
#solution.setdefault(a, []).append(b)
solution.setdefault(a, {'random_position': b})
return solution
def create_blank_pieces(num_pieces, piece_width, piece_height):
'''
Takes a number of pieces num_pieces and their width and height as input
Returns a list of num_pieces of blank SimpleImages of given width and height
'''
puzzle_pieces = []
for i in range(num_pieces):
puzzle_pieces.append(SimpleImage.blank(piece_width, piece_height))
return puzzle_pieces
def rotate_coordinates(x, y, angle, width, height):
'''
This takes coordinates of a pixel, the image's width and height as
input. It returns the coordinates of the image rotates by the given
angle. (Pixel coordinates are always integers)
NOTE: The angle has to be a integer multiple of 90 degrees.
Helper function to rotate_image() and rotate_file().
I wrote this function as an extension to SimpleImage so the
remaining programme can be based on it.
It applies an affine transformation based on the matrix
((cos(angle), sin(angle), 0)
(-sin(angle), cos(angle), 0)
(0, , 0 , 1))
This rotates the image around the origin. Therefore the rotated
image afterwards gets offset so that the lower left corner sits
at the origin again.
cf. https://stackabuse.com/affine-image-transformations-in-python-with-numpy-pillow-and-opencv/
The Python Pillow library in contrast implements an
inverse transformation v,w = T^(-1)(x,y) with x, y as output pixels
and v,w as input pixels.
See https://github.com/python-pillow/Pillow/blob/master/src/PIL/Image.py
I assume this is computationally more efficient.
>>> rotate_coordinates(0, 0, 90, 60, 60)
(0, 59)
>>> rotate_coordinates(59, 59, 180, 60, 60)
(0, 0)
>>> rotate_coordinates(59, 0, 270, 60, 60)
(59, 59)
'''
angle = angle % 360
if angle == 0:
offset = np.array([0, 0, 0])
elif angle == 90:
offset =
|
np.array([0, height - 1, 0])
|
numpy.array
|
import math
import numpy as np
import random
class Tiling():
def __init__(self, x_range=[-1.2,0.6], v_range= [-0.07,0.07], n_tiles=4, n_tilings=5, displacement_vector=[1,3]):
self.x_range = np.array(x_range)
self.v_range = np.array(v_range)
self.x = 0
self.v = 0
self.last_action = 0
self.n_tiles = n_tiles # nxn tiles in a tiling (i.e. n_tiles=4 --> each tiling has 4x4 tiles)
self.n_tilings = n_tilings # Number of tilings (grids) overlayed with different offsets
self.displacement = np.array(displacement_vector)
self.init_tilings()
def init_tilings(self):
# List of displacement vectors indexed by tiling number --> [(1,3), (2,6), (4,9) ...] (example for asymmetrical displacement (3,1))
self.tiling_displacement = np.array([self.displacement * i for i in range(self.n_tilings)])
# List of tile widths in each dimension --> [0.3 , 0,25]
self.tile_width = np.array([(self.x_range[1]-self.x_range[0])/self.n_tiles , (self.v_range[1]-self.v_range[0])/self.n_tiles])
# The offset between tilings --> [0.02, 0.045]
self.offset = self.tile_width / (self.n_tilings-1)
self.extra_tiles = np.array([math.ceil(self.offset[k] * self.tiling_displacement[len(self.tiling_displacement)-1][k] / self.tile_width[k]) for k in range(len(self.offset)) ])
self.total_tiles = self.n_tiles + self.extra_tiles
print ("-----------------------------------------------------")
print("v tile width: ", (self.v_range[1]-self.v_range[0])/self.n_tiles)
print("n_tiles: ", self.n_tiles)
print("Extra tiles needed: ", self.extra_tiles)
print ("total tiles: " , self.total_tiles)
print("n_tilings: ", self.n_tilings)
print("Tile width:" , self.tile_width)
print("Tiling displacement:" , self.tiling_displacement)
print("offset: ", self.offset)
print ("-----------------------------------------------------")
def convert_state(self, x, v):
"""
Finds which tile the (x, v) coordinate is in for each tiling and represents the state as an integer corresponding to a binary string.
Principles:
* x // tile_width indicate which tile x is in (0-indexed)
Each tiling is offset in the direction of up and to the right
To account for the fact that each tiling is offset, the offset in the given dimension times the displacement of that tiling is subtracted.
To account for the fact that the bottom left corner is not origo, the start range value is subtracted
Finally, accounting for extra tiles added is added. Extra tiles are there to ensure that all feasible points are within each tiling even after offsetting
* Each state is represented by an element in a state vector
Each state vector element corresponds to one tile in one tiling
The state vector represents the different tiles like this: [t^1_(1,1) , t^1_(1,2) ... t^(1)_(n_tiles,n_tiles) , t^(2)_(1,1) ... ... t^(n_tilings)_(n_tiles,n_tiles)]
If the (x, v) coordinate is in a certain tile for a given tiling, the corresponding element in the state vector will be 1
IGNORE
* Each state can be represented as a binary string where each bit corresponds to one tile in one tiling.
The bitstring represents a state vector where each tile is represented like this: [t^1_(1,1) , t^1_(1,2) ... t^(1)_(n_tiles,n_tiles) , t^(2)_(1,1) ... ... t^(n_tilings)_(n_tiles,n_tiles)]
Each bit corresponds to an element in this state vector (the vector itself is not created)
The bitstring is returned as an integer
"""
#print(self.offset[0] * self.tiling_displacement[len(self.tiling_displacement)-1][0] / self.tile_width[0])
#state = 0
n_features = self.total_tiles[0] * self.total_tiles[1] * self.n_tilings
state = np.zeros(n_features, dtype=int)
print(
|
np.shape(state)
|
numpy.shape
|
# -*- coding: utf-8 -*-
import os
import json
from datetime import datetime
import numpy as np
from matplotlib import pyplot as plt
def visualize_result(
experiment_name,
X_test, Y_test, Y_hat, parameters,
losses=None, save_dir="results"
):
"""
结果可视化
"""
# 没有保存目录时创建
now = datetime.now().strftime("%Y%m%d%H%M%S")
save_dir += "_" + experiment_name + os.sep + now
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# 测试数据适用(仅前2轴)
# 创建显示区域
plt.figure()
# 为了同时显示估计值和真值,设定为hold=“on”
#plt.hold("on")
# x_0 vs y 的显示
plt.subplot(211)
plt.plot(X_test[:, 0], Y_test, "+", label="True")
plt.plot(X_test[:, 0], Y_hat, "x", label="Estimate")
plt.xlabel("x_0")
plt.ylabel("y")
plt.legend()
# x_1 vs y 的显示
plt.subplot(212)
plt.plot(X_test[:, 1], Y_test, "+")
plt.plot(X_test[:, 1], Y_hat, "x")
plt.xlabel("x_1")
plt.ylabel("y")
# 保存参数到文件
# NOTE:json形式是设定文件等数据记述方便的形式
# 其实质是结构化文本文件
# 阅读时请使用适当的文本编辑器
# 使用Python时,标准具备处理json的模块
# (名称也是json模块)
# 其他数据记述形式有yaml、xml等
fn_param = "parameters.json"
with open(save_dir + os.sep + fn_param, "w") as fp:
json_str = json.dumps(parameters, indent=4)
fp.write(json_str)
# 保存图像到文件
fn_fit = "fitting.png" # 各种条件
plt.savefig(save_dir + os.sep + fn_fit)
# 表示损失
if losses is not None:
train_losses, test_losses = losses
# NOTE:损失的推移通常是指数的、
# 多以对数比例显示
x_train = range(len(train_losses))
x_test = range(len(test_losses))
plt.figure()
plt.plot(
x_train, np.log(train_losses),
x_test, np.log(test_losses)
)
plt.xlabel("steps")
plt.ylabel("ln(loss)")
plt.legend(["training loss", "test loss"])
fn_loss = "loss.png"
plt.savefig(save_dir + os.sep + fn_loss)
def flat_nd(xs):
"""
返回numpy.array
"""
return np.c_[tuple([x.flatten() for x in xs])]
def genearate_original_data(
dimension=2, nonlinear=False, num_of_samples=10000, noise_amplitude=0.1
):
"""
用其他方法生成返回变量的来源数据
"""
# 次元は最低でも2とします
if dimension < 2:
raise ValueError("'dimension' must be larger than 2")
# NOTE:输入值x的范围为规定值[0,1]。
# 但是,采样的点决定为均匀随机数。
x_sample = np.random.rand(num_of_samples, dimension)
# NOTE: 返回显示时均匀无噪声的数据
# 即使显示多维数据也不知道、
# 为了方便起见,只动了最初的二维、
# 其他次元全部固定为常数
grid_1d =
|
np.arange(0.0, 1.0, 0.01)
|
numpy.arange
|
import numpy as np
import pandas as pd
from typing import List
from brightwind.transform import transform as tf
from brightwind.analyse.plot import plot_scatter, plot_scatter_by_sector, plot_scatter_wdir
from scipy.odr import ODR, RealData, Model
from scipy.linalg import lstsq
from brightwind.analyse.analyse import momm, _binned_direction_series
from brightwind.transform.transform import offset_wind_direction
# from sklearn.svm import SVR as sklearn_SVR
# from sklearn.model_selection import cross_val_score as sklearn_cross_val_score
from brightwind.utils import utils
import pprint
import warnings
__all__ = ['']
class CorrelBase:
def __init__(self, ref_spd, target_spd, averaging_prd, coverage_threshold=None, ref_dir=None, target_dir=None,
sectors=12, direction_bin_array=None, ref_aggregation_method='mean', target_aggregation_method='mean'):
self.ref_spd = ref_spd
self.ref_dir = ref_dir
self.target_spd = target_spd
self.target_dir = target_dir
self.averaging_prd = averaging_prd
self.coverage_threshold = coverage_threshold
self.ref_aggregation_method = ref_aggregation_method
self.target_aggregation_method = target_aggregation_method
# Get the name of the columns so they can be passed around
self._ref_spd_col_name = ref_spd.name if ref_spd is not None and isinstance(ref_spd, pd.Series) else None
self._ref_spd_col_names = ref_spd.columns if ref_spd is not None and isinstance(ref_spd, pd.DataFrame) else None
self._ref_dir_col_name = ref_dir.name if ref_dir is not None else None
self._tar_spd_col_name = target_spd.name if target_spd is not None else None
self._tar_dir_col_name = target_dir.name if target_dir is not None else None
# Average and merge datasets into one df
self.data = CorrelBase._averager(self, ref_spd, target_spd, averaging_prd, coverage_threshold,
ref_dir, target_dir, ref_aggregation_method, target_aggregation_method)
self.num_data_pts = len(self.data)
self.params = {'status': 'not yet run'}
# The self variables defined below are defined for OrdinaryLeastSquares, OrthogonalLeastSquares and SpeedSort
if ref_dir is not None:
self.sectors = sectors
self.direction_bin_array = direction_bin_array
if direction_bin_array is None:
sector_direction_bins = utils.get_direction_bin_array(sectors)
step = float(max(np.unique(np.diff(sector_direction_bins))))
self._dir_sector_max = [angle for i, angle in enumerate(sector_direction_bins)
if offset_wind_direction(float(angle), step/2) > sector_direction_bins[i-1]]
self._dir_sector_min = self._dir_sector_max.copy()
self._dir_sector_min.insert(0, self._dir_sector_min.pop())
else:
raise NotImplementedError("Analysis using direction_bin_array input not implemented yet.")
# self.sectors = len(direction_bin_array) - 1
# self._dir_sector_max = direction_bin_array[1:]
# self._dir_sector_min = direction_bin_array[:-1]
self._ref_dir_bins = _binned_direction_series(self.data[self._ref_dir_col_name], sectors,
direction_bin_array=self.direction_bin_array
).rename('ref_dir_bin')
self._predict_ref_spd = pd.Series()
def _averager(self, ref_spd, target_spd, averaging_prd, coverage_threshold, ref_dir, target_dir,
ref_aggregation_method, target_aggregation_method):
# If directions sent, concat speed and direction first
if ref_dir is not None:
ref_spd = pd.concat([ref_spd, ref_dir], axis=1)
if target_dir is not None:
target_spd = pd.concat([target_spd, target_dir], axis=1)
data = tf.merge_datasets_by_period(data_1=ref_spd, data_2=target_spd, period=averaging_prd,
coverage_threshold_1=coverage_threshold,
coverage_threshold_2=coverage_threshold,
wdir_column_names_1=self._ref_dir_col_name,
wdir_column_names_2=self._tar_dir_col_name,
aggregation_method_1=ref_aggregation_method,
aggregation_method_2=target_aggregation_method)
if len(data.index) <= 1:
raise ValueError("Not enough overlapping data points to perform correlation.")
return data
def show_params(self):
"""Show the dictionary of parameters"""
pprint.pprint(self.params)
def plot(self, figure_size=(10, 10.2)):
"""
Plots scatter plot of reference versus target speed data. If ref_dir is given as input to the correlation then
the plot is showing scatter subplots for each sector. The regression line and the line of slope 1 passing
through the origin are also shown on each plot.
:param figure_size: Figure size in tuple format (width, height)
:type figure_size: tuple
:returns: A matplotlib figure
:rtype: matplotlib.figure.Figure
**Example usage**
::
import brightwind as bw
data = bw.load_csv(bw.demo_datasets.demo_data)
m2_ne = bw.load_csv(bw.demo_datasets.demo_merra2_NE)
# Correlate by directional sector, using 36 sectors.
ols_cor = bw.Correl.OrdinaryLeastSquares(m2_ne['WS50m_m/s'], data['Spd80mN'],
ref_dir=m2_ne['WD50m_deg'], averaging_prd='1D',
coverage_threshold=0.9, sectors=36)
ols_cor.run()
# To plot the scatter subplots by directional sectors, the regression line and the line of
# slope 1 passing through the origin
ols_cor.plot()
# To set the figure size
ols_cor.plot(figure_size=(20, 20.2))
"""
if self.ref_dir is None:
return plot_scatter(self.data[self._ref_spd_col_name],
self.data[self._tar_spd_col_name],
self._predict(self.data[self._ref_spd_col_name]),
x_label=self._ref_spd_col_name, y_label=self._tar_spd_col_name,
line_of_slope_1=True, figure_size=figure_size)
else:
"""For plotting scatter by sector"""
return plot_scatter_by_sector(self.data[self._ref_spd_col_name],
self.data[self._tar_spd_col_name],
self.data[self._ref_dir_col_name],
trendline_y=self._predict_ref_spd, sectors=self.sectors,
line_of_slope_1=True, figure_size=figure_size)
@staticmethod
def _get_r2(target_spd, predict_spd):
"""Returns the r2 score of the model"""
return 1.0 - (sum((target_spd - predict_spd) ** 2) /
(sum((target_spd - target_spd.mean()) ** 2)))
@staticmethod
def _get_logic_dir_sector(ref_dir, sector_min, sector_max):
if sector_max > sector_min:
logic_sector = ((ref_dir >= sector_min) & (ref_dir < sector_max))
else:
logic_sector = ((ref_dir >= sector_min) & (ref_dir <= 360)) | \
((ref_dir < sector_max) & (ref_dir >= 0))
return logic_sector
def _get_synth_start_dates(self):
none_even_freq = ['5H', '7H', '9H', '10H', '11H', '13H', '14H', '15H', '16H', '17H', '18H', '19H',
'20H', '21H', '22H', '23H', 'D', 'W']
if any(freq in self.averaging_prd for freq in none_even_freq):
ref_time_array = pd.date_range(start=self.data.index[0], freq='-' + self.averaging_prd,
end=self.ref_spd.index[0])
if ref_time_array.empty:
ref_start_date = self.data.index[0]
else:
ref_start_date = ref_time_array[-1]
tar_time_array = pd.date_range(start=self.data.index[0], freq='-' + self.averaging_prd,
end=self.target_spd.index[0])
if tar_time_array.empty:
tar_start_date = self.data.index[0]
else:
tar_start_date = tar_time_array[-1]
else:
ref_start_date = self.ref_spd.index[0]
tar_start_date = self.target_spd.index[0]
return ref_start_date, tar_start_date
def synthesize(self, ext_input=None, ref_coverage_threshold=None, target_coverage_threshold=None):
"""
Apply the derived correlation model to the reference dataset used to create the model. The resulting synthesized
dataset is spliced with the target dataset. That is, where a target value is available, it is used instead of
the synthesized value.
:param ext_input: Optional external dataset to apply the derived correlation model to instead
of the original reference. If this is used, the resulting synthesized
dataset is not spliced with the target dataset.
:type ext_input: pd.Series or pd.DataFrame
:param ref_coverage_threshold: Minimum coverage required when aggregating the reference data to calculate
the synthesised data. If None, it uses the coverage_threshold supplied to the
correlation model.
:type ref_coverage_threshold: float
:param target_coverage_threshold: Minimum coverage required when aggregating the target data to splice with
the calculated synthesised data. If None, it uses the coverage_threshold
supplied to the correlation model.
:type target_coverage_threshold: float
:return: The synthesized dataset.
:rtype: pd.Series or pd.DataFrame
"""
if ref_coverage_threshold is None:
ref_coverage_threshold = self.coverage_threshold
if target_coverage_threshold is None:
target_coverage_threshold = self.coverage_threshold
if ext_input is None:
ref_start_date, target_start_date = self._get_synth_start_dates()
target_spd_averaged = tf.average_data_by_period(self.target_spd[target_start_date:], self.averaging_prd,
coverage_threshold=target_coverage_threshold,
return_coverage=False)
if self.ref_dir is None:
ref_spd_averaged = tf.average_data_by_period(self.ref_spd[ref_start_date:], self.averaging_prd,
coverage_threshold=ref_coverage_threshold,
return_coverage=False)
synth_data = self._predict(ref_spd_averaged)
else:
ref_df = pd.concat([self.ref_spd, self.ref_dir], axis=1, join='inner')
ref_averaged = tf.average_data_by_period(ref_df[ref_start_date:], self.averaging_prd,
wdir_column_names=self._ref_dir_col_name,
coverage_threshold=ref_coverage_threshold,
return_coverage=False)
synth_data = ref_averaged[self._ref_spd_col_name].copy() * np.nan
for params_dict in self.params:
if params_dict['num_data_points'] > 1:
logic_sect = self._get_logic_dir_sector(ref_dir=ref_averaged[self._ref_dir_col_name],
sector_min=params_dict['sector_min'],
sector_max=params_dict['sector_max'])
synth_data[logic_sect] = self._predict(ref_spd=ref_averaged[self._ref_spd_col_name][logic_sect],
slope=params_dict['slope'], offset=params_dict['offset'])
output = target_spd_averaged.combine_first(synth_data)
else:
if self.ref_dir is None:
output = self._predict(ext_input)
else:
raise NotImplementedError
if isinstance(output, pd.Series):
return output.to_frame(name=self.target_spd.name + "_Synthesized")
else:
output.columns = [self.target_spd.name + "_Synthesized"]
return output
# def get_error_metrics(self):
# raise NotImplementedError
class OrdinaryLeastSquares(CorrelBase):
"""
Correlate two datasets against each other using the Ordinary Least Squares method. This accepts two wind speed
Series with timestamps as indexes and an averaging period which merges the datasets by this time period before
performing the correlation.
:param ref_spd: Series containing reference wind speed as a column, timestamp as the index.
:type ref_spd: pd.Series
:param target_spd: Series containing target wind speed as a column, timestamp as the index.
:type target_spd: pd.Series
:param averaging_prd: Groups data by the time period specified here. The following formats are supported
- Set period to '10min' for 10 minute average, '30min' for 30 minute average.
- Set period to '1H' for hourly average, '3H' for three hourly average and so on for '4H', '6H' etc.
- Set period to '1D' for a daily average, '3D' for three day average, similarly '5D', '7D', '15D' etc.
- Set period to '1W' for a weekly average, '3W' for three week average, similarly '2W', '4W' etc.
- Set period to '1M' for monthly average with the timestamp at the start of the month.
- Set period to '1A' for annual average with the timestamp at the start of the year.
:type averaging_prd: str
:param coverage_threshold: Minimum coverage required when aggregating the data to the averaging_prd.
:type coverage_threshold: float
:param ref_dir: Series containing reference wind direction as a column, timestamp as the index.
:type ref_dir: pd.Series
:param sectors: Number of direction sectors to bin in to. The first sector is centered at 0 by
default. To change that behaviour specify 'direction_bin_array' which overwrites
'sectors'.
:type sectors: int
:param direction_bin_array: An optional parameter where if you want custom direction bins, pass an array
of the bins. To add custom bins for direction sectors, overwrites sectors. For
instance, for direction bins [0,120), [120, 215), [215, 360) the list would
be [0, 120, 215, 360]
:type direction_bin_array: List()
:param ref_aggregation_method: Default `mean`, returns the mean of the data for the specified period. Can also
use `median`, `prod`, `sum`, `std`,`var`, `max`, `min` which are shorthands for
median, product, summation, standard deviation, variance, maximum and minimum
respectively.
:type ref_aggregation_method: str
:param target_aggregation_method: Default `mean`, returns the mean of the data for the specified period. Can also
use `median`, `prod`, `sum`, `std`,`var`, `max`, `min` which are shorthands for
median, product, summation, standard deviation, variance, maximum and minimum
respectively.
:type target_aggregation_method: str
:returns: An object representing ordinary least squares fit model
**Example usage**
::
import brightwind as bw
data = bw.load_csv(bw.demo_datasets.demo_data)
m2_ne = bw.load_csv(bw.demo_datasets.demo_merra2_NE)
m2_nw = bw.load_csv(bw.demo_datasets.demo_merra2_NW)
# Correlate wind speeds on a monthly basis.
ols_cor = bw.Correl.OrdinaryLeastSquares(m2_ne['WS50m_m/s'], data['Spd80mN'], averaging_prd='1M',
coverage_threshold=0.95)
ols_cor.run()
# To plot the scatter plot and regression line.
ols_cor.plot()
# To change the plot's size.
ols_cor.plot(figure_size=(12,15))
# To show the resulting parameters.
ols_cor.params
# or
ols_cor.show_params()
# To synthesize data at the target site.
ols_cor.synthesize()
# To synthesize data at the target site using a different external reference dataset.
ols_cor.synthesize(ext_input=m2_nw['WS50m_m/s'])
# To run the correlation without immediately showing results.
ols_cor.run(show_params=False)
# To retrieve the merged and aggregated data used in the correlation.
ols_cor.data
# To retrieve the number of data points used for the correlation
ols_cor.num_data_pts
# To retrieve the input parameters.
ols_cor.averaging_prd
ols_cor.coverage_threshold
ols_cor.ref_spd
ols_cor.ref_aggregation_method
ols_cor.target_spd
ols_cor.target_aggregation_method
# Correlate temperature on an hourly basis using a different aggregation method.
ols_cor = bw.Correl.OrdinaryLeastSquares(m2_ne['T2M_degC'], data['T2m'],
averaging_prd='1H', coverage_threshold=0,
ref_aggregation_method='min', target_aggregation_method='min')
# Correlate by directional sector, using 36 sectors.
ols_cor = bw.Correl.OrdinaryLeastSquares(m2_ne['WS50m_m/s'], data['Spd80mN'],
ref_dir=m2_ne['WD50m_deg'], averaging_prd='1D',
coverage_threshold=0.9, sectors=36)
"""
def __init__(self, ref_spd, target_spd, averaging_prd, coverage_threshold=0.9, ref_dir=None, sectors=12,
direction_bin_array=None, ref_aggregation_method='mean', target_aggregation_method='mean'):
CorrelBase.__init__(self, ref_spd, target_spd, averaging_prd, coverage_threshold, ref_dir=ref_dir,
sectors=sectors, direction_bin_array=direction_bin_array,
ref_aggregation_method=ref_aggregation_method,
target_aggregation_method=target_aggregation_method)
def __repr__(self):
return 'Ordinary Least Squares Model ' + str(self.params)
@staticmethod
def _leastsquare(ref_spd, target_spd):
p, res = lstsq(np.nan_to_num(ref_spd.values.flatten()[:, np.newaxis] ** [1, 0]),
np.nan_to_num(target_spd.values.flatten()))[0:2]
return p[0], p[1]
def run(self, show_params=True):
if self.ref_dir is None:
slope, offset = self._leastsquare(ref_spd=self.data[self._ref_spd_col_name],
target_spd=self.data[self._tar_spd_col_name])
self.params = dict([('slope', slope), ('offset', offset)])
self.params['r2'] = self._get_r2(target_spd=self.data[self._tar_spd_col_name],
predict_spd=self._predict(ref_spd=self.data[self._ref_spd_col_name]))
self.params['num_data_points'] = self.num_data_pts
elif type(self.ref_dir) is pd.Series:
self.params = []
for sector, group in pd.concat([self.data, self._ref_dir_bins],
axis=1, join='inner').dropna().groupby(['ref_dir_bin']):
# print('Processing sector:', sector)
if len(group) > 1:
slope, offset = self._leastsquare(ref_spd=group[self._ref_spd_col_name],
target_spd=group[self._tar_spd_col_name])
predict_ref_spd_sector = self._predict(ref_spd=group[self._ref_spd_col_name],
slope=slope, offset=offset)
r2 = self._get_r2(target_spd=group[self._tar_spd_col_name],
predict_spd=predict_ref_spd_sector)
else:
slope = np.nan
offset = np.nan
r2 = np.nan
predict_ref_spd_sector = self._predict(ref_spd=group[self._ref_spd_col_name],
slope=slope, offset=offset)
self._predict_ref_spd = pd.concat([self._predict_ref_spd, predict_ref_spd_sector])
self.params.append({'slope': slope,
'offset': offset,
'r2': r2,
'num_data_points': len(group[self._tar_spd_col_name]),
'sector_min': self._dir_sector_min[sector-1],
'sector_max': self._dir_sector_max[sector-1],
'sector_number': sector})
self._predict_ref_spd.sort_index(ascending=True, inplace=True)
if show_params:
self.show_params()
def _predict(self, ref_spd, slope=None, offset=None):
if slope is None:
slope = self.params['slope']
if offset is None:
offset = self.params['offset']
return ref_spd * slope + offset
class OrthogonalLeastSquares(CorrelBase):
"""
Correlate two datasets against each other using the Orthogonal Least Squares method. This accepts two wind speed
Series with timestamps as indexes and an averaging period which merges the datasets by this time period before
performing the correlation.
:param ref_spd: Series containing reference wind speed as a column, timestamp as the index.
:type ref_spd: pd.Series
:param target_spd: Series containing target wind speed as a column, timestamp as the index.
:type target_spd: pd.Series
:param averaging_prd: Groups data by the time period specified here. The following formats are supported
- Set period to '10min' for 10 minute average, '30min' for 30 minute average.
- Set period to '1H' for hourly average, '3H' for three hourly average and so on for '4H', '6H' etc.
- Set period to '1D' for a daily average, '3D' for three day average, similarly '5D', '7D', '15D' etc.
- Set period to '1W' for a weekly average, '3W' for three week average, similarly '2W', '4W' etc.
- Set period to '1M' for monthly average with the timestamp at the start of the month.
- Set period to '1A' for annual average with the timestamp at the start of the year.
:type averaging_prd: str
:param coverage_threshold: Minimum coverage required when aggregating the data to the averaging_prd.
:type coverage_threshold: float
:param ref_aggregation_method: Default `mean`, returns the mean of the data for the specified period. Can also
use `median`, `prod`, `sum`, `std`,`var`, `max`, `min` which are shorthands for
median, product, summation, standard deviation, variance, maximum and minimum
respectively.
:type ref_aggregation_method: str
:param target_aggregation_method: Default `mean`, returns the mean of the data for the specified period. Can also
use `median`, `prod`, `sum`, `std`,`var`, `max`, `min` which are shorthands for
median, product, summation, standard deviation, variance, maximum and minimum
respectively.
:type target_aggregation_method: str
:returns: An object representing orthogonal least squares fit model
**Example usage**
::
import brightwind as bw
data = bw.load_csv(bw.demo_datasets.demo_data)
m2_ne = bw.load_csv(bw.demo_datasets.demo_merra2_NE)
m2_nw = bw.load_csv(bw.demo_datasets.demo_merra2_NW)
# Correlate wind speeds on a monthly basis.
orthog_cor = bw.Correl.OrthogonalLeastSquares(m2_ne['WS50m_m/s'], data['Spd80mN'], averaging_prd='1M',
coverage_threshold=0.95)
orthog_cor.run()
# To plot the scatter plot and regression line.
ols_cor.plot()
# To change the plot's size.
ols_cor.plot(figure_size=(12,15))
# To show the resulting parameters.
orthog_cor.params
# or
orthog_cor.show_params()
# To synthesize data at the target site.
orthog_cor.synthesize()
# To synthesize data at the target site using a different external reference dataset.
orthog_cor.synthesize(ext_input=m2_nw['WS50m_m/s'])
# To run the correlation without immediately showing results.
orthog_cor.run(show_params=False)
# To retrieve the merged and aggregated data used in the correlation.
orthog_cor.data
# To retrieve the number of data points used for the correlation
orthog_cor.num_data_pts
# To retrieve the input parameters.
orthog_cor.averaging_prd
orthog_cor.coverage_threshold
orthog_cor.ref_spd
orthog_cor.ref_aggregation_method
orthog_cor.target_spd
orthog_cor.target_aggregation_method
# Correlate temperature on an hourly basis using a different aggregation method.
orthog_cor = bw.Correl.OrthogonalLeastSquares(m2_ne['T2M_degC'], data['T2m'],
averaging_prd='1H', coverage_threshold=0,
ref_aggregation_method='min', target_aggregation_method='min')
"""
@staticmethod
def linear_func(p, x):
return p[0] * x + p[1]
def __init__(self, ref_spd, target_spd, averaging_prd, coverage_threshold=0.9,
ref_aggregation_method='mean', target_aggregation_method='mean'):
CorrelBase.__init__(self, ref_spd, target_spd, averaging_prd, coverage_threshold,
ref_aggregation_method=ref_aggregation_method,
target_aggregation_method=target_aggregation_method)
def __repr__(self):
return 'Orthogonal Least Squares Model ' + str(self.params)
def run(self, show_params=True):
fit_data = RealData(self.data[self._ref_spd_col_name].values.flatten(),
self.data[self._tar_spd_col_name].values.flatten())
p, res = lstsq(np.nan_to_num(fit_data.x[:, np.newaxis] ** [1, 0]),
np.nan_to_num(np.asarray(fit_data.y)[:, np.newaxis]))[0:2]
model = ODR(fit_data, Model(OrthogonalLeastSquares.linear_func), beta0=[p[0][0], p[1][0]])
output = model.run()
self.params = dict([('slope', output.beta[0]), ('offset', output.beta[1])])
self.params['r2'] = self._get_r2(target_spd=self.data[self._tar_spd_col_name],
predict_spd=self._predict(ref_spd=self.data[self._ref_spd_col_name]))
self.params['num_data_points'] = self.num_data_pts
# print("Model output:", output.pprint())
if show_params:
self.show_params()
def _predict(self, ref_spd):
def linear_func_inverted(x, p):
return OrthogonalLeastSquares.linear_func(p, x)
return ref_spd.transform(linear_func_inverted, p=[self.params['slope'], self.params['offset']])
class MultipleLinearRegression(CorrelBase):
"""
Correlate multiple reference datasets against a target dataset using ordinary least squares. This accepts a
list of multiple reference wind speeds and a single target wind speed. The wind speed datasets are Pandas
Series with timestamps as indexes. Also sen is an averaging period which merges the datasets by this time period
before performing the correlation.
:param ref_spd: A list of Series containing reference wind speed as a column, timestamp as the index.
:type ref_spd: List(pd.Series)
:param target_spd: Series containing target wind speed as a column, timestamp as the index.
:type target_spd: pd.Series
:param averaging_prd: Groups data by the time period specified here. The following formats are supported
- Set period to '10min' for 10 minute average, '30min' for 30 minute average.
- Set period to '1H' for hourly average, '3H' for three hourly average and so on for '4H', '6H' etc.
- Set period to '1D' for a daily average, '3D' for three day average, similarly '5D', '7D', '15D' etc.
- Set period to '1W' for a weekly average, '3W' for three week average, similarly '2W', '4W' etc.
- Set period to '1M' for monthly average with the timestamp at the start of the month.
- Set period to '1A' for annual average with the timestamp at the start of the year.
:type averaging_prd: str
:param coverage_threshold: Minimum coverage required when aggregating the data to the averaging_prd.
:type coverage_threshold: float
:param ref_aggregation_method: Default `mean`, returns the mean of the data for the specified period. Can also
use `median`, `prod`, `sum`, `std`,`var`, `max`, `min` which are shorthands for
median, product, summation, standard deviation, variance, maximum and minimum
respectively.
:type ref_aggregation_method: str
:param target_aggregation_method: Default `mean`, returns the mean of the data for the specified period. Can also
use `median`, `prod`, `sum`, `std`,`var`, `max`, `min` which are shorthands for
median, product, summation, standard deviation, variance, maximum and minimum
respectively.
:type target_aggregation_method: str
:returns: An object representing Multiple Linear Regression fit model
**Example usage**
::
import brightwind as bw
data = bw.load_csv(bw.demo_datasets.demo_data)
m2_ne = bw.load_csv(bw.demo_datasets.demo_merra2_NE)
m2_nw = bw.load_csv(bw.demo_datasets.demo_merra2_NW)
# Correlate on a monthly basis
mul_cor = bw.Correl.MultipleLinearRegression([m2_ne['WS50m_m/s'], m2_ne['WS50m_m/s']], data['Spd80mN'],
averaging_prd='1M',
coverage_threshold=0.95)
mul_cor.run()
# To plot the scatter plot and line fit.
mul_cor.plot()
# To show the resulting parameters.
mul_cor.params
# or
mul_cor.show_params()
# To calculate the correlation coefficient R^2.
mul_cor.get_r2()
# To synthesize data at the target site.
mul_cor.synthesize()
# To run the correlation without immediately showing results.
mul_cor.run(show_params=False)
# To retrieve the merged and aggregated data used in the correlation.
mul_cor.data
# To retrieve the number of data points used for the correlation
mul_cor.num_data_pts
# To retrieve the input parameters.
mul_cor.averaging_prd
mul_cor.coverage_threshold
mul_cor.ref_spd
mul_cor.ref_aggregation_method
mul_cor.target_spd
mul_cor.target_aggregation_method
# Correlate temperature on an hourly basis using a different aggregation method.
mul_cor = bw.Correl.MultipleLinearRegression([m2_ne['T2M_degC'], m2_nw['T2M_degC']], data['T2m'],
averaging_prd='1H', coverage_threshold=0,
ref_aggregation_method='min', target_aggregation_method='min')
"""
def __init__(self, ref_spd: List, target_spd, averaging_prd, coverage_threshold=0.9,
ref_aggregation_method='mean', target_aggregation_method='mean'):
self.ref_spd = self._merge_ref_spds(ref_spd)
CorrelBase.__init__(self, self.ref_spd, target_spd, averaging_prd, coverage_threshold,
ref_aggregation_method=ref_aggregation_method,
target_aggregation_method=target_aggregation_method)
def __repr__(self):
return 'Multiple Linear Regression Model ' + str(self.params)
@staticmethod
def _merge_ref_spds(ref_spds):
# ref_spds is a list of pd.Series that may have the same names.
for idx, ref_spd in enumerate(ref_spds):
ref_spd.name = ref_spd.name + '_' + str(idx + 1)
return pd.concat(ref_spds, axis=1, join='inner')
def run(self, show_params=True):
p, res = lstsq(np.column_stack((self.data[self._ref_spd_col_names].values, np.ones(len(self.data)))),
self.data[self._tar_spd_col_name].values.flatten())[0:2]
self.params = {'slope': p[:-1], 'offset': p[-1]}
if show_params:
self.show_params()
def show_params(self):
pprint.pprint(self.params)
def _predict(self, x):
def linear_function(x, slope, offset):
return sum(x * slope) + offset
return x.apply(linear_function, axis=1, slope=self.params['slope'], offset=self.params['offset'])
def synthesize(self):
# def synthesize(self, ext_input=None): # REMOVE UNTIL FIXED
ext_input = None
# CorrelBase.synthesize(self.data ???????? Why not??????????????????????????????????????
if ext_input is None:
return pd.concat([self._predict(tf.average_data_by_period(self.ref_spd.loc[:min(self.data.index)],
self.averaging_prd,
return_coverage=False)),
self.data[self._tar_spd_col_name]], axis=0)
else:
return self._predict(ext_input)
def get_r2(self):
return 1.0 - (sum((self.data[self._tar_spd_col_name] -
self._predict(self.data[self._ref_spd_col_names])) ** 2) /
(sum((self.data[self._tar_spd_col_name] - self.data[self._tar_spd_col_name].mean()) ** 2)))
def plot(self, figure_size=(10, 10.2)):
raise NotImplementedError
class SimpleSpeedRatio:
"""
Calculate the simple speed ratio between overlapping datasets and apply to the MOMM of the reference.
The simple speed ratio is calculated by finding the limits of the overlapping period between the target and
reference datasets. The ratio of the mean wind speed of these two datasets for the overlapping period is
calculated i.e. target_overlap_mean / ref_overlap_mean. This ratio is then applied to the Mean of Monthly
Means (MOMM) of the complete reference dataset resulting in a long term wind speed for the target dataset.
This is a "back of the envelope" style long term calculation and is intended to be used as a guide and not
to be used in a robust wind resource assessment.
A warning message will be raised if the data coverage of either the target or the reference overlapping
period is poor.
:param ref_spd: Series containing reference wind speed as a column, timestamp as the index.
:type ref_spd: pd.Series
:param target_spd: Series containing target wind speed as a column, timestamp as the index.
:type target_spd: pd.Series
:return: An object representing the simple speed ratio model
**Example usage**
::
import brightwind as bw
data = bw.load_csv(bw.demo_datasets.demo_data)
m2 = bw.load_csv(bw.demo_datasets.demo_merra2_NE)
# Calculate the simple speed ratio between overlapping datasets
simple_ratio = bw.Correl.SimpleSpeedRatio(m2['WS50m_m/s'], data['Spd80mN'])
simple_ratio.run()
"""
def __init__(self, ref_spd, target_spd):
self.ref_spd = ref_spd
self.target_spd = target_spd
self._start_ts = tf._get_min_overlap_timestamp(ref_spd.dropna().index, target_spd.dropna().index)
self._end_ts = min(ref_spd.dropna().index.max(), ref_spd.dropna().index.max())
self.data = ref_spd[self._start_ts:self._end_ts], target_spd[self._start_ts:self._end_ts]
self.params = {'status': 'not yet run'}
def __repr__(self):
return 'Simple Speed Ratio Model ' + str(self.params)
def run(self, show_params=True):
self.params = dict()
simple_speed_ratio = self.data[1].mean() / self.data[0].mean() # target / ref
ref_long_term_momm = momm(self.ref_spd)
# calculate the coverage of the target data to raise warning if poor
tar_count = self.data[1].dropna().count()
tar_res = tf._get_data_resolution(self.data[1].index)
max_pts = (self._end_ts - self._start_ts) / tar_res
if tar_res == pd.Timedelta(1, unit='M'): # if is monthly
# round the result to 0 decimal to make whole months.
max_pts = np.round(max_pts, 0)
target_overlap_coverage = tar_count / max_pts
self.params["simple_speed_ratio"] = simple_speed_ratio
self.params["ref_long_term_momm"] = ref_long_term_momm
self.params["target_long_term"] = simple_speed_ratio * ref_long_term_momm
self.params["target_overlap_coverage"] = target_overlap_coverage
if show_params:
self.show_params()
if target_overlap_coverage < 0.9:
warnings.warn('\nThe target data overlapping coverage is poor at {}. '
'Please use this calculation with caution.'.format(round(target_overlap_coverage, 3)))
def show_params(self):
"""Show the dictionary of parameters"""
pprint.pprint(self.params)
class SpeedSort(CorrelBase):
class SectorSpeedModel:
def __init__(self, ref_spd, target_spd, cutoff):
self.sector_ref = ref_spd
self.sector_target = target_spd
x_data = sorted([wdspd for wdspd in self.sector_ref.values.flatten()])
y_data = sorted([wdspd for wdspd in self.sector_target.values.flatten()])
start_idx = 0
for idx, wdspd in enumerate(x_data):
if wdspd >= cutoff:
start_idx = idx
break
x_data = x_data[start_idx:]
y_data = y_data[start_idx:]
self.target_cutoff = y_data[0]
self.data_pts = min(len(x_data), len(y_data))
# Line fit
mid_pnt = int(len(x_data) / 2)
xmean1 = np.mean(x_data[:mid_pnt])
xmean2 = np.mean(x_data[mid_pnt:])
ymean1 = np.mean(y_data[:mid_pnt])
ymean2 = np.mean(y_data[mid_pnt:])
self.params = dict()
self.params['slope'] = (ymean2 - ymean1) / (xmean2 - xmean1)
self.params['offset'] = ymean1 - (xmean1 * self.params['slope'])
# print(self.params)
def sector_predict(self, x):
def linear_function(x, slope, offset):
return x * slope + offset
return x.transform(linear_function, slope=self.params['slope'], offset=self.params['offset'])
def plot_model(self):
return plot_scatter(self.sector_ref,
self.sector_target,
self.sector_predict(self.sector_ref),
x_label=self.sector_ref.name, y_label=self.sector_target.name)
def __init__(self, ref_spd, ref_dir, target_spd, target_dir, averaging_prd, coverage_threshold=0.9, sectors=12,
direction_bin_array=None, lt_ref_speed=None):
"""
Correlate two datasets against each other using the SpeedSort method as outlined in 'The SpeedSort, DynaSort
and Scatter Wind Correlation Methods, Wind Engineering 29(3):217-242, <NAME>, <NAME>, May 2005'.
This accepts two wind speed and direction Series with timestamps as indexes and an averaging period which
merges the datasets by this time period before performing the correlation.
:param ref_spd: Series containing reference wind speed as a column, timestamp as the index.
:type ref_spd: pd.Series
:param target_spd: Series containing target wind speed as a column, timestamp as the index.
:type target_spd: pd.Series
:param ref_dir: Series containing reference wind direction as a column, timestamp as the index.
:type ref_dir: pd.Series
:param target_dir: Series containing target wind direction as a column, timestamp as the index.
:type target_dir: pd.Series
:param averaging_prd: Groups data by the time period specified here. The following formats are supported
- Set period to '10min' for 10 minute average, '30min' for 30 minute average.
- Set period to '1H' for hourly average, '3H' for three hourly average and so on for '4H', '6H' etc.
- Set period to '1D' for a daily average, '3D' for three day average, similarly '5D', '7D', '15D' etc.
- Set period to '1W' for a weekly average, '3W' for three week average, similarly '2W', '4W' etc.
- Set period to '1M' for monthly average with the timestamp at the start of the month.
- Set period to '1A' for annual average with the timestamp at the start of the year.
:type averaging_prd: str
:param coverage_threshold: Minimum coverage required when aggregating the data to the averaging_prd.
:type coverage_threshold: float
:param sectors: Number of direction sectors to bin in to. The first sector is centered at 0 by
default. To change that behaviour specify 'direction_bin_array' which overwrites
'sectors'.
:type sectors: int
:param direction_bin_array: An optional parameter where if you want custom direction bins, pass an array
of the bins. To add custom bins for direction sectors, overwrites sectors. For
instance, for direction bins [0,120), [120, 215), [215, 360) the list would
be [0, 120, 215, 360]
:type direction_bin_array: List()
:param lt_ref_speed: An alternative to the long term wind speed for the reference dataset calculated
using mean of monthly means (MOMM).
:type lt_ref_speed: float or int
:returns: An object representing the SpeedSort fit model
**Example usage**
::
import brightwind as bw
data = bw.load_csv(bw.demo_datasets.demo_data)
m2 = bw.load_csv(bw.demo_datasets.demo_merra2_NE)
# Basic usage on an hourly basis
ss_cor = bw.Correl.SpeedSort(m2['WS50m_m/s'], m2['WD50m_deg'], data['Spd80mN'], data['Dir78mS'],
averaging_prd='1H')
ss_cor.run()
ss_cor.plot_wind_directions()
ss_cor.get_result_table()
ss_cor.synthesize()
# Sending an array of direction sectors
ss_cor = bw.Correl.SpeedSort(m2['WS50m_m/s'], m2['WD50m_deg'], data['Spd80mN'], data['Dir78mS'],
averaging_prd='1H', direction_bin_array=[0,90,130,200,360])
ss_cor.run()
"""
CorrelBase.__init__(self, ref_spd, target_spd, averaging_prd, coverage_threshold, ref_dir=ref_dir,
target_dir=target_dir, sectors=sectors, direction_bin_array=direction_bin_array)
if lt_ref_speed is None:
self.lt_ref_speed = momm(self.data[self._ref_spd_col_name])
else:
self.lt_ref_speed = lt_ref_speed
self.cutoff = min(0.5 * self.lt_ref_speed, 4.0)
self.ref_veer_cutoff = self._get_veer_cutoff(self.data[self._ref_spd_col_name])
self.target_veer_cutoff = self._get_veer_cutoff((self.data[self._tar_spd_col_name]))
self._randomize_calm_periods()
self._get_overall_veer()
# for low ref_speed and high target_speed recalculate direction sector
self._adjust_low_reference_speed_dir()
self.speed_model = dict()
def __repr__(self):
return 'SpeedSort Model ' + str(self.params)
def _randomize_calm_periods(self):
idxs = self.data[self.data[self._ref_spd_col_name] < 1].index
self.data.loc[idxs, self._ref_dir_col_name] = 360.0 * np.random.random(size=len(idxs))
idxs = self.data[self.data[self._tar_spd_col_name] < 1].index
self.data.loc[idxs, self._tar_dir_col_name] = 360.0 * np.random.random(size=len(idxs))
def _get_overall_veer(self):
idxs = self.data[(self.data[self._ref_spd_col_name] >= self.ref_veer_cutoff) &
(self.data[self._tar_spd_col_name] >= self.target_veer_cutoff)].index
self.overall_veer = self._get_veer(self.data.loc[idxs, self._ref_dir_col_name],
self.data.loc[idxs, self._tar_dir_col_name]).mean()
def _adjust_low_reference_speed_dir(self):
idxs = self.data[(self.data[self._ref_spd_col_name] < 2) &
(self.data[self._tar_spd_col_name] > (self.data[self._ref_spd_col_name] + 4))].index
self.data.loc[idxs, self._ref_dir_col_name] = (self.data.loc[idxs, self._tar_dir_col_name] -
self.overall_veer).apply(utils._range_0_to_360)
@staticmethod
def _get_veer_cutoff(speed_col):
return 0.5 * (6.0 + (0.5 * speed_col.mean()))
@staticmethod
def _get_veer(ref_d, target_d):
def change_range(veer):
if veer > 180:
return veer - 360.0
elif veer < -180:
return veer + 360.0
else:
return veer
v = target_d - ref_d
return v.apply(change_range)
def _avg_veer(self, sector_data):
sector_data = sector_data[(sector_data[self._ref_spd_col_name] >= self.ref_veer_cutoff) &
(sector_data[self._tar_spd_col_name] >= self.target_veer_cutoff)]
return {'average_veer': round(self._get_veer(sector_data[self._ref_dir_col_name],
sector_data[self._tar_dir_col_name]).mean(), 5),
'num_pts_for_veer': len(sector_data[self._ref_dir_col_name])}
def run(self, show_params=True):
self.params = dict()
self.params['ref_speed_cutoff'] = round(self.cutoff, 5)
self.params['ref_veer_cutoff'] = round(self.ref_veer_cutoff, 5)
self.params['target_veer_cutoff'] = round(self.target_veer_cutoff, 5)
self.params['overall_average_veer'] = round(self.overall_veer, 5)
for sector, group in pd.concat([self.data, self._ref_dir_bins],
axis=1, join='inner').dropna().groupby(['ref_dir_bin']):
# print('Processing sector:', sector)
self.speed_model[sector] = SpeedSort.SectorSpeedModel(ref_spd=group[self._ref_spd_col_name],
target_spd=group[self._tar_spd_col_name],
cutoff=self.cutoff)
self.params[sector] = {'slope': round(self.speed_model[sector].params['slope'], 5),
'offset': round(self.speed_model[sector].params['offset'], 5),
'target_speed_cutoff': round(self.speed_model[sector].target_cutoff, 5),
'num_pts_for_speed_fit': self.speed_model[sector].data_pts,
'num_total_pts': min(group.count()),
'sector_min': self._dir_sector_min[sector - 1],
'sector_max': self._dir_sector_max[sector - 1],
}
self.params[sector].update(self._avg_veer(group))
if show_params:
self.show_params()
def get_result_table(self):
result = pd.DataFrame()
for key in self.params:
if not isinstance(key, str):
result = pd.concat([pd.DataFrame.from_records(self.params[key], index=[key]), result], axis=0)
result = result.sort_index()
return result
def plot(self):
for model in self.speed_model:
self.speed_model[model].plot_model('Sector ' + str(model))
return self.plot_wind_directions()
@staticmethod
def _linear_interpolation(xa, xb, ya, yb, xc):
m = (xc - xa) / (xb - xa)
yc = (yb - ya) * m + ya
return yc
def _predict_dir(self, x_dir):
x_dir = x_dir.dropna().rename('dir')
sector_min = []
sector_max = []
if self.direction_bin_array is None:
# First sector is centered at 0.
step = 360/self.sectors
veer_bins = list(map(float, np.arange(0, 360 + step, step)))
for veer_bin in veer_bins:
sector_min.append(offset_wind_direction(veer_bin, -float(step/2)))
sector_max.append(offset_wind_direction(veer_bin, float(step/2)))
sec_veers = np.empty(np.shape(veer_bins))
sec_veers[:] = np.nan
sec_veers = list(sec_veers)
for key in self.params.keys():
if type(key) is int:
if self.params[key]['sector_min'] in sector_min:
sec_veers[sector_min.index(self.params[key]['sector_min'])] = self.params[key]['average_veer']
if (0 in veer_bins) and (360 in veer_bins):
sec_veers[-1] = sec_veers[0]
else:
veer_bins = []
sec_veers = []
# Calculate middle point of each sector, as each sectoral veer is applied at the mid-point of the sector.
for key in self.params.keys():
if type(key) is int:
sec_veers.append(self.params[key]['average_veer'])
sector_min.append(self.params[key]['sector_min'])
sector_max.append(self.params[key]['sector_max'])
if self.params[key]['sector_min'] < self.params[key]['sector_max']:
veer_bins.append((self.params[key]['sector_min'] + self.params[key]['sector_max']) / 2)
else:
veer_bins.append(offset_wind_direction(self.params[key]['sector_max'],
float(360 - self.params[key]['sector_max']
+ self.params[key]['sector_min']) / 2))
# If first sector is not centered at 0 and 0 and 360 are the extremes of the direction_bin_array
# then the first and the last sectors are taken into account for deriving the veer as for code below.
if (0 in self.direction_bin_array) and (360 in self.direction_bin_array):
sec_veers.insert(0, self._linear_interpolation(0 - (360 - veer_bins[-1]), veer_bins[0],
sec_veers[-1], sec_veers[0], 0))
sec_veers.append(self._linear_interpolation(veer_bins[-1], 360 + veer_bins[0],
sec_veers[-1], sec_veers[1], 360))
veer_bins.insert(0, 0)
veer_bins.append(360)
sector_min.insert(0, sector_min[-1])
sector_min.append(sector_min[0])
sector_max.insert(0, sector_max[0])
sector_max.append(sector_max[0])
# The veer correction is derived linear interpolating the veer between two mid-points of near sectors.
adjustment = x_dir.rename('adjustment').copy() * np.nan
for i in range(1, len(veer_bins)):
if
|
np.isnan(sec_veers[i - 1])
|
numpy.isnan
|
from __future__ import print_function
from __future__ import division
from builtins import str
from flarestack.utils.prepare_catalogue import ps_catalogue_name
from flarestack.data.icecube.ps_tracks.ps_v002_p01 import IC86_1_dict, IC86_234_dict
from flarestack.core.results import ResultsHandler
from flarestack.cluster import run_desy_cluster as rd
from flarestack.shared import plot_output_dir, scale_shortener, make_analysis_pickle
import matplotlib.pyplot as plt
import numpy as np
seasons = [IC86_1_dict, IC86_234_dict]
all_res = dict()
basename = "analyses/angular_error_floor/compare_seasons/"
for gamma in [2.0, 3.0, 3.5]:
gamma_name = basename + str(gamma) + "/"
injection_energy = {
"Name": "Power Law",
"Gamma": gamma,
}
injection_time = {"Name": "Steady"}
inj_dict = {
"Injection Energy PDF": injection_energy,
"Injection Time PDF": injection_time,
"Poisson Smear?": False,
"fixed_n": 100,
}
# sin_decs = np.linspace(1.00, -1.00, 41)
#
# print sin_decs
sin_decs = np.linspace(0.9, -0.9, 37)
# print sin_decs
# raw_input("prompt")
# sin_decs = [-0.5, 0.0, 0.5]
res_dict = dict()
for pull_corrector in ["no_pull", "median_1d"]:
# for pull_corrector in ["median_1d_e", ]:
root_name = gamma_name + pull_corrector + "/"
if "_e" in pull_corrector:
root_key = "Dynamic Pull Corrector " + pull_corrector[-4] + "D "
elif pull_corrector == "no_pull":
root_key = "Base Case"
else:
root_key = "Static Pull Corrector " + pull_corrector[-2] + "D "
for floor in ["no_floor"]:
seed_name = root_name + floor + "/"
if floor == "no_floor":
key = root_key + " (No floor)"
else:
key = root_key + " (" + floor + ")"
config_mh = []
for season in seasons:
name = seed_name + season["Data Sample"] + "/" + season["Name"] + "/"
print(name)
llh_dict = {
"name": "spatial",
"LLH Energy PDF": injection_energy,
"LLH Time PDF": injection_time,
"pull_name": pull_corrector,
"floor_name": floor,
}
# scale = flux_to_k(reference_sensitivity(sin_dec, gamma)) * 10
mh_dict = {
"name": name,
"mh_name": "fixed_weights",
"datasets": [IC86_1_dict],
"catalogue": ps_catalogue_name(-0.2),
"llh_dict": llh_dict,
"inj kwargs": inj_dict,
"n_trials": 50,
"n_steps": 2,
"scale": 1.0,
}
pkl_file = make_analysis_pickle(mh_dict)
# rd.submit_to_cluster(pkl_file, n_jobs=50)
#
# mh = MinimisationHandler.create(mh_dict_power_law)
# mh.iterate_run(n_steps=2, n_trials=10)
config_mh.append(mh_dict)
res_dict[key] = config_mh
all_res[gamma] = res_dict
rd.wait_for_cluster()
for (gamma, res_dict) in all_res.items():
gamma_name = basename + str(gamma) + "/"
sens_dict = dict()
med_bias_dict = dict()
mean_bias_dict = dict()
disc_dict = dict()
for (config, mh_list) in res_dict.items():
sens = []
med_biases = []
mean_biases = []
disc = []
for mh_dict in mh_list:
rh = ResultsHandler(mh_dict)
max_scale = scale_shortener(
max([float(x) for x in list(rh.results.keys())])
)
sens.append(rh.sensitivity)
disc.append(rh.disc_potential)
fit = rh.results[max_scale]["Parameters"]["n_s"]
inj = rh.inj[max_scale]["n_s"]
med_bias = np.median(fit) / inj
med_biases.append(med_bias)
mean_biases.append(np.mean(fit) / inj)
# ax1.plot(sin_decs, sens, label=config)
sens_dict[config] =
|
np.array(sens)
|
numpy.array
|
"""
1HN In-phase/Anti-phase Proton CEST
===================================
Analyzes chemical exchange during the CEST block. Magnetization evolution is
calculated using the (6n)×(6n), two-spin matrix, where n is the number of
states::
{ Ix(a), Iy(a), Iz(a), IxSz(a), IySz(a), IzSz(a),
Ix(b), Iy(b), Iz(b), IxSz(b), IySz(b), IzSz(b), ... }
References
----------
| Yuwen, Sekhar and Kay. Angew Chem Int Ed (2017) 56:6122-6125
| Yuwen and Kay. J Biomol NMR (2017) 67:295-307
| Yuwen and Kay. J Biomol NMR (2018) 70:93-102
Note
----
A sample configuration file for this module is available using the command::
$ chemex config cest_1hn_ip_ap
"""
import functools as ft
import numpy as np
import numpy.linalg as nl
import chemex.experiments.helper as ceh
import chemex.helper as ch
import chemex.nmr.liouvillian as cnl
_SCHEMA = {
"type": "object",
"properties": {
"experiment": {
"type": "object",
"properties": {
"d1": {"type": "number"},
"time_t1": {"type": "number"},
"carrier": {"type": "number"},
"b1_frq": {"type": "number"},
"b1_inh_scale": {"type": "number", "default": 0.1},
"b1_inh_res": {"type": "integer", "default": 11},
"observed_state": {
"type": "string",
"pattern": "[a-z]",
"default": "a",
},
"eta_block": {"type": "integer", "default": 0},
},
"required": ["d1", "time_t1", "carrier", "b1_frq"],
}
},
}
def read(config):
ch.validate(config, _SCHEMA)
config["basis"] = cnl.Basis(type="ixyzsz_eq", spin_system="hn")
config["fit"] = _fit_this()
config["data"]["filter_ref_planes"] = True
return ceh.load_experiment(config=config, pulse_seq_cls=PulseSeq)
def _fit_this():
return {
"rates": [
"r2_i_{states}",
"r1_i_{observed_state}",
"r1_s_{observed_state}",
"etaxy_i_{observed_state}",
"etaz_i_{observed_state}",
],
"model_free": [
"tauc_{observed_state}",
"s2_{observed_state}",
"khh_{observed_state}",
],
}
class PulseSeq:
def __init__(self, config, propagator):
self.prop = propagator
settings = config["experiment"]
self.time_t1 = settings["time_t1"]
self.d1 = settings["d1"]
self.taua = 2.38e-3
self.prop.carrier_i = settings["carrier"]
self.prop.b1_i = settings["b1_frq"]
self.prop.b1_i_inh_scale = settings["b1_inh_scale"]
self.prop.b1_i_inh_res = settings["b1_inh_res"]
self.eta_block = settings["eta_block"]
self.observed_state = settings["observed_state"]
self.prop.detection = f"[2izsz_{self.observed_state}]"
self.dephased = settings["b1_inh_scale"] == np.inf
if self.eta_block > 0:
self.taud = max(self.d1 - self.time_t1, 0.0)
self.taue = 0.5 * self.time_t1 / self.eta_block
else:
self.taud = self.d1
self.p90_i = self.prop.perfect90_i
self.p180_sx = self.prop.perfect180_s[0]
self.p180_isx = self.prop.perfect180_i[0] @ self.prop.perfect180_s[0]
@ft.lru_cache(maxsize=10000)
def calculate(self, offsets, params_local):
self.prop.update(params_local)
self.prop.offset_i = 0.0
d_taud, d_taua = self.prop.delays([self.taud, self.taua])
start = d_taud @ self.prop.get_start_magnetization(terms="ie")
start = self.prop.keep_components(start, terms=["ie", "iz"])
intst = {}
for offset in set(offsets):
self.prop.offset_i = offset
if self.eta_block > 0:
d_2taue = self.prop.delays(2.0 * self.taue)
p_taue = self.prop.pulse_i(self.taue, 0.0, self.dephased)
cest_block = p_taue @ self.p180_sx @ d_2taue @ self.p180_sx @ p_taue
cest = nl.matrix_power(cest_block, self.eta_block)
else:
cest = self.prop.pulse_i(self.time_t1, 0.0, self.dephased)
if abs(offset) > 1e4:
inept = self.p90_i[3] @ d_taua @ self.p180_isx @ d_taua @ self.p90_i[0]
cest = inept @ cest
intst[offset] = self.prop.detect(cest @ start)
return
|
np.array([intst[offset] for offset in offsets])
|
numpy.array
|
from __future__ import division, absolute_import, print_function
import platform
import numpy as np
from numpy import uint16, float16, float32, float64
from numpy.testing import run_module_suite, assert_, assert_equal, dec
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
try:
callable(*args, **kwargs)
except FloatingPointError as exc:
assert_(str(exc).find(strmatch) >= 0,
"Did not raise floating point %s error" % strmatch)
else:
assert_(False,
"Did not raise floating point %s error" % strmatch)
class TestHalf(object):
def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
self.all_f32 = np.array(self.all_f16, dtype=float32)
self.all_f64 = np.array(self.all_f16, dtype=float64)
# An array of all non-NaN float16 values, in sorted order
self.nonan_f16 = np.concatenate(
(np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
self.nonan_f16.dtype = float16
self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
# An array of all finite float16 values, in sorted order
self.finite_f16 = self.nonan_f16[1:-1]
self.finite_f32 = self.nonan_f32[1:-1]
self.finite_f64 = self.nonan_f64[1:-1]
def test_half_conversions(self):
"""Checks that all 16-bit values survive conversion
to/from 32-bit and 64-bit float"""
# Because the underlying routines preserve the NaN bits, every
# value is preserved when converting to/from other floats.
# Convert from float32 back to float16
b = np.array(self.all_f32, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert from float64 back to float16
b = np.array(self.all_f64, dtype=float16)
assert_equal(self.all_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Convert float16 to longdouble and back
# This doesn't necessarily preserve the extra NaN bits,
# so exclude NaNs.
a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
b = np.array(a_ld, dtype=float16)
assert_equal(self.nonan_f16.view(dtype=uint16),
b.view(dtype=uint16))
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
assert_equal(np.spacing(float16(65504)), np.inf)
# Check comparisons of all values with NaN
nan = float16(np.nan)
assert_(not (self.all_f16 == nan).any())
assert_(not (nan == self.all_f16).any())
assert_((self.all_f16 != nan).all())
assert_((nan != self.all_f16).all())
assert_(not (self.all_f16 < nan).any())
assert_(not (nan < self.all_f16).any())
assert_(not (self.all_f16 <= nan).any())
assert_(not (nan <= self.all_f16).any())
assert_(not (self.all_f16 > nan).any())
assert_(not (nan > self.all_f16).any())
assert_(not (self.all_f16 >= nan).any())
assert_(not (nan >= self.all_f16).any())
def test_half_values(self):
"""Confirms a small number of known half values"""
a = np.array([1.0, -1.0,
2.0, -2.0,
0.0999755859375, 0.333251953125, # 1/10, 1/3
65504, -65504, # Maximum magnitude
2.0**(-14), -2.0**(-14), # Minimum normal
2.0**(-24), -2.0**(-24), # Minimum subnormal
0, -1/1e1000, # Signed zeros
np.inf, -np.inf])
b = np.array([0x3c00, 0xbc00,
0x4000, 0xc000,
0x2e66, 0x3555,
0x7bff, 0xfbff,
0x0400, 0x8400,
0x0001, 0x8001,
0x0000, 0x8000,
0x7c00, 0xfc00], dtype=uint16)
b.dtype = float16
assert_equal(a, b)
def test_half_rounding(self):
"""Checks that rounding when converting to half is correct"""
a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
2.0**-25, # Underflows to zero (nearest even mode)
2.0**-26, # Underflows to zero
1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
1.0+2.0**-12, # rounds to 1.0
65519, # rounds to 65504
65520], # rounds to inf
dtype=float64)
rounded = [2.0**-24,
0.0,
0.0,
1.0+2.0**(-10),
1.0,
1.0,
65504,
np.inf]
# Check float64->float16 rounding
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
# Check float32->float16 rounding
a = np.array(a, dtype=float32)
b = np.array(a, dtype=float16)
assert_equal(b, rounded)
def test_half_correctness(self):
"""Take every finite float16, and check the casting functions with
a manual conversion."""
# Create an array of all finite float16s
a_bits = self.finite_f16.view(dtype=uint16)
# Convert to 64-bit float manually
a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
a_man = (a_bits & 0x03ff) * 2.0**(-10)
# Implicit bit of normalized floats
a_man[a_exp != -15] += 1
# Denormalized exponent is -14
a_exp[a_exp == -15] = -14
a_manual = a_sgn * a_man * 2.0**a_exp
a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
if len(a32_fail) != 0:
bad_index = a32_fail[0]
assert_equal(self.finite_f32, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f32[bad_index],
a_manual[bad_index]))
a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
if len(a64_fail) != 0:
bad_index = a64_fail[0]
assert_equal(self.finite_f64, a_manual,
"First non-equal is half value %x -> %g != %g" %
(self.finite_f16[bad_index],
self.finite_f64[bad_index],
a_manual[bad_index]))
def test_half_ordering(self):
"""Make sure comparisons are working right"""
# All non-NaN float16 values in reverse order
a = self.nonan_f16[::-1].copy()
# 32-bit float copy
b = np.array(a, dtype=float32)
# Should sort the same
a.sort()
b.sort()
assert_equal(a, b)
# Comparisons should work
assert_((a[:-1] <= a[1:]).all())
assert_(not (a[:-1] > a[1:]).any())
assert_((a[1:] >= a[:-1]).all())
assert_(not (a[1:] < a[:-1]).any())
# All != except for +/-0
assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
def test_half_funcs(self):
"""Test the various ArrFuncs"""
# fill
assert_equal(np.arange(10, dtype=float16),
np.arange(10, dtype=float32))
# fillwithscalar
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
# nonzero and copyswap
a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
[2, 5, 6])
a = a.byteswap().newbyteorder()
assert_equal(a.nonzero()[0],
[2, 5, 6])
# dot
a = np.arange(0, 10, 0.5, dtype=float16)
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a, b),
95)
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
4)
a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
5)
# getitem
a = np.arange(10, dtype=float16)
for i in range(10):
assert_equal(a.item(i), i)
def test_spacing_nextafter(self):
"""Test np.spacing and np.nextafter"""
# All non-negative finite #'s
a = np.arange(0x7c00, dtype=uint16)
hinf = np.array((np.inf,), dtype=float16)
a_f16 = a.view(dtype=float16)
assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
# switch to negatives
a |= 0x8000
assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
def test_half_ufuncs(self):
"""Test the various ufuncs"""
a = np.array([0, 1, 2, 4, 2], dtype=float16)
b = np.array([-2, 5, 1, 4, 3], dtype=float16)
c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
assert_equal(np.equal(a, b), [False, False, False, True, False])
assert_equal(np.not_equal(a, b), [True, True, True, False, True])
assert_equal(np.less(a, b), [False, True, False, False, True])
assert_equal(np.less_equal(a, b), [False, True, False, True, True])
assert_equal(np.greater(a, b), [True, False, True, False, False])
assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
assert_equal(np.logical_and(a, b), [False, True, True, True, True])
assert_equal(np.logical_or(a, b), [True, True, True, True, True])
assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
assert_equal(np.logical_not(a), [True, False, False, False, False])
assert_equal(np.isnan(c), [False, False, False, True, False])
assert_equal(np.isinf(c), [False, False, True, False, False])
assert_equal(np.isfinite(c), [True, True, False, False, True])
assert_equal(np.signbit(b), [True, False, False, False, False])
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
x = np.maximum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
x = np.minimum(b, c)
assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
assert_equal(np.square(b), [4, 25, 1, 16, 9])
assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
assert_equal(np.conjugate(b), b)
assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
assert_equal(np.negative(b), [2, -5, -1, -4, -3])
assert_equal(np.positive(b), b)
assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def test_half_coercion(self):
"""Test that half gets coerced properly with the other types"""
a16 = np.array((1,), dtype=float16)
a32 = np.array((1,), dtype=float32)
b16 = float16(1)
b32 = float32(1)
assert_equal(np.power(a16, 2).dtype, float16)
assert_equal(np.power(a16, 2.0).dtype, float16)
assert_equal(np.power(a16, b16).dtype, float16)
assert_equal(np.power(a16, b32).dtype, float16)
assert_equal(np.power(a16, a16).dtype, float16)
assert_equal(np.power(a16, a32).dtype, float32)
assert_equal(np.power(b16, 2).dtype, float64)
assert_equal(np.power(b16, 2.0).dtype, float64)
assert_equal(np.power(b16, b16).dtype, float16)
assert_equal(np.power(b16, b32).dtype, float32)
assert_equal(np.power(b16, a16).dtype, float16)
assert_equal(np.power(b16, a32).dtype, float32)
assert_equal(np.power(a32, a16).dtype, float32)
assert_equal(np.power(a32, b16).dtype, float32)
assert_equal(np.power(b32, a16).dtype, float16)
assert_equal(np.power(b32, b16).dtype, float32)
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_half_fpe(self):
with np.errstate(all='raise'):
sx16 = np.array((1e-4,), dtype=float16)
bx16 =
|
np.array((1e4,), dtype=float16)
|
numpy.array
|
from abc import ABCMeta
import numpy as np
from typing import List
import TransportMaps.Distributions as dist
import TransportMaps.Likelihoods as like
from utils.LinAlg import is_spd
class Distribution(metaclass=ABCMeta):
@property
def dim(self) -> int:
raise NotImplementedError
def rvs(self, num_samples: int) -> np.ndarray:
"""
Generate samples from the distribution
:return: samples
:rtype: 2-dimensional numpy array
each row is a sample; each column is a dimension
"""
raise NotImplementedError
def pdf(self, location: np.ndarray) -> np.ndarray:
"""
Compute PDF at given locations
:param location: location at which PDFs are evaluated
each row is a location; each column is a dimension
:return: PDFs at given locations
:rtype: a 1-dimensional numpy array
"""
raise NotImplementedError
def log_pdf(self, location: np.ndarray) -> np.ndarray:
"""
Compute log PDF at given locations
:param location: location at which log PDFs are evaluated
each row is a location; each column is a dimension
:return: log PDFs at given locations
:rtype: a 1-dimensional numpy array
"""
raise NotImplementedError
def grad_x_log_pdf(self, location: np.ndarray) -> np.ndarray:
"""
Compute gradients of log PDF at given locations
:param location: location at which gradients are evaluated
each row is a location; each column is a dimension
:return: PDFs at given locations
:rtype: a 2-dimensional numpy array
each row is a location; each column is a dimension
"""
raise NotImplementedError
class GaussianDistribution(Distribution, metaclass=ABCMeta):
def __init__(self, mu: np.ndarray, sigma: np.ndarray = None,
precision: np.ndarray = None):
if len(mu.shape) != 1:
raise ValueError("Dimensionality of mu is incorrect")
self._mu = mu
if sigma is not None:
if sigma.shape != (self.dim, self.dim):
raise ValueError("Dimensionality of sigma is incorrect")
if not is_spd(sigma):
raise ValueError("sigma must be symmetric positive definite")
self._sigma = sigma
self._precision = np.linalg.inv(self._sigma)
else:
if precision.shape != (self.dim, self.dim):
raise ValueError("Dimensionality of precision matrix is "
"incorrect")
if not is_spd(precision):
raise ValueError("sigma must be symmetric positive definite")
self._precision = precision
self._sigma =
|
np.linalg.inv(self._precision)
|
numpy.linalg.inv
|
"""Module defining backend agnostic containers for visualisation elements."""
from collections import OrderedDict
from collections.abc import Mapping
from copy import deepcopy
from typing import List
import numpy as np
class Element(object):
"""Representation of a single element.
Implemented as a frozen dictionary with attribute access.
"""
def __init__(self, **kwargs):
"""Initialise element."""
self._kwargs = kwargs
def __dir__(self):
"""Get the attributes."""
return list(self._kwargs.keys()) + ["get"]
def get(self, key, default):
"""Return key or default."""
if key in self:
return self[key]
else:
return default
def __repr__(self):
"""Represent object."""
sig = ", ".join([f"{k}={self._kwargs[k]}" for k in sorted(self._kwargs)])
return f"Element({sig})"
def __getitem__(self, key):
"""Return key."""
return self._kwargs[key]
def __iter__(self):
"""Iterate property keys."""
for key in self._kwargs:
yield key
def __getattr__(self, key):
"""Return key."""
if key not in self._kwargs:
raise AttributeError(str(key))
return self._kwargs[key]
def __setattr__(self, name, key):
"""Return key."""
if name != "_kwargs":
raise AttributeError("Element attributes are frozen")
return super().__setattr__(name, key)
def __contains__(self, key):
"""Test if key in object."""
return key in self._kwargs
class DrawElementsBase:
"""Abstract base class to store a set of 3D-visualisation elements."""
etype = None
_protected_keys = ("name", "type", "position", "get")
def __init__(
self, name, coordinates, element_properties=None, group_properties=None
):
"""Initialise the element group."""
self.name = name
self._coordinates = coordinates
self._positions = coordinates
self._axes = np.identity(3)
self._offset = np.zeros(3)
self._el_props = {}
self._grp_props = {}
for key, val in (element_properties or {}).items():
self.set_property(key, val, element=True)
for key, val in (group_properties or {}).items():
self.set_property(key, val, element=False)
@property
def element_properties(self):
"""Return per element properties."""
output = deepcopy(self._el_props)
output["positions"] = np.array(self._positions)
return output
@property
def group_properties(self):
"""Return element group properties."""
return deepcopy(self._grp_props)
def set_property(self, name, value, element=False):
"""Set a group or per element property."""
if name in self._protected_keys:
raise KeyError(f"{name} is a protected key name")
if element:
if len(value) != len(self._coordinates):
raise AssertionError(
f"property '{name}' does not have the same length "
"as the number of elements"
)
assert (
name not in self._grp_props
), f"{name} is already set as a group property"
self._el_props[name] = value
else:
assert (
name not in self._el_props
), f"{name} is already set as an element property"
self._grp_props[name] = value
def set_property_many(self, properties, element=False):
"""Set multiple group or per element properties."""
for key, val in properties.items():
self.set_property(key, val, element=element)
def get_elements_property(self, name):
"""Return a single property."""
if name == "position":
return
|
np.array(self._positions)
|
numpy.array
|
import os
import numpy as np
import matplotlib.pyplot as plt
class YOLO_Kmeans:
def __init__(self, cluster_number, filename, save_path):
self.cluster_number = cluster_number
self.filename = filename
self.save_path = save_path
def iou(self, boxes, clusters): # 1 box -> k clusters
n = boxes.shape[0]
k = self.cluster_number
box_area = boxes[:, 0] * boxes[:, 1]
box_area = box_area.repeat(k)
box_area = np.reshape(box_area, (n, k))
cluster_area = clusters[:, 0] * clusters[:, 1]
cluster_area = np.tile(cluster_area, [1, n])
cluster_area = np.reshape(cluster_area, (n, k))
box_w_matrix = np.reshape(boxes[:, 0].repeat(k), (n, k))
cluster_w_matrix = np.reshape(np.tile(clusters[:, 0], (1, n)), (n, k))
min_w_matrix = np.minimum(cluster_w_matrix, box_w_matrix)
box_h_matrix = np.reshape(boxes[:, 1].repeat(k), (n, k))
cluster_h_matrix = np.reshape(np.tile(clusters[:, 1], (1, n)), (n, k))
min_h_matrix = np.minimum(cluster_h_matrix, box_h_matrix)
inter_area = np.multiply(min_w_matrix, min_h_matrix)
result = inter_area / (box_area + cluster_area - inter_area)
return result
def avg_iou(self, boxes, clusters):
accuracy = np.mean([np.max(self.iou(boxes, clusters), axis=1)])
return accuracy
def kmeans(self, boxes, k, dist=np.median):
box_number = boxes.shape[0]
distances = np.empty((box_number, k))
last_nearest = np.zeros((box_number,))
np.random.seed()
clusters = boxes[np.random.choice(
box_number, k, replace=False)] # init k clusters
while True:
distances = 1 - self.iou(boxes, clusters)
current_nearest = np.argmin(distances, axis=1)
if (last_nearest == current_nearest).all():
break # clusters won't change
for cluster in range(k):
clusters[cluster] = dist( # update clusters
boxes[current_nearest == cluster], axis=0)
last_nearest = current_nearest
return clusters, current_nearest
def result2txt(self, data):
f = open("yolo_anchors.txt", 'w')
row =
|
np.shape(data)
|
numpy.shape
|
"""Module handling the creation and use of migration matrices."""
from copy import deepcopy
from warnings import warn
import numpy as np
from .binning import Binning, CartesianProductBinning
class ResponseMatrix:
"""Matrix that describes the detector response to true events.
Parameters
----------
reco_binning : RectangularBinning
The Binning object describing the reco categorization.
truth_binning : RectangularBinning
The Binning object describing the truth categorization.
nuisance_indices : list of ints, optional
List of indices of nuisance truth bins.
These are treated like their efficiency is exactly 1.
impossible_indices :list of ints, optional
List of indices of impossible reco bins.
These are treated like their probability is exactly 0.
response_binning : CartesianProductBinning, optional
The Binning object describing the reco and truth categorization.
Usually this will be generated from the truth and reco binning.
Notes
-----
The truth and reco binnings will be combined with their
`cartesian_product` method.
The truth bins corresonding to the `nuisance_indices` will be treated
like they have a total efficiency of 1.
The reco bins corresonding to the `impossible_indices` will be treated
like they are filled with a probability of 0.
Two response matrices can be combined by adding them ``new_resp = respA +
respB``. This yields a new matrix that is equivalent to one that has been
filled with the data in both ``respA`` and ``respB``. The truth and reco
binnings in ``respA`` and ``respB`` must be identical for this to make
sense.
Attributes
----------
truth_binning : Binning
The :class:`.Binning` object for the truth information of the events.
reco_binning : Binning
The :class:`.Binning` object for the reco information of the events.
response_binning : CartesianProductBinning
The :class:`.CartesianProductBinning` of reco and truth binning.
nuisance_indices : list of int
The truth data indices that will be handled as nuisance bins.
impossible_indices : list of int
The reco data indices that will be treated as impossible to occur.
filled_truth_indices : list of int
The data indices of truth bins that have at least one event in them.
"""
def __init__(
self,
reco_binning,
truth_binning,
nuisance_indices=None,
impossible_indices=None,
response_binning=None,
):
if nuisance_indices is None:
nuisance_indices = []
if impossible_indices is None:
impossible_indices = []
self.truth_binning = truth_binning
self.reco_binning = reco_binning
if response_binning is None:
self.response_binning = CartesianProductBinning(
[reco_binning.clone(dummy=True), truth_binning.clone(dummy=True)]
)
else:
self.response_binning = response_binning
self.nuisance_indices = nuisance_indices
self.impossible_indices = impossible_indices
self._update_filled_indices()
def _update_filled_indices(self):
"""Update the list of filled truth indices."""
self.filled_truth_indices = np.argwhere(
self.get_truth_entries_as_ndarray() > 0
).flatten()
def fill(self, *args, **kwargs):
"""Fill events into the binnings."""
self.truth_binning.fill(*args, **kwargs)
self.reco_binning.fill(*args, **kwargs)
self.response_binning.fill(*args, **kwargs)
self._update_filled_indices()
def _fix_rounding_errors(self):
"""Fix rounding errors that cause impossible matrices."""
resp = self.get_response_values_as_ndarray()
truth = self.get_truth_values_as_ndarray()
resp = resp.reshape((resp.size // truth.size, truth.size), order="C")
resp = np.sum(resp, axis=0)
diff = truth - resp
if np.any(truth < 0):
raise RuntimeError("Illegal response matrix: Negative true weight!")
if np.any(resp < 0):
raise RuntimeError(
"Illegal response matrix: Negative total reconstructed weight!"
)
if np.any(diff < -1e-9): # Allow rounding errors
raise RuntimeError(
"Illegal response matrix: Higher total reconstructed than true weight!"
)
if np.any(diff < 0.0): # But make sure truth is >= reco
fixed_truth = np.where(diff < 0, resp, truth)
self.truth_binning.set_values_from_ndarray(fixed_truth)
def fill_from_csv_file(self, *args, **kwargs):
"""Fill binnings from csv file.
See :meth:`Binning.fill_from_csv_file
<remu.binning.Binning.fill_from_csv_file>`
for a description of the parameters.
See also
--------
fill_up_truth_from_csv_file : Re-fill only truth bins from different file.
"""
Binning.fill_multiple_from_csv_file(
[self.truth_binning, self.reco_binning, self.response_binning],
*args,
**kwargs
)
self._fix_rounding_errors()
self._update_filled_indices()
def fill_up_truth_from_csv_file(self, *args, **kwargs):
"""Re-fill the truth bins with the given csv file.
This can be used to get proper efficiencies if the true signal events
are saved in a separate file from the reconstructed events.
It takes the same parameters as :meth:`fill_from_csv_file`.
Notes
-----
A new truth binning is created and filled with the events from the
provided file. Each bin is compared to the corresponding bin in the
already present truth binning. The larger value of the two is taken as
the new truth. This way, event types that are not present in the pure
truth data, e.g. background, are not affected by this. It can only
*increase* the value of the truth bins, lowering their efficiency.
For each truth bin, one of the following *must* be true for this
operation to make sense:
* All events in the migration matrix are also present in the truth
file. In this case, the additional truth events lower the
efficiency of the truth bin. This is the case, for example, if not
all true signal events are reconstructed.
* All events in the truth file are also present in the migration
matrix. In this case, the events in the truth file have no
influence on the response matrix. This is the case, for example, if
only a subset of the reconstructed background is saved in the truth
file.
If there are events in the response matrix that are not in the truth
tree *and* there are events in the truth tree that are not in the
response matrix, this method will lead to a *wrong* efficiency of the
affected truth bin.
"""
new_truth_binning = deepcopy(self.truth_binning)
new_truth_binning.reset()
new_truth_binning.fill_from_csv_file(*args, **kwargs)
return self._replace_smaller_truth(new_truth_binning)
def fill_up_truth(self, *args, **kwargs):
"""Re-fill the truth bins with the given events file.
This can be used to get proper efficiencies if the true signal events
are stored separate from the reconstructed events.
It takes the same parameters as :meth:`fill`.
Notes
-----
A new truth binning is created and filled with the events from the
provided events. Each bin is compared to the corresponding bin in the
already present truth binning. The larger value of the two is taken as
the new truth. This way, event types that are not present in the pure
truth data, e.g. background, are not affected by this. It can only
*increase* the value of the truth bins, lowering their efficiency.
For each truth bin, one of the following *must* be true for this
operation to make sense:
* All events in the migration matrix are also present in the new truth
events. In this case, the additional truth events lower the
efficiency of the truth bin. This is the case, for example, if not
all true signal events are reconstructed.
* All events in the new truth events are also present in the migration
matrix. In this case, the events in the new truth events have no
influence on the response matrix. This is the case, for example, if
only a subset of the reconstructed background is saved in the truth
file.
If there are events in the response matrix that are not in the new truth
events *and* there are events in the new truth events that are not in the
response matrix, this method will lead to a *wrong* efficiency of the
affected truth bin.
"""
new_truth_binning = deepcopy(self.truth_binning)
new_truth_binning.reset()
new_truth_binning.fill(*args, **kwargs)
return self._replace_smaller_truth(new_truth_binning)
def _replace_smaller_truth(self, new_truth_binning):
new_values = new_truth_binning.get_values_as_ndarray()
new_entries = new_truth_binning.get_entries_as_ndarray()
new_sumw2 = new_truth_binning.get_sumw2_as_ndarray()
old_values = self.truth_binning.get_values_as_ndarray()
old_entries = self.truth_binning.get_entries_as_ndarray()
old_sumw2 = self.truth_binning.get_sumw2_as_ndarray()
if np.any(new_values < 0):
i = np.argwhere(new_values < 0)
raise RuntimeError(
"Filled-up values are negative in %d bins." % (i.size,), stacklevel=3
)
where = new_values > 0
diff_v = new_values - old_values
diff_e = new_entries - old_entries
# Check for bins where the fill-up is less than the original
if np.any(where & (diff_v < -1e-9)):
i = np.argwhere(where & (diff_v < -1e-9))
warn(
"Filled-up values are less than the original filling in %d bins. This should not happen!"
% (i.size,),
stacklevel=3,
)
if np.any(where & (diff_e < 0)):
i = np.argwhere(where & (diff_e < 0))
warn(
"Filled-up entries are less than the original filling in %d bins. This should not happen!"
% (i.size,),
stacklevel=3,
)
where = where & (diff_v >= 0) & (diff_e >= 0)
self.truth_binning.set_values_from_ndarray(
np.where(where, new_values, old_values)
)
self.truth_binning.set_entries_from_ndarray(
np.where(where, new_entries, old_entries)
)
self.truth_binning.set_sumw2_from_ndarray(np.where(where, new_sumw2, old_sumw2))
self._fix_rounding_errors()
self._update_filled_indices()
def reset(self):
"""Reset all binnings."""
self.truth_binning.reset()
self.reco_binning.reset()
self.response_binning.reset()
self._update_filled_indices()
def set_truth_values_from_ndarray(self, *args, **kwargs):
"""Set the values of the truth binning as `ndarray`."""
self.truth_binning.set_values_from_ndarray(*args, **kwargs)
def set_truth_entries_from_ndarray(self, *args, **kwargs):
"""Set the number of entries in the truth binning as `ndarray`."""
self.truth_binning.set_entries_from_ndarray(*args, **kwargs)
self._update_filled_indices()
def set_truth_sumw2_from_ndarray(self, *args, **kwargs):
"""Set the sum of squared weights in the truth binning as `ndarray`."""
self.truth_binning.set_sumw2_from_ndarray(*args, **kwargs)
def set_reco_values_from_ndarray(self, *args, **kwargs):
"""Set the values of the reco binning as `ndarray`."""
self.reco_binning.set_values_from_ndarray(*args, **kwargs)
def set_reco_entries_from_ndarray(self, *args, **kwargs):
"""Set the number of entries in the reco binning as `ndarray`."""
self.reco_binning.set_entries_from_ndarray(*args, **kwargs)
def set_reco_sumw2_from_ndarray(self, *args, **kwargs):
"""Set the sum of squared weights in the reco binning as `ndarray`."""
self.reco_binning.set_sumw2_from_ndarray(*args, **kwargs)
def set_response_values_from_ndarray(self, *args, **kwargs):
"""Set the values of the response binning as `ndarray`."""
self.response_binning.set_values_from_ndarray(*args, **kwargs)
def set_response_entries_from_ndarray(self, *args, **kwargs):
"""Set the number of entries in the response binning as `ndarray`."""
self.response_binning.set_entries_from_ndarray(*args, **kwargs)
def set_response_sumw2_from_ndarray(self, *args, **kwargs):
"""Set the sum of squared weights in the response binning as `ndarray`."""
self.response_binning.set_sumw2_from_ndarray(*args, **kwargs)
def get_truth_values_as_ndarray(self, *args, **kwargs):
"""Get the values of the truth binning as `ndarray`."""
return self.truth_binning.get_values_as_ndarray(*args, **kwargs)
def get_truth_entries_as_ndarray(self, *args, **kwargs):
"""Get the number of entries in the truth binning as `ndarray`."""
return self.truth_binning.get_entries_as_ndarray(*args, **kwargs)
def get_truth_sumw2_as_ndarray(self, *args, **kwargs):
"""Get the sum of squared weights in the truth binning as `ndarray`."""
return self.truth_binning.get_sumw2_as_ndarray(*args, **kwargs)
def get_reco_values_as_ndarray(self, *args, **kwargs):
"""Get the values of the reco binning as `ndarray`."""
return self.reco_binning.get_values_as_ndarray(*args, **kwargs)
def get_reco_entries_as_ndarray(self, *args, **kwargs):
"""Get the number of entries in the reco binning as `ndarray`."""
return self.reco_binning.get_entries_as_ndarray(*args, **kwargs)
def get_reco_sumw2_as_ndarray(self, *args, **kwargs):
"""Get the sum of squared weights in the reco binning as `ndarray`."""
return self.reco_binning.get_sumw2_as_ndarray(*args, **kwargs)
def get_response_values_as_ndarray(self, *args, **kwargs):
"""Get the values of the response binning as `ndarray`."""
return self.response_binning.get_values_as_ndarray(*args, **kwargs)
def get_response_entries_as_ndarray(self, *args, **kwargs):
"""Get the number of entries in the response binning as `ndarray`."""
return self.response_binning.get_entries_as_ndarray(*args, **kwargs)
def get_response_sumw2_as_ndarray(self, *args, **kwargs):
"""Get the sum of squared weights in the response binning as `ndarray`."""
return self.response_binning.get_sumw2_as_ndarray(*args, **kwargs)
@staticmethod
def _normalize_matrix(M):
"""Make sure all efficiencies are less than or equal to 1."""
eff = np.sum(M, axis=-2)
eff = np.where(eff < 1.0, 1.0, eff)[..., np.newaxis, :]
return M / eff
def get_response_matrix_as_ndarray(self, shape=None, truth_indices=None):
"""Return the ResponseMatrix as a ndarray.
Uses the information in the truth and response binnings to calculate
the response matrix.
Parameters
----------
shape : tuple of ints, optional
The shape of the returned ndarray.
Default: ``(#(reco bins), #(truth bins))``
truth_indices : list of ints, optional
Only return the response of the given truth bins.
Default: Return full matrix.
Returns
-------
ndarray
Notes
-----
If shape is `None`, it s set to ``(#(reco bins), #(truth bins))``. The
expected response of a truth vector can then be calculated like this::
v_reco = response_matrix.dot(v_truth)
If `truth_indices` are provided, a sliced matrix with only the given
columns will be returned.
See also
--------
get_mean_response_matrix_as_ndarray
"""
if truth_indices is None:
truth_indices = slice(None, None, None)
original_shape = (self.reco_binning.data_size, self.truth_binning.data_size)
# Get the bin response entries
M = self.get_response_values_as_ndarray(original_shape)[:, truth_indices]
# Normalize to number of simulated events
N_t = self.get_truth_values_as_ndarray(indices=truth_indices)
M /= np.where(N_t > 0.0, N_t, 1.0)
# Deal with bins where N_reco > N_truth
M = ResponseMatrix._normalize_matrix(M)
if shape is not None:
M = M.reshape(shape, order="C")
return M
def _get_stat_error_parameters(
self,
expected_weight=1.0,
nuisance_indices=None,
impossible_indices=None,
truth_indices=None,
):
r"""Return $\beta^t_1j$, $\beta^t_2j$, $\alpha^t_{ij}$, $\hat{w}^t_{ij}$ and $\sigma(w^t_{ij})$.
Used for calculations of statistical variance.
If `truth_indices` are provided, a sliced matrix with only the given
columns will be returned.
"""
if nuisance_indices is None:
nuisance_indices = self.nuisance_indices
if impossible_indices is None:
impossible_indices = self.impossible_indices
if truth_indices is None:
truth_indices = slice(None, None, None)
else:
# Translate nuisance indices to sliced indices
i = np.searchsorted(truth_indices, nuisance_indices)
mask = i < len(truth_indices)
i = i[mask]
nuisance_indices = np.asarray(nuisance_indices)[mask]
mask = nuisance_indices == np.asarray(truth_indices)[i]
nuisance_indices = np.array(i[mask])
del mask
del i
N_reco = self.reco_binning.data_size
N_truth = self.truth_binning.data_size
orig_shape = (N_reco, N_truth)
epsilon = 1e-50
resp_entries = self.get_response_entries_as_ndarray(orig_shape)[
:, truth_indices
]
truth_entries = self.get_truth_entries_as_ndarray(indices=truth_indices)
# Get parameters of Beta distribution characterizing the efficiency.
# Assume a prior of Beta(1,1), i.e. flat in efficiency.
beta1 = np.sum(resp_entries, axis=0)
# "Waste bin" of not selected events
waste_entries = truth_entries - beta1
if np.any(waste_entries < 0):
raise RuntimeError(
"Illegal response matrix: More reconstructed than true events!"
)
beta1 = np.asfarray(beta1 + 1)
beta2 = np.asfarray(waste_entries + 1)
# Set efficiency of nuisance bins to 1, i.e. beta2 to (almost) zero.
beta2[nuisance_indices] = epsilon
# Get parameters of Dirichlet distribution characterizing the distribution within the reco bins.
# Assume a prior where we expect most of the events to be clustered in a few reco bins.
# Most events should end up divided into about 3 bins per reco variable:
# the correct one and the two neighbouring ones.
# Since the binning is orthogonal, we expect the number of bins to be roughly 3**N_variables.
# This leads to prior parameters >1 for degenerate reco binnings with < 3 bins/variable.
# We protect against that by setting the maximum prior value to 1.
n_vars = len(self.reco_binning.phasespace)
prior = min(1.0, 3.0**n_vars / (N_reco - len(impossible_indices)))
alpha = np.asfarray(resp_entries) + prior
# Set efficiency of impossible bins to (almost) 0
alpha[impossible_indices] = epsilon
# Estimate mean weight
resp1 = self.get_response_values_as_ndarray(orig_shape)[:, truth_indices]
resp2 = self.get_response_sumw2_as_ndarray(orig_shape)[:, truth_indices]
truth1 = self.get_truth_values_as_ndarray(indices=truth_indices)
truth2 = self.get_truth_sumw2_as_ndarray(indices=truth_indices)
# Add truth bin of all events
resp1 = np.append(resp1, truth1[np.newaxis, :], axis=0)
resp2 =
|
np.append(resp2, truth2[np.newaxis, :], axis=0)
|
numpy.append
|
import numpy as np
from numpy import random
from scipy.interpolate import interp1d
import pandas as pd
msun = 1.9891e30
rsun = 695500000.0
G = 6.67384e-11
AU = 149597870700.0
def component_noise(tessmag, readmod=1, zodimod=1):
sys = 59.785
star_mag_level, star_noise_level = np.array(
[
[4.3885191347753745, 12.090570910640581],
[12.023294509151416, 467.96434635620614],
[17.753743760399338, 7779.603209291808],
]
).T
star_pars = np.polyfit(star_mag_level, np.log10(star_noise_level), 1)
zodi_mag_level, zodi_noise_level = np.array(
[
[8.686356073211314, 18.112513551189224],
[13.08901830282862, 688.2812796087189],
[16.68801996672213, 19493.670323892282],
]
).T
zodi_pars = np.polyfit(zodi_mag_level, np.log10(zodi_noise_level), 1)
read_mag_level, read_noise_level = np.array(
[
[8.476705490848586, 12.31474807751376],
[13.019134775374376, 522.4985702369348],
[17.841098169717142, 46226.777232915076],
]
).T
read_pars = np.polyfit(read_mag_level, np.log10(read_noise_level), 1)
c1, c2, c3, c4 = (
10 ** (tessmag * star_pars[0] + star_pars[1]),
10 ** (tessmag * zodi_pars[0] + zodi_pars[1]),
10 ** (tessmag * read_pars[0] + read_pars[1]),
sys,
)
return np.sqrt(
c1 ** 2 + (readmod * c2) ** 2 + (zodimod * c3) ** 2 + c4 ** 2
)
def rndm(a, b, g, size=1):
"""Power-law gen for pdf(x)\propto x^{g-1} for a<=x<=b"""
r = np.random.random(size=size)
ag, bg = a ** g, b ** g
return (ag + (bg - ag) * r) ** (1.0 / g)
def Fressin13_select_extrap(nselect=1):
# create a pot for dressing numbers (balls)
balls = np.array([])
# pot 1 contains rp=0.8-0.1.25, p=0.8-2
p1 = np.zeros(180) + 1
# pot 2 contains rp=1.25-2.0, p=0.8-2
p2 = np.zeros(170) + 2
# pot 3 contains rp=2-4, p=0.8-2
p3 = np.zeros(35) + 3
# pot 4 contains rp=4-6, p=0.8-2
p4 = np.zeros(4) + 4
# pot 5 contains rp=6-22, p=0.8-2
p5 = np.zeros(15) + 5
# pot 6 contains rp=0.8-0.1.25, p=2-3.4
p6 = np.zeros(610) + 6
# pot 7 contains rp=1.25-2.0, p=2-3.4
p7 = np.zeros(740) + 7
# pot 8 contains rp=2-4, p=2-3.4
p8 = np.zeros(180) + 8
# pot 9 contains rp=4-6, p=2-3.4
p9 = np.zeros(6) + 9
# pot 10 contains rp=6-22, p=2-3.4
p10 = np.zeros(67) + 10
# pot 11 contains rp=0.8-0.1.25, p=3.4-5.9
p11 = np.zeros(1720) + 11
# pot 12 contains rp=1.25-2.0, p=3.4-5.9
p12 = np.zeros(1490) + 12
# pot 13 contains rp=2-4, p=3.4-5.9
p13 = np.zeros(730) + 13
# pot 14 contains rp=4-6, p=3.4-5.9
p14 = np.zeros(110) + 14
# pot 15 contains rp=6-22, p=3.4-5.9
p15 = np.zeros(170) + 15
# pot 16 contains rp=0.8-0.1.25, p=5.9-10
p16 = np.zeros(2700) + 16
# pot 17 contains rp=1.25-2.0, p=5.9-10
p17 = np.zeros(2900) + 17
# pot 18 contains rp=2-4, p=5.9-10
p18 = np.zeros(1930) + 18
# pot 19 contains rp=4-6, p=5.9-10
p19 = np.zeros(91) + 19
# pot 20 contains rp=6-22, p=5.9-10
p20 = np.zeros(180) + 20
# pot 21 contains rp=0.8-0.1.25, p=10-17
p21 = np.zeros(2700) + 21
# pot 22 contains rp=1.25-2.0, p=10-17
p22 = np.zeros(4300) + 22
# pot 23 contains rp=2-4, p=10-17
p23 = np.zeros(3670) + 23
# pot 24 contains rp=4-6, p=10-17
p24 = np.zeros(290) + 24
# pot 25 contains rp=6-22, p=10-17
p25 = np.zeros(270) + 25
# pot 26 contains rp=0.8-0.1.25, p=17-29
p26 = np.zeros(2930) + 26
# pot 27 contains rp=1.25-2.0, p=17-29
p27 = np.zeros(4490) + 27
# pot 28 contains rp=2-4, p=17-29
p28 = np.zeros(5290) + 28
# pot 29 contains rp=4-6, p=17-29
p29 = np.zeros(320) + 29
# pot 30 contains rp=6-22, p=17-29
p30 = np.zeros(230) + 30
# pot 31 contains rp=0.8-0.1.25, p=29-50
p31 = np.zeros(4080) + 31
# pot 32 contains rp=1.25-2.0, p=29-50
p32 = np.zeros(5290) + 32
# pot 33 contains rp=2-4, p=29-50
p33 = np.zeros(6450) + 33
# pot 34 contains rp=4-6, p=29-50
p34 = np.zeros(490) + 34
# pot 35 contains rp=6-22, p=29-50
p35 = np.zeros(350) + 35
# pot 36 contains rp=0.8-0.1.25, p=50-85
p36 = np.zeros(3460) + 36
# pot 37 contains rp=1.25-2.0, p=50-85
p37 = np.zeros(3660) + 37
# pot 38 contains rp=2-4, p=50-85
p38 = np.zeros(5250) + 38
# pot 39 contains rp=4-6, p=50-85
p39 = np.zeros(660) + 39
# pot 40 contains rp=6-22, p=50-85
p40 = np.zeros(710) + 40
# pot 36 contains rp=0.8-0.1.25, p=50-85
p41 = np.zeros(3460) + 41
# pot 37 contains rp=1.25-2.0, p=50-85
p42 = np.zeros(3660) + 42
# pot 38 contains rp=2-4, p=50-85
p43 = np.zeros(5250) + 43
# pot 39 contains rp=4-6, p=50-85
p44 = np.zeros(660) + 44
# pot 40 contains rp=6-22, p=50-85
p45 = np.zeros(710) + 45
# pot 36 contains rp=0.8-0.1.25, p=50-85
p46 = np.zeros(3460) + 46
# pot 37 contains rp=1.25-2.0, p=50-85
p47 = np.zeros(3660) + 47
# pot 38 contains rp=2-4, p=50-85
p48 = np.zeros(5250) + 48
# pot 39 contains rp=4-6, p=50-85
p49 = np.zeros(660) + 49
# pot 40 contains rp=6-22, p=50-85
p50 = np.zeros(710) + 50
# pot 36 contains rp=0.8-0.1.25, p=50-85
p51 = np.zeros(3460) + 51
# pot 37 contains rp=1.25-2.0, p=50-85
p52 = np.zeros(3660) + 52
# pot 38 contains rp=2-4, p=50-85
p53 = np.zeros(5250) + 53
# pot 39 contains rp=4-6, p=50-85
p54 = np.zeros(660) + 54
# pot 40 contains rp=6-22, p=50-85
p55 = np.zeros(710) + 55
balls = np.r_[
balls,
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12,
p13,
p14,
p15,
p16,
p17,
p18,
p19,
p20,
p21,
p22,
p23,
p24,
p25,
p26,
p27,
p28,
p29,
p30,
p31,
p32,
p33,
p34,
p35,
p36,
p37,
p38,
p39,
p40,
p41,
p42,
p43,
p44,
p45,
p46,
p47,
p48,
p49,
p50,
p51,
p52,
p53,
p54,
p55,
]
# lookup for what the balls mean
# outputs radlow, radhigh, Plow, Phigh
ball_lookup = {
0: [0.0, 0.0, 0.0, 0.0],
1: [0.8, 1.25, 0.8, 2.0],
2: [1.25, 2.0, 0.8, 2.0],
3: [2.0, 4.0, 0.8, 2.0],
4: [4.0, 6.0, 0.8, 2.0],
5: [6.0, 22.0, 0.8, 2.0],
6: [0.8, 1.25, 2.0, 3.4],
7: [1.25, 2.0, 2.0, 3.4],
8: [2.0, 4.0, 2.0, 3.4],
9: [4.0, 6.0, 2.0, 3.4],
10: [6.0, 22.0, 2.0, 3.4],
11: [0.8, 1.25, 3.4, 5.9],
12: [1.25, 2.0, 3.4, 5.9],
13: [2.0, 4.0, 3.4, 5.9],
14: [4.0, 6.0, 3.4, 5.9],
15: [6.0, 22.0, 3.4, 5.9],
16: [0.8, 1.25, 5.9, 10.0],
17: [1.25, 2.0, 5.9, 10.0],
18: [2.0, 4.0, 5.9, 10.0],
19: [4.0, 6.0, 5.9, 10.0],
20: [6.0, 22.0, 5.9, 10.0],
21: [0.8, 1.25, 10.0, 17.0],
22: [1.25, 2.0, 10.0, 17.0],
23: [2.0, 4.0, 10.0, 17.0],
24: [4.0, 6.0, 10.0, 17.0],
25: [6.0, 22.0, 10.0, 17.0],
26: [0.8, 1.25, 17.0, 29.0],
27: [1.25, 2.0, 17.0, 29.0],
28: [2.0, 4.0, 17.0, 29.0],
29: [4.0, 6.0, 17.0, 29.0],
30: [6.0, 22.0, 17.0, 29.0],
31: [0.8, 1.25, 29.0, 50.0],
32: [1.25, 2.0, 29.0, 50.0],
33: [2.0, 4.0, 29.0, 50.0],
34: [4.0, 6.0, 29.0, 50.0],
35: [6.0, 22.0, 29.0, 50.0],
36: [0.8, 1.25, 50.0, 85.0],
37: [1.25, 2.0, 50.0, 85.0],
38: [2.0, 4.0, 50.0, 85.0],
39: [4.0, 6.0, 50.0, 85.0],
40: [6.0, 22.0, 50.0, 85.0],
41: [0.8, 1.25, 50.0, 150.0],
42: [1.25, 2.0, 50.0, 150.0],
43: [2.0, 4.0, 50.0, 150.0],
44: [4.0, 6.0, 50.0, 150.0],
45: [6.0, 22.0, 50.0, 150.0],
46: [0.8, 1.25, 150.0, 270.0],
47: [1.25, 2.0, 150.0, 270.0],
48: [2.0, 4.0, 150.0, 270.0],
49: [4.0, 6.0, 150.0, 270.0],
50: [6.0, 22.0, 150.0, 270.0],
51: [0.8, 1.25, 270.0, 480.0],
52: [1.25, 2.0, 270.0, 480.0],
53: [2.0, 4.0, 270.0, 480.0],
54: [4.0, 6.0, 270.0, 480.0],
55: [6.0, 22.0, 270.0, 480.0],
}
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period = np.zeros(nselect)
for i, samp in enumerate(rsamps):
rl, rh, pl, ph = ball_lookup[samp]
if samp in [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]:
# check for giant planets
# if a giant planet than draw power law
radius[i] = rndm(6, 22, -1.7)
else:
radius[i] = random.uniform(low=rl, high=rh)
period[i] = random.uniform(low=pl, high=ph)
return radius, period
def Dressing15_select_extrap(nselect=1):
"""
period bins = 0.5, 0.91, 1.66, 3.02, 5.49, 10.0, 18.2, 33.1, 60.3, 110., 200.
"""
# create a pot for dressing numbers (balls)
balls = np.array([])
# pot 1 contains rp=0.5-1.0, p=0.5-0.91
p1 = np.zeros(400) + 1
# pot 2 contains rp=1.0-1.5, p=0.5-0.91
p2 = np.zeros(460) + 2
# pot 3 contains rp=1.5-2.0, p=0.5-0.91
p3 = np.zeros(61) + 3
# pot 4 contains rp=2.0-2.5, p=0.5-0.91
p4 = np.zeros(2) + 4
# pot 5 contains rp=2.5-3.0, p=0.5-0.91
p5 = np.zeros(0) + 5
# pot 6 contains rp=3.0-3.5, p=0.5-0.91
p6 = np.zeros(0) + 6
# pot 7 contains rp=3.5-4.0, p=0.5-0.91
p7 = np.zeros(0) + 7
# pot 1 contains rp=0.5-1.0, p=0.91, 1.66
p8 = np.zeros(1500) + 8
# pot 2 contains rp=1.0-1.5, p=0.91, 1.66
p9 = np.zeros(1400) + 9
# pot 3 contains rp=1.5-2.0, p=0.91, 1.66
p10 = np.zeros(270) + 10
# pot 4 contains rp=2.0-2.5, p=0.91, 1.66
p11 = np.zeros(9) + 11
# pot 5 contains rp=2.5-3.0, p=0.91, 1.66
p12 = np.zeros(4) + 12
# pot 6 contains rp=3.0-3.5, p=0.91, 1.66
p13 = np.zeros(6) + 13
# pot 7 contains rp=3.5-4.0, p=0.91, 1.66
p14 = np.zeros(8) + 14
# pot 1 contains rp=0.5-1.0, p=1.66, 3.02
p15 = np.zeros(4400) + 15
# pot 2 contains rp=1.0-1.5, p=1.66, 3.02
p16 = np.zeros(3500) + 16
# pot 3 contains rp=1.5-2.0, p=1.66, 3.02
p17 = np.zeros(1200) + 17
# pot 4 contains rp=2.0-2.5, p=1.66, 3.02
p18 = np.zeros(420) + 18
# pot 5 contains rp=2.5-3.0, p=1.66, 3.02
p19 = np.zeros(230) + 19
# pot 6 contains rp=3.0-3.5, p=1.66, 3.02
p20 = np.zeros(170) + 20
# pot 7 contains rp=3.5-4.0, p=1.66, 3.02
p21 = np.zeros(180) + 21
# pot 1 contains rp=0.5-1.0, p=3.02, 5.49
p22 = np.zeros(5500) + 22
# pot 2 contains rp=1.0-1.5, p=3.02, 5.49
p23 = np.zeros(5700) + 23
# pot 3 contains rp=1.5-2.0, p=3.02, 5.49
p24 = np.zeros(2500) + 24
# pot 4 contains rp=2.0-2.5, p=3.02, 5.49
p25 = np.zeros(1800) + 25
# pot 5 contains rp=2.5-3.0, p=3.02, 5.49
p26 = np.zeros(960) + 26
# pot 6 contains rp=3.0-3.5, p=3.02, 5.49
p27 = np.zeros(420) + 27
# pot 7 contains rp=3.5-4.0, p=3.02, 5.49
p28 = np.zeros(180) + 28
# pot 1 contains rp=0.5-1.0, p=5.49, 10.0
p29 = np.zeros(10000) + 29
# pot 2 contains rp=1.0-1.5, p=5.49, 10.0
p30 = np.zeros(10000) + 30
# pot 3 contains rp=1.5-2.0, p=5.49, 10.0
p31 = np.zeros(6700) + 31
# pot 4 contains rp=2.0-2.5, p=5.49, 10.0
p32 = np.zeros(6400) + 32
# pot 5 contains rp=2.5-3.0, p=5.49, 10.0
p33 = np.zeros(2700) + 33
# pot 6 contains rp=3.0-3.5, p=5.49, 10.0
p34 = np.zeros(1100) + 34
# pot 7 contains rp=3.5-4.0, p=5.49, 10.0
p35 = np.zeros(360) + 35
# pot 1 contains rp=0.5-1.0, p=10.0, 18.2
p36 = np.zeros(12000) + 36
# pot 2 contains rp=1.0-1.5, p=10.0, 18.2
p37 = np.zeros(13000) + 37
# pot 3 contains rp=1.5-2.0, p=10.0, 18.2
p38 = np.zeros(13000) + 38
# pot 4 contains rp=2.0-2.5, p=10.0, 18.2
p39 = np.zeros(9300) + 39
# pot 5 contains rp=2.5-3.0, p=10.0, 18.2
p40 = np.zeros(3800) + 40
# pot 6 contains rp=3.0-3.5, p=10.0, 18.2
p41 = np.zeros(1400) + 41
# pot 7 contains rp=3.5-4.0, p=10.0, 18.2
p42 = np.zeros(510) + 42
# pot 1 contains rp=0.5-1.0, p=18.2, 33.1
p43 = np.zeros(11000) + 43
# pot 2 contains rp=1.0-1.5, p=18.2, 33.1
p44 = np.zeros(16000) + 44
# pot 3 contains rp=1.5-2.0, p=18.2, 33.1
p45 = np.zeros(14000) + 45
# pot 4 contains rp=2.0-2.5, p=18.2, 33.1
p46 = np.zeros(10000) + 46
# pot 5 contains rp=2.5-3.0, p=18.2, 33.1
p47 = np.zeros(4600) + 47
# pot 6 contains rp=3.0-3.5, p=18.2, 33.1
p48 = np.zeros(810) + 48
# pot 7 contains rp=3.5-4.0, p=18.2, 33.1
p49 = np.zeros(320) + 49
# pot 1 contains rp=0.5-1.0, p=33.1, 60.3
p50 = np.zeros(6400) + 50
# pot 2 contains rp=1.0-1.5, p=33.1, 60.3
p51 = np.zeros(6400) + 51
# pot 3 contains rp=1.5-2.0, p=33.1, 60.3
p52 = np.zeros(12000) + 52
# pot 4 contains rp=2.0-2.5, p=33.1, 60.3
p53 = np.zeros(12000) + 53
# pot 5 contains rp=2.5-3.0, p=33.1, 60.3
p54 = np.zeros(5800) + 54
# pot 6 contains rp=3.0-3.5, p=33.1, 60.3
p55 = np.zeros(1600) + 55
# pot 7 contains rp=3.5-4.0, p=33.1, 60.3
p56 = np.zeros(210) + 56
# pot 1 contains rp=0.5-1.0, p=60.3, 110.
p57 = np.zeros(10000) + 57
# pot 2 contains rp=1.0-1.5, p=60.3, 110.
p58 = np.zeros(10000) + 58
# pot 3 contains rp=1.5-2.0, p=60.3, 110.
p59 = np.zeros(8300) + 59
# pot 4 contains rp=2.0-2.5, p=60.3, 110.
p60 = np.zeros(9600) + 60
# pot 5 contains rp=2.5-3.0, p=60.3, 110.
p61 = np.zeros(4200) + 61
# pot 6 contains rp=3.0-3.5, p=60.3, 110.
p62 = np.zeros(1700) + 62
# pot 7 contains rp=3.5-4.0, p=60.3, 110.
p63 = np.zeros(420) + 63
# pot 1 contains rp=0.5-1.0, p=110., 200.
p64 = np.zeros(19000) + 64
# pot 2 contains rp=1.0-1.5, p=110., 200.
p65 = np.zeros(19000) + 65
# pot 3 contains rp=1.5-2.0, p=110., 200.
p66 = np.zeros(10000) + 66
# pot 4 contains rp=2.0-2.5, p=110., 200.
p67 = np.zeros(4500) + 67
# pot 5 contains rp=2.5-3.0, p=110., 200.
p68 = np.zeros(1100) + 68
# pot 6 contains rp=3.0-3.5, p=110., 200.
p69 = np.zeros(160) + 69
# pot 7 contains rp=3.5-4.0, p=110., 200.
p70 = np.zeros(80) + 70
# pot 1 contains rp=0.5-1.0, p=110., 200.
p71 = np.zeros(19000) + 71
# pot 2 contains rp=1.0-1.5, p=110., 200.
p72 = np.zeros(19000) + 72
# pot 3 contains rp=1.5-2.0, p=110., 200.
p73 = np.zeros(10000) + 73
# pot 4 contains rp=2.0-2.5, p=110., 200.
p74 = np.zeros(4500) + 74
# pot 5 contains rp=2.5-3.0, p=110., 200.
p75 = np.zeros(1100) + 75
# pot 6 contains rp=3.0-3.5, p=110., 200.
p76 = np.zeros(160) + 76
# pot 7 contains rp=3.5-4.0, p=110., 200.
p77 = np.zeros(80) + 77
balls = np.r_[
balls,
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12,
p13,
p14,
p15,
p16,
p17,
p18,
p19,
p20,
p21,
p22,
p23,
p24,
p25,
p26,
p27,
p28,
p29,
p30,
p31,
p32,
p33,
p34,
p35,
p36,
p37,
p38,
p39,
p40,
p41,
p42,
p43,
p44,
p45,
p46,
p47,
p48,
p49,
p50,
p51,
p52,
p53,
p54,
p55,
p56,
p57,
p58,
p59,
p60,
p61,
p62,
p63,
p64,
p65,
p66,
p67,
p68,
p69,
p70,
p71,
p72,
p73,
p74,
p75,
p76,
p77,
]
# lookup for what the balls mean
# outputs radlow, radhigh, Plow, Phigh
# 0.5, 0.91, 1.66, 3.02, 5.49, 10.0, 18.2, 33.1, 60.3, 110., 200.
ball_lookup = {
1: [0.5, 1.0, 0.5, 0.91],
2: [1.0, 1.5, 0.5, 0.91],
3: [1.5, 2.0, 0.5, 0.91],
4: [2.0, 2.5, 0.5, 0.91],
5: [2.5, 3.0, 0.5, 0.91],
6: [3.0, 3.5, 0.5, 0.91],
7: [3.5, 4.0, 0.5, 0.91],
8: [0.5, 1.0, 0.91, 1.66],
9: [1.0, 1.5, 0.91, 1.66],
10: [1.5, 2.0, 0.91, 1.66],
11: [2.0, 2.5, 0.91, 1.66],
12: [2.5, 3.0, 0.91, 1.66],
13: [3.0, 3.5, 0.91, 1.66],
14: [3.5, 4.0, 0.91, 1.66],
15: [0.5, 1.0, 1.66, 3.02],
16: [1.0, 1.5, 1.66, 3.02],
17: [1.5, 2.0, 1.66, 3.02],
18: [2.0, 2.5, 1.66, 3.02],
19: [2.5, 3.0, 1.66, 3.02],
20: [3.0, 3.5, 1.66, 3.02],
21: [3.5, 4.0, 1.66, 3.02],
22: [0.5, 1.0, 3.02, 5.49],
23: [1.0, 1.5, 3.02, 5.49],
24: [1.5, 2.0, 3.02, 5.49],
25: [2.0, 2.5, 3.02, 5.49],
26: [2.5, 3.0, 3.02, 5.49],
27: [3.0, 3.5, 3.02, 5.49],
28: [3.5, 4.0, 3.02, 5.49],
29: [0.5, 1.0, 5.49, 10.0],
30: [1.0, 1.5, 5.49, 10.0],
31: [1.5, 2.0, 5.49, 10.0],
32: [2.0, 2.5, 5.49, 10.0],
33: [2.5, 3.0, 5.49, 10.0],
34: [3.0, 3.5, 5.49, 10.0],
35: [3.5, 4.0, 5.49, 10.0],
36: [0.5, 1.0, 10.0, 18.2],
37: [1.0, 1.5, 10.0, 18.2],
38: [1.5, 2.0, 10.0, 18.2],
39: [2.0, 2.5, 10.0, 18.2],
40: [2.5, 3.0, 10.0, 18.2],
41: [3.0, 3.5, 10.0, 18.2],
42: [3.5, 4.0, 10.0, 18.2],
43: [0.5, 1.0, 18.2, 33.1],
44: [1.0, 1.5, 18.2, 33.1],
45: [1.5, 2.0, 18.2, 33.1],
46: [2.0, 2.5, 18.2, 33.1],
47: [2.5, 3.0, 18.2, 33.1],
48: [3.0, 3.5, 18.2, 33.1],
49: [3.5, 4.0, 18.2, 33.1],
50: [0.5, 1.0, 33.1, 60.3],
51: [1.0, 1.5, 33.1, 60.3],
52: [1.5, 2.0, 33.1, 60.3],
53: [2.0, 2.5, 33.1, 60.3],
54: [2.5, 3.0, 33.1, 60.3],
55: [3.0, 3.5, 33.1, 60.3],
56: [3.5, 4.0, 33.1, 60.3],
57: [0.5, 1.0, 60.3, 110.0],
58: [1.0, 1.5, 60.3, 110.0],
59: [1.5, 2.0, 60.3, 110.0],
60: [2.0, 2.5, 60.3, 110.0],
61: [2.5, 3.0, 60.3, 110.0],
62: [3.0, 3.5, 60.3, 110.0],
63: [3.5, 4.0, 60.3, 110.0],
64: [0.5, 1.0, 110.0, 200.0],
65: [1.0, 1.5, 110.0, 200.0],
66: [1.5, 2.0, 110.0, 200.0],
67: [2.0, 2.5, 110.0, 200.0],
68: [2.5, 3.0, 110.0, 200.0],
69: [3.0, 3.5, 110.0, 200.0],
70: [3.5, 4.0, 110.0, 200.0],
71: [0.5, 1.0, 200.0, 365.0],
72: [1.0, 1.5, 200.0, 365.0],
73: [1.5, 2.0, 200.0, 365.0],
74: [2.0, 2.5, 200.0, 365.0],
75: [2.5, 3.0, 200.0, 365.0],
76: [3.0, 3.5, 200.0, 365.0],
77: [3.5, 4.0, 200.0, 365.0],
}
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period = np.zeros(nselect)
for i, samp in enumerate(rsamps):
rl, rh, pl, ph = ball_lookup[samp]
radius[i] = random.uniform(low=rl, high=rh)
period[i] = random.uniform(low=pl, high=ph)
return radius, period
def Petigura18_select(nselect=1):
# create a pot for pedigura numbers (balls)
balls = np.array([])
p1 = np.zeros(2) + 1
p2 = np.zeros(8) + 2
p3 = np.zeros(21) + 3
p4 = np.zeros(8) + 4
p5 = np.zeros(24) + 5
p6 = np.zeros(52) + 6
p7 = np.zeros(77) + 7
p8 = np.zeros(5) + 8
p9 = np.zeros(26) + 9
p10 = np.zeros(24) + 10
p11 = np.zeros(145) + 11
p12 = np.zeros(259) + 12
p13 = np.zeros(5) + 13
p14 = np.zeros(12) + 14
p15 = np.zeros(18) + 15
p16 = np.zeros(17) + 16
p17 = np.zeros(38) + 17
p18 = np.zeros(168) + 18
p19 = np.zeros(12) + 19
p20 = np.zeros(8) + 20
p21 = np.zeros(25) + 21
p22 = np.zeros(56) + 22
p23 = np.zeros(53) + 23
p24 = np.zeros(78) + 24
p25 = np.zeros(84) + 25
p26 = np.zeros(78) + 26
p27 = np.zeros(6) + 27
p28 = np.zeros(8) + 28
p29 = np.zeros(94) + 29
p30 = np.zeros(180) + 30
p31 = np.zeros(185) + 31
p32 = np.zeros(258) + 32
p33 = np.zeros(275) + 33
p34 = np.zeros(312) + 34
p35 = np.zeros(225) + 35
p36 = np.zeros(8) + 36
p37 = np.zeros(77) + 37
p38 = np.zeros(138) + 38
p39 = np.zeros(423) + 39
p40 = np.zeros(497) + 40
p41 = np.zeros(667) + 41
p42 = np.zeros(475) + 42
p43 = np.zeros(270) + 43
p44 = np.zeros(147) + 44
p45 = np.zeros(8) + 45
p46 = np.zeros(34) + 46
p47 = np.zeros(125) + 47
p48 = np.zeros(202) + 48
p49 = np.zeros(279) + 49
p50 = np.zeros(261) + 50
p51 = np.zeros(251) + 51
p52 = np.zeros(186) + 52
p53 = np.zeros(360) + 53
p54 = np.zeros(393) + 54
p55 = np.zeros(12) + 55
p56 = np.zeros(36) + 56
p57 = np.zeros(141) + 57
p58 = np.zeros(263) + 58
p59 = np.zeros(450) + 59
p60 = np.zeros(350) + 60
p61 = np.zeros(287) + 61
p62 = np.zeros(249) + 62
p63 = np.zeros(12) + 63
p64 = np.zeros(52) + 64
p65 = np.zeros(128) + 65
p66 = np.zeros(315) + 66
p67 = np.zeros(205) + 67
p68 = np.zeros(447) + 68
p69 = np.zeros(8) + 69
p70 = np.zeros(50) + 70
balls = np.r_[
balls,
p1,
p2,
p3,
p4,
p5,
p6,
p7,
p8,
p9,
p10,
p11,
p12,
p13,
p14,
p15,
p16,
p17,
p18,
p19,
p20,
p21,
p22,
p23,
p24,
p25,
p26,
p27,
p28,
p29,
p30,
p31,
p32,
p33,
p34,
p35,
p36,
p37,
p38,
p39,
p40,
p41,
p42,
p43,
p44,
p45,
p46,
p47,
p48,
p49,
p50,
p51,
p52,
p53,
p54,
p55,
p56,
p57,
p58,
p59,
p60,
p61,
p62,
p63,
p64,
p65,
p66,
p67,
p68,
p69,
p70,
]
ball_lookup = {
0: [0.0, 0.0, 0.0, 0.0],
1: [11.31, 16.00, 1.00, 1.78],
2: [11.31, 16.00, 1.78, 3.16],
3: [11.31, 16.00, 3.16, 5.62],
4: [11.31, 16.00, 5.62, 10.00],
5: [11.31, 16.00, 31.62, 56.23],
6: [11.31, 16.00, 100.00, 177.83],
7: [11.31, 16.00, 177.83, 316.23],
8: [8.00, 11.31, 3.16, 5.62],
9: [8.00, 11.31, 17.78, 31.62],
10: [8.00, 11.31, 31.62, 56.23],
11: [8.00, 11.31, 100.00, 177.83],
12: [8.00, 11.31, 177.83, 316.23],
13: [5.66, 8.00, 3.16, 5.62],
14: [5.66, 8.00, 5.62, 10.00],
15: [5.66, 8.00, 10.00, 17.78],
16: [5.66, 8.00, 17.78, 31.62],
17: [5.66, 8.00, 31.62, 56.23],
18: [5.66, 8.00, 177.83, 316.23],
19: [4.00, 5.66, 3.16, 5.62],
20: [4.00, 5.66, 5.62, 10.00],
21: [4.00, 5.66, 10.00, 17.78],
22: [4.00, 5.66, 17.78, 31.62],
23: [4.00, 5.66, 31.62, 56.23],
24: [4.00, 5.66, 56.23, 100.00],
25: [4.00, 5.66, 100.00, 177.83],
26: [4.00, 5.66, 177.83, 316.23],
27: [2.83, 4.00, 1.78, 3.16],
28: [2.83, 4.00, 3.16, 5.62],
29: [2.83, 4.00, 5.62, 10.00],
30: [2.83, 4.00, 10.00, 17.78],
31: [2.83, 4.00, 17.78, 31.62],
32: [2.83, 4.00, 31.62, 56.23],
33: [2.83, 4.00, 56.23, 100.00],
34: [2.83, 4.00, 100.00, 177.83],
35: [2.83, 4.00, 177.83, 316.23],
36: [2.00, 2.83, 1.78, 3.16],
37: [2.00, 2.83, 3.16, 5.62],
38: [2.00, 2.83, 5.62, 10.00],
39: [2.00, 2.83, 10.00, 17.78],
40: [2.00, 2.83, 17.78, 31.62],
41: [2.00, 2.83, 31.62, 56.23],
42: [2.00, 2.83, 56.23, 100.00],
43: [2.00, 2.83, 100.00, 177.83],
44: [2.00, 2.83, 177.83, 316.23],
45: [1.41, 2.00, 1.00, 1.78],
46: [1.41, 2.00, 1.78, 3.16],
47: [1.41, 2.00, 3.16, 5.62],
48: [1.41, 2.00, 5.62, 10.00],
49: [1.41, 2.00, 10.00, 17.78],
50: [1.41, 2.00, 17.78, 31.62],
51: [1.41, 2.00, 31.62, 56.23],
52: [1.41, 2.00, 56.23, 100.00],
53: [1.41, 2.00, 100.00, 177.83],
54: [1.41, 2.00, 177.83, 316.23],
55: [1.00, 1.41, 1.00, 1.78],
56: [1.00, 1.41, 1.78, 3.16],
57: [1.00, 1.41, 3.16, 5.62],
58: [1.00, 1.41, 5.62, 10.00],
59: [1.00, 1.41, 10.00, 17.78],
60: [1.00, 1.41, 17.78, 31.62],
61: [1.00, 1.41, 31.62, 56.23],
62: [1.00, 1.41, 56.23, 100.00],
63: [0.71, 1.00, 1.00, 1.78],
64: [0.71, 1.00, 1.78, 3.16],
65: [0.71, 1.00, 3.16, 5.62],
66: [0.71, 1.00, 5.62, 10.00],
67: [0.71, 1.00, 10.00, 17.78],
68: [0.71, 1.00, 17.78, 31.62],
69: [0.50, 0.71, 1.00, 1.78],
70: [0.50, 0.71, 1.78, 3.16],
}
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period = np.zeros(nselect)
for i, samp in enumerate(rsamps):
rl, rh, pl, ph = ball_lookup[samp]
if samp in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]:
# check for giant planets
# if a giant planet than draw power law
radius[i] = rndm(8, 16, -1.7)
else:
radius[i] = random.uniform(low=rl, high=rh)
period[i] = random.uniform(low=pl, high=ph)
return radius, period
def per2ars(per, mstar, rstar):
per_SI = per * 86400.0
mass_SI = mstar * msun
a3 = per_SI ** 2 * G * mass_SI / (4 * np.pi ** 2)
return a3 ** (1.0 / 3.0) / (rstar * rsun)
def get_duration(per, ars, cosi=0.0, b=0, rprs=0.0):
"""
returns the transit duration in days
"""
part1 = per / np.pi
part2 = 1.0 / ars
part3 = np.sqrt((1 + rprs) ** 2 - b ** 2)
part4 = np.sqrt(1 - cosi ** 2)
duration = part1 * np.arcsin(part2 * part3 / part4)
return duration
def get_transit_depth(Prad, rstar_solar):
"""
returns transit depth in ppm
"""
tdep = (Prad * 0.009155 / rstar_solar) ** 2 * 1.0e6 # ppm
return tdep
def get_rprs(Prad, rstar_solar):
return (Prad * 0.009155) / rstar_solar
def make_allplanets_df_vec_extrap(df, starid_zp):
# lets refector the above code to make it array operations
totalRows = df.loc[:, "Nplanets"].sum()
df.loc[:, "planetRadius"] = pd.Series()
df.loc[:, "planetPeriod"] = pd.Series()
df.loc[:, "starID"] = pd.Series()
radper_m = Dressing15_select_extrap(totalRows)
radper_fgk = Petigura18_select(totalRows)
# we need an array of indices
rowIdx = np.repeat(np.arange(df.shape[0]), np.array(df.Nplanets.values))
newdf = df.iloc[rowIdx]
newdf.loc[:, "starID"] = rowIdx + starid_zp
newdf.loc[:, "planetRadius"] = np.where(
newdf.isMdwarf, radper_m[0], radper_fgk[0]
)
newdf.loc[:, "planetPeriod"] = np.where(
newdf.isMdwarf, radper_m[1], radper_fgk[1]
)
newdf.set_index(np.arange(newdf.shape[0]), inplace=True)
return newdf, newdf.starID.iloc[-1]
def kepler_noise_1h(kepmag):
# 1 hour CDPP
# these numbers are from the Q14 measured rmscdpp
mag_level, noise_level = np.array(
[
[0.0, 20.0],
[3.0, 20.0],
[6.0, 20.0],
[8.0, 20.0],
[9.00995575221239, 20.000000000000057],
[9.120575221238939, 22.523364485981347],
[9.253318584070797, 23.925233644859844],
[9.380530973451327, 25.607476635514047],
[9.59070796460177, 27.570093457943983],
[9.773230088495575, 28.41121495327107],
[9.972345132743364, 28.691588785046775],
[10.143805309734514, 29.252336448598186],
[10.326327433628318, 28.97196261682248],
[10.525442477876107, 28.97196261682248],
[10.719026548672566, 28.691588785046775],
[10.857300884955752, 28.97196261682248],
[11.045353982300885, 28.97196261682248],
[11.27212389380531, 29.813084112149596],
[11.48783185840708, 31.214953271028065],
[11.692477876106196, 32.05607476635518],
[11.819690265486726, 32.89719626168227],
[11.996681415929203, 34.57943925233647],
[12.13495575221239, 35.420560747663586],
[12.267699115044248, 36.822429906542084],
[12.411504424778762, 37.943925233644904],
[12.56637168141593, 39.62616822429911],
[12.71570796460177, 41.028037383177605],
[12.876106194690266, 43.27102803738322],
[13.069690265486727, 45.794392523364536],
[13.252212389380531, 48.03738317757015],
[13.4070796460177, 51.12149532710285],
[13.561946902654867, 54.20560747663555],
[13.733407079646017, 58.130841121495365],
[13.83849557522124, 60.37383177570098],
[13.971238938053098, 64.2990654205608],
[14.065265486725664, 67.6635514018692],
[14.153761061946902, 70.74766355140193],
[14.231194690265488, 73.55140186915892],
[14.308628318584072, 76.35514018691595],
[14.386061946902656, 79.71962616822435],
[14.446902654867257, 82.24299065420567],
[14.513274336283185, 85.32710280373837],
[14.596238938053098, 89.53271028037389],
[14.690265486725664, 94.01869158878509],
[14.767699115044248, 97.66355140186923],
[14.823008849557523, 101.02803738317763],
[14.883849557522126, 104.95327102803745],
[14.96128318584071, 109.43925233644865],
[15.011061946902656, 112.52336448598138],
]
).T
mag_interp = interp1d(
mag_level, noise_level, kind="linear", fill_value="extrapolate"
)
return mag_interp(kepmag) * np.sqrt(6.5)
def kepler_noise_1h_quiet(kepmag):
# 1 hour CDPP
# this is calculated from the rrmscdpp06p0
mag_level, noise_level = np.array(
[
[0.0, 20.0],
[3.0, 20.0],
[6.0, 20.0],
[8.0, 20.0],
[9.00995575221239, 20.000000000000057],
[9.2, 21.2625],
[9.299999999999999, 20],
[9.399999999999999, 14.389000000000001],
[9.499999999999998, 24.667499999999997],
[9.599999999999998, 24.392500000000005],
[9.699999999999998, 26.223],
[9.799999999999997, 19.779],
[9.899999999999997, 14.007],
[9.999999999999996, 17.862000000000005],
[10.099999999999996, 20.965],
[10.299999999999995, 20.464],
[10.399999999999995, 19.271],
[10.499999999999995, 16.5505],
[10.599999999999994, 21.195999999999998],
[10.699999999999994, 26.0565],
[10.799999999999994, 27.654],
[10.899999999999993, 25.377],
[10.999999999999993, 22.171],
[11.099999999999993, 24.851],
[11.199999999999992, 24.87],
[11.299999999999992, 27.1965],
[11.399999999999991, 25.774],
[11.499999999999991, 27.665],
[11.59999999999999, 30.0305],
[11.69999999999999, 31.01],
[11.79999999999999, 32.178],
[11.89999999999999, 31.628],
[11.99999999999999, 32.558],
[12.099999999999989, 35.0385],
[12.199999999999989, 35.259],
[12.299999999999988, 36.119],
[12.399999999999988, 37.184],
[12.499999999999988, 39.861999999999995],
[12.599999999999987, 41.931000000000004],
[12.699999999999987, 42.528],
[12.799999999999986, 43.259],
[12.899999999999986, 45.439],
[12.999999999999986, 49.3505],
[13.099999999999985, 50.164],
[13.199999999999985, 53.51300000000001],
[13.299999999999985, 55.575],
[13.399999999999984, 57.218999999999994],
[13.499999999999984, 60.161500000000004],
[13.599999999999984, 62.68],
[13.699999999999983, 65.464],
[13.799999999999983, 70.37],
[13.899999999999983, 73.724],
[13.999999999999982, 77.017],
[14.099999999999982, 81.047],
[14.199999999999982, 85.068],
[14.299999999999981, 89.715],
[14.39999999999998, 95.31],
[14.49999999999998, 101.193],
[14.59999999999998, 106.978],
[14.69999999999998, 112.5995],
[14.79999999999998, 118.04700000000001],
[14.899999999999979, 125.4615],
[14.999999999999979, 133.9125],
[15.099999999999978, 141.15500000000003],
[15.199999999999978, 149.125],
[15.299999999999978, 159.1295],
[15.399999999999977, 168.91],
[15.499999999999977, 179.018],
[15.599999999999977, 192.773],
[15.699999999999976, 202.986],
[15.799999999999976, 218.581],
[15.899999999999975, 234.59900000000002],
[15.999999999999975, 245.80700000000002],
[16.099999999999973, 287.57599999999996],
[16.199999999999974, 282.94399999999996],
[16.299999999999976, 270.305],
[16.399999999999974, 321.54200000000003],
[16.49999999999997, 359.365],
[16.599999999999973, 349.54400000000015],
[16.699999999999974, 417.082],
[16.799999999999972, 425.254],
[16.89999999999997, 419.8280000000001],
[17.099999999999973, 434.58],
]
).T
mag_interp = interp1d(
mag_level, noise_level, kind="linear", fill_value="extrapolate"
)
return mag_interp(kepmag) * np.sqrt(6.)
def make_allplanets_df_vec_extrap_kepler(df, starid_zp, ocrMeasurement):
totalRows = df.loc[:, "Nplanets"].sum()
df.loc[:, "planetRadius"] = pd.Series()
df.loc[:, "planetPeriod"] = pd.Series()
df.loc[:, "starID"] = pd.Series()
radper_m = Dressing15_select_extrap(totalRows)
radper_fgk = Bryson_select(totalRows, ocrMeasurement=ocrMeasurement)
# we need an array of indices
rowIdx = np.repeat(np.arange(df.shape[0]), np.array(df.Nplanets.values))
newdf = df.iloc[rowIdx]
newdf.loc[:, "starID"] = rowIdx + starid_zp
newdf.loc[:, "planetRadius"] = np.where(
newdf.isMdwarf, radper_m[0], radper_fgk[0]
)
newdf.loc[:, "planetPeriod"] = np.where(
newdf.isMdwarf, radper_m[1], radper_fgk[1]
)
newdf.set_index(np.arange(newdf.shape[0]), inplace=True)
return newdf, newdf.starID.iloc[-1]
def Bryson_select(nselect=1, ocrMeasurement='bryson'):
balls = np.array([])
if ocrMeasurement == 'bryson':
fn_occ = "../data/bryson/occurrenceGrid_1100_bryson.npy"
elif ocrMeasurement == 'burke':
fn_occ = "../data/bryson/occurrenceGrid_1100_burke.npy"
elif ocrMeasurement == 'LUVOIR':
# simulate the LUVOIR eta-earth planets
radius = np.zeros(nselect)
period = np.zeros(nselect)
radius = random.uniform(low=0.8, high=1.4, size=nselect)
period = random.uniform(low=338, high=778, size=nselect)
return radius, period
fn_p = "../data/bryson/occurrencePeriod_1100.npy"
fn_r = "../data/bryson/occurrenceRadius_1100.npy"
ocrGrid = np.load(fn_occ)
rp1D = np.load(fn_r)
period1D = np.load(fn_p)
# use a 100 x 100 grid
occBalls = np.round(ocrGrid.flatten() * 1.0e7)
for i in range(occBalls.shape[0]):
balls = np.r_[balls, np.zeros(int(occBalls[i])) + i]
ball_lookup = {}
dPeriod = period1D[1] - period1D[0]
dRadius = rp1D[1] - rp1D[0]
for i in range(300):
for j in range(300):
ball_lookup[i * 300 + j] = [
rp1D[j] - dRadius / 2,
rp1D[j] + dRadius / 2,
period1D[i] - dPeriod / 2,
period1D[i] + dPeriod / 2,
]
rsamps = random.choice(balls, size=nselect)
radius = np.zeros(nselect)
period =
|
np.zeros(nselect)
|
numpy.zeros
|
from sklearn import metrics
import numpy as np
import pandas as pd
import seaborn as sns
from .stats import *
from .scn_train import *
import matplotlib
import matplotlib.pyplot as plt
def divide_sampTab(sampTab, prop, dLevel="cell_ontology_class"):
cts = set(sampTab[dLevel])
trainingids = np.empty(0)
for ct in cts:
aX = sampTab.loc[sampTab[dLevel] == ct, :]
ccount = len(aX.index)
trainingids = np.append(trainingids, np.random.choice(aX.index.values, int(ccount*prop), replace = False))
val_ids = np.setdiff1d(sampTab.index, trainingids, assume_unique = True)
sampTrain = sampTab.loc[trainingids,:]
sampVal = sampTab.loc[val_ids,:]
return([sampTrain, sampVal])
def sc_classAssess(stDat,washedDat, dLevel = "description1", dLevelSID="sample_name", minCells = 40, dThresh = 0, propTrain=0.25, nRand = 50, nTrees=2000):
goodGrps = np.unique(stTrain.newAnn)[stTrain.newAnn.value_counts()>minCells]
stTmp=stDat.loc[np.isin(stDat[dlevel], goodGrps) , :]
expDat_good = washedDat["expDat"].loc[stTmp.index, :]
stTrain, stVal = divide_sampTab(stTmp, propTrain, dLevel = dLevel)
expTrain=expDat_good.loc[stTrain.index,:]
expVal=expDat_good.loc[stVal.index,:]
varGenes = findVarGenes(expDat_good, washedDat["geneStats"])
cellgrps=stTrain[dLevel]
testRFs=sc_makeClassifier(expTrain, genes=varGenes, groups=cellgrps, nRand=nRand, ntrees=nTrees)
ct_scores=rf_classPredict(testRFs, expVal)
assessed= [ct_scores, stVal, stTrain]
return assessed
def sc_classThreshold(vect, classification, thresh):
TP=0;
FN=0;
FP=0;
TN=0;
calledPos = vect.loc[vect>thresh].index.values
calledNeg = vect.loc[vect<=thresh].index.values
if (np.isin(classification, calledPos)):
TP = 1
FN = 0
FP = len(calledPos) - 1
TN = len(calledNeg)
else:
TP = 0
FN = 1
FP = len(calledPos)
TN = len(calledNeg) -1
Accu = (TP + TN)/(TP + TN + FP + FN)
return Accu
def cn_clPerf(vect, sampTab, dLevel, classification, thresh, dLevelSID="sample_id"):
TP=0;
FN=0;
FP=0;
TN=0;
sampIDs = vect.index.values;
classes = sampTab.loc[sampIDs,dLevel];
actualPos = sampTab.loc[sampTab[dLevel]==classification,dLevelSID]
actualNeg = sampTab.loc[sampTab[dLevel]!=classification,dLevelSID]
calledPos = vect.loc[vect>thresh].index.values
calledNeg = vect.loc[vect<=thresh].index.values
TP = len(np.intersect1d(actualPos, calledPos));
FP = len(np.intersect1d(actualNeg, calledPos));
FN = len(actualPos)-TP;
TN = len(actualNeg)-FP;
return([TP, FN, FP, TN]);
def cn_eval(vect, sampTab, dLevel, classification, threshs=np.arange(0,1,0.05),dLevelSID="sample_id"):
ans=np.zeros([len(threshs), 7])
for i in range(0, len(threshs)):
thresh = threshs[i];
ans[i,0:4] = cn_clPerf(vect, sampTab, dLevel, classification, thresh, dLevelSID=dLevelSID);
ans[:,4] = threshs;
ans=pd.DataFrame(data=ans, columns=["TP", "FN", "FP", "TN", "thresh","FPR", "TPR"]);
TPR=ans['TP']/(ans['TP']+ans['FN']);
FPR=ans['FP']/(ans['TN']+ans['FP']);
ans['TPR']=TPR;
ans['FPR']=FPR;
return ans
def cn_classAssess(ct_scores, stVal, classLevels="description2", dLevelSID="sample_id", resolution=0.005):
allROCs = {}
evalAll=np.zeros([len(ct_scores.columns),2])
classifications= ct_scores.columns.values;
i=0
for xname in classifications:
classification=classifications[i];
tmpROC= cn_eval(ct_scores[xname],stVal,classLevels,xname,threshs=np.arange(0,1,resolution), dLevelSID=dLevelSID);
allROCs[xname] = tmpROC;
i = i + 1;
return allROCs;
def assess_comm(aTrain, aQuery, resolution = 0.005, nRand = 50, dLevelSID = "sample_name", classTrain = "cell_ontology_class", classQuery = "description2"):
ct_scores = pd.DataFrame(aQuery.X, index = aQuery.obs[dLevelSID], columns = aQuery.var.index)
stQuery= aQuery.obs
stQuery.index = ct_scores.index
stTrain= aTrain.obs
shared_cell_type = np.intersect1d(np.unique(stTrain[classTrain]), np.unique(stQuery[classQuery]))
stVal_com = stQuery.loc[np.isin(stQuery[classQuery], shared_cell_type),:]
if(nRand > 0):
tmp = np.empty([nRand, len(stVal_com.columns)], dtype=np.object)
tmp[:]="rand"
tmp=pd.DataFrame(data=tmp, columns=stVal_com.columns.values )
tmp[dLevelSID] = ct_scores.index.values[(len(ct_scores.index) - nRand):len(ct_scores.index)]
tmp.index= tmp[dLevelSID]
stVal_com= pd.concat([stVal_com, tmp])
cells_sub = stVal_com[dLevelSID]
ct_score_com = ct_scores.loc[cells_sub,:]
report= {}
ct_scores_t = ct_score_com.T
true_label = stVal_com[classQuery]
y_true=true_label.str.get_dummies()
eps = 1e-15
y_pred = np.maximum(np.minimum(ct_scores, 1 - eps), eps)
multiLogLoss = (-1 / len(ct_scores_t.index)) * np.sum(np.matmul(y_true.T.values, np.log(y_pred.values)))
pred_label = ct_scores.idxmax(axis=1)
cm=pd.crosstab(true_label, pred_label)
cm.index = cm.index.tolist()
if (len(np.setdiff1d(np.unique(true_label), np.unique(pred_label))) != 0):
misCol = np.setdiff1d(np.unique(true_label), np.unique(pred_label))
for i in range(0, len(misCol)):
added = pd.DataFrame(np.zeros([len(cm.index), 1]), index=cm.index)
cm = pd.concat([cm, added], axis=1)
cm.columns.values[(len(cm.columns) - len(misCol)) : len(cm.columns)] = misCol
if (len(np.setdiff1d(np.unique(pred_label), np.unique(true_label))) != 0):
misRow = np.setdiff1d(np.unique(pred_label), np.unique(true_label))
for i in range(0, len(misRow)):
added = pd.DataFrame(np.zeros([1, len(cm.columns)]), columns= cm.columns)
cm = pd.concat([cm, added], axis=0)
cm.index.values[(len(cm.index) - len(misRow)) : len(cm.index)] = misRow
confusionMatrix = cn_classAssess(ct_score_com, stVal_com, classLevels= classQuery, dLevelSID=dLevelSID, resolution=resolution)
cm= cm.loc[cm.index.values,:]
n = np.sum(
|
np.sum(cm)
|
numpy.sum
|
from __future__ import division, print_function
import cmath
import time
from copy import copy
import os
import argparse
import inspect
from collections import OrderedDict
from timeit import default_timer as timer
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
import numpy as np
from numpy import pi, radians, sin, cos, sqrt, clip
from numpy.random import poisson, uniform, randn, rand
from numpy.polynomial.legendre import leggauss
from scipy.integrate import simps
from scipy.special import j1 as J1
try:
from numba import njit, prange
# SAS_NUMBA: 0=None, 1=CPU, 2=GPU
SAS_NUMBA = int(os.environ.get("SAS_NUMBA", "1"))
USE_NUMBA = SAS_NUMBA > 0
USE_CUDA = SAS_NUMBA > 1
except ImportError:
USE_NUMBA = USE_CUDA = False
# Definition of rotation matrices comes from wikipedia:
# https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations
def Rx(angle):
"""Construct a matrix to rotate points about *x* by *angle* degrees."""
a = radians(angle)
R = [[1, 0, 0],
[0, +cos(a), -sin(a)],
[0, +sin(a), +cos(a)]]
return np.matrix(R)
def Ry(angle):
"""Construct a matrix to rotate points about *y* by *angle* degrees."""
a = radians(angle)
R = [[+cos(a), 0, +sin(a)],
[0, 1, 0],
[-sin(a), 0, +cos(a)]]
return np.matrix(R)
def Rz(angle):
"""Construct a matrix to rotate points about *z* by *angle* degrees."""
a = radians(angle)
R = [[+cos(a), -sin(a), 0],
[+sin(a), +cos(a), 0],
[0, 0, 1]]
return np.matrix(R)
def pol2rec(r, theta, phi):
"""
Convert from 3D polar coordinates to rectangular coordinates.
"""
theta, phi = radians(theta), radians(phi)
x = +r * sin(theta) * cos(phi)
y = +r * sin(theta)*sin(phi)
z = +r * cos(theta)
return x, y, z
def rotation(theta, phi, psi):
"""
Apply the jitter transform to a set of points.
Points are stored in a 3 x n numpy matrix, not a numpy array or tuple.
"""
return Rx(phi)*Ry(theta)*Rz(psi)
def apply_view(points, view):
"""
Apply the view transform (theta, phi, psi) to a set of points.
Points are stored in a 3 x n numpy array.
View angles are in degrees.
"""
theta, phi, psi = view
return np.asarray((Rz(phi)*Ry(theta)*Rz(psi))*np.matrix(points.T)).T
def invert_view(qx, qy, view):
"""
Return (qa, qb, qc) for the (theta, phi, psi) view angle at detector
pixel (qx, qy).
View angles are in degrees.
"""
theta, phi, psi = view
q = np.vstack((qx.flatten(), qy.flatten(), 0*qx.flatten()))
return np.asarray((Rz(-psi)*Ry(-theta)*Rz(-phi))*np.matrix(q))
class Shape:
rotation = np.matrix([[1., 0, 0], [0, 1, 0], [0, 0, 1]])
center = np.array([0., 0., 0.])[:, None]
r_max = None
is_magnetic = False
def volume(self):
# type: () -> float
raise NotImplementedError()
def sample(self, density):
# type: (float) -> np.ndarray[N], np.ndarray[N, 3]
raise NotImplementedError()
def dims(self):
# type: () -> float, float, float
raise NotImplementedError()
def rotate(self, theta, phi, psi):
self.rotation = rotation(theta, phi, psi) * self.rotation
return self
def shift(self, x, y, z):
self.center = self.center + np.array([x, y, z])[:, None]
return self
def _adjust(self, points):
points = np.asarray(self.rotation * np.matrix(points.T)) + self.center
return points.T
def r_bins(self, q, over_sampling=1, r_step=None):
return r_bins(q, r_max=self.r_max, r_step=r_step,
over_sampling=over_sampling)
class Composite(Shape):
def __init__(self, shapes, center=(0, 0, 0), orientation=(0, 0, 0)):
self.shapes = shapes
self.rotate(*orientation)
self.shift(*center)
# Find the worst case distance between any two points amongst a set
# of shapes independent of orientation. This could easily be a
# factor of two worse than necessary, e.g., a pair of thin rods
# end-to-end vs the same pair side-by-side.
distances = [((s1.r_max + s2.r_max)/2
+ sqrt(np.sum((s1.center - s2.center)**2)))
for s1 in shapes
for s2 in shapes]
self.r_max = max(distances + [s.r_max for s in shapes])
self.volume = sum(shape.volume for shape in self.shapes)
def sample(self, density):
values, points = zip(*(shape.sample(density) for shape in self.shapes))
return np.hstack(values), self._adjust(np.vstack(points))
class Box(Shape):
def __init__(self, a, b, c,
value, center=(0, 0, 0), orientation=(0, 0, 0)):
self.value = np.asarray(value)
self.rotate(*orientation)
self.shift(*center)
self.a, self.b, self.c = a, b, c
self._scale = np.array([a/2, b/2, c/2])[None, :]
self.r_max = sqrt(a**2 + b**2 + c**2)
self.dims = a, b, c
self.volume = a*b*c
def sample(self, density):
num_points = poisson(density*self.volume)
points = self._scale*uniform(-1, 1, size=(num_points, 3))
values = self.value.repeat(points.shape[0])
return values, self._adjust(points)
class EllipticalCylinder(Shape):
def __init__(self, ra, rb, length,
value, center=(0, 0, 0), orientation=(0, 0, 0)):
self.value = np.asarray(value)
self.rotate(*orientation)
self.shift(*center)
self.ra, self.rb, self.length = ra, rb, length
self._scale = np.array([ra, rb, length/2])[None, :]
self.r_max = sqrt(4*max(ra, rb)**2 + length**2)
self.dims = 2*ra, 2*rb, length
self.volume = pi*ra*rb*length
def sample(self, density):
# randomly sample from a box of side length 2*r, excluding anything
# not in the cylinder
num_points = poisson(density*4*self.ra*self.rb*self.length)
points = uniform(-1, 1, size=(num_points, 3))
radius = points[:, 0]**2 + points[:, 1]**2
points = points[radius <= 1]
values = self.value.repeat(points.shape[0])
return values, self._adjust(self._scale*points)
class EllipticalBicelle(Shape):
def __init__(self, ra, rb, length,
thick_rim, thick_face,
value_core, value_rim, value_face,
center=(0, 0, 0), orientation=(0, 0, 0)):
self.rotate(*orientation)
self.shift(*center)
self.value = value_core
self.ra, self.rb, self.length = ra, rb, length
self.thick_rim, self.thick_face = thick_rim, thick_face
self.value_rim, self.value_face = value_rim, value_face
# reset cylinder to outer dimensions for calculating scale, etc.
ra = self.ra + self.thick_rim
rb = self.rb + self.thick_rim
length = self.length + 2*self.thick_face
self._scale = np.array([ra, rb, length/2])[None, :]
self.r_max = sqrt(4*max(ra, rb)**2 + length**2)
self.dims = 2*ra, 2*rb, length
self.volume = pi*ra*rb*length
def sample(self, density):
# randomly sample from a box of side length 2*r, excluding anything
# not in the cylinder
ra = self.ra + self.thick_rim
rb = self.rb + self.thick_rim
length = self.length + 2*self.thick_face
num_points = poisson(density*4*ra*rb*length)
points = uniform(-1, 1, size=(num_points, 3))
radius = points[:, 0]**2 + points[:, 1]**2
points = points[radius <= 1]
# set all to core value first
values = np.ones_like(points[:, 0])*self.value
# then set value to face value if |z| > face/(length/2))
values[abs(points[:, 2]) > self.length/(self.length + 2*self.thick_face)] = self.value_face
# finally set value to rim value if outside the core ellipse
radius = (points[:, 0]**2*(1 + self.thick_rim/self.ra)**2
+ points[:, 1]**2*(1 + self.thick_rim/self.rb)**2)
values[radius>1] = self.value_rim
return values, self._adjust(self._scale*points)
class TruncatedSphere(Shape):
"""
Sphere of radius r, with points z < -h truncated.
"""
def __init__(self, r, h, value, center=(0, 0, 0), orientation=(0, 0, 0)):
self.value = np.asarray(value)
self.rotate(*orientation)
self.shift(*center)
self.r, self.h = r, h
# Max distance between points in the shape is the maximum diameter
self.r_max = 2*r if h >= 0 else 2*sqrt(r**2 - h**2)
self.dims = self.r_max, self.r_max, r+h
self.volume = pi*(2*r**3/3 + r**2*h - h**3/3)
Vp = pi*(2*r**3/3 + r**2*h - h**3/3)
Vm = pi*(2*r**3/3 - r**2*h + h**3/3)
Vd = Vp + Vm - 4*pi*r**3/3
def sample(self, density):
num_points = poisson(density*np.prod(self.dims))
points = uniform(-1, 1, size=(num_points, 3))
# Translate U ~ [-1, 1] in x,y to [-r_trunc/r, r_trunc/r] when
# truncation starts above the equator, otherwise leave it at [-1, 1].
# This makes for more efficient sampling since we don't have to
# consider the maximum diameter. We already calculated r_max as
# 2*r_trunc in this case, so just use the ratio of r_max to 2*r.
points[:, 0:2] *= 0.5*self.r_max/self.r
# Translate U ~ [-1, 1] in z to [-h/r, 1], with h representing
# distance below equator. So:
# (U + 1)/2 => [0, 1]
# [0, 1] * (1+h/r) => [0, 1+h/r]
# [0, 1+h/r] - h/r => [-h/r, 1]
# Combining:
# (U + 1)/2 * (1+h/r) - h/r
# = U*(1+h/r)/2 + (1+h/r)/2 - h/r
# = U*(1/2 + h/2r) + 1/2 + h/2r - 2h/2r
ratio = 0.5*self.h/self.r
points[:, 2] *= (0.5 + ratio)
points[:, 2] += (0.5 - ratio)
radius = np.sum(points**2, axis=1)
points = self.r*points[radius<=1]
values = self.value.repeat(points.shape[0])
return values, self._adjust(points)
class TriaxialEllipsoid(Shape):
def __init__(self, ra, rb, rc,
value, center=(0, 0, 0), orientation=(0, 0, 0),
magnetism=None):
self.is_magnetic = (magnetism is not None)
self.value = np.asarray(value)
self.magnetism = magnetism if self.is_magnetic else (0., 0., 0.)
self.rotate(*orientation)
self.shift(*center)
self.ra, self.rb, self.rc = ra, rb, rc
self._scale = np.array([ra, rb, rc])[None, :]
self.r_max = 2*max(ra, rb, rc)
self.dims = 2*ra, 2*rb, 2*rc
self.volume = 4*pi/3 * ra * rb * rc
def sample(self, density):
# randomly sample from a box of side length 2*r, excluding anything
# not in the ellipsoid
num_points = poisson(density*8*self.ra*self.rb*self.rc)
points = uniform(-1, 1, size=(num_points, 3))
radius = np.sum(points**2, axis=1)
points = self._scale*points[radius <= 1]
values = self.value.repeat(points.shape[0])
return values, self._adjust(points)
def sample_magnetic(self, density):
values, points = self.sample(density)
magnetism = np.tile(self.magnetism, (points.shape[0], 1)).T
return values, magnetism, points
class Helix(Shape):
def __init__(self, helix_radius, helix_pitch, tube_radius, tube_length,
value, center=(0, 0, 0), orientation=(0, 0, 0)):
self.value = np.asarray(value)
self.rotate(*orientation)
self.shift(*center)
helix_length = helix_pitch * tube_length/sqrt(helix_radius**2 + helix_pitch**2)
total_radius = self.helix_radius + self.tube_radius
self.helix_radius, self.helix_pitch = helix_radius, helix_pitch
self.tube_radius, self.tube_length = tube_radius, tube_length
self.r_max = sqrt(4*total_radius + (helix_length + 2*tube_radius)**2)
self.dims = 2*total_radius, 2*total_radius, helix_length
# small tube radius approximation; for larger tubes need to account
# for the fact that the inner length is much shorter than the outer
# length
self.volume = pi*self.tube_radius**2*self.tube_length
def points(self, density):
num_points = poisson(density*4*self.tube_radius**2*self.tube_length)
points = uniform(-1, 1, size=(num_points, 3))
radius = points[:, 0]**2 + points[:, 1]**2
points = points[radius <= 1]
# Based on math stackexchange answer by <NAME>
# https://math.stackexchange.com/a/461637
# with helix along z rather than x [so tuples in answer are (z, x, y)]
# and with random points in the cross section (p1, p2) rather than
# uniform points on the surface (cos u, sin u).
a, R = self.tube_radius, self.helix_radius
h = self.helix_pitch
scale = 1/sqrt(R**2 + h**2)
t = points[:, 3] * (self.tube_length * scale/2)
cos_t, sin_t = cos(t), sin(t)
# rx = R*cos_t
# ry = R*sin_t
# rz = h*t
# nx = -a * cos_t * points[:, 1]
# ny = -a * sin_t * points[:, 1]
# nz = 0
# bx = (a * h/scale) * sin_t * points[:, 2]
# by = (-a * h/scale) * cos_t * points[:, 2]
# bz = a*R/scale
# x = rx + nx + bx
# y = ry + ny + by
# z = rz + nz + bz
u, v = (R - a*points[:, 1]), (a * h/scale)*points[:, 2]
x = u * cos_t + v * sin_t
y = u * sin_t - v * cos_t
z = a*R/scale + h * t
points = np.hstack((x, y, z))
values = self.value.repeat(points.shape[0])
return values, self._adjust(points)
def csbox(a=10, b=20, c=30, da=1, db=2, dc=3, slda=1, sldb=2, sldc=3, sld_core=4):
core = Box(a, b, c, sld_core)
side_a = Box(da, b, c, slda, center=((a+da)/2, 0, 0))
side_b = Box(a, db, c, sldb, center=(0, (b+db)/2, 0))
side_c = Box(a, b, dc, sldc, center=(0, 0, (c+dc)/2))
side_a2 = copy(side_a).shift(-a-da, 0, 0)
side_b2 = copy(side_b).shift(0, -b-db, 0)
side_c2 = copy(side_c).shift(0, 0, -c-dc)
shape = Composite((core, side_a, side_b, side_c, side_a2, side_b2, side_c2))
shape.dims = 2*da+a, 2*db+b, 2*dc+c
return shape
def barbell(r=20, rbell=50, length=20, rho=2):
h = sqrt(rbell**2 - r**2)
top = TruncatedSphere(rbell, h, value=rho, center=(0, 0, length/2+h))
rod = EllipticalCylinder(r, r, length, value=rho)
bottom = TruncatedSphere(rbell, h, value=rho, center=(0, 0, -length/2-h),
orientation=(180, 0, 0))
shape = Composite((top, rod, bottom))
shape.dims = 2*rbell, 2*rbell, length+2*(rbell+h)
# r_max should be total length?
shape.r_max = (length + 2*(rbell + h))
return shape
def capped_cylinder(r=20, rcap=50, length=20, rho=2):
h = -sqrt(rcap**2 - r**2)
top = TruncatedSphere(rcap, h, value=rho, center=(0, 0, length/2+h))
rod = EllipticalCylinder(r, r, length, value=rho)
bottom = TruncatedSphere(rcap, h, value=rho, center=(0, 0, -length/2-h),
orientation=(180, 0, 0))
shape = Composite((top, rod, bottom))
shape.dims = 2*r, 2*r, length+2*(rcap+h)
# r_max is the length of the diagonal + height of the cap for safety.
# This is a bit larger than necessary, but that's better than truncation.
shape.r_max = sqrt(length**2 + 4*r**2) + (rcap + h)
return shape
def _Iqabc(weight, x, y, z, qa, qb, qc):
"""I(q) = |sum V(r) rho(r) e^(1j q.r)|^2 / sum V(r)"""
#print("calling python")
Iq = [abs(np.sum(weight*np.exp(1j*(qa_k*x + qb_k*y + qc_k*z))))**2
for qa_k, qb_k, qc_k in zip(qa.flat, qb.flat, qc.flat)]
return np.asarray(Iq)
_Iqabcf = _Iqabc
if USE_NUMBA:
# Override simple numpy solution with numba if available
def _Iqabc_py(weight, x, y, z, qa, qb, qc):
#print("calling numba")
Iq = np.empty_like(qa)
for j in prange(len(Iq)):
#total = 0. + 0j
#for k in range(len(weight)):
# total += weight[k]*np.exp(1j*(qa[j]*x[k] + qb[j]*y[k] + qc[j]*z[k]))
total = np.sum(weight * np.exp(1j*(qa[j]*x + qb[j]*y + qc[j]*z)))
Iq[j] = abs(total)**2
return Iq
sig = "f8[:](f8[:],f8[:],f8[:],f8[:],f8[:],f8[:],f8[:])"
_Iqabc = njit(sig, parallel=True, fastmath=True)(_Iqabc_py)
_Iqabcf = njit(sig.replace("f8", "f4"), parallel=True, fastmath=True)(_Iqabc_py)
if USE_CUDA:
# delayed loading of cuda
_IQABC_CUDA_KERNELS = {}
def _get_Iqabc_kernel(dtype):
#print("calling cuda")
if not _IQABC_CUDA_KERNELS:
from numba import cuda
if not cuda.list_devices():
raise RuntimeError("no cuda devices found")
def _kernel_py(weight, x, y, z, qa, qb, qc, Iq):
j = cuda.grid(1)
if j < qa.size:
total = 0. + 0j
for k in range(x.size):
total += weight[k]*cmath.exp(1j*(qa[j]*x[k] + qb[j]*y[k] + qc[j]*z[k]))
Iq[j] = abs(total)**2
sig_d = "void(f8[:],f8[:],f8[:],f8[:],f8[:],f8[:],f8[:],f8[:])"
sig_f = sig_d.replace("f8", "f4")
kernel_f = cuda.jit(sig_f, parallel=True, fastmath=True)(_kernel_py)
kernel_d = cuda.jit(sig_d, parallel=True, fastmath=True)(_kernel_py)
_IQABC_CUDA_KERNELS['f'] = kernel_f
_IQABC_CUDA_KERNELS['d'] = kernel_d
kernel = _IQABC_CUDA_KERNELS[dtype.char]
return kernel
def _Iqabc(weight, x, y, z, qa, qb, qc):
Iq = np.empty_like(qa)
# Apparently numba deals with all the necessary padding of vectors to nice boundaries
# before transfering to the GPU, so we don't need to do so by hand here.
threadsperblock = 32
blockspergrid = (Iq.size + (threadsperblock - 1)) // threadsperblock
kernel = _get_Iqabc_kernel(qa.dtype)
kernel[blockspergrid, threadsperblock](weight, x, y, z, qa, qb, qc, Iq)
return Iq
_Iqabcf = _Iqabc
if 0 and USE_CUDA:
### *** DEPRECATED ***
### Variant on the kernel with padding of vectors that is no faster and doesn't appear
### to be more correct. Leave it around for now in case we decide we don't trust numba.
_IQABC_CUDA_KERNELS = {}
def _get_Iqabc_kernel(dtype):
#print("calling cuda")
if not _IQABC_CUDA_KERNELS:
from numba import cuda
if not cuda.list_devices():
raise RuntimeError("no cuda devices found")
def _kernel_py(nx, nq, weight, x, y, z, qa, qb, qc, Iq):
j = cuda.grid(1)
if j < nq:
total = 0. + 0j
for k in range(nx):
total += weight[k]*cmath.exp(1j*(qa[j]*x[k] + qb[j]*y[k] + qc[j]*z[k]))
Iq[j] = abs(total)**2
sig_d = "void(i4,i4,f8[:],f8[:],f8[:],f8[:],f8[:],f8[:],f8[:],f8[:])"
sig_f = sig_d.replace("f8", "f4")
kernel_f = cuda.jit(sig_f, parallel=True, fastmath=True)(_kernel_py)
kernel_d = cuda.jit(sig_d, parallel=True, fastmath=True)(_kernel_py)
_IQABC_CUDA_KERNELS['f'] = kernel_f
_IQABC_CUDA_KERNELS['d'] = kernel_d
kernel = _IQABC_CUDA_KERNELS[dtype.char]
return kernel
def _Iqabc(weight, x, y, z, qa, qb, qc):
kernel = _get_Iqabc_kernel(qa.dtype)
nx, nq = len(x), len(qa)
threadsperblock = 32
blockspergrid = (nq + (threadsperblock - 1)) // threadsperblock
weight, x, y, z = pad_vectors(4, weight, x, y, z)
qa, qb, qc = pad_vectors(threadsperblock, qa, qb, qc)
Iq = np.empty_like(qa)
kernel[blockspergrid, threadsperblock](nx, nq, weight, x, y, z, qa, qb, qc, Iq)
return Iq[:nq]
_Iqabcf = _Iqabc
def pad_vectors(boundary, *vectors):
"""
Yields a list of vectors padded with NaN to a multiple of *boundary*.
Yields the original vector if the size is already a mulitple of *boundary*.
"""
for old in vectors:
old_size = len(old)
new_size = ((old_size + boundary-1)//boundary)*boundary
if new_size > old_size:
new = np.empty(new_size, dtype=old.dtype)
new[:old_size] = old
new[old_size:] = np.NaN
yield new
else:
yield old
def calc_Iqxy(qx, qy, rho, points, volume=1.0, view=(0, 0, 0), dtype='f'):
"""
*qx*, *qy* correspond to the detector pixels at which to calculate the
scattering, relative to the beam along the negative z axis.
*points* are three columns (x, y, z), one for each sample in the shape.
*rho* (1e-6/Ang) is the scattering length density of each point.
*volume* should be 1/number_density. That is, each of n particles in the
total value represents volume/n contribution to the scattering.
*view* rotates the points about the axes using Euler angles for pitch
yaw and roll for a beam travelling along the negative z axis.
*dtype* is the numerical precision of the calculation.
"""
# TODO: maybe slightly faster to rotate points, and drop qc*z
qx, qy = np.broadcast_arrays(qx, qy)
qa, qb, qc = invert_view(qx, qy, view)
rho, volume = np.broadcast_arrays(rho, volume)
weight = rho*volume
x, y, z = points.T
# I(q) = |sum V(r) rho(r) e^(1j q.r)|^2 / sum V(r)
if np.dtype(dtype) == np.float64:
weight, x, y, z, qa, qb, qc = [np.asarray(v, 'd') for v in (weight, x, y, z, qa, qb, qc)]
Iq = _Iqabc(weight, x, y, z, qa.flatten(), qb.flatten(), qc.flatten())
else: # float32
weight, x, y, z, qa, qb, qc = [np.asarray(v, 'f') for v in (weight, x, y, z, qa, qb, qc)]
Iq = _Iqabcf(weight, x, y, z, qa.flatten(), qb.flatten(), qc.flatten())
# The scale factor 1e-4 is due to the conversion from rho = 1e-6 squared
# times the conversion of 1e-8 from inverse angstroms to inverse cm.
return np.asarray(Iq).reshape(qx.shape) * (1e-4 / np.sum(volume))
def spin_weights(in_spin, out_spin):
"""
Compute spin cross sections given in_spin and out_spin
To convert spin cross sections to sld b:
uu * (sld - m_sigma_x);
dd * (sld + m_sigma_x);
ud * (m_sigma_y - 1j*m_sigma_z);
du * (m_sigma_y + 1j*m_sigma_z);
weights for spin crosssections: dd du real, ud real, uu, du imag, ud imag
"""
in_spin = clip(in_spin, 0.0, 1.0)
out_spin = clip(out_spin, 0.0, 1.0)
# Previous version of this function took the square root of the weights,
# under the assumption that
#
# w*I(q, rho1, rho2, ...) = I(q, sqrt(w)*rho1, sqrt(w)*rho2, ...)
#
# However, since the weights are applied to the final intensity and
# are not interned inside the I(q) function, we want the full
# weight and not the square root. Anyway no function will ever use
# set_spin_weights as part of calculating an amplitude, as the weights are
# related to polarisation efficiency of the instrument. The weights serve to
# construct various magnet scattering cross sections, which are linear combinations
# of the spin-resolved cross sections. The polarisation efficiency e_in and e_out
# are parameters ranging from 0.5 (unpolarised) beam to 1 (perfect optics).
# For in_spin or out_spin <0.5 one assumes a CS, where the spin is reversed/flipped
# with respect to the initial supermirror polariser. The actual polarisation efficiency
# in this case is however e_in/out = 1-in/out_spin.
norm = 1 - out_spin if out_spin < 0.5 else out_spin
# The norm is needed to make sure that the scattering cross sections are
# correctly weighted, such that the sum of spin-resolved measurements adds up to
# the unpolarised or half-polarised scattering cross section. No intensity weighting
# needed on the incoming polariser side (assuming that a user), has normalised
# to the incoming flux with polariser in for SANSPOl and unpolarised beam, respectively.
weight = [
(1.0 - in_spin) * (1.0 - out_spin) / norm, # dd
(1.0 - in_spin) * out_spin / norm, # du
in_spin * (1.0 - out_spin) / norm, # ud
in_spin * out_spin / norm, # uu
]
return weight
def orth(A, b_hat): # A = 3 x n, and b_hat unit vector
return A - np.sum(A*b_hat[:, None], axis=0)[None, :]*b_hat[:, None]
def magnetic_sld(qx, qy, up_angle, up_phi, rho, rho_m):
"""
Compute the complex sld for the magnetic spin states.
Returns effective rho for spin states [dd, du, ud, uu].
"""
# Handle q=0 by setting px = py = 0
# Note: this is different from kernel_iq, which I(0,0) to 0
q_norm = 1/sqrt(qx**2 + qy**2) if qx != 0. or qy != 0. else 0.
cos_spin, sin_spin = cos(radians(up_angle)), sin(radians(up_angle))
cos_phi, sin_phi = cos(radians(up_phi)), sin(radians(up_phi))
M = rho_m
p_hat = np.array([sin_spin * cos_phi, sin_spin * sin_phi, cos_spin ])
q_hat = np.array([qx, qy, 0]) * q_norm
M_perp = orth(M,q_hat)
M_perpP = orth(M_perp, p_hat)
M_perpP_perpQ = orth(M_perpP, q_hat)
perpx = np.dot(p_hat, M_perp)
perpy = np.sqrt(np.sum(M_perpP_perpQ**2, axis=0))
perpz = np.dot(q_hat, M_perpP)
return [
rho - perpx, # dd => sld - D M_perpx
perpy - 1j * perpz, # du => -D (M_perpy + j M_perpz)
perpy + 1j * perpz, # ud => -D (M_perpy - j M_perpz)
rho + perpx, # uu => sld + D M_perpx
]
def calc_Iq_magnetic(qx, qy, rho, rho_m, points, volume=1.0, view=(0, 0, 0),
up_frac_i=0.5, up_frac_f=0.5, up_angle=0., up_phi=0.):
"""
*qx*, *qy* correspond to the detector pixels at which to calculate the
scattering, relative to the beam along the negative z axis.
*points* are three columns (x, y, z), one for each sample in the shape.
*rho* (1e-6/Ang) is the scattering length density of each point.
*rho_m* (1e-6/Ang) are the (mx, my, mz) components of the magnetic
scattering length density for each point.
*volume* should be 1/number_density. That is, each of n particles in the
total value represents volume/n contribution to the scattering.
*view* rotates the points about the axes using Euler angles for pitch
yaw and roll for a beam travelling along the negative z axis.
*up_frac_i* is the portion of polarizer neutrons which are spin up.
*up_frac_f* is the portion of analyzer neutrons which are spin up.
*up_angle* and *up_phi* are the rotation angle of the spin up direction
in the detector plane and the inclination from the beam direction (z-axis).
*dtype* is the numerical precision of the calculation. [not implemented]
"""
# TODO: maybe slightly faster to rotate points and rho_m, and drop qc*z
qx, qy = np.broadcast_arrays(qx, qy)
qa, qb, qc = invert_view(qx, qy, view)
rho, volume = np.broadcast_arrays(rho, volume)
x, y, z = points.T
weights = spin_weights(up_frac_i, up_frac_f)
# I(q) = |sum V(r) rho(r) e^(1j q.r)|^2 / sum V(r)
shape = qx.shape
Iq = np.zeros(qx.size, 'd')
x, y, z, qx, qy = (np.asarray(v, 'd') for v in (x, y, z, qx, qy))
qx, qy = (v.flatten() for v in (qx, qy))
for k in range(qx.size):
ephase = volume*np.exp(1j*(qa[k]*x + qb[k]*y + qc[k]*z))
dd, du, ud, uu = magnetic_sld(qx[k], qy[k], up_angle, up_phi, rho, rho_m)
for w, xs in zip(weights, (dd, du, ud, uu)):
if w == 0.0:
continue
Iq[k] += w * abs(np.sum(xs*ephase))**2
# The scale factor 1e-4 is due to the conversion from rho = 1e-6 squared
# times the conversion of 1e-8 from inverse angstroms to inverse cm.
return np.asarray(Iq).reshape(shape) * (1e-4 / np.sum(volume))
def _calc_Pr_nonuniform(r, rho, points, volume):
# Make Pr a little be bigger than necessary so that only distances
# min < d < max end up in Pr
n_max = len(r)+1
extended_Pr = np.zeros(n_max+1, 'd')
# r refers to bin centers; find corresponding bin edges
bins = bin_edges(r)
t_next = timer() + 3
for k, rho_k in enumerate(rho[:-1]):
distance = np.linalg.norm(points[k] - points[k+1:], axis=1)
weights = (rho_k * volume[k]) * (rho[k+1:] * volume[k+1:])
#weights = (rho_k * volume[k]) * rho[k+1:]
index = np.searchsorted(bins, distance)
# Note: indices may be duplicated, so "Pr[index] += w" will not work!!
extended_Pr += np.bincount(index, weights, n_max+1)
t = timer()
if t > t_next:
t_next = t + 3
print("processing %d of %d"%(k, len(rho)-1))
Pr = extended_Pr[1:-1]
return Pr
def _calc_Pr_uniform(r, rho, points, volume):
# Make Pr a little be bigger than necessary so that only distances
# min < d < max end up in Pr
dr, n_max = r[0], len(r)
extended_Pr = np.zeros(n_max+1, 'd')
t0 = timer()
t_next = t0 + 3
for k, rho_k in enumerate(rho[:-1]):
distance = np.linalg.norm(points[k] - points[k+1:], axis=1)
#weights = (rho_k * volume[k]) * (rho[k+1:] * volume[k+1:])
weights = rho_k * rho[k+1:] * (volume[k] + volume[k+1:])
index = np.minimum(np.asarray(distance/dr, 'i'), n_max)
# Note: indices may be duplicated, so "Pr[index] += w" will not work!!
extended_Pr += np.bincount(index, weights, n_max+1)
t = timer()
if t > t_next:
t_next = t + 3
print("processing %d of %d"%(k, len(rho)-1))
#print("time py:", timer() - t0)
Pr = extended_Pr[:-1]
#print("vol", np.sum(volume))
return Pr*1e-4
# Can get an additional 2x by going to C. Cuda/OpenCL will allow even
# more speedup, though still bounded by the O(n^2) cost.
"""
void pdfcalc(int n, const double *pts, const double *rho,
int nPr, double *Pr, double rstep)
{
int i,j;
for (i=0; i<n-2; i++) {
for (j=i+1; j<=n-1; j++) {
const double dxx=pts[3*i]-pts[3*j];
const double dyy=pts[3*i+1]-pts[3*j+1];
const double dzz=pts[3*i+2]-pts[3*j+2];
const double d=sqrt(dxx*dxx+dyy*dyy+dzz*dzz);
const int k=rint(d/rstep);
if (k < nPr) Pr[k]+=rho[i]*rho[j];
}
}
}
"""
if USE_NUMBA:
# Override simple numpy solution with numba if available
#@njit("f8[:](f8[::1], f8[::1], f8[::1,:], f8[:])", parallel=True, fastmath=True)
@njit(parallel=True, fastmath=True)
def _calc_Pr_uniform(r, rho, points, volume):
dr = r[0]
n_max = len(r)
Pr = np.zeros_like(r)
for j in prange(len(rho) - 1):
x, y, z = points[j, 0], points[j, 1], points[j, 2]
rho_j, volume_j = rho[j], volume[j]
for k in range(j+1, len(rho)):
distance = sqrt((x - points[k, 0])**2
+ (y - points[k, 1])**2
+ (z - points[k, 2])**2)
index = int(distance/dr)
if index < n_max:
Pr[index] += rho_j*rho[k]*(volume_j + volume[k])
return Pr
def calc_Pr(r, rho, points, volume):
# P(r) with uniform steps in r is 3x faster; check if we are uniform
# before continuing
r, points = [np.asarray(v, 'd') for v in (r, points)]
npoints = points.shape[0]
rho = np.broadcast_to(np.asarray(rho, 'd'), npoints)
volume = np.broadcast_to(np.asarray(volume, 'd'), npoints)
if np.max(np.abs(np.diff(r) - r[0])) > r[0]*0.01:
Pr = _calc_Pr_nonuniform(r, rho, points, volume)
else:
Pr = _calc_Pr_uniform(r, rho, points, volume)
# Note: 1e-4 because (1e-6 rho)^2 = 1e-12 rho^2 time 1e-8 for 1/A to 1/cm
return Pr * 1e-4
def r_bins(q, r_max=None, r_step=None, over_sampling=1):
if r_max is None:
r_max = 2 * pi / q[0]
if r_step is None:
r_step = 2 * pi / q[-1] / over_sampling
return np.arange(r_step, r_max, r_step)
def j0(x):
# use q/pi since np.sinc = sin(pi x)/(pi x)
return np.sinc(x/np.pi)
def calc_Iq_from_Pr(q, r, Pr):
Iq = np.array([simps(Pr * j0(qk*r), r) for qk in q])
#Iq = np.array([np.trapz(Pr * j0(qk*r), r) for qk in q])
#Iq /= Iq[0]
return Iq
def _calc_Iq_avg(Iq, q, r, sld, volume):
weight = sld * volume
for i, qi in enumerate(q):
Fq = np.sum(weight * np.sinc((qi/np.pi)*r))
Iq[i] = Fq**2
if USE_NUMBA:
#sig = njit('(f8[:], f8[:], f8[:], f8[:], f8[:])', parallel=True, fastmath=True)
sig = njit(parallel=True, fastmath=True)
_calc_Iq_avg = sig(_calc_Iq_avg)
def calc_Iq_avg(q, rho, points, volume=1.0):
# Centralize the data
center = 0.5*(np.min(points, axis=0, keepdims=True)
+ np.max(points, axis=0, keepdims=True))
points = points - center
# Find distance from center
r = np.linalg.norm(points, axis=1)
# Call calculator
Iq = np.empty_like(q)
rho = np.broadcast_to(np.asarray(rho, 'd'), points.shape[:1])
volume = np.broadcast_to(np.asarray(volume, 'd'), points.shape[:1])
_calc_Iq_avg(Iq, q, r, rho, volume)
return Iq * (1e-4/np.sum(volume))
# NOTE: copied from sasmodels/resolution.py
def bin_edges(x):
"""
Determine bin edges from bin centers, assuming that edges are centered
between the bins.
Note: this uses the arithmetic mean, which may not be appropriate for
log-scaled data.
"""
if len(x) < 2 or (np.diff(x) < 0).any():
raise ValueError("Expected bins to be an increasing set")
edges = np.hstack([
x[0] - 0.5*(x[1] - x[0]), # first point minus half first interval
0.5*(x[1:] + x[:-1]), # mid points of all central intervals
x[-1] + 0.5*(x[-1] - x[-2]), # last point plus half last interval
])
return edges
# -------------- plotters ----------------
def plot_calc(r, Pr, q, Iq, theory=None, title=None, Iq_avg=None):
import matplotlib.pyplot as plt
plt.subplot(211)
plt.plot(r, Pr, '-', label="Pr")
plt.xlabel('r (A)')
plt.ylabel('Pr (1/A^2)')
if title is not None:
plt.title(title)
plt.grid(True)
plt.subplot(212)
plt.loglog(q, Iq, '-', label='from Pr')
#plt.loglog(q, Iq/theory[1], '-', label='Pr/theory')
if Iq_avg is not None:
plt.loglog(q, Iq_avg, '-', label='from Iq_avg')
plt.xlabel('q (1/A)')
plt.ylabel('Iq')
plt.grid(True)
if theory is not None:
#plt.loglog(theory[0], theory[1]/theory[1][0], '-', label='analytic')
plt.loglog(theory[0], theory[1], '-', label='analytic')
plt.legend()
def plot_calc_2d(qx, qy, Iqxy, theory=None, title=None):
import matplotlib.pyplot as plt
qx, qy = bin_edges(qx), bin_edges(qy)
#qx, qy = np.meshgrid(qx, qy)
if theory is not None:
plt.subplot(131)
#plt.pcolor(qx, qy, np.log10(Iqxy))
extent = [qx[0], qx[-1], qy[0], qy[-1]]
plt.imshow(np.log10(Iqxy), extent=extent, interpolation="nearest",
origin='lower')
plt.colorbar()
plt.xlabel('qx (1/A)')
plt.ylabel('qy (1/A)')
plt.axis('equal')
plt.axis(extent)
#plt.grid(True)
if title is not None:
plt.title(title)
if theory is not None:
plt.subplot(132)
# Skip bad values in theory
index = np.isnan(theory)
theory[index] = Iqxy[index]
plt.imshow(np.log10(theory), extent=extent, interpolation="nearest",
origin='lower')
plt.title("theory")
plt.colorbar()
plt.axis('equal')
plt.axis(extent)
plt.xlabel('qx (1/A)')
if theory is not None:
plt.subplot(133)
rel = (theory-Iqxy)/theory
plt.imshow(rel, extent=extent, interpolation="nearest", origin='lower')
plt.colorbar()
plt.axis('equal')
plt.axis(extent)
plt.xlabel('qx (1/A)')
plt.title('max rel. err=%g' % np.max(abs(rel)))
def plot_points(rho, points):
import mpl_toolkits.mplot3d
import matplotlib.pyplot as plt
ax = plt.axes(projection='3d')
try:
ax.axis('square')
except Exception:
pass
n = len(points)
#print("len points", n)
index = np.random.choice(n, size=500) if n > 500 else slice(None, None)
ax.scatter(points[index, 0], points[index, 1], points[index, 2], c=rho[index])
# make square axes
minmax = np.array([points.min(), points.max()])
ax.scatter(minmax, minmax, minmax, c='w')
#low, high = points.min(axis=0), points.max(axis=0)
#ax.axis([low[0], high[0], low[1], high[1], low[2], high[2]])
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.autoscale(True)
# ----------- Analytic models --------------
def sas_sinx_x(x):
with np.errstate(all='ignore'):
retvalue = sin(x)/x
retvalue[x == 0.] = 1.
return retvalue
def sas_2J1x_x(x):
with np.errstate(all='ignore'):
retvalue = 2*J1(x)/x
retvalue[x == 0] = 1.
return retvalue
def sas_3j1x_x(x):
"""return 3*j1(x)/x"""
with np.errstate(all='ignore'):
retvalue = 3*(sin(x) - x*
|
cos(x)
|
numpy.cos
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from scipy.stats import multivariate_normal
from tqdm import tqdm
class GMM:
def __init__(self, x):
self.x = x
self.pts = x.shape[0]
self.k, self.w, self.pi, self.mu, self.sigma = None, None, None, None, None
# TODO:应当更新w, pi, mu, sigma的写法,使之为更迅速的矩阵乘法
def update_w(self, k, mu, sigma, pi):
w = np.zeros((self.x.shape[0], k))
for j in range(k):
w[:, j] = pi[j] * multivariate_normal.pdf(
self.x,
mu[j],
np.diag([sigma[j, _, _] for _ in range(sigma.shape[-1])])
)
# 注意这里的sigma在写代码时应该用var代替,否则难以满足其正定条件
# print(np.sum(w, axis=1).reshape(-1, 1))
w /= np.sum(w, axis=1).reshape(-1, 1)
return w
def update_pi(self, k, w):
pi = np.zeros(k)
pi += np.sum(w, axis=0)
pi /= np.sum(w)
return pi
def update_mu(self, k, w):
mu = np.zeros((k, self.x.shape[-1]))
for j in range(k):
mu[j] += np.dot(w[:, j].T, self.x) # [1, n] * [n, 2]
mu /= np.sum(w, axis=0).reshape(-1, 1)
return mu
def update_sigma(self, k, w, mu):
sigma =
|
np.zeros((k, self.x.shape[-1], self.x.shape[-1]))
|
numpy.zeros
|
""""
Library of different error metrics like mean squared error, KL-divergence, etc.
Used to compute the reconstruction error of the autoencoder
"""
import os
import numpy as np
from src.preprocessing import heartbeat_split
import random
import matplotlib.pyplot as plt
from scipy import signal
from scipy.stats import entropy, wasserstein_distance
from scipy.spatial.distance import jensenshannon
from src.utils.plotting_utils import set_font_size
from src.utils.dsp_utils import get_windowed_time
from src.utils.file_indexer import get_patient_ids
import sys
import logging
def mean_squared_error(reduced_dimensions, model_name, patient_num, save_errors=False):
"""
Computes the mean squared error of the reconstructed signal against the original signal for each lead for each of the patient_num
Each signal's dimensions are reduced from 100 to 'reduced_dimensions', then reconstructed to obtain the reconstructed signal
** Requires intermediate data for the model and patient that this computes the MSE for **
:param reduced_dimensions: [int] number of dimensions the file was originally reduced to
:param model_name: [str] "lstm, vae, ae, pca, test"
:return: [dict(int -> list(np.array))] dictionary of patient_index -> length n array of MSE for each heartbeat (i.e. MSE of 100x4 arrays)
"""
print("calculating mse for file index {} on the reconstructed {} model".format(patient_num, model_name))
original_signals = np.load(
os.path.join("Working_Data", "Normalized_Fixed_Dim_HBs_Idx{}.npy".format(str(patient_num))))
reconstructed_signals = load_reconstructed_heartbeats(model_name, patient_num)
# compute mean squared error for each heartbeat
if original_signals.shape != reconstructed_signals.shape:
logging.exception(
f"original signals length of {original_signals.shape[0]} is not equal to reconstructed signal length of {reconstructed_signals.shape[0]}")
sys.exit(1)
mse = np.zeros(np.shape(original_signals)[0])
for i in range(np.shape(original_signals)[0]):
mse[i] = (np.linalg.norm(original_signals[i, :, :] - reconstructed_signals[i, :, :]) ** 2) / (
np.linalg.norm(original_signals[i, :, :]) ** 2)
if save_errors:
np.save(
os.path.join("Working_Data", "{}_errors_{}d_Idx{}.npy".format(model_name, reduced_dimensions, patient_num)),
mse)
return mse
def mean_squared_error_timedelay(reduced_dimensions, model_name, patient_num, save_errors=False):
"""
Computes the mean squared error of the reconstructed signal against the original signal for each lead for each of the patient_num
Each signal's dimensions are reduced from 100 to 'reduced_dimensions', then reconstructed to obtain the reconstructed signal
** Requires intermediate data for the model and patient that this computes the MSE for, including
reconstructions for three iterations of the model **
:param reduced_dimensions: [int] number of dimensions the file was originally reduced to
:param model_name: [str] "lstm, vae, ae, pca, test"
:return: [dict(int -> list(np.array))] dictionary of patient_index -> length n array of MSE for each heartbeat (i.e. MSE of 100x4 arrays)
"""
print("calculating mse time delay for file index {} on the reconstructed {} model".format(patient_num, model_name))
original_signals = np.load(
os.path.join("Working_Data", "Normalized_Fixed_Dim_HBs_Idx{}.npy".format(str(patient_num))))
reconstructed_signals = load_and_concatenate_reconstructed_heartbeats(model_name, patient_num)
original_signals = original_signals[-np.shape(reconstructed_signals)[0]:, :, :]
# compute mean squared error for each heartbeat
mse = np.zeros(np.shape(original_signals)[0])
for i in range(np.shape(original_signals)[0]):
mse[i] = (np.linalg.norm(original_signals[i, :, :] - reconstructed_signals[i, :, :]) ** 2) / (
np.linalg.norm(original_signals[i, :, :]) ** 2)
if save_errors:
np.save(
os.path.join("Working_Data", "{}_errors_{}d_Idx{}.npy".format(model_name, reduced_dimensions, patient_num)),
mse)
return mse
def kl_divergence(reduced_dimensions, model_name, patient_num, save_errors=False):
"""
Computes the KL-Divergence between original and reconstructed data (absolute val + normalized to make a valid dist.)
** Requires intermediate data for the model and patient that this computes the MSE for **
:param reduced_dimensions: [int] number of dimensions the file was originally reduced to
:param model_name: [str] "lstm, vae, ae, pca, test"
:return: [dict(int -> list(np.array))] dictionary of patient_index -> length n array of MSE for each heartbeat (i.e. MSE of 100x4 arrays)
"""
print("calculating KL div. for file index {} on the reconstructed {} model".format(patient_num, model_name))
original_signals = np.load(
os.path.join("Working_Data", "Normalized_Fixed_Dim_HBs_Idx{}.npy".format(str(patient_num))))
reconstructed_signals = load_reconstructed_heartbeats(model_name, patient_num)
if original_signals.shape != reconstructed_signals.shape:
original_signals = original_signals[-reconstructed_signals.shape[0]:, :, :]
# logging.exception(f"original signals length of {original_signals.shape[0]} is not equal to reconstructed signal length of {reconstructed_signals.shape[0]}")
# sys.exit(1)
# print(original_signals.shape)
# print(reconstructed_signals.shape)
kld = entropy(abs(reconstructed_signals), abs(original_signals), axis=1)
kld =
|
np.mean(kld, axis=1)
|
numpy.mean
|
#! /usr/bin/env python
"""Phase contraint overlap tool. This tool calculates the minimum and maximum phase of
the primary or secondary transit (by default, primary) based on parameters provided by the user.
Authors:
<NAME>, 2018
<NAME>, 2018
<NAME>, 2020
Usage:
calculate_constraint <target_name> [--t0=<t0>] [--period=<p>] [--pre_duration=<pre_duration>] [--transit_duration=<trans_dur>] [--window_size=<win_size>] [--secondary] [--eccentricity=<ecc>] [--omega=<omega>] [--inclination=<inc>] [--winn_approx] [--get_secondary_time]
Arguments:
<target_name> Name of target
Options:
-h --help Show this screen.
--version Show version.
--t0=<t0> The starting time of the transit in BJD or HJD. Only useful if user wants to have the time-of-secondary eclipse returned.
--period=<p> The period of the transit in days.
--pre_duration=<pre_duration> The duration of observations *before* transit/eclipse mid-time in hours.
--transit_duration=<trans_dur> The duration of the transit in hours.
--window_size=<win_size> The window size of the transit in hours [default: 1.0]
--secondary If active, calculate phases for secondary eclipses (user needs to supply eccentricity, omega and inclination).
--eccentricity=<ecc> The eccentricity of the orbit (needed for secondary eclipse constraints).
--omega=<omega> The argument of periastron passage (needed for secondary eclipse constraints).
--inclination=<inc> The inclination of the orbit (needed for secondary eclipse constraints).
--winn_approx If active, instead of running the whole Kepler equation calculation, time of secondary eclipse is calculated using eq. (6) in Winn (2010; https://arxiv.org/abs/1001.2010v5)
--get_secondary_time If active, calculation also returns time-of-secondary eclipse. Needs t0 as input.
"""
import math
import os
import argparse
from docopt import docopt
import numpy as np
import requests
import urllib
from scipy import optimize
from astropy.time import Time
from exoctk.utils import get_target_data
def calculate_phase(period, pre_duration, window_size, t0=None, ecc=None, omega=None, inc=None, secondary=False, winn_approx=False, get_secondary_time=False):
''' Function to calculate the min and max phase.
Parameters
----------
period : float
The period of the transit in days.
pre_duration : float
The duration of observations *before* transit/eclipse mid-time in hours.
window_size : float
The window size of transit in hours. Default is 1 hour.
t0 : float
The time of (primary) transit center (only needed if get_secondary_time is True).
ecc : float
The eccentricity of the orbit (only needed for secondary eclipses).
omega : float
The argument of periastron passage, in degrees (only needed for secondary eclipses).
inc : float
The inclination of the orbit, in degrees (only needed for secondary eclipses).
secondary : boolean
If True, calculation will be done for secondary eclipses.
winn_approx : boolean
If True, secondary eclipse calculation will use the Winn (2010) approximation to estimate time
of secondary eclipse --- (only valid for not very eccentric and inclined orbits).
get_secondary_time : boolean
If True, return time of secondary eclipse along with the phase constraints.
Returns
-------
minphase : float
The minimum phase constraint.
maxphase : float
The maximum phase constraint. '''
if t0 is None:
if get_secondary_time:
raise Exception("Error: can't return time of secondary eclipse without a time-of-transit center.")
t0 = 1.
if not secondary:
minphase = 1.0 - ((pre_duration + window_size)/24./period)
maxphase = 1.0 - ((pre_duration)/24./period)
else:
deg_to_rad = (np.pi/180.)
# Calculate time of secondary eclipse:
tsec = calculate_tsec(period, ecc, omega*deg_to_rad, inc*deg_to_rad, t0=t0, winn_approximation=winn_approx)
# Calculate difference in phase-space between primary and secondary eclipse (note calculate_tsec ensures tsec is
# *the next* secondary eclipse after t0):
phase_diff = (tsec - t0)/period
# Estimate minphase and maxphase centered around this phase (thinking here is that, e.g., if phase_diff is 0.3
# then eclipse happens at 0.3 after 1 (being the latter by definition the time of primary eclipse --- i.e., transit).
# Because phase runs from 0 to 1, this implies eclipse happens at phase 0.3):
minphase = phase_diff - ((pre_duration + window_size)/24./period)
maxphase = phase_diff - ((pre_duration)/24./period)
# Wrap the phases around 0 and 1 in case limits blow in the previous calculation (unlikely, but user might be doing
# something crazy or orbit could be extremely weird such that this can reasonably happen in the future). Note this
# assumes -1 < minphase,maxphase < 2:
if minphase < 0:
minphase = 1. + minphase
if maxphase > 1:
maxphase = maxphase - 1.
if get_secondary_time:
return minphase, maxphase, tsec
return minphase, maxphase
def calculate_pre_duration(transitDur):
''' Function to calculate the pre-transit hours to be spent on target as recommended by the
Tdwell equation:
0.75 + Max(1hr,T14/2) (before transit) + T14 + Max(1hr, T14/2) (after transit) + 1hr (timing window)
The output is, thus, 0.75 + Max(1hr,T14/2) (before transit) + T14/2.
Parameters
----------
transitDur : float
The duration of the transit/eclipse in hours.
Returns
-------
pretransit_duration : float
The duration of the observation prior to transit/eclipse mid-time in hours. '''
pretransit_duration = 0.75 + np.max([1., transitDur/2.]) + transitDur/2.
return pretransit_duration
def drsky_2prime(x, ecc, omega, inc):
''' Second derivative of function drsky. This is the second derivative with respect to f of the drsky function.
Parameters
----------
x : float
True anomaly
ecc : float
Eccentricity of the orbit
omega : float
Argument of periastron passage (in radians)
inc : float
Inclination of the orbit (in radians)
Returns
-------
drsky_2prime : float
Function evaluated at x, ecc, omega, inc'''
sq_sini = np.sin(inc)**2
sin_o_p_f = np.sin(x+omega)
cos_o_p_f = np.cos(x+omega)
ecosf = ecc*np.cos(x)
esinf = ecc*np.sin(x)
f1 = esinf - esinf*sq_sini*(sin_o_p_f**2)
f2 = -sq_sini*(ecosf + 4.)*(sin_o_p_f*cos_o_p_f)
return f1+f2
def drsky_prime(x, ecc, omega, inc):
''' Derivative of function drsky. This is the first derivative with respect to f of the drsky function.
Parameters
----------
x : float
True anomaly
ecc : float
Eccentricity of the orbit
omega : float
Argument of periastron passage (in radians)
inc : float
Inclination of the orbit (in radians)
Returns
-------
drsky_prime : float
Function evaluated at x, ecc, omega, inc'''
sq_sini = np.sin(inc)**2
sin_o_p_f = np.sin(x+omega)
cos_o_p_f = np.cos(x+omega)
ecosf = ecc*np.cos(x)
esinf = ecc*np.sin(x)
f1 = (cos_o_p_f**2 - sin_o_p_f**2)*(sq_sini)*(1. + ecosf)
f2 = -ecosf*(1 - (sin_o_p_f**2)*(sq_sini))
f3 = esinf*sin_o_p_f*cos_o_p_f*sq_sini
return f1+f2+f3
def drsky(x, ecc, omega, inc):
''' Function whose roots we wish to find to obtain time of secondary (and primary) eclipse(s)
When one takes the derivative of equation (5) in Winn (2010; https://arxiv.org/abs/1001.2010v5), and equates that to zero (to find the
minimum/maximum of said function), one gets to an equation of the form g(x) = 0. This function (drsky) is g(x), where x is the true
anomaly.
Parameters
----------
x : float
True anomaly
ecc : float
Eccentricity of the orbit
omega : float
Argument of periastron passage (in radians)
inc : float
Inclination of the orbit (in radians)
Returns
-------
drsky : float
Function evaluated at x, ecc, omega, inc '''
sq_sini = np.sin(inc)**2
sin_o_p_f = np.sin(x+omega)
cos_o_p_f = np.cos(x+omega)
f1 = sin_o_p_f*cos_o_p_f*sq_sini*(1. + ecc*
|
np.cos(x)
|
numpy.cos
|
import pandas as pd
import numpy as np
import codecs
import time
from org.mk.training.dl.rnn import bidirectional_dynamic_rnn
from org.mk.training.dl.rnn import dynamic_rnn
from org.mk.training.dl.rnn import MultiRNNCell
from org.mk.training.dl.nn import embedding_lookup
from org.mk.training.dl.nn import TrainableVariable
from org.mk.training.dl.rnn_cell import LSTMCell
from org.mk.training.dl.core import Dense
from org.mk.training.dl.seq2seq import sequence_loss
from org.mk.training.dl.seq2seq import TrainingHelper, BasicDecoder, dynamic_decode
from org.mk.training.dl.attention import LuongAttention,AttentionWrapper
from org.mk.training.dl.common import WeightsInitializer
from org.mk.training.dl import init_ops
from org.mk.training.dl.optimizer import BatchGradientDescent
from org.mk.training.dl.nmt import print_gradients
from org.mk.training.dl.common import make_mask
from org.mk.training.dl.nmt import parse_arguments
from org.mk.training.dl.nmtdata import get_nmt_data
import org
#np.set_printoptions(threshold=np.nan)
init=np.array([[-2.0387042, -0.7570444, -1.549724, -0.55742437, -0.10309707, -0.2645374,
0.5542126, -0.9948135, -1.4004004, -0.2027762, 1.8161317, 0.02489787,
0.04653463, 0.30181375, -1.0206957, -0.4414572, -0.08976762, 0.86643434,
0.06023955, 0.50390786, -1.1679714, -0.31363872, -0.87805235, -3.808063,
-1.2836251, 0.1762668, -0.4557327, 1.1585172, -0.6317208, -0.7690312,
-1.1819371, -1.0957835, -1.0487816, 0.38563657, 0.7846264, -0.16195902,
2.9963484, -1.1604083, 2.127244, 1.0451506, 2.3449166, -1.11046 ],
[-1.3579369, 1.6391242, 0.51722956, -1.1566479, 0.5217864, 0.6738795,
1.4439716, -1.5845695, -0.5321513, -0.45986208, 0.95982075, -2.7541134,
0.04544061, -0.24134564, 0.01985956, -0.01174978, 0.21752118, -0.96756375,
-1.1478109, -1.4283063, 0.33229867, -2.06857, -1.0454241, -0.60130537,
1.1755886, 0.8240156, -1.4274453, 1.1680154, -1.4401436, 0.16765368,
1.2770568, -0.15272069, -0.70132256, -0.39191842, 0.14498521, 0.52371395,
-1.0711092, 0.7994564, -0.86202085, -0.08277576, 0.6717222, -0.30217007],
[ 1.1651239, 0.8676004, -0.7326845, 1.1329368, 0.33225664, 0.42479947,
2.442528, -0.24212709, -0.31981337, 0.7518857, 0.09940664, 0.733886,
0.16233322, -3.180123, -0.05459447, -1.0913122, 0.6260485, 1.3436326,
0.3324367, -0.4807606, 0.80269957, 0.80319524, -1.0442443, 0.78740156,
-0.40594986, 2.0595453, 0.95093924, 0.05337913, 0.70918155, 1.553336,
0.91851705, -0.79104966, 0.4592584, -1.0875456, 1.0102607, -1.0877079,
-0.61451066, -0.8137477, 0.19382478, -0.7905477, 1.157019, -0.21588814],
[-0.02875052, 1.3946419, 1.3715329, 0.03029069, 0.72896576, 1.556146,
0.62381554, 0.28502566, 0.42641425, -0.9238658, -1.3146611, 0.97760606,
-0.5422947, -0.66413164, -0.57151276, -0.52428764, -0.44747844, -0.07234555,
1.5007111, 0.6677294, -0.7949865, -1.1016922, 0.00385522, 0.2087736,
0.02533335, -0.15060721, 0.41100115, 0.04573904, 1.5402086, -0.5570146,
0.8980145, -1.0776126, 0.25556734, -1.0891188, -0.25838724, 0.28069794,
0.25003937, 0.47946456, -0.36741912, 0.8140413, 0.5821169, -1.8102683 ],
[ 1.4668883, -0.27569455, 0.19961897, 1.0866551, 0.10519085, 1.0896195,
-0.88432556, -0.45068273, 0.37042075, -0.10234109, -0.6915803, -1.1545025,
-0.4954256, -0.10934342, -0.2797343, 0.42959297, -0.6256306, -0.04518669,
-1.5740314, -0.7988373, -0.5571486, -1.4605384, 0.85387, -1.6822307,
0.72871834, 0.47308877, -1.3507669, -1.4545231, 1.1324743, -0.03236655,
0.6779119, 0.9597622, -1.3243811, -0.92739224, -0.18055117, 0.71914613,
0.5413713, -0.3229486, -1.7227241, -1.2969391, 0.27593264, 0.32810318]]
)
def process_encoding_input(target_data, word2int, batch_size):
print("target_data:", target_data)
print("batch_size:", batch_size)
decoding_input = np.concatenate((np.full((batch_size, 1), word2int['TOKEN_GO']), target_data[:, :-1]), 1)
print("decoding_input:", decoding_input)
return decoding_input
def get_rnn_cell(rnn_cell_size, dropout_prob,n_layers,debug):
rnn_cell=None
print("n_layers:",n_layers)
if(n_layers==1):
with WeightsInitializer(initializer=init_ops.Constant(0.1)) as vs:
rnn_cell = LSTMCell(rnn_cell_size,debug=debug)
else:
cell_list=[]
for i in range(n_layers):
with WeightsInitializer(initializer=init_ops.Constant(0.1)) as vs:
cell_list.append(LSTMCell(rnn_cell_size,debug=debug))
rnn_cell=MultiRNNCell(cell_list)
return rnn_cell
def encoding_layer(rnn_cell_size, sequence_len, n_layers, rnn_inputs, dropout_prob):
if(encoder_type=="bi" and n_layers%2 == 0):
n_bi_layer=int(n_layers/2)
encoding_output, encoding_state=bidirectional_dynamic_rnn(get_rnn_cell(rnn_cell_size, dr_prob,n_bi_layer,debug),get_rnn_cell(rnn_cell_size, dr_prob,n_bi_layer,debug), rnn_inputs)
print("encoding_state:",encoding_state)
if(n_bi_layer > 1):
#layers/2
"""
Forward-First(0)
((LSTMStateTuple({'c': array([[0.30450274, 0.30450274, 0.30450274, 0.30450274, 0.30450274]]),
'h': array([[0.16661529, 0.16661529, 0.16661529, 0.16661529, 0.16661529]])}),
Forward-Second(1)
LSTMStateTuple({'c': array([[0.27710986, 0.07844026, 0.18714019, 0.28426586, 0.28426586]]),
'h': array([[0.15019765, 0.04329417, 0.10251247, 0.1539225 , 0.1539225 ]])})),
Backward-First(0)
(LSTMStateTuple({'c': array([[0.30499766, 0.30499766, 0.30499766, 0.30499766, 0.30499766]]),
'h': array([[0.16688152, 0.16688152, 0.16688152, 0.16688152, 0.16688152]])}),
Backward-Second(1)
LSTMStateTuple({'c': array([[0.25328871, 0.17537864, 0.21700339, 0.25627687, 0.25627687]]),
'h': array([[0.13779658, 0.09631104, 0.11861721, 0.1393639 , 0.1393639 ]])})))
"""
encoder_state = []
for layer_id in range(n_bi_layer):
encoder_state.append(encoding_state[0][layer_id]) # forward
encoder_state.append(encoding_state[1][layer_id]) # backward
encoding_state = tuple(encoder_state)
"""
First(0)
((LSTMStateTuple({'c': array([[0.30450274, 0.30450274, 0.30450274, 0.30450274, 0.30450274]]),
'h': array([[0.16661529, 0.16661529, 0.16661529, 0.16661529, 0.16661529]])}),
Second(1)
LSTMStateTuple({'c': array([[0.30499766, 0.30499766, 0.30499766, 0.30499766, 0.30499766]]),
'h': array([[0.16688152, 0.16688152, 0.16688152, 0.16688152, 0.16688152]])})),
Third(2)
(LSTMStateTuple({'c': array([[0.27710986, 0.07844026, 0.18714019, 0.28426586, 0.28426586]]),
'h': array([[0.15019765, 0.04329417, 0.10251247, 0.1539225 , 0.1539225 ]])}),
Fourth(3)
LSTMStateTuple({'c': array([[0.25328871, 0.17537864, 0.21700339, 0.25627687, 0.25627687]]),
'h': array([[0.13779658, 0.09631104, 0.11861721, 0.1393639 , 0.1393639 ]])})))
"""
else:
encoding_output, encoding_state=dynamic_rnn(get_rnn_cell(rnn_cell_size, dr_prob,n_layers,debug), rnn_inputs)
return encoding_output, encoding_state
def create_attention(decoding_cell,encoding_op,encoding_st,fr_len):
if(args.attention_option is "Luong"):
with WeightsInitializer(initializer=init_ops.Constant(0.1)) as vs:
attention_mechanism = LuongAttention(hidden_size, encoding_op, fr_len)
decoding_cell = AttentionWrapper(decoding_cell,attention_mechanism,hidden_size)
attention_zero_state = decoding_cell.zero_state(batch_size)
attention_zero_state = attention_zero_state.clone(cell_state = encoding_st)
print("attentionstate0:",attention_zero_state)
return decoding_cell,attention_zero_state
def training_decoding_layer(decoding_embed_input, en_len, decoding_cell, encoding_op, encoding_st, op_layer,
v_size, fr_len, max_en_len):
if (args.attention_architecture is not None):
decoding_cell,encoding_st=create_attention(decoding_cell,encoding_op,encoding_st,fr_len)
helper = TrainingHelper(inputs=decoding_embed_input, sequence_length=en_len, time_major=False)
dec = BasicDecoder(decoding_cell, helper, encoding_st, op_layer)
logits, _= dynamic_decode(dec, output_time_major=False, impute_finished=True,
maximum_iterations=max_en_len)
return logits
def decoding_layer(decoding_embed_inp, embeddings, encoding_op, encoding_st, v_size, fr_len,
en_len, max_en_len, rnn_cell_size, word2int, dropout_prob, batch_size, n_layers):
out_l = Dense(len(en_word2int) + 1,kernel_initializer=init_ops.Constant(init))
logits_tr = training_decoding_layer(decoding_embed_inp,
en_len,
get_rnn_cell(rnn_cell_size, dr_prob,n_layers,debug),
encoding_op,
encoding_st,
out_l,
v_size,
fr_len,
max_en_len)
return logits_tr
def seq2seq_model(input_data, target_en_data, dropout_prob, fr_len, en_len, max_en_len,
v_size, rnn_cell_size, n_layers, word2int_en, batch_size):
#print("LookupTable.getInstance:")
lt_input=TrainableVariable.getInstance("input_word_embedding",fr_embeddings_matrix)
encoding_embed_input = embedding_lookup(lt_input, input_data)
#encoding_embed_input = embedding_lookup(fr_embeddings_matrix, input_data)
#print("encoding_embed_input:",encoding_embed_input,encoding_embed_input.shape)
encoding_op, encoding_st = encoding_layer(rnn_cell_size, fr_len, n_layers, encoding_embed_input,
dropout_prob)
print("encoding_st:",encoding_st,type(encoding_st))
decoding_input = process_encoding_input(target_en_data, word2int_en, batch_size)
decoding_embed_input = embedding_lookup(en_embeddings_matrix, decoding_input)
#print("decoding_embed_input:",decoding_embed_input)
#print("decoding_embed_input:",decoding_embed_input)
tr_logits = decoding_layer(decoding_embed_input,
en_embeddings_matrix,
encoding_op,
encoding_st,
v_size,
fr_len,
en_len,
max_en_len,
rnn_cell_size,
word2int_en,
dropout_prob,
batch_size,
n_layers)
return encoding_op, encoding_st, tr_logits
def pad_sentences(sentences_batch, word2int):
max_sentence = max([len(sentence) for sentence in sentences_batch])
return [sentence + [word2int['TOKEN_PAD']] * (max_sentence - len(sentence)) for sentence in sentences_batch]
def get_batches(en_text, fr_text, batch_size):
#for batch_idx in range(0, 1):
for batch_idx in range(0, len(fr_text) // batch_size):
start_idx = batch_idx * batch_size
en_batch = en_text[start_idx:start_idx + batch_size]
fr_batch = fr_text[start_idx:start_idx + batch_size]
pad_en_batch = np.array(pad_sentences(en_batch, en_word2int))
pad_fr_batch = np.array(pad_sentences(fr_batch, fr_word2int))
pad_en_lens = []
for en_b in pad_en_batch:
pad_en_lens.append(len(en_b))
pad_fr_lens = []
for fr_b in pad_fr_batch:
pad_fr_lens.append(len(fr_b))
print("pad_en_batch:", pad_en_batch)
print("pad_en_lens:", pad_en_lens)
print("pad_fr_batch:", pad_fr_batch)
print("pad_fr_lens:", pad_fr_lens)
yield pad_en_batch, pad_fr_batch, pad_en_lens, pad_fr_lens
epochs = 0
batch_size = 0
hidden_size = 0
n_layers = 0
n_bi_layer=0
lr = 0.0
dr_prob = 0.75
encoder_type=None
display_steps=0
projectdir="nmt_custom"
min_learning_rate = 0.0006
display_step = 20
stop_early_count = 0
stop_early_max_count = 3
per_epoch = 1
debug=False
display_steps=0
update_loss = 0
batch_loss = 0
summary_update_loss = []
rnn_fw=None
rnn_bw = None
decoding_cell = None
def set_modelparams(args):
global epochs,n_layers,encoder_type,hidden_size,batch_size,lr,rnn_fw,rnn_bw,decoding_cell,gdo,n_bi_layer,debug,per_epoch,logs_path,display_steps
epochs=args.epochs
n_layers=args.num_layers
encoder_type=args.encoder_type
hidden_size=args.num_units
batch_size = args.batch_size
lr = args.learning_rate
debug=args.debug
per_epoch=args.per_epoch
logs_path=args.out_dir
display_steps=args.display_steps
fr_embeddings_matrix,en_embeddings_matrix,fr_word2int,en_word2int,fr_filtered,en_filtered,args=get_nmt_data()
set_modelparams(args)
en_train = en_filtered[0:30000]
fr_train = fr_filtered[0:30000]
update_check = (len(fr_train) // batch_size // per_epoch) - 1
#out_l = Dense(len(en_word2int) + 1,kernel_initializer=init_ops.Constant(init))
for epoch_i in range(1, epochs + 1):
update_loss = 0
batch_loss = 0
for batch_i, (en_batch, fr_batch, en_text_len, fr_text_len) in enumerate(
get_batches(en_train, fr_train, batch_size)):
before = time.time()
encoding_optf, encoding_sttf ,logits_tr= seq2seq_model(fr_batch[:, ::-1], en_batch, dr_prob, fr_text_len, en_text_len,
|
np.amax(en_text_len)
|
numpy.amax
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2014-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`YuEtAl2013`, :class:`YuEtAl2013Tibet`,
:class:`YuEtAl2013Eastern`, :class:`YuEtAl2013Stable`
"""
import numpy as np
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
def gc(coeff, mag):
"""
Returns the set of coefficients to be used for the calculation of GM
as a function of earthquake magnitude
:param coeff:
A dictionary of parameters for the selected IMT
:param mag:
Magnitude value
:returns:
The set of coefficients
"""
if mag > 6.5:
a1ca = coeff['ua']
a1cb = coeff['ub']
a1cc = coeff['uc']
a1cd = coeff['ud']
a1ce = coeff['ue']
a2ca = coeff['ia']
a2cb = coeff['ib']
a2cc = coeff['ic']
a2cd = coeff['id']
a2ce = coeff['ie']
else:
a1ca = coeff['a']
a1cb = coeff['b']
a1cc = coeff['c']
a1cd = coeff['d']
a1ce = coeff['e']
a2ca = coeff['ma']
a2cb = coeff['mb']
a2cc = coeff['mc']
a2cd = coeff['md']
a2ce = coeff['me']
return a1ca, a1cb, a1cc, a1cd, a1ce, a2ca, a2cb, a2cc, a2cd, a2ce
def rbf(ra, coeff, mag):
"""
Calculate the median ground motion for a given magnitude and distance
:param ra:
Distance value [km]
:param coeff:
The set of coefficients
:param mag:
Magnitude value
:returns:
"""
a1ca, a1cb, a1cc, a1cd, a1ce, a2ca, a2cb, a2cc, a2cd, a2ce = gc(coeff, mag)
term1 = a1ca + a1cb * mag + a1cc * np.log(ra + a1cd*np.exp(a1ce*mag))
term2 = a2ca + a2cb * mag
term3 = a2cd*np.exp(a2ce*mag)
return np.exp((term1 - term2) / a2cc) - term3
def fnc(ra, *args):
"""
Function used in the minimisation problem.
:param ra:
Semi-axis of the ellipses used in the Yu et al.
:returns:
The absolute difference between the epicentral distance and the
adjusted distance
"""
#
# epicentral distance
repi = args[0]
#
# azimuth
theta = args[1]
#
# magnitude
mag = args[2]
#
# coefficients
coeff = args[3]
#
# compute the difference between epicentral distances
rb = rbf(ra, coeff, mag)
t1 = ra**2 * (np.sin(np.radians(theta)))**2
t2 = rb**2 * (np.cos(np.radians(theta)))**2
xx = ra * rb / (t1+t2)**0.5
return xx-repi
def get_ras(repi, theta, mag, coeff):
"""
Computes equivalent distance
:param repi:
Epicentral distance
:param theta:
Azimuth value
:param mag:
Magnitude
:param coeff:
GMPE coefficients
"""
rx = 150.
ras = 300.
dff = 1.e0
while abs(dff) > 1e-5:
#
# calculate the difference between epicentral distances
dff = fnc(ras, repi, theta, mag, coeff)
#
# update the value of distance computed
ras -= np.sign(dff) * rx
rx = rx / 2.
if rx < 1e-3:
break
return ras
class YuEtAl2013Ms(GMPE):
"""
Implements the Yu et al. (2013) GMPE used for the calculation of the 2015
version of the national seismic hazard maps for China. Note that magnitude
supported is Ms.
"""
#: Supported tectonic region type is active shallow crust
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Supported intensity measure types are peak ground velocity and
#: peak ground acceleration
DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
PGA,
PGV,
SA
])
#: Supported intensity measure component is geometric mean (supposed)
DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.AVERAGE_HORIZONTAL
#: Supported standard deviation types is total
DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
const.StdDev.TOTAL
])
#: No site parameters required
REQUIRES_SITES_PARAMETERS = set(())
#: Required rupture parameter is magnitude
REQUIRES_RUPTURE_PARAMETERS = set(('mag',))
#: Required distance measures are epicentral distance and azimuth
REQUIRES_DISTANCES = set(('repi', 'azimuth'))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Check that the requested standard deviation type is available
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
#
# Set parameters
mag = rup.mag
epi = dists.repi
theta = dists.azimuth
#
# Set coefficients
coeff = self.COEFFS[imt]
a1ca, a1cb, a1cc, a1cd, a1ce, a2ca, a2cb, a2cc, a2cd, a2ce = \
gc(coeff, mag)
#
# Get correction coefficients. Here for each site we find the
# the geometry of the ellipses
ras = []
for epi, theta in zip(dists.repi, dists.azimuth):
res = get_ras(epi, theta, mag, coeff)
ras.append(res)
ras = np.array(ras)
rbs = rbf(ras, coeff, mag)
#
# Compute values of ground motion for the two cases. The value of
# 225 is hardcoded under the assumption that the hypocentral depth
# corresponds to 15 km (i.e. 15**2)
mean1 = (a1ca + a1cb * mag +
a1cc * np.log((ras**2+225)**0.5 +
a1cd * np.exp(a1ce * mag)))
mean2 = (a2ca + a2cb * mag +
a2cc * np.log((rbs**2+225)**0.5 +
a2cd * np.exp(a2ce * mag)))
#
# Get distances
x = (mean1 * np.sin(np.radians(dists.azimuth)))**2
y = (mean2 * np.cos(np.radians(dists.azimuth)))**2
mean = mean1 * mean2 / np.sqrt(x+y)
if isinstance(imt, (PGA)):
mean = np.exp(mean)/g/100
elif isinstance(imt, (PGV)):
mean = np.exp(mean)
else:
raise ValueError('Unsupported IMT')
#
# Get the standard deviation
stddevs = self._compute_std(coeff, stddev_types, len(dists.repi))
#
# Return results
return np.log(mean), stddevs
def _compute_std(self, C, stddev_types, num_sites):
return [np.ones(num_sites)*C['sigma']]
#: Coefficient table
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT a b c d e ua ub uc ud ue ma mb mc md me ia ib ic id ie sigma
PGA 4.1193 1.656 -2.389 1.772 0.424 7.8269 1.0856 -2.389 1.772 0.424 2.2609 1.6399 -2.118 0.825 0.465 6.003 1.0649 -2.118 0.825 0.465 0.5428
PGV -1.2581 1.932 -2.181 1.772 0.424 3.013 1.2742 -2.181 1.772 0.424 -3.1073 1.9389 -1.945 0.825 0.465 1.3087 1.2627 -1.945 0.825 0.465 0.6233
""")
class YuEtAl2013MsTibet(YuEtAl2013Ms):
#: Supported tectonic region type is Tibetan plateau
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST
#: Coefficient table
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT a b c d e ua ub uc ud ue ma mb mc md me ia ib ic id ie sigma
PGA 5.4901 1.4835 -2.416 2.647 0.366 8.7561 0.9453 -2.416 2.647 0.366 2.3069 1.4007 -1.854 0.612 0.457 5.6511 0.8924 -1.854 0.612 0.457 0.5428
PGV -0.1472 1.7618 -2.205 2.647 0.366 3.9422 1.1293 -2.205 2.647 0.366 -2.9923 1.7043 -1.696 0.612 0.457 1.0189 1.0902 -1.696 0.612 0.457 0.6233
""")
class YuEtAl2013MsEastern(YuEtAl2013Ms):
#: Supported tectonic region type is eastern part of China
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.STABLE_CONTINENTAL
#: Coefficient table
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT a b c d e ua ub uc ud ue ma mb mc md me ia ib ic id ie sigma
PGA 4.5517 1.5433 -2.315 2.088 0.399 8.1259 0.9936 -2.315 2.088 0.399 2.7048 1.518 -2.004 0.944 0.447 6.3319 0.9614 -2.004 0.944 0.447 0.5428
PGV -0.8349 1.8193 -2.103 2.088 0.399 3.3051 1.1799 -2.103 2.088 0.399 -2.6381 1.8124 -1.825 0.944 0.447 1.6376 1.1546 -1.825 0.944 0.447 0.6233
""")
class YuEtAl2013MsStable(YuEtAl2013Ms):
#: Supported tectonic region type is stable part of China
DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.STABLE_CONTINENTAL
#: Coefficient table
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT a b c d e ua ub uc ud ue ma mb mc md me ia ib ic id ie sigma
PGA 5.5591 1.1454 -2.079 2.802 0.295 8.5238 0.6854 -2.079 2.802 0.295 3.9445 1.0833 -1.723 1.295 0.331 6.187 0.7383 -1.723 1.295 0.331 0.5428
PGV 0.2139 1.4283 -1.889 2.802 0.295 3.772 0.8786 -1.889 2.802 0.295 -1.3547 1.3823 -1.559 1.295 0.331 1.5433 0.9361 -1.559 1.295 0.331 0.6233
""")
class YuEtAl2013Mw(YuEtAl2013Ms):
"""
This is a modified version of the original Yu et al. (2013) that supports
the use of Mw rather than Ms. The Mw to Ms conversion equation used is the
one proposed by Cheng et al. (2017). Note that this version does not
propagate the uncertainty related to the magnitude conversion process.
"""
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# Check that the requested standard deviation type is available
assert all(stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
for stddev_type in stddev_types)
#
# Set parameters
magn = rup.mag
epi = dists.repi
theta = dists.azimuth
#
# Convert Mw into Ms
if magn < 6.58:
mag = (magn - 0.59) / 0.86
else:
mag = (magn + 2.42) / 1.28
#
# Set coefficients
coeff = self.COEFFS[imt]
a1ca, a1cb, a1cc, a1cd, a1ce, a2ca, a2cb, a2cc, a2cd, a2ce = \
gc(coeff, mag)
#
# Get correction coefficients. Here for each site we find the
# the geometry of the ellipses
ras = []
for epi, theta in zip(dists.repi, dists.azimuth):
res = get_ras(epi, theta, mag, coeff)
ras.append(res)
ras = np.array(ras)
rbs = rbf(ras, coeff, mag)
#
# Compute values of ground motion for the two cases. The value of
# 225 is hardcoded under the assumption that the hypocentral depth
# corresponds to 15 km (i.e. 15**2)
mean1 = (a1ca + a1cb * mag +
a1cc * np.log((ras**2+225)**0.5 +
a1cd * np.exp(a1ce * mag)))
mean2 = (a2ca + a2cb * mag +
a2cc * np.log((rbs**2+225)**0.5 +
a2cd * np.exp(a2ce * mag)))
#
# Get distances
x = (mean1 * np.sin(np.radians(dists.azimuth)))**2
y = (mean2 * np.cos(
|
np.radians(dists.azimuth)
|
numpy.radians
|
import string
import torch
from net import RINet, RINet_attention
from database import evalDataset_kitti360, SigmoidDataset_kitti360, SigmoidDataset_train, SigmoidDataset_eval
import numpy as np
from torch.utils.data import DataLoader
from tqdm import tqdm
from sklearn import metrics
import os
import argparse
# from tensorboardX import SummaryWriter
from torch.utils.tensorboard.writer import SummaryWriter
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def train(cfg):
writer = SummaryWriter()
net = RINet_attention()
net.to(device=device)
print(net)
sequs = cfg.all_seqs
sequs.remove(cfg.seq)
train_dataset = SigmoidDataset_train(sequs=sequs, neg_ratio=cfg.neg_ratio,
eva_ratio=cfg.eval_ratio, desc_folder=cfg.desc_folder, gt_folder=cfg.gt_folder)
test_dataset = SigmoidDataset_eval(sequs=sequs, neg_ratio=cfg.neg_ratio,
eva_ratio=cfg.eval_ratio, desc_folder=cfg.desc_folder, gt_folder=cfg.gt_folder)
# train_dataset=SigmoidDataset_kitti360(['0009','0003','0007','0002','0004','0006','0010'],1)
# test_dataset=evalDataset_kitti360('0005')
batch_size = cfg.batch_size
train_loader = DataLoader(
dataset=train_dataset, batch_size=batch_size, shuffle=True, num_workers=6)
test_loader = DataLoader(
dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=6)
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, net.parameters(
)), lr=cfg.learning_rate, weight_decay=1e-6)
epoch = cfg.max_epoch
starting_epoch = 0
batch_num = 0
if not cfg.model == "":
checkpoint = torch.load(cfg.model)
starting_epoch = checkpoint['epoch']
batch_num = checkpoint['batch_num']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
for i in range(starting_epoch, epoch):
net.train()
pred = []
gt = []
for i_batch, sample_batch in tqdm(enumerate(train_loader), total=len(train_loader), desc='Train epoch '+str(i), leave=False):
optimizer.zero_grad()
out, diff = net(sample_batch["desc1"].to(
device=device), sample_batch["desc2"].to(device=device))
labels = sample_batch["label"].to(device=device)
loss1 = torch.nn.functional.binary_cross_entropy_with_logits(
out, labels)
loss2 = labels*diff*diff+(1-labels)*torch.nn.functional.relu(
cfg.margin-diff)*torch.nn.functional.relu(cfg.margin-diff)
loss2 = torch.mean(loss2)
loss = loss1+loss2
loss.backward()
optimizer.step()
with torch.no_grad():
writer.add_scalar(
'total loss', loss.cpu().item(), global_step=batch_num)
writer.add_scalar('loss1', loss1.cpu().item(),
global_step=batch_num)
writer.add_scalar('loss2', loss2.cpu().item(),
global_step=batch_num)
batch_num += 1
outlabel = out.cpu().numpy()
label = sample_batch['label'].cpu().numpy()
mask = (label > 0.9906840407) | (label < 0.0012710163)
label = label[mask]
label[label < 0.5] = 0
label[label > 0.5] = 1
pred.extend(outlabel[mask].tolist())
gt.extend(label.tolist())
pred = np.array(pred, dtype='float32')
pred =
|
np.nan_to_num(pred)
|
numpy.nan_to_num
|
""" A distributed version of the paraboloid model with an extra input that can be used to shift
each index.
This version is used for testing, so it will have different options.
"""
import numpy as np
import openmdao.api as om
from openmdao.utils.mpi import MPI
from openmdao.utils.array_utils import evenly_distrib_idxs
class DistParab(om.ExplicitComponent):
def initialize(self):
self.options.declare('arr_size', types=int, default=10,
desc="Size of input and output vectors.")
self.options.declare('deriv_type', default='dense',
values=['dense', 'fd', 'cs', 'sparse'],
desc="Method for computing derivatives.")
def setup(self):
arr_size = self.options['arr_size']
deriv_type = self.options['deriv_type']
comm = self.comm
rank = comm.rank
sizes, offsets = evenly_distrib_idxs(comm.size, arr_size)
start = offsets[rank]
io_size = sizes[rank]
self.offset = offsets[rank]
end = start + io_size
self.add_input('x', val=np.ones(io_size), distributed=True,
src_indices=np.arange(start, end, dtype=int))
self.add_input('y', val=np.ones(io_size), distributed=True,
src_indices=np.arange(start, end, dtype=int))
self.add_input('a', val=-3.0 * np.ones(io_size), distributed=True,
src_indices=np.arange(start, end, dtype=int))
self.add_output('f_xy', val=np.ones(io_size), distributed=True)
if deriv_type == 'dense':
self.declare_partials('f_xy', ['x', 'y', 'a'])
elif deriv_type == 'sparse':
row_col = np.arange(io_size)
self.declare_partials('f_xy', ['x', 'y', 'a'], rows=row_col, cols=row_col)
else:
self.declare_partials('f_xy', ['x', 'y', 'a'], method=deriv_type)
def compute(self, inputs, outputs):
x = inputs['x']
y = inputs['y']
a = inputs['a']
outputs['f_xy'] = (x + a)**2 + x * y + (y + 4.0)**2 - 3.0
def compute_partials(self, inputs, partials):
deriv_type = self.options['deriv_type']
x = inputs['x']
y = inputs['y']
a = inputs['a']
if deriv_type == 'dense':
partials['f_xy', 'x'] = np.diag(2.0 * x + 2.0 * a + y)
partials['f_xy', 'y'] = np.diag(2.0 * y + 8.0 + x)
partials['f_xy', 'a'] = np.diag(2.0 * a + 2.0 * x)
elif deriv_type == 'sparse':
partials['f_xy', 'x'] = 2.0 * x + 2.0 * a + y
partials['f_xy', 'y'] = 2.0 * y + 8.0 + x
partials['f_xy', 'a'] = 2.0 * a + 2.0 * x
# Simplified version for feature docs without the extra testing args.
class DistParabFeature(om.ExplicitComponent):
def initialize(self):
self.options.declare('arr_size', types=int, default=10,
desc="Size of input and output vectors.")
def setup(self):
arr_size = self.options['arr_size']
self.add_input('x', val=1., distributed=False,
shape=arr_size)
self.add_input('y', val=1., distributed=False,
shape=arr_size)
sizes, offsets = evenly_distrib_idxs(self.comm.size, arr_size)
self.start = offsets[self.comm.rank]
self.end = self.start + sizes[self.comm.rank]
self.a = -3.0 + 0.6 * np.arange(self.start,self.end)
self.add_output('f_xy', shape=len(self.a), distributed=True)
self.add_output('f_sum', shape=1, distributed=False)
self.declare_coloring(wrt='*', method='fd')
def compute(self, inputs, outputs):
x = inputs['x'][self.start:self.end]
y = inputs['y'][self.start:self.end]
outputs['f_xy'] = (x + self.a)**2 + x * y + (y + 4.0)**2 - 3.0
local_sum = np.sum(outputs['f_xy'])
global_sum = np.zeros(1)
self.comm.Allreduce(local_sum, global_sum, op=MPI.SUM)
outputs['f_sum'] = global_sum
class DistParabDeprecated(om.ExplicitComponent):
def initialize(self):
self.options.declare('arr_size', types=int, default=10,
desc="Size of input and output vectors.")
def setup(self):
arr_size = self.options['arr_size']
comm = self.comm
rank = comm.rank
sizes, offsets = evenly_distrib_idxs(comm.size, arr_size)
start = offsets[rank]
io_size = sizes[rank]
self.offset = offsets[rank]
end = start + io_size
self.add_input('x', val=np.ones(io_size), distributed=True,
src_indices=np.arange(start, end, dtype=int))
self.add_input('y', val=np.ones(io_size), distributed=True,
src_indices=np.arange(start, end, dtype=int))
self.add_input('offset', val=-3.0 * np.ones(io_size), distributed=True,
src_indices=np.arange(start, end, dtype=int))
self.add_output('f_xy', val=np.ones(io_size), distributed=True)
row_col =
|
np.arange(io_size)
|
numpy.arange
|
import pytest
import numpy as np
import pandas as pd
from ..utils import _check_random_state
from ..utils import _check_min_supp
from ..utils import _check_growth_rate
from ..utils import filter_maximal
from ..utils import filter_minimal
from ..utils import intersect2d
def test_check_random_state():
random_state = np.random.RandomState(18)
assert random_state == _check_random_state(random_state)
assert isinstance(_check_random_state(4), np.random.RandomState)
def test_check_random_state_error():
with pytest.raises(TypeError):
_check_random_state(object())
def test_wrong_minimum_supports():
wrong_values = [-1, -100, 2.33, 150.55]
for wrong_supp in wrong_values:
with pytest.raises(ValueError):
_check_min_supp(wrong_supp)
with pytest.raises(TypeError):
_check_min_supp("string minimum support")
with pytest.raises(ValueError):
_check_min_supp(12, accept_absolute=False)
def test_minimum_support():
assert _check_min_supp(0.1) == 0.1
assert _check_min_supp(10) == 10
def test_wrong_growth_rate():
for wrong_gr in [0.3, -10]:
with pytest.raises(ValueError):
_check_growth_rate(wrong_gr)
def test_growth_rate():
assert _check_growth_rate(1.5) == 1.5
def test_filter_max():
D = pd.Series(
[
{2, 3},
{2},
{4, 1},
{4, 7},
{4, 1, 8},
]
)
maximums = list(filter_maximal(D))
assert maximums == D.iloc[[0, 3, 4]].tolist()
def test_filter_min():
D = pd.Series(
[
{2, 3},
{2},
{4, 1},
{4, 7},
{4, 1, 8},
]
)
maximums = list(filter_minimal(D))
assert maximums == D.iloc[[1, 2, 3]].tolist()
def test_intersect2d():
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
b = [[1, 3, 5], [7, 1, 2], [4, 5, 6]]
ab, a_ind, b_ind = intersect2d(a, b)
np.testing.assert_array_equal(ab, np.array([a[1]]))
np.testing.assert_array_equal(a_ind,
|
np.array([1])
|
numpy.array
|
#
# Created by: <NAME>, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
# generates a random matrix of desired data type of shape
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function', 'HAS_ILP64',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))),
m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
roots = np.array(roots)[::-1]
assert_((not np.any(np.isnan(roots)), "There are NaN roots"))
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
class TestTbtrs(object):
@pytest.mark.parametrize('dtype', DTYPES)
def test_nag_example_f07vef_f07vsf(self, dtype):
"""Test real (f07vef) and complex (f07vsf) examples from NAG
Examples available from:
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
"""
if dtype in REAL_DTYPES:
ab = np.array([[-4.16, 4.78, 6.32, 0.16],
[-2.25, 5.86, -4.82, 0]],
dtype=dtype)
b = np.array([[-16.64, -4.16],
[-13.78, -16.59],
[13.10, -4.94],
[-14.14, -9.96]],
dtype=dtype)
x_out = np.array([[4, 1],
[-1, -3],
[3, 2],
[2, -2]],
dtype=dtype)
elif dtype in COMPLEX_DTYPES:
ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
[-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
[1.62+3.68j, -2.77-1.93j, 0, 0]],
dtype=dtype)
b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
[-15.57 - 23.41j, -57.97 + 8.14j],
[-7.63 + 22.78j, 19.09 - 29.51j],
[-14.74 - 2.40j, 19.17 + 21.33j]],
dtype=dtype)
x_out = np.array([[2j, 1 + 5j],
[1 - 3j, -7 - 2j],
[-4.001887 - 4.988417j, 3.026830 + 4.003182j],
[1.996158 - 1.045105j, -6.103357 - 8.986653j]],
dtype=dtype)
else:
raise ValueError(f"Datatype {dtype} not understood.")
tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
x, info = tbtrs(ab=ab, b=b, uplo='L')
assert_equal(info, 0)
assert_allclose(x, x_out, rtol=0, atol=1e-5)
@pytest.mark.parametrize('dtype,trans',
[(dtype, trans)
for dtype in DTYPES for trans in ['N', 'T', 'C']
if not (trans == 'C' and dtype in REAL_DTYPES)])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
def test_random_matrices(self, dtype, trans, uplo, diag):
seed(1724)
# n, nrhs, kd are used to specify A and b.
# A is of shape n x n with kd super/sub-diagonals
# b is of shape n x nrhs matrix
n, nrhs, kd = 4, 3, 2
tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
is_upper = (uplo == 'U')
ku = kd * is_upper
kl = kd - ku
# Construct the diagonal and kd super/sub diagonals of A with
# the corresponding offsets.
band_offsets = range(ku, -kl - 1, -1)
band_widths = [n - abs(x) for x in band_offsets]
bands = [generate_random_dtype_array((width,), dtype)
for width in band_widths]
if diag == 'U': # A must be unit triangular
bands[ku] = np.ones(n, dtype=dtype)
# Construct the diagonal banded matrix A from the bands and offsets.
a = sps.diags(bands, band_offsets, format='dia')
# Convert A into banded storage form
ab = np.zeros((kd + 1, n), dtype)
for row, k in enumerate(band_offsets):
ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
# The RHS values.
b = generate_random_dtype_array((n, nrhs), dtype)
x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
assert_equal(info, 0)
if trans == 'N':
assert_allclose(a @ x, b, rtol=5e-5)
elif trans == 'T':
assert_allclose(a.T @ x, b, rtol=5e-5)
elif trans == 'C':
assert_allclose(a.H @ x, b, rtol=5e-5)
else:
raise ValueError('Invalid trans argument')
@pytest.mark.parametrize('uplo,trans,diag',
[['U', 'N', 'Invalid'],
['U', 'Invalid', 'N'],
['Invalid', 'N', 'N']])
def test_invalid_argument_raises_exception(self, uplo, trans, diag):
"""Test if invalid values of uplo, trans and diag raise exceptions"""
# Argument checks occur independently of used datatype.
# This mean we must not parameterize all available datatypes.
tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
ab = rand(4, 2)
b = rand(2, 4)
assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
def test_zero_element_in_diagonal(self):
"""Test if a matrix with a zero diagonal element is singular
If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info`
indicating the provided matrix is singular.
Note that ?tbtrs requires the matrix A to be stored in banded form.
In this form the diagonal corresponds to the last row."""
ab = np.ones((3, 4), dtype=float)
b = np.ones(4, dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab[-1, 3] = 0
_, info = tbtrs(ab=ab, b=b, uplo='U')
assert_equal(info, 4)
@pytest.mark.parametrize('ldab,n,ldb,nrhs', [
(5, 5, 0, 5),
(5, 5, 3, 5)
])
def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
"""Test ?tbtrs fails correctly if shapes are invalid."""
ab = np.ones((ldab, n), dtype=float)
b = np.ones((ldb, nrhs), dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
assert_raises(Exception, tbtrs, ab, b)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.full(4, 3, dtype)
v = np.full(4, 4, dtype)
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4, 4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
# our test here will be to do one step of reducing a hermetian matrix to
# tridiagonal form using householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that workaround around an apparent LAPACK bug
# actually works. cf. gh-5401
#
# xslow: requires 1GB+ of memory
p = subprocess.Popen([sys.executable, '-c',
'import numpy as np; '
'from scipy.linalg import svd; '
'a = np.zeros([9537, 9537], dtype=np.float32); '
'svd(a)'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Check if it an error occurred within 5 sec; the computation can
# take substantially longer, and we will not wait for it to finish
for j in range(50):
time.sleep(0.1)
if p.poll() is not None:
returncode = p.returncode
break
else:
# Didn't exit in time -- probably entered computation. The
# error is raised before entering computation, so things are
# probably OK.
returncode = 0
p.terminate()
assert_equal(returncode, 0,
"Code apparently failed: " + p.stdout.read().decode())
class TestSytrd(object):
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_sytrd_with_zero_dim_array(self, dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=dtype)
sytrd = get_lapack_funcs('sytrd', (A,))
assert_raises(ValueError, sytrd, A)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('n', (1, 3))
def test_sytrd(self, dtype, n):
A = np.zeros((n, n), dtype=dtype)
sytrd, sytrd_lwork = \
get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = \
np.arange(1, n*(n+1)//2+1, dtype=dtype)
# query lwork
lwork, info = sytrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
assert_allclose(d, np.diag(A))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = sytrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=dtype)
k = np.arange(A.shape[0])
T[k, k] = d
k2 = np.arange(A.shape[0]-1)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=dtype)
for i in range(n-1):
v = np.zeros(n, dtype=dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
Q = np.dot(H, Q)
# Make matrix fully symmetric
i_lower = np.tril_indices(n, -1)
A[i_lower] = A.T[i_lower]
QTAQ = np.dot(Q.T, np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd(object):
@pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
def test_hetrd_with_zero_dim_array(self, complex_dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd = get_lapack_funcs('hetrd', (A,))
assert_raises(ValueError, hetrd, A)
@pytest.mark.parametrize('real_dtype,complex_dtype',
zip(REAL_DTYPES, COMPLEX_DTYPES))
@pytest.mark.parametrize('n', (1, 3))
def test_hetrd(self, n, real_dtype, complex_dtype):
A = np.zeros((n, n), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# test query lwork
for x in [0, 1]:
_, info = hetrd_lwork(n, lower=x)
assert_equal(info, 0)
# lwork returns complex which segfaults hetrd call (gh-10388)
# use the safe and recommended option
lwork = _compute_lwork(hetrd_lwork, n)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = hetrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=real_dtype)
k = np.arange(A.shape[0], dtype=int)
T[k, k] = d
k2 = np.arange(A.shape[0]-1, dtype=int)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=complex_dtype)
for i in range(n-1):
v = np.zeros(n, dtype=complex_dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=complex_dtype) \
- tau[i] * np.outer(v, np.conj(v))
Q = np.dot(H, Q)
# Make matrix fully Hermitian
i_lower = np.tril_indices(n, -1)
A[i_lower] = np.conj(A.T[i_lower])
QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(
QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
)
def test_gglse():
# Example data taken from NAG manual
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s,d,c,z> gglse
func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
dtype=dtype)
lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
# For <s,d>gglse
if ind < 2:
a = np.array([[-0.57, -1.28, -0.39, 0.25],
[-1.93, 1.08, -0.31, -2.14],
[2.30, 0.24, 0.40, -0.35],
[-1.93, 0.64, -0.66, 0.08],
[0.15, 0.30, 0.15, -2.13],
[-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
d = np.array([0., 0.], dtype=dtype)
# For <s,d>gglse
else:
a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
[-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
[0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
[0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
[0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
[1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
c = np.array([[-2.54+0.09j],
[1.65-2.26j],
[-2.11-3.96j],
[1.82+3.30j],
[-6.41+3.77j],
[2.07+0.66j]])
d = np.zeros(2, dtype=dtype)
b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
_, _, _, result, _ = func(a, b, c, d, lwork=lwork)
if ind < 2:
expected = np.array([0.48904455,
0.99754786,
0.48904455,
0.99754786])
else:
expected = np.array([1.08742917-1.96205783j,
-0.74093902+3.72973919j,
1.08742917-1.96205759j,
-0.74093896+3.72973895j])
assert_array_almost_equal(result, expected, decimal=4)
def test_sycon_hecon():
seed(1234)
for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
# DTYPES + COMPLEX DTYPES = <s,d,c,z> sycon + <c,z>hecon
n = 10
# For <s,d,c,z>sycon
if ind < 4:
func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
A = (rand(n, n)).astype(dtype)
# For <c,z>hecon
else:
func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
# Since sycon only refers to upper/lower part, conj() is safe here.
A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
anorm = norm(A, 1)
lwork = _compute_lwork(func_lwork, n)
ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
# The error is at most 1-fold
assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
def test_sygst():
seed(1234)
for ind, dtype in enumerate(REAL_DTYPES):
# DTYPES = <s,d> sygst
n = 10
potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
'syevd', 'sygvd'),
dtype=dtype)
A = rand(n, n).astype(dtype)
A = (A + A.T)/2
# B must be positive definite
B = rand(n, n).astype(dtype)
B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (sygvd)
eig_gvd, _, info = sygvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = sygst(A, b)
assert_(info == 0)
eig, _, info = syevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_hegst():
seed(1234)
for ind, dtype in enumerate(COMPLEX_DTYPES):
# DTYPES = <c,z> hegst
n = 10
potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst',
'heevd', 'hegvd'),
dtype=dtype)
A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
A = (A + A.conj().T)/2
# B must be positive definite
B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype)
B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype)
# Perform eig (hegvd)
eig_gvd, _, info = hegvd(A, B)
assert_(info == 0)
# Convert to std problem potrf
b, info = potrf(B)
assert_(info == 0)
a, info = hegst(A, b)
assert_(info == 0)
eig, _, info = heevd(a)
assert_(info == 0)
assert_allclose(eig, eig_gvd, rtol=1e-4)
def test_tzrzf():
"""
This test performs an RZ decomposition in which an m x n upper trapezoidal
array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular
and Z is unitary.
"""
seed(1234)
m, n = 10, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork = _compute_lwork(tzrzf_lw, m, n)
if ind < 2:
A = triu(rand(m, n).astype(dtype))
else:
A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype))
# assert wrong shape arg, f2py returns generic error
assert_raises(Exception, tzrzf, A.T)
rz, tau, info = tzrzf(A, lwork=lwork)
# Check success
assert_(info == 0)
# Get Z manually for comparison
R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype)))
V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:]))
Id = np.eye(n, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)]
Z = reduce(np.dot, ref)
assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype),
atol=10*np.spacing(dtype(1.0).real), rtol=0.)
def test_tfsm():
"""
Test for solving a linear system with the coefficient matrix is a
triangular array stored in Full Packed (RFP) format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype)
trans = 'C'
else:
A = triu(rand(n, n) + eye(n)).astype(dtype)
trans = 'T'
trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'),
dtype=dtype)
Afp, _ = trttf(A)
B = rand(n, 2).astype(dtype)
soln = tfsm(-1, Afp, B)
assert_array_almost_equal(soln, solve(-A, B),
decimal=4 if ind % 2 == 0 else 6)
soln = tfsm(-1, Afp, B, trans=trans)
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Make A, unit diagonal
A[np.arange(n), np.arange(n)] = dtype(1.)
soln = tfsm(-1, Afp, B, trans=trans, diag='U')
assert_array_almost_equal(soln, solve(-A.conj().T, B),
decimal=4 if ind % 2 == 0 else 6)
# Change side
B2 = rand(3, n).astype(dtype)
soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R')
assert_array_almost_equal(soln, solve(-A, B2.T).conj().T,
decimal=4 if ind % 2 == 0 else 6)
def test_ormrz_unmrz():
"""
This test performs a matrix multiplication with an arbitrary m x n matric C
and a unitary matrix Q without explicitly forming the array. The array data
is encoded in the rectangular part of A which is obtained from ?TZRZF. Q
size is inferred by m, n, side keywords.
"""
seed(1234)
qm, qn, cn = 10, 15, 15
for ind, dtype in enumerate(DTYPES):
tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'),
dtype=dtype)
lwork_rz = _compute_lwork(tzrzf_lw, qm, qn)
if ind < 2:
A = triu(rand(qm, qn).astype(dtype))
C = rand(cn, cn).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'),
dtype=dtype)
else:
A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype))
C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype)
orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'),
dtype=dtype)
lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn)
rz, tau, info = tzrzf(A, lwork=lwork_rz)
# Get Q manually for comparison
V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:]))
Id = np.eye(qn, dtype=dtype)
ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)]
Q = reduce(np.dot, ref)
# Now that we have Q, we can test whether lapack results agree with
# each case of CQ, CQ^H, QC, and QC^H
trans = 'T' if ind < 2 else 'C'
tol = 10*np.spacing(dtype(1.0).real)
cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol,
rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.)
cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz)
assert_(info == 0)
assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol,
rtol=0.)
def test_tfttr_trttf():
"""
Test conversion routines between the Rectengular Full Packed (RFP) format
and Standard Triangular Array (TR)
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transr = 'C'
else:
A_full = (rand(n, n)).astype(dtype)
transr = 'T'
trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype)
A_tf_U, info = trttf(A_full)
assert_(info == 0)
A_tf_L, info = trttf(A_full, uplo='L')
assert_(info == 0)
A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U')
assert_(info == 0)
A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L')
assert_(info == 0)
# Create the RFP array manually (n is even!)
A_tf_U_m = zeros((n+1, n//2), dtype=dtype)
A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:]
A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T
A_tf_L_m = zeros((n+1, n//2), dtype=dtype)
A_tf_L_m[1:, :] = tril(A_full)[:, :n//2]
A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T
assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_U_T,
A_tf_U_m.conj().T.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F'))
assert_array_almost_equal(A_tf_L_T,
A_tf_L_m.conj().T.reshape(-1, order='F'))
# Get the original array from RFP
A_tr_U, info = tfttr(n, A_tf_U)
assert_(info == 0)
A_tr_L, info = tfttr(n, A_tf_L, uplo='L')
assert_(info == 0)
A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U')
assert_(info == 0)
A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_U_T, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
assert_array_almost_equal(A_tr_L_T, tril(A_full))
def test_tpttr_trttp():
"""
Test conversion routines between the Rectengular Full Packed (RFP) format
and Standard Triangular Array (TR)
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A_full = (rand(n, n)).astype(dtype)
trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype)
A_tp_U, info = trttp(A_full)
assert_(info == 0)
A_tp_L, info = trttp(A_full, uplo='L')
assert_(info == 0)
# Create the TP array manually
inds = tril_indices(n)
A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_U_m[:] = (triu(A_full).T)[inds]
inds = triu_indices(n)
A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype)
A_tp_L_m[:] = (tril(A_full).T)[inds]
assert_array_almost_equal(A_tp_U, A_tp_U_m)
assert_array_almost_equal(A_tp_L, A_tp_L_m)
# Get the original array from TP
A_tr_U, info = tpttr(n, A_tp_U)
assert_(info == 0)
A_tr_L, info = tpttr(n, A_tp_L, uplo='L')
assert_(info == 0)
assert_array_almost_equal(A_tr_U, triu(A_full))
assert_array_almost_equal(A_tr_L, tril(A_full))
def test_pftrf():
"""
Test Cholesky factorization of a positive definite Rectengular Full
Packed (RFP) format array
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
Achol_rfp, info = pftrf(n, Afp)
assert_(info == 0)
A_chol_r, _ = tfttr(n, Achol_rfp)
Achol = cholesky(A)
assert_array_almost_equal(A_chol_r, Achol)
def test_pftri():
"""
Test Cholesky factorization of a positive definite Rectengular Full
Packed (RFP) format array to find its inverse
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
A_inv_rfp, info = pftri(n, A_chol_rfp)
assert_(info == 0)
A_inv_r, _ = tfttr(n, A_inv_rfp)
Ainv = inv(A)
assert_array_almost_equal(A_inv_r, triu(Ainv),
decimal=4 if ind % 2 == 0 else 6)
def test_pftrs():
"""
Test Cholesky factorization of a positive definite Rectengular Full
Packed (RFP) format array and solve a linear system
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
B = ones((n, 3), dtype=dtype)
Bf1 = ones((n+2, 3), dtype=dtype)
Bf2 = ones((n-2, 3), dtype=dtype)
pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs',
'pftrf',
'trttf',
'tfttr'),
dtype=dtype)
# Get the original array from TP
Afp, info = trttf(A)
A_chol_rfp, info = pftrf(n, Afp)
# larger B arrays shouldn't segfault
soln, info = pftrs(n, A_chol_rfp, Bf1)
assert_(info == 0)
assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2)
soln, info = pftrs(n, A_chol_rfp, B)
assert_(info == 0)
assert_array_almost_equal(solve(A, B), soln,
decimal=4 if ind % 2 == 0 else 6)
def test_sfrk_hfrk():
"""
Test for performing a symmetric rank-k operation for matrix in RFP format.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
A = A + A.conj().T + n*eye(n)
else:
A = (rand(n, n)).astype(dtype)
A = A + A.T + n*eye(n)
prefix = 's'if ind < 2 else 'h'
trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', '{}frk'
''.format(prefix)),
dtype=dtype)
Afp, _ = trttf(A)
C = np.random.rand(n, 2).astype(dtype)
Afp_out = shfrk(n, 2, -1, C, 2, Afp)
A_out, _ = tfttr(n, Afp_out)
assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A),
decimal=4 if ind % 2 == 0 else 6)
def test_syconv():
"""
Test for going back and forth between the returned format of he/sytrf to
L and D factors/permutations.
"""
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 10
if ind > 1:
A = (randint(-30, 30, (n, n)) +
randint(-30, 30, (n, n))*1j).astype(dtype)
A = A + A.conj().T
else:
A = randint(-30, 30, (n, n)).astype(dtype)
A = A + A.T + n*eye(n)
tol = 100*np.spacing(dtype(1.0).real)
syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf',
'sytrf_lwork'), dtype=dtype)
lw = _compute_lwork(trf_lwork, n, lower=1)
L, D, perm = ldl(A, lower=1, hermitian=False)
lw = _compute_lwork(trf_lwork, n, lower=1)
ldu, ipiv, info = trf(A, lower=1, lwork=lw)
a, e, info = syconv(ldu, ipiv, lower=1)
assert_allclose(tril(a, -1,), tril(L[perm, :], -1), atol=tol, rtol=0.)
# Test also upper
U, D, perm = ldl(A, lower=0, hermitian=False)
ldu, ipiv, info = trf(A, lower=0)
a, e, info = syconv(ldu, ipiv, lower=0)
assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.)
class TestBlockedQR(object):
"""
Tests for the blocked QR factorization, namely through geqrt, gemqrt, tpqrt
and tpmqr.
"""
def test_geqrt_gemqrt(self):
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A = (rand(n, n)).astype(dtype)
tol = 100*np.spacing(dtype(1.0).real)
geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype)
a, t, info = geqrt(n, A)
assert(info == 0)
# Extract elementary reflectors from lower triangle, adding the
# main diagonal of ones.
v = np.tril(a, -1) + np.eye(n, dtype=dtype)
# Generate the block Householder transform I - VTV^H
Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj()
R = np.triu(a)
# Test columns of Q are orthogonal
assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol,
rtol=0.)
assert_allclose(Q @ R, A, atol=tol, rtol=0.)
if ind > 1:
C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transpose = 'C'
else:
C = (rand(n, n)).astype(dtype)
transpose = 'T'
for side in ('L', 'R'):
for trans in ('N', transpose):
c, info = gemqrt(a, t, C, side=side, trans=trans)
assert(info == 0)
if trans == transpose:
q = Q.T.conj()
else:
q = Q
if side == 'L':
qC = q @ C
else:
qC = C @ q
assert_allclose(c, qC, atol=tol, rtol=0.)
# Test default arguments
if (side, trans) == ('L', 'N'):
c_default, info = gemqrt(a, t, C)
assert(info == 0)
assert_equal(c_default, c)
# Test invalid side/trans
assert_raises(Exception, gemqrt, a, t, C, side='A')
assert_raises(Exception, gemqrt, a, t, C, trans='A')
def test_tpqrt_tpmqrt(self):
seed(1234)
for ind, dtype in enumerate(DTYPES):
n = 20
if ind > 1:
A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
B = (rand(n, n) + rand(n, n)*1j).astype(dtype)
else:
A = (rand(n, n)).astype(dtype)
B = (rand(n, n)).astype(dtype)
tol = 100*np.spacing(dtype(1.0).real)
tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype)
# Test for the range of pentagonal B, from square to upper
# triangular
for l in (0, n // 2, n):
a, b, t, info = tpqrt(l, n, A, B)
assert(info == 0)
# Check that lower triangular part of A has not been modified
assert_equal(np.tril(a, -1), np.tril(A, -1))
# Check that elements not part of the pentagonal portion of B
# have not been modified.
assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1))
# Extract pentagonal portion of B
B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n)
# Generate elementary reflectors
v = np.concatenate((np.eye(n, dtype=dtype), b_pent))
# Generate the block Householder transform I - VTV^H
Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj()
R = np.concatenate((np.triu(a), np.zeros_like(a)))
# Test columns of Q are orthogonal
assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype),
atol=tol, rtol=0.)
assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)),
atol=tol, rtol=0.)
if ind > 1:
C = (rand(n, n) + rand(n, n)*1j).astype(dtype)
D = (rand(n, n) + rand(n, n)*1j).astype(dtype)
transpose = 'C'
else:
C = (rand(n, n)).astype(dtype)
D = (rand(n, n)).astype(dtype)
transpose = 'T'
for side in ('L', 'R'):
for trans in ('N', transpose):
c, d, info = tpmqrt(l, b, t, C, D, side=side,
trans=trans)
assert(info == 0)
if trans == transpose:
q = Q.T.conj()
else:
q = Q
if side == 'L':
cd = np.concatenate((c, d), axis=0)
CD = np.concatenate((C, D), axis=0)
qCD = q @ CD
else:
cd = np.concatenate((c, d), axis=1)
CD = np.concatenate((C, D), axis=1)
qCD = CD @ q
assert_allclose(cd, qCD, atol=tol, rtol=0.)
if (side, trans) == ('L', 'N'):
c_default, d_default, info = tpmqrt(l, b, t, C, D)
assert(info == 0)
assert_equal(c_default, c)
assert_equal(d_default, d)
# Test invalid side/trans
assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A')
assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A')
def test_pstrf():
seed(1234)
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s, d, c, z> pstrf
n = 10
r = 2
pstrf = get_lapack_funcs('pstrf', dtype=dtype)
# Create positive semidefinite A
if ind > 1:
A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
A = A @ A.conj().T
else:
A = rand(n, n-r).astype(dtype)
A = A @ A.T
c, piv, r_c, info = pstrf(A)
U = triu(c)
U[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# python-dbg 3.5.2 runs cause trouble with the following assertion.
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
c, piv, r_c, info = pstrf(A, lower=1)
L = tril(c)
L[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_pstf2():
seed(1234)
for ind, dtype in enumerate(DTYPES):
# DTYPES = <s, d, c, z> pstf2
n = 10
r = 2
pstf2 = get_lapack_funcs('pstf2', dtype=dtype)
# Create positive semidefinite A
if ind > 1:
A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
A = A @ A.conj().T
else:
A = rand(n, n-r).astype(dtype)
A = A @ A.T
c, piv, r_c, info = pstf2(A)
U = triu(c)
U[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# python-dbg 3.5.2 runs cause trouble with the commented assertions.
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
c, piv, r_c, info = pstf2(A, lower=1)
L = tril(c)
L[r_c - n:, r_c - n:] = 0.
assert_equal(info, 1)
# assert_equal(r_c, n - r)
single_atol = 1000 * np.finfo(np.float32).eps
double_atol = 1000 * np.finfo(np.float64).eps
atol = single_atol if ind in [0, 2] else double_atol
assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
def test_geequ():
desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269],
[1.0000, -0.5619, -1.0000, -1.0000],
[0.5874, -1.0000, -0.0596, -0.5341],
[-1.0000, -0.5946, -0.0294, 0.9957]])
desired_cplx = np.array([[-0.2816+0.5359*1j,
0.0812+0.9188*1j,
-0.7439-0.2561*1j],
[-0.3562-0.2954*1j,
0.9566-0.0434*1j,
-0.0174+0.1555*1j],
[0.8607+0.1393*1j,
-0.2759+0.7241*1j,
-0.1642-0.1365*1j]])
for ind, dtype in enumerate(DTYPES):
if ind < 2:
# Use examples from the NAG documentation
A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09],
[5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00],
[1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00],
[-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]])
A = A.astype(dtype)
else:
A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00],
[-1.70e+00, 3.31e+10, -0.15e+00],
[2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype)
A += np.array([[2.55e+00, 3.17e+10, -2.20e+00],
[-1.41e+00, -0.15e+10, 1.34e+00],
[0.39e-10, 1.47e+00, -0.69e-10]])*1j
A = A.astype(dtype)
geequ = get_lapack_funcs('geequ', dtype=dtype)
r, c, rowcnd, colcnd, amax, info = geequ(A)
if ind < 2:
assert_allclose(desired_real.astype(dtype), r[:, None]*A*c,
rtol=0, atol=1e-4)
else:
assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c,
rtol=0, atol=1e-4)
def test_syequb():
desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3])
for ind, dtype in enumerate(DTYPES):
A = np.eye(10, dtype=dtype)
alpha = dtype(1. if ind < 2 else 1.j)
d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype)
A += np.rot90(np.diag(d))
syequb = get_lapack_funcs('syequb', dtype=dtype)
s, scond, amax, info = syequb(A)
assert_equal(np.log2(s).astype(int), desired_log2s)
@pytest.mark.skipif(True,
reason="Failing on some OpenBLAS version, see gh-12276")
def test_heequb():
# zheequb has a bug for versions =< LAPACK 3.9.0
# See Reference-LAPACK gh-61 and gh-408
# Hence the zheequb test is customized accordingly to avoid
# work scaling.
A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j
s, scond, amax, info = lapack.zheequb(A)
assert_equal(info, 0)
assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5)
A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j)
A[5, 5] = 1024
A[5, 0] = 16j
s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1)
assert_equal(info, 0)
assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2])
def test_getc2_gesc2():
np.random.seed(42)
n = 10
desired_real = np.random.rand(n)
desired_cplx = np.random.rand(n) + np.random.rand(n)*1j
for ind, dtype in enumerate(DTYPES):
if ind < 2:
A = np.random.rand(n, n)
A = A.astype(dtype)
b = A @ desired_real
b = b.astype(dtype)
else:
A = np.random.rand(n, n) + np.random.rand(n, n)*1j
A = A.astype(dtype)
b = A @ desired_cplx
b = b.astype(dtype)
getc2 = get_lapack_funcs('getc2', dtype=dtype)
gesc2 = get_lapack_funcs('gesc2', dtype=dtype)
lu, ipiv, jpiv, info = getc2(A, overwrite_a=0)
x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0)
if ind < 2:
assert_array_almost_equal(desired_real.astype(dtype),
x/scale, decimal=4)
else:
assert_array_almost_equal(desired_cplx.astype(dtype),
x/scale, decimal=4)
@pytest.mark.parametrize('size', [(6, 5), (5, 5)])
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('joba', range(6)) # 'C', 'E', 'F', 'G', 'A', 'R'
@pytest.mark.parametrize('jobu', range(4)) # 'U', 'F', 'W', 'N'
@pytest.mark.parametrize('jobv', range(4)) # 'V', 'J', 'W', 'N'
@pytest.mark.parametrize('jobr', [0, 1])
@pytest.mark.parametrize('jobp', [0, 1])
def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0):
"""Test the lapack routine ?gejsv.
This function tests that a singular value decomposition can be performed
on the random M-by-N matrix A. The test performs the SVD using ?gejsv
then performs the following checks:
* ?gejsv exist successfully (info == 0)
* The returned singular values are correct
* `A` can be reconstructed from `u`, `SIGMA`, `v`
* Ensure that u.T @ u is the identity matrix
* Ensure that v.T @ v is the identity matrix
* The reported matrix rank
* The reported number of singular values
* If denormalized floats are required
Notes
-----
joba specifies several choices effecting the calculation's accuracy
Although all arguments are tested, the tests only check that the correct
solution is returned - NOT that the prescribed actions are performed
internally.
jobt is, as of v3.9.0, still experimental and removed to cut down number of
test cases. However keyword itself is tested externally.
"""
seed(42)
# Define some constants for later use:
m, n = size
atol = 100 * np.finfo(dtype).eps
A = generate_random_dtype_array(size, dtype)
gejsv = get_lapack_funcs('gejsv', dtype=dtype)
# Set up checks for invalid job? combinations
# if an invalid combination occurs we set the appropriate
# exit status.
lsvec = jobu < 2 # Calculate left singular vectors
rsvec = jobv < 2 # Calculate right singular vectors
l2tran = (jobt == 1) and (m == n)
is_complex = np.iscomplexobj(A)
invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex)
invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex
invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex
# Set the exit status to the expected value.
# Here we only check for invalid combinations, not individual
# parameters.
if invalid_cplx_jobu:
exit_status = -2
elif invalid_real_jobv or invalid_cplx_jobv:
exit_status = -3
else:
exit_status = 0
if (jobu > 1) and (jobv == 1):
assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp)
else:
sva, u, v, work, iwork, info = gejsv(A,
joba=joba,
jobu=jobu,
jobv=jobv,
jobr=jobr,
jobt=jobt,
jobp=jobp)
# Check that ?gejsv exited successfully/as expected
assert_equal(info, exit_status)
# If exit_status is non-zero the combination of jobs is invalid.
# We test this above but no calculations are performed.
if not exit_status:
# Check the returned singular values
sigma = (work[0] / work[1]) * sva[:n]
assert_allclose(sigma, svd(A, compute_uv=False), atol=atol)
if jobu == 1:
# If JOBU = 'F', then u contains the M-by-M matrix of
# the left singular vectors, including an ONB of the orthogonal
# complement of the Range(A)
# However, to recalculate A we are concerned about the
# first n singular values and so can ignore the latter.
# TODO: Add a test for ONB?
u = u[:, :n]
if lsvec and rsvec:
assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol)
if lsvec:
assert_allclose(u.conj().T @ u, np.identity(n), atol=atol)
if rsvec:
assert_allclose(v.conj().T @ v, np.identity(n), atol=atol)
assert_equal(iwork[0], np.linalg.matrix_rank(A))
assert_equal(iwork[1], np.count_nonzero(sigma))
# iwork[2] is non-zero if requested accuracy is not warranted for
# the data. This should never occur for these tests.
assert_equal(iwork[2], 0)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_gejsv_edge_arguments(dtype):
"""Test edge arguments return expected status"""
gejsv = get_lapack_funcs('gejsv', dtype=dtype)
# scalar A
sva, u, v, work, iwork, info = gejsv(1.)
assert_equal(info, 0)
assert_equal(u.shape, (1, 1))
assert_equal(v.shape, (1, 1))
assert_equal(sva, np.array([1.], dtype=dtype))
# 1d A
A = np.ones((1,), dtype=dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_equal(info, 0)
assert_equal(u.shape, (1, 1))
assert_equal(v.shape, (1, 1))
assert_equal(sva, np.array([1.], dtype=dtype))
# 2d empty A
A = np.ones((1, 0), dtype=dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_equal(info, 0)
assert_equal(u.shape, (1, 0))
assert_equal(v.shape, (1, 0))
assert_equal(sva, np.array([], dtype=dtype))
# make sure "overwrite_a" is respected - user reported in gh-13191
A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype)
A = np.asfortranarray(A + A.T) # make it symmetric and column major
Ac = A.copy('A')
_ = gejsv(A)
assert_allclose(A, Ac)
@pytest.mark.parametrize(('kwargs'),
({'joba': 9},
{'jobu': 9},
{'jobv': 9},
{'jobr': 9},
{'jobt': 9},
{'jobp': 9})
)
def test_gejsv_invalid_job_arguments(kwargs):
"""Test invalid job arguments raise an Exception"""
A = np.ones((2, 2), dtype=float)
gejsv = get_lapack_funcs('gejsv', dtype=float)
assert_raises(Exception, gejsv, A, **kwargs)
@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect",
[(np.array([[2.27, -1.54, 1.15, -1.94],
[0.28, -1.67, 0.94, -0.78],
[-0.48, -3.09, 0.99, -0.21],
[1.07, 1.22, 0.79, 0.63],
[-2.35, 2.93, -1.45, 2.30],
[0.62, -7.39, 1.03, -2.57]]),
np.array([9.9966, 3.6831, 1.3569, 0.5000]),
np.array([[0.2774, -0.6003, -0.1277, 0.1323],
[0.2020, -0.0301, 0.2805, 0.7034],
[0.2918, 0.3348, 0.6453, 0.1906],
[-0.0938, -0.3699, 0.6781, -0.5399],
[-0.4213, 0.5266, 0.0413, -0.0575],
[0.7816, 0.3353, -0.1645, -0.3957]]),
np.array([[0.1921, -0.8030, 0.0041, -0.5642],
[-0.8794, -0.3926, -0.0752, 0.2587],
[0.2140, -0.2980, 0.7827, 0.5027],
[-0.3795, 0.3351, 0.6178, -0.6017]]))])
def test_gejsv_NAG(A, sva_expect, u_expect, v_expect):
"""
This test implements the example found in the NAG manual, f08khf.
An example was not found for the complex case.
"""
# NAG manual provides accuracy up to 4 decimals
atol = 1e-4
gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
sva, u, v, work, iwork, info = gejsv(A)
assert_allclose(sva_expect, sva, atol=atol)
assert_allclose(u_expect, u, atol=atol)
assert_allclose(v_expect, v, atol=atol)
@pytest.mark.parametrize("dtype", DTYPES)
def test_gttrf_gttrs(dtype):
# The test uses ?gttrf and ?gttrs to solve a random system for each dtype,
# tests that the output of ?gttrf define LU matricies, that input
# parameters are unmodified, transposal options function correctly, that
# incompatible matrix shapes raise an error, and singular matrices return
# non zero info.
seed(42)
n = 10
atol = 100 * np.finfo(dtype).eps
# create the matrix in accordance with the data type
du = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
dl = generate_random_dtype_array((n-1,), dtype=dtype)
diag_cpy = [dl.copy(), d.copy(), du.copy()]
A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
x = np.random.rand(n)
b = A @ x
gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
# test to assure that the inputs of ?gttrf are unmodified
assert_array_equal(dl, diag_cpy[0])
assert_array_equal(d, diag_cpy[1])
assert_array_equal(du, diag_cpy[2])
# generate L and U factors from ?gttrf return values
# L/U are lower/upper triangular by construction (initially and at end)
U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2)
L = np.eye(n, dtype=dtype)
for i, m in enumerate(_dl):
# L is given in a factored form.
# See
# www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html
piv = ipiv[i] - 1
# right multiply by permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# right multiply by Li, rank-one modification of identity
L[:, i] += L[:, i+1]*m
# one last permutation
i, piv = -1, ipiv[-1] - 1
# right multiply by final permutation matrix
L[:, [i, piv]] = L[:, [piv, i]]
# check that the outputs of ?gttrf define an LU decomposition of A
assert_allclose(A, L @ U, atol=atol)
b_cpy = b.copy()
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
# test that the inputs of ?gttrs are unmodified
assert_array_equal(b, b_cpy)
# test that the result of ?gttrs matches the expected input
assert_allclose(x, x_gttrs, atol=atol)
# test that ?gttrf and ?gttrs work with transposal options
if dtype in REAL_DTYPES:
trans = "T"
b_trans = A.T @ x
else:
trans = "C"
b_trans = A.conj().T @ x
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans)
assert_allclose(x, x_gttrs, atol=atol)
# test that ValueError is raised with incompatible matrix shapes
with assert_raises(ValueError):
gttrf(dl[:-1], d, du)
with assert_raises(ValueError):
gttrf(dl, d[:-1], du)
with assert_raises(ValueError):
gttrf(dl, d, du[:-1])
# test that matrix of size n=2 raises exception
with assert_raises(Exception):
gttrf(dl[0], d[:1], du[0])
# test that singular (row of all zeroes) matrix fails via info
du[0] = 0
d[0] = 0
__dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du)
np.testing.assert_(__d[info - 1] == 0,
"?gttrf: _d[info-1] is {}, not the illegal value :0."
.format(__d[info - 1]))
@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([2.3, -5, -.9, 7.1]),
np.array([3.4, 3.6, 7, -6, -1.015373]),
np.array([-1, 1.9, 8]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.7, 6.6],
[-0.5, 10.8],
[2.6, -3.2],
[0.6, -11.2],
[2.7, 19.1]
]),
np.array([[-4, 5],
[7, -4],
[3, -3],
[-4, -2],
[-3, 1]])),
(
np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j,
-1.3 + 3.3j, - .3 + 4.3j,
-3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
# du exp
np.array([-1.3 + 1.3j, -1.3 + 3.3j,
-0.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j,
-1.3399 + 0.2875j]),
np.array([2 + 1j, -1 + 1j, 1 - 1j]),
np.array([2, 3, 4, 5, 5]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, - 6.9 - 5.3j],
[-14.7 + 9.7j, - 6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j],
[3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j],
[-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]])
)])
def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp,
du2_exp, ipiv_exp, b, x):
# test to assure that wrapper is consistent with NAG Library Manual Mark 26
# example problems: f07cdf and f07cef (real)
# examples: f07crf and f07csf (complex)
# (Links may expire, so search for "NAG Library Manual Mark 26" online)
gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0]))
_dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
assert_allclose(du2, du2_exp)
assert_allclose(_du, du_exp)
assert_allclose(_d, d_exp, atol=1e-4) # NAG examples provide 4 decimals.
assert_allclose(ipiv, ipiv_exp)
x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
assert_allclose(x_gttrs, x)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)])
def test_geqrfp_lwork(dtype, shape):
geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrfp_lwork(m=m, n=n)
assert_equal(info, 0)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs(ddtype, dtype):
seed(42)
# set test tolerance appropriate for dtype
atol = 100*np.finfo(dtype).eps
# n is the length diagonal of A
n = 10
# create diagonals according to size and dtype
# diagonal d should always be real.
# add 4 to d so it will be dominant for all dtypes
d = generate_random_dtype_array((n,), ddtype) + 4
# diagonal e may be real or complex.
e = generate_random_dtype_array((n-1,), dtype)
# assemble diagonals together into matrix
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
# store a copy of diagonals to later verify
diag_cpy = [d.copy(), e.copy()]
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
_d, _e, info = pttrf(d, e)
# test to assure that the inputs of ?pttrf are unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_equal(info, 0, err_msg="pttrf: info = {}, should be 0".format(info))
# test that the factors from pttrf can be recombined to make A
L = np.diag(_e, -1) + np.diag(np.ones(n))
D = np.diag(_d)
assert_allclose(A, L@[email protected]().T, atol=atol)
# generate random solution x
x = generate_random_dtype_array((n,), dtype)
# determine accompanying b to get soln x
b = A@x
# determine _x from pttrs
pttrs = get_lapack_funcs('pttrs', dtype=dtype)
_x, info = pttrs(_d, _e.conj(), b)
assert_equal(info, 0, err_msg="pttrs: info = {}, should be 0".format(info))
# test that _x from pttrs matches the expected x
assert_allclose(x, _x, atol=atol)
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that ValueError is raised with incompatible matrix shapes
assert_raises(ValueError, pttrf, d[:-1], e)
assert_raises(ValueError, pttrf, d, e[:-1])
@pytest.mark.parametrize("ddtype,dtype",
zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype):
n = 10
pttrf = get_lapack_funcs('pttrf', dtype=dtype)
d = generate_random_dtype_array((n,), ddtype) + 2
e = generate_random_dtype_array((n-1,), dtype)
# test that singular (row of all zeroes) matrix fails via info
d[0] = 0
e[0] = 0
_d, _e, info = pttrf(d, e)
assert_equal(_d[info - 1], 0,
"?pttrf: _d[info-1] is {}, not the illegal value :0."
.format(_d[info - 1]))
# test with non-spd matrix
d = generate_random_dtype_array((n,), ddtype)
_d, _e, info = pttrf(d, e)
assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't")
@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [
(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([4, 9, 25, 16, 1]),
np.array([-.5, -.6667, .6, .5]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6],
[3, -5]])
), (
np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([16, 9, 1, 4]),
np.array([1+1j, 2-1j, 1-4j]),
np.array([[64+16j, -16-32j], [93+62j, 61-66j],
[78-80j, 71-74j], [14-27j, 35+15j]]),
np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j],
[1-1j, 2+1j]])
)])
def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect):
# test to assure that wrapper is consistent with NAG Manual Mark 26
# example problems: f07jdf and f07jef (real)
# examples: f07jrf and f07csf (complex)
# NAG examples provide 4 decimals.
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
atol = 1e-4
pttrf = get_lapack_funcs('pttrf', dtype=e[0])
_d, _e, info = pttrf(d, e)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(_e, e_expect, atol=atol)
pttrs = get_lapack_funcs('pttrs', dtype=e[0])
_x, info = pttrs(_d, _e.conj(), b)
assert_allclose(_x, x_expect, atol=atol)
# also test option `lower`
if e.dtype in COMPLEX_DTYPES:
_x, info = pttrs(_d, _e, b, lower=1)
assert_allclose(_x, x_expect, atol=atol)
def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z):
# used by ?pteqr tests to build parameters
# returns tuple of (d, e, A, z)
if compute_z == 1:
# build Hermitian A from Q**T * tri * Q = A by creating Q and tri
A_eig = generate_random_dtype_array((n, n), dtype)
A_eig = A_eig + np.diag(np.zeros(n) + 4*n)
A_eig = (A_eig + A_eig.conj().T) / 2
# obtain right eigenvectors (orthogonal)
vr = eigh(A_eig)[1]
# create tridiagonal matrix
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), realtype)
tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
# Build A using these factors that sytrd would: (Q**T * tri * Q = A)
A = vr @ tri @ vr.conj().T
# vr is orthogonal
z = vr
else:
# d and e are always real per lapack docs.
d = generate_random_dtype_array((n,), realtype)
e = generate_random_dtype_array((n-1,), realtype)
# make SPD
d = d + 4
A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1)
return (d, e, A, z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr(dtype, realtype, compute_z):
'''
Tests the ?pteqr lapack routine for all dtypes and compute_z parameters.
It generates random SPD matrix diagonals d and e, and then confirms
correct eigenvalues with scipy.linalg.eig. With applicable compute_z=2 it
tests that z can reform A.
'''
seed(42)
atol = 1000*np.finfo(dtype).eps
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_equal(info, 0, "info = {}, should be 0.".format(info))
# compare the routine's eigenvalues with scipy.linalg.eig's.
assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol)
if compute_z:
# verify z_pteqr as orthogonal
assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n),
atol=atol)
# verify that z_pteqr recombines to A
assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T,
A, atol=atol)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_non_spd(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with non-spd matrix
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with incorrect/incompatible array sizes
assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z)
assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z)
if compute_z:
assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z)
@pytest.mark.parametrize("dtype,realtype",
zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
@pytest.mark.parametrize("compute_z", range(3))
def test_pteqr_error_singular(dtype, realtype, compute_z):
seed(42)
pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
n = 10
d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
# test with singular matrix
d[0] = 0
e[0] = 0
d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z)
assert info > 0
@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect",
[(2, # "I"
np.array([4.16, 5.25, 1.09, .62]),
np.array([3.17, -.97, .55]),
np.array([8.0023, 1.9926, 1.0014, 0.1237]),
np.array([[0.6326, 0.6245, -0.4191, 0.1847],
[0.7668, -0.4270, 0.4176, -0.2352],
[-0.1082, 0.6071, 0.4594, -0.6393],
[-0.0081, 0.2432, 0.6625, 0.7084]])),
])
def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect):
'''
Implements real (f08jgf) example from NAG Manual Mark 26.
Tests for correct outputs.
'''
# the NAG manual has 4 decimals accuracy
atol = 1e-4
pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype)
z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
_d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
assert_allclose(_d, d_expect, atol=atol)
assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)])
def test_geqrfp(dtype, matrix_size):
# Tests for all dytpes, tall, wide, and square matrices.
# Using the routine with random matrix A, Q and R are obtained and then
# tested such that R is upper triangular and non-negative on the diagonal,
# and Q is an orthagonal matrix. Verifies that A=Q@R. It also
# tests against a matrix that for which the linalg.qr method returns
# negative diagonals, and for error messaging.
# set test tolerance appropriate for dtype
np.random.seed(42)
rtol = 250*np.finfo(dtype).eps
atol = 100*np.finfo(dtype).eps
# get appropriate ?geqrfp for dtype
geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype)
gqr = get_lapack_funcs(("orgqr"), dtype=dtype)
m, n = matrix_size
# create random matrix of dimentions m x n
A = generate_random_dtype_array((m, n), dtype=dtype)
# create qr matrix using geqrfp
qr_A, tau, info = geqrfp(A)
# obtain r from the upper triangular area
r = np.triu(qr_A)
# obtain q from the orgqr lapack routine
# based on linalg.qr's extraction strategy of q with orgqr
if m > n:
# this adds an extra column to the end of qr_A
# let qqr be an empty m x m matrix
qqr = np.zeros((m, m), dtype=dtype)
# set first n columns of qqr to qr_A
qqr[:, :n] = qr_A
# determine q from this qqr
# note that m is a sufficient for lwork based on LAPACK documentation
q = gqr(qqr, tau=tau, lwork=m)[0]
else:
q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0]
# test that q and r still make A
assert_allclose(q@r, A, rtol=rtol)
# ensure that q is orthogonal (that q @ transposed q is the identity)
assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol,
atol=atol)
# ensure r is upper tri by comparing original r to r as upper triangular
assert_allclose(r, np.triu(r), rtol=rtol)
# make sure diagonals of r are positive for this random solution
assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r)))))
# ensure that info is zero for this success
assert_(info == 0)
# test that this routine gives r diagonals that are positive for a
# matrix that returns negatives in the diagonal with scipy.linalg.rq
A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1
r_rq_neg, q_rq_neg = qr(A_negative)
rq_A_neg, tau_neg, info_neg = geqrfp(A_negative)
# assert that any of the entries on the diagonal from linalg.qr
# are negative and that all of geqrfp are positive.
assert_(np.any(np.diag(r_rq_neg) < 0) and
np.all(np.diag(r) > 0))
def test_geqrfp_errors_with_empty_array():
# check that empty array raises good error message
A_empty = np.array([])
geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype)
assert_raises(Exception, geqrfp, A_empty)
@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_standard_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
try:
_compute_lwork(sc_dlw, n, lower=1)
_compute_lwork(dz_dlw, n, lower=1)
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("driver", ['gv', 'gvx'])
@pytest.mark.parametrize("pfx", ['sy', 'he'])
def test_generalized_eigh_lworks(pfx, driver):
n = 1200 # Some sufficiently big arbitrary number
dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
# Shouldn't raise any exceptions
try:
_compute_lwork(sc_dlw, n, uplo="L")
_compute_lwork(dz_dlw, n, uplo="L")
except Exception as e:
pytest.fail("{}_lwork raised unexpected exception: {}"
"".format(pfx+driver, e))
@pytest.mark.parametrize("dtype_", DTYPES)
@pytest.mark.parametrize("m", [1, 10, 100, 1000])
def test_orcsd_uncsd_lwork(dtype_, m):
seed(1234)
p = randint(0, m)
q = m - p
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
dlw = pfx + 'csd_lwork'
lw = get_lapack_funcs(dlw, dtype=dtype_)
lwval = _compute_lwork(lw, m, p, q)
lwval = lwval if pfx == 'un' else (lwval,)
assert all([x > 0 for x in lwval])
@pytest.mark.parametrize("dtype_", DTYPES)
def test_orcsd_uncsd(dtype_):
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_)
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'], lwval))
cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
assert info == 0
U = block_diag(u1, u2)
VH = block_diag(v1t, v2t)
r = min(min(p, q), min(m-p, m-q))
n11 = min(p, q) - r
n12 = min(p, m-q) - r
n21 = min(m-p, q) - r
n22 = min(m-p, m-q) - r
S = np.zeros((m, m), dtype=dtype_)
one = dtype_(1.)
for i in range(n11):
S[i, i] = one
for i in range(n22):
S[p+i, q+i] = one
for i in range(n12):
S[i+n11+r, i+n11+r+n21+n22+r] = -one
for i in range(n21):
S[p+n22+r+i, n11+r+i] = one
for i in range(r):
S[i+n11, i+n11] = np.cos(theta[i])
S[p+n22+i, i+r+n21+n22] = np.cos(theta[i])
S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i])
S[p+n22+i, i+n11] = np.sin(theta[i])
Xc = U @ S @ VH
assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps)
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx(dtype, trans_bool, fact):
"""
These tests uses ?gtsvx to solve a random Ax=b system for each dtype.
It tests that the outputs define an LU matrix, that inputs are unmodified,
transposal options, incompatible shapes, singular matrices, and
singular factorizations. It parametrizes DTYPES and the 'fact' value along
with the fact related inputs.
"""
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N"
b = (A.conj().T if trans_bool else A) @ x
# store a copy of the inputs to check they haven't been modified later
inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()]
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_(info == 0, "?gtsvx info = {}, should be zero".format(info))
# assure that inputs are unmodified
assert_array_equal(dl, inputs_cpy[0])
assert_array_equal(d, inputs_cpy[1])
assert_array_equal(du, inputs_cpy[2])
assert_array_equal(b, inputs_cpy[3])
# test that x_soln matches the expected x
assert_allclose(x, x_soln, atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
assert_(hasattr(rcond, "__len__") is not True,
"rcond should be scalar but is {}".format(rcond))
# ferr should be length of # of cols in x
assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but shoud be {},"
.format(ferr.shape[0], b.shape[1]))
# berr should be length of # of cols in x
assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but shoud be {},"
.format(berr.shape[0], b.shape[1]))
@pytest.mark.parametrize("dtype", DTYPES)
@pytest.mark.parametrize("trans_bool", [0, 1])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_singular(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test with singular matrix
# no need to test inputs with fact "F" since ?gttrf already does.
if fact == "N":
# Construct a singular example manually
d[-1] = 0
dl[-1] = 0
# solve using routine
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# test for the singular matrix.
assert info > 0, "info should be > 0 for singular matrix"
elif fact == 'F':
# assuming that a singular factorization is input
df_[-1] = 0
duf_[-1] = 0
du2f_[-1] = 0
gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_,
du2=du2f_, ipiv=ipiv_)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
# info should not be zero and should provide index of illegal value
assert info > 0, "info should be > 0 for singular matrix"
@pytest.mark.parametrize("dtype", DTYPES*2)
@pytest.mark.parametrize("trans_bool", [False, True])
@pytest.mark.parametrize("fact", ["F", "N"])
def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact):
seed(42)
# obtain routine
gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
# Generate random tridiagonal matrix A
n = 10
dl = generate_random_dtype_array((n-1,), dtype=dtype)
d = generate_random_dtype_array((n,), dtype=dtype)
du = generate_random_dtype_array((n-1,), dtype=dtype)
A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
# generate random solution x
x = generate_random_dtype_array((n, 2), dtype=dtype)
# create b from x for equation Ax=b
trans = "T" if dtype in REAL_DTYPES else "C"
b = (A.conj().T if trans_bool else A) @ x
# set these to None if fact = 'N', or the output of gttrf is fact = 'F'
dlf_, df_, duf_, du2f_, ipiv_, info_ = \
gttrf(dl, d, du) if fact == 'F' else [None]*6
if fact == "N":
assert_raises(ValueError, gtsvx, dl[:-1], d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d[:-1], du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du[:-1], b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(Exception, gtsvx, dl, d, du, b[:-1],
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
else:
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_[:-1], df=df_,
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_[:-1],
duf=duf_, du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_[:-1], du2=du2f_, ipiv=ipiv_)
assert_raises(ValueError, gtsvx, dl, d, du, b,
fact=fact, trans=trans, dlf=dlf_, df=df_,
duf=duf_, du2=du2f_[:-1], ipiv=ipiv_)
@pytest.mark.parametrize("du,d,dl,b,x",
[(np.array([2.1, -1.0, 1.9, 8.0]),
np.array([3.0, 2.3, -5.0, -0.9, 7.1]),
np.array([3.4, 3.6, 7.0, -6.0]),
np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2],
[.6, -11.2], [2.7, 19.1]]),
np.array([[-4, 5], [7, -4], [3, -3], [-4, -2],
[-3, 1]])),
(np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j,
-.3 + 4.3j, -3.3 + 1.3j]),
np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
np.array([[2.4 - 5j, 2.7 + 6.9j],
[3.4 + 18.2j, -6.9 - 5.3j],
[-14.7 + 9.7j, -6 - .6j],
[31.9 - 7.7j, -3.9 + 9.3j],
[-1 + 1.6j, -3 + 12.2j]]),
np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j],
[4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j],
[1 - 1j, 2 - 2j]]))])
def test_gtsvx_NAG(du, d, dl, b, x):
# Test to ensure wrapper is consistent with NAG Manual Mark 26
# example problems: real (f07cbf) and complex (f07cpf)
gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype)
gtsvx_out = gtsvx(dl, d, du, b)
dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
assert_array_almost_equal(x, x_soln)
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx(dtype, realtype, fact, df_de_lambda):
'''
This tests the ?ptsvx lapack routine wrapper to solve a random system
Ax = b for all dtypes and input variations. Tests for: unmodified
input parameters, fact options, incompatible matrix shapes raise an error,
and singular matrices return info of illegal value.
'''
seed(42)
# set test tolerance appropriate for dtype
atol = 100 * np.finfo(dtype).eps
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
# create copy to later test that they are unmodified
diag_cpy = [d.copy(), e.copy(), b.copy()]
# solve using routine
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
df=df, ef=ef)
# d, e, and b should be unmodified
assert_array_equal(d, diag_cpy[0])
assert_array_equal(e, diag_cpy[1])
assert_array_equal(b, diag_cpy[2])
assert_(info == 0, "info should be 0 but is {}.".format(info))
assert_array_almost_equal(x_soln, x)
# test that the factors from ptsvx can be recombined to make A
L = np.diag(ef, -1) + np.diag(np.ones(n))
D = np.diag(df)
assert_allclose(A, L@D@(np.conj(L).T), atol=atol)
# assert that the outputs are of correct type or shape
# rcond should be a scalar
assert not hasattr(rcond, "__len__"), \
"rcond should be scalar but is {}".format(rcond)
# ferr should be length of # of cols in x
assert_(ferr.shape == (2,), "ferr.shape is {} but shoud be ({},)"
.format(ferr.shape, x_soln.shape[1]))
# berr should be length of # of cols in x
assert_(berr.shape == (2,), "berr.shape is {} but shoud be ({},)"
.format(berr.shape, x_soln.shape[1]))
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda):
seed(42)
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
# test with malformatted array sizes
assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef)
assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef)
assert_raises(Exception, ptsvx, d, e, b[:-1], fact=fact, df=df, ef=ef)
@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ REAL_DTYPES))
@pytest.mark.parametrize("fact,df_de_lambda",
[("F",
lambda d, e:get_lapack_funcs('pttrf',
dtype=e.dtype)(d, e)),
("N", lambda d, e: (None, None, None))])
def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda):
seed(42)
ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
n = 5
# create diagonals according to size and dtype
d = generate_random_dtype_array((n,), realtype) + 4
e = generate_random_dtype_array((n-1,), dtype)
A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
b = A @ x_soln
# use lambda to determine what df, ef are
df, ef, info = df_de_lambda(d, e)
if fact == "N":
d[3] = 0
# obtain new df, ef
df, ef, info = df_de_lambda(d, e)
# solve using routine
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
# test for the singular matrix.
assert info > 0 and info <= n
# non SPD matrix
d = generate_random_dtype_array((n,), realtype)
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
assert info > 0 and info <= n
else:
# assuming that someone is using a singular factorization
df, ef, info = df_de_lambda(d, e)
df[0] = 0
ef[0] = 0
df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
df=df, ef=ef)
assert info > 0
@pytest.mark.parametrize('d,e,b,x',
[(np.array([4, 10, 29, 25, 5]),
np.array([-2, -6, 15, 8]),
np.array([[6, 10], [9, 4], [2, 9], [14, 65],
[7, 23]]),
np.array([[2.5, 2], [2, -1], [1, -3],
[-1, 6], [3, -5]])),
(np.array([16, 41, 46, 21]),
np.array([16 + 16j, 18 - 9j, 1 - 4j]),
np.array([[64 + 16j, -16 - 32j],
[93 + 62j, 61 - 66j],
[78 - 80j, 71 - 74j],
[14 - 27j, 35 + 15j]]),
np.array([[2 + 1j, -3 - 2j],
[1 + 1j, 1 + 1j],
[1 - 2j, 1 - 2j],
[1 - 1j, 2 + 1j]]))])
def test_ptsvx_NAG(d, e, b, x):
# test to assure that wrapper is consistent with NAG Manual Mark 26
# example problemss: f07jbf, f07jpf
# (Links expire, so please search for "NAG Library Manual Mark 26" online)
# obtain routine with correct type based on e.dtype
ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype)
# solve using routine
df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b)
# determine ptsvx's solution and x are the same.
assert_array_almost_equal(x, x_ptsvx)
@pytest.mark.parametrize('lower', [False, True])
@pytest.mark.parametrize('dtype', DTYPES)
def test_pptrs_pptri_pptrf_ppsv_ppcon(dtype, lower):
seed(1234)
atol = np.finfo(dtype).eps*100
# Manual conversion to/from packed format is feasible here.
n, nrhs = 10, 4
a = generate_random_dtype_array([n, n], dtype=dtype)
b = generate_random_dtype_array([n, nrhs], dtype=dtype)
a = a.conj().T + a + np.eye(n, dtype=dtype) * dtype(5.)
if lower:
inds = ([x for y in range(n) for x in range(y, n)],
[y for y in range(n) for x in range(y, n)])
else:
inds = ([x for y in range(1, n+1) for x in range(y)],
[y-1 for y in range(1, n+1) for x in range(y)])
ap = a[inds]
ppsv, pptrf, pptrs, pptri, ppcon = get_lapack_funcs(
('ppsv', 'pptrf', 'pptrs', 'pptri', 'ppcon'),
dtype=dtype,
ilp64="preferred")
ul, info = pptrf(n, ap, lower=lower)
assert_equal(info, 0)
aul = cholesky(a, lower=lower)[inds]
assert_allclose(ul, aul, rtol=0, atol=atol)
uli, info = pptri(n, ul, lower=lower)
|
assert_equal(info, 0)
|
numpy.testing.assert_equal
|
from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import cv2
import os
import sys
from torch.optim.lr_scheduler import ExponentialLR
from collections import namedtuple
from got10k.trackers import Tracker
from torch.utils.data import DataLoader
from . import ops
from .losses import BalancedCELoss , SmoothL1
from .transforms import SiamRPNTransforms
from .net import SiamRPN
from .datasets import Pair
# import fitlog
_all__ = ['TrackerSiamRPN']
class SiamRPNLoss(nn.Module):
def __init__(self, lamda = 1.0 , num_pos = 16, num_neg = 16):
super(SiamRPNLoss, self).__init__()
self.lamda = lamda
self.num_pos = num_pos
self.num_neg = num_neg
self.LossCls = BalancedCELoss()
self.LossRpn = SmoothL1()
def forward(self, cls_out , reg_out , cls_target , reg_target):
loss_cls = self.LossCls(cls_out , cls_target , num_pos = self.num_pos , num_neg = self.num_neg)
loss_rpn = self.LossRpn(reg_out , reg_target , cls_target , num_pos = self.num_pos)
return loss_cls + self.lamda * loss_rpn
class TrackerSiamRPN(Tracker):
def __init__(self, net_path=None, fit_log = False , **kargs):
super(TrackerSiamRPN, self).__init__(
name='SiamRPN', is_deterministic=True)
self.parse_args(fit_log , **kargs)
#GPU device if available
self.cuda = torch.cuda.is_available()
self.device = torch.device('cuda:1' if self.cuda else 'cpu')
# setup model
self.net = SiamRPN()
ops.init_weights(self.net)
if net_path is not None:
self.net.load_state_dict(torch.load(
net_path, map_location=lambda storage, loc: storage))
self.net = self.net.to(self.device)
# loss func
self.criterion = SiamRPNLoss(lamda = self.cfg.lamda ,num_pos=self.cfg.num_pos, num_neg=self.cfg.num_neg )
# optimizer
self.optimizer = optim.SGD(
self.net.parameters(),
lr=self.cfg.initial_lr,
weight_decay=self.cfg.weight_decay,
momentum=self.cfg.momentum)
# lr schedule
gamma = np.power(
self.cfg.ultimate_lr / self.cfg.initial_lr,
1.0 / self.cfg.epoch_num)
self.lr_scheduler = ExponentialLR(self.optimizer, gamma)
def parse_args(self, fit_log = False , **kargs):
self.cfg = {
'exemplar_sz': 127,
'instance_sz': 271,
'total_stride': 8,
'context': 0.5,
'ratios': [0.33, 0.5, 1, 2, 3],
'scales': [8,],
'penalty_k': 0.055,
'window_influence': 0.42,
'lr': 0.295,
# train para
'batch_size' : 8,
"clip" : 10,
'num_workers': 16,
'epoch_num': 60,
'initial_lr': 3e-2,
'ultimate_lr': 1e-5,
'weight_decay': 5e-4,
'momentum': 0.9,
'lamda': 5,
'num_pos' : 16,
'num_neg' : 48,
}
# if fit_log:
# record_key = self.cfg.copy()
# fitlog.add_other(record_key , name = "used cfg")
for key, val in kargs.items():
self.cfg.update({key: val})
# print(self.cfg)
self.cfg = namedtuple('GenericDict', self.cfg.keys())(**self.cfg)
@torch.no_grad()
def init(self, image, box):
image = np.asarray(image)
# convert box to 0-indexed and center based [y, x, h, w]
box = np.array([
box[1] - 1 + (box[3] - 1) / 2,
box[0] - 1 + (box[2] - 1) / 2,
box[3], box[2]], dtype=np.float32)
self.center, self.target_sz = box[:2], box[2:]
# for small target, use larger search region
if np.prod(self.target_sz) / np.prod(image.shape[:2]) < 0.004:
self.cfg = self.cfg._replace(instance_sz=287)
# generate anchors
self.response_sz = (self.cfg.instance_sz - \
self.cfg.exemplar_sz) // self.cfg.total_stride + 1
self.anchors = ops.create_anchors(self.cfg , self.response_sz)
# create hanning window
self.hann_window = np.outer(
|
np.hanning(self.response_sz)
|
numpy.hanning
|
from abc import ABC, abstractmethod
from typing import Optional
import numpy as np
from gym.spaces import Discrete, MultiDiscrete
from torch.distributions import Normal, kl_divergence
from pearll.common.type_aliases import (
CrossoverFunc,
MutationFunc,
SelectionFunc,
UpdaterLog,
)
from pearll.common.utils import to_torch
from pearll.models.actor_critics import ActorCritic
class BaseEvolutionUpdater(ABC):
"""
The base random search updater class with pre-defined methods for derived classes
:param model: the actor critic model containing the population
:param population_type: the type of population to update, either "actor" or "critic"
"""
def __init__(self, model: ActorCritic, population_type: str = "actor") -> None:
self.model = model
self.population_type = population_type
if population_type == "actor":
self.mean = model.mean_actor
self.std = self.model.population_settings.actor_std
self.population_size = model.num_actors
self.normal_dist = model.normal_dist_actor
self.space_shape = model.actor.space_shape
self.space_range = model.actor.space_range
self.space = model.actor.space
elif population_type == "critic":
self.mean = model.mean_critic
self.std = self.model.population_settings.critic_std
self.population_size = model.num_critics
self.normal_dist = model.normal_dist_critic
self.space_shape = model.critic.space_shape
self.space_range = model.critic.space_range
self.space = model.critic.space
def update_networks(self, population: np.ndarray) -> None:
"""
Update the networks in the population
:param population: the population state to set the networks to
"""
if self.population_type == "actor":
self.model.set_actors_state(population)
elif self.population_type == "critic":
self.model.set_critics_state(population)
@abstractmethod
def __call__(self) -> UpdaterLog:
"""Run an optimization step"""
class NoisyGradientAscent(BaseEvolutionUpdater):
"""
Updater for the Natural Evolutionary Strategy
:param model: the actor critic model containing the population
:param population_type: the type of population to update, either "actor" or "critic"
"""
def __init__(self, model: ActorCritic, population_type: str = "actor") -> None:
super().__init__(model, population_type)
def __call__(
self,
learning_rate: float,
optimization_direction: np.ndarray,
mutation_operator: Optional[MutationFunc] = None,
) -> UpdaterLog:
"""
Perform an optimization step
:param learning_rate: the learning rate
:param optimization_direction: the optimization direction
:param mutation_operator: the mutation operator
:return: the updater log
"""
# Snapshot current population dist for kl divergence
# use copy() to avoid modifying the original
old_dist = Normal(to_torch(self.mean.copy()), self.std)
# Main update
self.mean += learning_rate * optimization_direction
# Generate new population
self.normal_dist = np.random.randn(self.population_size, *self.space_shape)
population = self.mean + (self.std * self.normal_dist)
if mutation_operator is not None:
population = mutation_operator(population, self.space)
# Discretize and clip population as needed
if isinstance(self.space, (Discrete, MultiDiscrete)):
population = np.round(population).astype(np.int32)
population = np.clip(population, self.space_range[0], self.space_range[1])
self.update_networks(population)
# Calculate Log metrics
new_dist = Normal(to_torch(self.mean), self.std)
population_entropy = new_dist.entropy().mean()
population_kl = kl_divergence(old_dist, new_dist).mean()
return UpdaterLog(divergence=population_kl, entropy=population_entropy)
class GeneticUpdater(BaseEvolutionUpdater):
"""
Updater for the Genetic Algorithm
:param model: the actor critic model containing the population
:param population_type: the type of population to update, either "actor" or "critic"
"""
def __init__(self, model: ActorCritic, population_type: str = "actor") -> None:
super().__init__(model, population_type)
def __call__(
self,
rewards: np.ndarray,
selection_operator: Optional[SelectionFunc] = None,
crossover_operator: Optional[CrossoverFunc] = None,
mutation_operator: Optional[MutationFunc] = None,
elitism: float = 0.1,
) -> UpdaterLog:
"""
Perform an optimization step
:param rewards: the rewards for the current population
:param selection_operator: the selection operator function
:param crossover_operator: the crossover operator function
:param mutation_operator: the mutation operator function
:param elitism: fraction of the population to keep as elite
:return: the updater log
"""
# Store elite population
if self.population_type == "actor":
old_population = self.model.numpy_actors()
elif self.population_type == "critic":
old_population = self.model.numpy_critics()
if elitism > 0:
num_elite = int(self.population_size * elitism)
elite_indices = np.argpartition(rewards, -num_elite)[-num_elite:]
elite_population = old_population[elite_indices]
# Main update
if selection_operator is not None:
new_population = selection_operator(old_population, rewards)
if crossover_operator is not None:
new_population = crossover_operator(new_population)
if mutation_operator is not None:
new_population = mutation_operator(new_population, self.space)
if elitism > 0:
new_population[elite_indices] = elite_population
self.update_networks(new_population)
# Calculate Log metrics
divergence = np.mean(np.abs(new_population - old_population))
entropy = np.mean(
np.abs(np.max(new_population, axis=0) -
|
np.min(new_population, axis=0)
|
numpy.min
|
import matplotlib.pyplot as plt
#import matplotlib.axes as axes
import numpy as np
#axes.Axis.set_axisbelow(True)
x = np.array([1,2,3,4,5,6,7])
my_xticks = ['1','2','3','4','5','6','7']
plt.xticks(x, my_xticks)
# for L=1,w=1,d=1
# for L=1,w=2,d=1
# for L=1,w=3,d=1
# for L=1,w=4,d=1
y = np.array([0.207044,np.nan,np.nan,0.206619,np.nan,np.nan,np.nan])
plt.scatter(x, y, marker='^',color='blue',label='L=1,w=4,d=1')
# for l=2,w=1,d=1
# for l=2,w=2,d=1
# for l=2,w=3,d=1
y = np.array([0.376935,np.nan,0.326575,0.182479,np.nan,np.nan,np.nan])
plt.scatter(x, y, marker='o',color='red',label='L=2,w=3,d=1')
# for l=2,w=4,d=1
y = np.array([0.400412,np.nan,np.nan,np.nan,0.593843,np.nan,np.nan])
plt.scatter(x, y, marker='o',color='blue',label='L=2,w=4,d=1')
# for l=3
# for l=4,w=1,d=1
y = np.array([0.116092,0.103657,0.312526,np.nan,np.nan,np.nan,np.nan])
plt.scatter(x, y, marker='s',color='purple',label='L=4,w=1,d=1')
# for l=4,w=2,d=1
y =
|
np.array([np.nan,np.nan,0.375075,0.325434,np.nan,np.nan,np.nan])
|
numpy.array
|
from collections import namedtuple
from rlpyt.utils.collections import namedarraytuple, AttrDict
import numpy as np
Samples = namedarraytuple("Samples", ["agent", "env"])
AgentSamples = namedarraytuple("AgentSamples",
["action", "prev_action", "agent_info"])
AgentSamplesBsv = namedarraytuple("AgentSamplesBsv",
["action", "prev_action", "agent_info", "bootstrap_value"])
EnvSamples = namedarraytuple("EnvSamples",
["observation", "reward", "prev_reward", "done", "env_info"])
class BatchSpec(namedtuple("BatchSpec", "T B")):
"""
T: int Number of time steps, >=1.
B: int Number of separate trajectory segments (i.e. # env instances), >=1.
"""
__slots__ = ()
@property
def size(self):
return self.T * self.B
class TrajInfo(AttrDict):
"""
Because it inits as an AttrDict, this has the methods of a dictionary,
e.g. the attributes can be iterated through by traj_info.items()
Intent: all attributes not starting with underscore "_" will be logged.
(Can subclass for more fields.)
Convention: traj_info fields CamelCase, opt_info fields lowerCamelCase.
"""
_discount = 1 # Leading underscore, but also class attr not in self.__dict__.
def __init__(self, n_obs=None, serial=False, **kwargs):
super().__init__(**kwargs) # (for AttrDict behavior)
self.Length = 0
self.Return = 0
self.NonzeroRewards = 0
self.DiscountedReturn = 0
self._cur_discount = 1
self.TotalCost = 0
self._atari = False
if n_obs is not None:
if hasattr(n_obs,'__iter__'):
self._atari = True
self._window_size = n_obs
self._null_flag = True
else:
self._serial = serial
self._n_obs = n_obs
for i in range(n_obs):
setattr(self,"ObsPercentFeature" + str(i+1),0)
self.OverAllObsPercent = 0
def step(self, observation, action, reward, done, agent_info, env_info, cost=0, obs_act=None):
self.Length += 1
self.Return += reward
self.NonzeroRewards += reward != 0
self.DiscountedReturn += self._cur_discount * reward
self._cur_discount *= self._discount
self.TotalCost += cost
if obs_act is not None:
# assert np.array_equal(obs_act[0],obs_act[1]) and np.array_equal(obs_act[2],obs_act[3]) and np.array_equal(obs_act[0],obs_act[2])
if self._atari:
if self._null_flag:
x_res = int(np.ceil(observation.shape[1] / self._window_size[0]))
y_res = int(np.ceil(observation.shape[2] / self._window_size[1]))
self._masks = np.zeros([x_res,y_res,observation.shape[1],observation.shape[2]],dtype=bool)
zeromask = np.zeros([observation.shape[1],observation.shape[2]],dtype=bool)
for i in range(x_res):
xmask = zeromask.copy()
if i == x_res - 1:
xmask[i*self._window_size[0]:-1,:] = True
else:
xmask[i*self._window_size[0]:(i+1)*self._window_size[0],:] = True
for j in range(y_res):
ymask = zeromask.copy()
if j == y_res - 1:
ymask[j*self._window_size[1]:-1,:] = True
else:
ymask[j*self._window_size[1]:(j+1)*self._window_size[1],:] = True
self._masks[i,j] = np.bitwise_and(xmask,ymask)
setattr(self,"ObsMap" + str(i) + "x" + str(j),0)
for i in range(self._masks.shape[0]):
for j in range(self._masks.shape[1]):
setattr(self,"ObsMap" + str(i) + "x" + str(j),getattr(self,"ObsMap" + str(i) + "x" + str(j)) + np.sum(obs_act[0][self._masks[i,j]]))
else:
self.OverAllObsPercent +=
|
np.sum(obs_act)
|
numpy.sum
|
#!/usr/bin/env python3
def get_input():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'Output-File',
help='Output-File from a RASSI calculation'
)
parser.add_argument(
'-s',
'--sigma',
required=False,
type=float,
default=150,
help='Plotting option for gaussian broadening in cm**-1, default=150 cm**-1'
)
parser.add_argument(
'-x0',
'--begin',
required=False,
type=float,
default=8000,
help='Begin of the spectrum in cm**-1, default=8000 cm**-1'
)
parser.add_argument(
'-p',
'--points',
required=False,
type=int,
default=1000,
help='# of points the spectrum is plotted with, default=1000'
)
parser.add_argument(
'-x1',
'--end',
required=False,
type=float,
default=25000,
help='End of the spectrum in cm**-1, default=25000 cm**-1'
)
parser.add_argument(
'-t',
'--temperature',
required=False,
type=float,
default=298.15,
help='Temperature for Boltzmann distribution in K, default=298.15 K'
)
parser.add_argument(
'-u',
'--unit',
required=False,
type=str,
default="cm**-1",
help='Unit to plot the spectra - "cm**-1" or "nm", default="cm**-1"'
)
parser.add_argument(
'-b',
'--boltzmann',
required=False,
type=float,
default=0.1,
help="Threshold of Boltzmann factor for including initial states, default=0.1"
)
parser.add_argument(
'-f',
'--file',
required=False,
type=str,
default=None,
help="File with experimental values for additional plotting, default=None"
)
return vars(parser.parse_args())
def get_string_block(file,start,end):
'''
Extract a list of strings from a file from start to end.
'''
block = []
with open(file) as f:
Switch_Read = False
for line in f:
if start in line: # Start reading the important stuff here
Switch_Read = True
if Switch_Read == True:
if end in line: # Stop if the end of the block is found
break
block.append(line)
return block
def get_wavenumbers(input_file):
'''
Extract the wavenumbers from the spin-orbit states as a numpy list.
'''
start = 'Eigenvalues of complex Hamiltonian:'
end = 'Weights of the five most important'
pattern = re.compile('^\s+\d+\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(\d+\.\d+)\s*$')
wavenumbers = np.array([])
print("Start reading the excited state energies...",end='')
block = get_string_block(input_file,start,end)
for line in block:
match = pattern.match(line)
if match:
wavenumbers = np.append(wavenumbers,match.group(3)) # Save the wavenumbers
wavenumbers = wavenumbers.astype(float)
print("done. ",end='')
return wavenumbers
def get_transitions(input_file):
'''
Extract the oscillator strengths with the corresponding transitions.
The first array has the initial states, the second the final states and the
third the oscillator strengths.
'''
print("Start reading the transitions..............",end='')
start = '++ Dipole transition strengths (SO states):'
end = '++ Velocity transition strengths (SO states):'
pattern = re.compile('^\s+(\d+)\s+(\d+)\s+(\d\.\d+E[\+\-]\d{2})')
osc = np.array([])
init =
|
np.array([])
|
numpy.array
|
# # Short Assignment 2: Image Restoration
# ## SCC0251.2020.1 - Image Processing
# ### Prof. Dr. <NAME>
# ### 10284952 - <NAME>
# https://github.com/vitorgt/SCC0251
# Imports
import numpy as np
import imageio
# import matplotlib.pyplot as plt
r = imageio.imread(str(input()).rstrip()).astype(np.uint8)
k = int(input())
sigma = float(input())
gamma = float(input())
maxr = np.max(r)
# Normalize function
def scale(image, c=0, d=255):
a = np.min(image)
b = np.max(image)
return (image-a)*((d-c)/(b-a))+c
# Given function for gaussian filter
def gaussian_filter(k=3, sigma=1.0):
arx = np.arange((-k//2) + 1.0, (k//2) + 1.0)
x, y = np.meshgrid(arx, arx)
f = np.exp(-(1/2) * (np.square(x) + np.square(y))/np.square(sigma))
return f/np.sum(f)
# Function to apply filters on Fourier domain to images
def fft_filter(img, flt):
# padding
pad = (img.shape[0]//2)-flt.shape[0]//2
fltpad = np.pad(flt, (pad, pad-1), "constant", constant_values=0)
# transforming to fourier domain
IMG = np.fft.fftn(img)
FLT = np.fft.fftn(fltpad)
# convoluting
RES = np.multiply(FLT, IMG)
# transforming back to space domain
res = np.real(np.fft.fftshift(np.fft.ifftn(RES)))
return res
# Gaussian filter
h = gaussian_filter(k, sigma)
# denoising and normalizing
r_denoi = scale(fft_filter(r, h), d=maxr)
maxd = np.max(r_denoi)
# maybe the right way was to do this
# r_denoi = fft_filter(r, h)
# maxd = np.max(r_denoi)
# r_denoi = scale(r_denoi, d=maxr)
# plt.figure(figsize=(18, 8))
# plt.subplot(121)
# plt.imshow(r, cmap="gray")
# plt.axis('off')
# plt.subplot(122)
# plt.imshow(r_denoi, cmap="gray")
# plt.axis('off')
def clsf(g, h, p, gamma):
# padding
pad = (g.shape[0]//2)-h.shape[0]//2
hpad = np.pad(h, (pad, pad-1), "constant", constant_values=0)
# padding
pad = (g.shape[0]//2)-p.shape[0]//2
ppad = np.pad(p, (pad, pad-1), "constant", constant_values=0)
# transforming to fourier domain
G = np.fft.fftn(g)
H = np.fft.fftn(hpad)
P = np.fft.fftn(ppad)
# restoring the blur using the Constrained Least Squares method
F_hat = (np.conj(H) / (np.abs(H)**2 + gamma*np.abs(P)**2)) * G
# transforming back to space domain
f_hat = np.real(np.fft.fftshift(np.fft.ifftn(F_hat)))
return f_hat
# Laplacian operator
p = np.array([
[0, -1, 0],
[-1, 4, -1],
[0, -1, 0]])
# deblurring and normalizing
r_denoi_deblur = scale(clsf(r_denoi, h, p, gamma), d=maxd)
# plt.figure(figsize=(18, 8))
# plt.subplot(131)
# plt.imshow(r, cmap="gray")
# plt.axis('off')
# plt.subplot(132)
# plt.imshow(r_denoi, cmap="gray")
# plt.axis('off')
# plt.subplot(133)
# plt.imshow(r_denoi_deblur, cmap="gray")
# plt.axis('off')
print("{:.1f}".format(
|
np.std(r_denoi_deblur)
|
numpy.std
|
# -*- coding:utf8 -*-
# File : rng.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 2/23/17
#
# This file is part of TensorArtist.
# This file is part of NeuArtist2
import os
import numpy as np
import numpy.random as npr
__all__ = ['rng', 'reset_rng', 'gen_seed', 'gen_rng', 'shuffle_multiarray']
rng = None
def __initialize_rng():
seed = os.getenv('TART_RANDOM_SEED')
reset_rng(seed)
def reset_rng(seed=None):
global rng
if rng is None:
rng = npr.RandomState(seed)
else:
rng2 = npr.RandomState(seed)
rng.set_state(rng2.get_state())
def gen_seed():
global rng
return rng.randint(4294967296)
def gen_rng(seed=None):
if seed is None:
seed = gen_seed()
return
|
npr.RandomState(seed)
|
numpy.random.RandomState
|
r"""
Python module to compute the Mann-Kendall test for trend in time series data.
This module contains a single function 'test' which implements the Mann-Kendall
test for a linear trend in a given time series.
Introduction to the Mann-Kendall test
-------------------------------------
The Mann-Kendall test is used to determine whether or not there is a linear
monotonic trend in a given time series data. It is a non-parametric trend
closely related to the concept of Kendall's correlation coefficient [1]_. The
null hypothesis, :math:`H_0`, states that there is no monotonic trend, and this
is tested against one of three possible alternative hypotheses, :math:`H_a`:
(i) there is an upward monotonic trend, (ii) there is a downward monotonic
trend, or (iii) there is either an upward monotonic trend or a downward
monotonic trend. It is a robust test for trend detection used widely in
financial, climatological, hydrological, and environmental time series
analysis.
Assumptions underlying the Mann-Kendall test
--------------------------------------------
The Mann-Kendall test involves the following assumptions [2]_ regarding the
given time series data:
1. In the absence of a trend, the data are independently and identically
distributed (iid).
2. The measurements represent the true states of the observables at the
times of measurements.
3. The methods used for sample collection, instrumental measurements and
data handling are unbiased.
Advantages of the Mann-Kendall test
-----------------------------------
The Mann-Kendall test provides the following advantages:
1. It does not assume the data to be distributed according to any
particular rule, i.e., e.g., it does not require that the data be normally
distributed.
2. It is not effected by missing data other than the fact the number of
sample points are reduced and hence might effect the statistical
significance adversely.
3. It is not effected by irregular spacing of the time points of
measurement.
4. It is not effected by the length of the time series.
Limitations of the Mann-Kendall test
------------------------------------
The following limitations have to be kept in mind:
1. The Mann-Kendall test is not suited for data with periodicities (i.e.,
seasonal effects). In order for the test to be effective, it is recommended
that all known periodic effects be removed from the data in a preprocessing
step before computing the Mann-Kendall test.
2. The Mann-Kendall test tends to give more negative results for shorter
datasets, i.e., the longer the time series the more effective is the trend
detection computation.
Formulae
--------
The first step in the Mann-Kendall test for a time series :math:`x_1, x_2,
\dots, x_n` of length :math:`n` is to compute the indicator function
:math:`sgn(x_i - x_j)` such that:
.. math::
sgn(x_i - x_j) &=
\begin{cases}
1, & x_i - x_j > 0\\
0, & x_i - x_j = 0\\
-1, & x_i - x_j < 0
\end{cases},
which tells us whether the difference between the measurements at time
:math:`i` and :math:`j` are positive, negative or zero.
Next, we compute the mean and variance of the above quantity. The mean
:math:`E[S]` is given by:
.. math::
E[S] = \sum_{i=1}^{n-1} \sum_{j=i+1}^{n} sgn(x_i - x_j),
and the variance :math:`VAR(S)` is given by:
.. math::
VAR(S) = \frac{1}{18} \Big( n(n-1)(2n+5) - \sum_{k=1}^p
q_k(q_k-1)(2q_k+5) \Big),
where :math:`p` is the total number of tie groups in the data, and :math:`q_k`
is the number of data points contained in the :math:`k`-th tie group. For
example, if the time series measurements were {12, 56, 23, 12, 67, 45, 56, 56,
10}, we would have two tie groups for the measurements 12 and 56, i.e.
:math:`p=2`, and the number of data points in these tie groups would
:math:`q_1=2` for the tie group with {12}, and :math:`q_2=3` for the tie group
with {56}.
Using the mean :math:`E[S]` and the variance :math:`VAR(S)` we compute the
Mann-Kendall test statistic, using the following transformation, which ensures
that for large sample sizes, the test statistic :math:`Z_{MK}` is distributed
approximately normally:
.. math::
Z_{MK} &=
\begin{cases}
\frac{E[S] - 1} {\sqrt{VAR(S)}}, & E[S] > 0\\
0, & E[S] = 0\\
\frac{E[S] + 1} {\sqrt{VAR(S)}}, & E[S] < 0\\
\end{cases}.
Hypothesis testing
------------------
At a significance level :math:`\alpha` of the test, which is also the Type I
error rate, we compute whether or not to accept the alternative hypothesis
:math:`H_a` for each variant of :math:`H_a` separately:
:math:`H_a`: There exists an upward monotonic trend
If :math:`Z_{MK} \geq Z_{1 - \alpha}` then accept :math:`H_a`, where the
notation :math:`Z_{1 - \alpha}` denotes the :math:`100(1-\alpha)`-th
percentile of the standard normal distribution.
:math:`H_a`: There exists a downward monotonic trend
If :math:`Z_{MK} \leq -Z_{1 - \alpha}` then accept :math:`H_a`.
:math:`H_a`: There exists either an upward or a downward monotonic trend
If :math:`|Z_{MK}| \geq Z_{1 - \alpha/2}` then accept :math:`H_a`,
where the notation :math:`|\cdot|` is used to denote the absolute
value function.
Updated formulae for implementation
-----------------------------------
One crucial notion involved in the Mann-Kendall test statistic is that of
whether the difference between two measurements is greater than, equal to, or
less than zero. This idea is in turn critically linked to the least count
(i.e., the minimum possible measurement value) of the time series measurements
:math:`x_i`. For example, let us consider the case when we measure :math:`x_i`
with a precision :math:`\varepsilon = 0.01`. In such a case, let us say for
some reason, floating point errors in the entries of :math:`x_i` in the
memory, lead to a :math:`x_{11} - x_{27} = 0.000251 > 0`. However, to say that
this difference is actually greater than zero is meaningless! This is because
on the basis of the same measurement process we used on :math:`x`, we could
never ascertain such a small difference. This is why, in this implementation of
the Mann-Kendall test, we have included the least count error
:math:`\varepsilon` as a compulsory requirement for the test statistic
estimation.
This allows us to revise the above formulae fo rthe Mann-Kendall test as:
.. math::
sgn(x_i - x_j) &=
\begin{cases}
1, & x_i - x_j > \varepsilon\\
0, & |x_i - x_j| \leq \varepsilon\\
-1, & x_i - x_j < -\varepsilon
\end{cases},
and:
.. math::
Z_{MK} &=
\begin{cases}
\frac{E[S] - 1} {\sqrt{VAR(S)}}, & E[S] >
\varepsilon\\
0, & |E[S]| \leq \varepsilon\\
\frac{E[S] + 1} {\sqrt{VAR(S)}}, & E[S] <
-\varepsilon\\
\end{cases}.
These revised formulae are the ones that are implemented in the :py:func:`test`
of this module.
Additional estimates
--------------------
In addition to the result of the Mann-Kendall test, which is in the form of a
string indicating whether or not to accept the alternative hypothesis, the
:py:func:`test` function also return a few additional estimates related to the
estimation of a monotonic trend in the time series.
Estimation of the simple linear regression parameters
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The slope :math:`m` and intercept :math:`c` of a straight line fitted through
the time series data are estimated as follows:
.. math::
m = r_{x,t} \frac{\sigma_x}{\sigma_t},
where r_{x,t} is the Pearson's cross-correlation coefficient between
:math:`x` and :math:`t`.
.. math::
c = \mu_x - m \mu_t
where :math:`\mu` denotes the mean of the both variables respectively.
Estimation of :math:`p`-values
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :py:func:`test` function also returns the :math:`p`-values for the given
dataset under the various alternative hypotheses. Note that the estimation of
the :math:`p`-value is not essential to the computation of the test results as
formulated above. The :math:`p`-values need to estimated separately depending
on the type of alternative hypothesis used and the sign of :math:`E[S]`.
Denoting :math:`f(u)` as the probability density function of the standard
normal distribution, we can write down the :math:`p`-values as:
:math:`H_a`: There exists an upward monotonic trend
.. math::
p_{Z_{MK}} &=
\begin{cases}
\int_{Z_{MK}}^{\infty} f(u) \mathrm{d}u,& |E[S]|>\varepsilon\\
0.5, & |E[S]| \leq \varepsilon\\
\end{cases}.
:math:`H_a`: There exists a downward monotonic trend
.. math::
p_{Z_{MK}} &=
\begin{cases}
\int^{Z_{MK}}_{-\infty} f(u) \mathrm{d}u,& |E[S]|>\varepsilon\\
0.5, & |E[S]| \leq \varepsilon\\
\end{cases}.
:math:`H_a`: There exists either an upward or a downward monotonic trend
.. math::
p_{Z_{MK}} &= 0.5
\begin{cases}
\int_{Z_{MK}}^{\infty} f(u) \mathrm{d}u,& E[S]>\varepsilon\\
1, & |E[S]| \leq \varepsilon\\
\int^{Z_{MK}}_{-\infty} f(u) \mathrm{d}u,& E[S]<-\varepsilon\\
\end{cases}.
References
----------
.. [1] | <NAME>.
| "Non-Parametric Trend Tests and Change-Point Detection".
| R-package `trend`. Accessed on: 17 April, 2017.
| https://cran.r-project.org/web/packages/trend/vignettes/trend.pdf
.. [2] | "Mann-Kendall Test For Monotonic Trend".
| Visual Simple Plan. Accessed on: 17 April, 2017.
| http://vsp.pnnl.gov/help/Vsample/Design_Trend_Mann_Kendall.htm
"""
# Created: Mon Apr 17, 2017 01:18PM
# Last modified: Mon Apr 17, 2017 09:24PM
# Copyright: <NAME> <<EMAIL>>
import numpy as np
from scipy.special import ndtri, ndtr
import sys
def test(t, x, eps=None, alpha=None, Ha=None):
"""
Runs the Mann-Kendall test for trend in time series data.
Parameters
----------
t : 1D numpy.ndarray
array of the time points of measurements
x : 1D numpy.ndarray
array containing the measurements corresponding to entries of 't'
eps : scalar, float, greater than zero
least count error of measurements which help determine ties in the data
alpha : scalar, float, greater than zero
significance level of the statistical test (Type I error)
Ha : string, options include 'up', 'down', 'upordown'
type of test: one-sided ('up' or 'down') or two-sided ('updown')
Returns
-------
MK : string
result of the statistical test indicating whether or not to accept hte
alternative hypothesis 'Ha'
m : scalar, float
slope of the linear fit to the data
c : scalar, float
intercept of the linear fit to the data
p : scalar, float, greater than zero
p-value of the obtained Z-score statistic for the Mann-Kendall test
Raises
------
AssertionError : error
least count error of measurements 'eps' is not given
AssertionError : error
significance level of test 'alpha' is not given
AssertionError : error
alternative hypothesis 'Ha' is not given
"""
# assert a least count for the measurements x
assert eps, "Please provide least count error for measurements 'x'"
assert alpha, "Please provide significance level 'alpha' for the test"
assert Ha, "Please provide the alternative hypothesis 'Ha'"
# estimate sign of all possible (n(n-1)) / 2 differences
n = len(t)
sgn = np.zeros((n, n), dtype="int")
for i in range(n):
tmp = x - x[i]
tmp[np.where(np.fabs(tmp) <= eps)] = 0.
sgn[i] = np.sign(tmp)
# estimate mean of the sign of all possible differences
S = sgn[np.triu_indices(n, k=1)].sum()
# estimate variance of the sign of all possible differences
# 1. Determine no. of tie groups 'p' and no. of ties in each group 'q'
np.fill_diagonal(sgn, eps * 1E6)
i, j = np.where(sgn == 0.)
ties = np.unique(x[i])
p = len(ties)
q = np.zeros(len(ties), dtype="int")
for k in range(p):
idx = np.where(np.fabs(x - ties[k]) < eps)[0]
q[k] = len(idx)
# 2. Determine the two terms in the variance calculation
term1 = n * (n - 1) * (2 * n + 5)
term2 = (q * (q - 1) * (2 * q + 5)).sum()
# 3. estimate variance
varS = float(term1 - term2) / 18.
# Compute the Z-score based on above estimated mean and variance
if S > eps:
Zmk = (S - 1) / np.sqrt(varS)
elif np.fabs(S) <= eps:
Zmk = 0.
elif S < -eps:
Zmk = (S + 1) / np.sqrt(varS)
# compute test based on given 'alpha' and alternative hypothesis
# note: for all the following cases, the null hypothesis Ho is:
# Ho := there is no monotonic trend
#
# Ha := There is an upward monotonic trend
if Ha == "up":
Z_ = ndtri(1. - alpha)
if Zmk >= Z_:
MK = "accept Ha := upward trend"
else:
MK = "reject Ha := upward trend"
# Ha := There is a downward monotonic trend
elif Ha == "down":
Z_ = ndtri(1. - alpha)
if Zmk <= -Z_:
MK = "accept Ha := downward trend"
else:
MK = "reject Ha := downward trend"
# Ha := There is an upward OR downward monotonic trend
elif Ha == "upordown":
Z_ = ndtri(1. - alpha / 2.)
if np.fabs(Zmk) >= Z_:
MK = "accept Ha := upward OR downward trend"
else:
MK = "reject Ha := upward OR downward trend"
# ----------
# AS A BONUS
# ----------
# estimate the slope and intercept of the line
m = np.corrcoef(t, x)[0, 1] * (np.std(x) / np.std(t))
c = np.mean(x) - m * np.mean(t)
# ----------
# AS A BONUS
# ----------
# estimate the p-value for the obtained Z-score Zmk
if S > eps:
if Ha == "up":
p = 1. - ndtr(Zmk)
elif Ha == "down":
p = ndtr(Zmk)
elif Ha == "upordown":
p = 0.5 * (1. - ndtr(Zmk))
elif
|
np.fabs(S)
|
numpy.fabs
|
"""Linear operator tests.
"""
from __future__ import division, absolute_import
import unittest
import numpy as np
from bcn.linear_operators import LinearOperatorEntry, LinearOperatorDense, LinearOperatorKsparse, LinearOperatorCustom, integer_to_matrix, sample_n_choose_k, choose_random_matrix_elements
from bcn.data import DataSimulated, estimate_partial_signal_characterists
class TestSimple(unittest.TestCase):
"""Test to verify shapes and outputs of different linear operators are correct.
"""
def setUp(self):
self.n_samples = 10
self.n_features = 9
self.sparsity = 2
self.n = 90
self.signal = np.asarray(np.arange(90), dtype=float).reshape((self.n_samples, self.n_features))
self.signal_with_nan =
|
np.array(self.signal)
|
numpy.array
|
__author__ = '<NAME>'
import numpy as np
def myKMeans(k, Data):
dataL = np.zeros((Data.shape[0], Data.shape[1]+1), dtype=np.float64)
dataL[:,1:] = Data
randInd = np.random.randint(0, Data.shape[0], k, np.int64)
centroids = Data[randInd, :]
labelsC = np.asarray(range(k))
flag = 1
while flag:
for i in range(dataL.shape[0]):
distVec = np.sum((centroids - dataL[i,1:]) ** 2, axis=1) ** 0.5
dataL[i,0] = labelsC[np.argmin(distVec)]
outL = 1
newCentroids = np.zeros(centroids.shape)
for j in range(k):
if len(dataL[dataL[:, 0] == j, 0]):
newCentroids[j,:] = np.mean(dataL[dataL[:,0] == j, :], axis=0)[1:]
else:
meanCent = np.mean(centroids, axis=0)
if outL:
cenDistVec =
|
np.sum((dataL[:, 1:] - meanCent) ** 2, axis=1)
|
numpy.sum
|
import numpy as np
import matplotlib.pyplot as plt
from quat import Quat
from sys import exit
def ori_matrix(phi1,Phi,phi2,passive=True):
'''
Returns (passive) orientation matrix, as a np.matrix from
3 euler angles (in degrees).
'''
phi1=np.radians(phi1)
Phi=np.radians(Phi)
phi2=np.radians(phi2)
R11 = np.cos(phi1)*np.cos(phi2)-np.sin(phi1)*np.cos(Phi)*np.sin(phi2)
R12 = np.sin(phi1)*np.cos(phi2)+ np.cos(phi1)*np.cos(Phi)*np.sin(phi2)
R13 = np.sin(phi2)*np.sin(Phi)
R21 = -np.cos(phi1)*np.sin(phi2)-np.sin(phi1)*np.cos(Phi)*np.cos(phi2)
R22 = - np.sin(phi1)*np.sin(phi2)+np.cos(phi1)*np.cos(Phi)*np.cos(phi2)
R23 = np.cos(phi2)*np.sin(Phi)
R31 = np.sin(phi1)*np.sin(Phi)
R32= -np.sin(Phi)*np.cos(phi1)
R33= np.cos(Phi)
matrix=np.matrix([[R11,R12,R13],[R21,R22,R23],[R31,R32,R33]])
if not passive: # matrix above is for the passive rotation
matrix=matrix.transpose()
return matrix
def get_proj(g,pole,proj='stereo'):
'''
Returns polar projection vector from an orientation matrix (g),
a pole vector (pole) using either stereographic or equal area projection,
'''
n=np.linalg.norm(pole)
pole=np.matrix(pole).T/n
vector=g.T*pole #invert matrix
alpha=np.arccos(vector[2])
if
|
np.isclose(alpha,0.0)
|
numpy.isclose
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from scipy.stats import spearmanr, combine_pvalues, friedmanchisquare
from scikit_posthocs import posthoc_nemenyi_friedman
from tabulate import tabulate
from Orange.evaluation import compute_CD, graph_ranks
from hmeasure import h_score
import os
import baycomp
# from rpy2.robjects.packages import importr
# import rpy2.robjects.numpy2ri
# hmeasure = importr('hmeasure')
# rpy2.robjects.numpy2ri.activate()
# Matplotlib settings for figures:
# plt.style.use('science')
# plt.rcParams.update({'font.size': 14})
# plt.rc('xtick', labelsize=12)
# plt.rc('ytick', labelsize=12)
# plt.rcParams['figure.figsize'] = (7, 6)
# plt.rcParams['figure.dpi'] = 250
def savings(cost_matrix, labels, predictions):
cost_without = cost_without_algorithm(cost_matrix, labels)
cost_with = cost_with_algorithm(cost_matrix, labels, predictions)
savings = 1 - cost_with / cost_without
return savings
def cost_with_algorithm(cost_matrix, labels, predictions):
cost_tn = cost_matrix[:, 0, 0][np.logical_and(predictions == 0, labels == 0)].sum()
cost_fn = cost_matrix[:, 0, 1][np.logical_and(predictions == 0, labels == 1)].sum()
cost_fp = cost_matrix[:, 1, 0][np.logical_and(predictions == 1, labels == 0)].sum()
cost_tp = cost_matrix[:, 1, 1][np.logical_and(predictions == 1, labels == 1)].sum()
return sum((cost_tn, cost_fn, cost_fp, cost_tp))
def cost_without_algorithm(cost_matrix, labels):
# Predict everything as the default class that leads to minimal cost
# Also include cost of TP/TN!
cost_neg = cost_matrix[:, 0, 0][labels == 0].sum() + cost_matrix[:, 0, 1][labels == 1].sum()
cost_pos = cost_matrix[:, 1, 0][labels == 0].sum() + cost_matrix[:, 1, 1][labels == 1].sum()
return min(cost_neg, cost_pos)
def rociv(labels, probabilities, costs):
# Total cost per class
pos_total = costs[labels == 1].sum()
neg_total = costs[labels == 0].sum()
# Sort predictions (1 to 0) and corresponding labels
sorted_indices = np.argsort(probabilities)[::-1]
costs_sorted = costs[sorted_indices]
labels_sorted = labels[sorted_indices]
# probabilities[sorted_indices][labels_sorted == 1]
# Create ROCIV curve
fp_costs = [0]
tp_benefits = [0]
benefits_cum = 0
costs_cum = 0
for i in range(len(labels)):
if labels_sorted[i]:
benefits_cum += costs_sorted[i]
else:
costs_cum += costs_sorted[i]
fp_costs.append(costs_cum / neg_total)
tp_benefits.append(benefits_cum / pos_total)
# Area under curve
auciv = metrics.auc(x=fp_costs, y=tp_benefits)
# auciv = np.trapz(y=tp_benefits, x=fp_costs)
return fp_costs, tp_benefits, auciv
def get_performance_metrics(evaluators, evaluation_matrices, i, index, cost_matrix, labels, probabilities, predictions,
info):
if evaluators['traditional']:
true_pos = (predictions * labels).sum()
true_neg = ((1-predictions) * (1-labels)).sum()
false_pos = (predictions * (1-labels)).sum()
false_neg = ((1-predictions) * labels).sum()
accuracy = (true_pos + true_neg) / len(labels)
recall = true_pos / (true_pos + false_neg)
# Make sure no division by 0!
if (true_pos == 0) and (false_pos == 0):
precision = 0
print('\t\tWARNING: No positive predictions!')
else:
precision = true_pos / (true_pos + false_pos)
if precision == 0:
f1_score = 0
print('\t\tWARNING: Precision = 0!')
else:
f1_score = 2 * (precision * recall) / (precision + recall)
evaluation_matrices['traditional'][index, i] = np.array([accuracy, recall, precision, f1_score])
if evaluators['ROC']:
fpr, tpr, roc_thresholds = metrics.roc_curve(y_true=labels, y_score=probabilities)
evaluation_matrices['ROC'][index, i] = np.array([fpr, tpr, roc_thresholds])
if evaluators['AUC']:
auc = metrics.roc_auc_score(y_true=labels, y_score=probabilities)
evaluation_matrices['AUC'][index, i] = auc
if evaluators['savings']:
# To do: function - savings
cost_without = cost_without_algorithm(cost_matrix, labels)
cost_with = cost_with_algorithm(cost_matrix, labels, predictions)
savings = 1 - cost_with / cost_without
evaluation_matrices['savings'][index, i] = savings
if evaluators['AEC']:
expected_cost = labels * (probabilities * cost_matrix[:, 1, 1] + (1 - probabilities) * cost_matrix[:, 0, 1]) \
+ (1 - labels) * (probabilities * cost_matrix[:, 1, 0] + (1 - probabilities) * cost_matrix[:, 0, 0])
aec = expected_cost.mean()
evaluation_matrices['AEC'][index, i] = aec
if evaluators['ROCIV']:
misclass_costs = np.zeros(len(labels))
misclass_costs[labels == 0] = cost_matrix[:, 1, 0][labels == 0]
misclass_costs[labels == 1] = cost_matrix[:, 0, 1][labels == 1]
fpcosts, tpbenefits, auciv = rociv(labels, probabilities, misclass_costs)
evaluation_matrices['ROCIV'][index, i] = np.array([fpcosts, tpbenefits, auciv], dtype=object)
if evaluators['H_measure']:
# TODO:
# Takes approx 1 sec -> Do from scratch?
# https://github.com/canagnos/hmeasure-python/blob/master/mcp/mcp.py
# Specify cost distribution?
# Uses hmeasure, see https://github.com/cran/hmeasure/blob/master/R/library_metrics.R
misclass_neg = cost_matrix[:, 1, 0][labels == 0]
misclass_pos = cost_matrix[:, 0, 1][labels == 1]
# Todo: explain this severity!
severity = misclass_neg.mean() / misclass_pos.mean()
#h = hmeasure.HMeasure(labels, probabilities[:, None], severity)[0][0][0]
h = h_score(labels, probabilities, severity)
evaluation_matrices['H_measure'][index, i] = h
if evaluators['PR']:
precision, recall, _ = metrics.precision_recall_curve(y_true=labels, probas_pred=probabilities)
# AUC is not recommended here (see sklearn docs)
# We will use Average Precision (AP)
ap = metrics.average_precision_score(y_true=labels, y_score=probabilities)
evaluation_matrices['PR'][index, i] = np.array([precision, recall, ap], dtype=object)
if evaluators['PRIV']:
misclass_costs = np.zeros(len(labels))
misclass_costs[labels == 0] = cost_matrix[:, 1, 0][labels == 0]
misclass_costs[labels == 1] = cost_matrix[:, 0, 1][labels == 1]
precisioniv, recalliv, _ = metrics.precision_recall_curve(y_true=labels, probas_pred=probabilities,
sample_weight=misclass_costs)
# AUC is not recommended here (see sklearn docs)
# We will use Average Precision (AP)
apiv = metrics.average_precision_score(y_true=labels, y_score=probabilities, sample_weight=misclass_costs)
#ap = metrics.auc(recall, precision)
evaluation_matrices['PRIV'][index, i] = np.array([precisioniv, recalliv, apiv], dtype=object)
if evaluators['rankings']:
pos_probas = probabilities[labels == 1]
# Get C_(0,1) (FN - misclassification cost of positive instances)
misclass_costs_pos = cost_matrix[:, 0, 1][labels == 1]
# Sort indices from high to low
sorted_indices_probas = np.argsort(pos_probas)[::-1]
prob_rankings = np.argsort(sorted_indices_probas)
sorted_indices_amounts = np.argsort(misclass_costs_pos)[::-1]
amount_rankings = np.argsort(sorted_indices_amounts)
# Compare rankings of probas with rankings of amounts for all positive instances
spearman_test = spearmanr(prob_rankings, amount_rankings)
#spearman_test = spearmanr(pos_probas[sorted_indices_probas], pos_amounts[sorted_indices_probas])
#spearman_test = spearmanr(probabilities, amounts)
evaluation_matrices['rankings'][index, i] = np.array([misclass_costs_pos[sorted_indices_probas], spearman_test],
dtype=object)
if evaluators['brier']:
brier = ((probabilities - labels)**2).mean()
evaluation_matrices['brier'][index, i] = brier
if evaluators['recall_overlap']:
recalled = labels[labels == 1] * predictions[labels == 1]
evaluation_matrices['recall_overlap'][index, i] = recalled
if evaluators['recall_correlation']:
pos_probas = probabilities[labels == 1]
# Sort indices from high to low
sorted_indices_probas = np.argsort(pos_probas)[::-1]
prob_rankings = np.argsort(sorted_indices_probas)
evaluation_matrices['recall_correlation'][index, i] = prob_rankings
if evaluators['time']:
evaluation_matrices['time'][index, i] = info['time']
if evaluators['lambda1']:
evaluation_matrices['lambda1'][index, i] = info['lambda1']
if evaluators['lambda2']:
evaluation_matrices['lambda2'][index, i] = info['lambda2']
if evaluators['n_neurons']:
evaluation_matrices['n_neurons'][index, i] = info['n_neurons']
return evaluation_matrices
def evaluate_experiments(evaluators, methodologies, evaluation_matrices, directory, name):
table_evaluation = []
n_methodologies = sum(methodologies.values())
names = []
for key in methodologies.keys():
if methodologies[key]:
names.append(key)
if evaluators['traditional']:
table_traditional = [['Method', 'Accuracy', 'Recall', 'Precision', 'F1-score', 'AR', 'sd']]
# Compute F1 rankings (- as higher is better)
all_f1s = []
for i in range(evaluation_matrices['traditional'].shape[0]):
method_f1s = []
for j in range(evaluation_matrices['traditional'][i].shape[0]):
f1 = evaluation_matrices['traditional'][i][j][-1]
method_f1s.append(f1)
all_f1s.append(np.array(method_f1s))
ranked_args = np.argsort(-np.array(all_f1s), axis=0)
rankings = np.arange(len(ranked_args))[ranked_args.argsort(axis=0)]
rankings = rankings + 1
avg_rankings = np.mean(rankings, axis=1)
sd_rankings = np.sqrt(rankings.var(axis=1))
# Summarize all per method
index = 0
for item, value in methodologies.items():
if value:
averages = evaluation_matrices['traditional'][index, :].mean()
table_traditional.append([item, averages[0], averages[1], averages[2], averages[3],
avg_rankings[index], sd_rankings[index]])
index += 1
print(tabulate(table_traditional, headers="firstrow", floatfmt=("", ".4f", ".4f", ".4f", ".4f", ".4f", ".4f")))
table_evaluation.append(table_traditional)
# Do tests if enough measurements are available (at least 3)
if np.array(all_f1s).shape[1] > 2:
friedchisq = friedmanchisquare(*np.transpose(all_f1s))
print('\nF1 - Friedman test')
print('H0: Model performance follows the same distribution')
print('\tChi-square:\t%.4f' % friedchisq[0])
print('\tp-value:\t%.4f' % friedchisq[1])
if friedchisq[1] < 0.05: # If p-value is significant, do Nemenyi post hoc test
# Post-hoc Nemenyi Friedman: Rows are blocks, columns are groups
nemenyi = posthoc_nemenyi_friedman(np.array(all_f1s).T.astype(dtype=np.float32))
print('\nNemenyi post hoc test:')
print(nemenyi)
print('_________________________________________________________________________')
if evaluators['ROC']:
index = 0
# fig, ax = plt.subplots()
# ax.set_title('ROC curve')
# ax.set_xlabel('False positive rate')
# ax.set_ylabel('True positive rate')
# for item, value in methodologies.items():
# if value:
# # See https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html
# tprs = []
# mean_fpr = np.linspace(0, 1, 100)
#
# for i in range(evaluation_matrices['ROC'][index, :].shape[0]):
# fpr, tpr, _ = list(evaluation_matrices['ROC'][index, i])
# interp_tpr = np.interp(mean_fpr, fpr, tpr)
# interp_tpr[0] = 0.0
# tprs.append(interp_tpr)
#
# mean_tpr = np.mean(tprs, axis=0)
# mean_tpr[-1] = 1.0
#
# index += 1
#
# ax.plot(mean_fpr, mean_tpr, label=item, lw=2, alpha=.8)
#
# # std_tpr = np.std(tprs, axis=0)
# # tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
# # tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# # ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2)
#
# ax.legend()
# plt.savefig(str(directory + 'ROC.png'), bbox_inches='tight')
# plt.show()
if evaluators['AUC']:
table_auc = [['Method', 'AUC', 'sd', 'AR', 'sd']]
# Compute rankings (- as higher is better)
ranked_args = (-evaluation_matrices['AUC']).argsort(axis=0)
rankings = np.arange(len(ranked_args))[ranked_args.argsort(axis=0)]
rankings = rankings + 1
avg_rankings = rankings.mean(axis=1)
sd_rankings = np.sqrt(rankings.var(axis=1))
# Summarize per method
index = 0
for item, value in methodologies.items():
if value:
table_auc.append([item, evaluation_matrices['AUC'][index, :].mean(),
np.sqrt(evaluation_matrices['AUC'][index, :].var()), avg_rankings[index],
sd_rankings[index]])
index += 1
print(tabulate(table_auc, headers="firstrow", floatfmt=("", ".4f", ".4f", ".4f", ".4f")))
table_evaluation.append(table_auc)
# Do tests if enough measurements are available (at least 3)
if evaluation_matrices['AUC'].shape[1] > 2:
friedchisq = friedmanchisquare(*evaluation_matrices['AUC'].T)
print('\nAUC - Friedman test')
print('H0: Model performance follows the same distribution')
print('\tChi-square:\t%.4f' % friedchisq[0])
print('\tp-value:\t%.4f' % friedchisq[1])
if friedchisq[1] < 0.05: # If p-value is significant, do Nemenyi post hoc test
nemenyi = posthoc_nemenyi_friedman(evaluation_matrices['AUC'].T.astype(dtype=np.float32))
print('\nNemenyi post hoc test:')
print(nemenyi)
print('_________________________________________________________________________')
if evaluators['savings']:
table_savings = [['Method', 'Savings', 'sd', 'AR', 'sd']]
# Compute rankings (- as higher is better)
ranked_args = (-evaluation_matrices['savings']).argsort(axis=0)
rankings = np.arange(len(ranked_args))[ranked_args.argsort(axis=0)]
rankings = rankings + 1
avg_rankings = rankings.mean(axis=1)
sd_rankings = np.sqrt(rankings.var(axis=1))
# Summarize per method
index = 0
methods_used = []
for item, value in methodologies.items():
if value:
methods_used.append(item)
table_savings.append([item, evaluation_matrices['savings'][index, :].mean(),
np.sqrt(evaluation_matrices['savings'][index, :].var()), avg_rankings[index],
sd_rankings[index]])
index += 1
print(tabulate(table_savings, headers="firstrow", floatfmt=("", ".4f", ".4f", ".4f", ".4f")))
table_evaluation.append(table_savings)
# plt.xlabel('Methods')
# plt.ylabel('Savings')
# # plt.ylim(0, 1)
# plt.boxplot(np.transpose(evaluation_matrices['savings']))
# plt.xticks(np.arange(n_methodologies) + 1, methods_used)
# plt.xticks(rotation=40)
# plt.savefig(str(directory + 'savings_boxplot_' + name + '.png'), bbox_inches='tight')
# plt.show()
# Do tests if enough measurements are available (at least 3)
if evaluation_matrices['savings'].shape[1] > 2:
friedchisq = friedmanchisquare(*evaluation_matrices['savings'].T)
print('\nSavings - Friedman test')
print('H0: Model performance follows the same distribution')
print('\tChi-square:\t%.4f' % friedchisq[0])
print('\tp-value:\t%.4f' % friedchisq[1])
if friedchisq[1] < 0.05: # If p-value is significant, do Nemenyi post hoc test
nemenyi = posthoc_nemenyi_friedman(evaluation_matrices['savings'].T.astype(dtype=np.float32))
print('\nNemenyi post hoc test:')
print(nemenyi)
if n_methodologies > 1:
cd = compute_CD(avg_rankings, n=1, alpha='0.05', test="nemenyi")
print(f'Critical difference: {np.round(cd, 4)}')
graph_ranks(avg_rankings, names, cd=cd, width=9, textspace=3, lowv=1, highv=n_methodologies)
# plt.show()
# # Bayesian testing for all combinations:
# print('Bayesian tests (ROPE) for savings (one vs one):')
# for i in range(0, n_methodologies - 1):
# for j in range(i + 1, n_methodologies):
# print(str('\tComparing ' + names[i] + ' and ' + names[j]))
# probs = baycomp.two_on_single(evaluation_matrices['savings'][i], evaluation_matrices['savings'][j],
# plot=False, names=[names[i], names[j]])
# print(f'\t{probs}')
#
# print('Bayesian tests (ROPE) for rankings (one vs one):')
# for i in range(0, n_methodologies - 1):
# for j in range(i + 1, n_methodologies):
# print(str('\tComparing ' + names[i] + ' and ' + names[j]))
# probs = baycomp.two_on_single(evaluation_matrices['savings'][i], evaluation_matrices['savings'][j],
# rope=1, plot=False, names=[names[i], names[j]])
# print(f'\t{probs}')
#
# # Bayesian testing multiple comparison:
# # Not implemented yet
# # print('Bayesian tests (ROPE) for savings (multiple comparisons):')
print('_________________________________________________________________________')
if evaluators['AEC']:
table_aec = [['Method', 'AEC', 'sd', 'AR', 'sd']]
# Compute rankings (lower is better)
ranked_args = (evaluation_matrices['AEC']).argsort(axis=0)
rankings = np.arange(len(ranked_args))[ranked_args.argsort(axis=0)]
rankings = rankings + 1
avg_rankings = rankings.mean(axis=1)
sd_rankings = np.sqrt(rankings.var(axis=1))
# Summarize per method
index = 0
methods_used = []
for item, value in methodologies.items():
if value:
methods_used.append(item)
table_aec.append([item, evaluation_matrices['AEC'][index, :].mean(),
np.sqrt(evaluation_matrices['AEC'][index, :].var()), avg_rankings[index],
sd_rankings[index]])
index += 1
print(tabulate(table_aec, headers="firstrow", floatfmt=("", ".4f", ".4f", ".4f", ".4f")))
table_evaluation.append(table_aec)
# plt.xlabel('Methods')
# plt.ylabel('AEC')
# # plt.ylim(0, 1)
# plt.boxplot(np.transpose(evaluation_matrices['AEC']))
# plt.xticks(np.arange(n_methodologies) + 1, methods_used)
# plt.xticks(rotation=40)
# plt.savefig(str(directory + 'AEC_boxplot' + '.png'), bbox_inches='tight')
# plt.show()
# Do tests if enough measurements are available (at least 3)
if evaluation_matrices['AEC'].shape[1] > 2:
friedchisq = friedmanchisquare(*evaluation_matrices['AEC'].T)
print('\nSavings - Friedman test')
print('H0: Model performance follows the same distribution')
print('\tChi-square:\t%.4f' % friedchisq[0])
print('\tp-value:\t%.4f' % friedchisq[1])
if friedchisq[1] < 0.05: # If p-value is significant, do Nemenyi post hoc test
nemenyi = posthoc_nemenyi_friedman(evaluation_matrices['AEC'].T.astype(dtype=np.float32))
print('\nNemenyi post hoc test:')
print(nemenyi)
print('_________________________________________________________________________')
if evaluators['ROCIV']:
table_auciv = [['Method', 'AUCIV', 'sd', 'AR', 'sd']]
index = 0
# fig2, ax2 = plt.subplots()
# ax2.set_title('ROCIV curve')
# ax2.set_xlabel('False positive cost')
# ax2.set_ylabel('True positive benefit')
all_aucivs = []
for item, value in methodologies.items():
if value:
# See https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html
tprs = []
mean_fpr = np.linspace(0, 1, 100)
aucivs = []
for i in range(evaluation_matrices['ROCIV'][index, :].shape[0]):
fp_costs, tp_benefits, auciv = list(evaluation_matrices['ROCIV'][index, i])
interp_tpr = np.interp(mean_fpr, fp_costs, tp_benefits)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucivs.append(auciv)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
# ax2.plot(mean_fpr, mean_tpr, label=item, lw=2, alpha=.8)
# std_tpr = np.std(tprs, axis=0)
# tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
# tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
# ax2.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2)
aucivs = np.array(aucivs)
table_auciv.append([item, aucivs.mean(), np.sqrt(aucivs.var())])
all_aucivs.append(aucivs)
index += 1
# ax2.legend()
# plt.savefig(str(directory + 'ROCIV.png'), bbox_inches='tight')
# plt.show()
# Add rankings (higher is better)
ranked_args = np.argsort(-np.array(all_aucivs), axis=0)
rankings = np.arange(len(ranked_args))[ranked_args.argsort(axis=0)]
rankings = rankings + 1
avg_rankings = np.mean(rankings, axis=1)
sd_rankings = np.sqrt(rankings.var(axis=1))
for i in range(1, len(table_auciv)):
table_auciv[i].append(avg_rankings[i-1])
table_auciv[i].append(sd_rankings[i - 1])
print(tabulate(table_auciv, headers="firstrow", floatfmt=("", ".4f", ".4f", ".4f", ".4f")))
table_evaluation.append(table_auciv)
# Do tests if enough measurements are available (at least 3)
if np.array(all_aucivs).shape[1] > 2:
friedchisq = friedmanchisquare(*np.transpose(all_aucivs))
print('\nAUCIV - Friedman test')
print('H0: Model performance follows the same distribution')
print('\tChi-square:\t%.4f' % friedchisq[0])
print('\tp-value:\t%.4f' % friedchisq[1])
if friedchisq[1] < 0.05: # If p-value is significant, do Nemenyi post hoc test
nemenyi = posthoc_nemenyi_friedman(np.array(all_aucivs).T.astype(dtype=np.float32))
print('\nNemenyi post hoc test:')
print(nemenyi)
print('_________________________________________________________________________')
if evaluators['H_measure']:
table_H = [['Method', 'H_measure', 'sd', 'AR', 'sd']]
# Compute rankings (- as higher is better)
ranked_args = (-evaluation_matrices['H_measure']).argsort(axis=0)
rankings = np.arange(len(ranked_args))[ranked_args.argsort(axis=0)]
rankings = rankings + 1
avg_rankings = rankings.mean(axis=1)
sd_rankings = np.sqrt(rankings.var(axis=1))
# Summarize per method
index = 0
for item, value in methodologies.items():
if value:
table_H.append([item, evaluation_matrices['H_measure'][index, :].mean(),
np.sqrt(evaluation_matrices['H_measure'][index, :].var()), avg_rankings[index],
sd_rankings[index]])
index += 1
print(tabulate(table_H, headers="firstrow", floatfmt=("", ".4f", ".4f", ".4f", ".4f")))
table_evaluation.append(table_H)
# Do tests if enough measurements are available (at least 3)
if evaluation_matrices['H_measure'].shape[1] > 2:
friedchisq = friedmanchisquare(*evaluation_matrices['H_measure'].T)
print('\nH-measure - Friedman test')
print('H0: Model performance follows the same distribution')
print('\tChi-square:\t%.4f' % friedchisq[0])
print('\tp-value:\t%.4f' % friedchisq[1])
if friedchisq[1] < 0.05: # If p-value is significant, do Nemenyi post hoc test
nemenyi = posthoc_nemenyi_friedman(evaluation_matrices['H_measure'].T.astype(dtype=np.float32))
print('\nNemenyi post hoc test:')
print(nemenyi)
print('_________________________________________________________________________')
if evaluators['PR']:
table_ap = [['Method', 'Avg Prec', 'sd', 'AR', 'sd']]
index = 0
# fig2, ax2 = plt.subplots()
# ax2.set_title('PR curve')
# ax2.set_xlabel('Recall')
# ax2.set_ylabel('Precision')
all_aps = []
for item, value in methodologies.items():
if value:
# See https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc_crossval.html
precisions = []
mean_recall = np.linspace(0, 1, 100)
aps = []
for i in range(evaluation_matrices['PR'][index, :].shape[0]):
precision, recall, ap = list(evaluation_matrices['PR'][index, i])
interp_precision = np.interp(mean_recall, recall[::-1], precision[::-1])
interp_precision[0] = 1
precisions.append(interp_precision)
aps.append(ap)
mean_precision = np.mean(precisions, axis=0)
mean_precision[-1] = 0
# ax2.plot(mean_recall, mean_precision, label=item, lw=2, alpha=.8)
# std_precision = np.std(precisions, axis=0)
# precisions_upper = np.minimum(mean_precision + std_precision, 1)
# precisions_lower = np.maximum(mean_precision - std_precision, 0)
# ax2.fill_between(mean_recall, precisions_lower, precisions_upper, color='grey', alpha=.2)
aps = np.array(aps)
table_ap.append([item, aps.mean(), np.sqrt(aps.var())])
all_aps.append(aps)
index += 1
# ax2.legend()
# plt.savefig(str(directory + 'PR.png'), bbox_inches='tight')
# plt.show()
# Add rankings (higher is better)
ranked_args = np.argsort(-
|
np.array(all_aps)
|
numpy.array
|
# coding: utf-8
# # Creating a dataset of Ohio injection wells
import matplotlib.pyplot as plt
import random
import numpy as np
import pandas as pd
import os
# set datadir to the directory that holds the zipfile
datadir = 'c:\MyDocs/sandbox/data/datasets/FracFocus/'
outdir = datadir+'output/'
indir = datadir+'OH_injection/'
tempf = outdir+'temp.csv'
tempf1 = outdir+'temp1.csv'
pre_four = outdir+'pre_four.csv'
# print(os.listdir(indir))
# input files are in three different formats:
# oldest: tuple (filename,yr,q)
# all columns are named the same!!
fn_old = [('OH_1ST QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,1),
('OH_2ND QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,2),
('OH_3RD QUARTER 2011 BRINE DISPOSAL FEES-1.xls',2011,3),
('OH_4TH QUARTER 2010 BRINE DISPOSAL FEES.xls',2010,4),
('OH_4TH QUARTER 2011 BRINE DISPOSAL FEES.xls',2011,4),
('OH_Brine Disposal Fee - 3rd Quarter 2010-2.xls',2010,3)]
# the 2012 file is ina funky state - the set of worksheets have two different formats: a blend of old and main
# so we have to process it separately
fn_2012 = 'OH_BRINE DISPOSAL FEES FOR 2012.xls'
# fn_2012 = 'OH_BRINE DISPOSAL FEES FOR 2012 CORRECTED.xlsx'
# bulk of the data are here - first four worksheets are quarters. Total worksheet ignored
# tuple: (filename,year)
fn_main = [('BRINE DISPOSAL FEES FOR 2013.xlsx',2013),
('BRINE DISPOSAL FEES FOR 2014.xlsx',2014),
('BRINE DISPOSAL FEES FOR 2015.xlsx',2015),
('BRINE DISPOSAL FEES FOR 2016.xlsx',2016),
('BRINE DISPOSAL FEES FOR 2017.xlsx',2017)]
# current files are of a different format.
fn_2018_etc = [('BRINE DISPOSAL FEES FOR 2018.xlsx',2018),
('BRINE DISPOSAL FEES FOR 2019.xlsx',2019)]
SWDfn = indir+'Copy of SWD locations - July_2018.xls'
ODNR_permit_pickle = outdir+'ODNR_permit.pkl'
ODNR_injection_pickle = outdir+'ODNR_injection.pkl'
inj_excel = outdir+'Inject_wide.xlsx'
# In[59]:
t = pd.read_pickle(ODNR_injection_pickle)
x = t[t.Owner.str.contains('HUNTER')]
t.to_csv(tempf)
# ## get oldest data
# In[60]:
dlst = []
for fnl in fn_old:
print(fnl)
fn = fnl[0]
yr = fnl[1]
quar = fnl[2]
# print(fn,yr,quar)
d = pd.read_excel(indir+fn,skiprows=5,header=None,usecols=[7,8,10,11],
names=['CompanyName','APIstr','Vol','In_Out'])
d.Vol = d.Vol.where(d.Vol.str.lower().str.strip()!='zero',0)
d.Vol = pd.to_numeric(d.Vol)
dIn = d[d.In_Out.str.lower().str[0]=='i']
dIn = dIn.filter(['CompanyName','APIstr','Vol'])
dIn.columns = ['CompanyName','APIstr','Vol_InDist']
dOut = d[d.In_Out.str.lower().str[0]=='o']
dOut = dOut.filter(['APIstr','Vol'])
dOut.columns = ['APIstr','Vol_OutDist']
d['Year'] = fnl[1]
d['Quarter'] = fnl[2]
mg = pd.merge(dIn,dOut,how='outer',left_on='APIstr',right_on='APIstr')
mg['Year'] = fnl[1]
mg['Quarter'] = fnl[2]
dlst.append(mg)
old = pd.concat(dlst)
old.to_csv(tempf)
# In[61]:
old.info()
# ## process the 2012 file
# In[62]:
dlst = []
uc1 = [1,2,4,8]
uc2 = [7,8,10,14]
for ws in [0,1,2,3]: # ws 1 is like 'main'; others like 'old'
# print(ws)
if ws == 1:
uc = uc1
else:
uc= uc2
# print(uc)
d = pd.read_excel(indir+fn_2012,skiprows=6,sheet_name=ws,
usecols=uc,header=None,
names=['CompanyName','APIstr','Vol_InDist','Vol_OutDist'])
d = d.dropna(axis=0,subset=['CompanyName'])
d['Year'] = 2012
d['Quarter'] = ws+1
dlst.append(d)
if ws==1:
tmp = d
trans2012 = pd.concat(dlst)
trans2012.to_csv(tempf)
tmp.head()
# In[63]:
two = pd.concat([old,trans2012])
two.head()
#
# ## get main data files
# In[64]:
dlst = []
for fnl in fn_main:
print(fnl)
fn = fnl[0]
yr = fnl[1]
for ws in [0,1,2,3]: # four quarterly worksheets
d = pd.read_excel(indir+fn,skiprows=6,sheet_name=ws,
usecols=[0,1,2,4,8],header=None,
names=['AltName','CompanyName','APIstr','Vol_InDist','Vol_OutDist'])
d = d.dropna(axis=0,subset=['CompanyName'])
d['Year'] = yr
d['Quarter'] = ws+1
# d.columns= ['AltName','CompanyName','APIstr','Desc',
# 'Vol_InDist','GrossIn','NetIn','PercRet',
# 'Vol_OutDist','GrossOut','NetOut','PercRetOut','Comments']
# print(d.columns)
dlst.append(d)
main = pd.concat(dlst)
main.to_csv(tempf)
# In[65]:
three = pd.concat([two,main],sort=True)
# out = two.groupby(['APIstr'],as_index=True)['APIstr','Year','Quarter',
# 'CompanyName','Vol_InDist','Vol_OutDist']
three.to_csv(tempf)
# ## get current file
# In[100]:
dlst = []
for fnl in fn_current:
fn = fnl[0]
yr = fnl[1]
#print(fn,yr)
d = pd.read_excel(indir+fn,skiprows=6,sheet_name=0,
usecols=[0,1,2,3,5,9],header=None,
names=['QtrStr','AltName','CompanyName','APIstr','Vol_InDist','Vol_OutDist'])
d = d.dropna(axis=0,subset=['CompanyName'])
d['Year'] = yr
d['Quarter'] = d.QtrStr.str[0]
d = d[d.Quarter != 'Y']
d = d.filter(['AltName','CompanyName','APIstr','Vol_InDist','Vol_OutDist','Year','Quarter'])
dlst.append(d)
four = pd.concat(dlst,sort=True)
four = pd.concat([three,four],sort=True)
four.to_csv(tempf)
four.info()
# ## some clean up of the API string and Yr_Q
#
#
# In[101]:
four.APIstr = four.APIstr.astype('str') # make sure all are strings
# First create some flags base on status of APIstr
four['NoAPIstr'] = four.APIstr.str.strip()==''
print(f'Number of records with no APIstring: {four.NoAPIstr.sum()}')
four.APIstr =
|
np.where(four.NoAPIstr,'No API string recorded',four.APIstr)
|
numpy.where
|
"""
Testing module for Domain.py, Shape.py, BC.py
Work in progress
TO DO:
test inertia
test rigid body calculations
"""
from __future__ import division
from builtins import range
from past.utils import old_div
import unittest
import numpy.testing as npt
import numpy as np
from nose.tools import eq_
from proteus import Comm, Profiling, Gauges
from proteus.Profiling import logEvent as log
from proteus.Domain import (PiecewiseLinearComplexDomain,
PlanarStraightLineGraphDomain)
from proteus.SpatialTools import (Rectangle,
Cuboid,
CustomShape,
assembleDomain)
from proteus.mprans.SpatialTools import (Rectangle as RectangleRANS,
Cuboid as CuboidRANS,
CustomShape as CustomShapeRANS,
assembleDomain as assembleDomainRANS,
Tank2D,
Tank3D)
from proteus.mprans.BodyDynamics import RigidBody
comm = Comm.init()
Profiling.procID = comm.rank()
log("Testing SpatialTools")
def create_domain2D():
return PlanarStraightLineGraphDomain()
def create_domain3D():
return PiecewiseLinearComplexDomain()
def create_rectangle(domain, dim=(0., 0.), coords=(0., 0.), folder=None):
if folder is None:
return Rectangle(domain, dim, coords)
elif folder == 'mprans':
return RectangleRANS(domain, dim, coords)
def create_cuboid(domain, dim=(0., 0., 0.), coords=(0., 0., 0.), folder=None):
if folder is None:
return Cuboid(domain, dim, coords)
elif folder == 'mprans':
return CuboidRANS(domain, dim, coords)
def create_custom2D(domain, folder=None):
domain2D = domain
bt2D = {'bottom': 1, 'right': 2, 'left': 3, 'top': 4}
vertices2D = [[0., 0.], [1., 0.], [1., 1.], [0., 1.]]
vertexFlags2D = [bt2D['bottom'], bt2D['bottom'], bt2D['top'], bt2D['top']]
segments2D = [[0, 1], [1, 2], [2, 3], [3, 0]]
segmentFlags2D = [bt2D['bottom'], bt2D['right'], bt2D['top'], bt2D['left']]
if folder is None:
custom = CustomShape
elif folder == 'mprans':
custom = CustomShapeRANS
custom2D = custom(domain=domain2D, vertices=vertices2D,
vertexFlags=vertexFlags2D, segments=segments2D,
segmentFlags=segmentFlags2D, boundaryTags=bt2D)
return custom2D
def create_custom3D(domain, folder=None):
domain3D = domain
bt3D = {'bottom': 1, 'front':2, 'right':3, 'back': 4, 'left':5,
'top':6}
vertices3D = [[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.],
[0., 0., 1.], [1., 0., 1.], [1., 1., 1.], [0., 1., 1.]]
vertexFlags3D = [bt3D['left'], bt3D['right'], bt3D['right'], bt3D['left'],
bt3D['left'], bt3D['right'], bt3D['right'], bt3D['left']]
facets3D = [[[0, 1, 2, 3]], [[0, 1, 5, 4]], [[1, 2, 6, 5]], [[2, 3, 7, 6]],
[[3, 0, 4, 7]], [[4, 5, 6, 7]]]
facetFlags3D = [bt3D['bottom'], bt3D['front'], bt3D['right'], bt3D['back'],
bt3D['left'], bt3D['top']]
if folder is None:
custom = CustomShape
elif folder == 'mprans':
custom = CustomShapeRANS
custom3D = custom(domain=domain3D, vertices=vertices3D,
vertexFlags=vertexFlags3D, facets=facets3D,
facetFlags=facetFlags3D, boundaryTags=bt3D)
return custom3D
def create_tank2D(domain, dim=(0., 0.), coords=None):
return Tank2D(domain, dim, coords)
def create_tank3D(domain, dim=(0., 0., 0.), coords=None):
return Tank3D(domain, dim, coords)
class TestShapeDomainBuilding(unittest.TestCase):
def test_create_shapes(self):
"""
Testing if shapes can be created
"""
domain2D = create_domain2D()
domain3D = create_domain3D()
rectangle = create_rectangle(domain2D)
rectangleRANS = create_rectangle(domain2D, folder='mprans')
cuboid = create_cuboid(domain3D)
cuboidRANS = create_cuboid(domain3D, folder='mprans')
tand2D = create_tank2D(domain2D)
tank3D = create_tank3D(domain3D)
custom2D = create_custom2D(domain2D)
custom2DRANS = create_custom2D(domain2D, folder='mprans')
custom3D = create_custom3D(domain3D)
custom3DRANS = create_custom3D(domain3D, folder='mprans')
def test_assemble_domain(self):
"""
Testing the assembleDomain() for different domains with multiple shapes
"""
nb_shapes = 10
domain2D = create_domain2D()
domain2DRANS = create_domain2D()
domain3D = create_domain3D()
domain3DRANS = create_domain3D()
dim2D = np.array([1., 1.])
dim3D = np.array([1., 1., 1.])
coords2D = np.array([0.5, 0.5])
coords3D = np.array([0.5, 0.5, 0.5])
nb_bc2D = 0
nb_bc2DRANS = 0
nb_bc3D = 0
nb_bc3DRANS = 0
for shape in range(nb_shapes):
coords2D += 1.5
coords3D += 1.5
a = create_rectangle(domain2D, dim=dim2D, coords=coords2D)
nb_bc2D += len(a.BC_list)
a = create_cuboid(domain3D, dim=dim3D, coords=coords3D)
nb_bc3D += len(a.BC_list)
a = create_rectangle(domain2DRANS, dim=dim2D, coords=coords2D,
folder='mprans')
nb_bc2DRANS += len(a.BC_list)
a = create_cuboid(domain3DRANS, dim=dim3D, coords=coords3D,
folder='mprans')
nb_bc3DRANS += len(a.BC_list)
a = create_tank2D(domain2DRANS, dim=[50., 50.])
nb_bc2DRANS += len(a.BC_list)
a = create_tank3D(domain3DRANS, dim=[50., 50., 50.])
nb_bc3DRANS += len(a.BC_list)
assembleDomain(domain2D)
assembleDomain(domain3D)
assembleDomainRANS(domain2DRANS)
assembleDomainRANS(domain3DRANS)
x2D = domain2D.x
x3D = domain3D.x
x2DRANS = domain2DRANS.x
x3DRANS = domain3DRANS.x
L2D = domain2D.L
L3D = domain3D.L
L2DRANS = domain2DRANS.L
L3DRANS = domain3DRANS.L
# check that each domain has the right number of shapes
npt.assert_equal(len(domain2D.shape_list), nb_shapes)
npt.assert_equal(len(domain3D.shape_list), nb_shapes)
npt.assert_equal(len(domain2DRANS.shape_list), nb_shapes+1)
npt.assert_equal(len(domain3DRANS.shape_list), nb_shapes+1)
# check that the number of boundary conditions is right
npt.assert_equal(len(domain2D.bc), nb_bc2D+1)
npt.assert_equal(len(domain3D.bc), nb_bc3D+1)
npt.assert_equal(len(domain2DRANS.bc), nb_bc2DRANS+1)
npt.assert_equal(len(domain3DRANS.bc), nb_bc3DRANS+1)
# check that bounding boxes are rightly calculated
npt.assert_equal(L2D, [14.5, 14.5])
npt.assert_equal(L3D, [14.5, 14.5, 14.5])
npt.assert_equal(L2DRANS, [50., 50.])
npt.assert_equal(L3DRANS, [50., 50., 50.])
npt.assert_equal(x2D, [1.5, 1.5])
npt.assert_equal(x3D, [1.5, 1.5, 1.5])
npt.assert_equal(x2DRANS, [0., 0.])
npt.assert_equal(x3DRANS, [0., 0., 0.])
def test_BC_flags(self):
"""
Testing the flags of shapes and their in their domain
"""
nb_shapes = 3
domain2D = create_domain2D()
domain3D = create_domain3D()
domain2DRANS = create_domain2D()
domain3DRANS = create_domain3D()
flags_v2D = []
flags_s2D = []
flags_v3D = []
flags_f3D = []
flags_v2DRANS = []
flags_s2DRANS = []
flags_v3DRANS = []
flags_f3DRANS = []
maxf = 0
for i in range(nb_shapes):
# 2D
a = create_custom2D(domain2D)
if flags_v2D:
maxf = np.max([np.max(flags_v2D), np.max(flags_s2D)])
flags_v2D += (a.vertexFlags+maxf).tolist()
flags_s2D += (a.segmentFlags+maxf).tolist()
# 3D
a = create_custom3D(domain3D)
if flags_v3D:
maxf = np.max([np.max(flags_v3D), np.max(flags_f3D)])
flags_v3D += (a.vertexFlags+maxf).tolist()
flags_f3D += (a.facetFlags+maxf).tolist()
# 2D RANS
a = create_custom2D(domain2DRANS, folder='mprans')
if flags_v2DRANS:
maxf = np.max([
|
np.max(flags_v2DRANS)
|
numpy.max
|
#!/usr/bin/env python3
#
# Tests the cone distribution.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import pints
import pints.toy
import unittest
import numpy as np
class TestConeLogPDF(unittest.TestCase):
"""
Tests the cone log-pdf toy problems.
"""
def test_basic(self):
# Tests moments, calls, CDF evaluations and sampling
# Default settings
f = pints.toy.ConeLogPDF()
self.assertEqual(f.n_parameters(), 2)
self.assertEqual(f.beta(), 1)
f1 = f([1, 1])
f2 = f([0, 0])
self.assertTrue(np.isscalar(f1))
self.assertTrue(np.isscalar(f2))
self.assertAlmostEqual(f1, -1.4142135623730951)
self.assertEqual(f.mean_normed(), 2.0)
self.assertEqual(f.var_normed(), 2.0)
# Change dimensions and beta
f = pints.toy.ConeLogPDF(10, 0.5)
self.assertEqual(f.n_parameters(), 10)
self.assertEqual(f.beta(), 0.5)
self.assertEqual(f.mean_normed(), 420.0)
self.assertEqual(f.var_normed(), 36120.0)
f1 = f(np.repeat(1, 10))
self.assertAlmostEqual(f1, -1.7782794100389228)
# Test CDF function
f = pints.toy.ConeLogPDF()
self.assertAlmostEqual(f.CDF(1.0), 0.26424111765711533)
self.assertAlmostEqual(f.CDF(2.5), 0.71270250481635422)
f = pints.toy.ConeLogPDF(3, 2)
self.assertAlmostEqual(f.CDF(1.0), 0.42759329552912018)
self.assertRaises(ValueError, f.CDF, -1)
# Test sample function
x = f.sample(10)
self.assertEqual(len(x), 10)
f = pints.toy.ConeLogPDF(2, 2)
self.assertTrue(np.max(f.sample(1000)) < 10)
self.assertRaises(ValueError, f.sample, 0)
def test_bad_constructors(self):
# Tests bad instantiations and calls
self.assertRaises(
ValueError, pints.toy.ConeLogPDF, 0, 1)
self.assertRaises(
ValueError, pints.toy.ConeLogPDF, 1, 0)
self.assertRaises(
ValueError, pints.toy.ConeLogPDF, 3, -1)
# Bad calls to function
f = pints.toy.ConeLogPDF(4, 0.3)
self.assertRaises(ValueError, f.__call__, [1, 2, 3])
self.assertRaises(ValueError, f.__call__, [1, 2, 3, 4, 5])
def test_bounds(self):
# Tests suggested_bounds()
f = pints.toy.ConeLogPDF()
bounds = f.suggested_bounds()
self.assertTrue(np.array_equal([[-1000, -1000], [1000, 1000]],
bounds))
beta = 3
dimensions = 4
f = pints.toy.ConeLogPDF(beta=beta, dimensions=dimensions)
magnitude = 1000
bounds = np.tile([-magnitude, magnitude], (dimensions, 1))
self.assertEqual(bounds[0][0], -magnitude)
self.assertEqual(bounds[0][1], magnitude)
self.assertTrue(np.array_equal(np.array(bounds).shape, [4, 2]))
def test_sensitivities(self):
# Tests sensitivities
f = pints.toy.ConeLogPDF()
l, dl = f.evaluateS1([-1, 3])
self.assertEqual(len(dl), 2)
self.assertEqual(l, -np.sqrt(10))
self.assertAlmostEqual(dl[0], np.sqrt(1.0 / 10))
self.assertAlmostEqual(dl[1], -3 * np.sqrt(1.0 / 10))
f = pints.toy.ConeLogPDF(10, 0.3)
xx = [-1, 3, 2, 4, 5, 6, 7, 8, 9, 10]
l, dl = f.evaluateS1(xx)
self.assertEqual(len(dl), 10)
self.assertEqual(l, -np.sqrt(385)**0.3)
cons = -(385**(-1 + 0.15)) * 0.3
for i, elem in enumerate(dl):
self.assertAlmostEqual(elem, cons * xx[i])
def test_distance_function(self):
# Tests distance function
f = pints.toy.ConeLogPDF()
x = f.sample(10)
self.assertTrue(f.distance(x) > 0)
x = np.ones((100, 3))
self.assertRaises(ValueError, f.distance, x)
x = np.ones((100, 3, 2))
self.assertRaises(ValueError, f.distance, x)
f = pints.toy.ConeLogPDF(5)
x = f.sample(10)
self.assertTrue(f.distance(x) > 0)
x = np.ones((100, 4))
self.assertRaises(ValueError, f.distance, x)
x =
|
np.ones((100, 6))
|
numpy.ones
|
"""
Calculate the wavelet and its significance.
"""
from __future__ import division, absolute_import
import numpy as np
from scipy.special._ufuncs import gamma, gammainc
from scipy.optimize import fminbound as fmin
from scipy.fftpack import fft, ifft
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ['Wavelet', 'WaveCoherency']
class Wavelet:
"""
Compute the wavelet transform of the given data
with sampling rate dt.
By default, the MORLET wavelet (k0=6) is used.
The wavelet basis is normalized to have total energy=1
at all scales.
Parameters
----------
data : `~numpy.ndarray`
The time series N-D array.
dt : `float`
The time step between each y values.
i.e. the sampling time.
axis: `int`
The axis number to apply wavelet, i.e. temporal axis.
* Default is 0
dj : `float` (optional)
The spacing between discrete scales.
The smaller, the better scale resolution.
* Default is 0.25
s0 : `float` (optional)
The smallest scale of the wavelet.
* Default is :math:`2 \cdot dt`.
j : `int` (optional)
The number of scales minus one.
Scales range from :math:`s0` up to :math:`s_0\cdot 2^{j\cdot dj}`, to give
a total of :math:`j+1` scales.
* Default is :math:`j=\log_2{(\\frac{n dt}{s_0 dj})}`.
mother : `str` (optional)
The mother wavelet function.
The choices are 'MORLET', 'PAUL', or 'DOG'
* Default is **'MORLET'**
param : `int` (optional)
The mother wavelet parameter.\n
For **'MORLET'** param is k0, default is **6**.\n
For **'PAUL'** param is m, default is **4**.\n
For **'DOG'** param is m, default is **2**.\n
pad : `bool` (optional)
If set True, pad time series with enough zeros to get
N up to the next higher power of 2.
This prevents wraparound from the end of the time series
to the beginning, and also speeds up the FFT's
used to do the wavelet transform.
This will not eliminate all edge effects.
Notes
-----
This function based on the IDL code WAVELET.PRO written by <NAME>,
and Python code waveletFuncitions.py written by <NAME>.
References
----------
<NAME>. and <NAME>., 1998, A Practical Guide to Wavelet Analysis,
*Bull. Amer. Meteor. Soc.*, `79, 61-78 <http://paos.colorado.edu/research/wavelets/bams_79_01_0061.pdf>`_.\n
http://paos.colorado.edu/research/wavelets/
Example
-------
>>> from fisspy.analysis import wavelet
>>> res = wavelet.wavelet(data,dt,dj=dj,j=j,mother=mother,pad=True)
>>> wavelet = res.wavelet
>>> period = res.period
>>> scale = res.scale
>>> coi = res.coi
>>> power = res.power
>>> gws = res.gws
>>> res.plot()
"""
def __init__(self, data, dt, axis=0, dj=0.1, s0=None, j=None,
mother='MORLET', param=False, pad=True):
shape0 = np.array(data.shape)
self.n0 = shape0[axis]
shape = np.delete(shape0, axis)
self.axis = axis
if not s0:
s0 = 2*dt
if not j:
j = int(np.log2(self.n0*dt/s0)/dj)
else:
j=int(j)
self.s0 = s0
self.j = j
self.dt = dt
self.dj = dj
self.mother = mother.upper()
self.param = param
self.pad = pad
self.axis = axis
self.data = data
self.ndim = data.ndim
#padding
if pad:
# power = int(np.log2(self.n0)+0.4999)
power = int(np.log2(self.n0))
self.npad = 2**(power+1)-self.n0
self.n = self.n0 + self.npad
else:
self.n = self.n0
#wavenumber
k1 = np.arange(1,self.n//2+1)*2.*np.pi/self.n/dt
k2 = -k1[:int((self.n-1)/2)][::-1]
k =
|
np.concatenate(([0.],k1,k2))
|
numpy.concatenate
|
'''
Contains the following neural pooling functions:
1. min
2. max
3. avg
Which are from
`Tang et al <https://aclanthology.coli.uni-saarland.de/papers/P14-1146/p14-1146>`_.
and the following pooling functions:
4. prod
5. std
Which are from
`Vo and Zhang <https://www.ijcai.org/Proceedings/15/Papers/194.pdf>`_.
and finally the following pooling function:
6. median
From `Bo Wang et al.
<https://aclanthology.coli.uni-saarland.de/papers/E17-1046/e17-1046>`_
All the functions are applied over the columns and not the rows e.g. matrix
of (m, n) size and apply mean it will return a vector of (1, n). Therefore by
default all of the vectors returned are row vectors but if transpose is True
then column vectors are returned.
'''
from functools import wraps
import numpy as np
def inf_nan_check(neural_func):
'''
Contains decorator function that converts any inf or NAN value to a real
number to avoid any potential problems with inf and NAN's latter on in the
processing chain.
Inf conversion - Converts it to the max (min) value of the numpy array/matrix
dtype based on it being positive (negative) value.
NAN conversion - based on the following
`post <https://stackoverflow.com/questions/25506281/what-are-all-the-possib\
le-calculations-that-could-cause-a-nan-in-python>`_ about how NAN's occur.
It converts NAN's to zeros as the majority of the operation should equal
zero or are close to zero. This is a rough approximation but it should not
affect that many numbers.
'''
@wraps(neural_func)
def func_wrapper(matrix, **kwargs):
'''
:param matrix: Numpy array/matrix that could contain NAN or inf values.
:param transpose: If to convert the column vector into row vector
:type matrix: np.ndarray
:type transpose: bool
:returns: The numpy array/matrix with NAN and inf values converted to \
real values.
:rtype: np.ndarray
'''
matrix = neural_func(matrix, **kwargs)
if not issubclass(matrix.dtype.type, np.floating):
raise TypeError('Only accept floating value word embeddings not '\
'{}'.format(matrix.dtype.type))
# Convert all NAN values to zero
if np.any(np.isnan(matrix)):
matrix[np.where(np.isnan(matrix))] = 0
# Find any value that is greater than half the min and max values and
# convert them to half the min or max value respectively. This is
# done to ensure that range can be done without overflow exception
dtype_info = np.finfo(matrix.dtype)
min_value = dtype_info.min / 2
max_value = dtype_info.max / 2
if np.any(matrix[matrix < min_value]) or np.any(matrix[matrix > max_value]):
matrix[matrix < min_value] = min_value
matrix[matrix > max_value] = max_value
return matrix
return func_wrapper
def matrix_checking(neural_func):
'''
Contains decorator function to check argument compbatbility and the
decorated functions return. The functions decorated are the neural functions
which are:
1. :py:func:`bella.neural_pooling.matrix_min`
2. :py:func:`bella.neural_pooling.matrix_max`
3. :py:func:`bella.neural_pooling.matrix_avg`
'''
@wraps(neural_func)
def func_wrapper(matrix, transpose=False):
'''
Checks the matrix is of the correct type and that the return matrix
is of the correct size after the neural_func function has been applied to
the matrix.
inf values are converted to max (min) value defined by the dtype if
the value is positive (negative).
Applies transpose to convert row vectors into column vectors if
transpose == False
:param matrix: matrix or vector
:param transpose: If to convert the column vector into row vector
:type matrix: np.ndarray
:type transpose: bool
:returns: The output of the neural_func function.
:rtype: np.ndarray
'''
# Pre check
if not isinstance(matrix, np.ndarray):
raise TypeError('The matrix has to be of type numpy.ndarray and not '\
'{}'.format(type(matrix)))
# Applying the relevant neural pooling function
reduced_matrix = neural_func(matrix)
# Post check
rm_cols = reduced_matrix.shape[0]
rm_dim = len(reduced_matrix.shape)
if rm_dim != 1:
raise ValueError('The returned matrix should be a vector and have '\
'a dimension of 1 it is: {}'.format(rm_dim))
m_columns = matrix.shape[1]
if rm_cols != m_columns:
raise ValueError('The number of columns has changed during the pooling'\
'func from {} to {}'.format(m_columns, rm_cols))
if transpose:
return reduced_matrix.reshape(rm_cols, 1)
return reduced_matrix.reshape(1, rm_cols)
return func_wrapper
@inf_nan_check
@matrix_checking
def matrix_min(matrix, **kwargs):
'''
:param matrix: matrix or vector
:param kwargs: Can keywords that are accepted by `matrix_checking` function
:type matrix: np.ndarray
:type kwargs: dict
:returns: The minimum column values in the matrix.
:rtype: np.ndarray
'''
return matrix.min(axis=0)
@inf_nan_check
@matrix_checking
def matrix_max(matrix, **kwargs):
'''
:param matrix: matrix or vector
:param kwargs: Can keywords that are accepted by `matrix_checking` function
:type matrix: np.ndarray
:type kwargs: dict
:returns: The maximum column values in the matrix.
:rtype: np.ndarray
'''
return matrix.max(axis=0)
@inf_nan_check
@matrix_checking
def matrix_avg(matrix, **kwargs):
'''
:param matrix: matrix or vector
:param kwargs: Can keywords that are accepted by `matrix_checking` function
:type matrix: np.ndarray
:type kwargs: dict
:returns: The mean column values in the matrix.
:rtype: np.ndarray
'''
return matrix.mean(axis=0)
@inf_nan_check
@matrix_checking
def matrix_median(matrix, **kwargs):
'''
:param matrix: matrix or vector
:param kwargs: Can keywords that are accepted by `matrix_checking` function
:type matrix: np.ndarray
:type kwargs: dict
:returns: The median column values in the matrix.
:rtype: np.ndarray
'''
return np.median(matrix, axis=0)
@inf_nan_check
@matrix_checking
def matrix_std(matrix, **kwargs):
'''
:param matrix: matrix or vector
:param kwargs: Can keywords that are accepted by `matrix_checking` function
:type matrix: np.ndarray
:type kwargs: dict
:returns: The standard deviation of the column values in the matrix.
:rtype: np.ndarray
'''
return
|
np.std(matrix, axis=0)
|
numpy.std
|
import numpy as np
from surpyval import nonparametric as nonp
from scipy.stats import t, norm
from .kaplan_meier import KaplanMeier
from .nelson_aalen import NelsonAalen
from .fleming_harrington import FlemingHarrington_
from scipy.interpolate import interp1d
from autograd import jacobian
import matplotlib.pyplot as plt
import pandas as pd
class NonParametric():
"""
Result of ``.fit()`` method for every non-parametric
surpyval distribution. This means that each of the
methods in this class can be called with a model created
from the ``NelsonAalen``, ``KaplanMeier``,
``FlemingHarrington``, or ``Turnbull`` estimators.
"""
def __repr__(self):
out = ('Non-Parametric SurPyval Model'
+ '\n============================='
+ '\nModel : {dist}'
).format(dist=self.model)
if 'estimator' in self.data:
out += '\nEstimator : {turnbull}'.format(
turnbull=self.data['estimator'])
return out
def sf(self, x, interp='step'):
r"""
Surival (or Reliability) function with the
non-parametric estimates from the data.
Parameters
----------
x : array like or scalar
The values of the random variables at which t
he survival function will be calculated.
Returns
-------
sf : scalar or numpy array
The value(s) of the survival function at each x
Examples
--------
>>> from surpyval import NelsonAalen
>>> x = np.array([1, 2, 3, 4, 5])
>>> model = NelsonAalen.fit(x)
>>> model.sf(2)
array([0.63762815])
>>> model.sf([1., 1.5, 2., 2.5])
array([0.81873075, 0.81873075, 0.63762815, 0.63762815])
"""
x = np.atleast_1d(x)
idx = np.argsort(x)
rev = np.argsort(idx)
x = x[idx]
if interp == 'step':
idx = np.searchsorted(self.x, x, side='right') - 1
R = self.R[idx]
R = np.where(idx < 0, 1, R)
R = np.where(np.isposinf(x), 0, R)
else:
R = np.hstack([[1], self.R])
x_data = np.hstack([[0], self.x])
# R = np.interp(x, x_data, R)
R = interp1d(x_data, R, kind=interp)(x)
R[np.where(x > self.x.max())] = np.nan
return R[rev]
def ff(self, x, interp='step'):
r"""
CDF (failure or unreliability) function with the
non-parametric estimates from the data
Parameters
----------
x : array like or scalar
The values of the random variables at which
the survival function will be calculated.
Returns
-------
ff : scalar or numpy array
The value(s) of the failure function at each x
Examples
--------
>>> from surpyval import NelsonAalen
>>> x = np.array([1, 2, 3, 4, 5])
>>> model = NelsonAalen.fit(x)
>>> model.ff(2)
array([0.36237185])
>>> model.ff([1., 1.5, 2., 2.5])
array([0.18126925, 0.18126925, 0.36237185, 0.36237185])
"""
return 1 - self.sf(x, interp=interp)
def hf(self, x, interp='step'):
r"""
Instantaneous hazard function with the non-parametric
estimates from the data. This is calculated using simply
the difference between consecutive H(x).
Parameters
----------
x : array like or scalar
The values of the random variables at which
the survival function will be calculated
Returns
-------
hf : scalar or numpy array
The value(s) of the failure function at each x
Examples
--------
>>> from surpyval import NelsonAalen
>>> x = np.array([1, 2, 3, 4, 5])
>>> model = NelsonAalen.fit(x)
>>> model.ff(2)
array([0.36237185])
>>> model.ff([1., 1.5, 2., 2.5])
array([0.18126925, 0.18126925, 0.36237185, 0.36237185])
"""
idx = np.argsort(x)
rev = np.argsort(idx)
x = x[idx]
hf = np.diff(np.hstack([self.Hf(x[0], interp=interp),
self.Hf(x, interp=interp)]))
hf[0] = hf[1]
hf = pd.Series(hf)
hf[hf == 0] = np.nan
hf = hf.ffill().values
return hf[rev]
def df(self, x, interp='step'):
r"""
Density function with the non-parametric estimates
from the data. This is calculated using the relationship
between the hazard function and the density:
.. math::
f(x) = h(x)e^{-H(x)}
Parameters
----------
x : array like or scalar
The values of the random variables at which the
survival function will be calculated
Returns
-------
df : scalar or numpy array
The value(s) of the density function at x
Examples
--------
>>> from surpyval import NelsonAalen
>>> x = np.array([1, 2, 3, 4, 5])
>>> model = NelsonAalen.fit(x)
>>> model.df(2)
array([0.28693267])
>>> model.df([1., 1.5, 2., 2.5])
array([0.16374615, 0. , 0.15940704, 0. ])
"""
return self.hf(x, interp=interp) * np.exp(-self.Hf(x, interp=interp))
def Hf(self, x, interp='step'):
r"""
Cumulative hazard rate with the non-parametric estimates
from the data. This is calculated using the relationship
between the hazard function and the density:
.. math::
H(x) = -\ln (R(x))
Parameters
----------
x : array like or scalar
The values of the random variables at which the
function will be calculated.
Returns
-------
Hf : scalar or numpy array
The value(s) of the density function at x
Examples
--------
>>> from surpyval import NelsonAalen
>>> x = np.array([1, 2, 3, 4, 5])
>>> model = NelsonAalen.fit(x)
>>> model.Hf(2)
array([0.45])
>>> model.df([1., 1.5, 2., 2.5])
model.Hf([1., 1.5, 2., 2.5])
"""
return -np.log(self.sf(x, interp=interp))
def cb(self, x, on='sf', bound='two-sided', interp='step',
alpha_ci=0.05, bound_type='exp', dist='z'):
r"""
Confidence bounds of the ``on`` function at the
``alpa_ci`` level of significance. Can be the upper,
lower, or two-sided confidence by changing value of ``bound``.
Can change the bound type to be regular or exponential
using either the 't' or 'z' statistic.
Parameters
----------
x : array like or scalar
The values of the random variables at which the confidence bounds
will be calculated
on : ('sf', 'ff', 'Hf'), optional
The function on which the confidence bound will be calculated.
bound : ('two-sided', 'upper', 'lower'), str, optional
Compute either the two-sided, upper or lower confidence bound(s).
Defaults to two-sided.
interp : ('step', 'linear', 'cubic'), optional
How to interpolate the values between observations. Survival
statistics traditionally uses step functions, but can use
interpolated values if desired. Defaults to step.
alpha_ci : scalar, optional
The level of significance at which the bound will be computed.
bound_type : ('exp', 'regular'), str, optional
The method with which the bounds will be calculated. Using regular
will allow for the bounds to exceed 1 or be less than 0. Defaults
to exp as this ensures the bounds are within 0 and 1.
dist : ('t', 'z'), str, optional
The statistic to use in calculating the bounds (student-t or
normal). Defaults to the normal (z).
Returns
-------
cb : scalar or numpy array
The value(s) of the upper, lower, or both confidence bound(s) of
the selected function at x
Examples
--------
>>> from surpyval import NelsonAalen
>>> x = np.array([1, 2, 3, 4, 5])
>>> model = NelsonAalen.fit(x)
>>> model.cb([1., 1.5, 2., 2.5], bound='lower', dist='t')
array([0.11434813, 0.11434813, 0.04794404, 0.04794404])
>>> model.cb([1., 1.5, 2., 2.5])
array([[0.97789387, 0.16706394],
[0.97789387, 0.16706394],
[0.91235117, 0.10996882],
[0.91235117, 0.10996882]])
References
----------
http://reliawiki.org/index.php/Non-Parametric_Life_Data_Analysis
"""
if on in ['df', 'hf']:
raise ValueError("NonParametric cannot do confidence bounds on "
+ "density or hazard rate functions. Try Hf, "
+ "ff, or sf")
old_err_state = np.seterr(all='ignore')
cb = self.R_cb(x,
bound=bound,
interp=interp,
alpha_ci=alpha_ci,
bound_type=bound_type,
dist=dist
)
if (on == 'ff') or (on == 'F'):
cb = 1. - cb
elif on == 'Hf':
cb = -np.log(cb)
np.seterr(**old_err_state)
return cb
def R_cb(self, x, bound='two-sided', interp='step', alpha_ci=0.05,
bound_type='exp', dist='z'):
if bound_type not in ['exp', 'normal']:
return ValueError("'bound_type' must be in ['exp', 'normal']")
if dist not in ['t', 'z']:
return ValueError("'dist' must be in ['t', 'z']")
confidence = 1. - alpha_ci
old_err_state = np.seterr(all='ignore')
x = np.atleast_1d(x)
if bound in ['upper', 'lower']:
if dist == 't':
stat = t.ppf(1 - confidence, self.r - 1)
else:
stat = norm.ppf(1 - confidence, 0, 1)
if bound == 'upper':
stat = -stat
elif bound == 'two-sided':
if dist == 't':
stat = t.ppf((1 - confidence) / 2, self.r - 1)
else:
stat = norm.ppf((1 - confidence) / 2, 0, 1)
stat = np.array([-1, 1]).reshape(2, 1) * stat
if bound_type == 'exp':
# Exponential Greenwood confidence
R_out = self.greenwood * 1. / (np.log(self.R)**2)
R_out = np.log(-
|
np.log(self.R)
|
numpy.log
|
import os, sys, pdb, pickle
from profilehooks import profile
import time, math, random
import numpy as np
import scipy as sp
from scipy.spatial.distance import cosine
from lr.sks import SKS
from lr.utils import *
def im2col(X, kernel, strides=(1,1), padding=(0,0)):
'''
Views X as the matrix-version of a strded image.
https://stackoverflow.com/questions/30109068/implement-matlabs-im2col-sliding-in-python
@arg X: Input batch of images B x Hi x Wi x Fi.
@arg kernel: Tuple of kernel dimensions kR x kC.
@arg strides: Tuple of strides sR x sC.
@arg padding: Tuple of paddings pR x pC.
@return: X viewed in matrix form B x Ho x Wo x (kH*kW*Fi).
'''
kR, kC = kernel
sR, sC = strides
pR, pC = padding
B, Hi, Wi, Fi = X.shape
sB, sH, sW, sF = X.strides
Ho = int((Hi + 2*pR - kR)/sR) + 1
Wo = int((Wi + 2*pC - kC)/sC) + 1
out_shape = B, Ho, Wo, kR, kC, Fi
out_strides = sB, sR*sH, sC*sW, sH, sW, sF
Xpad = np.pad(X, ((0,0),(pR,pR),(pC,pC),(0,0)), mode='constant')
Xcol = np.lib.stride_tricks.as_strided(Xpad, shape=out_shape, strides=out_strides)
Xcol = Xcol.reshape(B, Ho, Wo, kR * kC * Fi)
return Xcol
class MaxNorm():
def __init__(self, beta=0.999, eps=1e-4):
self.step = 0
self.beta = beta
self.eps = eps
self.x_max = eps
def __call__(self, x):
self.step += 1
x_max = np.max(np.abs(x)) + self.eps
self.x_max = self.beta * self.x_max + (1 - self.beta) * x_max
x_max_tilde = self.x_max / (1 - self.beta**self.step)
x_normed = x / max(x_max, x_max_tilde)
return x_normed
class Module():
def __init__(self, *args, name=None, **kwargs):
self.name = name or self.__class__.__name__
self.mode = {'train':True, 'quant':False, 'qcal':False}
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def recursive_apply(self, fn, *args, **kwargs):
for modn, mod in self.__dict__.items():
if isinstance(mod, Module):
getattr(mod, fn)(*args, **kwargs)
def set_path(self, parent):
self.path = parent
for modn, mod in self.__dict__.items():
if isinstance(mod, Module):
mod.set_path(parent + '/' + modn)
def forward(self, X):
return X
def backward(self, Grad):
return Grad
def update(self, lr):
if hasattr(self, 'my_update'):
self.my_update(lr)
self.recursive_apply('update', lr)
def get_all(self, class_, path_):
insts = []
if isinstance(self, class_):
insts.append((path_, self))
for modn, mod in self.__dict__.items():
if isinstance(mod, Module):
insts += mod.get_all(class_, path_ + '/' + modn)
return insts
def set_mode(self, **kwargs):
self.mode.update(kwargs)
self.recursive_apply('set_mode', **kwargs)
class FixedQuantize(Module):
def __init__(self, bits, clip=1.0):
super(FixedQuantize, self).__init__()
self.signed = bits < 0
self.bits = abs(bits)
self.midrise = 0.5 if self.bits <= 2 else 0
self.n = -2**(self.bits-1) if self.signed else 0
self.p = 2**(self.bits-1) - 1 if self.signed else 2**(self.bits) - 1
self.s = 2**(np.ceil(np.log2(clip) - 1e-8) + self.signed - self.bits)
self.Aq = {}
self.step = 0
self.rel_error = 0
def forward(self, X, fid=0):
if not self.mode['quant']: return X
Af = X / self.s
Aq = np.round(Af - self.midrise)
Q = self.s * (np.clip(Aq, self.n, self.p) + self.midrise)
if self.step % 100 == 0:
relE = np.sum((Af - Aq)**2) / Af.size / (np.std(Af) + 1e-6)
self.rel_error = 0.9 * self.rel_error + 0.1 * relE
self.Aq[fid] = Aq
return Q
def backward(self, Grad, fid=0):
if not self.mode['quant']: return Grad
Aq = self.Aq[fid]
return Grad * ((Aq >= self.n) & (Aq <= self.p))
def my_update(self, lr):
self.step += 1
class ReLU(Module):
def __init__(self, qbits):
super(ReLU, self).__init__()
self.qa = FixedQuantize(qbits['a'], clip=qbits['amax'])
self.qg = FixedQuantize(qbits['g'], clip=qbits['gmax'])
def forward(self, X):
self.A = self.qa(np.maximum(X, 0))
return self.A
def backward(self, Grad):
Grad = self.qa.backward(Grad)
Grad *= (self.A > 0)
return self.qg(Grad)
class SoftMaxCrossEntropyLoss(Module):
def __init__(self, qbits):
super(SoftMaxCrossEntropyLoss, self).__init__()
self.qg = FixedQuantize(qbits['g'], clip=qbits['gmax'])
self.eps = np.exp(-100).astype(dt)
def forward(self, X, Yt):
self.batch_size = X.shape[0]
self.Yt = Yt.astype(dt)
self.X = X
exp = np.exp(X - np.max(X, 1, keepdims=True)) + self.eps
self.Yp = exp / np.sum(exp, 1, keepdims=True)
self.L = -np.sum(self.Yt * np.log(self.Yp)) / self.batch_size
return self.L
def backward(self, Grad=1.0):
Grad = self.qg((Grad * (self.Yp - self.Yt)).astype(dt))
return Grad
class MaxPool2D(Module):
def __init__(self, kernel):
super(MaxPool2D, self).__init__()
self.kernel_size = (kernel, kernel)
def forward(self, X):
self.A = X
Xcol = im2col(X, kernel=self.kernel_size, strides=self.kernel_size)
Xcol = Xcol.reshape(Xcol.shape[:3] + (self.kernel_size[0] * self.kernel_size[1], X.shape[-1]))
max_pos = np.argmax(Xcol, axis=3)
self.idx = list(np.ogrid[[slice(Xcol.shape[ax]) for ax in range(Xcol.ndim) if ax != 3]])
self.idx.insert(3, max_pos)
self.idx = tuple(self.idx)
Z = Xcol[self.idx]
return Z
def backward(self, Grad):
dZ = np.zeros(Grad.shape[:3] + (self.kernel_size[0] * self.kernel_size[1], Grad.shape[-1]))
dZ[self.idx] = Grad
dZ = dZ.reshape(Grad.shape[:3] + self.kernel_size + (Grad.shape[-1],))
dZ = np.transpose(dZ, (0,1,3,2,4,5))
dZ = dZ.reshape(self.A.shape)
return dZ
class Dropout(Module):
def __init__(self, keep_prob, qbits):
super(Dropout, self).__init__()
self.qa = FixedQuantize(qbits['a'], clip=qbits['amax'])
self.p = keep_prob
def forward(self, X):
if self.mode['train']:
self.mask = np.random.binomial(2, self.p, X.shape).astype(dt)
X *= self.mask / self.p
X = self.qa(X)
return X
def backward(self, Grad):
if self.mode['train']:
Grad = self.qa.backward(Grad)
Grad *= self.mask / self.p
return Grad
class StreamBatchNorm(Module):
def __init__(self, channels, conf, update_every_ba=10, update_every_mv=100):
super(StreamBatchNorm, self).__init__()
self.conf = conf
qbits = conf.qbits
self.qgamma = FixedQuantize(qbits['b'], clip=qbits['bmax'])
self.qbeta = FixedQuantize(qbits['b'], clip=qbits['bmax'])
self.qmean = FixedQuantize(qbits['b'], clip=qbits['bmax'])
self.qmsq = FixedQuantize(qbits['b'], clip=qbits['bmax']**2)
self.qa = FixedQuantize(-abs(qbits['a']), clip=qbits['amax'])
self.qg = FixedQuantize(qbits['g'], clip=qbits['gmax'])
self.channels = channels
self.update_every = update_every_ba
self.mom_ba = np.float32(1.0 - 1.0 / update_every_ba)
self.mom_mv = np.float32(1.0 - 1.0 / (update_every_mv/update_every_ba))
self.eps = np.float32(1e-8)
self.step = 0
self.mean_ba = np.zeros((1,channels), dtype=dt)
self.msq_ba = np.zeros((1,channels), dtype=dt)
self.mu_ba = np.zeros((1,channels), dtype=dt)
self.std_ba = np.zeros((1,channels), dtype=dt)
self.mean_mv = np.zeros((1,channels), dtype=dt)
self.var_mv = np.ones((1,channels), dtype=dt)
self.beta = np.zeros((1,channels), dtype=dt)
self.gamma =
|
np.ones((1,channels), dtype=dt)
|
numpy.ones
|
from pickle import load
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from numpy import array, argmax
# load doc into memory
def load_doc(filename):
# open the file as read only
file = open(filename, 'r')
# read all text
text = file.read()
# close the file
file.close()
return text
# load a pre-defined list of photo identifiers
def load_set(filename):
doc = load_doc(filename)
dataset = list()
# process line by line
for line in doc.split('\n'):
# skip empty lines
if len(line) < 1:
continue
# get the image identifier
identifier = line.split('.')[0]
dataset.append(identifier)
return set(dataset)
# load clean descriptions into memory
def load_clean_descriptions(filename, dataset):
# load document
doc = load_doc(filename)
descriptions = dict()
for line in doc.split('\n'):
# split line by white space
tokens = line.split()
# split id from description
image_id, image_desc = tokens[0], tokens[1:]
# skip images not in the set
if image_id in dataset:
# create list
if image_id not in descriptions:
descriptions[image_id] = list()
# wrap description in tokens
desc = 'startseq ' + ' '.join(image_desc) + ' endseq'
# store
descriptions[image_id].append(desc)
return descriptions
# load photo features
def load_photo_features(filename, dataset):
# load all features
all_features = load(open(filename, 'rb'))
# filter features
features = {k: all_features[k] for k in dataset}
return features
# covert a dictionary of clean descriptions to a list of descriptions
def to_lines(descriptions):
all_desc = list()
for key in descriptions.keys():
[all_desc.append(d) for d in descriptions[key]]
return all_desc
# fit a tokenizer given caption descriptions
def create_tokenizer(descriptions):
lines = to_lines(descriptions)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(lines)
return tokenizer
# calculate the length of the description with the most words
def max_length(descriptions):
lines = to_lines(descriptions)
return max(len(d.split()) for d in lines)
# create sequences of images, input sequences and output words for an image
def create_sequences(tokenizer, max_len, desc_list, photo, vocab_size):
x1, x2, y = list(), list(), list()
# walk through each description for the image
for desc in desc_list:
# encode the sequence
seq = tokenizer.texts_to_sequences([desc])[0]
# split one sequence into multiple X,y pairs
for i in range(1, len(seq)):
# split into input and output pair
in_seq, out_seq = seq[:i], seq[i]
# pad input sequence
in_seq = pad_sequences([in_seq], maxlen=max_len)[0]
# encode output sequence
out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
# store
x1.append(photo)
x2.append(in_seq)
y.append(out_seq)
return
|
array(x1)
|
numpy.array
|
import numpy as np
import pandas as pd
import scipy.integrate as intg
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colors import LogNorm
import scipy.ndimage.interpolation as interpol
import scipy.spatial as sp
import decimal
import tecplot as tp
from tecplot.exception import *
from tecplot.constant import *
import os
rsun = 6.957e10 # cm
def generateinterpolatedGrid(layfile, points, coords, variables):
"""
Function to create and save an interpolated tecplot simulation grid for radio emission calculation
This will only work with Tecplot 360 installed on your system.
:param layfile: Tecplot .lay file to be interpolated
:param points: Number of points in each spatial dimension
:param coords: Size of the grid in Rstar
:param variables: The number tecplot assigned to each variable for interpolation and output
:return:
"""
cwd = os.getcwd()
tp.load_layout(layfile)
frame1 = tp.active_frame()
cur_dataset = frame1.dataset
zone1 = cur_dataset.zone(0) # zone1 is what tecplot uses for plotting in the layfile
tp.macro.execute_command('''$!CreateRectangularZone
IMax = {0:}
JMax = {0:}
KMax = {0:}
X1 = -{1:}
Y1 = -{1:}
Z1 = -{1:}
X2 = {1:}
Y2 = {1:}
Z2 = {1:}
XVar = 1
YVar = 2
ZVar = 3'''.format(points, coords))
zone2 = cur_dataset.zone(1) # name second zone for interpolation
tp.data.operate.interpolate_linear(zone2, source_zones=zone1, variables=variables)
# create second zone and fill with variables
tp.data.save_tecplot_ascii(cwd + '/interpol_grid_{0:}Rstar_{1:}points.dat'.format(coords, points),
zones=[zone2],
variables=[0, 1, 2] + variables,
include_text=False,
precision=9,
include_geom=False,
use_point_format=True)
return
def integrationConstant(rstar):
"""
Function to set the integration constant, based off the stellar radius.
:param rstar: the radius of the star in units of rsun
:return: integration constant, int_c
"""
int_c = rstar * rsun
return int_c
def testData(points, gridsize, n0, T0, gam, ordered=True):
"""
Function to produce a grid of sample values of density and temperature.
Either ordered which follows a n ~ R^{-3} profile, or not ordered which has a more randomised distribution.
:param points: Number of gridpoints in each dimension
:param gridsize: The size of the grid radius in rstar
:param n0: base density of the stellar wind
:param T0: base temperature of the stellar wind
:param gam: polytopic index of the wind to derive temperature from density
:param ordered: either cause density to fall off with R^{-3} or be more randomised with a R^{-3} component
:return: ds, n, T. ds is the spacing in the grid used for integration. n is the grid density (shape points^3). T is the grid temperature (shape points^3).
"""
if ordered==True:
o = np.array([int(points / 2), int(points / 2), int(points / 2)])
x = np.linspace(0, points - 1, points)
y = np.linspace(0, points - 1, points)
z = np.linspace(0, points - 1, points)
X, Y, Z = np.meshgrid(x, y, z)
ds = np.linspace(-gridsize,gridsize,points)
d = np.vstack((X.ravel(), Y.ravel(), Z.ravel())).T
sph_dist = sp.distance.cdist(d, o.reshape(1, -1)).ravel()
sph_dist = sph_dist.reshape(points, points, points) /(points/2/gridsize)
sph_dist[int(points/2), int(points/2), int(points/2)] = 1e-40
n = n0 * (sph_dist ** -3)
n[int(points / 2), int(points / 2), int(points / 2)] = 0
# this is getting rid of the centre inf, doesn't matter as it is at centre and is removed anyway!
T = T0 * (n / n0) ** gam
return ds, n, T
else:
o = np.array([int(points / 2), int(points / 2), int(points / 2)])
x = np.linspace(0, points - 1, points)
y = np.linspace(0, points - 1, points)
z = np.linspace(0, points - 1, points)
X, Y, Z = np.meshgrid(x, y, z)
ds = np.linspace(-gridsize, gridsize, points)
d = np.vstack((X.ravel(), Y.ravel(), Z.ravel())).T
sph_dist = sp.distance.cdist(d, o.reshape(1, -1)).ravel()
sph_dist = sph_dist.reshape(points, points, points) / (points / 2 / gridsize)
sph_dist[int(points/2), int(points/2), int(points/2)] = 1e-20
#make random array of data
rand_n = n0*np.random.rand(points,points,points)
n = rand_n * (sph_dist ** -3) #give it a resemblence of falling off with distance
n[n>1e8] = n[n>1e8]+2e8 #cause some change to increase centre contrast in density!
n[int(points / 2), int(points / 2), int(points / 2)] = 0
# this is getting rid of the centre inf, doesn't matter as it is at centre and is removed anyway!
T = T0 * (n / n0) ** gam
return ds, n, T
def readData(filename, skiprows, points):
"""
This function expects an interpolated grid of data. Originally interpolated using the tecplot software.
Not tested yet but I am sure VisIT interpolated produced a similar output and can also be used.
Maybe include grid interpolation function in future.
:param filename: Name of the data file to read from
:param skiprows: Number of rows to skip (according to the pandas read_csv() function.
:param points: Number of gridpoints in each dimension
:return: ds, n, T. Grid spacing, grid density and grid temperature.
"""
df = pd.read_csv(filename, header=None, skiprows=skiprows, sep='\s+')
X = df[0].values.reshape((points, points, points))
ds = X[0, 0, :]
n_grid = (df[3].values.reshape((points, points, points))) / (1.673e-24 * 0.5)
T_grid = df[4].values.reshape((points, points, points))
return ds, n_grid, T_grid
def rotateGrid(n, T, degrees, axis='z'):
"""
Function that rotates the grid so that the emission can be calculated from any angle.
:param n: grid densities
:param T: grid temperatures
:param degrees: number of degrees for grid to rotate. Can be negative or positive, will rotate opposite directions
:param axis: This keyword sets the axis to rotate around. Default is z. A z axis rotation will rotate the grid "left/right". An x-axis rotation would rotate the grid "forwards/backwards" and should be used to set inclination of star.
:return: n and T, rotated!
"""
# The z axis rotates the grid around the vertical axis (used for rotation modulation of a star for example)
if axis == 'z':
n_rot = interpol.rotate(n, degrees, axes=(1, 2), reshape=False)
T_rot = interpol.rotate(T, degrees, axes=(1, 2), reshape=False)
return n_rot, T_rot
# The x axis rotates the grid around the horizontal axis (used for tilting for stellar inclinations)
if axis == 'x':
n_rot = interpol.rotate(n, degrees, axes=(0, 2), reshape=False)
T_rot = interpol.rotate(T, degrees, axes=(0, 2), reshape=False)
return n_rot, T_rot
# The following is only included for completeness, you should never need to rotate around this axis!!!
if axis == 'y':
n_rot = interpol.rotate(n, degrees, axes=(0, 1), reshape=False)
T_rot = interpol.rotate(T, degrees, axes=(0, 1), reshape=False)
return n_rot, T_rot
else:
print("axis is type: ", type(axis))
raise ValueError("Axis is the wrong type")
pass
def emptyBack(n, gridsize, points):
"""
Function that sets the density within and behind the star to zero (or very close to zero).
:param n: grid densities
:param gridsize: size of grid radius in rstar
:param points: number of gridpoints in each dimension
:return: n, the original grid of densities with the necessary densities removed
"""
# First block of code removes the densities from the sphere in the centre
points = int(points)
c = points / 2 # origin of star in grid
o = np.array([c, c, c]) # turn into 3d vector origin
rad = points / (gridsize * 2) # radius of star in indices
x1 = np.linspace(0, points - 1, points) # indices array, identical to y1 and z1
y1 = x1
z1 = x1
X, Y, Z = np.meshgrid(x1, y1, z1) # make 3d meshgrid
d = np.vstack((X.ravel(), Y.ravel(), Z.ravel())).T # a 2d array of all of all the coordinates in the grid
sph_dist = sp.distance.cdist(d, o.reshape(1, -1)).ravel() # the distance of each coordinate from the origin
p_sphere = d[sph_dist < rad] # the indices that exist inside the star at the centre
p_sphere = p_sphere.astype(int, copy=False) # change index values to integers
for i in p_sphere:
n[i[0], i[1], i[2]] = 1e-40
# Now remove the cyclinder behind the star which is out of sight.
o2 = o[:2] # the 2d centre of the xz plane
d2 = np.vstack((X.ravel(), Y.ravel())).T # converting the indices into a 1d array of points
circ_dist = sp.distance.cdist(d2,
o2.reshape(1, -1)).ravel() # find the distance of all points in d2 from the origin o2
p_circ = d2[circ_dist < rad] # find the indices of points inside the circle
p_circ = p_circ.astype(int, copy=False)
for i in range(int(
points / 2)): # iterate over the xz planes moving back from centre of the grid (points/2) to the back (points)
for j in p_circ:
n[int(j[0]), int(i+c), int(j[1])] = 1e-40
#n[int(j[0]), int(j[1]), int(i + c)] = 1e-40
return n
def absorptionBody(n, T, f):
"""
Function that calculates the absorption coefficients and blackbody emission value
for each cell in the interpolated tecplot grid
:param n: density of cell
:param T: temperature of cell
:param f: observing frequency
:return: alpha_v, B(v,T) : absorption coefficients and the blackbody of each cell
"""
gaunt = get_gaunt(T, f)
kb = 1.38e-16
h = 6.62607e-27
c = 2.998e10
absorption_c = 3.692e8 * (1.0 - np.exp(-(h * f) / (kb * T))) * ((n**2 / 4.)) * (T ** -0.5) * (f ** -3.0) * gaunt
bb = ((2.0 * h * (f ** 3.0)) / (c ** 2.0)) * (1.0 / (np.exp((h * f) / (kb * T)) - 1.0))
absorption_c[np.isnan(absorption_c)] = 1e-40
absorption_c[np.isinf(absorption_c)] = 1e-40
return absorption_c, bb
def get_gaunt(T, f):
"""
Function that simply returns grid of values of gaunt factors from temperatures and frequencies
Note: Assumes that Z (ionic charge) is +1.
:param T: grid of temperatures in Kelvin
:param f: observational frequency
:return: grid of gaunt factors the same shape as T
"""
gaunt = 10.6 + (1.90 * np.log10(T)) - (1.26 * np.log10(f))
"""
This assumption for gaunt factor is only applicable to temperatures > 10,000 K, according to Saha equation
I think including the R.M.S of the ionisation fraction as a function of T would fix this
Aline says this is a quicker fix, although less exact
Most emission should occur in the more ionised regions anyway!
"""
if np.any(T < 1.0e4):
print("\nCold wind assumption!!...\n")
print("\n... Assuming the gaunt factor goes to 1 in the case of T < 10,000 K\n")
print("Setting gaunt = 1 if T < 1.0e4\n")
gaunt[T < 1.0e4] = 1.0
return gaunt
def opticalDepth(ds, ab, int_c):
"""
Calculates the optical depth of material given the integration grid and the absorption coefficients.
:param ds: The regular spacing of the interpolated grid (integration distances , ds)
:param ab: grid of absorption coefficients calculated from absorptionBody()
:param int_c: integration constant calculated from integrationConstant()
:return: array of cumulative optical depth (tau)
"""
tau = (intg.cumtrapz(ab, x=ds, initial=0, axis=1)) * int_c
#note that axis=1 is here to ensure integration occurs along y axis.
#Python denotes the arrays as [z,y,x] in Tecplot axes terms.(x and z axes notation are swapped)
return tau
def intensity(ab, bb, tau, ds, int_c):
"""
Name : intensity()
Function : Calculates the intensity of emission given the blackbody emission from each grid cell and the optical depth at each cell
Note : Not sure whether to take the last 2d grid of cells (i.e. - Iv[:,:,-1])
or sum up each column given the bb and tau (i.e. - np.sum(Iv, axis=2).
"""
I = intg.simps((bb * np.exp(-tau)) * ab, x=ds, axis=1) * int_c
return I
def flux_density(I, ds, d, int_c):
"""
Name : flux_density()
Function : Calculates the flux density given a certain intensity and distance (pc)
"""
d *= 3.085678e18 # change d from pc to cm
# flux here given in Jy
Sv = 1e23 * (int_c ** 2.0) * (intg.simps(intg.simps(I, x=ds), x=ds)) / d ** 2.0
return Sv
def get_Rv(contour, points, gridsize):
"""
Function to get the coordinates of a contour.
Input:
contour : The contour object plotted on image
points : number of grid points in image
gridsize : Size of grid in Rstar
Returns:
Rv - Size of radius of emission in Rstar
"""
path = contour.collections[0].get_paths()
path = path[0]
verts = path.vertices
x, y = verts[:, 0], verts[:, 1]
x1, y1 = x - points / 2, y - points / 2
r = np.sqrt(x1 ** 2 + y1 ** 2)
Rv = gridsize * (max(r) / (points / 2.0))
return Rv
def double_plot(I, tau, f_i, points, gridsize):
"""
Plot two images beside each other
"""
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
p = ax1.imshow(I, interpolation='bilinear', origin='lower', norm=LogNorm(vmin=1e-20, vmax=1e-12), cmap=cm.Greens)
fig.suptitle(r'$\nu_{{\rm ob}}$ = {0:.2f} Hz'.format(f_i), bbox=dict(fc="w", ec="C3", boxstyle="round"))
circ1 = plt.Circle((points / 2, points / 2), (points / (2 * gridsize)), color='white', fill=True, alpha=0.4)
ax1.add_artist(circ1)
div1 = make_axes_locatable(ax1)
cax1 = div1.append_axes("right", size="8%", pad=0.1)
cbar1 = plt.colorbar(p, cax=cax1)
cbar1.set_label(r'I$_{\nu}$ (erg/s/cm$^2$/sr/Hz)', fontsize=16)
p2 = ax2.imshow(tau[:, -1, :], interpolation='bilinear', origin='lower', norm=LogNorm(vmin=1e-8, vmax=0.399), cmap=cm.Oranges)
circ2 = plt.Circle(((points) / 2, (points) / 2), (points / (2 * gridsize)), color='white', fill=True, alpha=0.4)
ax2.add_artist(circ2)
cset1 = ax2.contour(tau[:, -1, :], [0.399], colors='k', origin='lower', linestyles='dashed')
Rv_PF = (get_Rv(cset1, points, gridsize))
div2 = make_axes_locatable(ax2)
cax2 = div2.append_axes("right", size="8%", pad=0.1)
cbar2 = plt.colorbar(p2, cax=cax2)
cbar2.set_label(r'$\tau_{\nu}$', fontsize=16)
plt.tight_layout()
ax1.set_xticks(np.linspace(0, 200, 5))
ax2.set_xticks(np.linspace(0, 200, 5))
ax1.set_yticks(np.linspace(0, 200, 5))
ax2.set_yticks(np.linspace(0, 200, 5))
ax1.set_xticklabels(['-10', '-5', '0', '5', '10'], fontsize=12)
ax1.set_yticklabels(['-10', '-5', '0', '5', '10'], fontsize=12)
ax2.set_xticklabels(['-10', '-5', '0', '5', '10'], fontsize=12)
ax2.set_yticklabels(['-10', '-5', '0', '5', '10'], fontsize=12)
ax1.set_xlim([25, 175])
ax2.set_xlim([25, 175])
ax1.set_ylim([25, 175])
ax2.set_ylim([25, 175])
ax1.set_ylabel(r'R$_{\star}$', fontsize=16)
ax1.set_xlabel(r'R$_{\star}$', fontsize=16)
ax2.set_ylabel(r'R$_{\star}$', fontsize=16)
ax2.set_xlabel(r'R$_{\star}$', fontsize=16)
ax1.grid(which='major', linestyle=':', alpha=0.8)
ax2.grid(which='major', linestyle=':', alpha=0.8)
plt.show()
#plt.close()
return Rv_PF
def spectrumCalculate(folder, freqs, ds, n_i, T_i, d, points, gridsize, int_c, plotting=False):
"""
Inputs : folder name, range of frequencies, position coordinate, density, temperature
Function : Calculates flux density (Sv) and radius of emission (Rv) for a range of frequencies
"""
Svs = []
Rvs = []
taus = []
for i, j in enumerate(freqs):
ab, bb = absorptionBody(n_i, T_i, j)
tau = opticalDepth(ds, ab, int_c)
taus.append(
|
np.mean(tau)
|
numpy.mean
|
from __future__ import absolute_import, division, print_function
from java import constructor, method, static_proxy, jint, jarray, jdouble, jboolean, jclass
from java.lang import String
from scipy.signal import butter, lfilter
from sklearn.decomposition import FastICA
import numpy as np
import scipy
class NpScipy(static_proxy()):
@constructor([])
def __init__(self):
super(NpScipy, self).__init__()
'''In PulseRateAlgorithm:
ica = FastICA(whiten=False)
window = (window - np.mean(window, axis=0)) / \
np.std(window, axis=0) # signal normalization
# S = np.c_[array[cutlow:], array_1[cutlow:], array_2[cutlow:]]
# S /= S.std(axis=0)
# ica = FastICA(n_components=3)
# print(np.isnan(window).any())
# print(np.isinf(window).any())
# ica = FastICA()
window = np.reshape(window, (150, 1))
S = ica.fit_transform(window) # ICA Part
...
...
detrend = scipy.signal.detrend(S)'''
@method(jarray(jarray(jdouble)), [jarray(jdouble), jboolean])
def get_detrend(self, window, dummyBoolean):
ica = FastICA(whiten=False)
window = np.asarray(window)
window = (window - np.mean(window, axis=0)) / \
np.std(window, axis=0) # signal normalization
window = np.reshape(window, (150, 1))#NOTE: it was (150, 1)
S = ica.fit_transform(window) # ICA Part
detrend = scipy.signal.detrend(S)
return detrend.tolist()
'''In PulseRateAlgorithm:
y = butter_bandpass_filter(
detrend, lowcut, highcut, fs, order=4)'''
@method(jarray(jarray(jdouble)), [jarray(jarray(jdouble)), jdouble, jdouble, jdouble, jint])
def butter_bandpass_filter(self, data, lowcut, highcut, fs, order):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
y = lfilter(b, a, data)
return y
'''In PulseRateAlgorithm:
powerSpec = np.abs(np.fft.fft(y, axis=0)) ** 2'''
@method(jarray(jarray(jdouble)), [jarray(jarray(jdouble)), jboolean])
def get_powerSpec(self, y, dummyBoolean):
return (np.abs(np.fft.fft(y, axis=0)) ** 2).tolist()
'''In PulseRateAlgorithm:
freqs = np.fft.fftfreq(150, 1.0 / 30)'''
@method(jarray(jdouble), [jint, jdouble])
def fftfreq(self,a, b):
return
|
np.fft.fftfreq(a, b)
|
numpy.fft.fftfreq
|
"""
Main script for semantic experiments
Author: <NAME> (github/VSainteuf)
License: MIT
"""
import argparse
import json
import os
import pickle as pkl
import pprint
import time
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data as data
import torchnet as tnt
from src import utils, model_utils
from src.dataset import PASTIS_Dataset
from src.learning.metrics import confusion_matrix_analysis
from src.learning.miou import IoU
from src.learning.weight_init import weight_init
parser = argparse.ArgumentParser()
# Model parameters
parser.add_argument(
"--model",
default="utae",
type=str,
help="Type of architecture to use. Can be one of: (utae/unet3d/fpn/convlstm/convgru/uconvlstm/buconvlstm)",
)
## U-TAE Hyperparameters
parser.add_argument("--encoder_widths", default="[64,64,64,128]", type=str)
parser.add_argument("--decoder_widths", default="[32,32,64,128]", type=str)
parser.add_argument("--out_conv", default="[32, 20]")
parser.add_argument("--str_conv_k", default=4, type=int)
parser.add_argument("--str_conv_s", default=2, type=int)
parser.add_argument("--str_conv_p", default=1, type=int)
parser.add_argument("--agg_mode", default="att_group", type=str)
parser.add_argument("--encoder_norm", default="group", type=str)
parser.add_argument("--n_head", default=16, type=int)
parser.add_argument("--d_model", default=256, type=int)
parser.add_argument("--d_k", default=4, type=int)
# Set-up parameters
parser.add_argument(
"--dataset_folder",
default="",
type=str,
help="Path to the folder where the results are saved.",
)
parser.add_argument(
"--res_dir",
default="./results",
help="Path to the folder where the results should be stored",
)
parser.add_argument(
"--num_workers", default=8, type=int, help="Number of data loading workers"
)
parser.add_argument("--rdm_seed", default=1, type=int, help="Random seed")
parser.add_argument(
"--device",
default="cuda",
type=str,
help="Name of device to use for tensor computations (cuda/cpu)",
)
parser.add_argument(
"--display_step",
default=50,
type=int,
help="Interval in batches between display of training metrics",
)
parser.add_argument(
"--cache",
dest="cache",
action="store_true",
help="If specified, the whole dataset is kept in RAM",
)
# Training parameters
parser.add_argument("--epochs", default=100, type=int, help="Number of epochs per fold")
parser.add_argument("--batch_size", default=4, type=int, help="Batch size")
parser.add_argument("--lr", default=0.001, type=float, help="Learning rate")
parser.add_argument("--mono_date", default=None, type=str)
parser.add_argument("--ref_date", default="2018-09-01", type=str)
parser.add_argument(
"--fold",
default=None,
type=int,
help="Do only one of the five fold (between 1 and 5)",
)
parser.add_argument("--num_classes", default=20, type=int)
parser.add_argument("--ignore_index", default=-1, type=int)
parser.add_argument("--pad_value", default=0, type=float)
parser.add_argument("--padding_mode", default="reflect", type=str)
parser.add_argument(
"--val_every",
default=1,
type=int,
help="Interval in epochs between two validation steps.",
)
parser.add_argument(
"--val_after",
default=0,
type=int,
help="Do validation only after that many epochs.",
)
list_args = ["encoder_widths", "decoder_widths", "out_conv"]
parser.set_defaults(cache=False)
def iterate(
model, data_loader, criterion, config, optimizer=None, mode="train", device=None
):
loss_meter = tnt.meter.AverageValueMeter()
iou_meter = IoU(
num_classes=config.num_classes,
ignore_index=config.ignore_index,
cm_device=config.device,
)
t_start = time.time()
for i, batch in enumerate(data_loader):
if device is not None:
batch = recursive_todevice(batch, device)
(x, dates), y = batch
y = y.long()
if mode != "train":
with torch.no_grad():
out = model(x, batch_positions=dates)
else:
optimizer.zero_grad()
out = model(x, batch_positions=dates)
loss = criterion(out, y)
if mode == "train":
loss.backward()
optimizer.step()
with torch.no_grad():
pred = out.argmax(dim=1)
iou_meter.add(pred, y)
loss_meter.add(loss.item())
if (i + 1) % config.display_step == 0:
miou, acc = iou_meter.get_miou_acc()
print(
"Step [{}/{}], Loss: {:.4f}, Acc : {:.2f}, mIoU {:.2f}".format(
i + 1, len(data_loader), loss_meter.value()[0], acc, miou
)
)
t_end = time.time()
total_time = t_end - t_start
print("Epoch time : {:.1f}s".format(total_time))
miou, acc = iou_meter.get_miou_acc()
metrics = {
"{}_accuracy".format(mode): acc,
"{}_loss".format(mode): loss_meter.value()[0],
"{}_IoU".format(mode): miou,
"{}_epoch_time".format(mode): total_time,
}
if mode == "test":
return metrics, iou_meter.conf_metric.value() # confusion matrix
else:
return metrics
def recursive_todevice(x, device):
if isinstance(x, torch.Tensor):
return x.to(device)
elif isinstance(x, dict):
return {k: recursive_todevice(v, device) for k, v in x.items()}
else:
return [recursive_todevice(c, device) for c in x]
def prepare_output(config):
os.makedirs(config.res_dir, exist_ok=True)
for fold in range(1, 6):
os.makedirs(os.path.join(config.res_dir, "Fold_{}".format(fold)), exist_ok=True)
def checkpoint(fold, log, config):
with open(
os.path.join(config.res_dir, "Fold_{}".format(fold), "trainlog.json"), "w"
) as outfile:
json.dump(log, outfile, indent=4)
def save_results(fold, metrics, conf_mat, config):
with open(
os.path.join(config.res_dir, "Fold_{}".format(fold), "test_metrics.json"), "w"
) as outfile:
json.dump(metrics, outfile, indent=4)
pkl.dump(
conf_mat,
open(
os.path.join(config.res_dir, "Fold_{}".format(fold), "conf_mat.pkl"), "wb"
),
)
def overall_performance(config):
cm = np.zeros((config.num_classes, config.num_classes))
for fold in range(1, 6):
cm += pkl.load(
open(
os.path.join(config.res_dir, "Fold_{}".format(fold), "conf_mat.pkl"),
"rb",
)
)
if config.ignore_index is not None:
cm =
|
np.delete(cm, config.ignore_index, axis=0)
|
numpy.delete
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
class Lane:
def __init__(self, windows_count = 9, margin = 100, minpix = 50,
color=(255, 0, 0), show_image = False):
self.show_image = show_image
self.color = color
# HYPERPARAMETERS
# Choose the number of sliding windows
self.windows_count = windows_count
# Set the width of the windows +/- margin
self.margin = margin
# Set minimum number of pixels found to recenter window
self.minpix = minpix
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_xfitted = []
# average x values of the fitted line over the last n iterations
self.bestx = None
# polynomial coefficients averaged over the last n iterations
self.best_fit = None
# polynomial coefficients for the most recent fit
self.current_fit = [np.array([False])]
# radius of curvature of the line in some units
self.radius_of_curvature = None
# distance in meters of vehicle center from the line
self.line_base_pos = None
# difference in fit coefficients between last and new fits
self.diffs = np.array([0,0,0], dtype='float')
# x values for detected line pixels
self.allx = None
# y values for detected line pixels
self.ally = None
self.lane_fit = None
def find_lane_pixels(self, binary_warped, x_base, out_img):
if(self.detected):
self.search_around_poly(binary_warped)
if(self.detected):
return
# Set height of windows - based on nwindows above and image shape
window_height = np.int32(binary_warped.shape[0]//self.windows_count)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
x_current = x_base
# Create empty lists to receive left and right lane pixel indices
lane_inds = []
# Step through the windows one by one
for window in range(self.windows_count):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1) * window_height
win_y_high = binary_warped.shape[0] - window * window_height
### TO-DO: Find the four below boundaries of the window ###
win_x_low = x_current - self.margin
win_x_high = x_current + self.margin
# Draw the windows on the visualization image
if self.show_image:
cv2.rectangle(out_img, (win_x_low,win_y_low), (win_x_high,win_y_high), self.color, 2)
good_inds = [index for index, value in enumerate(zip(nonzeroy, nonzerox)) if (value[0] < win_y_high and value[0] >= win_y_low) and (value[1] < win_x_high and value[1] >= win_x_low)]
lane_inds.append(good_inds)
# Append these indices to the lists
if(nonzerox[good_inds].shape[0] > self.minpix):
x_current = int(np.mean(nonzerox[good_inds]))
if self.show_image:
plt.imshow(out_img)
plt.show()
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
lane_inds =
|
np.concatenate(lane_inds)
|
numpy.concatenate
|
from typing import List, Union
import numpy as np
def get_test_function_method_min(n: int, a: List[List[float]], c: List[List[float]],
p: List[List[float]], b: List[float]):
"""
Функция-замыкание, генерирует и возвращает тестовую функцию, применяя метод Фельдбаума,
т. е. применяя оператор минимума к одноэкстремальным степенным функциям.
:param n: количество экстремумов, целое число >= 1
:param a: список коэффициентов крутости экстремумов (длиной n), чем выше значения,
тем быстрее функция убывает/возрастает и тем уже область экстремума, List[List[float]]
:param c: список координат экстремумов длиной n, List[List[float]]
:param p: список степеней гладкости в районе экстремума,
если 0<p[i][j]<=1 функция в точке экстремума будет угловой
:param b: список значений функции (длиной n) в экстремуме, List[float], len(b) = n
:return: возвращает функцию, которой необходимо передавать одномерный список координат точки,
возвращаемая функция вернет значение тестовой функции в данной точке
"""
def func(x):
l = []
for i in range(n):
res = 0
for j in range(len(x)):
res = res + a[i][j] * np.abs(x[j] - c[i][j]) ** p[i][j]
res = res + b[i]
l.append(res)
res =
|
np.array(l)
|
numpy.array
|
"""classify.py"""
import sys
import os
import acl
path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(path, ".."))
sys.path.append(os.path.join(path, "../../../../common/"))
sys.path.append(os.path.join(path, "../../../../common/atlas_utils"))
from constants import ACL_MEM_MALLOC_HUGE_FIRST, ACL_MEMCPY_DEVICE_TO_DEVICE, IMG_EXT
from acl_model import Model
from acl_image import AclImage
from acl_resource import AclResource
from resnet50_classes import get_resnet50_class
from PIL import Image, ImageDraw, ImageFont
import numpy as np
SRC_PATH = os.path.realpath(__file__).rsplit("/", 1)[0]
MODEL_PATH = os.path.join(SRC_PATH, "../model/resnet50.om")
MODEL_WIDTH = 224
MODEL_HEIGHT = 224
class Classify(object):
"""classify"""
def __init__(self, model_path, model_width, model_height):
self._model_path = model_path
self._model_width = model_width
self._model_height = model_height
self._model = Model(model_path)
def __del__(self):
print("[Sample] class Samle release source success")
def pre_process(self, image):
"""preprocess"""
input_image = Image.open(image)
input_image = input_image.resize((224, 224))
# hwc
img = np.array(input_image)
height = img.shape[0]
width = img.shape[1]
h_off = int((height - 224) / 2)
w_off = int((width - 224) / 2)
crop_img = img[h_off:height - h_off, w_off:width - w_off, :]
# rgb to bgr
print("crop shape = ", crop_img.shape)
img = crop_img[:, :, ::-1]
shape = img.shape
print("img shape = ", shape)
img = img.astype("float32")
img[:, :, 0] *= 0.003922
img[:, :, 1] *= 0.003922
img[:, :, 2] *= 0.003922
img[:, :, 0] -= 0.4914
img[:, :, 0] = img[:, :, 0] / 0.2023
img[:, :, 1] -= 0.4822
img[:, :, 1] = img[:, :, 1] / 0.1994
img[:, :, 2] -= 0.4465
img[:, :, 2] = img[:, :, 2] / 0.2010
img = img.reshape([1] + list(shape))
# nhwc -> nchw
result = img.transpose([0, 3, 1, 2]).copy()
return result
def inference(self, resized_image):
"""inference"""
return self._model.execute([resized_image, ])
def post_process(self, infer_output, image_file):
"""postprocess"""
print("post process")
data = infer_output[0]
print("data shape = ", data.shape)
vals = data.flatten()
max = 0
sum = 0
for i in range(0, 10):
if vals[i] > max:
max = vals[i]
for i in range(0, 10):
vals[i] =
|
np.exp(vals[i] - max)
|
numpy.exp
|
# -*- coding: utf-8 -*-
#######################################
# StabilityMap_2d.py
#######################################
# analysis of two coupled tipping elements
# for manuscript:
# "Emergence of cascading dynamics in interacting tipping elements of ecology and climate"
# two coupled tipping elements given by
# subsystem 0
# dx0/dt = a_0*x0 - b_0*x0^3 + c_0 + d_0*x1
# subsystem 1
# dx1/dt = a_1*x1 - b_1*x1^3 + c_1 + d_1*x0
# computes a (2d) matrix of stability maps each giving the number of stable fixed points
# depending on the control parameters of two tipping elements
# the position of the stability map withing the matrix is determined by the coupling strength of the tipping elements
# script generates elements of Figure 4/5/6 in manuscript and Figure 1 in Supplementary Material
# for parameter settings indicated below
###############################################################################
# Import packages
import numpy as np
import matplotlib.pylab as plt
from matplotlib.colors import ListedColormap
from numpy import roots
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# definition of coefficients
b_0 = 1.0
b_1 = 1.0
a_0 = 1.0
a_1 = 1.0
# calulcation of intrinsic tipping points
c_0_crit = 2*np.sqrt((a_0/3)**3/(b_0))
c_1_crit = 2*np.sqrt((a_1/3)**3/(b_1))
# control parameter arrays
anz = 500 # 500 chosen for single stability map, 100 chosen for matrix of stability maps
value_c0 = np.linspace(0.0,0.8,anz)
value_c1 = np.linspace(0.8,0.0,anz)
# coupling strength array
value_d0 = np.array([0.0]) # used:
# for matrix of stability maps (Figure 4 in manuscript): -0.9, -0.7, -0.5, -0.3, -0.2, 0.0
# for undirectional example (Figure 5 in manuscript): 0.0
# for bidirectional example (Figure 6 in manuscript): -0.2
# for undirectional example of high coupling strength (Figure 1 in Suppl.Material): 0.0
value_d1 = np.array([0.2]) # used:
# for matrix of stability maps (Figure 4 in manuscript): 0.9,0.7,0.5,0.3,0.2,0.0
# for unidirectional example (Figure 5 in manuscript): 0.2
# for bidirectional example (Figure 6 in manuscript): 0.2
# for undirectional example of high coupling strength (Figure 1 in Suppl.Material): 0.9
# set quiver to 1 if additional phase portraits shall be plotted onto the stability map
# note: this is only recommended if there is one combination of coupling strength defined
# (i.e. Figure 5,6 in manuscript and Figure 1 in Suppl. Material)
quiver = 1
# if quiver is set to 1, control parameter values need to be provided for which
# phase portraits are added to the stability map
value_c0_q = np.array([0.2, 0.6]) # for example:
# for bidirectional example (Figure 6 in manuscript): 0.1, 0.33,0.5,0.72
# for undirectional example (Figure 5 in manuscript): 0.2, 0.6
# for unidirectional example of high coupling strength (Figure 1 in Suppl. Material): 0.2,0.6
value_c1_q = np.array([0.1,0.3,0.5,0.7])
# for bidirectional example (Figure 6 in manuscript): 0.09,0.3,0.49,0.7
# for undirectional example (Figure 5 in manuscript): 0.1,0.3,0.5,0.7
# for unidirectional example of high coupling strength (Figure 1 in Suppl. Material): 0.2,0.6
# specification of additional arrows/markers/annotations according to Figures found in manuscript
# should be added to plot
F_add = "Figure_5"
# F_add = "Figure_6"
# F_add = "Figure_1_Suppl"
###############################################################################
# definition of functions
###############################################################################
def roots_(*params):
return roots(list(params))
roots3 = np.vectorize(roots_, signature = "(),(),(),()->(n)",otypes=[complex])
roots9 = np.vectorize(roots_, signature = "(),(),(),(),(),(),(),(),(),()->(n)", otypes=[complex])
# find equilibria via roots
def find_roots(a0 = 1,b0 = 1 , c0 = 0 , d0 = 0 , a1 = 1, b1 = 1,c1 = 0,d1 = 0):
if (d0 != 0) and (d1 != 0):
α = []
α += [- b1 * (b0/d0)**3 ] # x^9
α += [0] # x^8
α += [3 * a0 * b0**2 * b0 / d0**3] # x^7
α += [+ 3 * c0*b0**2*b1/d0**3] # x^6
α += [- 3 * b0*a0**2*b1/d0**3] # x^5
α += [- 6 * a0 * b0 * c0 * b1 / d0**3] # x^4
α += [- 3 * b0 * c0**2 *b1 / d0 **3 + a0**3*b1/d0**3 + a1*b0/d0] #x^3 a1**3
α += [3 * a0 **2 * c0 * b1 / d0**3] # x^2
α += [3 * a0 * c0**2 * b1 / d0 **3 + d1 - a1 * a0 / d0] # x^1
α += [-a1*c0/ d0 + c1 + b1 *(c0/d0)**3] # x^0
x0 = roots(α)
x1 = 1 / d0 * (b0 * x0**3 - a0*x0 - c0)
if (d0 == 0):
x0_roots_ = roots([-b0,0,a0,c0])
x0 = []
x1 = []
for x0_root in x0_roots_:
x1 += [roots([-b1,0,a1,c1 + d1 * x0_root])]
x0 += [x0_root]*3
if (d1 == 0):# and (d0 >= 0):
x1_roots_ = roots([- b1,0,a1,c1])
x0 = []
x1 = []
for x1_root in x1_roots_:
x0 += [roots([-b0,0,a0,c0 + d0 * x1_root])]
x1 += [x1_root]*3
return (np.round(np.array(x0).flatten(),decimals=5),np.round(np.array(x1).flatten(),decimals=5))
# determine stability of equilibria by calculating eigenvalues
def stability(x0,x1,a0 = 1, b0 = 1,c0 = 0,d0 = 0,a1 = 1,b1 = 1,c1 = 0,d1 = 0):
D = np.sqrt((a0 + a1 - 3*(b1*x1**2 + b0*x0**2))**2 -4*((a0-3*b0*x0**2)*(a1-3*b1*x1**2)-d0*d1)+1J*0)
return ((a0 + a1 - 3*(b1*x1**2 + b0*x0**2)) - D)/2,((a0 + a1 - 3*(b1*x1**2 + b0*x0**2)) +D)/2
###############################################################################
###############################################################################
# definitions for axis ticks
ya = 0
major_xticks = np.linspace(value_c0[0], value_c0[anz-1],5)
major_yticks = np.linspace(value_c1[anz-1], value_c1[0],5)
# initialization of array nr_stable_all for number of stable fixed points
nr_stable_all = np.zeros((value_c1.shape[0],value_c0.shape[0]))
# initialization of counters
count_c0 = -1
count_c1 = -1
count_loop = 0
# open figure
fig = plt.figure(figsize = (10,10))
###############################################################################
# determine number of stable fixed points
###############################################################################
# loop over coupling stength d1
for d_1 in value_d1:
# loop over coupling strength d0
for d_0 in value_d0:
count_loop = count_loop+1
count_c0 = -1
# loop over control parameter c0
for c_0 in value_c0:
count_c0 = count_c0+1
count_c1 = -1
# loop over control parameter c1
for c_1 in value_c1:
count_c1 = count_c1+1
params = {"c0" : c_0, "c1" : c_1, "d0" : d_0,"d1" : d_1}
# find equilibira for given combination of control parameters
x0, x1 = find_roots(**params)
# determine stability / eigenvalues
l0, l1 = stability(x0, x1, **params)
# find real FP
such_real = np.logical_and(np.isreal(x0),np.isreal(x1))
# find stable FP
such_l = np.logical_and(np.real(l0[such_real])<0,np.real(l1[such_real])<0)
x0_stab = np.array(x0[such_real][such_l])
x1_stab = np.array(x1[such_real][such_l])
# count number of stable FP and save them in result array
nr_stable_all[count_c1,count_c0] = len(x0_stab)
#################################################################
# Plotten
#################################################################
# subplots
ax = fig.add_subplot(value_d1.shape[0],value_d0.shape[0],count_loop)
# definition of color map
cmap_s = np.array(['lightslategrey','lightgray','silver', 'darkgray', 'dimgray' ]) # colors
# choose color range depending on range of number of stable equilibria
crange = np.arange(np.min(nr_stable_all),np.max(nr_stable_all)+1,1)
cMap = ListedColormap(cmap_s[crange.astype(int)])
# plot result array nr_stable_all
plt.imshow(nr_stable_all, interpolation='nearest',cmap = cMap, extent = [value_c0[0],value_c0[anz-1],value_c1[anz-1],value_c1[0]], aspect='auto') # Plotten von nr_stable_all
# add intrinsic tipping points
plt.plot(np.zeros(len(value_c1))+c_0_crit,value_c1,'--', color = 'black',linewidth = 1)
plt.plot(value_c0,np.zeros(len(value_c0))+c_1_crit,'--', color = 'black',linewidth = 1)
# axis labels/ ticks and other properties
plt.xticks(())
plt.yticks(())
if count_loop > ((value_d1.shape[0] * value_d0.shape[0])-value_d0.shape[0]):
plt.xticks(major_xticks, fontsize = 15) # fontsize = 15 for single, fontsize = 10 for multiple
plt.xlabel(r"$c_1$""\n" r"$d_{21} = %s$"%d_0, fontsize = 15 )
if count_loop == (1+(ya*value_d0.shape[0])):
plt.yticks(major_yticks, fontsize = 15)
plt.ylabel(r"$d_{12} = %s$" "\n" r"$c_2$" %d_1, fontsize = 15)
ya = ya + 1
plt.gca().set_aspect('equal', adjustable='box')
for spine in plt.gca().spines.values():
spine.set_visible(False)
fig.tight_layout()
###############################################################################
# add quiver plots
###############################################################################
if quiver == 1:
d_0 = value_d0[0]
d_1 = value_d1[0]
###############################################################################
###############################################################################
# definition of axis ticks
major_xticks = np.array([-2, -1, 0, 1, 2])
major_yticks = np.array([-2, -1, 0, 1, 2])
ya = 0
# initialize counter
count_loop = 0
# loop over control parameter c1
for c_1 in value_c1_q:
# loop over control parameter c0
for c_0 in value_c0_q:
count_loop = count_loop+ 1
#######################################################################
# find Equilibria
#######################################################################
params = {"c0" : c_0, "c1" : c_1, "d0" : d_0,"d1" : d_1}
# find equilibira for given combination of control parameters
x0, x1 = find_roots(**params)
# determine stability / eigenvalues
l0, l1 = stability(x0, x1, **params)
# find real FP
such_real = np.logical_and(np.isreal(x0),np.isreal(x1))
# find stable FP
such_l = np.logical_and(np.real(l0[such_real])<0,np.real(l1[such_real])<0)
x0_stab = np.array(x0[such_real][such_l])
x1_stab = np.array(x1[such_real][such_l])
# find unstable FP / saddles
such_m = np.logical_or(np.real(l0[such_real]) > 0, np.real(l1[such_real]) > 0)
x0_ustab = np.array(x0[such_real][such_m])
x1_ustab = np.array(x1[such_real][such_m])
#######################################################################
# determine flow in phase space
#######################################################################
def fun(x):
return [a_0*x[0] - (b_0*(x[0]**3)) + c_0 + d_0*x[1], a_1*x[1] - (b_1*(x[1]**3)) + c_1 + d_1*x[0]]
raum_begin = -2
raum_end = 2
x0_flow = np.linspace(raum_begin,raum_end,1000)
x1_flow = np.linspace(raum_begin,raum_end,1000)
X0,X1 = np.meshgrid(x0_flow,x1_flow)
DX0,DX1 = fun([X0,X1])
speed = np.sqrt(DX0*DX0 + DX1*DX1)
max_value = speed.max()
speed_norm = [v/max_value for v in speed]
#################################################################################
# Plotten
#################################################################################
axins = inset_axes(ax, width="100%", height="100%",
bbox_to_anchor=(c_0/0.8-0.5*0.12/0.8, c_1/0.8-0.5*0.12/0.8, .12, .12),
bbox_transform=ax.transAxes, loc='lower left')
# flow in phase space via streamplot()
axins.streamplot(X0,X1,DX0,DX1, density=[0.6, 0.6], color = 'white', linewidth =0.4, arrowsize = 0.5 )
# normalised speed speed_norm via contourf()
CF = axins.contourf(X0,X1,speed_norm, levels = np.arange(np.min(speed_norm),np.max(speed_norm),0.025))
# stable FP
axins.plot(np.real(x0_stab), np.real(x1_stab), "o", color = "goldenrod",markersize = 4.5)
# unstable FP / saddle
axins.plot(np.real(x0_ustab), np.real(x1_ustab), "o", color = "orangered", markersize = 4.5)
################################################################################
# additional arrows/markers/text
# this is specific for certain parameter settings and may need to be adjusted
################################################################################
# for unidirectional example (Figure 5 in manuscript)
if F_add == "Figure_5":
# add markers
if c_0 == value_c0_q[0] and c_1 == value_c1_q[0]:
axins.scatter(x0_stab[x0_stab < 0], x1_stab[x0_stab < 0], s=100, marker = 'o', facecolors='none', edgecolors='lime', linewidth = 3)
such00 = np.logical_and(np.real(x0_stab) < 0, np.real(x1_stab < 0))
axins.scatter(np.real(x0_stab[such00]), np.real(x1_stab[such00]), s=300, marker = 'p', facecolors='none', edgecolors='deeppink', linewidth = 3)
such10 = np.logical_and(np.real(x0_stab)> 0, np.real(x1_stab)< 0)
axins.scatter(np.real(x0_stab[such10]), np.real(x1_stab[such10]), s=100, marker = 's', facecolors='none', edgecolors='yellow', linewidth = 3)
# add arrows
if c_0 == value_c0_q[1] and c_1 == value_c1_q[0]:
such10 = np.logical_and(np.real(x0_stab) > 0, np.real(x1_stab) < 0)
axins.annotate("", xy = (np.real(x0_stab[such10])-0.25, np.real(x1_stab[such10])-0.45), xytext = (-1,-1.35), arrowprops=dict(facecolor = 'lime', edgecolor = 'none', width = 3, headwidth = 8) )
such11 = np.logical_and(np.real(x0_stab) > 0,
|
np.real(x1_stab)
|
numpy.real
|
# coding=utf-8
# Copyright 2018 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sufficient_input_subsets.sis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from sufficient_input_subsets import sis
# Function that returns the L2 norm over each set of coordinates in the batch.
_F_L2 = lambda batch_coords: np.linalg.norm(batch_coords, ord=2, axis=-1)
# Function that returns the sum over each array in the batch.
_F_SUM = lambda batch: np.array([np.sum(arr) for arr in batch])
# Function that computes the dot product between a known vector ([1, 2, 0, 1])
# and each array in the batch (analagous to linear regression).
_LINREGRESS_THETA = np.array([1, 2, 0, 1])
_F_LINREGRESS = lambda bt: np.array([np.dot(_LINREGRESS_THETA, b) for b in bt])
class SisTest(parameterized.TestCase):
def test_import(self):
self.assertIsNotNone(sis)
def _assert_backselect_stack_equal(self, actual_backselect_stack,
expected_backselect_stack):
"""Raises an AssertionError if two backselect stacks are not equal."""
if not expected_backselect_stack: # expected empty stack
np.testing.assert_equal(actual_backselect_stack,
expected_backselect_stack)
return
actual_idxs, actual_values = zip(*actual_backselect_stack)
expected_idxs, expected_values = zip(*expected_backselect_stack)
if not (np.array_equal(actual_idxs, expected_idxs) and
np.allclose(actual_values, expected_values)):
raise AssertionError(
'Backselect stacks not equal. Got %s, expected %s.' %
(str(actual_backselect_stack), str(expected_backselect_stack)))
@parameterized.named_parameters(
dict(
testcase_name='sis len 1',
sis_result=sis.SISResult(
sis=np.array([[0]]),
ordering_over_entire_backselect=np.array([[2], [1], [3], [0]]),
values_over_entire_backselect=np.array([10.0, 8.0, 5.0, 0.0]),
mask=np.array([True, False, False, False]),
),
expected_len=1),
dict(
testcase_name='sis, 2-dim idxs, len 3',
sis_result=sis.SISResult(
sis=np.array([[0, 1], [1, 2], [2, 3]]),
ordering_over_entire_backselect=np.array([[2], [1], [3], [0]]),
values_over_entire_backselect=np.array([10.0, 8.0, 5.0, 0.0]),
mask=np.array([True, False, False, False]),
),
expected_len=3),
)
def test_sisresult_len(self, sis_result, expected_len):
actual_len = len(sis_result)
self.assertEqual(actual_len, expected_len)
@parameterized.named_parameters(
dict(
testcase_name='sis equal',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=True,
),
dict(
testcase_name='sis not equal, values very slight different',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.000000001]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, differ on sis',
sis1=sis.SISResult(
sis=np.array([[2]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, differ on ordering',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[1], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, differ on values',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 5.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, fractional difference in values',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 5.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 10.01]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, differ on mask',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, False])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=False,
),
)
def test_sis_result_equality(self, sis1, sis2, expected):
if expected:
self.assertEqual(sis1, sis2)
self.assertEqual(sis2, sis1)
else:
self.assertNotEqual(sis1, sis2)
self.assertNotEqual(sis2, sis1)
@parameterized.named_parameters(
dict(
testcase_name='sis equal',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
expected=True,
),
dict(
testcase_name='sis equal, values very slight different',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.000000001]),
mask=np.array([False, True])),
expected=True,
),
dict(
testcase_name='sis not equal, values too different',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.01, 0.0]),
mask=np.array([False, True])),
expected=False,
),
dict(
testcase_name='sis not equal, different masks',
sis1=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True])),
sis2=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, False])),
expected=False,
),
)
def test_sis_result_approx_equality(self, sis1, sis2, expected):
if expected:
self.assertTrue(sis1.approx_equal(sis2))
self.assertTrue(sis2.approx_equal(sis1))
else:
self.assertFalse(sis1.approx_equal(sis2))
self.assertFalse(sis2.approx_equal(sis1))
@parameterized.named_parameters(
dict(testcase_name='2-dim', shape=(4, 3)),
dict(testcase_name='2-dim transposed', shape=(3, 4)),
dict(testcase_name='1-dim', shape=(3,)),
dict(testcase_name='3-dim', shape=(4, 3, 8)),
)
def test_make_empty_boolean_mask(self, shape):
actual_mask = sis.make_empty_boolean_mask(shape)
self.assertEqual(actual_mask.shape, shape)
self.assertTrue(np.all(actual_mask))
@parameterized.named_parameters(
dict(
testcase_name='2-dim mask over columns',
shape=(2, 3),
axis=0,
expected_shape=(1, 3)),
dict(
testcase_name='2-dim mask over columns, as tuple',
shape=(2, 3),
axis=(0,),
expected_shape=(1, 3)),
dict(
testcase_name='2-dim mask over rows',
shape=(2, 3),
axis=1,
expected_shape=(2, 1)),
dict(
testcase_name='2-dim mask over all',
shape=(2, 3),
axis=(0, 1),
expected_shape=(1, 1)),
dict(
testcase_name='3-dim mask over ax 1',
shape=(4, 5, 6),
axis=1,
expected_shape=(4, 1, 6)),
dict(
testcase_name='3-dim mask over ax (1, 2)',
shape=(4, 5, 6),
axis=(1, 2),
expected_shape=(4, 1, 1)),
)
def test_make_empty_boolean_mask_broadcast_over_axis(self, shape, axis,
expected_shape):
actual_mask = sis.make_empty_boolean_mask_broadcast_over_axis(shape, axis)
self.assertEqual(actual_mask.shape, expected_shape)
self.assertTrue(np.all(actual_mask))
@parameterized.named_parameters(
dict(
testcase_name='disjoint SIS-collection',
collection=[
sis.SISResult(
sis=np.array([[0], [1]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
),
sis.SISResult(
sis=np.array([[2], [3]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
),
]),)
def test_assert_sis_collection_disjoint(self, collection):
sis._assert_sis_collection_disjoint(collection)
@parameterized.named_parameters(
dict(
testcase_name='non-disjoint SIS-collection',
collection=[
sis.SISResult(
sis=np.array([[0], [1]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
),
sis.SISResult(
sis=np.array([[1], [2]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
),
]),)
def test_assert_sis_collection_disjoint_raises_error(self, collection):
with self.assertRaises(AssertionError):
sis._assert_sis_collection_disjoint(collection)
@parameterized.named_parameters(
dict(
testcase_name='1-dim idxs, 1 idx',
idx_array=np.array([[3]]),
expected_tuple=(np.array([0]), np.array([3]))),
dict(
testcase_name='1-dim idxs, 2 idxs',
idx_array=np.array([[1], [2]]),
expected_tuple=(np.array([0, 1]), np.array([1, 2]))),
dict(
testcase_name='2-dim idxs, 2 idxs',
idx_array=np.array([[0, 1], [1, 1]]),
expected_tuple=(np.array([0, 1]), np.array([0, 1]), np.array([1,
1]))),
dict(
testcase_name='3-dim idxs, 4 idxs',
idx_array=np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]),
expected_tuple=(np.array([0, 1, 2, 3]), np.array([1, 4, 7, 10]),
np.array([2, 5, 8, 11]), np.array([3, 6, 9, 12]))),
)
def test_transform_next_masks_index_array_into_tuple(self, idx_array,
expected_tuple):
actual_tuple = sis._transform_next_masks_index_array_into_tuple(idx_array)
self.assertLen(actual_tuple, len(expected_tuple))
for actual_column, expected_column in zip(actual_tuple, expected_tuple):
np.testing.assert_array_equal(actual_column, expected_column)
@parameterized.named_parameters(
dict(testcase_name='1-dim idxs, 1 idx', idx_array=np.array([1])),
dict(testcase_name='1-dim idxs, 2 idxs', idx_array=np.array([1, 2])),
dict(
testcase_name='3-dim idxs, 2 idxs',
idx_array=np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])),
)
def test_transform_next_masks_index_array_into_tuple_raises_error(
self, idx_array):
with self.assertRaises(TypeError):
_ = sis._transform_next_masks_index_array_into_tuple(idx_array)
@parameterized.named_parameters(
dict(
testcase_name='no values masked',
current_mask=np.array([True, True, True]),
expected_next_masks=np.array([[False, True,
True], [True, False, True],
[True, True, False]]),
expected_next_masks_idxs=np.array([[0], [1], [2]])),
dict(
testcase_name='partially masked',
current_mask=np.array([True, False, True]),
expected_next_masks=np.array([[False, False, True],
[True, False, False]]),
expected_next_masks_idxs=np.array([[0], [2]])),
dict(
testcase_name='partially masked 2',
current_mask=np.array([False, False, True]),
expected_next_masks=np.array([[False, False, False]]),
expected_next_masks_idxs=np.array([[2]])),
dict(
testcase_name='partially masked larger',
current_mask=np.array([True, True, False, True, True, False]),
expected_next_masks=np.array([
[False, True, False, True, True, False],
[True, False, False, True, True, False],
[True, True, False, False, True, False],
[True, True, False, True, False, False],
]),
expected_next_masks_idxs=np.array([[0], [1], [3], [4]])),
dict(
testcase_name='all values masked',
current_mask=np.array([False, False, False]),
expected_next_masks=np.array([]),
expected_next_masks_idxs=np.array([])),
dict(
testcase_name='(3, 1) input',
current_mask=np.array([[True], [True], [True]]),
expected_next_masks=np.array([[[False], [True], [True]],
[[True], [False], [True]],
[[True], [True], [False]]]),
expected_next_masks_idxs=np.array([[0, 0], [1, 0], [2, 0]])),
dict(
testcase_name='(1, 3) input',
current_mask=np.array([[True, True, True]]),
expected_next_masks=np.array([[[False, True, True]],
[[True, False, True]],
[[True, True, False]]]),
expected_next_masks_idxs=np.array([[0, 0], [0, 1], [0, 2]])),
dict(
testcase_name='(1, 3) input, partially masked',
current_mask=np.array([[True, False, True]]),
expected_next_masks=np.array([[[False, False, True]],
[[True, False, False]]]),
expected_next_masks_idxs=np.array([[0, 0], [0, 2]])),
dict(
testcase_name='(1, 3) input, all masked',
current_mask=np.array([[False, False, False]]),
expected_next_masks=np.array([]),
expected_next_masks_idxs=np.array([])),
dict(
testcase_name='(2, 2) input',
current_mask=np.array([[True, True], [True, True]]),
expected_next_masks=np.array([[[False, True], [True, True]],
[[True, False], [True, True]],
[[True, True], [False, True]],
[[True, True], [True, False]]]),
expected_next_masks_idxs=np.array([[0, 0], [0, 1], [1, 0], [1, 1]])),
)
def test_produce_next_masks(self, current_mask, expected_next_masks,
expected_next_masks_idxs):
actual_next_masks, actual_next_masks_idxs = sis._produce_next_masks(
current_mask)
np.testing.assert_array_equal(actual_next_masks, expected_next_masks)
np.testing.assert_array_equal(actual_next_masks_idxs,
expected_next_masks_idxs)
@parameterized.named_parameters(
dict(
testcase_name='1-dim, single mask',
input_to_mask=np.array([1, 2, 3, 4, 5]),
fully_masked_input=np.array([0, 0, 0, 0, 0]),
batch_of_masks=np.array([[False, True, False, True, True]]),
expected_masked_inputs=np.array([[0, 2, 0, 4, 5]])),
dict(
testcase_name='1-dim, multiple masks',
input_to_mask=np.array([1, 2, 3]),
fully_masked_input=np.array([0, 0, 0]),
batch_of_masks=np.array([[True, True, False], [True, True, True],
[False, False, False], [False, True,
False]]),
expected_masked_inputs=np.array([[1, 2, 0], [1, 2, 3], [0, 0, 0],
[0, 2, 0]])),
dict(
testcase_name='2-dim, single mask',
input_to_mask=np.array([[1, 2, 3], [4, 5, 6]]),
fully_masked_input=np.array([[0, 0, 0], [0, 0, 0]]),
batch_of_masks=np.array([[[True, False, False], [False, True,
True]]]),
expected_masked_inputs=np.array([[[1, 0, 0], [0, 5, 6]]])),
dict(
testcase_name='2-dim, multiple masks',
input_to_mask=np.array([[1, 2, 3], [4, 5, 6]]),
fully_masked_input=np.array([[0, 0, 0], [0, 0, 0]]),
batch_of_masks=np.array(
[[[True, True, True], [True, True, True]],
[[False, False, False], [False, False, False]],
[[True, False, True], [False, True, False]]]),
expected_masked_inputs=np.array([[[1, 2, 3], [4, 5, 6]],
[[0, 0, 0], [0, 0, 0]],
[[1, 0, 3], [0, 5, 0]]])),
dict(
testcase_name='1-dim, single mask, string inputs',
input_to_mask=np.array(['A', 'B', 'C', 'D']),
fully_masked_input=np.array(['-', '-', '-', '-']),
batch_of_masks=np.array([[False, True, False, True]]),
expected_masked_inputs=np.array([['-', 'B', '-', 'D']])),
)
def test_produce_masked_inputs(self, input_to_mask, fully_masked_input,
batch_of_masks, expected_masked_inputs):
actual_masked_inputs = sis.produce_masked_inputs(
input_to_mask, fully_masked_input, batch_of_masks)
np.testing.assert_array_equal(actual_masked_inputs, expected_masked_inputs)
@parameterized.named_parameters(
dict(
testcase_name='1-dim, single mask, no batch dimension',
input_to_mask=np.array([1, 2, 3]),
fully_masked_input=np.array([0, 0, 0]),
batch_of_masks=np.array([False, True, False])),)
def test_produce_masked_inputs_raises_error(
self, input_to_mask, fully_masked_input, batch_of_masks):
with self.assertRaises(TypeError):
_ = sis.produce_masked_inputs(input_to_mask, fully_masked_input,
batch_of_masks)
@parameterized.named_parameters(
dict(
testcase_name='L2 norm, 2-dim',
f=_F_L2,
current_input=np.array([1, 10]),
current_mask=np.array([True, True]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[(np.array([0]), 10), (np.array([1]), 0)]),
dict(
testcase_name='L2 norm, 2-dim, all masked',
f=_F_L2,
current_input=np.array([1, 10]),
current_mask=np.array([False, False]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[]),
dict(
testcase_name='L2 norm, 2-dim, reversed',
f=_F_L2,
current_input=np.array([10, 1]),
current_mask=np.array([True, True]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[(np.array([1]), 10), (np.array([0]), 0)]),
dict(
testcase_name='L2 norm, 2-dim, partially masked',
f=_F_L2,
current_input=np.array([10, 1]),
current_mask=np.array([False, True]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[(np.array([1]), 0)]),
dict(
testcase_name='L2 norm, 2-dim, partially masked, reversed',
f=_F_L2,
current_input=np.array([10, 1]),
current_mask=np.array([True, False]),
fully_masked_input=np.array([0, 0]),
expected_backselect_stack=[(np.array([0]), 0)]),
dict(
testcase_name='L2 norm, 3-dim, same value',
f=_F_L2,
current_input=np.array([10, 10, 10]),
current_mask=np.array([True, True, True]),
fully_masked_input=np.array([0, 0, 0]),
expected_backselect_stack=[(np.array([0]), np.sqrt(200)),
(np.array([1]), 10), (np.array([2]), 0)]),
dict(
testcase_name='L2 norm, 4-dim, diff values',
f=_F_L2,
current_input=np.array([0.1, 10, 5, 1]),
current_mask=np.array([True, True, True, True]),
fully_masked_input=np.array([0, 0, 0, 0]),
expected_backselect_stack=[(np.array([0]), np.sqrt(126)),
(np.array([3]), np.sqrt(125)),
(np.array([2]), 10), (np.array([1]), 0)]),
dict(
testcase_name='sum, 2x2 input, individual masking',
f=_F_SUM,
current_input=np.array([[10, 5], [2, 3]]),
current_mask=np.array([[True, True], [True, True]]),
fully_masked_input=np.array([[0, 0], [0, 0]]),
expected_backselect_stack=[(np.array([1, 0]), 18),
(np.array([1, 1]), 15),
(np.array([0, 1]), 10),
(np.array([0, 0]), 0)]),
dict(
testcase_name='sum, 2x2 input, mask broadcast over columns',
f=_F_SUM,
current_input=np.array([[10, 5], [2, 3]]),
current_mask=np.array([[True, True]]),
fully_masked_input=np.array([[0, 0], [0, 0]]),
expected_backselect_stack=[(np.array([0, 1]), 12),
(np.array([0, 0]), 0)]),
dict(
testcase_name='sum, 2x2 input, mask broadcast over rows',
f=_F_SUM,
current_input=np.array([[10, 5], [2, 3]]),
current_mask=np.array([[True], [True]]),
fully_masked_input=np.array([[0, 0], [0, 0]]),
expected_backselect_stack=[(np.array([1, 0]), 15),
(np.array([0, 0]), 0)]),
)
def test_backselect(self, f, current_input, current_mask, fully_masked_input,
expected_backselect_stack):
actual_backselect_stack = sis._backselect(f, current_input, current_mask,
fully_masked_input)
self._assert_backselect_stack_equal(actual_backselect_stack,
expected_backselect_stack)
@parameterized.named_parameters(
dict(
testcase_name='empty sis, threshold equals final value',
backselect_stack=[(np.array([0]), 1.0), (np.array([2]), 0.9),
(np.array([1]), 0.7), (np.array([3]), 0.6)],
threshold=0.6,
expected_sis=[]),
dict(
testcase_name='empty sis, threshold less than final value',
backselect_stack=[(np.array([0]), 1.0), (np.array([2]), 0.9),
(np.array([1]), 0.7), (np.array([3]), 0.6)],
threshold=0.5,
expected_sis=[]),
dict(
testcase_name='single element SIS, larger threshold',
backselect_stack=[(np.array([0]), 1.0), (np.array([2]), 0.9),
(np.array([1]), 0.7), (np.array([3]), 0.6)],
threshold=0.65,
expected_sis=[np.array([3])]),
dict(
testcase_name='one element SIS, threshold equals value',
backselect_stack=[(np.array([0]), 1.0), (np.array([2]), 0.9),
(np.array([1]), 0.7), (np.array([3]), 0.6)],
threshold=0.7,
expected_sis=[np.array([3])]),
dict(
testcase_name='two element SIS, threshold between values',
backselect_stack=[(np.array([0]), 1.0), (np.array([2]), 0.9),
(np.array([1]), 0.7), (np.array([3]), 0.6)],
threshold=0.8,
expected_sis=[np.array([3]), np.array([1])]),
dict(
testcase_name='three element SIS',
backselect_stack=[(np.array([0]), 1.0), (np.array([2]), 0.9),
(np.array([1]), 0.7), (np.array([3]), 0.6)],
threshold=0.99,
expected_sis=[np.array([3]),
np.array([1]),
np.array([2])]),
dict(
testcase_name='all element SIS',
backselect_stack=[(np.array([0]), 1.0), (np.array([2]), 0.9),
(np.array([1]), 0.7), (np.array([3]), 0.6)],
threshold=2.0,
expected_sis=[
np.array([3]),
np.array([1]),
np.array([2]),
np.array([0])
]),
)
def test_find_sis_from_backselect(self, backselect_stack, threshold,
expected_sis):
actual_sis = sis._find_sis_from_backselect(backselect_stack, threshold)
self.assertLen(actual_sis, len(expected_sis))
for actual_idx, expected_idx in zip(actual_sis, expected_sis):
np.testing.assert_array_equal(actual_idx, expected_idx)
@parameterized.named_parameters(
dict(
testcase_name='empty backselect_stack',
backselect_stack=[],
threshold=1.0),)
def test_find_sis_from_backselect_raises_error(self, backselect_stack,
threshold):
with self.assertRaises(ValueError):
_ = sis._find_sis_from_backselect(backselect_stack, threshold)
@parameterized.named_parameters(
dict(
testcase_name='L2 norm, 2-dim',
f=_F_L2,
threshold=1.0,
current_input=np.array([.1, 10]),
current_mask=np.array([True, True]),
fully_masked_input=np.array([0, 0]),
expected_sis_result=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True]),
)),
dict(
testcase_name='L2 norm, 2-dim, reversed',
f=_F_L2,
threshold=1.0,
current_input=np.array([10, .1]),
current_mask=np.array([True, True]),
fully_masked_input=np.array([0, 0]),
expected_sis_result=sis.SISResult(
sis=np.array([[0]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([True, False]),
)),
dict(
testcase_name='L2 norm, 3-dim',
f=_F_L2,
threshold=1.0,
current_input=np.array([.1, 10, 5]),
current_mask=np.array([True, True, True]),
fully_masked_input=np.array([0, 0, 0]),
expected_sis_result=sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [2], [1]]),
values_over_entire_backselect=np.array([np.sqrt(125), 10.0, 0.0]),
mask=np.array([False, True, False]),
)),
dict(
testcase_name='L2 norm, 3-dim, larger threshold',
f=_F_L2,
threshold=10.5,
current_input=np.array([.1, 10, 5]),
current_mask=np.array([True, True, True]),
fully_masked_input=np.array([0, 0, 0]),
expected_sis_result=sis.SISResult(
sis=np.array([[1], [2]]),
ordering_over_entire_backselect=np.array([[0], [2], [1]]),
values_over_entire_backselect=np.array([np.sqrt(125), 10.0, 0.0]),
mask=np.array([False, True, True]),
)),
dict(
testcase_name='L2 norm, 2-dim, all elms SIS',
f=_F_L2,
threshold=5.0,
current_input=np.array([3, 4]),
current_mask=np.array([True, True]),
fully_masked_input=np.array([0, 0]),
expected_sis_result=sis.SISResult(
sis=np.array([[1], [0]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([4.0, 0.0]),
mask=np.array([True, True]),
)),
dict(
testcase_name='L2 norm, 2-dim, no SIS',
f=_F_L2,
threshold=5.1,
current_input=np.array([3, 4]),
current_mask=np.array([True, True]),
fully_masked_input=np.array([0, 0]),
expected_sis_result=None),
dict(
testcase_name='L2 norm, 3-dim, no SIS',
f=_F_L2,
threshold=1000,
current_input=np.array([.1, 10, 5]),
current_mask=np.array([True, True, True]),
fully_masked_input=np.array([0, 0, 0]),
expected_sis_result=None),
dict(
testcase_name='L2 norm, 3-dim, partially masked',
f=_F_L2,
threshold=1.0,
current_input=np.array([.1, 10, 5]),
current_mask=np.array([True, False, True]),
fully_masked_input=np.array([0, 0, 0]),
expected_sis_result=sis.SISResult(
sis=np.array([[2]]),
ordering_over_entire_backselect=np.array([[0], [2]]),
values_over_entire_backselect=np.array([5.0, 0.0]),
mask=np.array([False, False, True]),
)),
dict(
testcase_name='L2 norm, 2-dim, all masked',
f=_F_L2,
threshold=1.0,
current_input=np.array([10, .1]),
current_mask=np.array([False, False]),
fully_masked_input=np.array([0, 0]),
expected_sis_result=None),
dict(
testcase_name='sum, (2, 2), individual masking, no initial masked',
f=_F_SUM,
threshold=4.0,
current_input=np.array([[10, 5], [2, 3]]),
current_mask=np.array([[True, True], [True, True]]),
fully_masked_input=np.array([[0, 0], [0, 0]]),
expected_sis_result=sis.SISResult(
sis=np.array([[0, 0]]),
ordering_over_entire_backselect=np.array([[1, 0], [1, 1], [0, 1],
[0, 0]]),
values_over_entire_backselect=np.array([18.0, 15.0, 10.0, 0.0]),
mask=np.array([[True, False], [False, False]]),
)),
dict(
testcase_name='sum, (2, 2), individual masking, broadcast over cols',
f=_F_SUM,
threshold=4.0,
current_input=np.array([[10, 5], [2, 13]]),
current_mask=np.array([[True, True]]),
fully_masked_input=np.array([[0, 0], [0, 0]]),
expected_sis_result=sis.SISResult(
sis=np.array([[0, 1]]),
ordering_over_entire_backselect=np.array([[0, 0], [0, 1]]),
values_over_entire_backselect=np.array([18.0, 0.0]),
mask=np.array([[False, True]]),
)),
dict(
testcase_name='sum, (2, 2), individual masking, broadcast over rows',
f=_F_SUM,
threshold=4.0,
current_input=np.array([[10, 5], [2, 13]]),
current_mask=np.array([[True], [True]]),
fully_masked_input=np.array([[0, 0], [0, 0]]),
expected_sis_result=sis.SISResult(
sis=np.array([[1, 0]]),
ordering_over_entire_backselect=np.array([[0, 0], [1, 0]]),
values_over_entire_backselect=np.array([15.0, 0.0]),
mask=np.array([[False], [True]]),
)),
)
def test_find_sis(self, f, threshold, current_input, current_mask,
fully_masked_input, expected_sis_result):
actual_sis_result = sis.find_sis(f, threshold, current_input, current_mask,
fully_masked_input)
self.assertEqual(actual_sis_result, expected_sis_result)
@parameterized.named_parameters(
dict(
testcase_name='L2 norm, 2-dim, no SIS',
f=_F_L2,
threshold=1000,
initial_input=np.array([.1, 10]),
fully_masked_input=np.array([0, 0]),
expected_sis_collection=[]),
dict(
testcase_name='L2 norm, 2-dim, 1 SIS',
f=_F_L2,
threshold=1.0,
initial_input=np.array([.1, 10]),
fully_masked_input=np.array([0, 0]),
expected_sis_collection=[
sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True]),
),
]),
dict(
testcase_name='L2 norm, 2-dim, 2 SIS',
f=_F_L2,
threshold=0.1,
initial_input=np.array([.1, 10]),
fully_masked_input=np.array([0, 0]),
expected_sis_collection=[
sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([10.0, 0.0]),
mask=np.array([False, True]),
),
sis.SISResult(
sis=np.array([[0]]),
ordering_over_entire_backselect=np.array([[0]]),
values_over_entire_backselect=np.array([0.0]),
mask=np.array([True, False]),
),
]),
dict(
testcase_name='L2 norm, 2-dim, 2 SIS, reverse order',
f=_F_L2,
threshold=0.1,
initial_input=np.array([10, .1]),
fully_masked_input=np.array([0, 0]),
expected_sis_collection=[
sis.SISResult(
sis=np.array([[0]]),
ordering_over_entire_backselect=np.array([[1], [0]]),
values_over_entire_backselect=([10.0, 0.0]),
mask=np.array([True, False]),
),
sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[1]]),
values_over_entire_backselect=np.array([0.0]),
mask=np.array([False, True]),
),
]),
dict(
testcase_name='L2 norm, 2-dim, 1 SIS (both elms)',
f=_F_L2,
threshold=4.5,
initial_input=np.array([3, 4]),
fully_masked_input=np.array([0, 0]),
expected_sis_collection=[
sis.SISResult(
sis=np.array([[1], [0]]),
ordering_over_entire_backselect=np.array([[0], [1]]),
values_over_entire_backselect=np.array([4.0, 0.0]),
mask=np.array([True, True]),
),
]),
dict(
testcase_name='L2 norm, 3-dim, 2 SIS',
f=_F_L2,
threshold=1.0,
initial_input=np.array([.1, 10, 5]),
fully_masked_input=np.array([0, 0, 0]),
expected_sis_collection=[
sis.SISResult(
sis=np.array([[1]]),
ordering_over_entire_backselect=np.array([[0], [2], [1]]),
values_over_entire_backselect=np.array(
[np.sqrt(125), 10.0, 0.0]),
mask=np.array([False, True, False]),
),
sis.SISResult(
sis=np.array([[2]]),
ordering_over_entire_backselect=np.array([[0], [2]]),
values_over_entire_backselect=np.array([5.0, 0.0]),
mask=np.array([False, False, True]),
),
]),
dict(
testcase_name='L2 norm, 3-dim, 3 SIS',
f=_F_L2,
threshold=1.0,
initial_input=np.array([.9, .9, 10, 5]),
fully_masked_input=np.array([0, 0, 0, 0]),
expected_sis_collection=[
sis.SISResult(
sis=
|
np.array([[2]])
|
numpy.array
|
import csv
import os
import numpy as np
import cv2
import matplotlib.image as mpimg
from keras.models import Sequential
from keras.models import Model
import matplotlib.pyplot as plt
from keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D,Cropping2D
from keras.layers.pooling import MaxPooling2D
from keras import regularizers
import sklearn
file_url='../data/'
####### 0. Build generator + datasets
samples = []
with open(file_url+'driving_log.csv') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for line in reader:
samples.append(line)
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
sklearn.utils.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
for i in range(3):
im=mpimg.imread(file_url+batch_sample[i])
images.append(im)
angle=float(batch_sample[3])
if i==1:
angle=np.copy(angle)+0.2
if i==2:
angle=
|
np.copy(angle)
|
numpy.copy
|
"""
This script produces the Figures 13 from Amaral+2021, the
pearson correlation between stellar and planetary mass and
surface water loss percentage.
@autor: <NAME>, Universidad Nacional Autónoma de México, 2021
@email: <EMAIL>
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import pandas as pd
import statsmodels as st
import seaborn as sns
import numpy as np
import matplotlib as mpl
from matplotlib import cm
from collections import OrderedDict
import sys
import os
import subprocess
# Check correct number of arguments
if (len(sys.argv) != 2):
print('ERROR: Incorrect number of arguments.')
print('Usage: '+sys.argv[0]+' <pdf | png>')
exit(1)
if (sys.argv[1] != 'pdf' and sys.argv[1] != 'png'):
print('ERROR: Unknown file format: '+sys.argv[1])
print('Options are: pdf, png')
exit(1)
plt.style.use('ggplot')
f1 = './rgstellar.txt'
f2 = './rgflare.txt'
f3 = './stopstellar.txt'
f4 = './stopflare.txt'
star1 = np.genfromtxt(f1, usecols=0 ,unpack=True)
water1 = np.genfromtxt(f1, usecols=1 ,unpack=True)
planet1 = np.genfromtxt(f1, usecols=2 ,unpack=True)
waterfinal1 = np.genfromtxt(f1, usecols=3 ,unpack=True)
star2 =
|
np.genfromtxt(f2, usecols=0 ,unpack=True)
|
numpy.genfromtxt
|
import numpy as np
from pandas import Series, DataFrame
from scipy.signal import savgol_filter, boxcar
from scipy import interpolate
from matplotlib import pyplot as plt
from numpy import abs
from numpy import array, poly1d, polyfit
def peak_detector(tic, max_tic):
dy = derivate(tic)
indexes = np.where((np.hstack((dy, 0)) < 0) & (np.hstack((0, dy)) > 0))[0]
for index in indexes:
start_index = find_minima(index, tic, right=False)
final_index = find_minima(index, tic)
yield (start_index, index, final_index)
def find_nearest_scan(data, nodes):
array_data =
|
array(nodes)
|
numpy.array
|
import tkinter as tk
import tkinter.filedialog as fd
import tkinter.messagebox as mb
import numpy as np
import pyknotid
import pyknotid.spacecurves as pkidsc
from pyknotid.spacecurves import Knot
import sympy
import csv
import os
# set initial values
gc_str = ""
fileopen = False
t = sympy.Symbol("t") # for use in displaying polynomial invariant
### GENERAL FUNCTIONS ###
# determine equation of line
def defineline(x1, y1, x2, y2):
xbound = [x1, x2]
ybound = [y1, y2]
if x1 == x2:
slope = None
else:
slope = (y2 - y1) / (x2 - x1)
return [xbound, ybound, slope]
# find the y-intercept of a given line
def findyintercept(x, y, m):
b = y - (m * x)
return b
# check if intersect between two lines falls within their range
def checkrange(xbound1, xbound2, ybound1, ybound2, intersect):
line1x, line2x, line1y, line2y = [False] * 4
# check xrange of first line
if intersect[0] > min(xbound1) and intersect[0] < max(xbound1):
line1x = True
# check x range of second line
if intersect[0] > min(xbound2) and intersect[0] < max(xbound2):
line2x = True
# check y range of first line
if intersect[1] > min(ybound1) and intersect[1] < max(ybound1):
line1y = True
# check y range of second line
if intersect[1] > min(ybound2) and intersect[1] < max(ybound2):
line2y = True
if line1x and line2x and line1y and line2y == True:
return True
else:
return False
# TODO
# check if two lines intersect
def checkintersect(xbound1, xbound2, ybound1, ybound2, slope1, slope2):
# check if line 1 is vertical
if slope1 == None:
# in this case, the two lines intersect everwhere
if slope2 == None:
# not correct but sufficient for the purposes of this script
return None
# otherwise, only line 1 is vertical
else:
b2 = findyintercept(xbound2[0], ybound2[0], slope2)
xintersect = xbound1[0]
yintersect = slope2 * xintersect + b2
# check if intersect in range
if (
checkrange(xbound1, xbound2, ybound1, ybound2, [xintersect, yintersect])
== True
):
return [xintersect, yintersect]
else:
return None
# check if line 2 is vertical
elif slope2 == None:
# previous conditial checked if line 1 was vertical
b1 = findyintercept(xbound1[0], ybound1[0], slope1)
xintersect = xbound2[0]
yintersect = slope1 * xintersect + b1
# check if intersect in range
if (
checkrange(xbound1, xbound2, ybound1, ybound2, [xintersect, yintersect])
== True
):
return [xintersect, yintersect]
else:
return None
# if neither line is vertical
else:
b1 = findyintercept(xbound1[0], ybound1[0], slope1)
b2 = findyintercept(xbound2[0], ybound2[0], slope2)
xintersect = (b2 - b1) / (slope1 - slope2)
yintersect = slope1 * xintersect + b1
# check if intersect in range
if (
checkrange(xbound1, xbound2, ybound1, ybound2, [xintersect, yintersect])
== True
):
return [xintersect, yintersect]
else:
return None
# determine which lines to check for intersection
# this function ignores the end point of the lines which is good for our purposes
# I am not sure if this is computationally more efficient than just checking for
# intersections with every line
def potentialintersection(xbound, ybound, linearray):
# take the bounds of one line and the bounds of the other lines stored in an array
# define bounding box
left = min(xbound)
right = max(xbound)
bottom = min(ybound)
top = max(ybound)
# empty array to store lines with potential intersections
potintersections = []
# each element of linearray is a list which contains the xbounds, ybounds and slope
for line in linearray:
xmin = min(line[0])
xmax = max(line[0])
ymin = min(line[1])
ymax = max(line[1])
# check if the line is in the bounding box at all
if (xmax > left and xmax < right) or (xmin > left and xmin < right):
if (ymax > bottom and ymax < top) or (ymin > bottom and ymin < top):
potintersections.append(line)
return potintersections
# determine which of point in an array is closer to the point of interest
def pythagdistance(x0, y0, points):
# points should be an array of [x,y] coordinates
distlist = []
for p in points:
distlist.append(
np.sqrt((
|
np.abs(x0)
|
numpy.abs
|
#- This simulation with gpu (with the below parameters) took 14h
#- In this experiment we also set lr from 0.01 to 0.0025
# but here with masking is like the no masking case (exp2a-d) with 0.03 to 0.0075
# thefactor of corecction is approx 3.
# So: probably we should set the next time for masking case: lr=0.005-0.001
# ssh no100
# screen -S exp1d
# cd /export/lv4/user/jfajardourbina/dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/notebooks/convlstm/predict_displacement
# conda activate phd_parcelsv221
# python3 convlstm_dws_exp1d_wind_bathymetry_to_pred_displacement_standarized_3std_train_test_adaptive_lr_masking_loss_batch_size_continuous_lstm_states_gpu.py &
# to comeback: screen -r exp1d
import os
import sys
import numpy as np
import torch
import torch.nn.functional
from torch.autograd import Variable
import matplotlib.pyplot as plt
from copy import deepcopy
import matplotlib as mpl
import glob
import xarray as xr
import dask as da
from tqdm import tqdm
# import convlstm---
home_dir = "/export/lv4/user/jfajardourbina/"
ml_dir=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/"
convlstm_model_dir=f"{ml_dir}src"
sys.path.append(convlstm_model_dir)
import convlstm
import convlstm_continuous_states
#path to files---
home_dir = "/export/lv4/user/jfajardourbina/"
ml_dir=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/lagrangian_simulation_36years/machine_learning_github/Lagrangian_ML/"
dir_post_proc_data=f"{ml_dir}post_proc_data/"
#
dir_displacement="net_displacement/"
dir_interp_wind="wind/"
dir_interp_bathymetry="bathymetry/"
file_interp_bathymetry="bathymetry_interp_to_particle_grid_for_convlstm.nc"
#for output after train and test---
exp="exp1d"
dir_convlstm_model_out="ouput_convlstm_model_data/"
case_train="training"; file_out_train=f"{exp}_train.nc"
case_test="testing"; file_out_test=f"{exp}_test.nc"
#for plotting---
#dir_wind="{home_dir}dws_ulf_getm_2D_depth_avg/data/atmosphere/" #winds
dir_dws_bound=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/analysis_eulerian_data_36years/data_dws_boundaries/" #DWS boundarie with contour0
file_dws_bound0="dws_boundaries_contour0.nc"
dir_topo=f"{home_dir}dws_ulf_getm_2D_depth_avg/experiments_post_proc/analysis_eulerian_data_36years/data_bathy_grid/" #topo data
file_topo="DWS200m.2012.v03.nc"
#
#parameters
#npa_per_dep=12967 #number of particles per deployment
m2=int(12.42*3600+2) #period in seconds
#dx=400/1e3;dy=400/1e3 #particle grid reso
#
#open DWS contours
dsb0=xr.open_dataset(dir_dws_bound+file_dws_bound0)
bdr_dws0=dsb0.bdr_dws.values #points that define DWS with contour0
#
#open topo file
dsto=xr.open_dataset(dir_topo+file_topo)
xct0=dsto.xc.min().values/1e3; yct0=dsto.yc.min().values/1e3 #=(0,0)
mask_topo=dsto.bathymetry.copy(); mask_topo=xr.where(np.isfinite(mask_topo),1,0) #mask ocean=1, land=0
#Hyper-parameter of neural network---
input_channels = 3 # number of input channels: u10,v10 wind
output_channels = 2 #number of output channels: dx, dy displacement
#hidden_channels = [6, 3, output_channels] # the last digit is the output channel of each ConvLSTMCell (so we are using 3 layers)
hidden_channels = [4, output_channels] # the last digit is the output channel of each ConvLSTMCell (so we are using 2 layers)
kernel_size = 3 #3, does not work with kernel=2
mini_batch_size = 25 #Amount of samples for performing forward-backward propagation during 1 iteration (total iterations per epoch = train samples / mini_batch_size)
#mini_batch_size = 706 #aproox 1year. Amount of samples for performing forward-backward propagation during 1 iteration (total iterations per epoch = train samples / mini_batch_size)
#mini_batch_size = -1 #use all data for performing forward-backward propagation at once during 1 epoch. Memory issues for large samples during training.
num_epochs = 200 #3000
#learning parameters:
adaptive_learning=False #False: lr=learning_rate; True: lr=[learning_rate - learning_rate_end]
#learning_rate = 0.0025 #too slow convergence if used since the beginning of simulation
learning_rate = 0.01 #initial lr
learning_rate_end=0.0025 #final lr
save_data_from_model = True #save some outputs from model in NetCDF Format
#
std_fac_dis=3 #standarize using "std_fac_dis" times the standard deviation
std_fac_wind=3 #standarize using "std_fac_wind" times the standard deviation
#
#if: hidden_channels = [6, 3, output_channels]
#the model will create 6GB of data in GPU memory after 400 training time steps
#so, after nt_steps=2000 (around 3y) we will exceed the mem limit of GPU (around 30GB)
#2.5years for training needs approx 26GB for the above model and with: input_channels = 2; output_channels = 2; kernel_size = 3
#this is because in every time step the graph of computations is stored in the cummulative lost (after calling the model), to perform then a backpropagation
#for this reason is sometimes important to use mini_batches and perform backpropagation after finish with 1.
#then use the next mini_batch and so on until using all the data and finishes 1 eppoch.
#open files----
#open net displacement files---
files_displacement=sorted(glob.glob(f'{dir_post_proc_data}{dir_displacement}/*.nc',recursive=True))
#files_displacement=files_displacement[29:31] #2009-2010
#concat all the files
dsdis=xr.open_mfdataset(files_displacement,concat_dim="time",parallel='True',chunks={'time': -1},
decode_cf=True, decode_times=True)#.load() #this are default decodes
#data_vars='minimal', coords='minimal', compat='override') #takes 1second more with this, see https://xarray.pydata.org/en/stable/io.html#reading-multi-file-datasets
#open interp files for wind---
files_interp_wind=sorted(glob.glob(f'{dir_post_proc_data}{dir_interp_wind}/*.nc',recursive=True))
#files_interp_wind=files_interp_wind[29:31]
#concat all the files
dswi=xr.open_mfdataset(files_interp_wind,concat_dim="time",parallel='True',chunks={'time': -1},
decode_cf=True, decode_times=True)#.load() #this are default decodes
#data_vars='minimal', coords='minimal', compat='override') #takes 1second more with this, see https://xarray.pydata.org/en/stable/io.html#reading-multi-file-datasets
#open interp bathymetry---
dsh=xr.open_dataset(dir_post_proc_data+dir_interp_bathymetry+file_interp_bathymetry).load()
#set bathymetry as input data---
in_h=dsh.bathymetry.values
#set training data---
#
#inputs---
#in_tini_train="2004-01-01"; in_tend_train="2009-12-31"
in_tini_train="2009-11-01"; in_tend_train="2009-12-31"
#u10,v10 wind in model coordinates---
#dswi_train=dswi.sel(time=slice("2009-06-01","2011-12-31"))#,x=slice(70000,80000),y=slice(60000,70000))
dswi_train=dswi.sel(time=slice(in_tini_train,in_tend_train))#,x=slice(60000,80000),y=slice(60000,70000))
in_u10_train,in_v10_train=da.compute(dswi_train.u10.values.astype('float32'),dswi_train.v10.values.astype('float32'))
#
#outputs---
#out_tini_train="2004-01-01"; out_tend_train="2009-12-31"
out_tini_train="2009-11-01"; out_tend_train="2009-12-31"
#dx,dy displacement in model coordinates---
#dsdis_train=dsdis.sel(time=slice("2009-06-01","2011-12-31"))#,x=slice(70000,80000),y=slice(60000,70000))#dsdis_train=dsdis_train.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
dsdis_train=dsdis.sel(time=slice(out_tini_train,out_tend_train))#,x=slice(70000,80000),y=slice(60000,70000))#dsdis_train=dsdis_train.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
out_dx_train,out_dy_train=da.compute(dsdis_train.dx.values.astype('float32'),dsdis_train.dy.values.astype('float32'))
#
times_train=dsdis_train.time.values
nt_train,ny,nx=out_dx_train.shape
print(times_train[[0,-1]],out_dx_train.shape)
#set testing data---
#
#inputs---
in_tini_test="2010-01-01"; in_tend_test="2010-02-28"
#u10,v10 wind in model coordinates---
#dswi_test=dswi.sel(time=slice("2012-01-01",None))#,x=slice(70000,80000),y=slice(60000,70000))
dswi_test=dswi.sel(time=slice(in_tini_test,in_tend_test))#,x=slice(60000,80000),y=slice(60000,70000))
in_u10_test,in_v10_test=da.compute(dswi_test.u10.values.astype('float32'),dswi_test.v10.values.astype('float32'))
#
#outputs---
out_tini_test="2010-01-01"; out_tend_test="2010-02-28"
#dx,dy displacement in model coordinates---
#dsdis_test=dsdis.sel(time=slice("2012-01-01",None))#,x=slice(70000,80000),y=slice(60000,70000))#dsdis_test=dsdis_test.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
dsdis_test=dsdis.sel(time=slice(out_tini_test,out_tend_test))#,x=slice(70000,80000),y=slice(60000,70000))#dsdis_test=dsdis_test.fillna(0) #fill nan with 0s in case displacement is on land (not neccesary for the above small domain)
out_dx_test,out_dy_test=da.compute(dsdis_test.dx.values.astype('float32'),dsdis_test.dy.values.astype('float32'))
#
times_test=dsdis_test.time.values
nt_test,ny,nx=out_dx_test.shape
print(times_test[[0,-1]],out_dx_test.shape)
#for plotting maps of predictions---
#mask: ocean=1, land=nan
mask=out_dx_train[0,...]*1.; mask[np.isfinite(mask)]=1.; mask[np.isnan(mask)]=np.nan
xx=dsdis_train.x/1e3; yy=dsdis_train.y/1e3; xx,yy=np.meshgrid(xx,yy)
#for masking values on land when computing loss---
mask_torch=torch.tensor(np.where(np.isnan(mask),0,1)[np.newaxis,np.newaxis,...]*np.ones((output_channels,ny,nx)))*1.
mask_numpy=mask_torch.numpy()*1.
def standarization(var,fac=3):
mean=np.nanmean(var)
std=np.nanstd(var)*fac #using 3 times std (seems to works better than just 1std)
var[np.isnan(var)]=0. #fill with 0 in case of nan. This is modifing our input array
return ((var-mean)/std),mean,std #.astype('float32')
def de_standarization(var,mean,std):
return (var*std+mean) #.astype('float32')
def min_max_normalization(var):
minn=np.nanmin(var); maxx=np.nanmax(var)
var[np.isnan(var)]=0. #fill with 0 in case of nan. This is modifing our input array
return (var-minn)/(maxx-minn),minn,maxx #.astype('float32')
def de_min_max_normalization(var,minn,maxx):
return var*(maxx-minn)+minn #.astype('float32')
#min-max normalization of data---
#input: bathymetry
in_h, in_h_min, in_h_max = min_max_normalization(in_h)
#standarization of data---
#training---
#inputs
in_u10_train, in_u10_mean_train, in_u10_std_train = standarization(in_u10_train,std_fac_wind)
in_v10_train, in_v10_mean_train, in_v10_std_train = standarization(in_v10_train,std_fac_wind)
#outputs
out_dx_train, out_dx_mean_train, out_dx_std_train = standarization(out_dx_train,std_fac_dis)
out_dy_train, out_dy_mean_train, out_dy_std_train = standarization(out_dy_train,std_fac_dis)
print("train info:")
print(f"steps={nt_train}; (ny,nx)=({ny},{nx})")
print("input")
print(f"u10_mean, u10_std*{std_fac_wind}, v10_mean, v10_std*{std_fac_wind}:")
print(in_u10_mean_train, in_u10_std_train, in_v10_mean_train, in_v10_std_train)
print("output")
print(f"dx_mean, dx_std*{std_fac_dis}, dy_mean, dy_std*{std_fac_dis}:")
print(out_dx_mean_train, out_dx_std_train, out_dy_mean_train, out_dy_std_train)
print()
#testing---
#inputs
in_u10_test, in_u10_mean_test, in_u10_std_test = standarization(in_u10_test,std_fac_wind)
in_v10_test, in_v10_mean_test, in_v10_std_test = standarization(in_v10_test,std_fac_wind)
#outputs
out_dx_test, out_dx_mean_test, out_dx_std_test = standarization(out_dx_test,std_fac_dis)
out_dy_test, out_dy_mean_test, out_dy_std_test = standarization(out_dy_test,std_fac_dis)
print("test info:")
print(f"steps={nt_test}; (ny,nx)=({ny},{nx})")
print("input")
print(f"u10_mean, u10_std*{std_fac_wind}, v10_mean, v10_std*{std_fac_wind}:")
print(in_u10_mean_test, in_u10_std_test, in_v10_mean_test, in_v10_std_test)
print("output")
print(f"dx_mean, dx_std*{std_fac_dis}, dy_mean, dy_std*{std_fac_dis}:")
print(out_dx_mean_test, out_dx_std_test, out_dy_mean_test, out_dy_std_test)
print()
#MODEL configuration and helper functions---
#loss functions with and without masking---
class initialization:
def __init__(self, masking=False, mask=None):
self.masking=masking
self.mask=mask
class loss_function:
class mse(initialization):
#we call this function without using its name
def __call__(self, predict=torch.zeros(1), target=torch.zeros(1)):
if self.masking:
#masking land points---
#
#- the masking affect:
# the value of the total loss (that only includes points inside DWS) and hence the last gradient of the backpropagation
# loss=sum(prediction-output)**2/N; dlos/dpred=2*sum(prediction-output)/N,
# with masking N is smaller because we dont consider land points, so seems that its like increasing the overall lr
#- similar effect to masking without using it:
# if we use another custom loss like torch.nn.MSELoss(reduction='sum')
# masking is irrelevant since we dont divide with N
#
#disregard land points (=0) for the mean, so the loss value will increase
#mask_torch: 0=land, 1=ocean
#however, because we only have particles inside DWS, mask_torch=0 for the land and all points outside DWS
loss_val = torch.mean(((predict-target)[self.mask==1])**2)
else:
#original---
loss_val = torch.mean((predict-target)**2) #=torch.nn.MSELoss()
#
return loss_val
class mse_numpy(initialization):
#we call this function without using its name
def __call__(self, predict=np.zeros(1), target=np.zeros(1)):
if self.masking:
#masking land points---
#disregard land points (=0) for the mean, so the loss value will increase
#probably because land points decrease the loss, the model don't perform so well
#mask_torch: 0=land, 1=ocean
#however, because we only have particles inside DWS, mask_torch=0 all points except inside it
loss_val = np.mean(((predict-target)[self.mask==1])**2)
else:
#original---
loss_val = np.mean((predict-target)**2) #=torch.nn.MSELoss()
#
return loss_val
#get times for backward propagation when using mini-batch---
def get_times_for_backward(nt,mini_batch_size=30):
#times relative to t=0
if nt < mini_batch_size: mini_batch_size = nt
t_last = np.mod(nt,mini_batch_size) #remainder of nt
t_backward=np.arange(mini_batch_size,nt+1,mini_batch_size)-1
#iterations = int(nt/mini_batch_size)
#t_backward=np.arange(iterations)*mini_batch_size+mini_batch_size-1
if t_backward[-1]!=nt-1: t_backward[-1]+=t_last
return t_backward
#training---
def training(epoch,num_epochs,nt,t_backward,model):
# Clear stored gradient
model.zero_grad()
optimizer.zero_grad()
# loop through all timesteps
predict=[]; loss0=0. #; pred_bug=[]
for t in range(nt):
#stack data---
#
#old method using torch.autograd.Variable and .view()---
#data_in=np.stack((in_u10_train[t,...],in_v10_train[t,...]))
#data_out=np.stack((out_dx_train[t,...],out_dy_train[t,...]))
#data_in = torch.autograd.Variable(torch.Tensor(data_in).view(-1,input_channels,ny,nx)).to(device)
#data_out = torch.autograd.Variable(torch.Tensor(data_out).view(-1,input_channels,ny,nx)).to(device)
#
#new method using torch.tensor and np.newaxis (the same results as above)---
data_in = torch.tensor(np.stack((in_u10_train[t,...],
in_v10_train[t,...],
in_h),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
data_out = torch.tensor(np.stack((out_dx_train[t,...],
out_dy_train[t,...]),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
# Forward process and loss for:---
# - the entire batch (all the samples). Problems with memory.
# - mini-batch (subset of the full samples).
#
if t==0 or t in t_backward+1:
if t==0: # start hidden and cell states from a normal distribution
predict0, _ = model(data_in, 0)
mae0 = np.mean(abs(predict0-data_out).detach().cpu().numpy())
#mape0 = np.mean( abs((predict0-data_out)/data_out).detach().numpy() ) #problems with mape if denominator = 0
else: #use the last state of the previous mini-batch
if epoch == num_epochs-1: print(f"give init states to model at time-step: {t}")
#print(f"give init states to model at time-step: {t}")
predict0, _ = model(data_in, 0, states) #data_in=(1,input_channels,ny,nx) #predict0=(1,output_channels,ny,nx)
#loss
lossbp0 = loss_fn(predict0, data_out) #data_out=(1,output_channels,ny,nx)
tt0=t
#check if prediction uses random-init states after a backward propgation of a mini-batch
#if epoch == num_epochs-1: pred_bug.append(np.squeeze(predict0.detach().cpu().numpy()))
else:
if t in t_backward:
if epoch == num_epochs-1: print(f"getting states from model at time-step: {t}")
#print(f"getting states from model at time-step: {t}")
predict0, states = model(data_in, t-tt0)
else:
predict0, _ = model(data_in, t-tt0)
#loss
lossbp0 += loss_fn(predict0, data_out)
mae0 += np.mean(abs(predict0-data_out).detach().cpu().numpy())
#mape0 += np.mean( abs((predict0-data_out)/data_out).detach().numpy() )
#Backward propagation for:---
# - the entire batch (all the samples). Problems with memory.
# - mini-batch (subset of the full samples).
if t in t_backward:
if epoch == num_epochs-1:
print(f"performing backward propagation at time-step: {t}")
# Zero out gradient, else they will accumulate between epochs---
model.zero_grad()
optimizer.zero_grad()
# Backward pass---
lossbp0.backward()
# Update parameters---
optimizer.step() #to initiate gradient descent
# Zero out gradient again, in case starting the model for the next mini-batch
model.zero_grad()
optimizer.zero_grad()
#
loss0 += lossbp0.item(); del lossbp0
#cumulative loss from all the time steps (the loss we use for backward propagation)---
if epoch % 50 == 0:
print("Train epoch ", epoch, "; mean(MSE(t)) = ", loss0/nt*std_fac_dis**2, "; mean(MAE(t)) = ", mae0/nt*std_fac_dis)
#print(np.sum(abs((states[-1][0]-predict0).detach().cpu().numpy())))
# save lr
lr0=optimizer.param_groups[0]["lr"]
#predict train data for the last epoch, after updating model parameters
if epoch == num_epochs-1:
with torch.no_grad():
for t in range(nt):
data_in = torch.from_numpy(np.stack((in_u10_train[t,...],
in_v10_train[t,...],
in_h),axis=0)[np.newaxis,...]).to(device) #(1,input_channels,ny,nx)
predict0, _ = model(data_in, t) #data_in=(1,input_channels,ny,nx) predict=(1,output_channels,ny,nx)
predict0 = np.squeeze(predict0.detach().cpu().numpy()) #delete the first dim=1
predict.append(predict0) #save the predictions for the last epoch
predict=
|
np.array(predict)
|
numpy.array
|
from .interval import IntervalGoalEnv
from abc import ABC, abstractmethod
import numpy as np
import copy
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import math
#todo first run jsut the algorithm with the minimizer of collision along side to see what Q values it does create
#A space visualizer V value will be needed that(heat map)
env_dt = 0.02
#becomes obstacles in both steps returns the most current one as 2-dim array
def extract_most_current_obstacles(obstacles_array):
splitted = np.array(np.split(obstacles_array, len(obstacles_array)/4))
most_recent = splitted[0:len(splitted):2, :]
return most_recent
#b_bboxes is expected to be 2 dim array
def check_collisions(a_bbox, b_bboxes):
# b_min_x - a_max_x
d1x = (b_bboxes[:, 0] - b_bboxes[:, 2]) - (a_bbox[0]+a_bbox[2])
d1y = (b_bboxes[:, 1] - b_bboxes[:, 3]) - (a_bbox[1]+a_bbox[3])
d2x = (a_bbox[0] - a_bbox[2]) - (b_bboxes[:, 0] + b_bboxes[:, 2])
d2y = (a_bbox[1] - a_bbox[3]) - (b_bboxes[:, 1] + b_bboxes[:, 3])
d1_bools = np.logical_or(d1x>0., d1y>0.)
d2_bools = np.logical_or(d2x>0., d2y>0.)
d_bools = np.logical_or(d1_bools, d2_bools)
return np.logical_not(d_bools)
def aabbs_max_distances(a_bbox, b_bboxes):
dcxs = np.abs(b_bboxes[:, 0] - a_bbox[0])
extra_x_dist = b_bboxes[:, ] + a_bbox[2]
x_max_dists = dcxs + extra_x_dist
dcys = np.abs(b_bboxes[:, 1] - a_bbox[1])
extra_y_dist = b_bboxes[:, 3] + a_bbox[3]
y_max_dists = dcys + extra_y_dist
d_maxs = np.sqrt(x_max_dists**2 + y_max_dists**2)
return d_maxs
def aabbs_min_distances(a_bbox, b_bboxes):
dcxs = np.abs(b_bboxes[:, 0] - a_bbox[0])
extra_x_dist = b_bboxes[:, 2] + a_bbox[2]
zeros_array = np.zeros(shape=extra_x_dist.shape)
x_min_dists = np.maximum(dcxs - extra_x_dist, zeros_array)
dcys = np.abs(b_bboxes[:, 1] - a_bbox[1])
extra_y_dist = b_bboxes[:, 3] + a_bbox[3]
y_min_dists = np.maximum(dcys - extra_y_dist, zeros_array)
d_mins = np.sqrt(x_min_dists**2 + y_min_dists**2)
return d_mins
def calc_vels(bboxes, bboxes_prev, dt):
pos_dif = bboxes[:, 0:2] - bboxes_prev[:, 0:2]
vel = pos_dif / dt
return vel
def calc_angles(a_bbox, b_bboxes):
if a_bbox[0] == 100. and a_bbox[1] == 100.:
# use a negative angle so it is different in this case
angles = np.repeat(-1., repeats=b_bboxes.shape[0])
else:
angles = np.arctan2(b_bboxes[:, 1] - a_bbox[1], b_bboxes[:, 0] - a_bbox[0]) * 180 / np.pi # to degree
angles = angles % 360.
angles = np.expand_dims(angles, axis=1)
return angles
class ObsExtender(ABC):
def __init__(self, args):
self.args = args
self.counter = 0
@abstractmethod
def extend_obs(self, obs, env):
pass
def step(self):#just here makes sense to increment step
self.counter += 1
def reset_ep(self):
self.counter = 0
#leaves everything as it is, used for test of HGG
class DummyExtender(ObsExtender):
def __init__(self, args):
super(DummyExtender, self).__init__(args)
def extend_obs(self, obs, env):
return obs
class ObsExtenderBbox(ObsExtender):
def __init__(self, args):
super(ObsExtenderBbox, self).__init__(args)
def extend_obs(self, obs, env):
if self.args.vae_dist_help:
extra_goal_state = np.concatenate([obs['achieved_goal_latent'],
obs['achieved_goal_size_latent']])
obstacle_l = obs['obstacle_latent']
obstacle_s_l = obs['obstacle_size_latent']
obstacle_len_shape = len(obstacle_l.shape)
if len(obstacle_l.shape) > 1:
extra_obstacle_state = np.concatenate([obstacle_l, obstacle_s_l], axis=1)
else:
extra_obstacle_state = np.concatenate([obstacle_l, obstacle_s_l])
else:
extra_goal_state = np.concatenate([obs['achieved_goal'][:2],
obs['real_size_goal'][:2]])
obstacle_info = obs['real_obstacle_info']
obstacle_len_shape = len(obstacle_info.shape)
if len(obstacle_info.shape) > 1:
extra_obstacle_state = np.concatenate([obstacle_info[:, :2], obstacle_info[:, -3:-1]], axis=1)
else:
extra_obstacle_state = np.concatenate([obstacle_info[:2], obstacle_info[-3:-1]])
if self.counter == 0:
extra_goal_state = np.concatenate([extra_goal_state, extra_goal_state])
if obstacle_len_shape > 1:
extra_obstacle_state = np.ravel(np.concatenate([extra_obstacle_state, extra_obstacle_state],
axis=1)
)
else:
extra_obstacle_state = np.concatenate([extra_obstacle_state, extra_obstacle_state])
self.single_step_extra_goal_state_size = len(extra_goal_state) // 2
self.single_step_extra_obstacle_state_size = len(extra_obstacle_state) // 2
self.start_index_extra_observation = len(obs['observation'])
else:
# the first entries will always have the more recent representations
prev_obs = env.last_obs.copy()
begin_index = self.start_index_extra_observation
end_index = begin_index + self.single_step_extra_goal_state_size
prev_goal_state = prev_obs['observation'][begin_index: end_index]
begin_index = self.start_index_extra_observation + 2 * self.single_step_extra_goal_state_size
# This one extract until the end since it might exist more tah one obstacle
end_index = begin_index + self.single_step_extra_obstacle_state_size * 2
prev_obstacle_state = prev_obs['observation'][begin_index: end_index]
prev_obstacle_state = extract_most_current_obstacles(prev_obstacle_state)
# the previous ones are pushed to the back
extra_goal_state = np.concatenate([extra_goal_state, prev_goal_state])
if obstacle_len_shape > 1:
extra_obstacle_state = np.ravel(np.concatenate([extra_obstacle_state, prev_obstacle_state], axis=1))
else:
extra_obstacle_state = np.concatenate([extra_obstacle_state, prev_obstacle_state[0]])
new_state = np.concatenate([obs['observation'], extra_goal_state, extra_obstacle_state])
obs['observation'] = new_state
return obs
#basically the same but does not extend the state that is passed to agent. This class will be inherited to extend the
#class in other ways
class ObsExtBboxInfo(ObsExtender):
def __init__(self, args):
super(ObsExtBboxInfo, self).__init__(args)
def extend_obs(self, obs, env):
if self.args.vae_dist_help:
extra_goal_state = np.concatenate([obs['achieved_goal_latent'],
obs['achieved_goal_size_latent']])
obstacle_l = obs['obstacle_latent']
obstacle_s_l = obs['obstacle_size_latent']
obstacle_len_shape = len(obstacle_l.shape)
if len(obstacle_l.shape) > 1:
extra_obstacle_state = np.concatenate([obstacle_l, obstacle_s_l], axis=1)
else:
extra_obstacle_state = np.concatenate([obstacle_l, obstacle_s_l])
extra_obstacle_state = np.expand_dims(extra_obstacle_state, axis=0)
else:
extra_goal_state = np.concatenate([obs['achieved_goal'][:2],
obs['real_size_goal'][:2]])
obstacle_info = obs['real_obstacle_info']
if len(obstacle_info.shape) > 1:
extra_obstacle_state = np.concatenate([obstacle_info[:, :2], obstacle_info[:, -3:-1]], axis=1)
else:
extra_obstacle_state = np.concatenate([obstacle_info[:2], obstacle_info[-3:-1]])
extra_obstacle_state = np.expand_dims(extra_obstacle_state, axis=0)
if self.counter == 0:
#It is the first observation. We cannot assume nothing from previous steps and therefore init every
#field with the same
#might not need to store all goal state since they will not be used
obs['goal_st_t'] = extra_goal_state.copy()
obs['goal_st_t_minus1'] = extra_goal_state.copy()
obs['goal_st_t_minus2'] = extra_goal_state.copy()
obs['obstacle_st_t'] = extra_obstacle_state.copy()
obs['obstacle_st_t_minus1'] = extra_obstacle_state.copy()
obs['obstacle_st_t_minus2'] = extra_obstacle_state.copy()
else:
# the previous ones are pushed to the back
prev_obs = env.last_obs.copy()
obs['goal_st_t'] = extra_goal_state.copy()
obs['goal_st_t_minus1'] = prev_obs['goal_st_t'].copy()
obs['goal_st_t_minus2'] = prev_obs['goal_st_t_minus1'].copy()
obs['obstacle_st_t'] = extra_obstacle_state.copy()
obs['obstacle_st_t_minus1'] = prev_obs['obstacle_st_t'].copy()
obs['obstacle_st_t_minus2'] = prev_obs['obstacle_st_t_minus2'].copy()
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = copy.deepcopy(obs)
new_obs['obstacle_st_t'] = new_obstacle_list.copy()
new_obs['obstacle_st_t_minus1'] = None
new_obs['obstacle_st_t_minus1'] = None
return new_obs
class ObsExtBboxColl(ObsExtBboxInfo):
def __init__(self, args):
super(ObsExtBboxColl, self).__init__(args)
def extend_obs(self, obs, env):
obs = ObsExtBboxInfo.extend_obs(self, obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
# goal object is not in visible range
if goal_bbox[0] == 100. and goal_bbox[1] == 100.:
obs['coll'] = 0.
obs['coll_bool_ar'] = np.repeat(False, len(obstacle_bboxes))
else:
cols = check_collisions(goal_bbox, obstacle_bboxes)
obs['coll_bool_ar'] = cols.copy()
ncols = np.sum(cols.astype(np.float))
obs['coll'] = ncols
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = super(ObsExtBboxColl, self)._modify_obs(obs, new_obstacle_list, extra_info, index)
goal_bbox = new_obs['goal_st_t']
obstacle_bboxes = new_obs['obstacle_st_t']
# goal object is not in visible range
if goal_bbox[0] == 100. and goal_bbox[1] == 100.:
new_obs['coll'] = 0.
new_obs['coll_bool_ar'] = np.repeat(False, len(obstacle_bboxes))
else:
cols = check_collisions(goal_bbox, obstacle_bboxes)
new_obs['coll_bool_ar'] = cols.copy()
ncols = np.sum(cols.astype(np.float))
new_obs['coll'] = ncols
return new_obs
class ObsExtMinDist(ObsExtBboxColl):
def __init__(self, args):
super(ObsExtMinDist, self).__init__(args)
def extend_obs(self, obs, env):
obs = super(ObsExtMinDist, self).extend_obs(obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
# goal object is not in visible range therefore distance really far away
if goal_bbox[0] == 100. and goal_bbox[1] == 100.:
dists = np.repeat(1000., repeats=obstacle_bboxes.shape[0])
else:
dists = aabbs_min_distances(goal_bbox, obstacle_bboxes)
obs['dists'] = dists.copy()
new_state = np.concatenate([obs['observation'], dists.copy()])
obs['observation'] = new_state
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = super(ObsExtMinDist, self)._modify_obs(obs, new_obstacle_list, extra_info, index)
goal_bbox = new_obs['goal_st_t']
obstacle_bboxes = new_obs['obstacle_st_t']
# goal object is not in visible range therefore distance really far away
if goal_bbox[0] == 100. and goal_bbox[1] == 100.:
dists = np.repeat(1000., repeats=obstacle_bboxes.shape[0])
else:
dists = aabbs_min_distances(goal_bbox, obstacle_bboxes)
new_obs['dists'] = dists.copy()
len_dists = len(dists)
new_obs['observation'][-len_dists:] = dists
return new_obs
class ObsExtP(ObsExtMinDist):
def __init__(self, args):
super(ObsExtP, self).__init__(args)
def extend_obs(self, obs, env):
obs = super(ObsExtP, self).extend_obs(obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
dists = obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
observation_without_dist = obs['observation'][:-len_dists]
extension = np.concatenate([dists, pos], axis=1)
new_state = np.concatenate([observation_without_dist, np.ravel(extension)])
obs['observation'] = new_state
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = super(ObsExtP, self)._modify_obs(obs, new_obstacle_list, extra_info, index)
goal_bbox = new_obs['goal_st_t']
obstacle_bboxes = new_obs['obstacle_st_t']
dists = new_obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
extension = np.ravel(np.concatenate([dists, pos], axis=1))
len_extension = len(extension)
new_obs['observation'][-len_extension:] = extension
return new_obs
class ObsExtPAV(ObsExtMinDist):
def __init__(self, args):
super(ObsExtPAV, self).__init__(args)
self.length_extension = None
self.env_dt = None
def extend_obs(self, obs, env):
if self.env_dt is None:
self.env_dt = env.env.env.dt
obs = super(ObsExtPAV, self).extend_obs(obs, env)
goal_bbox = obs['goal_st_t']
obstacle_bboxes = obs['obstacle_st_t']
previous_obstacle_bboxes = obs['obstacle_st_t_minus1']
dists = obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
dt = env.env.dt
vel = calc_vels(obstacle_bboxes, previous_obstacle_bboxes, dt)
angles = calc_angles(goal_bbox, obstacle_bboxes)
observation_without_dist = obs['observation'][:-len_dists]
extension = np.ravel(np.concatenate([dists, pos, angles, vel], axis=1))
if self.length_extension is None:
self.length_extension = len(extension)
new_state = np.concatenate([observation_without_dist, extension])
obs['observation'] = new_state
return obs
def _modify_obs(self, obs, new_obstacle_list, extra_info, index):
new_obs = super(ObsExtPAV, self)._modify_obs(obs, new_obstacle_list, extra_info, index)
goal_bbox = new_obs['goal_st_t']
obstacle_bboxes = new_obs['obstacle_st_t']
dists = new_obs['dists'].copy()
len_dists = len(dists)
dists = np.expand_dims(dists, axis=1)
pos = obstacle_bboxes[:, 0:2]
len_pos_el = len(pos[0])
#vel = calc_vels(obstacle_bboxes, previous_obstacle_bboxes, dt)
angles = calc_angles(goal_bbox, obstacle_bboxes)
mock_extension = np.ravel(np.concatenate([dists, pos, angles, pos], axis=1))
len_extension = len(mock_extension)
unmodified_extension = obs['observation'][-len_extension:].copy()
unmodified_extension =np.reshape(unmodified_extension, newshape=(-1, 1+2*len_pos_el+1))
vel = unmodified_extension[:, -2:]
dir_not_scaled = extra_info['dir_not_scaled']
if self.env_dt is None:
raise Exception('this was called before modification of obs')
vel[index] = dir_not_scaled / self.env_dt
extension = np.ravel(np.concatenate([dists, pos, angles, vel], axis=1))
len_extension = len(extension)
new_obs['observation'][-len_extension:] = extension
return new_obs
def visualize(self, obs, file_name):
extension = obs['observation'][- self.length_extension:].copy()
extension =
|
np.reshape(extension, (-1, 6))
|
numpy.reshape
|
import unittest
import numpy as np
import numpy.testing as npt
from scipy.sparse.csr import csr_matrix
from pylogit.scipy_utils import identity_matrix
def sparse_assert_equal(a1, a2):
"""Assert equality of two sparse matrices"""
assert type(a1) is type(a2)
npt.assert_array_equal(a1.data, a2.data)
|
npt.assert_array_equal(a1.indices, a2.indices)
|
numpy.testing.assert_array_equal
|
import math
import numpy as np
import torch
from torch import nn
from utils.geometric import pairwise_distance, calc_angle, calc_dihedral
from . import spline
from common import config
from common.config import EPS
from common import constants
class OmegaRestraint(nn.Module):
"""Omega angle is defined as dehidral (CA_i, CB_i, CB_j, CA_j)"""
def __init__(self, pred_omega):
"""
pred_omega has shape (L, L, 37)
"""
super().__init__()
L = pred_omega.shape[0]
_filter = torch.tensor(pred_omega[:, :, -1] < config.OMEGA_CUT)
self.mask = nn.Parameter(
torch.triu(torch.ones((L, L)).bool(), diagonal=1).__and__(_filter),
requires_grad=False,
)
_step = 15.0 * math.pi / 180.0
self.cutoffs = torch.linspace(-math.pi + 0.5 * _step, math.pi + 0.5 * _step, steps=25)
_x = self.cutoffs
_ref = -np.log(constants.bg_omega)
_y = -np.log(pred_omega[:, :, :-1] + EPS) - _ref
_y = np.concatenate([_y, _y[:, :, :1]], axis=-1)
self.coeff = nn.Parameter(spline.cubic_spline(_x, _y, period=True), requires_grad=False)
self.cutoffs = nn.Parameter(self.cutoffs, requires_grad=False)
def __str__(self):
return "Omega constraints: %i" % torch.sum(self.mask).item()
def forward(self, coord):
B = coord.CA.shape[0]
x_idx, y_idx = torch.where(self.mask)
x_CA = coord.CA[:, x_idx].view(-1, 3)
x_CB = coord.CB[:, x_idx].view(-1, 3)
y_CA = coord.CA[:, y_idx].view(-1, 3)
y_CB = coord.CB[:, y_idx].view(-1, 3)
x_idx, y_idx = x_idx.repeat(B), y_idx.repeat(B)
x_idx, y_idx, omega = calc_dihedral(x_CA, x_CB, y_CB, y_CA, x_idx, y_idx)
coeff = self.coeff[x_idx, y_idx]
omega_potential = torch.sum(spline.evaluate(coeff, self.cutoffs, omega))
return {
"pairwise_omega": omega_potential,
}
class ThetaRestraint(nn.Module):
"""Theta angle is defined as dehidral (N_i, CA_i, CB_i, CB_j)"""
def __init__(self, pred_theta):
"""
pred_theta has shape (L, L, 37)
"""
super().__init__()
L = pred_theta.shape[0]
_filter = torch.tensor(pred_theta[:, :, -1] < config.THETA_CUT)
self.mask = nn.Parameter(
(torch.eye(pred_theta.shape[0]) == 0).__and__(_filter),
requires_grad=False,
)
_step = 15.0 * math.pi / 180.0
self.cutoffs = torch.linspace(-math.pi + 0.5 * _step, math.pi + 0.5 * _step, steps=25)
_x = self.cutoffs
_ref = -np.log(constants.bg_theta)
_y = -np.log(pred_theta[:, :, :-1] + EPS) - _ref
_y =
|
np.concatenate([_y, _y[:, :, :1]], axis=-1)
|
numpy.concatenate
|
#Entry point for the LEC agent
#Script has anomaly detectors, assurance monitors and risk computations
import os
import cv2
import torch
import torchvision
import carla
import csv
import math
import pathlib
import datetime
import gc
import time
from numba import cuda
from PIL import Image, ImageDraw,ImageFont
import threading
from threading import Thread
from queue import Queue
import numpy as np
import tensorflow as tf
from keras.models import Model, model_from_json
from keras.backend.tensorflow_backend import set_session
from keras import backend as K
from sklearn.utils import shuffle
from keras.models import model_from_json
from sklearn.metrics import mean_squared_error
from carla_project.src.image_model import ImageModel
from carla_project.src.converter import Converter
from team_code.base_agent import BaseAgent
from team_code.pid_controller import PIDController
from team_code.detectors.anomaly_detector import occlusion_detector, blur_detector, assurance_monitor
from team_code.risk_calculation.fault_modes import FaultModes
from team_code.risk_calculation.bowtie_diagram import BowTie
from scipy.stats import norm
import scipy.integrate as integrate
DEBUG = int(os.environ.get('HAS_DISPLAY', 0))
os.environ["CUDA_VISIBLE_DEVICES"]="1"
def get_entry_point():
return 'ImageAgent'
#Display window
def debug_display(tick_data, target_cam, out, steer, throttle, brake, desired_speed, step):
_rgb = Image.fromarray(tick_data['rgb'])
_draw_rgb = ImageDraw.Draw(_rgb)
_draw_rgb.ellipse((target_cam[0]-3,target_cam[1]-3,target_cam[0]+3,target_cam[1]+3), (255, 255, 255))
for x, y in out:
x = (x + 1) / 2 * 256
y = (y + 1) / 2 * 144
_draw_rgb.ellipse((x-2, y-2, x+2, y+2), (0, 0, 255))
_combined = Image.fromarray(np.hstack([tick_data['rgb_left'], _rgb, tick_data['rgb_right']]))
_draw = ImageDraw.Draw(_combined)
font = ImageFont.truetype("arial.ttf", 25)
_draw.text((5, 10), 'Steer: %.3f' % steer)
_draw.text((5, 30), 'Throttle: %.3f' % throttle)
_draw.text((5, 50), 'Brake: %s' % brake)
_draw.text((5, 70), 'Speed: %.3f' % tick_data['speed'])
_draw.text((5, 90), 'Desired: %.3f' % desired_speed)
cv2.imshow('map', cv2.cvtColor(np.array(_combined), cv2.COLOR_BGR2RGB))
cv2.waitKey(1)
#Function that gets in weather from a file
def process_weather_data(weather_file,k):
weather = []
lines = []
with open(weather_file, 'r') as readFile:
reader = csv.reader(readFile)
for row in reader:
weather.append(row)
return weather[k-1],len(weather)
ENV_LABELS = ["precipitation",
"precipitation_deposits",
"cloudiness",
"wind_intensity",
"sun_azimuth_angle",
"sun_altitude_angle",
"fog_density",
"fog_distance",
"wetness"]
FAULT_LABEL = ["fault_type"]
MONITOR_LABELS = ["center_blur_dect",
"left_blur_dect",
"right_blur_dect",
"center_occ_dect",
"left_occ_dect",
"right_occ_dect"]
#"lec_martingale"]
class ImageAgent(BaseAgent):
def setup(self, path_to_conf_file,data_folder,route_folder,k,model_path):
super().setup(path_to_conf_file,data_folder,route_folder,k,model_path)
self.converter = Converter()
self.net = ImageModel.load_from_checkpoint(path_to_conf_file)
self.data_folder = data_folder
self.route_folder = route_folder
self.scene_number = k
self.weather_file = self.route_folder + "/weather_data.csv"
self.model_path = model_path
self.model_vae = None
self.net.cuda()
self.net.eval()
self.run = 0
self.risk = 0
self.state = []
self.monitors = []
self.blur_queue = Queue(maxsize=1)
self.occlusion_queue = Queue(maxsize=1)
self.am_queue = Queue(maxsize=1)
self.pval_queue = Queue(maxsize=1)
self.sval_queue = Queue(maxsize=1)
self.mval_queue = Queue(maxsize=1)
self.avg_risk_queue = Queue(maxsize=1)
self.calib_set = []
self.result = []
self.blur = []
self.occlusion = []
self.detector_file = None
self.detector_file = None
K.clear_session()
config = tf.ConfigProto()
sess = tf.Session(config=config)
set_session(sess)
with open(self.model_path + 'auto_model.json', 'r') as jfile:
self.model_vae = model_from_json(jfile.read())
self.model_vae.load_weights(self.model_path + 'auto_model.h5')
self.model_vae._make_predict_function()
self.fields = ['step',
'monitor_result',
'risk_score',
'rgb_blur',
'rgb_left_blur',
'rgb_right_blur',
'rgb_occluded',
'rgb_left_occluded',
'rgb_right_occluded',
]
self.weather,self.run_number = process_weather_data(self.weather_file,self.scene_number)
print(self.weather)
with open(self.model_path + 'calibration.csv', 'r') as file:
reader = csv.reader(file)
for row in reader:
self.calib_set.append(row)
def _init(self):
super()._init()
self._turn_controller = PIDController(K_P=1.25, K_I=0.75, K_D=0.3, n=40)
self._speed_controller = PIDController(K_P=5.0, K_I=0.5, K_D=1.0, n=40)
def blur_detection(self,result):
self.blur =[]
fm1,rgb_blur = blur_detector(result['rgb'], threshold=20)
fm2,rgb_left_blur = blur_detector(result['rgb_left'], threshold=20)
fm3,rgb_right_blur = blur_detector(result['rgb_right'], threshold=20)
self.blur.append(rgb_blur)
self.blur.append(rgb_left_blur)
self.blur.append(rgb_right_blur)
self.blur_queue.put(self.blur)
def occlusion_detection(self,result):
self.occlusion = []
percent1,rgb_occluded = occlusion_detector(result['rgb'], threshold=25)
percent2,rgb_left_occluded = occlusion_detector(result['rgb_left'], threshold=25)
percent3,rgb_right_occluded = occlusion_detector(result['rgb_right'], threshold=25)
self.occlusion.append(rgb_occluded)
self.occlusion.append(rgb_left_occluded)
self.occlusion.append(rgb_right_occluded)
self.occlusion_queue.put(self.occlusion)
def integrand1(self,p_anomaly):
result = 0.0
for i in range(len(p_anomaly)):
result += p_anomaly[i]
result = result/len(p_anomaly)
return result
def integrand(self,k,p_anomaly):
result = 1.0
for i in range(len(p_anomaly)):
result *= k*(p_anomaly[i]**(k-1.0))
return result
def mse(self, imageA, imageB):
err = np.mean(np.power(imageA - imageB, 2), axis=1)
return err
def assurance_monitor(self,dist):
if(self.step == 0):
p_anomaly = []
prev_value = []
else:
p_anomaly = self.pval_queue.get()
prev_value = self.sval_queue.get()
anomaly=0
m=0
delta = 10
threshold = 20
sliding_window = 15
threshold = 10.0
for i in range(len(self.calib_set)):
if(float(dist) <= float(self.calib_set[i][0])):
anomaly+=1
p_value = anomaly/len(self.calib_set)
if(p_value<0.005):
p_anomaly.append(0.005)
else:
p_anomaly.append(p_value)
if(len(p_anomaly))>= sliding_window:
p_anomaly = p_anomaly[-1*sliding_window:]
m = integrate.quad(self.integrand,0.0,1.0,args=(p_anomaly))
m_val = round(math.log(m[0]),2)
if(self.step==0):
S = 0
S_prev = 0
else:
S = max(0, prev_value[0]+prev_value[1]-delta)
prev_value = []
S_prev = S
m_prev = m[0]
prev_value.append(S_prev)
prev_value.append(m_prev)
self.pval_queue.put(p_anomaly)
self.sval_queue.put(prev_value)
self.mval_queue.put(m_val)
def risk_computation(self,weather,blur_queue,am_queue,occlusion_queue,fault_scenario,fault_type,fault_time,fault_step):
monitors = []
faults = []
faults.append(fault_type)
blur = self.blur_queue.get()
occlusion = self.occlusion_queue.get()
mval = self.mval_queue.get()
if(self.step == 0):
avg_risk = []
else:
avg_risk = self.avg_risk_queue.get()
monitors = blur + occlusion
state = {"enviornment": {}, "fault_modes": None, "monitor_values": {}}
for i in range(len(weather)):
label = ENV_LABELS[i]
state["enviornment"][label] = weather[i]
state["fault_modes"] = fault_type
for j in range(len(monitors)):
label = MONITOR_LABELS[j]
state["monitor_values"][label] = monitors[j]
state["monitor_values"]["lec_martingale"] = mval
fault_modes = state["fault_modes"][0]
environment = state["enviornment"]
monitor_values = state["monitor_values"]
if(fault_scenario == 0):
fault_modes = state["fault_modes"][0]
if(fault_scenario == 1):
if(self.step >= fault_step[0] and self.step < fault_step[0]+fault_time[0]):
fault_modes = state["fault_modes"][0]
if(fault_scenario > 1):
if(self.step >= fault_step[0] and self.step < fault_step[0]+fault_time[0]):
fault_modes = state["fault_modes"][0]
elif(self.step >= fault_step[1] and self.step < fault_step[1]+fault_time[1]):
fault_modes = state["fault_modes"][1]
bowtie = BowTie()
r_t1_top = bowtie.rate_t1(state) * (1 - bowtie.prob_b1(state,fault_modes))
r_t2_top = bowtie.rate_t2(state) * (1 - bowtie.prob_b2(state,fault_modes))
r_top = r_t1_top + r_t2_top
r_c1 = r_top * (1 - bowtie.prob_b3(state))
print("Dynamic Risk Score:%f"%r_c1)
avg_risk.append(r_c1)
if(len(avg_risk))>= 20:
avg_risk = avg_risk[-1*20:]
#m = integrate.cumtrapz(avg_risk)
m = self.integrand1(avg_risk)
self.avg_risk_queue.put(avg_risk)
dict = [{'step':self.step, 'monitor_result':mval, 'risk_score':r_c1, 'rgb_blur':blur[0],'rgb_left_blur':blur[1],'rgb_right_blur':blur[2],
'rgb_occluded':occlusion[0],'rgb_left_occluded':occlusion[1],'rgb_right_occluded':occlusion[2]}]
if(self.step == 0):
self.detector_file = self.data_folder + "/run%d.csv"%(self.scene_number)
file_exists = os.path.isfile(self.detector_file)
with open(self.detector_file, 'a') as csvfile:
# creating a csv dict writer object
writer = csv.DictWriter(csvfile, fieldnames = self.fields)
if not file_exists:
writer.writeheader()
writer.writerows(dict)
return m, mval,blur[0],blur[1],blur[2],occlusion[0],occlusion[1],occlusion[2]
def tick(self, input_data):
self.time = time.time()
result = super().tick(input_data)
result['rgb_detector'] = cv2.resize(result['rgb'],(224,224))
result['rgb_detector_left'] = cv2.resize(result['rgb_left'],(224,224))
result['rgb_detector_right'] = cv2.resize(result['rgb_right'],(224,224))
result['rgb'] = cv2.resize(result['rgb'],(256,144))
result['rgb_left'] = cv2.resize(result['rgb_left'],(256,144))
result['rgb_right'] = cv2.resize(result['rgb_right'],(256,144))
detection_image = cv2.cvtColor(result['rgb_detector_right'], cv2.COLOR_BGR2RGB)
detection_image = detection_image/ 255.
detection_image = np.reshape(detection_image, [-1, detection_image.shape[0],detection_image.shape[1],detection_image.shape[2]])
#B-VAE reconstruction based assurance monitor
# TODO: Move this prediction into a thread. I had problems of using keras model in threads.
predicted_reps = self.model_vae.predict_on_batch(detection_image)
dist = np.square(np.subtract(np.array(predicted_reps),detection_image)).mean()
#start other detectors
BlurDetectorThread = Thread(target=self.blur_detection, args=(result,))
BlurDetectorThread.daemon = True
OccusionDetectorThread = Thread(target=self.occlusion_detection, args=(result,))
OccusionDetectorThread.daemon = True
AssuranceMonitorThread = Thread(target=self.assurance_monitor, args=(dist,)) #image,model,calibration_set,pval_queue,sval_queue
AssuranceMonitorThread.daemon = True
AssuranceMonitorThread.start()
BlurDetectorThread.start()
OccusionDetectorThread.start()
result['image'] = np.concatenate(tuple(result[x] for x in ['rgb', 'rgb_left', 'rgb_right']), -1)
theta = result['compass']
theta = 0.0 if np.isnan(theta) else theta
theta = theta + np.pi / 2
R = np.array([
[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)],
])
gps = self._get_position(result)
result['gps_y'] = gps[0]
result['gps_x'] = gps[1]
far_node,_ = self._command_planner.run_step(gps)
result['actual_y'] = far_node[0]
result['actual_x'] = far_node[1]
result['far_node'] = far_node
target = R.T.dot(far_node - gps)
target *= 5.5
target += [128, 256]
target = np.clip(target, 0, 256)
#waypoints_left = self._command_planner.get_waypoints_remaining(gps)
#print("step:%d waypoints:%d"%(self.step,(9-waypoints_left)))
#Synthetically add noise to the radar data based on the weather
if(self.weather[0]<="20.0" and self.weather[1]<="20.0" and self.weather[2]<="20.0"):
result['cloud'][0] = result['cloud'][0]
result['cloud_right'][0] = result['cloud_right'][0]
elif((self.weather[0]>"20.0" and self.weather[0]<"50.0") and (self.weather[1]>"20.0" and self.weather[1]<"50.0") and (self.weather[2]>"20.0" and self.weather[2]<"50.0")):
noise =
|
np.random.normal(0, 0.5, result['cloud'][0].shape)
|
numpy.random.normal
|
""" Routines for building qutrit gates and models """
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from scipy import linalg as _linalg
from .. import objects as _objs
from ..tools import unitary_to_process_mx, change_basis, Basis
#Define 2 qubit to symmetric (+) antisymmetric space transformation A:
A = _np.matrix([[1, 0, 0, 0],
# [0,0,0,1],
[0, 1. / _np.sqrt(2), 1. / _np.sqrt(2), 0],
[0, 1. / _np.sqrt(2), -1. / _np.sqrt(2), 0],
[0, 0, 0, 1], ])
X = _np.matrix([[0, 1], [1, 0]])
Y = _np.matrix([[0, -1j], [1j, 0]])
def X2qubit(theta):
""" Returns X(theta)^\otimes 2 (2-qubit 'XX' unitary)"""
x = _np.matrix(_linalg.expm(-1j / 2. * theta * _np.matrix([[0, 1], [1, 0]])))
return _np.kron(x, x)
def Y2qubit(theta):
""" Returns Y(theta)^\otimes 2 (2-qubit 'YY' unitary)"""
y = _np.matrix(_linalg.expm(-1j / 2. * theta * _np.matrix([[0, -1j], [1j, 0]])))
return _np.kron(y, y)
def ms2qubit(theta, phi):
""" Returns Molmer-Sorensen gate for two qubits """
return _np.matrix(_linalg.expm(-1j / 2 * theta
* _np.kron(
_np.cos(phi) * X + _np.sin(phi) * Y,
_np.cos(phi) * X + _np.sin(phi) * Y)
))
#Projecting above gates into symmetric subspace (qutrit space)
#(state space ordering is |0> = |00>, |1> ~ |01>+|10>,|2>=|11>, so state |i> corresponds to i detector counts
#Removes columns and rows from inputArr
def _remove_from_matrix(inputArr, columns, rows, outputType=_np.matrix):
inputArr =
|
_np.array(inputArr)
|
numpy.array
|
# -*- coding: utf-8 -*-
"""
Test nematusLL for consistency with nematus
"""
import os
import unittest
import sys
import numpy as np
import logging
import Pyro4
nem_path = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../'))
sys.path.insert(1, nem_path)
from nematus.pyro_utils import setup_remotes, get_random_key, get_unused_port
from nematus.util import load_dict
from unit_test_utils import initialize
GPU_ID = 0
VOCAB_SIZE = 90000
SRC = 'ro'
TGT = 'en'
DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test_data')
model_options = dict(factors=1, # input factors
dim_per_factor=None,
# list of word vector dimensionalities (one per factor): [250,200,50] for dimensionality of 500
encoder='gru',
decoder='gru_cond',
patience=10, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
map_decay_c=0., # L2 regularization penalty towards original weights
alpha_c=0., # alignment regularization
shuffle_each_epoch=True,
finetune=False,
finetune_only_last=False,
sort_by_length=True,
use_domain_interpolation=False,
domain_interpolation_min=0.1,
domain_interpolation_inc=0.1,
domain_interpolation_indomain_datasets=['indomain.en', 'indomain.fr'],
maxibatch_size=20, # How many minibatches to load at one time
model_version=0.1, # store version used for training for compatibility
pyro_key=None, # pyro hmac key
pyro_port=None, # pyro nameserver port
pyro_name=None, # if None, will import instead of assuming a server is running
saveto='model.npz',
reload_=True,
dim_word=500,
dim=1024,
n_words=VOCAB_SIZE,
n_words_src=VOCAB_SIZE,
decay_c=0.,
clip_c=1.,
lrate=0.0001,
optimizer='adadelta',
maxlen=50,
batch_size=80,
valid_batch_size=80,
datasets=[DATA_DIR + '/corpus.bpe.' + SRC, DATA_DIR + '/corpus.bpe.' + TGT],
valid_datasets=[DATA_DIR + '/newsdev2016.bpe.' + SRC, DATA_DIR + '/newsdev2016.bpe.' + TGT],
dictionaries=[DATA_DIR + '/corpus.bpe.' + SRC + '.json',
DATA_DIR + '/corpus.bpe.' + TGT + '.json'],
validFreq=10000,
dispFreq=10,
saveFreq=30000,
sampleFreq=10000,
use_dropout=False,
dropout_embedding=0.2, # dropout for input embeddings (0: no dropout)
dropout_hidden=0.2, # dropout for hidden layers (0: no dropout)
dropout_source=0.1, # dropout source words (0: no dropout)
dropout_target=0.1, # dropout target words (0: no dropout)
overwrite=False,
external_validation_script='./validate.sh',
)
x0 = np.array([[[3602],
[8307],
[7244],
[7],
[58],
[9],
[5893],
[62048],
[11372],
[4029],
[25],
[34],
[2278],
[5],
[4266],
[11],
[2852],
[3],
[2298],
[2],
[23912],
[6],
[16358],
[3],
[730],
[2328],
[5],
[28],
[353],
[4],
[0], ]]) # 0 = EOS
xx0 = np.tile(x0, [1, 1, 2])
x1 = np.array([[[3602],
[8307],
[7244],
[7],
[58],
[9],
[4],
[0], ]]) # 0 = EOS
# False Truth for testing Gradients
y1 = np.array([[[1],
[1],
[1],
[1],
[1],
[1],
[1],
[0], ]]) # 0 = EOS
x_0 = np.array([[[2], [0], [0], [0]]])
x_0_mask = np.array([[1], [1], [1], [0]], dtype=np.float32)
y_0 = np.array([[17], [9], [41], [120], [7], [117], [1087], [476], [1715], [62], [2], [0]])
y_0_mask = np.array([[1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1], [1]], dtype=np.float32)
per_sample_weight_1 = np.array([1, 1], dtype=np.float32)
xx_0 = np.array([[[4, 2], [4, 0], [3, 0], [2, 0]]])
xx_0_mask = np.array([[2, 1], [0, 1], [0, 1], [0, 0]], dtype=np.float32)
yy_0 = np.array([[22, 17], [24, 9], [31, 41], [420, 120], [37, 7], [127, 117], [1387, 1087], [446, 476], [1515, 1715],
[22, 62], [12, 2], [0, 0]])
yy_0_mask = np.array([[1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1], [1, 1]],
dtype=np.float32)
per_sample_weight_2 = np.array([0.5, 0.5, 0.5, 0.5], dtype=np.float32)
xx_1 = np.tile(xx_0, [2])
yy_1 = np.tile(yy_0, [2])
xx_1_mask = np.tile(xx_0_mask, [2])
yy_1_mask = np.tile(yy_0_mask, [2])
per_sample_weight = np.array([0.25, 0.25, 0.25, 0.25], dtype=np.float32)
x_1 = np.array([[[2, 2, 2, 2],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]])
x_1_mask = np.array([[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[0., 0., 0., 0.]], dtype=np.float32)
y_1 = np.array([[17, 17, 17, 17],
[9, 9, 9, 9],
[41, 41, 41, 41],
[120, 120, 120, 120],
[7, 7, 7, 7],
[117, 117, 117, 117],
[1087, 1087, 1087, 1087],
[476, 476, 476, 476],
[1715, 1715, 1715, 1715],
[62, 62, 62, 62],
[2, 2, 2, 2],
[0, 0, 0, 0]])
y_1_mask = np.array([[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]], dtype=np.float32)
class PyroNematusTests(unittest.TestCase):
# Forces the class to have these fields.
logger = None
context_mgr = None
@classmethod
def setUpClass(cls):
cls.logger = logging.getLogger(__file__)
cls.logger.info("========================================================================================")
cls.logger.info("Setting up the pyro remote server as well as the nematus instance to test consistency.")
cls.logger.info("========================================================================================")
current_script_dir = os.path.dirname(os.path.abspath(__file__))
remote_script = os.path.join(current_script_dir, '../nematus/nmt_remote.py')
pyro_name = 'remote'
pyro_key = get_random_key()
pyro_port = get_unused_port()
cls.context_mgr = setup_remotes(remote_metadata_list=[dict(script=remote_script,
name=pyro_name, gpu_id=GPU_ID)],
pyro_port=pyro_port,
pyro_key=pyro_key)
cls.context_mgr.__enter__()
cls.remote_interface = initialize(model_options=model_options,
pyro_port=pyro_port,
pyro_name=pyro_name,
pyro_key=pyro_key)
@classmethod
def tearDownClass(cls):
cls.logger.info("========================================================================================")
cls.logger.info("Tearing down the pyro remote server as well as the nematus instance")
cls.logger.info("========================================================================================")
cls.context_mgr.__exit__(None, None, None)
def assert_params_same(self, params1, params2):
k1 = params1.keys()
k2 = params2.keys()
k1.sort()
k2.sort()
self.assertTrue(k1 == k2)
for k in k1:
self.assertTrue(np.allclose(params1[k], params2[k]), 'value for %s should match' % k)
def assert_params_different(self, params1, params2):
k1 = params1.keys()
k2 = params2.keys()
k1.sort()
k2.sort()
self.assertTrue(k1 == k2)
diff_vec = [not np.allclose(params1[k], params2[k]) for k in k1]
self.logger.info("Difference vector is: " + str(diff_vec))
self.assertTrue(any(diff_vec))
def test_f_init_dims(self):
"""
Best I can tell, f_init is only ever given one sentence, but it appears to be
written to process multiple sentences.
"""
self.logger.info("========================================================================================")
self.logger.info("Starting the f_init_dims test to determine that x_f_init acts as expected.")
self.logger.info("========================================================================================")
x0_state0, x0_ctx0 = self.remote_interface.x_f_init(x0) # (1, 1024) (31, 1, 2048)
# If tile input, state/context should be tiled too
xx0_state0, xx0_ctx0 = self.remote_interface.x_f_init(xx0) # (2, 1024) (31, 2, 2048)
self.assertTrue(np.allclose(np.tile(x0_state0, [2, 1]), xx0_state0))
self.assertTrue(np.allclose(np.tile(x0_ctx0, [1, 2, 1]), xx0_ctx0))
# Different inputs should create different state
x1_state0, x1_ctx0 = self.remote_interface.x_f_init(x1)
self.assertFalse(np.allclose(x0_state0, x1_state0))
# Different inputs (of same length) should create different state and context
x1_2_state0, x1_2_ctx0 = self.remote_interface.x_f_init(x1 * 2)
self.assertFalse(np.allclose(x1_state0, x1_2_state0))
self.assertFalse(np.allclose(x1_ctx0, x1_2_ctx0))
def test_f_next_dims(self):
self.logger.info("========================================================================================")
self.logger.info("Starting the f_next_dims test to determine that x_f_next acts as expected.")
self.logger.info("========================================================================================")
self.remote_interface.set_noise_val(0)
x0_state0, x0_ctx0 = self.remote_interface.x_f_init(x0)
x0_prob1, x0_word1, x0_state1 = self.remote_interface.x_f_next(np.array([2893, ]), x0_ctx0, x0_state0)
x0_prob2, x0_word2, x0_state2 = self.remote_interface.x_f_next(np.array([9023, ]), x0_ctx0, x0_state1)
self.assertFalse(np.allclose(x0_state0, x0_state1), 'state should be changing')
self.assertFalse(np.allclose(x0_prob1, x0_prob2), 'probability should be changing')
# word might not change...
self.logger.info('x0 prob shape, ' + str(x0_prob1.shape))
self.logger.info('x0 word shape, ' + str(x0_word1.shape))
self.logger.info('x0 state shape, ' + str(x0_state2.shape))
xx0_state0, xx0_ctx0 = self.remote_interface.x_f_init(xx0)
xx0_prob1, xx0_word1, xx0_state1 = self.remote_interface.x_f_next(np.array([2893, 2893]), xx0_ctx0, xx0_state0)
xx0_prob2, xx0_word2, xx0_state2 = self.remote_interface.x_f_next(np.array([9023, 9023]), xx0_ctx0, xx0_state1)
self.logger.info('xx0 prob shape, ' + str(xx0_prob1.shape))
self.logger.info('xx0 word shape, ' + str(xx0_word1.shape))
self.logger.info('xx0 state shape, ' + str(xx0_state2.shape))
self.assertTrue(np.allclose(np.tile(x0_prob1, [2, 1]), xx0_prob1))
self.assertTrue(np.allclose(np.tile(x0_prob2, [2, 1]), xx0_prob2))
# jitter??
# print 'same??', np.tile(x0_word1, [2]), xx0_word1
# self.assertTrue(np.allclose(np.tile(x0_word1, [2]), xx0_word1))
# self.assertTrue(np.allclose(np.tile(x0_word2, [2]), xx0_word2))
self.assertTrue(np.allclose(np.tile(x0_state1, [2, 1]), xx0_state1))
self.assertTrue(np.allclose(
|
np.tile(x0_state2, [2, 1])
|
numpy.tile
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: <NAME> <<EMAIL>>
#
'''Density expansion on plane waves'''
import time
import copy
import numpy
from pyscf import lib
from pyscf import gto
from pyscf.lib import logger
from pyscf.pbc import tools
from pyscf.pbc.gto import pseudo, estimate_ke_cutoff, error_for_ke_cutoff
from pyscf.pbc import gto as pbcgto
from pyscf.pbc.df import ft_ao
from pyscf.pbc.df import incore
from pyscf.pbc.lib.kpts_helper import is_zero, gamma_point
from pyscf.pbc.df import aft_jk
from pyscf.pbc.df import aft_ao2mo
from pyscf import __config__
CUTOFF = getattr(__config__, 'pbc_df_aft_estimate_eta_cutoff', 1e-12)
ETA_MIN = getattr(__config__, 'pbc_df_aft_estimate_eta_min', 0.2)
PRECISION = getattr(__config__, 'pbc_df_aft_estimate_eta_precision', 1e-8)
KE_SCALING = getattr(__config__, 'pbc_df_aft_ke_cutoff_scaling', 0.75)
def estimate_eta(cell, cutoff=CUTOFF):
'''The exponent of the smooth gaussian model density, requiring that at
boundary, density ~ 4pi rmax^2 exp(-eta/2*rmax^2) ~ 1e-12
'''
lmax = min(numpy.max(cell._bas[:,gto.ANG_OF]), 4)
# If lmax=3 (r^5 for radial part), this expression guarantees at least up
# to f shell the convergence at boundary
eta = max(numpy.log(4*numpy.pi*cell.rcut**(lmax+2)/cutoff)/cell.rcut**2*2,
ETA_MIN)
return eta
def estimate_eta_for_ke_cutoff(cell, ke_cutoff, precision=PRECISION):
'''Given ke_cutoff, the upper limit of eta to guarantee the required
precision in Coulomb integrals.
'''
lmax = numpy.max(cell._bas[:,gto.ANG_OF])
kmax = (ke_cutoff*2)**.5
log_rest = numpy.log(precision / (32*numpy.pi**2 * kmax**(lmax*2-1)))
log_eta = -1
eta = kmax**2/4 / (-log_eta - log_rest)
return eta
def estimate_ke_cutoff_for_eta(cell, eta, precision=PRECISION):
'''Given eta, the lower limit of ke_cutoff to guarantee the required
precision in Coulomb integrals.
'''
eta = max(eta, 0.2)
lmax = numpy.max(cell._bas[:,gto.ANG_OF])
log_k0 = 5 + numpy.log(eta) / 2
log_rest = numpy.log(precision / (32*numpy.pi**2*eta))
Ecut = 2*eta * (log_k0*(lmax*2-1) - log_rest)
Ecut = max(Ecut, .5)
return Ecut
def get_nuc(mydf, kpts=None):
# Pseudopotential is ignored when computing just the nuclear attraction
with lib.temporary_env(mydf.cell, _pseudo={}):
return get_pp_loc_part1(mydf, kpts)
def get_pp_loc_part1(mydf, kpts=None):
cell = mydf.cell
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
log = logger.Logger(mydf.stdout, mydf.verbose)
t0 = t1 = (time.clock(), time.time())
mesh = numpy.asarray(mydf.mesh)
nkpts = len(kpts_lst)
nao = cell.nao_nr()
nao_pair = nao * (nao+1) // 2
charges = cell.atom_charges()
kpt_allow = numpy.zeros(3)
if mydf.eta == 0:
if cell.dimension > 0:
ke_guess = estimate_ke_cutoff(cell, cell.precision)
mesh_guess = tools.cutoff_to_mesh(cell.lattice_vectors(), ke_guess)
if numpy.any(mesh[:cell.dimension] < mesh_guess[:cell.dimension]*.8):
logger.warn(mydf, 'mesh %s is not enough for AFTDF.get_nuc function '
'to get integral accuracy %g.\nRecommended mesh is %s.',
mesh, cell.precision, mesh_guess)
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
vpplocG = pseudo.pp_int.get_gth_vlocG_part1(cell, Gv)
vpplocG = -numpy.einsum('ij,ij->j', cell.get_SI(Gv), vpplocG)
vpplocG *= kws
vG = vpplocG
vj = numpy.zeros((nkpts,nao_pair), dtype=numpy.complex128)
else:
if cell.dimension > 0:
ke_guess = estimate_ke_cutoff_for_eta(cell, mydf.eta, cell.precision)
mesh_guess = tools.cutoff_to_mesh(cell.lattice_vectors(), ke_guess)
if numpy.any(mesh < mesh_guess*.8):
logger.warn(mydf, 'mesh %s is not enough for AFTDF.get_nuc function '
'to get integral accuracy %g.\nRecommended mesh is %s.',
mesh, cell.precision, mesh_guess)
mesh_min = numpy.min((mesh_guess, mesh), axis=0)
if cell.dimension < 2 or cell.low_dim_ft_type == 'inf_vacuum':
mesh[:cell.dimension] = mesh_min[:cell.dimension]
else:
mesh = mesh_min
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
nuccell = _compensate_nuccell(mydf)
# PP-loc part1 is handled by fakenuc in _int_nuc_vloc
vj = lib.asarray(mydf._int_nuc_vloc(nuccell, kpts_lst))
t0 = t1 = log.timer_debug1('vnuc pass1: analytic int', *t0)
coulG = tools.get_coulG(cell, kpt_allow, mesh=mesh, Gv=Gv) * kws
aoaux = ft_ao.ft_ao(nuccell, Gv)
vG = numpy.einsum('i,xi->x', -charges, aoaux) * coulG
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0])
for aoaoks, p0, p1 in mydf.ft_loop(mesh, kpt_allow, kpts_lst,
max_memory=max_memory, aosym='s2'):
for k, aoao in enumerate(aoaoks):
# rho_ij(G) nuc(-G) / G^2
# = [Re(rho_ij(G)) + Im(rho_ij(G))*1j] [Re(nuc(G)) - Im(nuc(G))*1j] / G^2
if gamma_point(kpts_lst[k]):
vj[k] += numpy.einsum('k,kx->x', vG[p0:p1].real, aoao.real)
vj[k] += numpy.einsum('k,kx->x', vG[p0:p1].imag, aoao.imag)
else:
vj[k] += numpy.einsum('k,kx->x', vG[p0:p1].conj(), aoao)
t1 = log.timer_debug1('contracting Vnuc [%s:%s]'%(p0, p1), *t1)
log.timer_debug1('contracting Vnuc', *t0)
vj_kpts = []
for k, kpt in enumerate(kpts_lst):
if gamma_point(kpt):
vj_kpts.append(lib.unpack_tril(vj[k].real.copy()))
else:
vj_kpts.append(lib.unpack_tril(vj[k]))
if kpts is None or numpy.shape(kpts) == (3,):
vj_kpts = vj_kpts[0]
return numpy.asarray(vj_kpts)
def _int_nuc_vloc(mydf, nuccell, kpts, intor='int3c2e', aosym='s2', comp=1):
'''Vnuc - Vloc'''
cell = mydf.cell
nkpts = len(kpts)
# Use the 3c2e code with steep s gaussians to mimic nuclear density
fakenuc = _fake_nuc(cell)
fakenuc._atm, fakenuc._bas, fakenuc._env = \
gto.conc_env(nuccell._atm, nuccell._bas, nuccell._env,
fakenuc._atm, fakenuc._bas, fakenuc._env)
kptij_lst = numpy.hstack((kpts,kpts)).reshape(-1,2,3)
buf = incore.aux_e2(cell, fakenuc, intor, aosym=aosym, comp=comp,
kptij_lst=kptij_lst)
charge = cell.atom_charges()
charge = numpy.append(charge, -charge) # (charge-of-nuccell, charge-of-fakenuc)
nao = cell.nao_nr()
nchg = len(charge)
if aosym == 's1':
nao_pair = nao**2
else:
nao_pair = nao*(nao+1)//2
if comp == 1:
buf = buf.reshape(nkpts,nao_pair,nchg)
mat = numpy.einsum('kxz,z->kx', buf, charge)
else:
buf = buf.reshape(nkpts,comp,nao_pair,nchg)
mat = numpy.einsum('kcxz,z->kcx', buf, charge)
# vbar is the interaction between the background charge
# and the compensating function. 0D, 1D, 2D do not have vbar.
if cell.dimension == 3 and intor in ('int3c2e', 'int3c2e_sph',
'int3c2e_cart'):
assert(comp == 1)
charge = -cell.atom_charges()
nucbar = sum([z/nuccell.bas_exp(i)[0] for i,z in enumerate(charge)])
nucbar *= numpy.pi/cell.vol
ovlp = cell.pbc_intor('int1e_ovlp', 1, lib.HERMITIAN, kpts)
for k in range(nkpts):
if aosym == 's1':
mat[k] -= nucbar * ovlp[k].reshape(nao_pair)
else:
mat[k] -= nucbar * lib.pack_tril(ovlp[k])
return mat
def get_pp(mydf, kpts=None):
'''Get the periodic pseudotential nuc-el AO matrix, with G=0 removed.
'''
cell = mydf.cell
if kpts is None:
kpts_lst = numpy.zeros((1,3))
else:
kpts_lst = numpy.reshape(kpts, (-1,3))
nkpts = len(kpts_lst)
vloc1 = get_pp_loc_part1(mydf, kpts_lst)
vloc2 = pseudo.pp_int.get_pp_loc_part2(cell, kpts_lst)
vpp = pseudo.pp_int.get_pp_nl(cell, kpts_lst)
for k in range(nkpts):
vpp[k] += vloc1[k] + vloc2[k]
if kpts is None or numpy.shape(kpts) == (3,):
vpp = vpp[0]
return vpp
def weighted_coulG(mydf, kpt=numpy.zeros(3), exx=False, mesh=None):
cell = mydf.cell
if mesh is None:
mesh = mydf.mesh
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
coulG = tools.get_coulG(cell, kpt, exx, mydf, mesh, Gv)
coulG *= kws
return coulG
class AFTDF(lib.StreamObject):
'''Density expansion on plane waves
'''
def __init__(self, cell, kpts=numpy.zeros((1,3))):
self.cell = cell
self.stdout = cell.stdout
self.verbose = cell.verbose
self.max_memory = cell.max_memory
self.mesh = cell.mesh
# For nuclear attraction integrals using Ewald-like technique.
# Set to 0 to switch off Ewald tech and use the regular reciprocal space
# method (solving Poisson equation of nuclear charges in reciprocal space).
if cell.dimension == 0:
self.eta = 0.2
else:
ke_cutoff = tools.mesh_to_cutoff(cell.lattice_vectors(), self.mesh)
ke_cutoff = ke_cutoff[:cell.dimension].min()
self.eta = max(estimate_eta_for_ke_cutoff(cell, ke_cutoff, cell.precision),
estimate_eta(cell, cell.precision))
self.kpts = kpts
# to mimic molecular DF object
self.blockdim = getattr(__config__, 'pbc_df_df_DF_blockdim', 240)
# The following attributes are not input options.
self.exxdiv = None # to mimic KRHF/KUHF object in function get_coulG
self._keys = set(self.__dict__.keys())
def dump_flags(self, verbose=None):
logger.info(self, '\n')
logger.info(self, '******** %s ********', self.__class__)
logger.info(self, 'mesh = %s (%d PWs)', self.mesh, numpy.prod(self.mesh))
logger.info(self, 'eta = %s', self.eta)
logger.info(self, 'len(kpts) = %d', len(self.kpts))
logger.debug1(self, ' kpts = %s', self.kpts)
return self
def check_sanity(self):
lib.StreamObject.check_sanity(self)
cell = self.cell
if not cell.has_ecp():
logger.warn(self, 'AFTDF integrals are found in all-electron '
'calculation. It often causes huge error.\n'
'Recommended methods are DF or MDF. In SCF calculation, '
'they can be initialized as\n'
' mf = mf.density_fit()\nor\n'
' mf = mf.mix_density_fit()')
if cell.dimension > 0:
if cell.ke_cutoff is None:
ke_cutoff = tools.mesh_to_cutoff(cell.lattice_vectors(), self.mesh)
ke_cutoff = ke_cutoff[:cell.dimension].min()
else:
ke_cutoff = numpy.min(cell.ke_cutoff)
ke_guess = estimate_ke_cutoff(cell, cell.precision)
mesh_guess = tools.cutoff_to_mesh(cell.lattice_vectors(), ke_guess)
if ke_cutoff < ke_guess * KE_SCALING:
logger.warn(self, 'ke_cutoff/mesh (%g / %s) is not enough for AFTDF '
'to get integral accuracy %g.\nCoulomb integral error '
'is ~ %.2g Eh.\nRecommended ke_cutoff/mesh are %g / %s.',
ke_cutoff, self.mesh, cell.precision,
error_for_ke_cutoff(cell, ke_cutoff), ke_guess, mesh_guess)
else:
mesh_guess = numpy.copy(self.mesh)
if cell.dimension < 3:
err = numpy.exp(-0.436392335*min(self.mesh[cell.dimension:]) - 2.99944305)
err *= cell.nelectron
meshz = pbcgto.cell._mesh_inf_vaccum(cell)
mesh_guess[cell.dimension:] = int(meshz)
if err > cell.precision*10:
logger.warn(self, 'mesh %s of AFTDF may not be enough to get '
'integral accuracy %g for %dD PBC system.\n'
'Coulomb integral error is ~ %.2g Eh.\n'
'Recommended mesh is %s.',
self.mesh, cell.precision, cell.dimension, err, mesh_guess)
if (cell.mesh[cell.dimension:]/(1.*meshz) > 1.1).any():
meshz = pbcgto.cell._mesh_inf_vaccum(cell)
logger.warn(self, 'setting mesh %s of AFTDF too high in non-periodic direction '
'(=%s) can result in an unnecessarily slow calculation.\n'
'For coulomb integral error of ~ %.2g Eh in %dD PBC, \n'
'a recommended mesh for non-periodic direction is %s.',
self.mesh, self.mesh[cell.dimension:], cell.precision,
cell.dimension, mesh_guess[cell.dimension:])
return self
# TODO: Put Gv vector in the arguments
def pw_loop(self, mesh=None, kpti_kptj=None, q=None, shls_slice=None,
max_memory=2000, aosym='s1', blksize=None,
intor='GTO_ft_ovlp', comp=1):
'''
Fourier transform iterator for AO pair
'''
cell = self.cell
if mesh is None:
mesh = self.mesh
if kpti_kptj is None:
kpti = kptj = numpy.zeros(3)
else:
kpti, kptj = kpti_kptj
if q is None:
q = kptj - kpti
ao_loc = cell.ao_loc_nr()
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
b = cell.reciprocal_vectors()
gxyz = lib.cartesian_prod([numpy.arange(len(x)) for x in Gvbase])
ngrids = gxyz.shape[0]
if shls_slice is None:
shls_slice = (0, cell.nbas, 0, cell.nbas)
if aosym == 's2':
assert(shls_slice[2] == 0)
i0 = ao_loc[shls_slice[0]]
i1 = ao_loc[shls_slice[1]]
nij = i1*(i1+1)//2 - i0*(i0+1)//2
else:
ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]
nij = ni*nj
if blksize is None:
blksize = min(max(64, int(max_memory*1e6*.75/(nij*16*comp))), 16384)
sublk = int(blksize//4)
else:
sublk = blksize
buf = numpy.empty(nij*blksize*comp, dtype=numpy.complex128)
pqkRbuf = numpy.empty(nij*sublk*comp)
pqkIbuf = numpy.empty(nij*sublk*comp)
for p0, p1 in self.prange(0, ngrids, blksize):
#aoao = ft_ao.ft_aopair(cell, Gv[p0:p1], shls_slice, aosym,
# b, Gvbase, gxyz[p0:p1], mesh, (kpti, kptj), q)
aoao = ft_ao._ft_aopair_kpts(cell, Gv[p0:p1], shls_slice, aosym,
b, gxyz[p0:p1], Gvbase, q,
kptj.reshape(1,3), intor, comp, out=buf)[0]
aoao = aoao.reshape(p1-p0,nij)
for i0, i1 in lib.prange(0, p1-p0, sublk):
nG = i1 - i0
if comp == 1:
pqkR = numpy.ndarray((nij,nG), buffer=pqkRbuf)
pqkI = numpy.ndarray((nij,nG), buffer=pqkIbuf)
pqkR[:] = aoao[i0:i1].real.T
pqkI[:] = aoao[i0:i1].imag.T
else:
pqkR = numpy.ndarray((comp,nij,nG), buffer=pqkRbuf)
pqkI = numpy.ndarray((comp,nij,nG), buffer=pqkIbuf)
pqkR[:] = aoao[i0:i1].real.transpose(0,2,1)
pqkI[:] = aoao[i0:i1].imag.transpose(0,2,1)
yield (pqkR, pqkI, p0+i0, p0+i1)
def ft_loop(self, mesh=None, q=numpy.zeros(3), kpts=None, shls_slice=None,
max_memory=4000, aosym='s1', intor='GTO_ft_ovlp', comp=1):
'''
Fourier transform iterator for all kpti which satisfy
2pi*N = (kpts - kpti - q)*a, N = -1, 0, 1
'''
cell = self.cell
if mesh is None:
mesh = self.mesh
if kpts is None:
assert(is_zero(q))
kpts = self.kpts
kpts = numpy.asarray(kpts)
nkpts = len(kpts)
ao_loc = cell.ao_loc_nr()
b = cell.reciprocal_vectors()
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
gxyz = lib.cartesian_prod([numpy.arange(len(x)) for x in Gvbase])
ngrids = gxyz.shape[0]
if shls_slice is None:
shls_slice = (0, cell.nbas, 0, cell.nbas)
if aosym == 's2':
assert(shls_slice[2] == 0)
i0 = ao_loc[shls_slice[0]]
i1 = ao_loc[shls_slice[1]]
nij = i1*(i1+1)//2 - i0*(i0+1)//2
else:
ni = ao_loc[shls_slice[1]] - ao_loc[shls_slice[0]]
nj = ao_loc[shls_slice[3]] - ao_loc[shls_slice[2]]
nij = ni*nj
blksize = max(16, int(max_memory*.9e6/(nij*nkpts*16*comp)))
blksize = min(blksize, ngrids, 16384)
buf = numpy.empty(nkpts*nij*blksize*comp, dtype=numpy.complex128)
for p0, p1 in self.prange(0, ngrids, blksize):
dat = ft_ao._ft_aopair_kpts(cell, Gv[p0:p1], shls_slice, aosym,
b, gxyz[p0:p1], Gvbase, q, kpts,
intor, comp, out=buf)
yield dat, p0, p1
weighted_coulG = weighted_coulG
_int_nuc_vloc = _int_nuc_vloc
get_nuc = get_nuc
get_pp = get_pp
# Note: Special exxdiv by default should not be used for an arbitrary
# input density matrix. When the df object was used with the molecular
# post-HF code, get_jk was often called with an incomplete DM (e.g. the
# core DM in CASCI). An SCF level exxdiv treatment is inadequate for
# post-HF methods.
def get_jk(self, dm, hermi=1, kpts=None, kpts_band=None,
with_j=True, with_k=True, exxdiv=None):
if kpts is None:
if numpy.all(self.kpts == 0):
# Gamma-point calculation by default
kpts = numpy.zeros(3)
else:
kpts = self.kpts
if kpts.shape == (3,):
return aft_jk.get_jk(self, dm, hermi, kpts, kpts_band, with_j,
with_k, exxdiv)
vj = vk = None
if with_k:
vk = aft_jk.get_k_kpts(self, dm, hermi, kpts, kpts_band, exxdiv)
if with_j:
vj = aft_jk.get_j_kpts(self, dm, hermi, kpts, kpts_band)
return vj, vk
get_eri = get_ao_eri = aft_ao2mo.get_eri
ao2mo = get_mo_eri = aft_ao2mo.general
ao2mo_7d = aft_ao2mo.ao2mo_7d
get_ao_pairs_G = get_ao_pairs = aft_ao2mo.get_ao_pairs_G
get_mo_pairs_G = get_mo_pairs = aft_ao2mo.get_mo_pairs_G
def update_mf(self, mf):
mf = copy.copy(mf)
mf.with_df = self
return mf
def prange(self, start, stop, step):
'''This is a hook for MPI parallelization. DO NOT use it out of the
scope of AFTDF/GDF/MDF.
'''
return lib.prange(start, stop, step)
################################################################################
# With this function to mimic the molecular DF.loop function, the pbc gamma
# point DF object can be used in the molecular code
def loop(self, blksize=None):
cell = self.cell
if cell.dimension == 2 and cell.low_dim_ft_type != 'inf_vacuum':
raise RuntimeError('ERIs of PBC-2D systems are not positive '
'definite. Current API only supports postive '
'definite ERIs.')
if blksize is None:
blksize = self.blockdim
# coulG of 1D and 2D has negative elements.
coulG = self.weighted_coulG()
Lpq = None
for pqkR, pqkI, p0, p1 in self.pw_loop(aosym='s2', blksize=blksize):
vG =
|
numpy.sqrt(coulG[p0:p1])
|
numpy.sqrt
|
"""Defines SinglePath MDP and utils."""
from __future__ import print_function
from __future__ import division
import numpy as np
def sample(mdp, pi):
"""Generate a trajectory from mdp with pi."""
done = False
obs = mdp.reset()
G = 0
path = {}
path['obs'] = []
path['acts'] = []
path['rews'] = []
while not done:
action = np.random.choice(2, p=pi[obs])
path['obs'].append(obs)
path['acts'].append(action)
obs, rew, done = mdp.step(action)
G += rew
path['rews'].append(rew)
return path, G
def evaluate(mdp, pi):
"""Run value iteration to evaluate policy exactly."""
P = mdp.transitions
L = mdp.L
S = mdp.n_states
A = mdp.n_actions
V =
|
np.zeros((L + 1, S + 1))
|
numpy.zeros
|
import unittest
import six
import tensorflow as tf
import numpy as np
import GPflow
from GPflow import settings
float_type = settings.dtypes.float_type
np_float_type = np.float32 if float_type is tf.float32 else np.float64
class TestSetup(object):
def __init__(self, likelihood, Y, tolerance):
self.likelihood, self.Y, self.tolerance = likelihood, Y, tolerance
self.is_analytic = six.get_unbound_function(likelihood.predict_density) is not\
six.get_unbound_function(GPflow.likelihoods.Likelihood.predict_density)
def getTestSetups(includeMultiClass=True, addNonStandardLinks=False):
test_setups = []
rng = np.random.RandomState(1)
for likelihoodClass in GPflow.likelihoods.Likelihood.__subclasses__():
if likelihoodClass == GPflow.likelihoods.Ordinal:
test_setups.append(TestSetup(likelihoodClass(np.array([-1, 1])), rng.randint(0, 3, (10, 2)), 1e-6))
elif likelihoodClass == GPflow.likelihoods.SwitchedLikelihood:
continue # switched likelihood tested separately
elif (likelihoodClass == GPflow.likelihoods.MultiClass):
if includeMultiClass:
sample = rng.randn(10, 2)
# Multiclass needs a less tight tolerance due to presence of clipping.
tolerance = 1e-3
test_setups.append(TestSetup(likelihoodClass(2), np.argmax(sample, 1).reshape(-1, 1), tolerance))
else:
# most likelihoods follow this standard:
test_setups.append(TestSetup(likelihoodClass(), rng.rand(10, 2).astype(np_float_type), 1e-6))
if addNonStandardLinks:
test_setups.append(TestSetup(GPflow.likelihoods.Poisson(invlink=tf.square),
rng.rand(10, 2).astype(np_float_type), 1e-6))
test_setups.append(TestSetup(GPflow.likelihoods.Exponential(invlink=tf.square),
rng.rand(10, 2).astype(np_float_type), 1e-6))
test_setups.append(TestSetup(GPflow.likelihoods.Gamma(invlink=tf.square),
rng.rand(10, 2).astype(np_float_type), 1e-6))
def sigmoid(x):
return 1./(1 + tf.exp(-x))
test_setups.append(TestSetup(GPflow.likelihoods.Bernoulli(invlink=sigmoid),
rng.rand(10, 2).astype(np_float_type), 1e-6))
return test_setups
class TestPredictConditional(unittest.TestCase):
"""
Here we make sure that the conditional_mean and contitional_var functions
give the same result as the predict_mean_and_var function if the prediction
has no uncertainty.
"""
def setUp(self):
tf.reset_default_graph()
self.test_setups = getTestSetups(addNonStandardLinks=True)
self.x = tf.placeholder(float_type)
for test_setup in self.test_setups:
test_setup.likelihood.make_tf_array(self.x)
self.F = tf.placeholder(float_type)
rng = np.random.RandomState(0)
self.F_data = rng.randn(10, 2).astype(np_float_type)
def test_mean(self):
for test_setup in self.test_setups:
l = test_setup.likelihood
with l.tf_mode():
mu1 = tf.Session().run(l.conditional_mean(self.F),
feed_dict={self.x: l.get_free_state(), self.F: self.F_data})
mu2, _ = tf.Session().run(l.predict_mean_and_var(self.F, self.F * 0),
feed_dict={self.x: l.get_free_state(), self.F: self.F_data})
self.assertTrue(np.allclose(mu1, mu2, test_setup.tolerance, test_setup.tolerance))
def test_variance(self):
for test_setup in self.test_setups:
l = test_setup.likelihood
with l.tf_mode():
v1 = tf.Session().run(l.conditional_variance(self.F),
feed_dict={self.x: l.get_free_state(), self.F: self.F_data})
v2 = tf.Session().run(l.predict_mean_and_var(self.F, self.F * 0)[1],
feed_dict={self.x: l.get_free_state(), self.F: self.F_data})
self.assertTrue(np.allclose(v1, v2, atol=test_setup.tolerance))
def test_var_exp(self):
"""
Here we make sure that the variational_expectations gives the same result
as logp if the latent function has no uncertainty.
"""
for test_setup in self.test_setups:
l = test_setup.likelihood
y = test_setup.Y
with l.tf_mode():
r1 = tf.Session().run(l.logp(self.F, y), feed_dict={self.x: l.get_free_state(), self.F: self.F_data})
r2 = tf.Session().run(l.variational_expectations(self.F, self.F * 0, test_setup.Y),
feed_dict={self.x: l.get_free_state(), self.F: self.F_data})
self.assertTrue(np.allclose(r1, r2, test_setup.tolerance, test_setup.tolerance))
class TestQuadrature(unittest.TestCase):
"""
Where quadratre methods have been overwritten, make sure the new code
does something close to the quadrature
"""
def setUp(self):
tf.reset_default_graph()
self.rng = np.random.RandomState()
self.Fmu, self.Fvar, self.Y = self.rng.randn(3, 10, 2).astype(np_float_type)
self.Fvar = 0.01 * self.Fvar ** 2
self.test_setups = getTestSetups(includeMultiClass=False)
def test_var_exp(self):
# get all the likelihoods where variational expectations has been overwritten
for test_setup in self.test_setups:
if not test_setup.is_analytic:
continue
l = test_setup.likelihood
y = test_setup.Y
x_data = l.get_free_state()
x = tf.placeholder(float_type)
l.make_tf_array(x)
# 'build' the functions
with l.tf_mode():
F1 = l.variational_expectations(self.Fmu, self.Fvar, y)
F2 = GPflow.likelihoods.Likelihood.variational_expectations(l, self.Fmu, self.Fvar, y)
# compile and run the functions:
F1 = tf.Session().run(F1, feed_dict={x: x_data})
F2 = tf.Session().run(F2, feed_dict={x: x_data})
self.assertTrue(np.allclose(F1, F2, test_setup.tolerance, test_setup.tolerance))
def test_pred_density(self):
# get all the likelihoods where predict_density has been overwritten.
for test_setup in self.test_setups:
if not test_setup.is_analytic:
continue
l = test_setup.likelihood
y = test_setup.Y
x_data = l.get_free_state()
# make parameters if needed
x = tf.placeholder(float_type)
l.make_tf_array(x)
# 'build' the functions
with l.tf_mode():
F1 = l.predict_density(self.Fmu, self.Fvar, y)
F2 = GPflow.likelihoods.Likelihood.predict_density(l, self.Fmu, self.Fvar, y)
# compile and run the functions:
F1 = tf.Session().run(F1, feed_dict={x: x_data})
F2 = tf.Session().run(F2, feed_dict={x: x_data})
self.assertTrue(np.allclose(F1, F2, test_setup.tolerance, test_setup.tolerance))
class TestRobustMaxMulticlass(unittest.TestCase):
"""
Some specialized tests to the multiclass likelihood with RobustMax inverse link function.
"""
def setUp(self):
tf.reset_default_graph()
def testSymmetric(self):
"""
This test is based on the observation that for
symmetric inputs the class predictions must have equal probability.
"""
nClasses = 5
nPoints = 10
tolerance = 1e-4
epsilon = 1e-3
F = tf.placeholder(float_type)
x = tf.placeholder(float_type)
F_data = np.ones((nPoints, nClasses))
l = GPflow.likelihoods.MultiClass(nClasses)
l.invlink.epsilon = epsilon
rng = np.random.RandomState(1)
Y = rng.randint(nClasses, size=(nPoints, 1))
with l.tf_mode():
mu, _ = tf.Session().run(l.predict_mean_and_var(F, F), feed_dict={x: l.get_free_state(), F: F_data})
pred = tf.Session().run(l.predict_density(F, F, Y), feed_dict={x: l.get_free_state(), F: F_data})
variational_expectations = tf.Session().run(l.variational_expectations(F, F, Y),
feed_dict={x: l.get_free_state(), F: F_data})
expected_mu = (1./nClasses * (1. - epsilon) + (1. - 1./nClasses) * epsilon / (nClasses - 1)) * np.ones((nPoints, 1))
self.assertTrue(np.allclose(mu, expected_mu, tolerance, tolerance))
expected_log_denisty = np.log(expected_mu)
self.assertTrue(np.allclose(pred, expected_log_denisty, 1e-3, 1e-3))
validation_variational_expectation = 1./nClasses * np.log(1. - epsilon) + \
(1. - 1./nClasses) * np.log(epsilon / (nClasses - 1))
self.assertTrue(np.allclose(variational_expectations,
np.ones((nPoints, 1)) * validation_variational_expectation,
tolerance, tolerance))
def testPredictDensity(self):
tol = 1e-4
num_points = 100
mock_prob = 0.73
class MockRobustMax(GPflow.likelihoods.RobustMax):
def prob_is_largest(self, Y, Fmu, Fvar, gh_x, gh_w):
return tf.ones((num_points, 1)) * mock_prob
epsilon = 0.231
num_classes = 5
l = GPflow.likelihoods.MultiClass(num_classes, invlink=MockRobustMax(num_classes, epsilon))
F = tf.placeholder(float_type)
y = tf.placeholder(float_type)
F_data = np.ones((num_points, num_classes))
rng = np.random.RandomState(1)
Y_data = rng.randint(num_classes, size=(num_points, 1))
with l.tf_mode():
pred = tf.Session().run(l.predict_density(F, F, y), feed_dict={F: F_data, y: Y_data})
expected_prediction = -0.5499780059
#^ evaluated on calculator: log( (1-\epsilon) * 0.73 + (1-0.73) * \epsilon/(num_classes -1))
self.assertTrue(np.allclose(pred, expected_prediction, tol, tol))
class TestMulticlassIndexFix(unittest.TestCase):
"""
A regression test for a bug in multiclass likelihood.
"""
def setUp(self):
tf.reset_default_graph()
def testA(self):
mu, var = tf.placeholder(float_type), tf.placeholder(float_type)
Y = tf.placeholder(tf.int32)
lik = GPflow.likelihoods.MultiClass(3)
ve = lik.variational_expectations(mu, var, Y)
tf.gradients(tf.reduce_sum(ve), mu)
class TestSwitchedLikelihood(unittest.TestCase):
"""
SwitchedLikelihood is saparately tested here.
Here, we make sure the partition-stictch works fine.
"""
def setUp(self):
rng = np.random.RandomState(1)
self.Y_list = [rng.randn(3, 2), rng.randn(4, 2), rng.randn(5, 2)]
self.F_list = [rng.randn(3, 2), rng.randn(4, 2), rng.randn(5, 2)]
self.Fvar_list = [np.exp(rng.randn(3, 2)), np.exp(rng.randn(4, 2)), np.exp(rng.randn(5, 2))]
self.Y_label = [np.ones((3, 1))*0, np.ones((4, 1))*1, np.ones((5, 1))*2]
self.Y_perm = list(range(3+4+5))
rng.shuffle(self.Y_perm)
# shuffle the original data
self.Y_sw = np.hstack([np.concatenate(self.Y_list),
|
np.concatenate(self.Y_label)
|
numpy.concatenate
|
"""Test the text data reader"""
import numpy as np
import pytest
import tensorflow as tf
from src.data.text_data_reader import Set, TextDataReader
def test_initialization():
dr = TextDataReader()
assert dr.name == "text"
assert dr.folder == "data/train/text"
for set_type in [Set.TRAIN, Set.VAL, Set.TEST]:
assert dr.file_map[set_type]
def test_reading():
dr = TextDataReader(folder="tests/test_data/text")
assert dr.folder == "tests/test_data/text"
dataset = dr.get_emotion_data("neutral_ekman", Set.TRAIN, batch_size=5)
assert isinstance(dataset, tf.data.Dataset)
batch = 0
for texts, labels in dataset:
batch += 1
assert texts.numpy().shape == (5, 1)
assert labels.numpy().shape == (5, 7)
for text in texts.numpy():
text = str(text)
assert len(text) > 5
for label in labels.numpy():
assert label.shape == (7,)
assert np.sum(label) == 1
assert batch == 6
with pytest.raises(ValueError):
_ = dr.get_emotion_data("wrong")
def test_reading_three():
dr = TextDataReader(folder="tests/test_data/text")
assert dr.folder == "tests/test_data/text"
dataset = dr.get_emotion_data("three", Set.TRAIN, batch_size=4)
assert isinstance(dataset, tf.data.Dataset)
batch = 0
for texts, labels in dataset:
batch += 1
if batch <= 7:
assert texts.numpy().shape == (4, 1)
assert labels.numpy().shape == (4, 3)
for text in texts.numpy():
text = str(text)
assert len(text) > 5
for label in labels.numpy():
assert label.shape == (3,)
assert np.sum(label) == 1
elif batch == 8:
assert texts.numpy().shape == (2, 1)
assert labels.numpy().shape == (2, 3)
for text in texts.numpy():
text = str(text)
assert len(text) > 5
for label in labels.numpy():
assert label.shape == (3,)
assert np.sum(label) == 1
assert batch == 8
def test_labels():
dr = TextDataReader(folder="tests/test_data/text")
dataset = dr.get_emotion_data(
"neutral_ekman", Set.TRAIN, batch_size=5, parameters={"shuffle": False}
)
dataset_labels = np.empty((0,))
dataset_data = np.empty((0, 1))
dataset_raw_labels = np.empty((0, 7))
for data, labels in dataset:
dataset_data = np.concatenate([dataset_data, data.numpy()], axis=0)
labels = labels.numpy()
dataset_raw_labels = np.concatenate(
[dataset_raw_labels, labels], axis=0
)
labels = np.argmax(labels, axis=1)
assert labels.shape == (5,)
dataset_labels = np.concatenate([dataset_labels, labels], axis=0)
true_labels = dr.get_labels(Set.TRAIN)
assert true_labels.shape == (30,)
assert dataset_labels.shape == (30,)
assert np.array_equal(true_labels, dataset_labels)
d_data, d_labels = TextDataReader.convert_to_numpy(dataset)
assert np.array_equal(d_data, dataset_data)
assert np.array_equal(d_labels, dataset_raw_labels)
# Now with shuffle
dataset = dr.get_emotion_data(
"neutral_ekman", Set.TRAIN, batch_size=5, parameters={"shuffle": True}
)
dataset_labels = np.empty((0,))
for _, labels in dataset:
labels = labels.numpy()
labels = np.argmax(labels, axis=1)
assert labels.shape == (5,)
dataset_labels =
|
np.concatenate([dataset_labels, labels], axis=0)
|
numpy.concatenate
|
"""Tests for chebyshev module.
"""
from functools import reduce
import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
def trim(x):
return cheb.chebtrim(x, tol=1e-6)
T0 = [1]
T1 = [0, 1]
T2 = [-1, 0, 2]
T3 = [0, -3, 0, 4]
T4 = [1, 0, -8, 0, 8]
T5 = [0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [0, -7, 0, 56, 0, -112, 0, 64]
T8 = [1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestPrivate:
def test__cseries_to_zseries(self):
for i in range(5):
inp = np.array([2] + [1]*i, np.double)
tgt = np.array([.5]*i + [2] + [.5]*i, np.double)
res = cheb._cseries_to_zseries(inp)
assert_equal(res, tgt)
def test__zseries_to_cseries(self):
for i in range(5):
inp = np.array([.5]*i + [2] + [.5]*i, np.double)
tgt = np.array([2] + [1]*i, np.double)
res = cheb._zseries_to_cseries(inp)
assert_equal(res, tgt)
class TestConstants:
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
def test_chebzero(self):
assert_equal(cheb.chebzero, [0])
def test_chebone(self):
assert_equal(cheb.chebone, [1])
def test_chebx(self):
assert_equal(cheb.chebx, [0, 1])
class TestArithmetic:
def test_chebadd(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = cheb.chebadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebsub(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = cheb.chebsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebmulx(self):
assert_equal(cheb.chebmulx([0]), [0])
assert_equal(cheb.chebmulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [.5, 0, .5]
assert_equal(cheb.chebmulx(ser), tgt)
def test_chebmul(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
res = cheb.chebmul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebdiv(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = cheb.chebadd(ci, cj)
quo, rem = cheb.chebdiv(tgt, ci)
res = cheb.chebadd(cheb.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebpow(self):
for i in range(5):
for j in range(5):
msg = f"At i={i}, j={j}"
c = np.arange(i + 1)
tgt = reduce(cheb.chebmul, [c]*j, np.array([1]))
res = cheb.chebpow(c, j)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_chebval(self):
#check empty input
assert_equal(cheb.chebval([], [1]).size, 0)
#check normal input)
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Tlist]
for i in range(10):
msg = f"At i={i}"
tgt = y[i]
res = cheb.chebval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(cheb.chebval(x, [1]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0]).shape, dims)
assert_equal(cheb.chebval(x, [1, 0, 0]).shape, dims)
def test_chebval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = cheb.chebval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_chebval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = cheb.chebval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_chebgrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = cheb.chebgrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_chebgrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = cheb.chebgrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = cheb.chebgrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral:
def test_chebint(self):
# check exceptions
assert_raises(TypeError, cheb.chebint, [0], .5)
assert_raises(ValueError, cheb.chebint, [0], -1)
assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0])
assert_raises(ValueError, cheb.chebint, [0], lbnd=[0])
assert_raises(ValueError, cheb.chebint, [0], scl=[0])
assert_raises(TypeError, cheb.chebint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = cheb.chebint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i])
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(cheb.chebval(-1, chebint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
chebpol = cheb.poly2cheb(pol)
chebint = cheb.chebint(chebpol, m=1, k=[i], scl=2)
res = cheb.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1)
res = cheb.chebint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k])
res = cheb.chebint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k], lbnd=-1)
res = cheb.chebint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = cheb.chebint(tgt, m=1, k=[k], scl=2)
res = cheb.chebint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_chebint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([cheb.chebint(c) for c in c2d.T]).T
res = cheb.chebint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebint(c) for c in c2d])
res = cheb.chebint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebint(c, k=3) for c in c2d])
res = cheb.chebint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative:
def test_chebder(self):
# check exceptions
assert_raises(TypeError, cheb.chebder, [0], .5)
assert_raises(ValueError, cheb.chebder, [0], -1)
# check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = cheb.chebder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = cheb.chebder(cheb.chebint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = cheb.chebder(cheb.chebint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_chebder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([cheb.chebder(c) for c in c2d.T]).T
res = cheb.chebder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([cheb.chebder(c) for c in c2d])
res = cheb.chebder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_chebvander(self):
# check for 1d x
x = np.arange(3)
v = cheb.chebvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], cheb.chebval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = cheb.chebvander(x, 3)
|
assert_(v.shape == (3, 2, 4))
|
numpy.testing.assert_
|
import time
import torch
import random
import numpy as np
from tqdm import tqdm, trange
# from torch_geometric.nn import GCNConv
from layers_batch import AttentionModule, TenorNetworkModule
from utils import *
from tensorboardX import SummaryWriter
# from warmup_scheduler import GradualWarmupScheduler
import os
import dgcnn as dgcnn
import torch.nn as nn
from collections import OrderedDict
from sklearn import metrics
class SG(torch.nn.Module):
"""
SimGNN: A Neural Network Approach to Fast Graph Similarity Computation
https://arxiv.org/abs/1808.05689
"""
def __init__(self, args, number_of_labels):
"""
:param args: Arguments object.
:param number_of_labels: Number of node labels.
"""
super(SG, self).__init__()
self.args = args
self.number_labels = number_of_labels
self.setup_layers()
def calculate_bottleneck_features(self):
"""
Deciding the shape of the bottleneck layer.
"""
self.feature_count = self.args.tensor_neurons
def setup_layers(self):
"""
Creating the layers.
"""
self.calculate_bottleneck_features()
self.attention = AttentionModule(self.args)
self.tensor_network = TenorNetworkModule(self.args)
self.fully_connected_first = torch.nn.Linear(self.feature_count, self.args.bottle_neck_neurons)
self.scoring_layer = torch.nn.Linear(self.args.bottle_neck_neurons, 1)
bias_bool = False # TODO
self.dgcnn_s_conv1 = nn.Sequential(
nn.Conv2d(3*2, self.args.filters_1, kernel_size=1, bias=bias_bool),
nn.BatchNorm2d(self.args.filters_1),
nn.LeakyReLU(negative_slope=0.2))
self.dgcnn_f_conv1 = nn.Sequential(
nn.Conv2d(self.number_labels * 2, self.args.filters_1, kernel_size=1, bias=bias_bool),
nn.BatchNorm2d(self.args.filters_1),
nn.LeakyReLU(negative_slope=0.2))
self.dgcnn_s_conv2 = nn.Sequential(
nn.Conv2d(self.args.filters_1*2, self.args.filters_2, kernel_size=1, bias=bias_bool),
nn.BatchNorm2d(self.args.filters_2),
nn.LeakyReLU(negative_slope=0.2))
self.dgcnn_f_conv2 = nn.Sequential(
nn.Conv2d(self.args.filters_1 * 2, self.args.filters_2, kernel_size=1, bias=bias_bool),
nn.BatchNorm2d(self.args.filters_2),
nn.LeakyReLU(negative_slope=0.2))
self.dgcnn_s_conv3 = nn.Sequential(
nn.Conv2d(self.args.filters_2*2, self.args.filters_3, kernel_size=1, bias=bias_bool),
nn.BatchNorm2d(self.args.filters_3),
nn.LeakyReLU(negative_slope=0.2))
self.dgcnn_f_conv3 = nn.Sequential(
nn.Conv2d(self.args.filters_2 * 2, self.args.filters_3, kernel_size=1, bias=bias_bool),
nn.BatchNorm2d(self.args.filters_3),
nn.LeakyReLU(negative_slope=0.2))
self.dgcnn_conv_end = nn.Sequential(nn.Conv1d(self.args.filters_3 * 2,
self.args.filters_3, kernel_size=1, bias=bias_bool),
nn.BatchNorm1d(self.args.filters_3), nn.LeakyReLU(negative_slope=0.2))
def dgcnn_conv_pass(self, x):
self.k = self.args.K
xyz = x[:,:3,:] # Bx3xN
sem = x[:,3:,:] # BxfxN
xyz = dgcnn.get_graph_feature(xyz, k=self.k, cuda=self.args.cuda) #Bx6xNxk
xyz = self.dgcnn_s_conv1(xyz)
xyz1 = xyz.max(dim=-1, keepdim=False)[0]
xyz = dgcnn.get_graph_feature(xyz1, k=self.k, cuda=self.args.cuda)
xyz = self.dgcnn_s_conv2(xyz)
xyz2 = xyz.max(dim=-1, keepdim=False)[0]
xyz = dgcnn.get_graph_feature(xyz2, k=self.k, cuda=self.args.cuda)
xyz = self.dgcnn_s_conv3(xyz)
xyz3 = xyz.max(dim=-1, keepdim=False)[0]
sem = dgcnn.get_graph_feature(sem, k=self.k, cuda=self.args.cuda) # Bx2fxNxk
sem = self.dgcnn_f_conv1(sem)
sem1 = sem.max(dim=-1, keepdim=False)[0]
sem = dgcnn.get_graph_feature(sem1, k=self.k, cuda=self.args.cuda)
sem = self.dgcnn_f_conv2(sem)
sem2 = sem.max(dim=-1, keepdim=False)[0]
sem = dgcnn.get_graph_feature(sem2, k=self.k, cuda=self.args.cuda)
sem = self.dgcnn_f_conv3(sem)
sem3 = sem.max(dim=-1, keepdim=False)[0]
x = torch.cat((xyz3, sem3), dim=1)
# x = self.dgcnn_conv_all(x)
x = self.dgcnn_conv_end(x)
# print(x.shape)
x = x.permute(0, 2, 1) # [node_num, 32]
return x
def forward(self, data):
"""
Forward pass with graphs.
:param data: Data dictionary.
:return score: Similarity score.
"""
features_1 = data["features_1"].cuda(self.args.gpu)
features_2 = data["features_2"].cuda(self.args.gpu)
# features B x (3+label_num) x node_num
abstract_features_1 = self.dgcnn_conv_pass(features_1) # node_num x feature_size(filters-3)
abstract_features_2 = self.dgcnn_conv_pass(features_2) #BXNXF
# print("abstract feature: ", abstract_features_1.shape)
pooled_features_1, attention_scores_1 = self.attention(abstract_features_1) # bxfx1
pooled_features_2, attention_scores_2 = self.attention(abstract_features_2)
# print("pooled_features_1: ", pooled_features_1.shape)
scores = self.tensor_network(pooled_features_1, pooled_features_2)
# print("scores: ", scores.shape)
scores = scores.permute(0,2,1) # bx1xf
# print("scores: ", scores.shape)
scores = torch.nn.functional.relu(self.fully_connected_first(scores))
# print("scores: ", scores.shape)
score = torch.sigmoid(self.scoring_layer(scores)).reshape(-1)
# print("scores: ", score.shape)
return score, attention_scores_1, attention_scores_2
class SGTrainer(object):
"""
SG model trainer.
"""
def __init__(self, args, train=True):
"""
:param args: Arguments object.
"""
self.args = args
self.model_pth = self.args.model
self.initial_label_enumeration(train)
self.setup_model(train)
self.writer = SummaryWriter(logdir=self.args.logdir)
def setup_model(self,train=True):
"""
Creating a SG Net.
"""
self.model = SG(self.args, self.number_of_labels)
if (not train) and self.model_pth != "":
print("loading model: ", self.model_pth)
# original saved file with dataparallel
state_dict = torch.load(self.model_pth, map_location='cuda:'+str(self.args.gpu)) #'cuda:0'
# create new dict that does not contain 'module'
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove 'module'
new_state_dict[name] = v
# load params
self.model.load_state_dict(new_state_dict)
self.model = torch.nn.DataParallel(self.model, device_ids=[self.args.gpu])
self.model.cuda(self.args.gpu)
def initial_label_enumeration(self,train=True):
"""
Collecting the unique node idsentifiers.
"""
print("\nEnumerating unique labels.\n")
if train:
self.training_graphs = []
self.testing_graphs = []
self.evaling_graphs = []
train_sequences = self.args.train_sequences
eval_sequences = self.args.eval_sequences
print("Train sequences: ", train_sequences)
print("evaling sequences: ", eval_sequences)
graph_pairs_dir = self.args.graph_pairs_dir
for sq in train_sequences:
train_graphs=load_paires(os.path.join(self.args.pair_list_dir, sq+".txt"),graph_pairs_dir)
self.training_graphs.extend(train_graphs)
for sq in eval_sequences:
self.evaling_graphs=load_paires(os.path.join(self.args.pair_list_dir, sq+".txt"),graph_pairs_dir)
self.testing_graphs = self.evaling_graphs
assert len(self.evaling_graphs) != 0
assert len(self.training_graphs) != 0
self.global_labels = [i for i in range(12)] # 20
self.global_labels = {val: index for index, val in enumerate(self.global_labels)}
self.number_of_labels = len(self.global_labels)
self.keepnode = self.args.keep_node
print(self.global_labels)
print(self.number_of_labels)
def create_batches(self, split="train"):
"""
Creating batches from the training graph list.
:return batches: List of lists with batches.
"""
if split == "train":
random.shuffle(self.training_graphs)
batches = [self.training_graphs[graph:graph + self.args.batch_size] for graph in
range(0, len(self.training_graphs), self.args.batch_size)]
else:
random.shuffle(self.evaling_graphs)
batches = [self.evaling_graphs[graph:graph + self.args.batch_size] for graph in
range(0, len(self.evaling_graphs), self.args.batch_size)]
return batches
def augment_data(self,batch_xyz_1):
# batch_xyz_1 = flip_point_cloud(batch_xyz_1)
batch_xyz_1 = rotate_point_cloud(batch_xyz_1)
batch_xyz_1 = jitter_point_cloud(batch_xyz_1)
batch_xyz_1 = random_scale_point_cloud(batch_xyz_1)
batch_xyz_1 = rotate_perturbation_point_cloud(batch_xyz_1)
batch_xyz_1 = shift_point_cloud(batch_xyz_1)
return batch_xyz_1
def pc_normalize(self, pc):
""" pc: NxC, return NxC """
l = pc.shape[0]
centroid = np.mean(pc, axis=0)
pc = pc - centroid
m = np.max(np.sqrt(np.sum(pc**2, axis=1)))
pc = pc / m
return pc
def transfer_to_torch(self, data, training=True):
"""
Transferring the data to torch and creating a hash table with the indices, features and target.
:param data: Data dictionary.
:return new_data: Dictionary of Torch Tensors.
"""
# data_ori = data.copy()
# print("data[edge1]: ", data["edges_1"]) # debug
node_num_1 = len(data["nodes_1"])
node_num_2 = len(data["nodes_2"])
if node_num_1 > self.args.node_num:
sampled_index_1 = np.random.choice(node_num_1, self.args.node_num, replace=False)
sampled_index_1.sort()
data["nodes_1"] = np.array(data["nodes_1"])[sampled_index_1].tolist()
data["centers_1"] = np.array(data["centers_1"])[sampled_index_1]
elif node_num_1 < self.args.node_num:
data["nodes_1"] = np.concatenate(
(np.array(data["nodes_1"]), -np.ones(self.args.node_num - node_num_1))).tolist() # padding 0
data["centers_1"] = np.concatenate(
(np.array(data["centers_1"]), np.zeros((self.args.node_num - node_num_1,3)))) # padding 0
if node_num_2 > self.args.node_num:
sampled_index_2 = np.random.choice(node_num_2, self.args.node_num, replace=False)
sampled_index_2.sort()
data["nodes_2"] = np.array(data["nodes_2"])[sampled_index_2].tolist()
data["centers_2"] = np.array(data["centers_2"])[sampled_index_2] # node_num x 3
elif node_num_2 < self.args.node_num:
data["nodes_2"] = np.concatenate((np.array(data["nodes_2"]), -np.ones(self.args.node_num - node_num_2))).tolist()
data["centers_2"] = np.concatenate(
(np.array(data["centers_2"]), np.zeros((self.args.node_num - node_num_2, 3)))) # padding 0
new_data = dict()
features_1 = np.expand_dims(np.array(
[np.zeros(self.number_of_labels).tolist() if node == -1 else [
1.0 if self.global_labels[node] == label_index else 0 for label_index in self.global_labels.values()]
for node in data["nodes_1"]]), axis=0)
features_2 = np.expand_dims(np.array(
[np.zeros(self.number_of_labels).tolist() if node == -1 else [
1.0 if self.global_labels[node] == label_index else 0 for label_index in self.global_labels.values()]
for node in data["nodes_2"]]), axis=0)
# 1xnode_numx3
batch_xyz_1 = np.expand_dims(data["centers_1"], axis=0)
batch_xyz_2 = np.expand_dims(data["centers_2"], axis=0)
if training:
# random flip data
if random.random() > 0.5:
batch_xyz_1[:,:,0] = -batch_xyz_1[:,:,0]
batch_xyz_2[:, :, 0] = -batch_xyz_2[:, :, 0]
batch_xyz_1 = self.augment_data(batch_xyz_1)
batch_xyz_2 = self.augment_data(batch_xyz_2)
# Bxnum_nodex(3+num_label) -> Bx(3+num_label)xnum_node
xyz_feature_1 = np.concatenate((batch_xyz_1, features_1), axis=2).transpose(0,2,1)
xyz_feature_2 = np.concatenate((batch_xyz_2, features_2), axis=2).transpose(0,2,1)
new_data["features_1"] = np.squeeze(xyz_feature_1)
new_data["features_2"] = np.squeeze(xyz_feature_2)
if data["distance"] <= self.args.p_thresh: # TODO
new_data["target"] = 1.0
elif data["distance"] >= 20:
new_data["target"] = 0.0
else:
new_data["target"] = -100.0
print("distance error: ", data["distance"])
exit(-1)
return new_data
def process_batch(self, batch, training=True):
"""
Forward pass with a batch of data.
:param batch: Batch of graph pair locations.
:return loss: Loss on the batch.
"""
self.optimizer.zero_grad()
losses = 0
batch_target = []
batch_feature_1 = []
batch_feature_2 = []
for graph_pair in batch:
data = process_pair(graph_pair)
data = self.transfer_to_torch(data, training)
batch_feature_1.append(data["features_1"])
batch_feature_2.append(data["features_2"])
batch_feature_1.append(data["features_2"])
batch_feature_2.append(data["features_1"])
target = data["target"]
batch_target.append(target)
batch_target.append(target)
data = dict()
data["features_1"] = torch.FloatTensor(np.array(batch_feature_1))
data["features_2"] = torch.FloatTensor(np.array(batch_feature_2))
data["target"] = torch.FloatTensor(np.array(batch_target))
prediction, _,_ = self.model(data)
losses = torch.mean(torch.nn.functional.binary_cross_entropy(prediction, data["target"].cuda(self.args.gpu)))
if training:
losses.backward(retain_graph=True)
self.optimizer.step()
loss = losses.item()
pred_batch = prediction.cpu().detach().numpy().reshape(-1)
gt_batch = data["target"].cpu().detach().numpy().reshape(-1)
return loss, pred_batch, gt_batch
def fit(self):
"""
Fitting a model.
"""
print("\nModel training.\n")
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate,
weight_decay=self.args.weight_decay)
f1_max_his = 0
self.model.train()
epochs = trange(self.args.epochs, leave=True, desc="Epoch")
for epoch in epochs:
batches = self.create_batches()
self.model.train()
self.loss_sum = 0
main_index = 0
for index, batch in tqdm(enumerate(batches), total=len(batches), desc="Batches"):
a = time.time()
loss_score,_,_ = self.process_batch(batch)
main_index = main_index + len(batch)
self.loss_sum = self.loss_sum + loss_score * len(batch)
loss = self.loss_sum / main_index
epochs.set_description("Epoch (Loss=%g)" % round(loss, 5))
self.writer.add_scalar('Train_sum', loss, int(epoch)*len(batches)*int(self.args.batch_size) + main_index)
self.writer.add_scalar('Train loss', loss_score, int(epoch) * len(batches)*int(self.args.batch_size) + main_index)
if epoch % 2 == 0:
print("\nModel saving.\n")
loss, f1_max = self.score("eval")
self.writer.add_scalar("eval_loss", loss, int(epoch)*len(batches)*int(self.args.batch_size))
self.writer.add_scalar("f1_max_score", f1_max, int(epoch) * len(batches) * int(self.args.batch_size))
dict_name = self.args.logdir + "/" + str(epoch)+'.pth'
torch.save(self.model.state_dict(), dict_name)
if f1_max_his <= f1_max:
f1_max_his = f1_max
dict_name = self.args.logdir + "/" + str(epoch)+"_best" + '.pth'
torch.save(self.model.state_dict(), dict_name)
print("\n best model saved ", dict_name)
print("------------------------------")
def score(self, split = 'test'):
"""
Scoring on the test set.
"""
print("\n\nModel evaluation.\n")
self.model.eval()
self.scores = []
self.ground_truth = []
if split == "test":
splits = self.testing_graphs
elif split == "eval":
splits = self.evaling_graphs
else:
print("Check split: ", split)
splits = []
exit(-1)
losses = 0
pred_db = []
gt_db = []
batches = self.create_batches(split="eval")
for index, batch in tqdm(enumerate(batches), total=len(batches), desc="Eval Batches"):
loss_score,pred_b,gt_b = self.process_batch(batch, False)
losses += loss_score
pred_db.extend(pred_b)
gt_db.extend(gt_b)
precision, recall, pr_thresholds = metrics.precision_recall_curve(gt_db, pred_db)
# calc F1-score
F1_score = 2 * precision * recall / (precision + recall)
F1_score = np.nan_to_num(F1_score)
F1_max_score = np.max(F1_score)
print("\nModel " + split + " F1_max_score: " + str(F1_max_score) + ".")
model_loss = losses / len(batches)
print("\nModel " + split + " loss: " + str(model_loss) + ".")
return model_loss, F1_max_score
def print_evaluation(self):
"""
Printing the error rates.
"""
norm_ged_mean = np.mean(self.ground_truth)
base_error = np.mean([(n - norm_ged_mean) ** 2 for n in self.ground_truth])
model_error = np.mean(self.scores)
print("\nBaseline error: " + str(round(base_error, 5)) + ".")
print("\nModel test error: " + str(round(model_error, 5)) + ".")
def eval_pair(self, pair_file):
data = (pair_file)
data = self.transfer_to_torch(data, False)
target = data["target"]
batch_target = []
batch_feature_1 = []
batch_feature_2 = []
batch_feature_1.append(data["features_1"])
batch_feature_2.append(data["features_2"])
batch_target.append(target)
data_torch = dict()
data_torch["features_1"] = torch.FloatTensor(np.array(batch_feature_1))
data_torch["features_2"] = torch.FloatTensor(np.array(batch_feature_2))
data_torch["target"] = torch.FloatTensor(np.array(batch_target))
self.model.eval()
result_1, result_2,result_3 = self.model(data_torch)
prediction = result_1.cpu().detach().numpy().reshape(-1)
att_weights_1 = result_2.cpu().detach().numpy().reshape(-1)
att_weights_2 = result_3.cpu().detach().numpy().reshape(-1)
# print("prediction shape: ", prediction.shape)
return prediction, att_weights_1, att_weights_2
def eval_batch_pair(self, batch):
self.model.eval()
batch_target = []
batch_feature_1 = []
batch_feature_2 = []
for graph_pair in batch:
data = process_pair(graph_pair)
data = self.transfer_to_torch(data, False)
batch_feature_1.append(data["features_1"])
batch_feature_2.append(data["features_2"])
target = data["target"]
batch_target.append(target)
data = dict()
data["features_1"] = torch.FloatTensor(np.array(batch_feature_1))
data["features_2"] = torch.FloatTensor(np.array(batch_feature_2))
data["target"] = torch.FloatTensor(np.array(batch_target))
prediction, _, _ = self.model(data)
prediction = prediction.cpu().detach().numpy().reshape(-1)
gt = np.array(batch_target).reshape(-1)
return prediction, gt
def eval_batch_pair_data(self, batch):
self.model.eval()
batch_target = []
batch_feature_1 = []
batch_feature_2 = []
for graph_pair in batch:
data = self.transfer_to_torch(graph_pair, False)
batch_feature_1.append(data["features_1"])
batch_feature_2.append(data["features_2"])
target = data["target"]
batch_target.append(target)
data = dict()
data["features_1"] = torch.FloatTensor(np.array(batch_feature_1))
data["features_2"] = torch.FloatTensor(np.array(batch_feature_2))
data["target"] = torch.FloatTensor(np.array(batch_target))
forward_t = time.time()
prediction, _, _ = self.model(data)
print("forward time: ", time.time() - forward_t)
prediction = prediction.cpu().detach().numpy().reshape(-1)
gt = np.array(batch_target).reshape(-1)
return prediction, gt
def eval_batch_pair(self, batch):
self.model.eval()
batch_target = []
batch_feature_1 = []
batch_feature_2 = []
for graph_pair in batch:
data = process_pair(graph_pair)
data = self.transfer_to_torch(data, False)
batch_feature_1.append(data["features_1"])
batch_feature_2.append(data["features_2"])
target = data["target"]
batch_target.append(target)
data = dict()
data["features_1"] = torch.FloatTensor(np.array(batch_feature_1))
data["features_2"] = torch.FloatTensor(np.array(batch_feature_2))
data["target"] = torch.FloatTensor(
|
np.array(batch_target)
|
numpy.array
|
import pandas as pd
import numpy as np
import spacy
from tqdm import tqdm
from collections import defaultdict
nlp = spacy.load("en_core_sci_lg", disable=['ner', 'parser'])
path = "../data/"
def tokenize(string):
doc = nlp.make_doc(string)
words = [token.text.lower() for token in doc if token.is_alpha and not token.is_stop and len(token.text) > 1 ]
return words
def tokenization(train_data):
tokenized_texts = []
#print("Tokenization....")
for _, row in train_data.iterrows():
text = str(row['Abstract'])
#text = str(row['Title']) + ' ' + str(row['Abstract'])
words = tokenize(text)
tokenized_texts.append(words)
return tokenized_texts
# TFIDF (Term frequency and inverse document frequency)
def get_word_stat(tokenized_texts):
'''Words counts in documents
finds in how many documents this word
is present
'''
texts_number = len(tokenized_texts)
#print("Word Stat....")
word2text_count = defaultdict(int)
for text in tokenized_texts:
uniquewords = set(text)
for word in uniquewords:
word2text_count[word] +=1
return word2text_count
def get_doc_tfidf(words, word2text_count, N):
num_words = len(words)
word2tfidf = defaultdict(int)
for word in words:
if word2text_count[word] > 0:
idf = np.log(N/(word2text_count[word]))
word2tfidf[word] += (1/num_words) * idf
else:
word2tfidf[word] = 1
return word2tfidf
def create_pmi_dict(tokenized_texts, targets, min_count=5):
#print("PMI dictionary ....")
np.seterr(divide = 'ignore')
# words count
d = {0:defaultdict(int), 1:defaultdict(int), 'tot':defaultdict(int)}
for idx, words in enumerate(tokenized_texts):
target = targets[idx]
for w in words:
d[ target ][w] += 1
Dictionary = set(list(d[0].keys()) + list(d[1].keys()))
d['tot'] = {w:d[0][w] + d[1][w] for w in Dictionary}
# pmi calculation
N_0 = sum(d[0].values())
N_1 = sum(d[1].values())
d[0] = {w: -np.log((v/N_0 + 10**(-15)) / (0.5 * d['tot'][w]/(N_0 + N_1))) / np.log(v/N_0 + 10**(-15))
for w, v in d[0].items() if d['tot'][w] > min_count}
d[1] = {w: -np.log((v/N_1+ 10**(-15)) / (0.5 * d['tot'][w]/(N_0 + N_1))) / np.log(v/N_1 + 10**(-15))
for w, v in d[1].items() if d['tot'][w] > min_count}
del d['tot']
return d
def calc_collinearity(word, words_dict, n=10):
new_word_emb = nlp(word).vector
pmi_new = 0
max_pmis_words = sorted(list(words_dict.items()), key=lambda x: x[1], reverse=True)[:n]
for w, pmi in max_pmis_words:
w_emb = nlp(w).vector
cos_similarity = \
|
np.dot(w_emb, new_word_emb)
|
numpy.dot
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.