import numpy as np
from numpy import linalg as LA
import sys
import librosa
from scipy import linalg
import copy
import random
from math import log
# import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import multiprocessing
import logging
import argparse
def sampleS(S, k):
sample = []
if len(S) <= k:
return S
while len(sample) < k:
new = S[random.randint(0, len(S) - 1)]
        if new not in sample:
sample.append(new)
return sample
def buffer(signal, L, M):
if M >= L:
        logging.error(
'Error: Overlapping windows cannot be larger than frame length!')
sys.exit()
#
len_signal = len(signal)
#
logging.info('The signal length is %s: ' % (len_signal))
#
K = np.ceil(len_signal / L).astype('int') # num_frames
#
logging.info('The number of frames \'K\' is %s: ' % (K))
logging.info('The length of each frame \'L\' is %s: ' % (L))
#
X_tmp = []
k = 1
while (True):
start_ind = ((k - 1) * (L - M) + 1) - 1
end_ind = ((k * L) - (k - 1) * M)
if start_ind == len_signal:
break
elif (end_ind > len_signal):
# logging.info(('k=%s, [%s, %s] ' % (k, start_ind, end_ind - 1))
val_in = len_signal - start_ind
tmp_seg = np.zeros(L)
tmp_seg[:val_in] = signal[start_ind:]
X_tmp.append(tmp_seg)
break
else:
# logging.info(('k=%s, [%s, %s] ' % (k, start_ind, end_ind - 1))
X_tmp.append(signal[start_ind:end_ind])
k += 1
#
return X_tmp
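# Minimal usage sketch of buffer() (illustrative only, not part of the original
# pipeline): a toy signal of length 10 framed into windows of length L=4 with
# overlap M=2, i.e. a hop of L - M = 2 samples; the final frame is zero-padded.
def _demo_buffer():
    signal = np.arange(10)
    frames = buffer(signal, L=4, M=2)
    # frames == [0 1 2 3], [2 3 4 5], [4 5 6 7], [6 7 8 9], [8 9 0 0]
    return np.vstack(frames)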
def unbuffer(X, hop):
N, L = X.shape
#
T = N + L * hop
K = np.arange(0, N)
x = np.zeros(T)
H = np.hanning(N)
    for k in range(0, L):
x[K] = x[K] + np.multiply(H, X[:, k])
K = K + hop
#
return x
class SpeechDenoise:
def __init__(
self, X, params, M, signal=[]
): # X is the np.vstacked transpose of the buffered signal (buffered==split up into overlapping windows)
        self.meaningfulNodes = list(range(
            X.shape[1]))  # this is pretty much the same thing as self.I
self.X = X
self.D = []
self.params = params
self.n_iter = self.params['rule_1']['n_iter'] # num_iterations
        self.error = self.params['rule_2']['error']  # error threshold (stopping rule 2)
#
self.verbose = self.params['verbose']
#
# THE following K and L were typo/switched in the GAD.py code. they're fixed here:
self.K = self.X.shape[0] # sample length
self.L = self.X.shape[
1] # maximum atoms to be learned (i.e. size of ground set)
#
self.I = np.arange(
0, self.L
) # self.I is the ground set of elements (dictionary atoms) we can choose
self.set_ind = []
self.k_min_sum = 0.0
# Initializating the residual matrix 'R' by using 'X'
self.R = self.X.copy()
# The following are (sortof) optional.
# we use the following 3 instance variables to calculate RMSE after each iter
self.M = M
        self.signal = signal  # we leave this empty unless we actually want to do the RMSE, which is computationally
        # intense and also requires sending the (big) signal across the line...
self.rmse = [] # to hold RMSE after each iter
# and this one to plot solution quality over time
self.k_min_data = []
def function(self, S, big_number=25.0):
# Note: this only works for f(S); it will NOT work on any input except S. to do that, would need to find
# the elements in the function's argument that are not in S, then iteratively add to the value we return.
return (len(S) * big_number - self.k_min_sum)
def functionMarg_quickestimate(self,
new_elements,
curr_elements,
big_number=25.0):
new_elems = [ele for ele in new_elements if ele not in curr_elements]
if not len(new_elems):
return (0)
# This is a bit of a hack...
# Actually, the below version is unfair/inaccurate, as we should really update R by orthogonalizing
# after we add each column. Here, I'm treating the function as a modular function within each
# round of adaptive sampling (and submodular across rounds) which is entertainingly ridiculous.
# BUT if it works at de-noising, then I don't care :)
# NOTE that in the original GAD code, self.k_min_sum is similar to what I'm calling sum_of_norm_ratios.
new_elems = [ele for ele in new_elements if ele not in curr_elements]
R_copy = copy.copy(self.R)
#sum_of_norm_ratios = np.sum(LA.norm(R_copy[:, new_elems], 1)) / np.sum([LA.norm(R_copy[:, I_ind_k_min], 2) for I_ind_k_min in new_elems])
sum_of_norm_ratios = np.sum([
LA.norm(R_copy[:, I_ind_k_min], 1) / LA.norm(
R_copy[:, I_ind_k_min], 2) for I_ind_k_min in new_elems
])
return (len(new_elems) * big_number - sum_of_norm_ratios)
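    # Why the l1/l2 ratio: for a residual column r in R^K we always have
    # 1 <= ||r||_1 / ||r||_2 <= sqrt(K); the lower bound is reached by 1-sparse
    # vectors and the upper bound by flat ones.  Illustrative example:
    # r = [1, 0, 0, 0] gives 1/1 = 1.0, while r = [1, 1, 1, 1] gives 4/2 = 2.0.
    # Subtracting these ratios from big_number therefore rewards samples whose
    # residual columns are close to sparse, the same sparsity criterion GAD
    # uses to pick dictionary atoms.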
def functionMarg(self, new_elements, curr_elements, big_number=25.0):
# This is the more correct (but slower and more complicated) functionMarg. See note in other simpler version above.
# NOTE: IT ASSUMES THAT S IS THE CURRENT S. IT WILL BE WRONG WHEN input S is NOT the current solution!!!
new_elems = [ele for ele in new_elements if ele not in curr_elements]
if not len(new_elems):
return (0)
# Copy everything important... we have to update them iteratively for each ele in the new sample, but
# we might not use this sample so can't change the originals...
R_copy = copy.copy(self.R)
#print self.R.shape, '=self.R.shape'
D_copy = copy.copy(self.D)
I_copy = copy.copy(self.I)
k_min_sum_copy = copy.copy(self.k_min_sum)
        set_ind_copy = copy.copy(self.set_ind)  # copy so a rejected sample does not mutate self.set_ind
marginal_k_min_sum_copy = 0
# New elements we compute marginal value for
new_elems = [ele for ele in new_elements if ele not in curr_elements]
# do the GAD find_column() routine, but we're not trying to find a new column; we're evaluating
# the quality of ones in the set we sampled. Basically, that means checking for changes in k_min_sum_copy.
#
for I_ind_k_min in new_elems:
sample_avg_k_min = 0
r_k = R_copy[:, I_ind_k_min]
#
k_min = LA.norm(r_k, 1) / LA.norm(r_k, 2)
#logging.info('k_min inside a sample is %s: ' % k_min)
sample_avg_k_min += k_min
#
marginal_k_min_sum_copy = marginal_k_min_sum_copy + k_min
k_min_sum_copy = k_min_sum_copy + k_min
#
r_k_min = R_copy[:, I_ind_k_min]
#
# Set the l-th atom to equal to normalized r_k
psi = r_k_min / LA.norm(r_k_min, 2)
#
# Add to the dictionary D and its index and shrinking set I
D_copy.append(psi)
set_ind_copy.append(I_ind_k_min)
# Compute the new residual for all columns k
for kk in I_copy:
r_kk = R_copy[:, kk]
alpha = np.dot(r_kk, psi)
R_copy[:, kk] = r_kk - np.dot(psi, alpha)
#
I_copy = np.delete(I_copy, [I_ind_k_min])
#print 'sample avg k_min = ', sample_avg_k_min/np.float(len(new_elems))
#logging.info('marginal_k_min_sum_copy of a sample is %s: ' % marginal_k_min_sum_copy)
#logging.info('some sample val is %s: ' % ( - marginal_k_min_sum_copy))
#logging.info('big number is %s: ' % ( big_number))
#logging.info('len(new_elems) %s: ' % ( len(new_elems)))
return (len(new_elems) * big_number - marginal_k_min_sum_copy)
def adaptiveSampling_adam(f,
k,
numSamples,
r,
opt,
alpha1,
alpha2,
compute_rmse=False,
speed_over_accuracy=False,
parallel=False):
# This large uncommented script is not complicated enough, so here we go:
if speed_over_accuracy:
def functionMarg_closure(new_elements, curr_elements, big_number=25.0):
return f.functionMarg_quickestimate(
new_elements, curr_elements, big_number=25.0)
else:
def functionMarg_closure(new_elements, curr_elements, big_number=25.0):
return f.functionMarg(new_elements, curr_elements, big_number=25.0)
S = copy.deepcopy(f.meaningfulNodes)
X = []
while len(X) < k and len(S + X) > k:
currentVal = f.function(X)
logging.info([
currentVal, 'ground set remaining:',
len(S), 'size of current solution:',
len(X)
])
samples = []
samplesVal = []
# PARALLELIZE THIS LOOP it is emb. parallel
def sample_elements(samples, samplesVal):
#logging.info(len(S), 'is len(S)'
            sample = sampleS(S, k // r)  # integer sample size
#logging.info(len(S), 'is len(S);', k/r, 'is k/r', k,'is k', r, 'is r', len(sample), 'is len sample'
sampleVal = functionMarg_closure(sample, X)
samplesVal.append(sampleVal)
samples.append(sample)
if parallel:
manager = multiprocessing.Manager()
samples = manager.list()
samplesVal = manager.list()
jobs = []
for i in range(numSamples):
p = multiprocessing.Process(
target=sample_elements, args=(samples, samplesVal))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
samples = list(samples)
samplesVal = list(samplesVal)
else:
samples = []
samplesVal = []
for i in range(numSamples):
sample_elements(samples, samplesVal)
maxSampleVal = max(samplesVal)
#print 'max sample val / len', maxSampleVal/np.float(k/r), 'avg sample val', np.mean(samplesVal)/np.float(k/r)
#print 'max sample val / len', maxSampleVal, 'avg sample val', np.mean(samplesVal)
bestSample = samples[samplesVal.index(maxSampleVal)]
if maxSampleVal >= (opt - currentVal) / (alpha1 * float(r)):
X += bestSample
#logging.info(len(X), 'is len(X)'
for element in bestSample:
S.remove(element)
#logging.info(len(S), 'is len(S) after removing an element from best sample'
# Now we need to do some bookkeeping just for the audio de-noising objective:
for I_ind_k_min in bestSample:
r_k_min = f.R[:, I_ind_k_min]
#tmp.append(LA.norm(r_k, 1) / LA.norm(r_k, 2))
#
#k_min = tmp[ind_k_min]
k_min = LA.norm(r_k_min, 1) / LA.norm(r_k_min, 2)
#print 'k_min added to soln', k_min
# print 'k_min in best', k_min
f.k_min_data.append(k_min) # This is just for logging purposes
#
f.k_min_sum = f.k_min_sum + k_min
#logging.info(k_min
#
#r_k_min = f.R[:, I_ind_k_min]
#
# Set the l-th atom to equal to normalized r_k
                psi = r_k_min / LA.norm(r_k_min, 2)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 16 23:34:59 2020
@author: mlampert
"""
import os
import pandas
import copy
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.gridspec import GridSpec
import pickle
import numpy as np
import matplotlib.cm as cm
#Flap imports
import flap
import flap_nstx
flap_nstx.register()
import flap_mdsplus
#Setting up FLAP
flap_mdsplus.register('NSTX_MDSPlus')
thisdir = os.path.dirname(os.path.realpath(__file__))
fn = os.path.join(thisdir,"../flap_nstx.cfg")
flap.config.read(file_name=fn)
#Plot settings for publications
publication=True
if publication:
#figsize=(8.5/2.54,
# 8.5/2.54/1.618*1.1)
figsize=(17/2.54,10/2.54)
plt.rc('font', family='serif', serif='Helvetica')
labelsize=6
linewidth=0.5
major_ticksize=2
plt.rc('text', usetex=False)
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['lines.linewidth'] = linewidth
plt.rcParams['axes.linewidth'] = linewidth
plt.rcParams['axes.labelsize'] = labelsize
plt.rcParams['axes.titlesize'] = labelsize
plt.rcParams['xtick.labelsize'] = labelsize
plt.rcParams['xtick.major.size'] = major_ticksize
plt.rcParams['xtick.major.width'] = linewidth
plt.rcParams['xtick.minor.width'] = linewidth/2
plt.rcParams['xtick.minor.size'] = major_ticksize/2
plt.rcParams['ytick.labelsize'] = labelsize
plt.rcParams['ytick.major.width'] = linewidth
plt.rcParams['ytick.major.size'] = major_ticksize
plt.rcParams['ytick.minor.width'] = linewidth/2
plt.rcParams['ytick.minor.size'] = major_ticksize/2
plt.rcParams['legend.fontsize'] = labelsize
else:
figsize=None
#TODO
#These are for a different analysis and a different method
#define pre ELM time
#define ELM burst time
#define the post ELM time based on the ELM burst time
#Calculate the average, maximum and the variance of the results in those time ranges
#Calculate the averaged velocity trace around the ELM time
#Calculate the correlation coefficients between the +-tau us time range around the ELM time
#Classify the ELMs based on the correlation coefficents
def plot_nstx_gpi_velocity_distribution(window_average=500e-6,
tau_range=[-500e-6,500e-6],
sampling_time=2.5e-6,
pdf=False,
plot=True,
return_results=False,
return_error=False,
plot_variance=True,
plot_error=False,
normalized_velocity=True,
normalized_structure=True,
subtraction_order=4,
opacity=0.2,
correlation_threshold=0.6,
plot_max_only=False,
plot_for_publication=False,
gpi_plane_calculation=True,
plot_scatter=True,
elm_time_base='frame similarity',
n_hist=50,
min_max_range=False,
nocalc=False,
general_plot=True,
plot_for_velocity=False,
plot_for_structure=False,
plot_for_dependence=False,
structure_finding_method='contour',
interpolation=False,
):
if elm_time_base not in ['frame similarity', 'radial velocity']:
raise ValueError('elm_time_base should be either "frame similarity" or "radial velocity"')
wd=flap.config.get_all_section('Module NSTX_GPI')['Working directory']
all_results_file=wd+'/processed_data/all_results_file.pickle'
database_file=wd+'/db/ELM_findings_mlampert_velocity_good.csv'
db=pandas.read_csv(database_file, index_col=0)
elm_index=list(db.index)
n_elm=len(elm_index)
nwin=int(window_average/sampling_time)
time_vec=(np.arange(2*nwin)*sampling_time-window_average)*1e3
all_results={'Velocity ccf':np.zeros([2*nwin,n_elm,2]),
'Velocity str max':np.zeros([2*nwin,n_elm,2]),
'Acceleration ccf':np.zeros([2*nwin,n_elm,2]),
'Frame similarity':np.zeros([2*nwin,n_elm]),
'Correlation max':np.zeros([2*nwin,n_elm]),
'Size max':np.zeros([2*nwin,n_elm,2]),
'Position max':np.zeros([2*nwin,n_elm,2]),
'Separatrix dist max':np.zeros([2*nwin,n_elm]),
'Centroid max':np.zeros([2*nwin,n_elm,2]),
'COG max':np.zeros([2*nwin,n_elm,2]),
'Area max':np.zeros([2*nwin,n_elm]),
'Elongation max':np.zeros([2*nwin,n_elm]),
'Angle max':np.zeros([2*nwin,n_elm]),
'Str number':np.zeros([2*nwin,n_elm]),
'GPI Dalpha':np.zeros([2*nwin,n_elm]),
}
hist_range_dict={'Velocity ccf':{'Radial':[-5e3,15e3],
'Poloidal':[-25e3,5e3]},
'Velocity str max':{'Radial':[-10e3,20e3],
'Poloidal':[-20e3,10e3]},
'Size max':{'Radial':[0,0.1],
'Poloidal':[0,0.1]},
'Separatrix dist max':[-0.05,0.150],
'Elongation max':[-0.5,0.5],
'Str number':[-0.5,10.5],
}
n_hist_str=int(hist_range_dict['Str number'][1]-hist_range_dict['Str number'][0])*4
result_histograms={'Velocity ccf':np.zeros([2*nwin,n_hist,2]),
'Velocity str max':np.zeros([2*nwin,n_hist,2]),
'Size max':np.zeros([2*nwin,n_hist,2]),
'Separatrix dist max':np.zeros([2*nwin,n_hist]),
                       'Elongation max':np.zeros([2*nwin,n_hist]),
from operator import add, sub
import numpy as np
from scipy.stats import norm
class Elora:
def __init__(self, times, labels1, labels2, values, biases=0):
"""
Elo regressor algorithm for paired comparison time series prediction
Author: <NAME>
Args:
times (array of np.datetime64): comparison datetimes
labels1 (array of str): comparison labels for first entity
labels2 (array of str): comparison labels for second entity
values (array of float): comparison outcome values
biases (array of float or scalar, optional): comparison bias
corrections
Attributes:
examples (np.recarray): time-sorted numpy record array of
(time, label1, label2, bias, value, value_pred) samples
first_update_time (np.datetime64): time of the first comparison
last_update_time (np.datetime64): time of the last comparison
labels (array of string): unique compared entity labels
median_value (float): median expected comparison value
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels1 = np.array(labels1, dtype='str', ndmin=1)
labels2 = np.array(labels2, dtype='str', ndmin=1)
values = np.array(values, dtype='float', ndmin=1)
if np.isscalar(biases):
biases = np.full_like(times, biases, dtype='float')
else:
biases = np.array(biases, dtype='float', ndmin=1)
self.first_update_time = times.min()
self.last_update_time = times.max()
self.labels = np.union1d(labels1, labels2)
self.median_value = np.median(values)
prior = self.median_value * np.ones_like(values, dtype=float)
self.examples = np.sort(
np.rec.fromarrays([
times,
labels1,
labels2,
biases,
values,
prior,
], names=(
'time',
'label1',
'label2',
'bias',
'value',
'value_pred'
)), order=['time', 'label1', 'label2'], axis=0)
@property
def initial_rating(self):
"""
Customize this function for a given subclass.
It computes the initial rating, equal to the rating one would
expect if all labels were interchangeable.
Default behavior is to return one-half the median outcome value
if the labels commute, otherwise 0.
"""
return .5*self.median_value if self.commutes else 0
def regression_coeff(self, elapsed_time):
"""
Customize this function for a given subclass.
It computes the regression coefficient—prefactor multiplying the
rating of each team evaluated at each update—as a function of
elapsed time since the last rating update for that label.
Default behavior is to return 1, i.e. no rating regression.
"""
return 1.0
def evolve_rating(self, rating, elapsed_time):
"""
        Evolves the given rating forward by the elapsed time, applying rating
        regression toward the initial rating if necessary, and returns the
        evolved rating.
        Args:
            rating (float): rating at the time of its last update
            elapsed_time (np.timedelta64): time elapsed since that update
        Returns:
            rating (float): evolved rating
"""
regress = self.regression_coeff(elapsed_time)
return regress * rating + (1.0 - regress) * self.initial_rating
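    # Illustrative example (numbers are assumptions, not from the source): if
    # regression_coeff returned 0.75 for some elapsed_time, a rating of 8.0
    # with an initial_rating of 2.0 would evolve to
    # 0.75 * 8.0 + 0.25 * 2.0 = 6.5, i.e. the rating decays part of the way
    # back toward the prior between updates.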
def fit(self, k, commutes, scale=1, burnin=0):
"""
        Primary routine that performs model calibration by sweeping once, in
        time order, through the comparison examples.
        Args:
            k (float): coefficient that multiplies the prediction error to
                determine the rating update.
            commutes (bool): false if the observed values change sign under
                label interchange and true otherwise.
self.commutes = commutes
self.scale = scale
self.commutator = 0. if commutes else self.median_value
self.compare = add if commutes else sub
record = {label: [] for label in self.labels}
prior_state_dict = {}
for idx, example in enumerate(self.examples):
time, label1, label2, bias, value, value_pred = example
default = (time, self.initial_rating)
prior_time1, prior_rating1 = prior_state_dict.get(label1, default)
prior_time2, prior_rating2 = prior_state_dict.get(label2, default)
rating1 = self.evolve_rating(prior_rating1, time - prior_time1)
rating2 = self.evolve_rating(prior_rating2, time - prior_time2)
value_pred = self.compare(rating1, rating2) + self.commutator + bias
self.examples[idx]['value_pred'] = value_pred
rating_change = k * (value - value_pred)
rating1 += rating_change
rating2 += rating_change if self.commutes else -rating_change
record[label1].append((time, rating1))
record[label2].append((time, rating2))
prior_state_dict[label1] = (time, rating1)
prior_state_dict[label2] = (time, rating2)
for label in record.keys():
record[label] = np.rec.array(
record[label], dtype=[
('time', 'datetime64[s]'), ('rating', 'float')])
self.record = record
residuals = np.rec.fromarrays([
self.examples.time,
self.examples.value - self.examples.value_pred
], names=('time', 'residual'))
return residuals
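    # Sketch of typical usage (hypothetical variable names and k value):
    #   elora = Elora(times, labels1, labels2, values)
    #   residuals = elora.fit(k=0.2, commutes=False)
    #   ratings = elora.get_rating(query_times, query_labels)
    # fit() sweeps the examples once in time order and stores the per-label
    # rating history in self.record.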
def get_rating(self, times, labels):
"""
Query label state(s) at the specified time accounting
for rating regression.
Args:
times (array of np.datetime64): Comparison datetimes
labels (array of string): Comparison entity labels
Returns:
rating (array): ratings for each time and label pair
"""
times = np.array(times, dtype='datetime64[s]', ndmin=1)
labels = np.array(labels, dtype='str', ndmin=1)
        ratings = np.empty_like(times, dtype='float')
from typing import Union
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
from astropy.coordinates import spherical_to_cartesian
from matplotlib.collections import EllipseCollection
from numba import njit
from numpy import linalg as LA
from scipy.spatial.distance import cdist
import src.common.constants as const
from src.common.camera import camera_matrix, projection_matrix, Camera
from src.common.coordinates import ENU_system
from src.common.robbins import load_craters, extract_robbins_dataset
def matrix_adjugate(matrix):
"""Return adjugate matrix [1].
Parameters
----------
matrix : np.ndarray
Input matrix
Returns
-------
np.ndarray
Adjugate of input matrix
References
----------
.. [1] https://en.wikipedia.org/wiki/Adjugate_matrix
"""
cofactor = LA.inv(matrix).T * LA.det(matrix)
return cofactor.T
def scale_det(matrix):
"""Rescale matrix such that det(A) = 1.
Parameters
----------
matrix: np.ndarray, torch.Tensor
Matrix input
Returns
-------
np.ndarray
Normalised matrix.
"""
if isinstance(matrix, np.ndarray):
return np.cbrt((1. / LA.det(matrix)))[..., None, None] * matrix
elif isinstance(matrix, torch.Tensor):
val = 1. / torch.det(matrix)
return (torch.sign(val) * torch.pow(torch.abs(val), 1. / 3.))[..., None, None] * matrix
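# Worked example for scale_det (illustrative, batched input assumed): for
# M = 2 * np.eye(3)[None] (shape (1, 3, 3)) we have det(M) = 8, so each matrix
# is multiplied by cbrt(1/8) = 0.5, giving np.eye(3) with determinant 1.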
def conic_matrix(a, b, psi, x=0, y=0):
"""Returns matrix representation for crater derived from ellipse parameters
Parameters
----------
a: np.ndarray, torch.Tensor, int, float
Semi-major ellipse axis
b: np.ndarray, torch.Tensor, int, float
Semi-minor ellipse axis
psi: np.ndarray, torch.Tensor, int, float
Ellipse angle (radians)
x: np.ndarray, torch.Tensor, int, float
X-position in 2D cartesian coordinate system (coplanar)
y: np.ndarray, torch.Tensor, int, float
Y-position in 2D cartesian coordinate system (coplanar)
Returns
-------
np.ndarray, torch.Tensor
Array of ellipse matrices
"""
if isinstance(a, (int, float)):
        out = np.empty((1, 3, 3))  # batch axis of 1 so the [:, i, j] assignments below also work for scalars
pkg = np
elif isinstance(a, torch.Tensor):
out = torch.empty((len(a), 3, 3), device=a.device, dtype=torch.float32)
pkg = torch
elif isinstance(a, np.ndarray):
out = np.empty((len(a), 3, 3))
pkg = np
else:
raise TypeError("Input must be of type torch.Tensor, np.ndarray, int or float.")
A = (a ** 2) * pkg.sin(psi) ** 2 + (b ** 2) * pkg.cos(psi) ** 2
B = 2 * ((b ** 2) - (a ** 2)) * pkg.cos(psi) * pkg.sin(psi)
C = (a ** 2) * pkg.cos(psi) ** 2 + b ** 2 * pkg.sin(psi) ** 2
D = -2 * A * x - B * y
F = -B * x - 2 * C * y
G = A * (x ** 2) + B * x * y + C * (y ** 2) - (a ** 2) * (b ** 2)
out[:, 0, 0] = A
out[:, 1, 1] = C
out[:, 2, 2] = G
out[:, 1, 0] = out[:, 0, 1] = B / 2
out[:, 2, 0] = out[:, 0, 2] = D / 2
out[:, 2, 1] = out[:, 1, 2] = F / 2
return out
@njit
def conic_center_numba(A):
a = LA.inv(A[:2, :2])
b = np.expand_dims(-A[:2, 2], axis=-1)
return a @ b
def conic_center(A):
if isinstance(A, torch.Tensor):
return (torch.inverse(A[..., :2, :2]) @ -A[..., :2, 2][..., None])[..., 0]
elif isinstance(A, np.ndarray):
return (LA.inv(A[..., :2, :2]) @ -A[..., :2, 2][..., None])[..., 0]
else:
raise TypeError("Input conics must be of type torch.Tensor or np.ndarray.")
def ellipse_axes(A):
if isinstance(A, torch.Tensor):
lambdas = torch.linalg.eigvalsh(A[..., :2, :2]) / (-torch.det(A) / torch.det(A[..., :2, :2]))[..., None]
axes = torch.sqrt(1 / lambdas)
elif isinstance(A, np.ndarray):
        lambdas = LA.eigvalsh(A[..., :2, :2]) / (-LA.det(A) / LA.det(A[..., :2, :2]))[..., None]
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
# Make the `systemds` package importable
import os
import sys
import warnings
import unittest
import numpy as np
import scipy.stats as st
import random
import math
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../")
sys.path.insert(0, path)
from systemds.context import SystemDSContext
shape = (random.randrange(1, 25), random.randrange(1, 25))
dist_shape = (10, 15)
min_max = (0, 1)
sparsity = random.uniform(0.0, 1.0)
seed = 123
distributions = ["norm", "uniform"]
sds = SystemDSContext()
class TestRand(unittest.TestCase):
def setUp(self):
warnings.filterwarnings(
action="ignore", message="unclosed", category=ResourceWarning)
def tearDown(self):
warnings.filterwarnings(
action="ignore", message="unclosed", category=ResourceWarning)
def test_rand_shape(self):
m = sds.rand(rows=shape[0], cols=shape[1]).compute()
self.assertTrue(m.shape == shape)
def test_rand_min_max(self):
m = sds.rand(rows=shape[0], cols=shape[1], min=min_max[0], max=min_max[1]).compute()
self.assertTrue((m.min() >= min_max[0]) and (m.max() <= min_max[1]))
def test_rand_sparsity(self):
m = sds.rand(rows=shape[0], cols=shape[1], sparsity=sparsity, seed=0).compute()
        non_zero_value_percent = np.count_nonzero(m) * 100 / np.prod(m.shape)
# -*- coding: UTF-8 -*-
import glob
import numpy as np
import pandas as pd
from PIL import Image
import random
# h,w = 60,50
h, w = (60, 50)
size = h * w
# Receding_Hairline Wearing_Necktie Rosy_Cheeks Eyeglasses Goatee Chubby
# Sideburns Blurry Wearing_Hat Double_Chin Pale_Skin Gray_Hair Mustache Bald
label_cls = 'Eyeglasses'
pngs = sorted(glob.glob('./data/img_align_celeba/*.jpg'))
data = pd.read_table('./data/list_attr_celeba.txt',
delim_whitespace=True, error_bad_lines=False)
eyeglasses = np.array(data[label_cls])
eyeglasses_cls = (eyeglasses + 1)/2
label_glasses = np.zeros((202599, 2))
correct_list = []
correct_list_test = []
false_list = []
false_list_test = []
for i in range(len(label_glasses)):
if eyeglasses_cls[i] == 1:
label_glasses[i][1] = 1
if i < 160000:
correct_list.append(i)
else:
correct_list_test.append(i)
else:
label_glasses[i][0] = 1
if i < 160000:
false_list.append(i)
else:
false_list_test.append(i)
print(len(correct_list_test), len(false_list_test))
training_set_label = label_glasses[0:160000, :]
test_set_label = label_glasses[160000:, :]
training_set_cls = eyeglasses_cls[0:160000]
test_set_cls = eyeglasses_cls[160000:]
def create_trainbatch(num=10, channel=0):
train_num = random.sample(false_list, num)
if channel == 0:
train_set = np.zeros((num, h, w))
else:
train_set = np.zeros((num, h, w, 3))
train_set_label_ = []
train_set_cls_ = []
for i in range(num):
img = Image.open(pngs[train_num[i]])
img_grey = img.resize((w, h))
if channel == 0:
img_grey = np.array(img_grey.convert('L'))
train_set[i, :, :] = img_grey
else:
img_grey = np.array(img_grey)
train_set[i, :, :, :] = img_grey
train_set_label_.append(training_set_label[train_num[i]])
train_set_cls_.append(training_set_cls[train_num[i]])
# if channel == 0:
# train_set = train_set.reshape(size,num).T
    train_set_label_new = np.array(train_set_label_)
from torch.utils.data import Dataset
import numpy as np
import os
import random
import torchvision.transforms as transforms
from PIL import Image, ImageOps
import cv2
import torch
from PIL.ImageFilter import GaussianBlur
import trimesh
import logging
import json
from math import sqrt
import datetime
log = logging.getLogger('trimesh')
log.setLevel(40)
def get_part(file, vertices, points, body_parts):
def get_dist(pt1, pt2):
return sqrt((pt1[0]-pt2[0])**2 +(pt1[1]-pt2[1])**2 + (pt1[2]-pt2[2])**2)
part = []
for point in points:
_min = float('inf')
_idx = 0
for idx, vertice in enumerate(vertices[::5]):
dist = get_dist(point, vertice)
if _min > dist:
_min = dist
_idx = idx
tmp = [0 for i in range(20)]
        tmp[body_parts.index(file[str(_idx)])] = 1  # one-hot encoding of the matched body part
part.append(tmp)
part = np.array(part)
return part
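# Illustrative vectorised alternative to get_part (an assumption, not the
# original implementation): the same nearest-vertex lookup done with a
# broadcast distance matrix instead of the explicit double loop, keeping the
# ::5 vertex subsampling and the 20-way one-hot encoding used above.
def get_part_vectorized(file, vertices, points, body_parts):
    sub = np.asarray(vertices)[::5]   # (M, 3) subsampled mesh vertices
    pts = np.asarray(points)          # (N, 3) query points
    d2 = ((pts[:, None, :] - sub[None, :, :]) ** 2).sum(-1)  # (N, M) squared distances
    nearest = d2.argmin(axis=1)       # index of the closest subsampled vertex
    part = np.zeros((len(pts), 20))
    for row, idx in enumerate(nearest):
        part[row, body_parts.index(file[str(idx)])] = 1  # one-hot of the matched body part
    return part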
def load_trimesh(root_dir):
folders = os.listdir(root_dir)
meshs = {}
for i, f in enumerate(folders):
sub_name = f
meshs[sub_name] = trimesh.load(os.path.join(root_dir, f, '%s_posed.obj' % sub_name), process=False, maintain_order=True,
skip_uv=True)
#### mesh = trimesh.load("alvin_t_posed.obj",process=False, maintain_order=True, skip_uv=True)
return meshs
class PTFTrainDataset(Dataset):
@staticmethod
def modify_commandline_options(parser, is_train):
return parser
def __init__(self, opt, phase='train'):
self.opt = opt
self.projection_mode = 'orthogonal'
# Path setup
self.root = self.opt.dataroot
self.RENDER = os.path.join(self.root, 'RENDER')
self.PART = os.path.join(self.root, 'PART')
self.MASK = os.path.join(self.root, 'MASK')
self.PARAM = os.path.join(self.root, 'PARAM')
self.UV_MASK = os.path.join(self.root, 'UV_MASK')
self.UV_NORMAL = os.path.join(self.root, 'UV_NORMAL')
self.UV_RENDER = os.path.join(self.root, 'UV_RENDER')
self.UV_POS = os.path.join(self.root, 'UV_POS')
self.OBJ = os.path.join(self.root, 'GEO', 'OBJ')
self.T_OBJ = os.path.join(self.root, 'GEO', 'T')
self.BG = self.opt.bg_path
self.bg_img_list = []
if self.opt.random_bg:
self.bg_img_list = [os.path.join(self.BG, x) for x in os.listdir(self.BG)]
self.bg_img_list.sort()
self.B_MIN = np.array([-128, -28, -128]) / 128
        self.B_MAX = np.array([128, 228, 128])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Join Hypocenter-Velocity Inversion on Tetrahedral meshes (JHVIT).
6 functions can be called and run in this package:
1- jntHypoVel_T : Joint hypocenter-velocity inversion of P wave data,
parametrized via the velocity model.
2- jntHyposlow_T : Joint hypocenter-velocity inversion of P wave data,
parametrized via the slowness model.
3- jntHypoVelPS_T : Joint hypocenter-velocity inversion of P- and S-wave data,
parametrized via the velocity models.
4- jntHyposlowPS_T : Joint hypocenter-velocity inversion of P- and S-wave data,
parametrized via the slowness models.
5-jointHypoVel_T : Joint hypocenter-velocity inversion of P wave data.
Input data and inversion parameters are downloaded automatically
from external text files.
6-jointHypoVelPS_T : Joint hypocenter-velocity inversion of P- and S-wave data.
Input data and inversion parameters are downloaded automatically
from external text files.
Notes:
- The package ttcrpy must be installed in order to perform the raytracing step.
This package can be downloaded from: https://ttcrpy.readthedocs.io/en/latest/
- To prevent bugs, it would be better to use python 3.7
Created on Sat Sep 14 2019
@author: <NAME>
"""
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spl
import scipy.stats as scps
import re
import sys
import copy
from mesh import MSHReader
from ttcrpy import tmesh
from multiprocessing import Pool, cpu_count, current_process, Manager
import multiprocessing as mp
from collections import OrderedDict
try:
import vtk
from vtk.util.numpy_support import numpy_to_vtk
except BaseException:
print('VTK module not found, saving velocity model in vtk form is disabled')
def msh2vtk(nodes, cells, velocity, outputFilename, fieldname="Velocity"):
"""
Generate a vtk file to store the velocity model.
Parameters
----------
nodes : np.ndarray, shape (nnodes, 3)
Node coordinates.
cells : np.ndarray of int, shape (number of cells, 4)
Indices of nodes forming each cell.
velocity : np.ndarray, shape (nnodes, 1)
Velocity model.
outputFilename : string
The output vtk filename.
fieldname : string, optional
The saved field title. The default is "Velocity".
Returns
-------
float
return 0.0 if no bugs occur.
"""
ugrid = vtk.vtkUnstructuredGrid()
tPts = vtk.vtkPoints()
tPts.SetNumberOfPoints(nodes.shape[0])
for n in range(nodes.shape[0]):
tPts.InsertPoint(n, nodes[n, 0], nodes[n, 1], nodes[n, 2])
ugrid.SetPoints(tPts)
VtkVelocity = numpy_to_vtk(velocity, deep=0, array_type=vtk.VTK_DOUBLE)
VtkVelocity.SetName(fieldname)
ugrid.GetPointData().SetScalars(VtkVelocity)
Tetra = vtk.vtkTetra()
for n in np.arange(cells.shape[0]):
Tetra.GetPointIds().SetId(0, cells[n, 0])
Tetra.GetPointIds().SetId(1, cells[n, 1])
Tetra.GetPointIds().SetId(2, cells[n, 2])
Tetra.GetPointIds().SetId(3, cells[n, 3])
ugrid.InsertNextCell(Tetra.GetCellType(), Tetra.GetPointIds())
gWriter = vtk.vtkUnstructuredGridWriter()
gWriter.SetFileName(outputFilename)
gWriter.SetInputData(ugrid)
gWriter.SetFileTypeToBinary()
gWriter.Update()
return 0.0
def check_hypo_indomain(Hypo_new, P_Dimension, Mesh=None):
"""
Check if the new hypocenter is still inside the domain and
project it onto the domain surface otherwise.
Parameters
----------
Hypo_new : np.ndarray, shape (3, ) or (3,1)
The updated hypocenter coordinates.
P_Dimension : np.ndarray, shape (6, )
Domain borders: the maximum and minimum of its 3 dimensions.
Mesh : instance of the class tmesh, optional
The domain discretization. The default is None.
Returns
-------
Hypo_new : np.ndarray, shape (3, )
The input Hypo_new or its projections on the domain surface.
outside : boolean
True if Hypo_new was outside the domain.
"""
outside = False
Hypo_new = Hypo_new.reshape([1, -1])
if Hypo_new[0, 0] < P_Dimension[0]:
Hypo_new[0, 0] = P_Dimension[0]
outside = True
if Hypo_new[0, 0] > P_Dimension[1]:
Hypo_new[0, 0] = P_Dimension[1]
outside = True
if Hypo_new[0, 1] < P_Dimension[2]:
Hypo_new[0, 1] = P_Dimension[2]
outside = True
if Hypo_new[0, 1] > P_Dimension[3]:
Hypo_new[0, 1] = P_Dimension[3]
outside = True
if Hypo_new[0, 2] < P_Dimension[4]:
Hypo_new[0, 2] = P_Dimension[4]
outside = True
if Hypo_new[0, 2] > P_Dimension[5]:
Hypo_new[0, 2] = P_Dimension[5]
outside = True
if Mesh:
if Mesh.is_outside(Hypo_new):
outside = True
Hypout = copy.copy(Hypo_new)
Hypin = np.array([[Hypo_new[0, 0], Hypo_new[0, 1], P_Dimension[4]]])
distance = np.sqrt(np.sum((Hypin - Hypout)**2))
while distance > 1.e-5:
Hmiddle = 0.5 * Hypout + 0.5 * Hypin
if Mesh.is_outside(Hmiddle):
Hypout = Hmiddle
else:
Hypin = Hmiddle
distance = np.sqrt(np.sum((Hypout - Hypin)**2))
Hypo_new = Hypin
return Hypo_new.reshape([-1, ]), outside
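# Illustrative usage of check_hypo_indomain (values are assumptions): with a
# box spanning x in [0, 10], y in [0, 10], z in [0, 5], i.e.
# P_Dimension = np.array([0., 10., 0., 10., 0., 5.]), a trial hypocenter at
# (12., 4., -1.) is clipped to (10., 4., 0.) and outside=True is returned,
# while a point already inside the box comes back unchanged with outside=False.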
class Parameters:
def __init__(self, maxit, maxit_hypo, conv_hypo, Vlim, VpVslim, dmax,
lagrangians, max_sc, invert_vel=True, invert_VsVp=False,
hypo_2step=False, use_sc=True, save_vel=False, uncrtants=False,
confdce_lev=0.95, verbose=False):
"""
Parameters
----------
maxit : int
Maximum number of iterations.
maxit_hypo : int
Maximum number of iterations to update hypocenter coordinates.
conv_hypo : float
Convergence criterion.
Vlim : tuple of 3 or 6 floats
Vlmin holds the maximum and the minimum values of P- and S-wave
velocity models and the slopes of the penalty functions,
example Vlim = (Vpmin, Vpmax, PAp, Vsmin, Vsmax, PAs).
VpVslim : tuple of 3 floats
Upper and lower limits of Vp/Vs ratio and
the slope of the corresponding Vp/Vs penalty function.
dmax : tuple of four floats
It holds the maximum admissible corrections for the velocity models
(dVp_max and dVs_max), the origin time (dt_max) and
the hypocenter coordinates (dx_max).
lagrangians : tuple of 6 floats
Penalty and constraint weights: λ (smoothing constraint weight),
γ (penalty constraint weight), α (weight of velocity data point const-
raint), wzK (vertical smoothing weight), γ_vpvs (penalty constraint
weight of Vp/Vs ratio), stig (weight of the constraint used to impose
statistical moments on Vp/Vs model).
invert_vel : boolean, optional
Perform velocity inversion if True. The default is True.
invert_VsVp : boolean, optional
Find Vp/Vs ratio model rather than S wave model. The default is False.
hypo_2step : boolean, optional
Relocate hypocenter events in 2 steps. The default is False.
use_sc : boolean, optional
Use static corrections. The default is 'True'.
save_vel : string, optional
Save intermediate velocity models or the final model.
The default is False.
uncrtants : boolean, optional
Calculate the uncertainty of the hypocenter parameters.
The default is False.
confdce_lev : float, optional
The confidence coefficient to calculate the uncertainty.
The default is 0.95.
verbose : boolean, optional
Print information messages about inversion progression.
The default is False.
Returns
-------
None.
"""
self.maxit = maxit
self.maxit_hypo = maxit_hypo
self.conv_hypo = conv_hypo
self.Vpmin = Vlim[0]
self.Vpmax = Vlim[1]
self.PAp = Vlim[2]
if len(Vlim) > 3:
self.Vsmin = Vlim[3]
self.Vsmax = Vlim[4]
self.PAs = Vlim[5]
self.VpVsmin = VpVslim[0]
self.VpVsmax = VpVslim[1]
self.Pvpvs = VpVslim[2]
self.dVp_max = dmax[0]
self.dx_max = dmax[1]
self.dt_max = dmax[2]
if len(dmax) > 3:
self.dVs_max = dmax[3]
self.λ = lagrangians[0]
self.γ = lagrangians[1]
self.γ_vpvs = lagrangians[2]
self.α = lagrangians[3]
self.stig = lagrangians[4]
self.wzK = lagrangians[5]
self.invert_vel = invert_vel
self.invert_VpVs = invert_VsVp
self.hypo_2step = hypo_2step
self.use_sc = use_sc
self.max_sc = max_sc
self.p = confdce_lev
self.uncertainty = uncrtants
self.verbose = verbose
self.saveVel = save_vel
def __str__(self):
"""
Encapsulate the attributes of the class Parameters in a string.
Returns
-------
output : string
Attributes of the class Parameters written in string.
"""
output = "-------------------------\n"
output += "\nParameters of Inversion :\n"
output += "\n-------------------------\n"
output += "\nMaximum number of iterations : {0:d}\n".format(self.maxit)
output += "\nMaximum number of iterations to get hypocenters"
output += ": {0:d}\n".format(self.maxit_hypo)
output += "\nVp minimum : {0:4.2f} km/s\n".format(self.Vpmin)
output += "\nVp maximum : {0:4.2f} km/s\n".format(self.Vpmax)
if self.Vsmin:
output += "\nVs minimum : {0:4.2f} km/s\n".format(self.Vsmin)
if self.Vsmax:
output += "\nVs maximum : {0:4.2f} km/s\n".format(self.Vsmax)
if self.VpVsmin:
output += "\nVpVs minimum : {0:4.2f} km/s\n".format(self.VpVsmin)
if self.VpVsmax:
output += "\nVpVs maximum : {0:4.2f} km/s\n".format(self.VpVsmax)
output += "\nSlope of the penalty function (P wave) : {0:3f}\n".format(
self.PAp)
if self.PAs:
output += "\nSlope of the penalty function (S wave) : {0:3f}\n".format(
self.PAs)
if self.Pvpvs:
output += "\nSlope of the penalty function"
output += "(VpVs ratio wave) : {0:3f}\n".format(self.Pvpvs)
output += "\nMaximum time perturbation by step : {0:4.3f} s\n".format(
self.dt_max)
output += "\nMaximum distance perturbation by step : {0:4.3f} km\n".format(
self.dx_max)
output += "\nMaximum P wave velocity correction by step"
output += " : {0:4.3f} km/s\n".format(self.dVp_max)
if self.dVs_max:
output += "\nMaximum S wave velocity correction by step"
output += " : {0:4.3f} km/s\n".format(self.dVs_max)
output += "\nLagrangians parameters : λ = {0:1.1e}\n".format(self.λ)
output += " : γ = {0:1.1e}\n".format(self.γ)
if self.γ_vpvs:
output += " : γ VpVs ratio = {0:1.1e}\n".format(
self.γ_vpvs)
output += " : α = {0:1.1e}\n".format(self.α)
output += " : wzK factor = {0:4.2f}\n".format(
self.wzK)
if self.stig:
output += " : stats. moment. penalty"
output += "coef. = {0:1.1e}\n".format(self.stig)
output += "\nOther parameters : Inverse Velocity = {0}\n".format(
self.invert_vel)
output += "\n : Use Vs/Vp instead of Vs = {0}\n".format(
self.invert_VpVs)
output += "\n : Use static correction = {0}\n".format(
self.use_sc)
output += "\n : Hyp. parameter Uncertainty estimation = "
output += "{0}\n".format(self.uncertainty)
if self.uncertainty:
output += "\n with a confidence level of"
output += " {0:3.2f}\n".format(self.p)
if self.saveVel == 'last':
output += "\n : Save intermediate velocity models = "
output += "last iteration only\n"
elif self.saveVel == 'all':
output += "\n : Save intermediate velocity models = "
output += "all iterations\n"
else:
output += "\n : Save intermediate velocity models = "
output += "False\n"
output += "\n : Relocate hypoctenters using 2 steps = "
output += "{0}\n".format(self.hypo_2step)
output += "\n : convergence criterion = {0:3.4f}\n".format(
self.conv_hypo)
if self.use_sc:
output += "\n : Maximum static correction = "
output += "{0:3.2f}\n".format(self.max_sc)
return output
class fileReader:
def __init__(self, filename):
"""
Parameters
----------
filename : string
List of data files and other inversion parameters.
Returns
-------
None.
"""
try:
open(filename, 'r')
except IOError:
print("Could not read file:", filename)
sys.exit()
self.filename = filename
assert(self.readParameter('base name')), 'invalid base name'
assert(self.readParameter('mesh file')), 'invalid mesh file'
assert(self.readParameter('rcvfile')), 'invalid rcv file'
assert(self.readParameter('Velocity')), 'invalid Velocity file'
assert(self.readParameter('Time calibration')
), 'invalid calibration data file'
def readParameter(self, parameter, dtype=None):
"""
Read the data filename or the inversion parameter value specified by
the argument parameter.
Parameters
----------
parameter : string
Filename or inversion parameter to read.
dtype : data type, optional
Explicit data type of the filename or the parameter read.
The default is None.
Returns
-------
param : string/int/float
File or inversion parameter.
"""
try:
f = open(self.filename, 'r')
for line in f:
if line.startswith(parameter):
position = line.find(':')
param = line[position + 1:]
param = param.rstrip("\n\r")
if dtype is None:
break
if dtype == int:
param = int(param)
elif dtype == float:
param = float(param)
elif dtype == bool:
if param == 'true' or param == 'True' or param == '1':
param = True
elif param == 'false' or param == 'False' or param == '0':
param = False
else:
print(" non recognized format")
break
return param
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float for " + parameter + "\n")
except NameError as NErr:
print(
parameter +
" is not indicated or has bad value:{0}".format(NErr))
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
finally:
f.close()
def saveVel(self):
"""
Method to read the specified option for saving the velocity model(s).
Returns
-------
bool/string
Save or not the velocity model(s) and for which iteration.
"""
try:
f = open(self.filename, 'r')
for line in f:
if line.startswith('Save Velocity'):
position = line.find(':')
if position > 0:
sv = line[position + 1:].strip()
break
f.close()
if sv == 'last' or sv == 'Last':
return 'last'
elif sv == 'all' or sv == 'All':
return 'all'
elif sv == 'false' or sv == 'False' or sv == '0':
return False
else:
print('bad option to save velocity: default value will be used')
return False
except OSError as err:
print("OS error: {0}".format(err))
except NameError as NErr:
print("save velocity is not indicated :{0}".format(NErr))
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def getIversionParam(self):
"""
Read the inversion parameters and
store them in an object of the class Parameters.
Returns
-------
Params : instance of the class Parameters
Inversion parameters and options.
"""
maxit = self.readParameter('number of iterations', int)
maxit_hypo = self.readParameter('num. iters. to get hypo.', int)
conv_hypo = self.readParameter('convergence Criterion', float)
Vpmin = self.readParameter('Vpmin', float)
Vpmax = self.readParameter('Vpmax', float)
PAp = self.readParameter('PAp', float)
if PAp is None or PAp < 0:
print('PAp : default value will be considered\n')
PAp = 1. # default value
Vsmin = self.readParameter('Vsmin', float)
Vsmax = self.readParameter('Vsmax', float)
PAs = self.readParameter('PAs', float)
if PAs is None or PAs < 0:
print('PAs : default value will be considered\n')
PAs = 1. # default value
VpVsmax = self.readParameter('VpVs_max', float)
if VpVsmax is None or VpVsmax < 0:
print('default value will be considered (5)\n')
VpVsmax = 5. # default value
VpVsmin = self.readParameter('VpVs_min', float)
if VpVsmin is None or VpVsmin < 0:
print('default value will be considered (1.5)\n')
VpVsmin = 1.5 # default value
Pvpvs = self.readParameter('Pvpvs', float)
if Pvpvs is None or Pvpvs < 0:
print('default value will be considered\n')
Pvpvs = 1. # default value
dVp_max = self.readParameter('dVp max', float)
dVs_max = self.readParameter('dVs max', float)
dx_max = self.readParameter('dx max', float)
dt_max = self.readParameter('dt max', float)
Alpha = self.readParameter('alpha', float)
Lambda = self.readParameter('lambda', float)
Gamma = self.readParameter('Gamma', float)
Gamma_ps = self.readParameter('Gamma_vpvs', float)
stigma = self.readParameter('stigma', float)
if stigma is None or stigma < 0:
stigma = 0. # default value
VerSmooth = self.readParameter('vertical smoothing', float)
InverVel = self.readParameter('inverse velocity', bool)
InverseRatio = self.readParameter('inverse Vs/Vp', bool)
Hyp2stp = self.readParameter('reloc.hypo.in 2 steps', bool)
Sc = self.readParameter('use static corrections', bool)
if Sc:
Sc_max = self.readParameter('maximum stat. correction', float)
else:
Sc_max = 0.
uncrtants = self.readParameter('uncertainty estm.', bool)
if uncrtants:
confdce_lev = self.readParameter('confidence level', float)
else:
confdce_lev = np.NAN
Verb = self.readParameter('Verbose ', bool)
saveVel = self.saveVel()
Params = Parameters(maxit, maxit_hypo, conv_hypo,
(Vpmin, Vpmax, PAp, Vsmin, Vsmax, PAs),
(VpVsmin, VpVsmax, Pvpvs),
(dVp_max, dx_max, dt_max, dVs_max),
(Lambda, Gamma, Gamma_ps, Alpha, stigma,
VerSmooth), Sc_max, InverVel, InverseRatio,
Hyp2stp, Sc, saveVel, uncrtants, confdce_lev, Verb)
return Params
class RCVReader:
def __init__(self, p_rcvfile):
"""
Parameters
----------
p_rcvfile : string
File holding receiver coordinates.
Returns
-------
None.
"""
self.rcv_file = p_rcvfile
assert(self.__ChekFormat()), 'invalid format for rcv file'
def getNumberOfStation(self):
"""
Return the number of receivers.
Returns
-------
Nstations : int
Receiver number.
"""
try:
fin = open(self.rcv_file, 'r')
Nstations = int(fin.readline())
fin.close()
return Nstations
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to an integer for the station number.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def getStation(self):
"""
Return coordinates of receivers.
Returns
-------
coordonates : np.ndarray, shape(receiver number,3)
Receiver coordinates.
"""
try:
fin = open(self.rcv_file, 'r')
Nsta = int(fin.readline())
coordonates = np.zeros([Nsta, 3])
for n in range(Nsta):
line = fin.readline()
Coord = re.split(r' ', line)
coordonates[n, 0] = float(Coord[0])
coordonates[n, 1] = float(Coord[2])
coordonates[n, 2] = float(Coord[4])
fin.close()
return coordonates
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float in rcvfile.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def __ChekFormat(self):
try:
fin = open(self.rcv_file)
n = 0
for line in fin:
if n == 0:
Nsta = int(line)
num_lines = sum(1 for line in fin)
if(num_lines != Nsta):
fin.close()
return False
if n > 0:
Coord = re.split(r' ', line)
if len(Coord) != 5:
fin.close()
return False
n += 1
fin.close()
return True
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float in rcvfile.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def readEventsFiles(time_file, waveType=False):
"""
Read a list of seismic events and corresponding data from a text file.
Parameters
----------
time_file : string
Event data filename.
waveType : bool
True if the seismic phase of each event is identified.
The default is False.
Returns
-------
data : np.ndarray or a list of two np.ndarrays
Event arrival time data
"""
if (time_file == ""):
if not waveType:
return (np.array([]))
elif waveType:
return (np.array([]), np.array([]))
try:
fin = open(time_file, 'r')
lstart = 0
for line in fin:
lstart += 1
if line.startswith('Ev_idn'):
break
if not waveType:
data = np.loadtxt(time_file, skiprows=lstart, ndmin=2)
elif waveType:
data = np.loadtxt(fname=time_file, skiprows=2,
dtype='S15', ndmin=2)
ind = np.where(data[:, -1] == b'P')[0]
dataP = data[ind, :-1].astype(float)
ind = np.where(data[:, -1] == b'S')[0]
dataS = data[ind, :-1].astype(float)
data = (dataP, dataS)
return data
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float in " + time_file + " file.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
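# Illustrative layout of an arrival-time file accepted by readEventsFiles
# (column names other than the 'Ev_idn' header keyword are assumptions inferred
# from how the columns are used in this module: event id, arrival time,
# receiver id, and the phase letter when waveType is True):
#
#   ... free-form header lines ...
#   Ev_idn  t_arrival  rcv_id  Phase
#   1       0.2570     3       P
#   1       0.4130     3       S
#   2       0.1915     7       P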
def readVelpoints(vlpfile):
"""
Read known velocity points from a text file.
Parameters
----------
vlpfile : string
Name of the file containing the known velocity points.
Returns
-------
data : np.ndarray, shape (number of points , 3)
Data corresponding to the known velocity points.
"""
if (vlpfile == ""):
return (np.array([]))
try:
fin = open(vlpfile, 'r')
lstart = 0
for line in fin:
lstart += 1
if line.startswith('Pt_id'):
break
data = np.loadtxt(vlpfile, skiprows=lstart)
return data
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float in " + vlpfile + " file.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def _hypo_relocation(ev, evID, hypo, data, rcv, sc, convergence, par):
"""
Location of a single hypocenter event using P arrival time data.
Parameters
----------
ev : int
Event index in the array evID.
evID : np.ndarray, shape (number of events ,)
Event indices.
hypo : np.ndarray, shape (number of events ,5)
Current hypocenter coordinates and origin time for each event.
data : np.ndarray, shape (arrival times number,3)
Arrival times for all events.
rcv : np.ndarray, shape (receiver number,3)
Coordinates of receivers.
sc : np.ndarray, shape (receiver number or 0 ,1)
Static correction values.
convergence : boolean list, shape (event number)
Convergence state of each event.
par : instance of the class Parameters
The inversion parameters.
Returns
-------
Hypocenter : np.ndarray, shape (5,)
Updated origin time and coordinates of event evID[ev].
"""
indh = np.where(hypo[:, 0] == evID[ev])[0]
if par.verbose:
print("\nEven N {0:d} is relacated in the ".format(
int(hypo[ev, 0])) + current_process().name + '\n')
sys.stdout.flush()
indr = np.where(data[:, 0] == evID[ev])[0]
rcv_ev = rcv[data[indr, 2].astype(int) - 1, :]
if par.use_sc:
sc_ev = sc[data[indr, 2].astype(int) - 1]
else:
sc_ev = 0.
nst = indr.size
Hypocenter = hypo[indh[0]].copy()
if par.hypo_2step:
print("\nEven N {0:d}: Update longitude and latitude\n".format(
int(hypo[ev, 0])))
sys.stdout.flush()
T0 = np.kron(hypo[indh, 1], np.ones([nst, 1]))
for It in range(par.maxit_hypo):
Tx = np.kron(Hypocenter[2:], np.ones([nst, 1]))
src = np.hstack((ev*np.ones([nst, 1]), T0 + sc_ev, Tx))
tcal, rays = Mesh3D.raytrace(source=src, rcv=rcv_ev, slowness=None,
aggregate_src=False, compute_L=False,
return_rays=True)
slow_0 = Mesh3D.get_s0(src)
Hi = np.ones((nst, 2))
for nr in range(nst):
rayi = rays[nr]
if rayi.shape[0] == 1:
print('\033[43m' + '\nWarning: raypath failed to converge'
                      ' for event N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and '
'receiver N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(data[indr[nr], 0]),
Tx[nr, 0], Tx[nr, 1], Tx[nr, 2],
int(data[indr[nr], 2]), rcv_ev[nr, 0],
rcv_ev[nr, 1], rcv_ev[nr, 2]) + '\033[0m')
sys.stdout.flush()
continue
slw0 = slow_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 0] = -dx * slw0 / ds
Hi[nr, 1] = -dy * slw0 / ds
convrays = np.where(tcal != 0)[0]
res = data[indr, 1] - tcal
if convrays.size < nst:
res = res[convrays]
Hi = Hi[convrays, :]
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
try:
U, S, VVh = np.linalg.svd(Hi.T.dot(Hi) + 1e-9 * np.eye(2))
VV = VVh.T
deltaH = np.dot(VV, np.dot(U.T, Hi.T.dot(res)) / S)
except np.linalg.linalg.LinAlgError:
print('\nEvent could not be relocated (iteration no ' +
str(It) + '), skipping')
sys.stdout.flush()
break
indH = np.abs(deltaH) > par.dx_max
deltaH[indH] = par.dx_max * np.sign(deltaH[indH])
updatedHypo = np.hstack((Hypocenter[2:4] + deltaH, Hypocenter[-1]))
updatedHypo, _ = check_hypo_indomain(updatedHypo, Dimensions,
Mesh3D)
Hypocenter[2:] = updatedHypo
if np.all(np.abs(deltaH[1:]) < par.conv_hypo):
break
if par.verbose:
print("\nEven N {0:d}: Update all parameters\n".format(int(hypo[ev, 0])))
sys.stdout.flush()
for It in range(par.maxit_hypo):
Tx = np.kron(Hypocenter[2:], np.ones([nst, 1]))
T0 = np.kron(Hypocenter[1], np.ones([nst, 1]))
src = np.hstack((ev*np.ones([nst, 1]), T0 + sc_ev, Tx))
tcal, rays = Mesh3D.raytrace(source=src, rcv=rcv_ev, slowness=None,
aggregate_src=False, compute_L=False,
return_rays=True)
slow_0 = Mesh3D.get_s0(src)
Hi = np.ones([nst, 4])
for nr in range(nst):
rayi = rays[nr]
if rayi.shape[0] == 1:
print('\033[43m' + '\nWarning: raypath failed to converge '
                  'for event N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and '
'receiver N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(data[indr[nr], 0]), Tx[nr, 0],
Tx[nr, 1], Tx[nr, 2], int(data[indr[nr], 2]),
rcv_ev[nr, 0], rcv_ev[nr, 1], rcv_ev[nr, 2]) + '\033[0m')
sys.stdout.flush()
continue
slw0 = slow_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx * slw0 / ds
Hi[nr, 2] = -dy * slw0 / ds
Hi[nr, 3] = -dz * slw0 / ds
convrays = np.where(tcal != 0)[0]
res = data[indr, 1] - tcal
if convrays.size < nst:
res = res[convrays]
Hi = Hi[convrays, :]
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
try:
U, S, VVh = np.linalg.svd(Hi.T.dot(Hi) + 1e-9 * np.eye(4))
VV = VVh.T
deltaH = np.dot(VV, np.dot(U.T, Hi.T.dot(res)) / S)
except np.linalg.linalg.LinAlgError:
print('\nEvent cannot be relocated (iteration no ' +
str(It) + '), skipping')
sys.stdout.flush()
break
if np.abs(deltaH[0]) > par.dt_max:
deltaH[0] = par.dt_max * np.sign(deltaH[0])
if np.linalg.norm(deltaH[1:]) > par.dx_max:
deltaH[1:] *= par.dx_max / np.linalg.norm(deltaH[1:])
updatedHypo = Hypocenter[2:] + deltaH[1:]
updatedHypo, outside = check_hypo_indomain(updatedHypo,
Dimensions, Mesh3D)
Hypocenter[1:] = np.hstack((Hypocenter[1] + deltaH[0], updatedHypo))
if outside and It == par.maxit_hypo - 1:
print('\nEvent N {0:d} cannot be relocated inside the domain\n'.format(
int(hypo[ev, 0])))
convergence[ev] = 'out'
return Hypocenter
if np.all(np.abs(deltaH[1:]) < par.conv_hypo):
convergence[ev] = True
if par.verbose:
                print('\033[42m' + '\nEvent N {0:d} has converged at {1:d}'
' iteration(s)\n'.format(int(hypo[ev, 0]), It + 1) + '\n'
+ '\033[0m')
sys.stdout.flush()
break
else:
if par.verbose:
            print('\nEvent N {0:d} : maximum number of iterations'
' was reached\n'.format(int(hypo[ev, 0])) + '\n')
sys.stdout.flush()
return Hypocenter
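# The update inside _hypo_relocation is a damped Gauss-Newton step: each row of
# Hi holds the partial derivatives of one arrival time with respect to the
# origin time (the constant 1 in column 0) and the hypocenter coordinates
# (the -dx*s0/ds terms, i.e. minus the take-off direction scaled by the local
# slowness), res holds the observed-minus-calculated times, and the correction
# deltaH is clipped to dt_max / dx_max before the hypocenter is moved.  A
# compact sketch of that linear-algebra step alone (illustrative helper,
# detached from the raytracer; not part of the original code):
def _gauss_newton_step(Hi, res, dt_max, dx_max):
    deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
    if np.abs(deltaH[0]) > dt_max:
        deltaH[0] = dt_max * np.sign(deltaH[0])
    if np.linalg.norm(deltaH[1:]) > dx_max:
        deltaH[1:] *= dx_max / np.linalg.norm(deltaH[1:])
    return deltaH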
def _hypo_relocationPS(ev, evID, hypo, data, rcv, sc, convergence, slow, par):
"""
Relocate a single hypocenter event using P- and S-wave arrival times.
Parameters
----------
ev : int
Event index in the array evID.
evID : np.ndarray, shape (event number ,)
Event indices.
hypo : np.ndarray, shape (event number ,5)
Current hypocenter coordinates and origin times for each event.
data : tuple of two np.ndarrays
Arrival times of P- and S-waves.
rcv : np.ndarray, shape (receiver number,3)
Coordinates of receivers.
sc : tuple of two np.ndarrays (shape(receiver number or 0,1))
Static correction values of P- and S-waves.
convergence : boolean list, shape (event number)
The convergence state of each event.
slow : tuple of two np.ndarrays (shape(nnodes,1))
P and S slowness models.
par : instance of the class Parameters
The inversion parameters.
Returns
-------
Hypocenter : np.ndarray, shape (5,)
Updated origin time and coordinates of event evID[ev].
"""
(slowP, slowS) = slow
(scp, scs) = sc
(dataP, dataS) = data
indh = np.where(hypo[:, 0] == evID[ev])[0]
if par.verbose:
print("Even N {0:d} is relacated in the ".format(
int(hypo[ev, 0])) + current_process().name + '\n')
sys.stdout.flush()
indrp = np.where(dataP[:, 0] == evID[ev])[0]
rcv_evP = rcv[dataP[indrp, 2].astype(int) - 1, :]
nstP = indrp.size
indrs = np.where(dataS[:, 0] == evID[ev])[0]
rcv_evS = rcv[dataS[indrs, 2].astype(int) - 1, :]
nstS = indrs.size
Hypocenter = hypo[indh[0]].copy()
if par.use_sc:
scp_ev = scp[dataP[indrp, 2].astype(int) - 1]
scs_ev = scs[dataS[indrs, 2].astype(int) - 1]
else:
scp_ev = np.zeros([nstP, 1])
scs_ev = np.zeros([nstS, 1])
if par.hypo_2step:
if par.verbose:
print("\nEven N {0:d}: Update longitude and latitude\n".format(
int(hypo[ev, 0])))
sys.stdout.flush()
for It in range(par.maxit_hypo):
Txp = np.kron(Hypocenter[1:], np.ones([nstP, 1]))
Txp[:, 0] += scp_ev[:, 0]
srcP = np.hstack((ev*np.ones([nstP, 1]), Txp))
tcalp, raysP = Mesh3D.raytrace(source=srcP, rcv=rcv_evP, slowness=slowP,
aggregate_src=False, compute_L=False,
return_rays=True)
slowP_0 = Mesh3D.get_s0(srcP)
Txs = np.kron(Hypocenter[1:], np.ones([nstS, 1]))
Txs[:, 0] += scs_ev[:, 0]
srcS = np.hstack((ev*np.ones([nstS, 1]), Txs))
tcals, raysS = Mesh3D.raytrace(source=srcS, rcv=rcv_evS, slowness=slowS,
aggregate_src=False, compute_L=False,
return_rays=True)
slowS_0 = Mesh3D.get_s0(srcS)
Hi = np.ones((nstP + nstS, 2))
for nr in range(nstP):
rayi = raysP[nr]
if rayi.shape[0] == 1:
if par.verbose:
print('\033[43m' +
                          '\nWarning: raypath failed to converge for event '
                          'N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(dataP[indrp[nr], 0]),
Txp[nr, 1], Txp[nr, 2], Txp[nr, 3],
int(dataP[indrp[nr], 2]),
rcv_evP[nr, 0], rcv_evP[nr, 1],
rcv_evP[nr, 2]) +
'\033[0m')
sys.stdout.flush()
continue
slw0 = slowP_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 0] = -dx * slw0 / ds
Hi[nr, 1] = -dy * slw0 / ds
for nr in range(nstS):
rayi = raysS[nr]
if rayi.shape[0] == 1:
if par.verbose:
print('\033[43m' +
                          '\nWarning: raypath failed to converge for event '
'N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(dataS[indrs[nr], 0]),
Txs[nr, 1], Txs[nr, 2],
Txs[nr, 3], int(dataS[indrs[nr], 2]),
rcv_evS[nr, 0], rcv_evS[nr, 1],
rcv_evS[nr, 2]) +
'\033[0m')
sys.stdout.flush()
continue
slw0 = slowS_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr + nstP, 0] = -dx * slw0 / ds
Hi[nr + nstP, 1] = -dy * slw0 / ds
tcal = np.hstack((tcalp, tcals))
res = np.hstack((dataP[indrp, 1], dataS[indrs, 1])) - tcal
convrays = np.where(tcal != 0)[0]
if convrays.size < (nstP + nstS):
res = res[convrays]
Hi = Hi[convrays, :]
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
try:
U, S, VVh = np.linalg.svd(Hi.T.dot(Hi) + 1e-9 * np.eye(2))
VV = VVh.T
deltaH = np.dot(VV, np.dot(U.T, Hi.T.dot(res)) / S)
except np.linalg.linalg.LinAlgError:
if par.verbose:
print('\nEvent could not be relocated (iteration no ' +
str(It) + '), skipping')
sys.stdout.flush()
break
indH = np.abs(deltaH) > par.dx_max
deltaH[indH] = par.dx_max * np.sign(deltaH[indH])
updatedHypo = np.hstack((Hypocenter[2:4] + deltaH, Hypocenter[-1]))
updatedHypo, _ = check_hypo_indomain(updatedHypo, Dimensions,
Mesh3D)
Hypocenter[2:] = updatedHypo
if np.all(np.abs(deltaH) < par.conv_hypo):
break
if par.verbose:
print("\nEven N {0:d}: Update all parameters\n".format(int(hypo[ev, 0])))
sys.stdout.flush()
for It in range(par.maxit_hypo):
Txp = np.kron(Hypocenter[1:], np.ones([nstP, 1]))
Txp[:, 0] += scp_ev[:, 0]
srcP = np.hstack((ev*np.ones([nstP, 1]), Txp))
tcalp, raysP = Mesh3D.raytrace(source=srcP, rcv=rcv_evP, slowness=slowP,
aggregate_src=False, compute_L=False,
return_rays=True)
slowP_0 = Mesh3D.get_s0(srcP)
Txs = np.kron(Hypocenter[1:], np.ones([nstS, 1]))
Txs[:, 0] += scs_ev[:, 0]
srcS = np.hstack((ev*np.ones([nstS, 1]), Txs))
tcals, raysS = Mesh3D.raytrace(source=srcS, rcv=rcv_evS, slowness=slowS,
aggregate_src=False, compute_L=False,
return_rays=True)
slowS_0 = Mesh3D.get_s0(srcS)
Hi = np.ones((nstP + nstS, 4))
for nr in range(nstP):
rayi = raysP[nr]
if rayi.shape[0] == 1:
if par.verbose:
print('\033[43m' + '\nWarning: raypath failed to converge for '
'event N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(dataP[indrp[nr], 0]), Txp[nr, 1],
Txp[nr, 2], Txp[nr, 3], int(dataP[indrp[nr], 2]),
rcv_evP[nr, 0], rcv_evP[nr, 1],
rcv_evP[nr, 2]) + '\033[0m')
sys.stdout.flush()
continue
slw0 = slowP_0[nr]
dx = rayi[2, 0] - Hypocenter[2]
dy = rayi[2, 1] - Hypocenter[3]
dz = rayi[2, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx * slw0 / ds
Hi[nr, 2] = -dy * slw0 / ds
Hi[nr, 3] = -dz * slw0 / ds
for nr in range(nstS):
rayi = raysS[nr]
if rayi.shape[0] == 1:
if par.verbose:
print('\033[43m' + '\nWarning: raypath failed to converge for '
'event N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(dataS[indrs[nr], 0]), Txs[nr, 1],
Txs[nr, 2], Txs[nr, 3], int(dataS[indrs[nr], 2]),
rcv_evS[nr, 0], rcv_evS[nr, 1],
rcv_evS[nr, 2]) + '\033[0m')
sys.stdout.flush()
continue
slw0 = slowS_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr + nstP, 1] = -dx * slw0 / ds
Hi[nr + nstP, 2] = -dy * slw0 / ds
Hi[nr + nstP, 3] = -dz * slw0 / ds
tcal = np.hstack((tcalp, tcals))
res = np.hstack((dataP[indrp, 1], dataS[indrs, 1])) - tcal
convrays = np.where(tcal != 0)[0]
if convrays.size < (nstP + nstS):
res = res[convrays]
Hi = Hi[convrays, :]
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
try:
U, S, VVh = np.linalg.svd(Hi.T.dot(Hi) + 1e-9 * np.eye(4))
VV = VVh.T
deltaH = np.dot(VV, np.dot(U.T, Hi.T.dot(res)) / S)
except np.linalg.linalg.LinAlgError:
if par.verbose:
print('\nEvent could not be relocated (iteration no ' +
str(It) + '), skipping\n')
sys.stdout.flush()
break
if np.abs(deltaH[0]) > par.dt_max:
deltaH[0] = par.dt_max * np.sign(deltaH[0])
if np.linalg.norm(deltaH[1:]) > par.dx_max:
deltaH[1:] *= par.dx_max / np.linalg.norm(deltaH[1:])
updatedHypo = Hypocenter[2:] + deltaH[1:]
updatedHypo, outside = check_hypo_indomain(updatedHypo,
Dimensions, Mesh3D)
Hypocenter[1:] = np.hstack((Hypocenter[1] + deltaH[0], updatedHypo))
if outside and It == par.maxit_hypo - 1:
if par.verbose:
print('\nEvent N {0:d} could not be relocated inside '
'the domain\n'.format(int(hypo[ev, 0])))
sys.stdout.flush()
convergence[ev] = 'out'
return Hypocenter
if np.all(np.abs(deltaH[1:]) < par.conv_hypo):
convergence[ev] = True
if par.verbose:
print('\033[42m' + '\nEvent N {0:d} has converged at'
' iteration {1:d}\n'.format(int(hypo[ev, 0]), It + 1) +
'\n' + '\033[0m')
sys.stdout.flush()
break
else:
if par.verbose:
print('\nEvent N {0:d} : maximum number of iterations was'
' reached'.format(int(hypo[ev, 0])) + '\n')
sys.stdout.flush()
return Hypocenter
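# A minimal, self-contained sketch (hypothetical helper, added for
# illustration and not called by this module) of the least-squares update
# with the damped-SVD fallback used in the relocation loops above.
def _lstsq_with_svd_fallback(Hi, res, damping=1e-9):
# Try ordinary least squares first; if the solution contains non-finite
# values, fall back to a damped pseudo-inverse of Hi.T @ Hi.
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
U, S, Vh = np.linalg.svd(Hi.T.dot(Hi) + damping * np.eye(Hi.shape[1]))
deltaH = Vh.T.dot(U.T.dot(Hi.T.dot(res)) / S)
return deltaH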
def _uncertaintyEstimat(ev, evID, hypo, data, rcv, sc, slow, par, varData=None):
"""
Estimate origin time uncertainty and confidence ellipsoid.
Parameters
----------
ev : int
Event index in the array evID.
evID : np.ndarray, shape (event number,)
Event indices.
hypo : np.ndarray, shape (event number,5)
Estimated hypocenter coordinates and origin time.
data : np.ndarray, shape (arrival time number,3) or
tuple if both P and S waves are used.
Arrival times of seismic events.
rcv : np.ndarray, shape (receiver number ,3)
coordinates of receivers.
sc : np.ndarray, shape (receiver number or 0 ,1) or
tuple if both P and S waves are used.
Static correction values.
slow : np.ndarray or tuple, shape(nnodes,1)
P or P and S slowness models.
par : instance of the class Parameters
The inversion parameters.
varData : list of two lists
Number of arrival times and the sum of squared residuals needed to
compute the noise variance. See Block's Thesis, 1991 (P. 63)
The default is None.
Returns
-------
to_confInterv : float
Origin time uncertainty interval.
axis1 : np.ndarray, shape(3,)
Coordinates of the 1st confidence ellipsoid axis (vector).
axis2 : np.ndarray, shape(3,)
Coordinates of the 2nd confidence ellipsoid axis (vector).
axis3 : np.ndarray, shape(3,)
Coordinates of the 3rd confidence ellipsoid axis (vector).
"""
if par.verbose:
print("Uncertainty estimation for the Even N {0:d}".format(
int(hypo[ev, 0])) + '\n')
sys.stdout.flush()
indh = np.where(hypo[:, 0] == evID[ev])[0]
if len(slow) == 2:
(slowP, slowS) = slow
(dataP, dataS) = data
(scp, scs) = sc
indrp = np.where(dataP[:, 0] == evID[ev])[0]
rcv_evP = rcv[dataP[indrp, 2].astype(int) - 1, :]
nstP = indrp.size
T0p = np.kron(hypo[indh, 1], np.ones([nstP, 1]))
indrs = np.where(dataS[:, 0] == evID[ev])[0]
rcv_evS = rcv[dataS[indrs, 2].astype(int) - 1, :]
nstS = indrs.size
T0s = np.kron(hypo[indh, 1], np.ones([nstS, 1]))
Txp = np.kron(hypo[indh, 2:], np.ones([nstP, 1]))
Txs = np.kron(hypo[indh, 2:], np.ones([nstS, 1]))
if par.use_sc:
scp_ev = scp[dataP[indrp, 2].astype(int) - 1, :]
scs_ev = scs[dataS[indrs, 2].astype(int) - 1, :]
else:
scp_ev = np.zeros([nstP, 1])
scs_ev = np.zeros([nstS, 1])
srcp = np.hstack((ev*np.ones([nstP, 1]), T0p + scp_ev, Txp))
srcs = np.hstack((ev*np.ones([nstS, 1]), T0s + scs_ev, Txs))
tcalp, raysP = Mesh3D.raytrace(source=srcp, rcv=rcv_evP, slowness=slowP,
aggregate_src=False, compute_L=False,
return_rays=True)
tcals, raysS = Mesh3D.raytrace(source=srcs, rcv=rcv_evS, slowness=slowS,
aggregate_src=False, compute_L=False,
return_rays=True)
slowP_0 = Mesh3D.get_s0(srcp)
slowS_0 = Mesh3D.get_s0(srcs)
Hi = np.ones((nstP + nstS, 4))
for nr in range(nstP):
rayi = raysP[nr]
if rayi.shape[0] == 1:
continue
slw0 = slowP_0[nr]
dx = rayi[1, 0] - hypo[indh, 2]
dy = rayi[1, 1] - hypo[indh, 3]
dz = rayi[1, 2] - hypo[indh, 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx * slw0 / ds
Hi[nr, 2] = -dy * slw0 / ds
Hi[nr, 3] = -dz * slw0 / ds
for nr in range(nstS):
rayi = raysS[nr]
if rayi.shape[0] == 1:
continue
slw0 = slowS_0[nr]
dx = rayi[1, 0] - hypo[indh, 2]
dy = rayi[1, 1] - hypo[indh, 3]
dz = rayi[1, 2] - hypo[indh, 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr + nstP, 1] = -dx * slw0 / ds
Hi[nr + nstP, 2] = -dy * slw0 / ds
Hi[nr + nstP, 3] = -dz * slw0 / ds
tcal = np.hstack((tcalp, tcals))
res = np.hstack((dataP[indrp, 1], dataS[indrs, 1])) - tcal
convrays = np.where(tcal != 0)[0]
if convrays.size < (nstP + nstS):
res = res[convrays]
Hi = Hi[convrays, :]
elif len(slow) == 1:
indr = np.where(data[0][:, 0] == evID[ev])[0]
rcv_ev = rcv[data[0][indr, 2].astype(int) - 1, :]
if par.use_sc:
sc_ev = sc[data[0][indr, 2].astype(int) - 1]
else:
sc_ev = 0.
nst = indr.size
T0 = np.kron(hypo[indh, 1], np.ones([nst, 1]))
Tx = np.kron(hypo[indh, 2:], np.ones([nst, 1]))
src = np.hstack((ev*np.ones([nst, 1]), T0+sc_ev, Tx))
tcal, rays = Mesh3D.raytrace(source=src, rcv=rcv_ev, slowness=slow[0],
aggregate_src=False, compute_L=False,
return_rays=True)
slow_0 = Mesh3D.get_s0(src)
Hi = np.ones([nst, 4])
for nr in range(nst):
rayi = rays[nr]
if rayi.shape[0] == 1: # unconverged ray
continue
slw0 = slow_0[nr]
dx = rayi[1, 0] - hypo[indh, 2]
dy = rayi[1, 1] - hypo[indh, 3]
dz = rayi[1, 2] - hypo[indh, 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx * slw0 / ds
Hi[nr, 2] = -dy * slw0 / ds
Hi[nr, 3] = -dz * slw0 / ds
convrays = np.where(tcal != 0)[0]
res = data[0][indr, 1] - tcal
if convrays.size < nst:
res = res[convrays]
Hi = Hi[convrays, :]
N = res.shape[0]
try:
Q = np.linalg.inv(Hi.T @ Hi)
except np.linalg.linalg.LinAlgError:
if par.verbose:
print("ill-conditioned Jacobian matrix")
sys.stdout.flush()
U, S, V = np.linalg.svd(Hi.T @ Hi)
Q = V.T @ np.diag(1./(S + 1.e-9)) @ U.T
eigenVals, eigenVec = np.linalg.eig(Q[:3, :3])
ind = np.argsort(eigenVals)
if varData:
s2 = 1
varData[0] += [np.sum(res**2)]
varData[1] += [N]
else:
s2 = np.sum(res**2) / (N - 4)
alpha = 1 - par.p
coef = scps.t.ppf(1 - alpha / 2., N - 4)
axis1 = np.sqrt(eigenVals[ind[2]] * s2) * coef * eigenVec[:, ind[2]]
axis2 = np.sqrt(eigenVals[ind[1]] * s2) * coef * eigenVec[:, ind[1]]
axis3 = np.sqrt(eigenVals[ind[0]] * s2) * coef * eigenVec[:, ind[0]]
to_confInterv = np.sqrt(Q[-1, -1] * s2) * coef
return to_confInterv, axis1, axis2, axis3
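# A minimal, self-contained sketch (hypothetical helper, added for
# illustration and not called by this module) showing how the confidence
# ellipsoid axes follow from the spatial block of the posterior covariance Q,
# mirroring the eigen-decomposition and Student-t scaling used above.
def _confidence_axes(Q, s2, N, p=0.95):
# Q: (4, 4) covariance of (t0, x, y, z); s2: noise variance estimate;
# N: number of residuals; p: confidence level.
eigenVals, eigenVec = np.linalg.eig(Q[:3, :3])
ind = np.argsort(eigenVals)
coef = scps.t.ppf(1 - (1 - p) / 2., N - 4)
axes = [np.sqrt(eigenVals[i] * s2) * coef * eigenVec[:, i]
for i in ind[::-1]]
to_conf = np.sqrt(Q[-1, -1] * s2) * coef
return to_conf, axes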
def jntHypoVel_T(data, caldata, Vinit, cells, nodes, rcv, Hypo0,
par, threads=1, vPoints=np.array([]), basename='Vel'):
"""
Joint hypocenter-velocity inversion from P-wave arrival time data,
parametrized using the velocity model.
Parameters
----------
data : np.ndarray, shape(arrival time number, 3)
Arrival times and corresponding receivers for each event.
caldata : np.ndarray, shape(number of calibration shots, 3)
Calibration shot data.
Vinit : np.ndarray, shape(nnodes,1) or (1,1)
Initial velocity model.
cells : np.ndarray of int, shape (cell number, 4)
Indices of nodes forming the cells.
nodes : np.ndarray, shape (nnodes, 3)
Node coordinates.
rcv : np.ndarray, shape (receiver number,3)
Coordinates of receivers.
Hypo0 : np.ndarray, shape(event number, 5)
First guesses of the hypocenter coordinates (must all be different).
par : instance of the class Parameters
The inversion parameters.
threads : int, optional
Thread number. The default is 1.
vPoints : np.ndarray, shape(point number,4), optional
Known velocity points. The default is np.array([]).
basename : string, optional
The filename used to save the output file. The default is 'Vel'.
Returns
-------
output : python dictionary
It contains the estimated hypocenter coordinates and their origin times,
static correction values, velocity model, convergence states,
parameter uncertainty and residual norm in each iteration.
"""
if par.verbose:
print(par)
print('inversion involves the velocity model\n')
sys.stdout.flush()
if par.use_sc:
nstation = rcv.shape[0]
else:
nstation = 0
Static_Corr = np.zeros([nstation, 1])
nnodes = nodes.shape[0]
# observed traveltimes
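# data columns: 0 = event ID, 1 = observed arrival time,
# 2 = receiver index (1-based, pointing into rcv)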
if data.shape[0] > 0:
evID = np.unique(data[:, 0]).astype(int)
tObserved = data[:, 1]
numberOfEvents = evID.size
else:
tObserved = np.array([])
numberOfEvents = 0
rcvData = np.zeros([data.shape[0], 3])
for ev in range(numberOfEvents):
indr = np.where(data[:, 0] == evID[ev])[0]
rcvData[indr] = rcv[data[indr, 2].astype(int) - 1, :]
# calibration data
if caldata.shape[0] > 0:
calID = np.unique(caldata[:, 0])
ncal = calID.size
time_calibration = caldata[:, 1]
TxCalib = np.zeros((caldata.shape[0], 5))
TxCalib[:, 2:] = caldata[:, 3:]
TxCalib[:, 0] = caldata[:, 0]
rcvCalib = np.zeros([caldata.shape[0], 3])
if par.use_sc:
Msc_cal = []
for nc in range(ncal):
indr = np.where(caldata[:, 0] == calID[nc])[0]
rcvCalib[indr] = rcv[caldata[indr, 2].astype(int) - 1, :]
if par.use_sc:
Msc_cal.append(sp.csr_matrix(
(np.ones([indr.size, ]),
(range(indr.size), caldata[indr, 2]-1)),
shape=(indr.size, nstation)))
else:
ncal = 0
time_calibration = np.array([])
# initial velocity model
if Vinit.size == 1:
Velocity = Vinit * np.ones([nnodes, 1])
Slowness = 1. / Velocity
elif Vinit.size == nnodes:
Velocity = Vinit
Slowness = 1. / Velocity
else:
print("invalid Velocity Model\n")
sys.stdout.flush()
return 0
# used threads
nThreadsSystem = cpu_count()
nThreads = np.min((threads, nThreadsSystem))
global Mesh3D, Dimensions
Mesh3D = tmesh.Mesh3d(nodes, tetra=cells, method='DSPM', cell_slowness=0,
n_threads=nThreads, n_secondary=2, n_tertiary=1,
process_vel=1, radius_factor_tertiary=2,
translate_grid=1)
Mesh3D.set_slowness(Slowness)
Dimensions = np.empty(6)
Dimensions[0] = min(nodes[:, 0])
Dimensions[1] = max(nodes[:, 0])
Dimensions[2] = min(nodes[:, 1])
Dimensions[3] = max(nodes[:, 1])
Dimensions[4] = min(nodes[:, 2])
Dimensions[5] = max(nodes[:, 2])
# Hypocenter
if numberOfEvents > 0 and Hypo0.shape[0] != numberOfEvents:
print("invalid Hypocenters0 format\n")
sys.stdout.flush()
return 0
else:
Hypocenters = Hypo0.copy()
ResidueNorm = np.zeros([par.maxit])
if par.invert_vel:
if par.use_sc:
U = sp.bsr_matrix(
np.vstack((np.zeros([nnodes, 1]), np.ones([nstation, 1]))))
nbre_param = nnodes + nstation
if par.max_sc > 0. and par.max_sc < 1.:
N = sp.bsr_matrix(
np.hstack((np.zeros([nstation, nnodes]), np.eye(nstation))))
NtN = (1. / par.max_sc**2) * N.T.dot(N)
else:
U = sp.csr_matrix(np.zeros([nnodes, 1]))
nbre_param = nnodes
# build matrix D
if vPoints.size > 0:
if par.verbose:
print('\nBuilding velocity data point matrix D\n')
sys.stdout.flush()
D = Mesh3D.compute_D(vPoints[:, 2:])
D = sp.hstack((D, sp.csr_matrix((D.shape[0], nstation)))).tocsr()
DtD = D.T @ D
nD = spl.norm(DtD)
# Build regularization matrix
if par.verbose:
print('\n...Building regularization matrix K\n')
sys.stdout.flush()
kx, ky, kz = Mesh3D.compute_K(order=2, taylor_order=2,
weighting=1, squared=0,
s0inside=0, additional_points=3)
KX = sp.hstack((kx, sp.csr_matrix((nnodes, nstation))))
KX_Square = KX.transpose().dot(KX)
KY = sp.hstack((ky, sp.csr_matrix((nnodes, nstation))))
KY_Square = KY.transpose().dot(KY)
KZ = sp.hstack((kz, sp.csr_matrix((nnodes, nstation))))
KZ_Square = KZ.transpose().dot(KZ)
KtK = KX_Square + KY_Square + par.wzK * KZ_Square
nK = spl.norm(KtK)
if nThreads == 1:
hypo_convergence = list(np.zeros(numberOfEvents, dtype=bool))
else:
manager = Manager()
hypo_convergence = manager.list(np.zeros(numberOfEvents, dtype=bool))
for i in range(par.maxit):
if par.verbose:
print("Iteration N : {0:d}\n".format(i + 1))
sys.stdout.flush()
if par.invert_vel:
if par.verbose:
print('Iteration {0:d} - Updating velocity model\n'.format(i + 1))
print("Updating penalty vector\n")
sys.stdout.flush()
# Build vector C
cx = kx.dot(Velocity)
cy = ky.dot(Velocity)
cz = kz.dot(Velocity)
# build matrix P and dP
indVmin = np.where(Velocity < par.Vpmin)[0]
indVmax = np.where(Velocity > par.Vpmax)[0]
indPinality = np.hstack([indVmin, indVmax])
dPinality_V = np.hstack(
[-par.PAp * np.ones(indVmin.size), par.PAp * np.ones(indVmax.size)])
pinality_V = np.vstack(
[par.PAp * (par.Vpmin - Velocity[indVmin]), par.PAp *
(Velocity[indVmax] - par.Vpmax)])
d_Pinality = sp.csr_matrix(
(dPinality_V, (indPinality, indPinality)), shape=(
nnodes, nbre_param))
Pinality = sp.csr_matrix(
(pinality_V.reshape([-1, ]),
(indPinality, np.zeros([indPinality.shape[0]]))),
shape=(nnodes, 1))
if par.verbose:
print('Penalties applied at {0:d} nodes\n'.format(
dPinality_V.size))
print('...Start Raytracing\n')
sys.stdout.flush()
if numberOfEvents > 0:
sources = np.empty((data.shape[0], 5))
if par.use_sc:
sc_data = np.empty((data.shape[0], ))
for ev in np.arange(numberOfEvents):
indr = np.where(data[:, 0] == evID[ev])[0]
indh = np.where(Hypocenters[:, 0] == evID[ev])[0]
sources[indr, :] = Hypocenters[indh, :]
if par.use_sc:
sc_data[indr] = Static_Corr[data[indr, 2].astype(int)
- 1, 0]
if par.use_sc:
sources[:, 1] += sc_data
tt, rays, M0 = Mesh3D.raytrace(source=sources,
rcv=rcvData, slowness=None,
aggregate_src=False,
compute_L=True, return_rays=True)
else:
tt, rays, M0 = Mesh3D.raytrace(source=sources,
rcv=rcvData, slowness=None,
aggregate_src=False,
compute_L=True, return_rays=True)
v0 = 1. / Mesh3D.get_s0(sources)
if par.verbose:
inconverged = np.where(tt == 0)[0]
for icr in inconverged:
print('\033[43m' +
'\nWarning: raypath failed to converge for event '
'N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(data[icr, 0]), sources[icr, 2],
sources[icr, 3], sources[icr, 4],
int(data[icr, 2]), rcvData[icr, 0],
rcvData[icr, 1], rcvData[icr, 2])
+ '\033[0m')
print('\033[43m' + 'ray will be temporarily removed' +
'\033[0m')
sys.stdout.flush()
else:
tt = np.array([])
if ncal > 0:
if par.use_sc:
TxCalib[:, 1] = Static_Corr[caldata[:, 2].astype(int) - 1, 0]
tt_Calib, Mcalib = Mesh3D.raytrace(
source=TxCalib, rcv=rcvCalib, slowness=None,
aggregate_src=False, compute_L=True, return_rays=False)
else:
tt_Calib, Mcalib = Mesh3D.raytrace(
source=TxCalib, rcv=rcvCalib, slowness=None,
aggregate_src=False, compute_L=True, return_rays=False)
if par.verbose:
inconverged = np.where(tt_Calib == 0)[0]
for icr in inconverged:
print('\033[43m' +
'\nWarning: raypath failed to converge '
'for calibration shot N '
'{0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver'
' N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(caldata[icr, 0]), TxCalib[icr, 2],
TxCalib[icr, 3], TxCalib[icr, 4],
int(caldata[icr, 2]), rcvCalib[icr, 0],
rcvCalib[icr, 1], rcvCalib[icr, 2])
+ '\033[0m')
print('\033[43m' + 'ray will be temporarily removed' +
'\033[0m')
sys.stdout.flush()
else:
tt_Calib = np.array([])
Resid = tObserved - tt
convrayData = np.where(tt != 0)[0]
convrayClib = np.where(tt_Calib != 0)[0]
if Resid.size == 0:
Residue = time_calibration[convrayClib] - tt_Calib[convrayClib]
else:
Residue = np.hstack(
(np.zeros([np.count_nonzero(tt) - 4 * numberOfEvents]),
time_calibration[convrayClib] - tt_Calib[convrayClib]))
ResidueNorm[i] = np.linalg.norm(np.hstack(
(Resid[convrayData], time_calibration[convrayClib] -
tt_Calib[convrayClib])))
if par.verbose:
print('...Building matrix M\n')
sys.stdout.flush()
M = sp.csr_matrix((0, nbre_param))
ir = 0
for even in range(numberOfEvents):
indh = np.where(Hypocenters[:, 0] == evID[even])[0]
indr = np.where(data[:, 0] == evID[even])[0]
Mi = M0[even]
nst_ev = Mi.shape[0]
Hi = np.ones([indr.size, 4])
for nr in range(indr.size):
rayi = rays[indr[nr]]
if rayi.shape[0] == 1:
continue
vel0 = v0[indr[nr]]
dx = rayi[1, 0] - Hypocenters[indh[0], 2]
dy = rayi[1, 1] - Hypocenters[indh[0], 3]
dz = rayi[1, 2] - Hypocenters[indh[0], 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx / (vel0 * ds)
Hi[nr, 2] = -dy / (vel0 * ds)
Hi[nr, 3] = -dz / (vel0 * ds)
convrays = np.where(tt[indr] != 0)[0]
if convrays.shape[0] < nst_ev:
Hi = Hi[convrays, :]
nst_ev = convrays.size
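# Q[:, 4:] spans the orthogonal complement of the column space of Hi;
# applying Ti to the rows of M and to the residuals removes the components
# explained by perturbing the four hypocenter parameters, decoupling the
# velocity update from the relocation step.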
Q, _ = np.linalg.qr(Hi, mode='complete')
Ti = sp.csr_matrix(Q[:, 4:])
Ti = Ti.T
if par.use_sc:
Lsc = sp.csr_matrix((np.ones(nst_ev,),
(range(nst_ev),
data[indr[convrays], 2] - 1)),
shape=(nst_ev, nstation))
Mi = sp.hstack((Mi, Lsc))
Mi = sp.csr_matrix(Ti @ Mi)
M = sp.vstack([M, Mi])
Residue[ir:ir + (nst_ev - 4)] = Ti.dot(Resid[indr[convrays]])
ir += nst_ev - 4
for evCal in range(len(Mcalib)):
Mi = Mcalib[evCal]
if par.use_sc:
indrCal = np.where(caldata[:, 0] == calID[evCal])[0]
convraysCal = np.where(tt_Calib[indrCal] != 0)[0]
Mi = sp.hstack((Mi, Msc_cal[evCal][convraysCal]))
M = sp.vstack([M, Mi])
if par.verbose:
print('Assembling matrices and solving system\n')
sys.stdout.flush()
S = np.sum(Static_Corr)
term1 = (M.T).dot(M)
nM = spl.norm(term1[:nnodes, :nnodes])
term2 = (d_Pinality.T).dot(d_Pinality)
nP = spl.norm(term2)
term3 = U.dot(U.T)
λ = par.λ * nM / nK
if nP != 0:
γ = par.γ * nM / nP
else:
γ = par.γ
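# The smoothing and penalty weights are rescaled by the norm ratios nM / nK
# and nM / nP so that the data, regularization and penalty terms remain of
# comparable magnitude in the normal equations.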
A = term1 + λ * KtK + γ * term2 + term3
if par.use_sc and par.max_sc > 0. and par.max_sc < 1.:
A += NtN
term1 = (M.T).dot(Residue)
term1 = term1.reshape([-1, 1])
term2 = (KX.T).dot(cx) + (KY.T).dot(cy) + par.wzK * (KZ.T).dot(cz)
term3 = (d_Pinality.T).dot(Pinality)
term4 = U.dot(S)
b = term1 - λ * term2 - γ * term3 - term4
if vPoints.size > 0:
α = par.α * nM / nD
A += α * DtD
b += α * D.T @ (vPoints[:, 1].reshape(-1, 1) -
D[:, :nnodes] @ Velocity)
x = spl.minres(A, b, tol=1.e-8)
deltam = x[0].reshape(-1, 1)
# update velocity vector and static correction
dVmax = np.max(abs(deltam[:nnodes]))
if dVmax > par.dVp_max:
deltam[:nnodes] *= par.dVp_max / dVmax
if par.use_sc and par.max_sc > 0. and par.max_sc < 1.:
sc_mean = np.mean(abs(deltam[nnodes:]))
if sc_mean > par.max_sc * np.mean(abs(Residue)):
deltam[nnodes:] *= par.max_sc * np.mean(abs(Residue)) / sc_mean
Velocity += np.matrix(deltam[:nnodes])
Slowness = 1. / Velocity
Static_Corr += deltam[nnodes:]
if par.saveVel == 'all':
if par.verbose:
print('...Saving Velocity models\n')
sys.stdout.flush()
try:
msh2vtk(nodes, cells, Velocity, basename +
'it{0}.vtk'.format(i + 1))
except ImportError:
print('vtk module is not installed\n')
sys.stdout.flush()
elif par.saveVel == 'last' and i == par.maxit - 1:
try:
msh2vtk(nodes, cells, Velocity, basename + '.vtk')
except ImportError:
print('vtk module is not installed\n')
sys.stdout.flush()
#######################################
# relocate Hypocenters
#######################################
Mesh3D.set_slowness(Slowness)
if numberOfEvents > 0:
print("\nIteration N {0:d} : Relocation of events\n".format(i + 1))
sys.stdout.flush()
if nThreads == 1:
for ev in range(numberOfEvents):
Hypocenters[ev, :] = _hypo_relocation(
ev, evID, Hypocenters, data, rcv,
Static_Corr, hypo_convergence, par)
else:
p = mp.get_context("fork").Pool(processes=nThreads)
updatedHypo = p.starmap(_hypo_relocation,
[(int(ev), evID, Hypocenters, data,
rcv, Static_Corr, hypo_convergence,
par)for ev in range(numberOfEvents)])
p.close() # pool won't take any new tasks
p.join()
Hypocenters = np.array([updatedHypo])[0]
# Calculate the hypocenter parameter uncertainty
uncertnty = []
if par.uncertainty and numberOfEvents > 0:
print("\nUncertainty evaluation\n")
sys.stdout.flush()
# estimate data variance
if nThreads == 1:
varData = [[], []]
for ev in range(numberOfEvents):
uncertnty.append(
_uncertaintyEstimat(ev, evID, Hypocenters, (data,), rcv,
Static_Corr, (Slowness,), par, varData))
else:
varData = manager.list([[], []])
with Pool(processes=nThreads) as p:
uncertnty = p.starmap(
_uncertaintyEstimat,
[(int(ev), evID, Hypocenters, (data, ),
rcv, Static_Corr, (Slowness, ), par,
varData) for ev in range(numberOfEvents)])
p.close() # pool won't take any new tasks
p.join()
sgmData = np.sqrt(np.sum(varData[0]) /
(np.sum(varData[1]) - 4 *
numberOfEvents -
Static_Corr.size))
for ic in range(numberOfEvents):
uncertnty[ic] = tuple([sgmData * x for x in uncertnty[ic]])
output = OrderedDict()
output['Hypocenters'] = Hypocenters
output['Convergence'] = list(hypo_convergence)
output['Uncertainties'] = uncertnty
output['Velocity'] = Velocity
output['Sts_Corrections'] = Static_Corr
output['Residual_norm'] = ResidueNorm
return output
def jntHyposlow_T(data, caldata, Vinit, cells, nodes, rcv, Hypo0,
par, threads=1, vPoints=np.array([]), basename='Slowness'):
"""
Joint hypocenter-velocity inversion from P-wave arrival time data,
parametrized using the slowness model.
Parameters
----------
data : np.ndarray, shape(arrival time number, 3)
Arrival times and corresponding receivers for each event.
caldata : np.ndarray, shape(number of calibration shots, 6)
Calibration shot data.
Vinit : np.ndarray, shape(nnodes,1) or (1,1)
Initial velocity model.
cells : np.ndarray of int, shape (cell number, 4)
Indices of nodes forming the cells.
nodes : np.ndarray, shape (nnodes, 3)
Node coordinates.
rcv : np.ndarray, shape (receiver number,3)
Coordinates of receivers.
Hypo0 : np.ndarray, shape(event number, 5)
First guesses of the hypocenter coordinates (must all be different).
par : instance of the class Parameters
The inversion parameters.
threads : int, optional
Thread number. The default is 1.
vPoints : np.ndarray, shape(point number,4), optional
Known velocity points. The default is np.array([]).
basename : string, optional
The filename used to save the output files. The default is 'Slowness'.
Returns
-------
output : python dictionary
It contains the estimated hypocenter coordinates and their origin times,
static correction values, velocity model, convergence states,
parameter uncertainty and residual norm in each iteration.
"""
if par.verbose:
print(par)
print('inversion involves the slowness model\n')
sys.stdout.flush()
if par.use_sc:
nstation = rcv.shape[0]
else:
nstation = 0
Static_Corr = np.zeros([nstation, 1])
nnodes = nodes.shape[0]
# observed traveltimes
if data.shape[0] > 0:
evID = np.unique(data[:, 0]).astype(int)
tObserved = data[:, 1]
numberOfEvents = evID.size
else:
tObserved = np.array([])
numberOfEvents = 0
rcvData = np.zeros([data.shape[0], 3])
for ev in range(numberOfEvents):
indr = np.where(data[:, 0] == evID[ev])[0]
rcvData[indr] = rcv[data[indr, 2].astype(int) - 1, :]
# get calibration data
if caldata.shape[0] > 0:
calID = np.unique(caldata[:, 0])
ncal = calID.size
time_calibration = caldata[:, 1]
TxCalib = np.zeros((caldata.shape[0], 5))
TxCalib[:, 2:] = caldata[:, 3:]
TxCalib[:, 0] = caldata[:, 0]
rcvCalib = np.zeros([caldata.shape[0], 3])
if par.use_sc:
Msc_cal = []
for nc in range(ncal):
indr = np.where(caldata[:, 0] == calID[nc])[0]
rcvCalib[indr] = rcv[caldata[indr, 2].astype(int) - 1, :]
if par.use_sc:
Msc_cal.append(sp.csr_matrix(
(np.ones([indr.size, ]),
(range(indr.size), caldata[indr, 2]-1)),
shape=(indr.size, nstation)))
else:
ncal = 0
time_calibration = np.array([])
# initial velocity model
if Vinit.size == 1:
Slowness = 1. / (Vinit * np.ones([nnodes, 1]))
elif Vinit.size == nnodes:
Slowness = 1. / Vinit
else:
print("invalid Velocity Model")
sys.stdout.flush()
return 0
# Hypocenter
if numberOfEvents > 0 and Hypo0.shape[0] != numberOfEvents:
print("invalid Hypocenters0 format\n")
sys.stdout.flush()
return 0
else:
Hypocenters = Hypo0.copy()
# number of threads
nThreadsSystem = cpu_count()
nThreads = np.min((threads, nThreadsSystem))
global Mesh3D, Dimensions
# build mesh object
Mesh3D = tmesh.Mesh3d(nodes, tetra=cells, method='DSPM', cell_slowness=0,
n_threads=nThreads, n_secondary=2, n_tertiary=1,
radius_factor_tertiary=2, translate_grid=1)
Mesh3D.set_slowness(Slowness)
Dimensions = np.empty(6)
Dimensions[0] = min(nodes[:, 0])
Dimensions[1] = max(nodes[:, 0])
Dimensions[2] = min(nodes[:, 1])
Dimensions[3] = max(nodes[:, 1])
Dimensions[4] = min(nodes[:, 2])
Dimensions[5] = max(nodes[:, 2])
ResidueNorm = np.zeros([par.maxit])
if par.invert_vel:
if par.use_sc:
U = sp.bsr_matrix(np.vstack((np.zeros([nnodes, 1]),
np.ones([nstation, 1]))))
nbre_param = nnodes + nstation
if par.max_sc > 0. and par.max_sc < 1.:
N = sp.bsr_matrix(
np.hstack((np.zeros([nstation, nnodes]), np.eye(nstation))))
NtN = (1. / par.max_sc**2) * N.T.dot(N)
else:
U = sp.csr_matrix(np.zeros([nnodes, 1]))
nbre_param = nnodes
# build matrix D
if vPoints.size > 0:
if par.verbose:
print('\nBuilding velocity data point matrix D\n')
sys.stdout.flush()
D = Mesh3D.compute_D(vPoints[:, 2:])
D = sp.hstack((D, sp.csr_matrix((D.shape[0], nstation)))).tocsr()
DtD = D.T @ D
nD = spl.norm(DtD)
# Build regularization matrix
if par.verbose:
print('\n...Building regularization matrix K\n')
sys.stdout.flush()
kx, ky, kz = Mesh3D.compute_K(order=2, taylor_order=2,
weighting=1, squared=0,
s0inside=0, additional_points=3)
KX = sp.hstack((kx, sp.csr_matrix((nnodes, nstation))))
KX_Square = KX.transpose().dot(KX)
KY = sp.hstack((ky, sp.csr_matrix((nnodes, nstation))))
KY_Square = KY.transpose().dot(KY)
KZ = sp.hstack((kz, sp.csr_matrix((nnodes, nstation))))
KZ_Square = KZ.transpose().dot(KZ)
KtK = KX_Square + KY_Square + par.wzK * KZ_Square
nK = spl.norm(KtK)
if nThreads == 1:
hypo_convergence = list(np.zeros(numberOfEvents, dtype=bool))
else:
manager = Manager()
hypo_convergence = manager.list(np.zeros(numberOfEvents, dtype=bool))
for i in range(par.maxit):
if par.verbose:
print("\nIteration N : {0:d}\n".format(i + 1))
sys.stdout.flush()
if par.invert_vel:
if par.verbose:
print(
'\nIteration {0:d} - Updating velocity model\n'.format(i + 1))
print("\nUpdating penalty vector\n")
sys.stdout.flush()
# Build vector C
cx = kx.dot(Slowness)
cy = ky.dot(Slowness)
cz = kz.dot(Slowness)
# build matrix P and dP
indSmin = np.where(Slowness < 1. / par.Vpmax)[0]
indSmax = np.where(Slowness > 1. / par.Vpmin)[0]
indPinality = np.hstack([indSmin, indSmax])
dPinality_V = np.hstack(
[-par.PAp * np.ones(indSmin.size), par.PAp * np.ones(indSmax.size)])
pinality_V = np.vstack([par.PAp *
(1. / par.Vpmax -
Slowness[indSmin]), par.PAp *
(Slowness[indSmax] -
1. / par.Vpmin)])
d_Pinality = sp.csr_matrix(
(dPinality_V, (indPinality, indPinality)), shape=(
nnodes, nbre_param))
Pinality = sp.csr_matrix((
pinality_V.reshape([-1, ]),
(indPinality, np.zeros([indPinality.shape[0]]))),
shape=(nnodes, 1))
if par.verbose:
print('\nPenalties applied at {0:d} nodes\n'.format(
dPinality_V.size))
print('...Start Raytracing\n')
sys.stdout.flush()
if numberOfEvents > 0:
sources = np.empty((data.shape[0], 5))
if par.use_sc:
sc_data = np.empty((data.shape[0], ))
for ev in np.arange(numberOfEvents):
indr = np.where(data[:, 0] == evID[ev])[0]
indh = np.where(Hypocenters[:, 0] == evID[ev])[0]
sources[indr, :] = Hypocenters[indh, :]
if par.use_sc:
sc_data[indr] = Static_Corr[data[indr, 2].astype(int)
- 1, 0]
if par.use_sc:
sources[:, 1] += sc_data
tt, rays, M0 = Mesh3D.raytrace(source=sources,
rcv=rcvData, slowness=None,
aggregate_src=False,
compute_L=True, return_rays=True)
else:
tt, rays, M0 = Mesh3D.raytrace(source=sources,
rcv=rcvData, slowness=None,
aggregate_src=False,
compute_L=True, return_rays=True)
slow_0 = Mesh3D.get_s0(sources)
if par.verbose:
inconverged = np.where(tt == 0)[0]
for icr in inconverged:
print('\033[43m' +
'\nWarning: raypath failed to converge for event '
'N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver'
' N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(data[icr, 0]), sources[icr, 2],
sources[icr, 3], sources[icr, 4],
int(data[icr, 2]), rcvData[icr, 0],
rcvData[icr, 1], rcvData[icr, 2]) +
'\033[0m')
print('\033[43m' + 'ray will be temporarily removed' +
'\033[0m')
sys.stdout.flush()
else:
tt = np.array([])
if ncal > 0:
if par.use_sc:
# add static corrections for each station
TxCalib[:, 1] = Static_Corr[caldata[:, 2].astype(int) - 1, 0]
tt_Calib, Mcalib = Mesh3D.raytrace(
source=TxCalib, rcv=rcvCalib, slowness=None,
aggregate_src=False, compute_L=True, return_rays=False)
else:
tt_Calib, Mcalib = Mesh3D.raytrace(
source=TxCalib, rcv=rcvCalib, slowness=None,
aggregate_src=False, compute_L=True, return_rays=False)
if par.verbose:
inconverged = np.where(tt_Calib == 0)[0]
for icr in inconverged:
print('\033[43m' +
'\nWarning: raypath failed to converge '
'for calibration shot N '
'{0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver'
' N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(caldata[icr, 0]), TxCalib[icr, 2],
TxCalib[icr, 3], TxCalib[icr, 4],
int(caldata[icr, 2]), rcvCalib[icr, 0],
rcvCalib[icr, 1], rcvCalib[icr, 2]) +
'\033[0m')
print('\033[43m' + 'ray will be temporarily removed' +
'\033[0m')
sys.stdout.flush()
else:
tt_Calib = np.array([])
Resid = tObserved - tt
convrayData = np.where(tt != 0)[0]
convrayClib = np.where(tt_Calib != 0)[0]
if Resid.size == 0:
Residue = time_calibration[convrayClib] - tt_Calib[convrayClib]
else:
Residue = np.hstack((np.zeros([np.count_nonzero(tt) -
4 * numberOfEvents]),
time_calibration[convrayClib]
- tt_Calib[convrayClib]))
ResidueNorm[i] = np.linalg.norm(np.hstack(
(Resid[convrayData], time_calibration[convrayClib] -
tt_Calib[convrayClib])))
if par.verbose:
print('\n...Building matrix M\n')
sys.stdout.flush()
M = sp.csr_matrix((0, nbre_param))
ir = 0
for even in range(numberOfEvents):
indh = np.where(Hypocenters[:, 0] == evID[even])[0]
indr = np.where(data[:, 0] == evID[even])[0]
Mi = M0[even]
nst_ev = Mi.shape[0]
Hi = np.ones([indr.size, 4])
for nr in range(indr.size):
rayi = rays[indr[nr]]
if rayi.shape[0] == 1:
continue
slw0 = slow_0[indr[nr]]
dx = rayi[1, 0] - Hypocenters[indh[0], 2]
dy = rayi[1, 1] - Hypocenters[indh[0], 3]
dz = rayi[1, 2] - Hypocenters[indh[0], 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -slw0 * dx / ds
Hi[nr, 2] = -slw0 * dy / ds
Hi[nr, 3] = -slw0 * dz / ds
convrays = np.where(tt[indr] != 0)[0]
if convrays.shape[0] < indr.size:
Hi = Hi[convrays, :]
Q, _ = np.linalg.qr(Hi, mode='complete')
Ti = sp.csr_matrix(Q[:, 4:])
Ti = Ti.T
if par.use_sc:
Lsc = sp.csr_matrix((np.ones(nst_ev,),
(range(nst_ev),
data[indr[convrays], 2] - 1)),
shape=(nst_ev, nstation))
Mi = sp.hstack((Mi, Lsc))
Mi = sp.csr_matrix(Ti @ Mi)
M = sp.vstack([M, Mi])
Residue[ir:ir + (nst_ev - 4)] = Ti.dot(Resid[indr[convrays]])
ir += nst_ev - 4
for evCal in range(len(Mcalib)):
Mi = Mcalib[evCal]
if par.use_sc:
indrCal = np.where(caldata[:, 0] == calID[evCal])[0]
convraysCal = np.where(tt_Calib[indrCal] != 0)[0]
Mi = sp.hstack((Mi, Msc_cal[evCal][convraysCal]))
M = sp.vstack([M, Mi])
if par.verbose:
print('Assembling matrices and solving system\n')
sys.stdout.flush()
S = np.sum(Static_Corr)
term1 = (M.T).dot(M)
nM = spl.norm(term1[:nnodes, :nnodes])
term2 = (d_Pinality.T).dot(d_Pinality)
nP = spl.norm(term2)
term3 = U.dot(U.T)
λ = par.λ * nM / nK
if nP != 0:
γ = par.γ * nM / nP
else:
γ = par.γ
A = term1 + λ * KtK + γ * term2 + term3
if par.use_sc and par.max_sc > 0. and par.max_sc < 1.:
A += NtN
term1 = (M.T).dot(Residue)
term1 = term1.reshape([-1, 1])
term2 = (KX.T).dot(cx) + (KY.T).dot(cy) + par.wzK * (KZ.T).dot(cz)
term3 = (d_Pinality.T).dot(Pinality)
term4 = U.dot(S)
b = term1 - λ * term2 - γ * term3 - term4
if vPoints.size > 0:
α = par.α * nM / nD
A += α * DtD
b += α * D.T @ (1. / (vPoints[:, 1].reshape(-1, 1)) -
D[:, :nnodes] @ Slowness)
x = spl.minres(A, b, tol=1.e-8)
deltam = x[0].reshape(-1, 1)
# update velocity vector and static correction
deltaV_max = np.max(
abs(1. / (Slowness + deltam[:nnodes]) - 1. / Slowness))
if deltaV_max > par.dVp_max:
print('\n...Rescale P slowness vector\n')
sys.stdout.flush()
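# L1 and L2 bound the scaling factor so that the velocity change implied by
# the slowness perturbation ds, i.e. |1/(s + ds) - 1/s|, does not exceed
# par.dVp_max at any node.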
L1 = np.max(deltam[:nnodes] / (-par.dVp_max *
(Slowness**2) /
(1 + par.dVp_max * Slowness)))
L2 = np.max(deltam[:nnodes] / (par.dVp_max *
(Slowness**2) /
(1 - par.dVp_max * Slowness)))
deltam[:nnodes] /= np.max([L1, L2])
print('P wave: maximum ds = {0:4.3f}, '
'maximum dV = {1:4.3f}\n'.format(max(abs(
deltam[:nnodes]))[0], np.max(
abs(1. / (Slowness + deltam[:nnodes])
- 1. / Slowness))))
sys.stdout.flush()
if par.use_sc and par.max_sc > 0. and par.max_sc < 1.:
sc_mean = np.mean(abs(deltam[nnodes:]))
if sc_mean > par.max_sc * np.mean(abs(Residue)):
deltam[nnodes:] *= par.max_sc * np.mean(abs(Residue)) / sc_mean
Slowness += np.matrix(deltam[:nnodes])
Mesh3D.set_slowness(Slowness)
Static_Corr += deltam[nnodes:]
if par.saveVel == 'all':
if par.verbose:
print('...Saving Velocity models')
try:
msh2vtk(nodes, cells, 1. / Slowness, basename +
'it{0}.vtk'.format(i + 1))
except ImportError:
print('vtk module is not installed or encounters problems')
elif par.saveVel == 'last' and i == par.maxit - 1:
try:
msh2vtk(nodes, cells, 1. / Slowness, basename + '.vtk')
except ImportError:
print('vtk module is not installed or encounters problems')
#######################################
# relocate Hypocenters
#######################################
if numberOfEvents > 0:
print("\nIteration N {0:d} : Relocation of events".format(
i + 1) + '\n')
sys.stdout.flush()
if nThreads == 1:
for ev in range(numberOfEvents):
Hypocenters[ev, :] = _hypo_relocation(
ev, evID, Hypocenters, data, rcv, Static_Corr,
hypo_convergence, par)
else:
with Pool(processes=nThreads) as p:
updatedHypo = p.starmap(_hypo_relocation,
[(int(ev), evID, Hypocenters, data,
rcv, Static_Corr, hypo_convergence,
par) for ev in range(numberOfEvents)])
p.close() # pool won't take any new tasks
p.join()
Hypocenters = np.array([updatedHypo])[0]
# Calculate the hypocenter parameter uncertainty
uncertnty = []
if par.uncertainty and numberOfEvents > 0:
print("\nUncertainty evaluation\n")
sys.stdout.flush()
# estimate data variance
if nThreads == 1:
varData = [[], []]
for ev in range(numberOfEvents):
uncertnty.append(_uncertaintyEstimat(ev, evID, Hypocenters,
(data,), rcv, Static_Corr,
(Slowness,), par, varData))
else:
varData = manager.list([[], []])
with Pool(processes=nThreads) as p:
uncertnty = p.starmap(_uncertaintyEstimat,
[(int(ev), evID, Hypocenters, (data,),
rcv, Static_Corr, (Slowness,), par,
varData) for ev in range(numberOfEvents)])
p.close() # pool won't take any new tasks
p.join()
sgmData = np.sqrt(np.sum(varData[0]) /
(np.sum(varData[1]) - 4 *
numberOfEvents -
Static_Corr.size))
for ic in range(numberOfEvents):
uncertnty[ic] = tuple([sgmData * x for x in uncertnty[ic]])
output = OrderedDict()
output['Hypocenters'] = Hypocenters
output['Convergence'] = list(hypo_convergence)
output['Uncertainties'] = uncertnty
output['Velocity'] = 1. / Slowness
output['Sts_Corrections'] = Static_Corr
output['Residual_norm'] = ResidueNorm
return output
# BSD 3-Clause License
# Copyright (c) 2019, regain authors
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pandas as pd
from sklearn.metrics import v_measure_score
from regain.utils import error_norm_time, structure_error
def convert_dict_to_df(input_dict, max_samples=5000):
neww = {}
for k, v in input_dict.items():
if k[1] > max_samples: continue
new = {}
for kk, vv in v.items():
new.update({(kk, i): vvv for i, vvv in enumerate(vv)})
neww[k] = new
res_df = pd.DataFrame(neww)
res_df.index.name = ('measure', 'iter')
rr = res_df.T.reset_index()
rr = rr.rename(columns={'level_0': 'method', 'level_1': 'samples'})
rr.method = rr.method.str.upper()
return rr.set_index(['method', 'samples'])
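# input_dict maps (method, n_samples) -> {measure: list of values per run};
# the result is a DataFrame indexed by (METHOD, samples) with (measure, iter)
# columns.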
def ensure_ticc_valid(new_r):
for i in range(10):
new_r['valid', i] = True
for r in new_r.loc['TICC', 'model'].iterrows():
sampl = r[0]
for k, rrr in (r[1].iteritems()):
if np.alltrue(rrr.labels_ == np.zeros_like(rrr.labels_)):
new_r.loc[('TICC', sampl), ('structure_error', k)] = None
format_2e = lambda x: "{:.2e} (\pm {:.2e})".format(
(np.nanmean(np.array(x).astype(float))),
(np.nanstd(np.array(x).astype(float))))
format_3f = lambda x: "{:.3f} \pm {:.3f}".format(
(np.nanmean(np.array(x).astype(float))),
(np.nanstd(np.array(x).astype(float))))
def set_results(
vs, model, name, i, labels_true, labels_pred, thetas_true_sparse,
thetas_true_rep, obs_precs_sparse, obs_precs, tac):
th = name in ['wp', 'ticc']
vs.setdefault((name, i), {}).setdefault('model', []).append(model)
vs.setdefault(
(name, i),
{}).setdefault('v_measure',
[]).append(v_measure_score(labels_true, labels_pred))
vs.setdefault((name, i), {}).setdefault('structure_error', []).append(
structure_error(
thetas_true_sparse, obs_precs_sparse, no_diagonal=True,
thresholding=th, eps=1e-5))
vs.setdefault(
(name, i),
{}).setdefault('error_norm',
[]).append(error_norm_time(thetas_true_rep, obs_precs))
vs.setdefault((name, i), {}).setdefault('error_norm_sparse', []).append(
error_norm_time(thetas_true_sparse, obs_precs_sparse))
vs.setdefault((name, i), {}).setdefault('time', []).append(tac)
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
is_max = s == (s.min() if s.name in ['time', 'error_norm'] else s.max())
s.loc[is_max] = s[is_max].apply(lambda x: '\\textbf{%s}' % (x))
return ['background-color: yellow' if v else '' for v in is_max]
def highlight_max_std(s):
'''
highlight the maximum in a Series yellow.
'''
attr = 'background-color: yellow'
if s.ndim == 1: # Series from .apply(axis=0) or axis=1
ss = s.str.split(' ').apply(lambda x: x[0]).astype(float)
is_max = ss.astype(float) == (
ss.min() if s.name in ['time', 'error_norm'] else ss.max())
s.loc[is_max] = s[is_max].apply(lambda x: '\\bm{%s}' % (x))
return [attr if v else '' for v in is_max]
else:
ss = s.applymap(lambda s: float(s.split(' ')[0]))
is_min = ss.groupby(level=0).transform(
lambda x: x.min()
if x.name in ['time', 'error_norm'] else x.max()) == ss
ret = pd.DataFrame(
np.where(is_min, attr, ''), index=s.index, columns=s.columns)
return ret
import json
import os
from typing import Dict, List, Tuple
import numpy as np
from continuum import download
from continuum.datasets.base import _ContinuumDataset
class MultiNLI(_ContinuumDataset):
"""Continuum version of the MultiNLI dataset.
References:
* A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference
Williams, Nangia, and Bowman
ACL 2018
* Progressive Memory Banks for Incremental Domain Adaptation
Asghar & Mou
ICLR 2020
The dataset is based on the NLI task.
For each example, two sentences are given. The goal is to determine whether
this pair of sentences has:
- Opposite meaning (contradiction)
- Similar meaning (entailment)
- no relation to each other (neutral)
:param data_path: The folder extracted from the official zip file.
:param download: An option useless in this case.
"""
data_url = "https://cims.nyu.edu/~sbowman/multinli/multinli_1.0.zip"
def __init__(self, data_path: str = "", download: bool = True) -> None:
super().__init__(data_path, download)
def _download(self):
if os.path.exists(os.path.join(self.data_path, "multinli_1.0")):
print("Dataset already extracted.")
else:
path = download.download(self.data_url, self.data_path)
download.unzip(path)
print("Dataset extracted.")
@property
def data_type(self) -> str:
return "text"
@property
def transformations(self):
return []
def original_targets(self) -> List[str]:
return ["contradiction", "entailment", "neutral"]
def init(self, train: bool) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""Generate the MultiNLI data.
The dataset has several domains, but always the same targets
("contradiction", "entailment", "neutral"). 5 domains are allocated for
the train set ("fiction", "government", "slate", "telephone", "travel"),
and 5 to the test set ("facetoface", "letters", "nineeleven", "oup",
"verbatim").
While the train set is given a different task id for each domain, the test set
always has a dummy 0 domain id, as it is supposed to be fixed.
"""
texts, targets, genres = [], [], []
available_targets = ["contradiction", "entailment", "neutral"]
available_genres = [
"fiction",
"government",
"slate",
"telephone",
"travel", # /train
"facetoface",
"letters",
"nineeleven",
"oup",
"verbatim" # /test
]
if train:
json_path = os.path.join(self.data_path, "multinli_1.0", "multinli_1.0_train.jsonl")
else:
json_path = os.path.join(
self.data_path, "multinli_1.0", "multinli_1.0_dev_mismatched.jsonl"
)
with open(json_path) as f:
for line in f:
line_parsed: Dict[str, str] = json.loads(line)
if line_parsed["gold_label"] not in available_targets:
continue # A few cases exist w/o targets.
texts.append((line_parsed["sentence1"], line_parsed["sentence2"]))
targets.append(available_targets.index(line_parsed["gold_label"]))
if train: # We add a new domain id for the train set.
genres.append(available_genres.index(line_parsed["genre"]))
else: # Test set is fixed, therefore we artificially give a unique domain.
genres.append(0)
texts = np.array(texts)
targets = np.array(targets)
genres = np.array(genres)
return texts, targets, genres
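# Example usage (hypothetical data path):
# dataset = MultiNLI("/path/to/multinli", download=True)
# x, y, t = dataset.init(train=True)  # sentence pairs, targets, domain ids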
import unittest
import unittest.mock
import numpy as np
from ConfigSpace import CategoricalHyperparameter, \
UniformFloatHyperparameter, UniformIntegerHyperparameter, \
OrdinalHyperparameter, EqualsCondition
from smac.epm.rf_with_instances import RandomForestWithInstances
from smac.epm.util_funcs import get_types
import smac
import smac.configspace
class TestRFWithInstances(unittest.TestCase):
def _get_cs(self, n_dimensions):
configspace = smac.configspace.ConfigurationSpace()
for i in range(n_dimensions):
configspace.add_hyperparameter(UniformFloatHyperparameter('x%d' % i, 0, 1))
return configspace
def test_predict_wrong_X_dimensions(self):
rs = np.random.RandomState(1)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((10,), dtype=np.uint),
bounds=list(map(lambda x: (0, 10), range(10))),
seed=1,
)
X = rs.rand(10)
self.assertRaisesRegex(ValueError, "Expected 2d array, got 1d array!",
model.predict, X)
X = rs.rand(10, 10, 10)
self.assertRaisesRegex(ValueError, "Expected 2d array, got 3d array!",
model.predict, X)
X = rs.rand(10, 5)
self.assertRaisesRegex(ValueError, "Rows in X should have 10 entries "
"but have 5!",
model.predict, X)
def test_predict(self):
rs = np.random.RandomState(1)
X = rs.rand(20, 10)
Y = rs.rand(10, 1)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((10,), dtype=np.uint),
bounds=list(map(lambda x: (0, 10), range(10))),
seed=1,
)
model.train(X[:10], Y[:10])
m_hat, v_hat = model.predict(X[10:])
self.assertEqual(m_hat.shape, (10, 1))
self.assertEqual(v_hat.shape, (10, 1))
def test_train_with_pca(self):
rs = np.random.RandomState(1)
X = rs.rand(20, 20)
F = rs.rand(10, 10)
Y = rs.rand(20, 1)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((20,), dtype=np.uint),
bounds=list(map(lambda x: (0, 10), range(10))),
seed=1,
pca_components=2,
instance_features=F,
)
model.train(X, Y)
self.assertEqual(model.n_params, 10)
self.assertEqual(model.n_feats, 10)
self.assertIsNotNone(model.pca)
self.assertIsNotNone(model.scaler)
def test_predict_marginalized_over_instances_wrong_X_dimensions(self):
rs = np.random.RandomState(1)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((10,), dtype=np.uint),
instance_features=rs.rand(10, 2),
seed=1,
bounds=list(map(lambda x: (0, 10), range(10))),
)
X = rs.rand(10)
self.assertRaisesRegex(ValueError, "Expected 2d array, got 1d array!",
model.predict_marginalized_over_instances, X)
X = rs.rand(10, 10, 10)
self.assertRaisesRegex(ValueError, "Expected 2d array, got 3d array!",
model.predict_marginalized_over_instances, X)
@unittest.mock.patch.object(RandomForestWithInstances, 'predict')
def test_predict_marginalized_over_instances_no_features(self, rf_mock):
"""The RF should fall back to the regular predict() method."""
rs = np.random.RandomState(1)
X = rs.rand(20, 10)
Y = rs.rand(10, 1)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((10,), dtype=np.uint),
bounds=list(map(lambda x: (0, 10), range(10))),
seed=1,
)
model.train(X[:10], Y[:10])
model.predict(X[10:])
self.assertEqual(rf_mock.call_count, 1)
def test_predict_marginalized_over_instances(self):
rs = np.random.RandomState(1)
X = rs.rand(20, 10)
F = rs.rand(10, 5)
Y = rs.rand(len(X) * len(F), 1)
X_ = rs.rand(200, 15)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((15,), dtype=np.uint),
instance_features=F,
bounds=list(map(lambda x: (0, 10), range(10))),
seed=1,
)
model.train(X_, Y)
means, vars = model.predict_marginalized_over_instances(X)
self.assertEqual(means.shape, (20, 1))
self.assertEqual(vars.shape, (20, 1))
@unittest.mock.patch.object(RandomForestWithInstances, 'predict')
def test_predict_marginalized_over_instances_mocked(self, rf_mock):
"""Use mock to count the number of calls to predict()"""
class SideEffect(object):
def __call__(self, X):
# Numpy array of number 0 to X.shape[0]
rval = np.array(list(range(X.shape[0]))).reshape((-1, 1))
# Return mean and variance
return rval, rval
rf_mock.side_effect = SideEffect()
rs = np.random.RandomState(1)
F = rs.rand(10, 5)
model = RandomForestWithInstances(
configspace=self._get_cs(10),
types=np.zeros((15,), dtype=np.uint),
instance_features=F,
bounds=list(map(lambda x: (0, 10), range(10))),
seed=1,
)
X = rs.rand(20, 10)
F = rs.rand(10, 5)
Y = rs.randint(1, size=(len(X) * len(F), 1)) * 1.
X_ = rs.rand(200, 15)
model.train(X_, Y)
means, vars = model.predict_marginalized_over_instances(rs.rand(11, 10))
# expected to be 0 as predict() is bypassed: the trees are evaluated and
# un-logged manually inside predict_marginalized_over_instances
self.assertEqual(rf_mock.call_count, 0)
self.assertEqual(means.shape, (11, 1))
self.assertEqual(vars.shape, (11, 1))
for i in range(11):
self.assertEqual(means[i], 0.)
self.assertEqual(vars[i], 1.e-10)
def test_predict_with_actual_values(self):
X = np.array([
[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[0., 1., 1.],
[1., 0., 0.],
[1., 0., 1.],
[1., 1., 0.],
[1., 1., 1.]], dtype=np.float64)
y = np.array([
[.1],
[.2],
[9],
[9.2],
[100.],
[100.2],
[109.],
[109.2]], dtype=np.float64)
model = RandomForestWithInstances(
configspace=self._get_cs(3),
types=np.array([0, 0, 0], dtype=np.uint),
bounds=[(0, np.nan), (0, np.nan), (0, np.nan)],
instance_features=None,
seed=12345,
ratio_features=1.0,
)
model.train(np.vstack((X, X, X, X, X, X, X, X)), np.vstack((y, y, y, y, y, y, y, y)))
y_hat, _ = model.predict(X)
for y_i, y_hat_i in zip(y.reshape((1, -1)).flatten(), y_hat.reshape((1, -1)).flatten()):
self.assertAlmostEqual(y_i, y_hat_i, delta=0.1)
def test_with_ordinal(self):
cs = smac.configspace.ConfigurationSpace()
_ = cs.add_hyperparameter(CategoricalHyperparameter('a', [0, 1], default_value=0))
_ = cs.add_hyperparameter(OrdinalHyperparameter('b', [0, 1], default_value=1))
_ = cs.add_hyperparameter(UniformFloatHyperparameter('c', lower=0., upper=1., default_value=1))
_ = cs.add_hyperparameter(UniformIntegerHyperparameter('d', lower=0, upper=10, default_value=1))
cs.seed(1)
feat_array = np.array([0, 0, 0]).reshape(1, -1)
types, bounds = get_types(cs, feat_array)
model = RandomForestWithInstances(
configspace=cs,
types=types,
bounds=bounds,
instance_features=feat_array,
seed=1,
ratio_features=1.0,
pca_components=9,
)
self.assertEqual(bounds[0][0], 2)
self.assertTrue(bounds[0][1] is np.nan)
self.assertEqual(bounds[1][0], 0)
self.assertEqual(bounds[1][1], 1)
self.assertEqual(bounds[2][0], 0.)
self.assertEqual(bounds[2][1], 1.)
self.assertEqual(bounds[3][0], 0.)
self.assertEqual(bounds[3][1], 1.)
X = np.array([
[0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0., 0., 0.],
[0., 1., 0., 9., 0., 0., 0.],
[0., 1., 1., 4., 0., 0., 0.]], dtype=np.float64)
y = np.array([0, 1, 2, 3], dtype=np.float64)
X_train = np.vstack((X, X, X, X, X, X, X, X, X, X))
y_train = np.vstack((y, y, y, y, y, y, y, y, y, y))
model.train(X_train, y_train.reshape((-1, 1)))
mean, _ = model.predict(X)
for idx, m in enumerate(mean):
self.assertAlmostEqual(y[idx], m, 0.05)
def test_rf_on_sklearn_data(self):
import sklearn.datasets
X, y = sklearn.datasets.load_boston(return_X_y=True)
rs = np.random.RandomState(1)
types = np.zeros(X.shape[1])
bounds = [(np.min(X[:, i]), np.max(X[:, i])) for i in range(X.shape[1])]
#!/usr/bin/env python3
# Document Scanner
__author__ = "<NAME>"
__date__ = "September 16, 2021"
__email__ = "<EMAIL>"
import cv2
import numpy as np
def preProcessing(image):
imgGray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, ksize=(5, 5), sigmaX=1)
imgCanny = cv2.Canny(imgBlur, threshold1=75, threshold2=75)
k = np.ones((3, 3))
imgDilate = cv2.dilate(imgCanny, kernel=k, iterations=2)
imgThresh = cv2.erode(imgDilate, kernel=k, iterations=1)
cv2.imshow('Threshold', imgThresh)
return imgThresh
def getCorners(image):
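# Scan all external contours and keep the largest 4-point polygon as the
# document outline; if none is found, fall back to the full-image corners
# used to initialize 'biggest'.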
w = image.shape[0]
h = image.shape[1]
biggest = np.array([[[0, 0],
[w, 0],
[w, h],
[0, h]]])
maxArea = 0
imgContour = image.copy()
image = preProcessing(image)
contours, _ = cv2.findContours(
image, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
if area > 5000:
# cv2.drawContours(imgContour, contours=cnt, contourIdx=-1, color=(255,0,0), thickness=3)
peri = cv2.arcLength(cnt, closed=True)
approx = cv2.approxPolyDP(cnt, epsilon=0.02*peri, closed=True)
if area > maxArea and len(approx) == 4:
biggest = approx
maxArea = area
cv2.drawContours(imgContour, contours=biggest, contourIdx=-1, color=(255, 0, 0), thickness=20)
cv2.imshow('Contours', imgContour)
return biggest
def reorder(corners):
corners = corners.reshape((4,2))
newCorners = np.zeros((4,1,2))
add = np.sum(corners, axis=1)
newCorners[0] = corners[np.argmin(add)]
import numpy as np
from .onshore_cost_model import onshore_tcc
def offshore_turbine_capex(capacity, hub_height, rotor_diam, depth, distance_to_shore, distance_to_bus=3, foundation="monopile", mooring_count=3, anchor="DEA", turbine_count=80, turbine_spacing=5, turbine_row_spacing=9):
"""
A cost and scaling model (CSM) to calculate the total cost of a 3-bladed, direct drive offshore wind turbine according to the cost model proposed by Fingersh et al. [1] and Maples et al. [2].
The CSM distinguishes between seafloor-fixed foundation types, "monopile" and "jacket", and floating foundation types, "semisubmersible" and "spar".
The total turbine cost includes the contributions of the turbine capital cost (TCC), amounting to 32.9% for fixed or 23.9% for floating structures, the balance of system (BOS) costs, amounting to 46.2% and 60.8% respectively, as well as the financial costs as the complementary percentage contribution (20.9% and 15.6%, respectively, as implemented below) [3].
A CSM normalization is done such that a chosen baseline offshore turbine taken by Caglayan et al. [4] (see notes for details) corresponds to an expected specific cost of 2300 €/kW in a 2050 European context as suggested by the 2016 cost of wind energy review by Stehly [3].
Parameters
----------
capacity : numeric or array-like
Turbine's nominal capacity in kW.
hub_height : numeric or array-like
Turbine's hub height in m.
rotor_diam : numeric or array-like
Turbine's rotor diameter in m.
depth : numeric or array-like
Water depth in m (absolute value) at the turbine's location.
distance_to_shore : numeric or array-like
Distance from the turbine's location to the nearest shore in km.
distance_to_bus : numeric or array-like, optional
Distance from the wind farm's bus in km from the turbine's location.
foundation : str or array-like of strings, optional
Turbine's foundation type. Accepted types are: "monopile", "jacket", "semisubmersible" or "spar", by default "monopile"
mooring_count : numeric, optional
Number of mooring lines attaching a turbine to the seafloor; only applicable for floating foundation types. By default 3, assuming a triangular attachment to the seafloor.
anchor : str, optional
Turbine's anchor type, only applicable for floating foundation types; by default "DEA", as recommended by [1].
Arguments accepted are "dea" (drag embedment anchor) or "spa" (suction pile anchor).
turbine_count : numeric, optional
Number of turbines in the offshore windpark. CSM valid for the range [3-200], by default 80
turbine_spacing : numeric, optional
Spacing distance in a row of turbines (turbines that share the electrical connection) to the bus. The value must be a multiplyer of rotor diameter. CSM valid for the range [4-9], by default 5
turbine_row_spacing : numeric, optional
Spacing distance between rows of turbines. The value must be a multiplyer of rotor diameter. CSM valid for the range [4-10], by default 9
Returns
--------
numeric or array-like
Offshore turbine total cost
See also
--------
onshore_turbine_capex(capacity, hub_height, rotor_diam, base_capex, base_capacity, base_hub_height, base_rotor_diam, tcc_share, bos_share)
Notes
-------
The baseline offshore turbine corresponds to the optimal design for Europe according to Caglayan et al. [4]: capacity = 9400 kW, hub height = 135 m, rotor diameter = 210 m, "monopile" foundation, reference water depth = 40 m, and reference distance to shore = 60 km.
Sources
-------
[1] Fingersh, L., <NAME>., & <NAME>. (2006). Wind Turbine Design Cost and Scaling Model. Nrel. https://www.nrel.gov/docs/fy07osti/40566.pdf
[2] <NAME>., <NAME>., & <NAME>. (2010). Comparative Assessment of Direct Drive High Temperature Superconducting Generators in Multi-Megawatt Class Wind Turbines. Energy. https://doi.org/10.2172/991560
[3] <NAME>., <NAME>., & <NAME>. (2016). Cost of Wind Energy Review. Technical Report. https://www.nrel.gov/docs/fy18osti/70363.pdf
[4] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2019). The techno-economic potential of offshore wind energy with optimized future turbine designs in Europe. Applied Energy. https://doi.org/10.1016/j.apenergy.2019.113794
[5] <NAME>., <NAME>., & <NAME>. (2017). NREL Offshore Balance-of- System Model NREL Offshore Balance-of- System Model. https://www.nrel.gov/docs/fy17osti/66874.pdf
[6] <NAME>., <NAME>., <NAME>., & <NAME>. (2014). Levelised cost of energy for offshore floating wind turbines in a life cycle perspective. Renewable Energy, 66, 714–728. https://doi.org/10.1016/j.renene.2014.01.017
[7] <NAME>., & <NAME>. (2013). Levelised Costs Of Energy For Offshore Floating Wind Turbine Concepts [Norwegian University of Life Sciences]. https://nmbu.brage.unit.no/nmbu-xmlui/bitstream/handle/11250/189073/Bjerkseter%2C C. %26 Ågotnes%2C A. %282013%29 - Levelised Costs of Energy for Offshore Floating Wind Turbine Concepts.pdf?sequence=1&isAllowed=y
[8] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2016). IEA Wind Task 26: Offshore Wind Farm Baseline Documentation. https://doi.org/10.2172/1259255
[9] RPG CABLES, & KEC International limited. (n.d.). EXTRA HIGH VOLTAGE cables. RPG CABLES. www.rpgcables.com/images/product/EHV-catalogue.pdf
"""
# TODO: Generalize this function further(like with the onshore cost model)
# PREPROCESS INPUTS
cp = np.array(capacity / 1000)
# rr = np.array(rotor_diam / 2)
rd = np.array(rotor_diam)
hh = np.array(hub_height)
depth = np.abs(np.array(depth))
distance_to_shore = np.array(distance_to_shore)
distance_to_bus = np.array(distance_to_bus)
# COMPUTE COSTS
tcc = onshore_tcc(cp=cp * 1000, hh=hh, rd=rd)
tcc *= 0.7719832742256006
bos = offshore_bos(cp=cp, rd=rd, hh=hh, depth=depth, distance_to_shore=distance_to_shore, distance_to_bus=distance_to_bus, foundation=foundation,
mooring_count=mooring_count, anchor=anchor, turbine_count=turbine_count,
turbine_spacing=turbine_spacing, turbine_row_spacing=turbine_row_spacing, )
bos *= 0.3669156255898912
if foundation == 'monopile' or foundation == 'jacket':
fin = (tcc + bos) * 20.9 / (32.9 + 46.2) # Scaled according to tcc [7]
else:
fin = (tcc + bos) * 15.6 / (60.8 + 23.6) # Scaled according to tcc [7]
return tcc + bos + fin
# return np.array([tcc,bos,fin])
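# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how offshore_turbine_capex might be called for the baseline
# turbine described in the docstring (9.4 MW, 135 m hub height, 210 m rotor diameter,
# monopile foundation at 40 m depth, 60 km from shore). The call below is an example,
# not a validated reference case.
def _example_offshore_capex():
    capex = offshore_turbine_capex(
        capacity=9400,          # kW
        hub_height=135,         # m
        rotor_diam=210,         # m
        depth=40,               # m
        distance_to_shore=60,   # km
        foundation="monopile",
    )
    # For this baseline, capex / 9400 should sit near the ~2300 per-kW figure the
    # model is normalized to (see the docstring above).
    return capex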
def offshore_bos(cp, rd, hh, depth, distance_to_shore, distance_to_bus, foundation, mooring_count, anchor, turbine_count, turbine_spacing, turbine_row_spacing):
"""
    A function to determine the balance-of-system (BOS) cost of an offshore turbine based on the capacity, hub height and rotor diameter values according to Fingersh et al. [1].
Parameters
----------
    cp : numeric or array-like
        Turbine's nominal capacity in MW (note: offshore_turbine_capex passes capacity / 1000).
rd : numeric or array-like
Turbine's rotor diameter in m
hh : numeric or array-like
Turbine's hub height in m
depth : numeric or array-like
Water depth in m (absolute value) at the turbine's location.
distance_to_shore : numeric or array-like
Distance from the turbine's location to the nearest shore in km.
    distance_to_bus : numeric or array-like, optional
        Distance in km from the turbine's location to the wind farm's bus.
    foundation : str or array-like of strings, optional
        Turbine's foundation type. Accepted types are: "monopile", "jacket", "semisubmersible" or "spar", by default "monopile"
    mooring_count : numeric, optional
        Number of mooring lines attaching a turbine to the seafloor; only applicable for floating foundation types. By default 3, assuming a triangular attachment to the seafloor.
    anchor : str, optional
        Turbine's anchor type, only applicable for floating foundation types; "DEA" is recommended by [1].
        Accepted arguments are "dea" (drag embedment anchor) or "spa" (suction pile anchor).
    turbine_count : numeric, optional
        Number of turbines in the offshore wind park. CSM valid for the range [3-200], by default 80
    turbine_spacing : numeric, optional
        Spacing distance between turbines within a row (turbines that share the electrical connection to the bus), expressed as a multiple of the rotor diameter. CSM valid for the range [4-9], by default 5
    turbine_row_spacing : numeric, optional
        Spacing distance between rows of turbines, expressed as a multiple of the rotor diameter. CSM valid for the range [4-10], by default 9
Returns
-------
numeric
Offshore turbine's BOS in monetary units.
Notes
------
Assembly and installation costs could not be implemented due to the excessive number of unspecified constants considered by Smart et al. [8]. Therefore empirical equations were derived which fit the sensitivities to the baseline plants shown in [8]. These ended up being linear equations in turbine capacity and sea depth (only for floating turbines).
Sources
---------
[1] <NAME>., <NAME>., & <NAME>. (2006). Wind Turbine Design Cost and Scaling Model. Nrel. https://www.nrel.gov/docs/fy07osti/40566.pdf
[2] <NAME>., <NAME>., & <NAME>. (2010). Comparative Assessment of Direct Drive High Temperature Superconducting Generators in Multi-Megawatt Class Wind Turbines. Energy. https://doi.org/10.2172/991560
[3] <NAME>., <NAME>., & <NAME>. (2016). Cost of Wind Energy Review. Technical Report. https://www.nrel.gov/docs/fy18osti/70363.pdf
[4] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2019). The techno-economic potential of offshore wind energy with optimized future turbine designs in Europe. Applied Energy. https://doi.org/10.1016/j.apenergy.2019.113794
[5] <NAME>., <NAME>., & <NAME>. (2017). NREL Offshore Balance-of- System Model NREL Offshore Balance-of- System Model. https://www.nrel.gov/docs/fy17osti/66874.pdf
[6] <NAME>., <NAME>., <NAME>., & <NAME>. (2014). Levelised cost of energy for offshore floating wind turbines in a life cycle perspective. Renewable Energy, 66, 714–728. https://doi.org/10.1016/j.renene.2014.01.017
[7] <NAME>., & <NAME>. (2013). Levelised Costs Of Energy For Offshore Floating Wind Turbine Concepts [Norwegian University of Life Sciences]
[8] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2016). IEA Wind Task 26: Offshore Wind Farm Baseline Documentation. https://doi.org/10.2172/1259255
[9] RPG CABLES, & KEC International limited. (n.d.). EXTRA HIGH VOLTAGE cables. RPG CABLES. www.rpgcables.com/images/product/EHV-catalogue.pdf
"""
# rr = rd / 2
# prevent problems with negative depth values
depth = np.abs(depth)
foundation = foundation.lower()
anchor = anchor.lower()
if foundation == "monopile" or foundation == "jacket":
fixedType = True
elif foundation == "spar" or foundation == "semisubmersible":
fixedType = False
else:
raise ValueError("Please choose one of the four foundation types: monopile, jacket, spar, or semisubmersible")
# CONSTANTS AND ASSUMPTIONS (all from [1] except where noted)
    # Structure and foundation
# embedmentDepth = 30 # meters
monopileCostRate = 2250 # dollars/tonne
monopileTPCostRate = 3230 # dollars/tonne
sparSCCostRate = 3120 # dollars/tonne
sparTCCostRate = 4222 # dollars/tonne
sparBallCostRate = 100 # dollars/tonne
jacketMLCostRate = 4680 # dollars/tonne
jacketTPCostRate = 4500 # dollars/tonne
jacketPileCostRate = 2250 # dollars/tonne
semiSubmersibleSCCostRate = 3120 # dollars/tonne
semiSubmersibleTCostRate = 6250 # dollars/tonne
semiSubmersibleHPCostRate = 6250 # dollars/tonne
mooringCostRate = 721 # dollars/tonne -- 0.12m diameter is chosen since it is the median in [1]
outfittingSteelCost = 7250 # dollars/tonne
# the values of anchor cost is calculated from Table8 in [2] by assuming a euro to dollar rate of 1.35
DEA_anchorCost = 154 # dollars [2]
SPA_anchorCost = 692 # dollars [2]
# Electrical
    # current rating values are taken from the source tables in [4]; an approximate number is chosen
cable1CurrentRating = 400 # [4]
cable2CurrentRating = 600 # [4]
# exportCableCurrentRating = 1000 # [4]
arrayVoltage = 33
# exportCableVoltage = 220
powerFactor = 0.95
# buriedDepth = 1 # this value is chosen from [5] IF THIS CHANGES FROM ONE "singleStringPower1" needs to be updated
catenaryLengthFactor = 0.04
excessCableFactor = 0.1
numberOfSubStations = 1 # From the example used in [5]
arrayCableCost = 281000 * 1.35 # dollars/km (converted from EUR) [3]
externalCableCost = 443000 * 1.35 # dollars/km (converted from EUR) [3]
singleTurbineInterfaceCost = 0 # Could not find a number...
substationInterfaceCost = 0 # Could not find a number...
dynamicCableFactor = 2
    mainPowerTransformerCostRate = 12500  # dollars/MVA
highVoltageSwitchgearCost = 950000 # dollars
mediumVoltageSwitchgearCost = 500000 # dollars
shuntReactorCostRate = 35000 # dollars/MVA
dieselGeneratorBackupCost = 1000000 # dollars
workspaceCost = 2000000 # dollars
otherAncillaryCosts = 3000000 # dollars
fabricationCostRate = 14500 # dollars/tonne
topsideDesignCost = 4500000 # dollars
assemblyFactor = 1 # could not find a number...
offshoreSubstationSubstructureCostRate = 6250 # dollars/tonne
substationSubstructurePileCostRate = 2250 # dollars/tonne
interconnectVoltage = 345 # kV
    # GENERAL (APPENDIX B in NREL BOS MODEL)
# hubDiam = cp / 4 + 2
# bladeLength = (rd - hubDiam) / 2
# nacelleWidth = hubDiam + 1.5
# nacelleLength = 2 * nacelleWidth
# RNAMass is rotor nacelle assembly
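    # quadratic fit in cp (MW); the result is presumably in tonnes, consistent with the dollars/tonne cost rates used below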
RNAMass = 2.082 * cp * cp + 44.59 * cp + 22.48
# towerDiam = cp / 2 + 4
# towerMass = (0.4 * np.pi * np.power(rr, 2) * hh - 1500) / 1000
# STRUCTURE AND FOUNDATION
if foundation == 'monopile':
# monopileLength = depth + embedmentDepth + 5
monopileMass = (np.power((cp * 1000), 1.5) + (np.power(hh, 3.7) / 10) + 2100 * np.power(depth, 2.25) + np.power((RNAMass * 1000), 1.13)) / 10000
monopileCost = monopileMass * monopileCostRate
# monopile transition piece mass is called as monopileTPMass
monopileTPMass = np.exp(2.77 + 1.04 * np.power(cp, 0.5) + 0.00127 * np.power(depth, 1.5))
monopileTPCost = monopileTPMass * monopileTPCostRate
foundationCost = monopileCost + monopileTPCost
mooringAndAnchorCost = 0
elif foundation == 'jacket':
# jacket main lattice mass is called as jacketMLMass
jacketMLMass = np.exp(3.71 + 0.00176 * np.power(cp, 2.5) + 0.645 * np.log(np.power(depth, 1.5)))
jacketMLCost = jacketMLMass * jacketMLCostRate
# jacket transition piece mass is called as jacketTPMass
jacketTPMass = 1 / (((-0.0131 + 0.0381) / np.log(cp)) - 0.00000000227 * np.power(depth, 3))
jacketTPCost = jacketTPMass * jacketTPCostRate
# jacket pile mass is called as jacketPileMass
jacketPileMass = 8 * np.power(jacketMLMass, 0.5574)
jacketPileCost = jacketPileMass * jacketPileCostRate
foundationCost = jacketMLCost + jacketTPCost + jacketPileCost
mooringAndAnchorCost = 0
elif foundation == 'spar':
# spar stiffened column mass is called as sparSCMass
sparSCMass = 535.93 + 17.664 * np.power(cp, 2) + 0.02328 * depth * np.log(depth)
sparSCCost = sparSCMass * sparSCCostRate
# spar tapered column mass is called as sparTCMass
sparTCMass = 125.81 * np.log(cp) + 58.712
sparTCCost = sparTCMass * sparTCCostRate
# spar ballast mass is called as sparBallMass
sparBallMass = -16.536 * np.power(cp, 2) + 1261.8 * cp - 1554.6
sparBallCost = sparBallMass * sparBallCostRate
foundationCost = sparSCCost + sparTCCost + sparBallCost
if anchor == 'dea':
anchorCost = DEA_anchorCost
# the equation is derived from [3]
mooringLength = 1.5 * depth + 350
elif anchor == 'spa':
anchorCost = SPA_anchorCost
# since it is assumed to have an angle of 45 degrees it is multiplied by 1.41 which is squareroot of 2 [3]
mooringLength = 1.41 * depth
else:
raise ValueError("Please choose an anchor type!")
mooringAndAnchorCost = mooringLength * mooringCostRate + anchorCost
elif foundation == 'semisubmersible':
# semiSubmersible stiffened column mass is called as semiSubmersibleSCMass
semiSubmersibleSCMass = -0.9571 * np.power(cp, 2) + 40.89 * cp + 802.09
semiSubmersibleSCCost = semiSubmersibleSCMass * semiSubmersibleSCCostRate
# semiSubmersible truss mass is called as semiSubmersibleTMass
semiSubmersibleTMass = 2.7894 * np.power(cp, 2) + 15.591 * cp + 266.03
semiSubmersibleTCost = semiSubmersibleTMass * semiSubmersibleTCostRate
# semiSubmersible heavy plate mass is called as semiSubmersibleHPMass
semiSubmersibleHPMass = -0.4397 * np.power(cp, 2) + 21.145 * cp + 177.42
semiSubmersibleHPCost = semiSubmersibleHPMass * semiSubmersibleHPCostRate
foundationCost = semiSubmersibleSCCost + semiSubmersibleTCost + semiSubmersibleHPCost
if anchor == 'dea':
anchorCost = DEA_anchorCost
# the equation is derived from [3]
mooringLength = 1.5 * depth + 350
elif anchor == 'spa':
anchorCost = SPA_anchorCost
# since it is assumed to have an angle of 45 degrees it is multiplied by 1.41 which is squareroot of 2 [3]
mooringLength = 1.41 * depth
else:
raise ValueError("Please choose an anchor type!")
mooringAndAnchorCost = mooringLength * mooringCostRate + anchorCost
if fixedType:
if cp > 4:
secondarySteelSubstructureMass = 40 + (0.8 * (18 + depth))
else:
secondarySteelSubstructureMass = 35 + (0.8 * (18 + depth))
elif foundation == 'spar':
secondarySteelSubstructureMass = np.exp(3.58 + 0.196 * np.power(cp, 0.5) * np.log(cp) + 0.00001 * depth * np.log(depth))
elif foundation == 'semisubmersible':
secondarySteelSubstructureMass = -0.153 * np.power(cp, 2) + 6.54 * cp + 128.34
secondarySteelSubstructureCost = secondarySteelSubstructureMass * outfittingSteelCost
totalStructureAndFoundationCosts = foundationCost +\
mooringAndAnchorCost * mooring_count +\
secondarySteelSubstructureCost
# ELECTRICAL INFRASTRUCTURE
    # in the calculation of singleStringPower1 and 2, the burial depth is assumed to be 1, which simplifies the equation.
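    # three-phase power: P [MW] = sqrt(3) * current rating [A] * voltage [kV] * power factor / 1000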
singleStringPower1 = np.sqrt(3) * cable1CurrentRating * arrayVoltage * powerFactor / 1000
singleStringPower2 = np.sqrt(3) * cable2CurrentRating * arrayVoltage * powerFactor / 1000
numberofStrings = np.floor_divide(turbine_count * cp, singleStringPower2)
    # Only full strings are implemented (no partial strings)
numberofTurbinesperPartialString = 0 # np.round(np.remainder((turbine_count*cp) , singleStringPower2))
numberofTurbinesperArrayCable1 = np.floor_divide(singleStringPower1, cp)
numberofTurbinesperArrayCable2 = np.floor_divide(singleStringPower2, cp)
numberofTurbineInterfacesPerArrayCable1 = numberofTurbinesperArrayCable1 * numberofStrings * 2
max1_Cable1 = np.maximum(numberofTurbinesperArrayCable1 - numberofTurbinesperArrayCable2, 0)
max2_Cable1 = 0
numberofTurbineInterfacesPerArrayCable2 = (max1_Cable1 * numberofStrings + max2_Cable1) * 2
numberofArrayCableSubstationInterfaces = numberofStrings
if fixedType:
arrayCable1Length = (turbine_spacing * rd + depth * 2) * (numberofTurbineInterfacesPerArrayCable1 / 2) * (1 + excessCableFactor)
arrayCable1Length /= 1000 # convert to km
#print("arrayCable1Length:", arrayCable1Length)
else:
systemAngle = -0.0047 * depth + 18.743
freeHangingCableLength = (depth / np.cos(systemAngle * np.pi / 180) * (catenaryLengthFactor + 1)) + 190
fixedCableLength = (turbine_spacing * rd) - (2 * np.tan(systemAngle * np.pi / 180) * depth) - 70
arrayCable1Length = (2 * freeHangingCableLength) * (numberofTurbineInterfacesPerArrayCable1 / 2) * (1 + excessCableFactor)
arrayCable1Length /= 1000 # convert to km
max1_Cable2 = np.maximum(numberofTurbinesperArrayCable2 - 1, 0)
max2_Cable2 = np.maximum(numberofTurbinesperPartialString - numberofTurbinesperArrayCable2 - 1, 0)
strFac = numberofStrings / numberOfSubStations
if fixedType:
arrayCable2Length = (turbine_spacing * rd + 2 * depth) * (max1_Cable2 * numberofStrings + max2_Cable2) +\
numberOfSubStations * (strFac * (rd * turbine_row_spacing) +
(np.sqrt(np.power((rd * turbine_spacing * (strFac - 1)), 2) + np.power((rd * turbine_row_spacing), 2)) / 2) +
strFac * depth) * (excessCableFactor + 1)
arrayCable2Length /= 1000 # convert to km
arrayCable1AndAncillaryCost = arrayCable1Length * arrayCableCost + singleTurbineInterfaceCost *\
(numberofTurbineInterfacesPerArrayCable1 + numberofTurbineInterfacesPerArrayCable2)
arrayCable2AndAncillaryCost = arrayCable2Length * arrayCableCost +\
singleTurbineInterfaceCost * (numberofTurbineInterfacesPerArrayCable1 + numberofTurbineInterfacesPerArrayCable2) +\
substationInterfaceCost * numberofArrayCableSubstationInterfaces
else:
arrayCable2Length = (fixedCableLength + 2 * freeHangingCableLength) * (max1_Cable2 * numberofStrings + max2_Cable2) +\
numberOfSubStations * (strFac * (rd * turbine_row_spacing) +
np.sqrt(np.power(((2 * freeHangingCableLength) * (strFac - 1) + (rd * turbine_row_spacing) - (2 * np.tan(systemAngle * np.pi / 180) * depth) - 70), 2) +
np.power(fixedCableLength + 2 * freeHangingCableLength, 2)) / 2) * (excessCableFactor + 1)
arrayCable2Length /= 1000 # convert to km
arrayCable1AndAncillaryCost = dynamicCableFactor * (arrayCable1Length * arrayCableCost +
singleTurbineInterfaceCost * (numberofTurbineInterfacesPerArrayCable1 + numberofTurbineInterfacesPerArrayCable2))
arrayCable2AndAncillaryCost = dynamicCableFactor * (arrayCable2Length * arrayCableCost +
singleTurbineInterfaceCost * (numberofTurbineInterfacesPerArrayCable1 + numberofTurbineInterfacesPerArrayCable2) +
substationInterfaceCost * numberofArrayCableSubstationInterfaces)
singleExportCablePower = | np.sqrt(3) | numpy.sqrt |
"""Vehicle detector"""
import collections
import cv2
import glob
import numpy as np
import os.path
import time
from sklearn import svm
from sklearn.model_selection import train_test_split  # 'sklearn.cross_validation' was removed in newer scikit-learn
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from scipy.ndimage.measurements import label
from utils import (read_image, convert_color, get_color_features, get_spatial_features,
get_hog_features, get_channel_hog_features)
SEARCH_OPTIONS = [
# scale, ystart, ystop, cells_per_step, min_confidence
(1.0, 384, 640, 2, 0.1),
(1.5, 376, 640, 2, 0.1),
(2.0, 368, 640, 2, 0.1),
]
HEATMAP_DECAY = 0.8
BBOX_CONFIDENCE_THRESHOLD = 0.8
HEATMAP_THRESHOLD = 0.8
# Class to hold feature parameters.
class FeatureParams(collections.namedtuple('FeatureParams', ' '.join([
'color_space',
'spatial_size',
'window_size',
'color_nbins',
'orient',
'pix_per_cell',
'cell_per_block'
]))):
pass
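# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of a FeatureParams instance. The concrete values below are
# assumptions picked to resemble a common HOG + color-histogram setup, not the
# project's tuned settings.
EXAMPLE_PARAMS = FeatureParams(
    color_space='YCrCb',
    spatial_size=(32, 32),
    window_size=(64, 64),
    color_nbins=32,
    orient=9,
    pix_per_cell=8,
    cell_per_block=2,
)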
class Trainer(object):
"""
Class to train car classifier and vector scaler.
"""
def __init__(self, feature_params, car_dir='vehicles', noncar_dir='non-vehicles'):
"""
Initialize Trainer.
"""
self.P = feature_params
self.car_dir = car_dir
self.noncar_dir = noncar_dir
# Loads car and non-car images.
self.car_images = []
for fpath in glob.glob(os.path.join(self.car_dir, '*', '*.png')):
self.car_images.append(read_image(fpath))
self.noncar_images = []
for fpath in glob.glob(os.path.join(self.noncar_dir, '*', '*.png')):
self.noncar_images.append(read_image(fpath))
self.car_features = []
self.noncar_features = []
self.scaler = None
self.clf = svm.LinearSVC()
def extract_image_features(self, img):
"""
Extract features from single image
"""
features = []
cvt_img = convert_color(img, self.P.color_space)
spatial_features = get_spatial_features(cvt_img, size=self.P.spatial_size)
features.append(spatial_features)
color_features = get_color_features(cvt_img, size=self.P.window_size,
nbins=self.P.color_nbins)
features.append(color_features)
if self.P.window_size != (cvt_img.shape[0], cvt_img.shape[1]):
cvt_img = cv2.resize(cvt_img, self.P.window_size)
hog_features = get_hog_features(cvt_img, orient=self.P.orient,
pix_per_cell=self.P.pix_per_cell,
cell_per_block=self.P.cell_per_block)
features.append(hog_features)
return np.concatenate(features)
def extract_features(self):
"""
Extracts features from images.
"""
t = time.time()
print('Extracting features...')
for image in self.car_images:
self.car_features.append(self.extract_image_features(image))
for image in self.noncar_images:
self.noncar_features.append(self.extract_image_features(image))
print(round(time.time() - t, 2), 'Seconds to extract features.')
def train(self):
"""
Trains classifier and set scaler and clf.
"""
if not self.car_features or not self.noncar_features:
print("Features not extract, run extract_feature() first.")
return
# Train classifier.
# Create an array stack of feature vectors
x = np.vstack((self.car_features, self.noncar_features)).astype(np.float64)
# Define the labels vector
y = np.hstack((np.ones(len(self.car_features)), np.zeros(len(self.noncar_features))))
# Split up data into randomized training and test sets
rand_state = np.random.randint(0, 100)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=rand_state)
# Fit a per-column scaler
self.scaler = StandardScaler().fit(x_train)
# Apply the scaler to X
x_train = self.scaler.transform(x_train)
x_test = self.scaler.transform(x_test)
# Shuffle
x_train, y_train = shuffle(x_train, y_train)
print('Feature vector length:', len(x_train[0]))
# Use linear SVC
t = time.time()
print('Training linear SVC...')
self.clf.fit(x_train, y_train)
t2 = time.time()
print(round(t2 - t, 2), 'Seconds to train.')
print('Test Accuracy of linear SVC = ', round(self.clf.score(x_test, y_test), 4))
return self.clf, self.scaler
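# --- Illustrative usage (not part of the original module) ---
# Rough sketch of the training workflow, assuming the EXAMPLE_PARAMS defined above
# and the default 'vehicles'/'non-vehicles' image directories on disk.
def _example_training_run():
    trainer = Trainer(EXAMPLE_PARAMS)
    trainer.extract_features()
    clf, scaler = trainer.train()
    return clf, scaler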
class VehicleDetector(object):
"""Class to detect vehicles."""
def __init__(self, clf, scaler, feature_params,
search_options=SEARCH_OPTIONS, threshold=HEATMAP_THRESHOLD, decay=HEATMAP_DECAY):
self.clf = clf
self.scaler = scaler
self.P = feature_params
self.search_options = search_options
self.threshold = threshold
self.decay = decay
self.scale_bbox_confs = None
self.history_heatmap = None
self.unfiltered_heatmap = None
def search_cars_with_option(self, img, scale, cells_per_step, ystart, ystop, conf_thresh):
"""
        Detects car bboxes in the image at the given scale, within the region img[ystart:ystop, :, :]
:param img: input image
:param scale: window scale.
:param cells_per_step: cells per step.
:param ystart: y-range start.
:param ystop: y-range stop.
:param conf_thresh: classifier confidence threshold.
:return: list of (bbox, confidence)
"""
cvt_img = convert_color(img, self.P.color_space)
# Crop image on in y-region
ystart = 0 if ystart is None else ystart
        ystop = img.shape[0] if ystop is None else ystop  # rows span the y axis
cvt_img = cvt_img[ystart:ystop,:,:]
# Scale the image.
if scale != 1:
cvt_img = cv2.resize(cvt_img, (np.int(cvt_img.shape[1] / scale), np.int(cvt_img.shape[0] / scale)))
# Define blocks and steps as above
nxblocks = (cvt_img.shape[1] // self.P.pix_per_cell) - self.P.cell_per_block + 1
nyblocks = (cvt_img.shape[0] // self.P.pix_per_cell) - self.P.cell_per_block + 1
nblocks_per_window = (self.P.window_size[0] // self.P.pix_per_cell) - self.P.cell_per_block + 1
nxsteps = (nxblocks - nblocks_per_window) // cells_per_step + 1
nysteps = (nyblocks - nblocks_per_window) // cells_per_step + 1
# Compute individual channel HOG features for the entire image
hogs = []
for ch in range(cvt_img.shape[2]):
hogs.append(get_channel_hog_features(
img=cvt_img[:,:,ch], orient=self.P.orient,
pix_per_cell=self.P.pix_per_cell, cell_per_block=self.P.cell_per_block,
feature_vec=False, vis=False))
bbox_confs = []
for xb in range(nxsteps):
for yb in range(nysteps):
ypos = yb * cells_per_step
xpos = xb * cells_per_step
hog_features = []
for ch in range(cvt_img.shape[2]):
hog_features.append(
hogs[ch][ypos:ypos + nblocks_per_window, xpos:xpos + nblocks_per_window].ravel())
hog_features = np.hstack((hog_features[0], hog_features[1], hog_features[2]))
# Extract the image patch
xleft = xpos * self.P.pix_per_cell
ytop = ypos * self.P.pix_per_cell
subimg = cv2.resize(cvt_img[ytop:ytop + self.P.window_size[0],
xleft:xleft + self.P.window_size[0]],
self.P.window_size)
# Get spatial features
spatial_features = get_spatial_features(subimg, self.P.spatial_size)
# Get color features
color_features = get_color_features(subimg, size=self.P.window_size, nbins=self.P.color_nbins)
window_features = self.scaler.transform(np.hstack(
(spatial_features, color_features, hog_features)).reshape(1, -1))
if self.clf.predict(window_features) == 1:
xbox_left = np.int(xleft * scale)
ytop_draw = np.int(ytop * scale)
box_draw = np.int(self.P.window_size[0] * scale)
confidence = self.clf.decision_function(window_features)[0]
if confidence < conf_thresh:
# Only consider window with confidence score >= threshold.
continue
bbox = [(xbox_left, ytop_draw+ystart), (xbox_left+box_draw,ytop_draw+ystart+box_draw)]
bbox_conf = (bbox, confidence)
bbox_confs.append(bbox_conf)
return bbox_confs
def search_cars(self, img, search_options):
"""
Find cars by all scale-region sets provided.
"""
scale_bbox_confs = {}
for (scale, ystart, ystop, cells_per_step, conf_thresh) in search_options:
bbox_confs = self.search_cars_with_option(
img=img, cells_per_step=cells_per_step, scale=scale, ystart=ystart, ystop=ystop, conf_thresh=conf_thresh)
scale_bbox_confs[scale] = bbox_confs
return scale_bbox_confs
def get_heatmap(self, img, scale_bbox_confs):
"""
Gets heat map from list of bounding box-confidence.
:param img: input image.
:param scale_bbox_confs: a map of scale to list of (bbox, confidence).
"""
heatmap = np.zeros_like( | np.zeros_like(img[:, :, 0]) | numpy.zeros_like |
import pytest
from mvlearn.datasets import make_gaussian_mixture
from numpy.testing import assert_equal
import numpy as np
n_samples = 100
centers = [[-1, 0], [1, 0]]
covariances = [[[1, 0], [0, 1]], [[1, 0], [1, 2]]]
class_probs = [0.3, 0.7]
@pytest.mark.parametrize("centers, covariances, class_probs", [
(centers, covariances, class_probs),
(centers[0], covariances[0], None)]
)
def test_formats(centers, covariances, class_probs):
Xs, y, latents = make_gaussian_mixture(
n_samples, centers, covariances, class_probs=class_probs,
return_latents=True)
assert_equal(n_samples, len(latents))
assert_equal(len(covariances[0]), latents.shape[1])
assert_equal(Xs[0], latents)
if class_probs is not None:
for i, p in enumerate(class_probs):
assert_equal(int(p * n_samples), list(y).count(i))
@pytest.mark.parametrize(
"transform", ["linear", "poly", "sin", lambda x: 2 * x + 1])
def test_transforms(transform):
Xs, y, latents = make_gaussian_mixture(
n_samples, centers, covariances, class_probs=class_probs,
return_latents=True, transform=transform, noise_dims=2)
assert_equal(len(Xs), 2)
assert_equal(Xs[0].shape, (n_samples, 4))
assert_equal(Xs[1].shape, (n_samples, 4))
def test_bad_class_probs():
with pytest.raises(ValueError) as e:
make_gaussian_mixture(
n_samples, centers, covariances, class_probs=[0.3, 0.4]
)
assert str(e.value) == "elements of `class_probs` must sum to 1"
@pytest.mark.parametrize(
"transform", [list(), None])
def test_bad_transform_value(transform):
with pytest.raises(TypeError):
make_gaussian_mixture(
n_samples, centers, covariances, transform=transform)
@pytest.mark.parametrize(
"transform", ["error"])
def test_bad_transform_type(transform):
with pytest.raises(ValueError):
make_gaussian_mixture(
n_samples, centers, covariances, transform=transform)
def test_bad_shapes():
with pytest.raises(ValueError) as e:
make_gaussian_mixture(n_samples, None, covariances)
assert str(e.value) == "centers is of the incorrect shape"
# Wrong Length
with pytest.raises(ValueError) as e:
make_gaussian_mixture(n_samples, [1], covariances)
assert str(e.value) == \
"The first dimensions of 2D centers and 3D covariances must be equal"
# Inconsistent dimension
with pytest.raises(ValueError) as e:
make_gaussian_mixture(
n_samples, centers, [np.eye(2), np.eye(3)],
class_probs=class_probs
)
assert str(e.value) == "covariance matrix is of the incorrect shape"
    # Wrong dimensions (1-D inputs instead of matrices)
with pytest.raises(ValueError) as e:
make_gaussian_mixture(n_samples, [1, 0], [1, 0])
assert str(e.value) == "covariance matrix is of the incorrect shape"
    # Mismatched lengths of centers/covariances/class_probs
with pytest.raises(ValueError) as e:
make_gaussian_mixture(
n_samples, centers, covariances, class_probs=[0.3, 0.1, 0.6]
)
assert str(e.value) == \
"centers, covariances, and class_probs must be of equal length"
@pytest.mark.parametrize("noise", [None, 0, 1])
def test_random_state(noise):
Xs_1, y_1 = make_gaussian_mixture(
10, centers, covariances, class_probs=class_probs,
transform='poly', random_state=42, noise=noise
)
Xs_2, y_2 = make_gaussian_mixture(
10, centers, covariances, class_probs=class_probs,
transform='poly', random_state=42, noise=noise
)
for view1, view2 in zip(Xs_1, Xs_2):
assert np.allclose(view1, view2)
assert np.allclose(y_1, y_2)
def test_noise_dims_not_same_but_reproducible():
Xs_1, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform="poly", noise_dims=2
)
view1_noise, view2_noise = Xs_1[0][:, -2:], Xs_1[1][:, -2:]
assert not np.allclose(view1_noise, view2_noise)
Xs_2, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform="poly", noise_dims=2
)
view1_noise2, view2_noise2 = Xs_2[0][:, -2:], Xs_2[1][:, -2:]
assert np.allclose(view1_noise, view1_noise2)
assert np.allclose(view2_noise, view2_noise2)
@pytest.mark.parametrize(
"transform", ["linear", "poly", "sin", lambda x: 2 * x + 1])
def test_signal_noise_not_same_but_reproducible(transform):
Xs_1, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform=transform, noise=1
)
view1_noise, view2_noise = Xs_1[0], Xs_1[1]
Xs_2, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform=transform, noise=1
)
view1_noise2, view2_noise2 = Xs_2[0], Xs_2[1]
# Noise is reproducible and signal is the same
assert np.allclose(view1_noise, view1_noise2)
assert np.allclose(view2_noise, view2_noise2)
Xs_3, _ = make_gaussian_mixture(
20, centers, covariances, class_probs=class_probs, random_state=42,
transform=transform
)
view1_noise3, view2_noise3 = Xs_3[0], Xs_3[1]
    # Noise varies view 1, but leaves view 2 (i.e. the latents) unaffected
assert not np.allclose(view1_noise, view1_noise3)
assert np.allclose(view2_noise, view2_noise3)
def test_shuffle():
| np.random.seed(42) | numpy.random.seed |
# -*- coding: utf-8 -*-
# Copyright (c) 2019 the HERA Project
# Licensed under the MIT License
import pytest
import os
import shutil
import hera_qm.xrfi as xrfi
import numpy as np
import pyuvdata.tests as uvtest
from pyuvdata import UVData
from pyuvdata import UVCal
import hera_qm.utils as utils
from hera_qm.data import DATA_PATH
from pyuvdata import UVFlag
import glob
test_d_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA')
test_uvfits_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.uvfits')
test_uvh5_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvh5')
test_c_file = os.path.join(DATA_PATH, 'zen.2457698.40355.xx.HH.uvcAA.omni.calfits')
test_f_file = test_d_file + '.testuvflag.h5'
test_f_file_flags = test_d_file + '.testuvflag.flags.h5' # version in 'flag' mode
test_outfile = os.path.join(DATA_PATH, 'test_output', 'uvflag_testout.h5')
xrfi_path = os.path.join(DATA_PATH, 'test_output')
test_flag_integrations= os.path.join(DATA_PATH, 'a_priori_flags_integrations.yaml')
test_flag_jds= os.path.join(DATA_PATH, 'a_priori_flags_jds.yaml')
test_flag_lsts= os.path.join(DATA_PATH, 'a_priori_flags_lsts.yaml')
test_uvh5_files = ['zen.2457698.40355191.xx.HH.uvh5',
'zen.2457698.40367619.xx.HH.uvh5',
'zen.2457698.40380046.xx.HH.uvh5']
test_c_files = ['zen.2457698.40355191.xx.HH.uvcAA.omni.calfits',
'zen.2457698.40367619.xx.HH.uvcAA.omni.calfits',
'zen.2457698.40380046.xx.HH.uvcAA.omni.calfits']
for cnum, cf, uvf in zip(range(3), test_c_files, test_uvh5_files):
test_c_files[cnum] = os.path.join(DATA_PATH, cf)
test_uvh5_files[cnum] = os.path.join(DATA_PATH, uvf)
pytestmark = pytest.mark.filterwarnings(
"ignore:The uvw_array does not match the expected values given the antenna positions.",
"ignore:telescope_location is not set. Using known values for HERA.",
"ignore:antenna_positions is not set. Using known values for HERA."
)
def test_uvdata():
uv = UVData()
uv.read_miriad(test_d_file)
xant = uv.get_ants()[0]
xrfi.flag_xants(uv, xant)
assert np.all(uv.flag_array[uv.ant_1_array == xant, :, :, :])
assert np.all(uv.flag_array[uv.ant_2_array == xant, :, :, :])
def test_uvcal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
xant = uvc.ant_array[0]
xrfi.flag_xants(uvc, xant)
assert np.all(uvc.flag_array[0, :, :, :, :])
def test_uvflag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
xant = uvf.ant_1_array[0]
xrfi.flag_xants(uvf, xant)
assert np.all(uvf.flag_array[uvf.ant_1_array == xant, :, :, :])
assert np.all(uvf.flag_array[uvf.ant_2_array == xant, :, :, :])
def test_input_error():
pytest.raises(ValueError, xrfi.flag_xants, 4, 0)
def test_uvflag_waterfall_error():
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
uvf.to_flag()
pytest.raises(ValueError, xrfi.flag_xants, uvf, 0)
def test_uvflag_not_flag_error():
uvf = UVFlag(test_f_file)
pytest.raises(ValueError, xrfi.flag_xants, uvf, 0)
def test_not_inplace_uvflag():
uvf = UVFlag(test_f_file)
xant = uvf.ant_1_array[0]
uvf2 = xrfi.flag_xants(uvf, xant, inplace=False)
assert np.all(uvf2.flag_array[uvf2.ant_1_array == xant, :, :, :])
assert np.all(uvf2.flag_array[uvf2.ant_2_array == xant, :, :, :])
def test_not_inplace_uvdata():
uv = UVData()
uv.read_miriad(test_d_file)
xant = uv.get_ants()[0]
uv2 = xrfi.flag_xants(uv, xant, inplace=False)
assert np.all(uv2.flag_array[uv2.ant_1_array == xant, :, :, :])
assert np.all(uv2.flag_array[uv2.ant_2_array == xant, :, :, :])
def test_resolve_xrfi_path_given():
dirname = xrfi.resolve_xrfi_path(xrfi_path, test_d_file)
assert xrfi_path == dirname
def test_resolve_xrfi_path_empty():
dirname = xrfi.resolve_xrfi_path('', test_d_file)
assert os.path.dirname(os.path.abspath(test_d_file)) == dirname
def test_resolve_xrfi_path_does_not_exist():
dirname = xrfi.resolve_xrfi_path(os.path.join(xrfi_path, 'foogoo'), test_d_file)
assert os.path.dirname(os.path.abspath(test_d_file)) == dirname
def test_resolve_xrfi_path_jd_subdir():
dirname = xrfi.resolve_xrfi_path('', test_d_file, jd_subdir=True)
expected_dir = os.path.join(os.path.dirname(os.path.abspath(test_d_file)),
'.'.join(os.path.basename(test_d_file).split('.')[0:3])
+ '.xrfi')
assert dirname == expected_dir
assert os.path.exists(expected_dir)
shutil.rmtree(expected_dir)
def test_check_convolve_dims_3D():
# Error if d.ndims != 2
pytest.raises(ValueError, xrfi._check_convolve_dims, np.ones((3, 2, 3)), 1, 2)
def test_check_convolve_dims_1D():
size = 10
d = np.ones(size)
with uvtest.check_warnings(
UserWarning,
match=f"K1 value {size + 1} is larger than the data",
nwarnings=1
):
K = xrfi._check_convolve_dims(d, size + 1)
assert K == size
def test_check_convolve_dims_kernel_not_given():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=["No K1 input provided.", "No K2 input provided"],
nwarnings=2
):
K1, K2 = xrfi._check_convolve_dims(d)
assert K1 == size
assert K2 == size
def test_check_convolve_dims_Kt_too_big():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=f"K1 value {size + 1} is larger than the data",
nwarnings=1,
):
Kt, Kf = xrfi._check_convolve_dims(d, size + 1, size)
assert Kt == size
assert Kf == size
def test_check_convolve_dims_Kf_too_big():
size = 10
d = np.ones((size, size))
with uvtest.check_warnings(
UserWarning,
match=f"K2 value {size + 1} is larger than the data",
nwarnings=1,
):
Kt, Kf = xrfi._check_convolve_dims(d, size, size + 1)
assert Kt == size
assert Kf == size
def test_check_convolve_dims_K1K2_lt_one():
size = 10
data = np.ones((size, size))
pytest.raises(ValueError, xrfi._check_convolve_dims, data, 0, 2)
pytest.raises(ValueError, xrfi._check_convolve_dims, data, 2, 0)
def test_robust_divide():
a = np.array([1., 1., 1.], dtype=np.float32)
b = np.array([2., 0., 1e-9], dtype=np.float32)
c = xrfi.robust_divide(a, b)
assert np.array_equal(c, np.array([1. / 2., np.inf, np.inf]))
@pytest.fixture(scope='function')
def fake_data():
size = 100
fake_data = np.zeros((size, size))
# yield returns the data and lets us do post test clean up after
yield fake_data
# post-test clean up
del(fake_data)
return
def test_medmin(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# medmin should be .size - 1 for these data
medmin = xrfi.medmin(fake_data)
assert np.allclose(medmin, fake_data.shape[0] - 1)
# Test error when wrong dimensions are passed
pytest.raises(ValueError, xrfi.medmin, np.ones((5, 4, 3)))
def test_medminfilt(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# run medmin filt
Kt = 8
Kf = 8
d_filt = xrfi.medminfilt(fake_data, Kt=Kt, Kf=Kf)
# build up "answer" array
ans = np.zeros_like(fake_data)
for i in range(fake_data.shape[1]):
if i < fake_data.shape[0] - Kf:
ans[:, i] = i + (Kf - 1)
else:
ans[:, i] = fake_data.shape[0] - 1
assert np.allclose(d_filt, ans)
def test_detrend_deriv(fake_data):
# make fake data
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = j * i**2 + j**3
# run detrend_deriv in both dimensions
dtdf = xrfi.detrend_deriv(fake_data, df=True, dt=True)
ans = np.ones_like(dtdf)
assert np.allclose(dtdf, ans)
# only run along frequency
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = j**3
df = xrfi.detrend_deriv(fake_data, df=True, dt=False)
ans = np.ones_like(df)
assert np.allclose(df, ans)
# only run along time
for i in range(fake_data.shape[0]):
for j in range(fake_data.shape[1]):
fake_data[i, j] = i**3
dt = xrfi.detrend_deriv(fake_data, df=False, dt=True)
ans = np.ones_like(dt)
assert np.allclose(dt, ans)
# catch error of df and dt both being False
pytest.raises(ValueError, xrfi.detrend_deriv, fake_data, dt=False, df=False)
# Test error when wrong dimensions are passed
pytest.raises(ValueError, xrfi.detrend_deriv, np.ones((5, 4, 3)))
def test_detrend_medminfilt(fake_data):
# make fake data
for i in range(fake_data.shape[1]):
fake_data[:, i] = i * np.ones_like(fake_data[:, i])
# run detrend_medminfilt
Kt = 8
Kf = 8
dm = xrfi.detrend_medminfilt(fake_data, Kt=Kt, Kf=Kf)
# read in "answer" array
# this is output that corresponds to .size==100, Kt==8, Kf==8
ans_fn = os.path.join(DATA_PATH, 'test_detrend_medminfilt_ans.txt')
ans = | np.loadtxt(ans_fn) | numpy.loadtxt |
# -*- coding: utf-8 -*-
"""
Created on Fri May 18 10:15:13 2018
@author: <NAME>
We are going to do basins of attraction, wohoooo
Basically, color the particles according to the end location where they end up
First, we will do the total currents
"""
from netCDF4 import Dataset
from parcels import plotTrajectoriesFile,ParticleSet,JITParticle,FieldSet
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.patches as patches
from matplotlib import colors as c
from datetime import datetime, timedelta
from matplotlib.patches import Polygon
def BasinShaper(finalLon,finalLat,firstLon,firstLat):
lonGrid=np.linspace(-180,179,360)
latGrid=np.linspace(-90,90,181)
identifier=np.zeros((len(latGrid),len(lonGrid)))
    for i in range(len(finalLon)):
        Endspot = np.nan  # default for points not matched by any of the checks below
        if 0 < finalLat[i] < 50:
#so, first north Atlantic
if -80<finalLon[i]<30:
Endspot=7
if -100<finalLon[i]<-80:
if finalLat[i]>10:
Endspot=7
else:
Endspot=5
#North Pacific
if -100>finalLon[i]>-180:
Endspot=5
if 180>finalLon[i]>100:
Endspot=5
#Indian Ocean
if 40<finalLon[i]<100:
Endspot=3
#Red Sea, which we won't plot
if 10<finalLat[i]<30:
if 20<finalLon[i]<43:
Endspot=np.nan
if 50<finalLat[i]<60:
#North Atlantic
if -80<finalLon[i]<80:
Endspot=7
#North Pacific
if -100>finalLon[i]>-180:
Endspot=5
if 180>finalLon[i]>100:
Endspot=5
if 60<finalLat[i]:
if -120<finalLon[i]<90:
Endspot=1 #arctic Ocean
if -50<finalLat[i]<0:
#South Atlantic
if 20>finalLon[i]>-70:
Endspot=6
#South Pacific
if -70>finalLon[i]>-180:
Endspot=4
if 180>finalLon[i]>150:
Endspot=4
#indian Ocean
if 150>finalLon[i]>20:
Endspot=3
#southern ocean
if finalLat[i]<-50:
Endspot=2
identifier[np.argmin(np.abs(firstLat[i]-latGrid)),np.argmin(np.abs(firstLon[i]-lonGrid))]=Endspot
LonG,LatG= | np.meshgrid(lonGrid,latGrid) | numpy.meshgrid |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 11 09:11:51 2016
@author: tvzyl
"""
import numpy as np
import pandas as pd
from numpy import linalg as la
from numpy.linalg import det, inv
from scipy.stats import multivariate_normal, norm
from math import factorial
from numpy import ones, sum, ndarray, array, pi, dot, sqrt, newaxis, exp
from sklearn.utils.extmath import cartesian
from sklearn.base import BaseEstimator
from ellipsoidpy import Sk
class TrueBlobDensity(BaseEstimator):
def __init__(self, means, covs, ratios=None):
self.means = means
self.covs = covs
self.ratios = ratios
def fit(self, X=None, y=None):
self.norms_ = [multivariate_normal(mean=mean, cov=cov) for mean, cov in zip(self.means,self.covs)]
return self
def predict(self, data):
if self.ratios is not None:
return np.sum( np.c_[[ratio*norm.pdf(data) for norm, ratio in zip(self.norms_,self.ratios)]], axis=0)
else:
return np.mean(np.c_[[norm.pdf(data) for norm in self.norms_]], axis=0)
def getSampleSizes(self, n_samples, n_folds):
ranges = ndarray((n_folds))
ranges[0:int(n_samples%n_folds)] = n_samples//n_folds + 1
ranges[int(n_samples%n_folds):] = n_samples//n_folds
return ranges
    def getIntegratedSquaredPDF(self):
        # \int f(x)^2 dx for a Gaussian mixture is
        # sum_ij w_i * w_j * N(mu_i; mu_j, Sigma_i + Sigma_j)
        n_components, d = self.means.shape
        result = 0
        for fi, fj in cartesian([np.arange(n_components), np.arange(n_components)]):
            sigma_sum = self.covs[fi] + self.covs[fj]
            inv_sigma_sum = inv(sigma_sum)
            det_sigma_sum = det(sigma_sum)
            mu_diff = (self.means[fi] - self.means[fj])[newaxis]
            normalising_const = sqrt(((2 * pi)**d) * det_sigma_sum)
            cross_density = exp(-0.5 * dot(dot(mu_diff, inv_sigma_sum), mu_diff.T)) / normalising_const
            result += self.ratios[fi] * self.ratios[fj] * cross_density
        return result
def sample(self, n_samples=1, random_state=None, withclasses=False):
n_folds, d = self.means.shape
if self.ratios is not None:
sizes = (n_samples*ones((n_folds))*self.ratios).astype(int)
sizes[-1] += n_samples-sum(sizes)
else:
sizes = self.getSampleSizes(n_samples, n_folds)
samples = ndarray((int(n_samples), int(d)))
classes = ndarray((int(d)))
start = 0
for i in range(n_folds):
end = start+int(sizes[i])
samples[start:end] = np.random.multivariate_normal( self.means[i], self.covs[i], size=int(sizes[i]) )
classes[start:end] = i
start=end
if withclasses:
return samples, classes
else:
return samples
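# --- Illustrative usage (not part of the original module) ---
# Minimal sketch: a two-component 2-D mixture, sampled and evaluated. The means,
# covariances and mixing ratios below are arbitrary example values.
def _example_blob_density():
    means = np.array([[0.0, 0.0], [3.0, 3.0]])
    covs = np.array([np.eye(2), np.eye(2)])
    density = TrueBlobDensity(means, covs, ratios=[0.5, 0.5]).fit()
    draws = density.sample(n_samples=100)
    return density.predict(draws)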
class TrueBallDensity(BaseEstimator):
def __init__(self, mean, cov, inner_trials=10):
self.mean = array(mean)
self.cov = cov
self.inner_trials = inner_trials
self.dimensions_ = self.mean.shape[0]
def fit(self, X=None, y=None):
self.a_ = multivariate_normal(mean=self.mean, cov=self.cov)
self.b_ = multivariate_normal(mean=self.mean, cov=self.cov)
return self
def predict(self, data):
return self.normal_fact()**-1. * self.a_.pdf(data)*(1.-self.b_.pdf(data))**self.inner_trials
def normal_fact(self):
#https://en.wikipedia.org/wiki/Triangular_number
#https://en.wikipedia.org/wiki/Tetrahedral_number
tri_num = lambda n,c: factorial(n)/factorial(n-c)/factorial(c)
po = self.inner_trials+1
cov = self.cov
k = self.dimensions_
return sum((1 if term%2==0 else -1)*tri_num(po,term)/sqrt(term+1)/sqrt((2*pi)**k*det(cov))**term for term in range(0,po+1))
def getIntegratedSquaredPDF(self):
raise NotImplemented
def sample(self, n_samples=1, random_state=None, withclasses=False):
if withclasses:
raise NotImplementedError("withclasses")
c_samples = 0
s = ndarray((n_samples,self.dimensions_))
try:
while c_samples < n_samples:
u = np.random.uniform(size=n_samples)
y = self.a_.rvs(n_samples)
tmp_samples = y[u < (1-self.b_.pdf(y))**self.inner_trials/10.0 ]
c_tmp_samples = tmp_samples.shape[0]
s[c_samples:c_samples+c_tmp_samples] = tmp_samples
c_samples += c_tmp_samples
except ValueError:
s[c_samples:] = tmp_samples[:n_samples-c_samples]
return s
class TrueEllipsoidDensity(BaseEstimator):
def __init__(self, radii, var):
self.radii_ = array(radii)
self.dimensions_ = self.radii_.shape[0]
self.var_ = var
def fit(self, X=None, y=None):
return self
def predict(self, data):
radii_ = self.radii_
var_ = self.var_
x2 = np.dot(data, np.diag(1./radii_) )
r2 = la.norm(x2, axis=1)
e2 = np.dot(1./r2[:,np.newaxis]*x2, np.diag(radii_))
v2 = la.norm(e2, axis=1)
u2 = la.norm(e2-data, axis=1)
p = np.array([ norm.pdf(j, loc=0, scale=i*var_) for i, j in zip(v2,u2) ])
return p*(1./Sk(np.ones(1), self.dimensions_))
def getIntegratedSquaredPDF(self):
raise NotImplemented
def sample(self, n_samples=1, random_state=None, withclasses=False):
if withclasses:
raise NotImplementedError("withclasses")
dimensions_ = self.dimensions_
radii_ = self.radii_
var_ = self.var_
x = np.random.normal(size=(n_samples,dimensions_))
r = la.norm(x, axis=1)
e = np.dot(1./r[:,np.newaxis]*x, np.diag(radii_))
v = la.norm(e, axis=1)
u = np.array([norm.rvs(loc=0.,scale=i*var_) for i in v])
s = e + e/ | la.norm(e, axis=1) | numpy.linalg.norm |
"""
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from astropy.utils.misc import NumpyRNGContext
from ..mean_los_velocity_vs_rp import mean_los_velocity_vs_rp
from ...tests.cf_helpers import generate_locus_of_3d_points
__all__ = ('test_mean_los_velocity_vs_rp_correctness1', 'test_mean_los_velocity_vs_rp_correctness2',
'test_mean_los_velocity_vs_rp_correctness3', 'test_mean_los_velocity_vs_rp_correctness4',
'test_mean_los_velocity_vs_rp_parallel', 'test_mean_los_velocity_vs_rp_auto_consistency',
'test_mean_los_velocity_vs_rp_cross_consistency')
fixed_seed = 43
def pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rp_min, rp_max, pi_max, Lbox=None):
""" Brute force pure python function calculating mean los velocities
in a single bin of separation.
"""
if Lbox is None:
xperiod, yperiod, zperiod = np.inf, np.inf, np.inf
else:
xperiod, yperiod, zperiod = Lbox, Lbox, Lbox
npts1, npts2 = len(sample1), len(sample2)
running_tally = []
for i in range(npts1):
for j in range(npts2):
dx = sample1[i, 0] - sample2[j, 0]
dy = sample1[i, 1] - sample2[j, 1]
dz = sample1[i, 2] - sample2[j, 2]
dvz = velocities1[i, 2] - velocities2[j, 2]
if dx > xperiod/2.:
dx = xperiod - dx
elif dx < -xperiod/2.:
dx = -(xperiod + dx)
if dy > yperiod/2.:
dy = yperiod - dy
elif dy < -yperiod/2.:
dy = -(yperiod + dy)
if dz > zperiod/2.:
dz = zperiod - dz
zsign_flip = -1
elif dz < -zperiod/2.:
dz = -(zperiod + dz)
zsign_flip = -1
else:
zsign_flip = 1
d_rp = np.sqrt(dx*dx + dy*dy)
if (d_rp > rp_min) & (d_rp < rp_max) & (abs(dz) < pi_max):
if abs(dz) > 0:
vlos = dvz*dz*zsign_flip/abs(dz)
else:
vlos = dvz
running_tally.append(vlos)
if len(running_tally) > 0:
return np.mean(running_tally)
else:
return 0.
def test_mean_los_velocity_vs_rp_vs_brute_force_pure_python():
    """ This function tests that the
    `~halotools.mock_observables.mean_los_velocity_vs_rp` function returns
    results that agree with a brute force pure python implementation
    for a random distribution of points, both with and without PBCs.
    """
npts = 99
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((npts, 3))
sample2 = np.random.random((npts, 3))
velocities1 = np.random.uniform(-10, 10, npts*3).reshape((npts, 3))
velocities2 = np.random.uniform(-10, 10, npts*3).reshape((npts, 3))
rp_bins, pi_max = np.array([0, 0.1, 0.2, 0.3]), 0.1
############################################
# Run the test with PBCs turned off
s1s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, do_auto=False)
rmin, rmax = rp_bins[0], rp_bins[1]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[0], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[1], rp_bins[2]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[1], pure_python_s1s2, rtol=0.01)
rmin, rmax = rp_bins[2], rp_bins[3]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max)
assert np.allclose(s1s2[2], pure_python_s1s2, rtol=0.01)
# ############################################
# # Run the test with PBCs operative
s1s2 = mean_los_velocity_vs_rp(sample1, velocities1, rp_bins, pi_max,
sample2=sample2, velocities2=velocities2, do_auto=False, period=1)
rmin, rmax = rp_bins[0], rp_bins[1]
pure_python_s1s2 = pure_python_mean_los_velocity_vs_rp(
sample1, velocities1, sample2, velocities2, rmin, rmax, pi_max, Lbox=1)
assert | np.allclose(s1s2[0], pure_python_s1s2, rtol=0.01) | numpy.allclose |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Classes and functions that support unsupervised clustering
of data using Simulated Annealing (SA) based algorithms
"""
from abc import ABC, abstractmethod
import time
from scipy.spatial.distance import pdist, cdist
import numpy as np
class AbstractCoolingSchedule(ABC):
'''
Encapsulates a cooling schedule for
a simulated annealing algorithm.
Abstract base class.
Concrete implementations can be
customised to extend the range of
cooling schedules available to the SA.
'''
def __init__(self, starting_temp):
self.starting_temp = starting_temp
@abstractmethod
def cool_temperature(self, k):
pass
class ExponentialCoolingSchedule(AbstractCoolingSchedule):
'''
    Exponential cooling schedule.
Source:
https://uk.mathworks.com/help/gads/how-simulated-annealing-works.html
'''
def cool_temperature(self, k):
'''
Cool the temperature using the scheme
T = T0 * 0.95^k.
Where
T = temperature after cooling
T0 = starting temperature
k = iteration number (within temperature?)
Keyword arguments:
------
k -- int, iteration number (within temp?)
Returns:
------
float, new temperature
'''
return self.starting_temp * (0.95**k)
class CoruCoolingSchedule(AbstractCoolingSchedule):
def __init__(self, starting_temp, max_iter):
AbstractCoolingSchedule.__init__(self, starting_temp)
self._max_iter = max_iter
def cool_temperature(self, k):
'''
Returns a temperature from 1 tending to 0
Keyword arguments:
------
iteration --int. the iteration number
Returns:
------
float, new temperature
'''
#where did this cooling scheme come from?
#some standard methods:
# https://uk.mathworks.com/help/gads/how-simulated-annealing-works.html
t = np.exp(-(k - 1) * 10 / self._max_iter)
return t
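# --- Illustrative usage (not part of the original module) ---
# Minimal sketch of how an annealing loop would query a cooling schedule; the
# starting temperature and number of iterations are arbitrary example values.
def _example_cooling_curve():
    schedule = ExponentialCoolingSchedule(starting_temp=10.0)
    return [schedule.cool_temperature(k) for k in range(5)]  # 10 * 0.95**k for k = 0..4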
class SACluster(object):
'''
Encapsulates a simulated annealing clustering algorithm
Public methods:
fit() -- runs the SA and fits data to clusters
'''
def __init__(self, n_clusters, cooling_schedule, dist_metric='correlation',
max_iter= | np.int32(1e5) | numpy.int32 |
import logging
import warnings
from typing import Dict, Tuple, Union
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
import xarray as xr
from scipy import signal, spatial
import matlab.engine
# import pharedox_registration
# import matlab
from pharedox import utils
import pkgutil
def to_dataframe(data: xr.DataArray, *args, **kwargs) -> pd.DataFrame:
"""
Replacement for `xr.DataArray.to_dataframe` that adds the attrs for the given
DataArray into the resultant DataFrame.
Parameters
----------
data : xr.DataArray
the data to convert to DataFrame
Returns
-------
pd.DataFrame
a pandas DataFrame containing the data in the given DataArray, including the
global attributes
"""
df = data.to_dataframe(*args, **kwargs)
for k, v in data.attrs.items():
df[k] = v
return df
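# --- Illustrative usage (not part of the original module) ---
# Minimal sketch showing that global attrs end up as DataFrame columns. The dimension
# names and the 'strain' attribute are arbitrary example values.
def _example_to_dataframe():
    arr = xr.DataArray(np.zeros((2, 3)), dims=["animal", "position"])
    arr.attrs["strain"] = "example-strain"
    return to_dataframe(arr, name="value")  # includes a 'strain' column next to 'value'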
def align_pa(
intensity_data: xr.DataArray,
reference_wavelength: str = "410",
reference_pair: int = 0,
reference_timepoint: int = 0,
) -> xr.DataArray:
"""
Given intensity profile data, flip each animal along their anterior-posterior axis
if necessary, so that all face the same direction
Parameters
----------
intensity_data
the data to align
reference_wavelength: optional
the wavelength to calculate the alignment for
reference_pair: optional
the pair to calculate the alignment for
reference_timepoint
the timepoint to calculate the alignment for
Returns
-------
aligned_intensity_data
the PA-aligned intensity data
Notes
-----
The alignments are calculated for a single wavelength and pair for each animal, then
applied to all wavelengths and pairs for that animal.
The algorithm works as follows:
- take the derivative of the (trimmed) intensity profiles (this accounts for
differences in absolute intensity between animals)
- use the first animal in the stack as the reference profile
- for all animals:
- compare a forward and reverse profile to the reference profile (using the
cosine-similarity metric)
- keep either the forward or reverse profile accordingly
- finally, determine the location of the peaks in the *average* profile
- reverse all profiles if necessary (this will be necessary if the first
animal happens to be reversed)
"""
data = intensity_data
ref_data = data.sel(
wavelength=reference_wavelength,
pair=reference_pair,
timepoint=reference_timepoint,
)
ref_profile = ref_data.isel(animal=0).data
ref_vecs = np.tile(ref_profile, (data.animal.size, 1))
unflipped = data.sel(
wavelength=reference_wavelength,
pair=reference_pair,
timepoint=reference_timepoint,
).data
flipped = np.fliplr(unflipped)
# cosine-similarity measurements
should_flip = (
spatial.distance.cdist(ref_vecs, unflipped, "cosine")[0, :]
> spatial.distance.cdist(ref_vecs, flipped, "cosine")[0, :]
)
# Do the actual flip
# position needs to be reindexed, otherwise xarray freaks out
intensity_data[should_flip] = np.flip(
intensity_data[should_flip].values, axis=intensity_data.get_axis_num("position")
)
intensity_data = intensity_data.reindex(
position=np.linspace(0, 1, intensity_data.position.size)
)
mean_intensity = trim_profile(
np.mean(
intensity_data.sel(
wavelength=reference_wavelength,
pair=reference_pair,
timepoint=reference_timepoint,
),
axis=0,
).data,
threshold=2000,
new_length=100,
)
# parameters found experimentally
# TODO these could use some tweaking
peaks, _ = signal.find_peaks(
mean_intensity, distance=0.2 * len(mean_intensity), prominence=200, wlen=10
)
if len(peaks) < 2:
return intensity_data
if peaks[0] < len(mean_intensity) - peaks[1]:
logging.warning("Skipping second data flip. Needs further investigation!")
return intensity_data
# intensity_data = np.flip(
# intensity_data, axis=intensity_data.get_axis_num("position")
# )
return intensity_data
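# --- Illustrative sketch (not part of the original module) ---
# The core flip decision used in align_pa, reduced to a single 1-D profile: keep
# whichever orientation is closer (in cosine distance) to a reference profile.
def _example_flip_decision(profile: np.ndarray, reference: np.ndarray) -> np.ndarray:
    forward = spatial.distance.cosine(reference, profile)
    reverse = spatial.distance.cosine(reference, profile[::-1])
    return profile[::-1] if reverse < forward else profile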
def summarize_over_regions(
data: xr.DataArray,
regions: Dict,
eGFP_correction: Dict,
rescale: bool = True,
value_name: str = "value",
pointwise: Union[bool, str] = False,
**redox_params,
):
if pointwise == "both":
# recursively call this function for pointwise=T/F and concat the results
return pd.concat(
[
summarize_over_regions(
data, regions, rescale, value_name, pointwise=False
),
summarize_over_regions(
data, regions, rescale, value_name, pointwise=True
),
]
)
if rescale:
regions = utils.scale_region_boundaries(regions, data.shape[-1])
try:
# Ensure that derived wavelengths are present
data = utils.add_derived_wavelengths(data, **redox_params)
except ValueError:
pass
with warnings.catch_warnings():
warnings.simplefilter("ignore")
all_region_data = []
for _, bounds in regions.items():
if isinstance(bounds, (int, float)):
all_region_data.append(data.interp(position=bounds))
else:
all_region_data.append(
data.sel(position=slice(bounds[0], bounds[1])).mean(
dim="position", skipna=True
)
)
region_data = xr.concat(all_region_data, pd.Index(regions.keys(), name="region"))
region_data = region_data.assign_attrs(**data.attrs)
try:
region_data.loc[dict(wavelength="r")] = region_data.sel(
wavelength=redox_params["ratio_numerator"]
) / region_data.sel(wavelength=redox_params["ratio_denominator"])
region_data.loc[dict(wavelength="oxd")] = r_to_oxd(
region_data.sel(wavelength="r"),
r_min=redox_params["r_min"],
r_max=redox_params["r_max"],
instrument_factor=redox_params["instrument_factor"],
)
region_data.loc[dict(wavelength="e")] = oxd_to_redox_potential(
region_data.sel(wavelength="oxd"),
midpoint_potential=redox_params["midpoint_potential"],
z=redox_params["z"],
temperature=redox_params["temperature"],
)
except ValueError:
pass
# add corrections
if eGFP_correction["should_do_corrections"]:
# add data using xr.to_dataframe so correction values can be added directly next to value column
df = region_data.to_dataframe(value_name)
corrections = eGFP_corrections(df, eGFP_correction, **redox_params)
df["correction_ratio"] = corrections["correction_ratio"]
df["corrected_value"] = corrections["corrected_value"]
df["oxd"] = corrections["oxd"]
df["e"] = corrections["e"]
# add attributes
for k, v in region_data.attrs.items():
df[k] = v
for i in range(df.shape[0]):
x = i % 6
pd.options.mode.chained_assignment = None # default='warn'
# TODO fix chain indexing error warning. Will leave for now but may cause issues
if data["wavelength"][x] == "TL":
df["e"][i] = None
else:
df = to_dataframe(region_data, value_name)
df["pointwise"] = pointwise
try:
df.set_index(["experiment_id"], append=True, inplace=True)
except ValueError:
pass
return df
def eGFP_corrections(
data: DataFrame,
eGFP_correction: Dict,
**redox_params,
):
logging.info("Doing eGFP corrections")
# find the correction factor based of experiment specific eGFP number
correction_ratio = (
eGFP_correction["Cata_Number"] / eGFP_correction["Experiment_Number"]
)
# create empty lists that will contain column values
correction_ratio = [correction_ratio] * data.shape[0]
corrected_value = [None] * data.shape[0]
oxd = [None] * data.shape[0]
e = [None] * data.shape[0]
values = data["value"].tolist()
# loop through all the values
for i in range(data.shape[0]):
# find corrected value
corrected_value[i] = values[i] * correction_ratio[i]
# find oxd using formula
oxd[i] = r_to_oxd(
corrected_value[i],
redox_params["r_min"],
redox_params["r_max"],
redox_params["instrument_factor"],
)
# find e based on oxd
e[i] = oxd_to_redox_potential(oxd[i])
return {
"correction_ratio": correction_ratio,
"corrected_value": corrected_value,
"oxd": oxd,
"e": e,
}
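# Usage sketch (not part of the original module); the parameter values below
# are made up purely for illustration (the real ones come from the experiment
# configuration) and mirror the call made in summarize_over_regions above:
#   corrections = eGFP_corrections(
#       df,                                              # needs a "value" column
#       {"Cata_Number": 1.0, "Experiment_Number": 1.2},
#       r_min=0.85, r_max=6.5, instrument_factor=0.17,
#   )
#   df["corrected_value"] = corrections["corrected_value"]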
def smooth_profile_data(
profile_data: Union[np.ndarray, xr.DataArray],
lambda_: float = 100.0,
order: float = 4.0,
n_basis: float = 100.0,
n_deriv=0.0,
eng=None,
):
"""
Smooth profile data by fitting smoothing B-splines
Implemented in MATLAB as smooth_profiles
"""
# eng = pharedox_registration.initialize()
try:
import matlab.engine
except ImportError:
        logging.warning("MATLAB engine not installed. Skipping smoothing.")
return profile_data
if eng is None:
eng = matlab.engine.start_matlab()
resample_resolution = profile_data.position.size
return xr.apply_ufunc(
lambda x: np.array(
eng.smooth_profiles(
matlab.double(x.tolist()),
resample_resolution,
n_basis,
order,
lambda_,
n_deriv,
)
).T,
profile_data,
input_core_dims=[["position"]],
output_core_dims=[["position"]],
vectorize=True,
)
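# The sketch below is not part of the original module. It shows the same
# xr.apply_ufunc pattern used in smooth_profile_data, but with a plain NumPy
# moving average standing in for the MATLAB smoother so it runs without the
# MATLAB engine. It assumes numpy (np) and xarray (xr) are imported, as they
# are elsewhere in this module.
def _example_apply_ufunc_smoothing():
    profile = xr.DataArray(np.random.rand(3, 100), dims=["animal", "position"])
    return xr.apply_ufunc(
        lambda x: np.convolve(x, np.ones(5) / 5.0, mode="same"),  # stand-in smoother
        profile,
        input_core_dims=[["position"]],
        output_core_dims=[["position"]],
        vectorize=True,
    )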
def standardize_profiles(
profile_data: xr.DataArray,
redox_params,
template: Union[xr.DataArray, np.ndarray] = None,
eng=None,
**reg_kwargs,
) -> Tuple[xr.DataArray, xr.DataArray]:
"""
Standardize the A-P positions of the pharyngeal intensity profiles.
Parameters
----------
profile_data
The data to standardize. Must have the following dimensions:
``["animal", "timepoint", "pair", "wavelength"]``.
redox_params
the parameters used to map R -> OxD -> E
template
a 1D profile to register all intensity profiles to. If None, intensity profiles
are registered to the population mean of the ratio numerator.
eng
The MATLAB engine to use for registration. If ``None``, a new engine is started.
reg_kwargs
Keyword arguments to use for registration. See `registration kwargs` for more
information.
Returns
-------
standardized_data: xr.DataArray
the standardized data
warp_functions: xr.DataArray
the warp functions generated to standardize the data
"""
# eng = pharedox_registration.initialize()
if eng is None:
eng = matlab.engine.start_matlab()
std_profile_data = profile_data.copy()
std_warp_data = profile_data.copy().isel(wavelength=0)
if template is None:
template = profile_data.sel(wavelength=redox_params["ratio_numerator"]).mean(
dim=["animal", "pair"]
)
try:
template = matlab.double(template.values.tolist())
except AttributeError:
template = matlab.double(template.tolist())
for tp in profile_data.timepoint:
for pair in profile_data.pair:
data = std_profile_data.sel(timepoint=tp, pair=pair)
i_num = matlab.double(
data.sel(wavelength=redox_params["ratio_numerator"]).values.tolist()
)
i_denom = matlab.double(
data.sel(wavelength=redox_params["ratio_denominator"]).values.tolist()
)
resample_resolution = float(profile_data.position.size)
reg_num, reg_denom, warp_data = eng.standardize_profiles(
i_num,
i_denom,
template,
resample_resolution,
reg_kwargs["warp_n_basis"],
reg_kwargs["warp_order"],
reg_kwargs["warp_lambda"],
reg_kwargs["smooth_lambda"],
reg_kwargs["smooth_n_breaks"],
reg_kwargs["smooth_order"],
reg_kwargs["rough_lambda"],
reg_kwargs["rough_n_breaks"],
reg_kwargs["rough_order"],
reg_kwargs["n_deriv"],
nargout=3,
)
reg_num, reg_denom = np.array(reg_num).T, np.array(reg_denom).T
std_profile_data.loc[
dict(
timepoint=tp, pair=pair, wavelength=redox_params["ratio_numerator"]
)
] = reg_num
std_profile_data.loc[
dict(
timepoint=tp,
pair=pair,
wavelength=redox_params["ratio_denominator"],
)
] = reg_denom
std_warp_data.loc[dict(timepoint=tp, pair=pair)] = np.array(warp_data).T
std_profile_data = std_profile_data.assign_attrs(**reg_kwargs)
std_profile_data = utils.add_derived_wavelengths(std_profile_data, **redox_params)
return std_profile_data, std_warp_data
def channel_register(
profile_data: xr.DataArray,
redox_params: dict,
reg_params: dict,
eng: matlab.engine.MatlabEngine = None,
) -> Tuple[xr.DataArray, xr.DataArray]:
"""
Perform channel-registration on the given profile data
Parameters
----------
profile_data
the data to register
redox_params
the redox parameters
reg_params
the registration parameters
eng
the MATLAB engine (optional)
Returns
-------
reg_data: xr.DataArray
the registered data
warp_data: xr.DataArray
the warp functions used to register the data
"""
if eng is None:
eng = matlab.engine.start_matlab()
# eng = pharedox_registration.initialize()
reg_profile_data = profile_data.copy()
warp_data = profile_data.copy().isel(wavelength=0)
for p in profile_data.pair:
for tp in profile_data.timepoint:
i_num = matlab.double(
profile_data.sel(
timepoint=tp, pair=p, wavelength=redox_params["ratio_numerator"]
).values.tolist()
)
i_denom = matlab.double(
profile_data.sel(
timepoint=tp, pair=p, wavelength=redox_params["ratio_denominator"]
).values.tolist()
)
resample_resolution = float(profile_data.position.size)
reg_num, reg_denom, warps = eng.channel_register(
i_num,
i_denom,
resample_resolution,
reg_params["warp_n_basis"],
reg_params["warp_order"],
reg_params["warp_lambda"],
reg_params["smooth_lambda"],
reg_params["smooth_n_breaks"],
reg_params["smooth_order"],
reg_params["rough_lambda"],
reg_params["rough_n_breaks"],
reg_params["rough_order"],
reg_params["n_deriv"],
nargout=3,
)
reg_num, reg_denom = np.array(reg_num).T, | np.array(reg_denom) | numpy.array |
#!/usr/bin/env python3
from PIL import Image, ImageTk
import tkinter
import numpy as np
from scipy import misc, signal, ndimage
import sys
INF = float("infinity")
def show_image(I):
Image.fromarray(np.uint8(I)).show()
def total_gradient(I, seam=None):
# TODO: only recompute gradient for cells adjacent to removed seam
kernel_h = np.array([[1, 0, -1]])
r_h = signal.convolve2d(I[:, :, 0], kernel_h, mode="same", boundary="symm")
g_h = signal.convolve2d(I[:, :, 1], kernel_h, mode="same", boundary="symm")
b_h = signal.convolve2d(I[:, :, 2], kernel_h, mode="same", boundary="symm")
kernel_v = np.array([[1], [0], [-1]])
r_v = signal.convolve2d(I[:, :, 0], kernel_v, mode="same", boundary="symm")
g_v = signal.convolve2d(I[:, :, 1], kernel_v, mode="same", boundary="symm")
b_v = signal.convolve2d(I[:, :, 2], kernel_v, mode="same", boundary="symm")
return (np.square(r_h) + np.square(g_h) + np.square(b_h) +
np.square(r_v) + np.square(g_v) + np.square(b_v))
def min_neighbor_index(M, i, j):
rows, cols = M.shape
c = M[i-1, j]
if j > 0 and M[i-1, j-1] < c:
return -1
elif j < cols - 1 and M[i-1, j+1] < c:
return 1
return 0
def calc_dp(G):
rows, cols = G.shape
a = np.copy(G)
kernel_l = np.array([0, 0, 1])
kernel_c = np.array([0, 1, 0])
kernel_r = np.array([1, 0, 0])
    for i in range(1, rows):  # row 0 keeps its raw gradient values
lefts = ndimage.filters.convolve1d(a[i-1], kernel_l)
centers = ndimage.filters.convolve1d(a[i-1], kernel_c)
rights = ndimage.filters.convolve1d(a[i-1], kernel_r)
a[i] += np.minimum(np.minimum(lefts, centers), rights)
return a
def find_seam(dp, start_col):
rows, cols = dp.shape
seam = np.zeros((rows,), dtype=np.uint32)
j = seam[-1] = start_col
for i in range(rows - 2, -1, -1):
dc = min_neighbor_index(dp, i + 1, j)
j += dc
seam[i] = j
return seam
def find_best_seam(dp):
start_col = np.argmin(dp[-1])
return find_seam(dp, start_col)
def remove_seam(M, seam):
rows, cols = M.shape[:2]
return np.array([M[i, :][np.arange(cols) != seam[i]] for i in range(rows)])
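# Illustrative sketch, not part of the original script: run the full
# gradient -> DP -> seam -> removal pipeline on a tiny random image and check
# that exactly one column is removed. The image content is made up.
def _example_remove_one_seam():
    rng = np.random.default_rng(0)
    img = rng.uniform(0, 255, size=(8, 10, 3))
    seam = find_best_seam(calc_dp(total_gradient(img)))
    assert remove_seam(img, seam).shape == (8, 9, 3)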
def resize(I, new_width, new_height):
rows, cols = I.shape[:2]
dr = rows - new_height
dc = cols - new_width
for i in range(dc):
G = total_gradient(I)
dp_v = calc_dp(G)
seam = find_best_seam(dp_v)
I = remove_seam(I, seam)
I = np.swapaxes(I, 0, 1)
for i in range(dr):
G = total_gradient(I)
dp_h = calc_dp(G)
seam = find_best_seam(dp_h)
I = remove_seam(I, seam)
return np.swapaxes(I, 0, 1)
def add_image_to_canvas(I, canvas):
height, width = I.shape[:2]
canvas.img_tk = ImageTk.PhotoImage(Image.fromarray( | np.uint8(I) | numpy.uint8 |
import itertools
import logging
import math
from copy import deepcopy
import numpy as np
import pandas as pd
from scipy.ndimage.filters import uniform_filter1d
import basty.utils.misc as misc
np.seterr(all="ignore")
class SpatioTemporal:
def __init__(self, fps, stft_cfg={}):
self.stft_cfg = deepcopy(stft_cfg)
self.logger = logging.getLogger("main")
assert fps > 0
self.get_delta = lambda x, scale: self.calc_delta(x, scale, fps)
self.get_moving_mean = lambda x, winsize: self.calc_moving_mean(x, winsize, fps)
self.get_moving_std = lambda x, winsize: self.calc_moving_std(x, winsize, fps)
delta_scales_ = [100, 300, 500]
window_sizes_ = [300, 500]
if "delta_scales" not in stft_cfg.keys():
self.logger.info(
"Scale valuess can not be found in configuration for delta features."
+ f"Default values are {str(delta_scales_)[1:-1]}."
)
if "window_sizes" not in stft_cfg.keys():
self.logger.info(
"Window sizes can not be found in configuration for window features."
+ f"Default values are {str(window_sizes_)[1:-1]}."
)
self.stft_cfg["delta_scales"] = stft_cfg.get("delta_scales", delta_scales_)
self.stft_cfg["window_sizes"] = stft_cfg.get("window_sizes", window_sizes_)
self.stft_set = ["pose", "distance", "angle"]
for ft_set in self.stft_set:
ft_set_dt = ft_set + "_delta"
self.stft_cfg[ft_set] = stft_cfg.get(ft_set, [])
self.stft_cfg[ft_set_dt] = stft_cfg.get(ft_set_dt, [])
self.angle_between = self.angle_between_atan
@staticmethod
def angle_between_arccos(v1, v2):
"""
Returns the abs(angle) in radians between vectors 'v1' and 'v2'.
angle_between((1, 0, 0), (0, 1, 0)) --> 1.5707963267948966
angle_between((1, 0, 0), (1, 0, 0)) --> 0.0
angle_between((1, 0, 0), (-1, 0, 0)) --> 3.141592653589793
"""
assert isinstance(v1, np.ndarray) and isinstance(v2, np.ndarray)
v1_u = v1 / np.linalg.norm(v1)
v2_u = v2 / np.linalg.norm(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
@staticmethod
def angle_between_atan(v1, v2):
"""
Returns the abs(angle) in radians between vectors 'v1' and 'v2'.
"""
assert isinstance(v1, np.ndarray) and isinstance(v2, np.ndarray)
angle = np.math.atan2( | np.linalg.det([v1, v2]) | numpy.linalg.det |
import numpy as np
from numba import njit
from numpy.testing import assert_allclose
from pytest import approx, raises
import utilities.percentile as perc
_ = perc # to prevent it appearing to be an unused import
@njit
def np_percentile_jit(x, q):
return np.percentile(x, q)
@njit
def np_nanpercentile_jit(x, q):
return np.nanpercentile(x, q)
def test_scalar_q():
arr = np.array([1, 4, 3, 3.3, 6, 5, 2.2])
q = 2.2
output = np_percentile_jit(arr, q)
expected = np.percentile(arr, q)
assert output == approx(expected)
def test_tuple_q():
arr = np.array([1, 4, 3, 3.3, 6, 5, 2.2])
q = (2.2, 34, 4)
output = np_percentile_jit(arr, q)
expected = np.percentile(arr, q)
assert output == approx(expected)
def test_array_q():
arr = np.array([1, 4, 3, 3.3, 6, 5, 2.2])
q = np.array([0, 2.2, 34, 100])
output = np_percentile_jit(arr, q)
expected = np.percentile(arr, q)
assert output == approx(expected)
def test_array_q_contains_nan():
arr = np.array([1, 4, 3, 3.3, 6, 5, 2.2])
q = np.array([0, 2.2, np.nan, 100])
with raises(ValueError):
_ = np.percentile(arr, q)
with raises(ValueError):
_ = np_percentile_jit(arr, q)
def test_array_arr_contains_nan():
arr = np.array([1, 4, 3, 3.3, 6, np.nan, 2.2])
q = | np.array([0, 2.2, 100]) | numpy.array |
from Segmentation.utilities import *
import json
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
from scipy.misc import imread
from scipy import ndimage, signal
from skimage import morphology, feature, exposure
import neurofinder
import cv2 as cv
import ast
from PIL import Image
def tomask(coords, dims):
mask = np.zeros(dims,dtype=bool)
coords=np.array(coords)
mask[coords[:,0],coords[:,1]] = 1
return mask
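# Sketch, not part of the original code: tomask() on two made-up coordinates
# in a 5x5 frame yields a boolean mask with exactly those two pixels set.
def _example_tomask():
    m = tomask([[1, 2], [3, 4]], dims=(5, 5))
    assert m.dtype == bool and m.sum() == 2
    return m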
# load the regions (training data only)
def plotregions(filename, dims):
with open(filename+'.json') as f:
regions = json.load(f)
masks = np.array([tomask(s['coordinates'], dims) for s in regions])
# generate a list of all masks based on the regions
return masks
# turning the masks into the json file
# def mask_to_json(mask, filename, prelabeled=False):
# """Receive a mask and a file name and save to a json file"""
# if not prelabeled:
# labeled, n = ndimage.label(mask) # creates one image with numerical labels for each neuron
# else: # prelabeled
# labeled = mask # labeled mask, from watershed
# n = np.max(labeled) # highest label
# myRegions = [] # initialize
# # We don't need size selection anymore, so the loop looks like this now:
# elem_gen = (np.nonzero(labeled == i) for i in range(1, n+1))
# selected_elem_gen = (elem for elem in elem_gen if len(elem[0]) != 0 and len(elem[1]) != 0)
# # elem_gen = (elem for elem in (np.nonzero(labeled == i) for i in range(1, n+1)) if len(elem[0]) != 0 and len(elem[1]) != 0)
# myRegions = [{"id":i, "coordinates":[[x[0], x[1]] for x in zip(elem)]} for i, elem in enumerate(selected_elem_gen)]
# print(myRegions)
# # saving as json
# json.dump(myRegions, open(fix_json_fname(filename),'w'))
# for elem in elem_gen:
# myRegions.append({"id":elem, "coordinates":list(zip(elem))})
# for i,j in zip(xelem, yelem):
# myRegions[-1]["coordinates"].append([int(i),int(j)])
# # for elem in range(1, n+1):
# # xelem, yelem = np.nonzero(labeled == elem)
# # if len(xelem) == 0 or len(yelem) == 0: # skip empty elements
# # continue
# myRegions.append({"id":elem, "coordinates":[]}) # add coordinates as json
# for i,j in zip(xelem, yelem):
# myRegions[-1]["coordinates"].append([int(i),int(j)])
# turning the masks into the json file
def mask_to_json(mask, filename, prelabeled=False):
"""Receive a mask and a file name and save to a json file"""
if not prelabeled:
labeled, n = ndimage.label(mask) # creates one image with numerical labels for each neuron
else: # prelabeled
labeled = mask # labeled mask, from watershed
n = np.max(labeled) # highest label
myRegions = [] # initialize
# We don't need size selection anymore, so the loop looks like this now:
for elem in range(1, n+1):
xelem, yelem = np.nonzero(labeled == elem)
if len(xelem) == 0 or len(yelem) == 0: # skip empty elements
continue
myRegions.append({"id":elem, "coordinates":[]}) # add coordinates as json
for i,j in zip(xelem, yelem):
myRegions[-1]["coordinates"].append([int(i),int(j)])
# saving as json
json.dump(myRegions, open(fix_json_fname(filename),'w'))
def plotRandomNeuron(imgs, masks):
"""Chooses and plots a random neuron and a random image out of the imgs matrix"""
plt.figure(figsize=(12,12))
i = np.random.randint(0, len(masks))
plt.subplot(1, 2, 1)
plt.imshow(imgs[i], cmap='gray')
plt.title(f'All Neurons - sum of all images ({len(imgs)} timepoints)')
plt.subplot(1, 2, 2)
plt.imshow(masks[i], cmap='gray')
plt.title(f'Neuron #{i} - mask')
plt.tight_layout()
def bandPassFilter(img, radIn=50, radOut=10000, plot=False):
"""Receive image, inner radius size, outer radius size.
Return an image filtered with a disk, using FFT."""
# FFT
fft=np.fft.fftshift(np.fft.fft2(img))
# shape
x,y=np.shape(fft)
xg, yg = np.ogrid[-x//2:x//2, -y//2:y//2]
# define filter disk
    inner_circle_pixels = xg**2 + yg**2 <= radIn**2   # note: '**', not '^' (XOR)
    outer_circle_pixels = xg**2 + yg**2 <= radOut**2
filter_disk = np.ones_like(inner_circle_pixels)
filter_disk[ | np.invert(outer_circle_pixels) | numpy.invert |
from os import path as osp
import h5py
import numpy as np
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation
from utils.logging import logging
from utils.math_utils import unwrap_rpy, wrap_rpy
class DataIO:
def __init__(self):
# raw dataset - ts in us
self.ts_all = None
self.acc_all = None
self.gyr_all = None
self.dataset_size = None
self.init_ts = None
self.R_init = np.eye(3)
# vio data
self.vio_ts = None
self.vio_p = None
self.vio_v = None
self.vio_eul = None
self.vio_R = None
self.vio_rq = None
self.vio_ba = None
self.vio_bg = None
# attitude filter data
self.filter_ts = None
self.filter_eul = None
def load_all(self, dataset, args):
"""
load timestamps, accel and gyro data from dataset
"""
with h5py.File(osp.join(args.root_dir, dataset, "data.hdf5"), "r") as f:
ts_all = np.copy(f["ts"]) * 1e6
acc_all = | np.copy(f["accel_dcalibrated"]) | numpy.copy |
# coding: utf-8
# In[1]:
#first commit -Richie
import pandas as pd
import numpy as np
# In[2]:
data_message = pd.read_csv('../../data/raw_data/AAPL_05222012_0930_1300_message.tar.gz',compression='gzip')
data_lob = pd.read_csv('../../data/raw_data/AAPL_05222012_0930_1300_LOB_2.tar.gz',compression='gzip')
# In[3]:
#drop redundant time
col_names=data_lob.columns
delete_list=[i for i in col_names if 'UPDATE_TIME' in i]
for i in delete_list:
data_lob=data_lob.drop(i,1)
# In[4]:
#functions for renaming
def rename(txt):
txt=txt[16:].split('..')[0]
index=0
ask_bid=''
p_v=''
if txt[-2].isdigit():
index=txt[-2:]
else:
index=txt[-1]
if txt[:3]=="BID":
ask_bid='bid'
else:
ask_bid='ask'
if txt[4:9]=="PRICE":
p_v='P'
else:
p_v='V'
return('_'.join([p_v,index,ask_bid]))
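# Sketch, not part of the original notebook: rename() applied to made-up raw
# column names that mimic the real layout (a 16-character prefix, then
# SIDE_FIELD_LEVEL terminated by '..').
assert rename("X" * 16 + "BID_PRICE_3..TIME") == "P_3_bid"
assert rename("X" * 16 + "ASK_VOLUME_10..TIME") == "V_10_ask"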
# In[5]:
#rename columns
col_names=data_lob.columns
new_col_names=[]
new_col_names.append('index')
new_col_names.append('Time')
for i in col_names[2:]:
new_col_names.append(rename(i))
len(new_col_names)
data_lob.columns=new_col_names
# In[6]:
#feature: bid-ask spreads and mid price
for i in list(range(1, 11)):
bid_ask_col_name='_'.join(['spreads',str(i)])
p_i_ask='_'.join(['P',str(i),'ask'])
p_i_bid='_'.join(['P',str(i),'bid'])
data_lob[bid_ask_col_name]=data_lob[p_i_ask]-data_lob[p_i_bid]
mid_price_col_name = '_'.join(['mid_price',str(i)])
data_lob[mid_price_col_name]=(data_lob[p_i_ask]+data_lob[p_i_bid])/2
# In[7]:
#convert time
def timetransform(r):
    # transform the timestamp to seconds elapsed since 09:30, starting from 0
timestr = r
return (int(timestr[11:13]) - 9) * 60**2 + (int(timestr[14:16]) - 30) * 60 + float(timestr[17:])
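# Sketch, not part of the original notebook: timetransform() maps a raw
# timestamp string to seconds elapsed since the 09:30 open; the input below
# is made up.
assert timetransform("2012-05-22 09:30:05.5") == 5.5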
time = list(data_lob['Time'])
time_new = [timetransform(i) for i in time]
data_lob["Time"] = time_new
# In[8]:
time = list(data_message['Time'])
time_new = [timetransform(i) for i in time]
data_message["Time"] = time_new
# In[9]:
#price difference
data_lob['P_diff_ask_10_1']=data_lob['P_10_ask']-data_lob['P_1_ask']
data_lob['P_diff_bid_1_10']=data_lob['P_1_bid']-data_lob['P_10_bid']
for i in list(range(1, 10)):
P_diff_ask_i_name='_'.join(['P','diff','ask',str(i),str(i+1)])
P_diff_bid_i_name='_'.join(['P','diff','bid',str(i),str(i+1)])
P_i_ask='_'.join(['P',str(i),'ask'])
P_i1_ask='_'.join(['P',str(i+1),'ask'])
P_i_bid='_'.join(['P',str(i),'bid'])
P_i1_bid='_'.join(['P',str(i+1),'bid'])
data_lob[P_diff_ask_i_name]=abs(data_lob[P_i1_ask]-data_lob[P_i_ask])
data_lob[P_diff_bid_i_name]=abs(data_lob[P_i1_bid]-data_lob[P_i_bid])
# In[10]:
#mean price and volumes
p_ask_list=['_'.join(['P',str(i),'ask']) for i in list(range(1, 11))]
p_bid_list=['_'.join(['P',str(i),'bid']) for i in list(range(1, 11))]
v_ask_list=['_'.join(['V',str(i),'ask']) for i in list(range(1, 11))]
v_bid_list=['_'.join(['V',str(i),'bid']) for i in list(range(1, 11))]
data_lob['Mean_ask_price']=0.0
data_lob['Mean_bid_price']=0.0
data_lob['Mean_ask_volumn']=0.0
data_lob['Mean_bid_volumn']=0.0
for i in list(range(0, 10)):
data_lob['Mean_ask_price']+=data_lob[p_ask_list[i]]
data_lob['Mean_bid_price']+=data_lob[p_bid_list[i]]
data_lob['Mean_ask_volumn']+=data_lob[v_ask_list[i]]
data_lob['Mean_bid_volumn']+=data_lob[v_bid_list[i]]
data_lob['Mean_ask_price']/=10
data_lob['Mean_bid_price']/=10
data_lob['Mean_ask_volumn']/=10
data_lob['Mean_bid_volumn']/=10
# In[11]:
#accumulated difference
p_ask_list=['_'.join(['P',str(i),'ask']) for i in list(range(1, 11))]
p_bid_list=['_'.join(['P',str(i),'bid']) for i in list(range(1, 11))]
v_ask_list=['_'.join(['V',str(i),'ask']) for i in list(range(1, 11))]
v_bid_list=['_'.join(['V',str(i),'bid']) for i in list(range(1, 11))]
data_lob['Accum_diff_price']=0.0
data_lob['Accum_diff_volumn']=0.0
for i in list(range(0, 10)):
data_lob['Accum_diff_price']+=data_lob[p_ask_list[i]]-data_lob[p_bid_list[i]]
data_lob['Accum_diff_volumn']+=data_lob[v_ask_list[i]]-data_lob[v_bid_list[i]]
data_lob['Accum_diff_price']/=10
data_lob['Accum_diff_volumn']/=10
# In[12]:
# #price and volumn derivatives
# p_ask_list=['_'.join(['P',str(i),'ask']) for i in list(range(1, 11))]
# p_bid_list=['_'.join(['P',str(i),'bid']) for i in list(range(1, 11))]
# v_ask_list=['_'.join(['V',str(i),'ask']) for i in list(range(1, 11))]
# v_bid_list=['_'.join(['V',str(i),'bid']) for i in list(range(1, 11))]
# #data_lob['Time_diff']=list(np.zeros(30)+1)+list(np.array(data_lob['Time'][30:])-np.array(data_lob['Time'][:-30]))
# for i in list(range(0, 10)):
# P_ask_i_deriv='_'.join(['P','ask',str(i+1),'deriv'])
# P_bid_i_deriv='_'.join(['P','bid',str(i+1),'deriv'])
# V_ask_i_deriv='_'.join(['V','ask',str(i+1),'deriv'])
# V_bid_i_deriv='_'.join(['V','bid',str(i+1),'deriv'])
# data_lob[P_ask_i_deriv]=list(np.zeros(30))+list(np.array(data_lob[p_ask_list[i]][30:])-np.array(data_lob[p_ask_list[i]][:-30]))
# data_lob[P_bid_i_deriv]=list(np.zeros(30))+list(np.array(data_lob[p_bid_list[i]][30:])-np.array(data_lob[p_bid_list[i]][:-30]))
# data_lob[V_ask_i_deriv]=list(np.zeros(30))+list(np.array(data_lob[v_ask_list[i]][30:])-np.array(data_lob[v_ask_list[i]][:-30]))
# data_lob[V_bid_i_deriv]=list(np.zeros(30))+list(np.array(data_lob[v_bid_list[i]][30:])-np.array(data_lob[v_bid_list[i]][:-30]))
# data_lob[P_ask_i_deriv]/=30
# data_lob[P_bid_i_deriv]/=30
# data_lob[V_ask_i_deriv]/=30
# data_lob[V_bid_i_deriv]/=30
# #price and volumn derivatives
p_ask_list=['_'.join(['P',str(i),'ask']) for i in list(range(1, 11))]
p_bid_list=['_'.join(['P',str(i),'bid']) for i in list(range(1, 11))]
v_ask_list=['_'.join(['V',str(i),'ask']) for i in list(range(1, 11))]
v_bid_list=['_'.join(['V',str(i),'bid']) for i in list(range(1, 11))]
#data_lob['Time_diff']=list(np.zeros(30)+1)+list(np.array(data_lob['Time'][30:])-np.array(data_lob['Time'][:-30]))
for i in list(range(0, 10)):
P_ask_i_deriv='_'.join(['P','ask',str(i+1),'deriv'])
P_bid_i_deriv='_'.join(['P','bid',str(i+1),'deriv'])
V_ask_i_deriv='_'.join(['V','ask',str(i+1),'deriv'])
V_bid_i_deriv='_'.join(['V','bid',str(i+1),'deriv'])
data_lob[P_ask_i_deriv]=list(np.zeros(1000))+list(np.array(data_lob[p_ask_list[i]][1000:])-np.array(data_lob[p_ask_list[i]][:-1000]))
data_lob[P_bid_i_deriv]=list(np.zeros(1000))+list(np.array(data_lob[p_bid_list[i]][1000:])-np.array(data_lob[p_bid_list[i]][:-1000]))
data_lob[V_ask_i_deriv]=list(np.zeros(1000))+list(np.array(data_lob[v_ask_list[i]][1000:])-np.array(data_lob[v_ask_list[i]][:-1000]))
data_lob[V_bid_i_deriv]=list(np.zeros(1000))+list(np.array(data_lob[v_bid_list[i]][1000:])-np.array(data_lob[v_bid_list[i]][:-1000]))
data_lob[P_ask_i_deriv]/=1000
data_lob[P_bid_i_deriv]/=1000
data_lob[V_ask_i_deriv]/=1000
data_lob[V_bid_i_deriv]/=1000
# In[ ]:
#set labels
diff=data_lob['mid_price_1']
diff_30=np.array(diff[30:])-np.array(diff[:-30])
label=[]
for i in diff_30:
if i>0.01:
label.append('1')
elif i<(-0.01):
label.append('-1')
else:
label.append('0')
data_lob['labels']=label+list( | np.zeros(30) | numpy.zeros |
import numpy as np
class Weno:
def __init__(self):
self.epsilon = 1e-6
def weno(self, NumFl, Fl, L, In, Out):
""" Interface reconstruction using WENO scheme. """
        # Build an extended array with phantom cells to deal with periodicity
#data = np.concatenate((In[-2:], In, In[0:2]))
data = | np.concatenate((In[-3:], In, In[0:2])) | numpy.concatenate |
import logging
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.ensemble import ExtraTreesClassifier
import os
import dateparser
import joblib
from .compat import string_, str_cast, unicode_
from .util import get_and_union_features, convert_segmentation_to_text, fix_encoding
from .blocks import TagCountReadabilityBlockifier
from .features.author import AuthorFeatures
from .sequence_tagger.models import word2features
from sklearn.base import clone
BASE_EXTRACTOR_DIR = __file__.replace('/extractor.py','')
class MultiExtractor(BaseEstimator, ClassifierMixin):
"""
An sklearn-style classifier that extracts the main content (and/or comments)
from an HTML document.
Args:
blockifier (``Blockifier``)
features (str or List[str], ``Features`` or List[``Features``], or List[Tuple[str, ``Features``]]):
One or more features to be used to transform blocks into a matrix of
numeric values. If more than one, a :class:`FeatureUnion` is
automatically constructed. See :func:`get_and_union_features`.
model (:class:`ClassifierMixin`): A scikit-learn classifier that takes
a numeric matrix of features and outputs a binary prediction of
1 for content or 0 for not-content. If None, a :class:`ExtraTreesClassifier`
with default parameters is used.
to_extract (str or Sequence[str]): Type of information to extract from
an HTML document: 'content', 'comments', or both via ['content', 'comments'].
prob_threshold (float): Minimum prediction probability of a block being
classified as "content" for it actually be taken as such.
max_block_weight (int): Maximum weight that a single block may be given
when training the extractor model, where weights are set equal to
the number of tokens in each block.
Note:
If ``prob_threshold`` is not None, then ``model`` must implement the
``predict_proba()`` method.
"""
FIELD_INDEX = {
'content': 0,
'description': 1,
'headlines': 2,
'breadcrumbs': 3
}
INVERTED_INDEX = { value: key for key, value in FIELD_INDEX.items() }
def state_dict(self):
return {
'params': self.params,
'pca': self.auth_feat.pca,
'classifiers': self.classifiers
}
def __init__(self, blockifier=TagCountReadabilityBlockifier,
features=('kohlschuetter', 'weninger', 'readability'),
model=None,
css_tokenizer_path=None,
text_tokenizer_path=None,
num_labels=2, prob_threshold=0.5, max_block_weight=200,
features_type=None, author_feature_transforms=None):
if css_tokenizer_path is None:
css_tokenizer_path = os.path.join(BASE_EXTRACTOR_DIR, 'models/css_tokenizer.pkl.gz')
if text_tokenizer_path is None:
text_tokenizer_path = os.path.join(BASE_EXTRACTOR_DIR, 'models/text_tokenizer.pkl.gz')
self.params = {
'features': features,
'num_labels': num_labels,
'prob_threshold': prob_threshold,
'max_block_weight': max_block_weight,
'features_type': features_type,
}
self.blockifier = blockifier
self.features = features
css_tokenizer = joblib.load(css_tokenizer_path)
text_tokenizer = joblib.load(text_tokenizer_path)
if author_feature_transforms is None:
author_feature_transforms = AuthorFeatures(css_tokenizer, text_tokenizer,
features=('kohlschuetter', 'weninger', 'readability', 'css'),
)
self.auth_feat = author_feature_transforms
self.feature_func = [
self.auth_feat,
self.features,
]
# initialize model
if model is None:
self.model = ExtraTreesClassifier()
elif isinstance(model, list):
self.classifiers = model
else:
self.classifiers = [ clone(model) for _ in range(num_labels)]
if features_type is None:
self.features_type = [0]*len(self.classifiers)
else:
self.features_type = features_type
self.target_features = list(range(len(self.classifiers)))
self.prob_threshold = prob_threshold
self.max_block_weight = max_block_weight
self._positive_idx = None
@staticmethod
def from_pretrained(filename):
checkpoint = joblib.load(filename)
extractor = MultiExtractor(model=checkpoint['classifiers'],
**checkpoint['params'])
extractor.auth_feat.pca = checkpoint['pca']
return extractor
@property
def features(self):
return self._features
@features.setter
def features(self, feats):
self._features = get_and_union_features(feats)
@staticmethod
def validate(labels, block_groups, weights=None):
clean_labels, clean_weights, clean_block_groups = [], [], []
# iterate through all documents
for idx, label in enumerate(labels):
# make sure all labels, weights, block size can be matched
if weights is None and len(label) == len(block_groups[idx]):
clean_labels.append(labels[idx])
clean_block_groups.append(block_groups[idx])
elif len(label) == len(block_groups[idx]) and len(label) == len(weights[idx]):
clean_labels.append(labels[idx])
clean_weights.append(weights[idx])
clean_block_groups.append(block_groups[idx])
if weights is None:
return np.array(clean_labels), np.array(clean_block_groups)
return np.array(clean_labels), np.array(clean_weights), np.array(clean_block_groups)
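    # Sketch, not part of the original class: validate() keeps only documents
    # whose label/weight/block counts agree. With made-up toy data,
    #   labels = [[0, 1, 1], [1, 0]]
    #   blocks = [["b1", "b2", "b3"], ["b1", "b2", "b3"]]
    #   MultiExtractor.validate(labels, blocks)  # drops the second document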
def fit(self, documents, labels, weights=None, init_models=None, **kwargs):
"""
Fit :class`Extractor` features and model to a training dataset.
Args:
blocks (List[Block])
labels (``np.ndarray``)
weights (``np.ndarray``)
Returns:
:class`Extractor`
"""
mask, block_groups = [], []
for doc in documents:
block_groups.append(self.blockifier.blockify(doc))
mask.append(self._has_enough_blocks(block_groups[-1]))
block_groups = np.array(block_groups, dtype=object)
# filter out mask and validate each document size
if weights is None:
labels, block_groups = self.validate(np.array(labels)[mask], block_groups[mask])
else:
labels, weights, block_groups = self.validate(
np.array(labels)[mask],
block_groups[mask],
np.array(weights)[mask])
weights = np.concatenate(weights)
labels = np.concatenate(labels)
complex_feat_mat = self.auth_feat.fit_transform(
np.concatenate(block_groups)
)
if 1 in self.features_type:
features_mat = np.concatenate([self.features.fit_transform(blocks)
for blocks in block_groups])
for idx, clf in enumerate(self.classifiers):
print('fit model ', idx)
input_feat = complex_feat_mat if self.features_type[idx] == 0 else features_mat
print(input_feat.shape)
init_model = None
if not(init_models is None) and isinstance(init_models, list):
init_model = init_models[idx]
if weights is None:
self.classifiers[idx] = clf.fit(input_feat, labels[:, idx],
init_model=init_model, **kwargs)
else:
self.classifiers[idx] = clf.fit(input_feat, labels[:, idx],
sample_weight=weights[:, idx], init_model=init_model, **kwargs)
return self
def get_html_multi_labels_weights(self, data, attribute_indexes=[0], not_skip_indexes = []):
"""
Gather the html, labels, and weights of many files' data.
Primarily useful for training/testing an :class`Extractor`.
Args:
data: Output of :func:`extractnet.data_processing.prepare_all_data`.
Returns:
Tuple[List[Block], np.array(int), np.array(int)]: All blocks, all
labels, and all weights, respectively.
"""
all_html = []
all_labels = []
all_weights = []
for row in data:
html = row[0]
attributes = row[1:]
skip = False
multi_label = []
multi_weights = []
for attribute_idx in attribute_indexes:
labels, weights = self._get_labels_and_weights(attributes, attribute_idx=attribute_idx)
multi_label.append(labels)
multi_weights.append(weights)
if skip:
continue
if len(html) > 0 and len(multi_label) == len(attribute_indexes):
all_html.append(html)
all_labels.append(np.stack(multi_label, -1))
all_weights.append(np.stack(multi_weights, -1))
return np.array(all_html, dtype=object), | np.array(all_labels, dtype=object) | numpy.array |
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import gudhi as gd
import dataIO as io
import torch
import torch.nn as nn
from topologylayer.nn.features import pad_k
from mpl_toolkits.mplot3d import axes3d
from topologylayer.functional.persistence import SimplicialComplex
from topologylayer.util.construction import unique_simplices
from scipy.spatial import Delaunay
from tqdm import trange
# calculate jaccard
def jaccard(im1, im2):
    im1 = np.asarray(im1).astype(bool)
    im2 = np.asarray(im2).astype(bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
return np.double(np.bitwise_and(im1, im2).sum()) / np.double(np.bitwise_or(im1, im2).sum())
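# Sketch, not part of the original code: Jaccard index of two made-up binary
# masks whose foregrounds overlap in 1 of 3 pixels.
def _example_jaccard():
    a = np.array([[1, 1], [0, 0]])
    b = np.array([[1, 0], [1, 0]])
    assert jaccard(a, b) == 1.0 / 3.0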
# calculate L1
def L1norm(im1, im2):
im1 = | np.asarray(im1) | numpy.asarray |
# -*- coding: utf-8 -*-
"""
Python wrapper for LazyMole
<NAME>, University of Tübingen 2018 (<EMAIL>)
Uses LazyMole by <NAME>, University of Southern California (<EMAIL>)
"""
import os
from os.path import join
import numpy as np
import yaml
import subprocess
import errno
class model():
def __init__(self,
basepath,
in_field,
in_source,
in_target,
dx, dy, dz, # model grid cell dimensions
nx, ny, nz, # number of grid cells in model
rx=1, ry=1, rz=1, # graph theory refinement
in_skip=0,
in_log=True, # Is K field logarithmic?
out_config='config.yaml', # Name of configuration file output
foldername='lazymole', # Name of folder where output is saved
exe_path=r'src\lazymole.exe'): # Location of executable.
"""
Run LazyMole connectivity metric
Parameters
----------
basepath : str
basepath for simulations
in_field : numpy array
K values (same dimensions as connectivity grid)
in_source : numpy array
Source cells
in_target : numpy array
Target cells
Filepath to .npz file
"""
self.basepath = basepath
self.in_field = in_field
self.in_source = in_source
self.in_target = in_target
self.in_skip = in_skip
self.in_log = in_log
self.out_config = out_config
self.dx = dx
self.dy = dy
self.dz = dz
self.nx = nx
self.ny = ny
self.nz = nz
self.rx = rx
self.ry = ry
self.rz = rz
self.exe_path = exe_path
self.foldername = foldername
self.fname_field = 'field.dat'
self.fname_source = 'source.dat'
self.fname_target = 'target.dat'
self.fname_res = 'hres.dat'
self.fname_path = 'path.dat'
""" Preliminaries """
# Load K data
# data = np.loadtxt(self.in_field)
# Create folder for the connectivity run
self.lm_path = join(os.path.abspath(basepath), self.foldername)
try_makefolder(self.lm_path)
""" Convert dataset """
# if not os.path.isfile(join(self.lm_path, self.fname_field)):
xvec, yvec, zvec = np.meshgrid( | np.arange(0, nx) | numpy.arange |
# Hungarian algorithm (Kuhn-Munkres) for solving the linear sum assignment
# problem. Taken from scikit-learn. Based on original code by <NAME>,
# adapted to NumPy by <NAME>.
# Further improvements by <NAME>, <NAME> and <NAME>.
#
# Copyright (c) 2008 <NAME> <<EMAIL>>, <NAME>
# Author: <NAME>, <NAME>
# License: 3-clause BSD
import numpy as np
def linear_sum_assignment(cost_matrix):
"""Solve the linear sum assignment problem.
The linear sum assignment problem is also known as minimum weight matching
in bipartite graphs. A problem instance is described by a matrix C, where
each C[i,j] is the cost of matching vertex i of the first partite set
(a "worker") and vertex j of the second set (a "job"). The goal is to find
a complete assignment of workers to jobs of minimal cost.
Formally, let X be a boolean matrix where :math:`X[i,j] = 1` iff row i is
assigned to column j. Then the optimal assignment has cost
.. math::
\min \sum_i \sum_j C_{i,j} X_{i,j}
    s.t. each row is assigned to at most one column, and each column to at
most one row.
This function can also solve a generalization of the classic assignment
problem where the cost matrix is rectangular. If it has more rows than
columns, then not every row needs to be assigned to a column, and vice
versa.
The method used is the Hungarian algorithm, also known as the Munkres or
Kuhn-Munkres algorithm.
Parameters
----------
cost_matrix : array
The cost matrix of the bipartite graph.
Returns
-------
row_ind, col_ind : array
An array of row indices and one of corresponding column indices giving
the optimal assignment. The cost of the assignment can be computed
as ``cost_matrix[row_ind, col_ind].sum()``. The row indices will be
sorted; in the case of a square cost matrix they will be equal to
``numpy.arange(cost_matrix.shape[0])``.
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
>>> from scipy.optimize import linear_sum_assignment
>>> row_ind, col_ind = linear_sum_assignment(cost)
>>> col_ind
array([1, 0, 2])
>>> cost[row_ind, col_ind].sum()
5
References
----------
1. http://csclab.murraystate.edu/bob.pilgrim/445/munkres.html
2. <NAME>. The Hungarian Method for the assignment problem.
*Naval Research Logistics Quarterly*, 2:83-97, 1955.
3. <NAME>. Variants of the Hungarian method for assignment
problems. *Naval Research Logistics Quarterly*, 3: 253-258, 1956.
4. <NAME>. Algorithms for the Assignment and Transportation Problems.
*J. SIAM*, 5(1):32-38, March, 1957.
5. https://en.wikipedia.org/wiki/Hungarian_algorithm
"""
cost_matrix = np.asarray(cost_matrix)
if len(cost_matrix.shape) != 2:
raise ValueError("expected a matrix (2-d array), got a %r array"
% (cost_matrix.shape,))
# The algorithm expects more columns than rows in the cost matrix.
if cost_matrix.shape[1] < cost_matrix.shape[0]:
cost_matrix = cost_matrix.T
transposed = True
else:
transposed = False
state = _Hungary(cost_matrix)
# No need to bother with assignments if one of the dimensions
# of the cost matrix is zero-length.
step = None if 0 in cost_matrix.shape else _step1
while step is not None:
step = step(state)
if transposed:
marked = state.marked.T
else:
marked = state.marked
return np.where(marked == 1)
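# Minimal self-check, not part of the original code: the 3x3 example from the
# docstring above should yield an assignment of total cost 5.
def _example_linear_sum_assignment():
    cost = np.array([[4, 1, 3], [2, 0, 5], [3, 2, 2]])
    row_ind, col_ind = linear_sum_assignment(cost)
    assert cost[row_ind, col_ind].sum() == 5
    return row_ind, col_ind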
class _Hungary(object):
"""State of the Hungarian algorithm.
Parameters
----------
cost_matrix : 2D matrix
The cost matrix. Must have shape[1] >= shape[0].
"""
def __init__(self, cost_matrix):
self.C = cost_matrix.copy()
n, m = self.C.shape
self.row_uncovered = np.ones(n, dtype=bool)
self.col_uncovered = np.ones(m, dtype=bool)
self.Z0_r = 0
self.Z0_c = 0
self.path = np.zeros((n + m, 2), dtype=int)
self.marked = np.zeros((n, m), dtype=int)
def _clear_covers(self):
"""Clear all covered matrix cells"""
self.row_uncovered[:] = True
self.col_uncovered[:] = True
# Individual steps of the algorithm follow, as a state machine: they return
# the next step to be taken (function to be called), if any.
def _step1(state):
"""Steps 1 and 2 in the Wikipedia page."""
# Step 1: For each row of the matrix, find the smallest element and
# subtract it from every element in its row.
state.C -= state.C.min(axis=1)[:, np.newaxis]
# Step 2: Find a zero (Z) in the resulting matrix. If there is no
# starred zero in its row or column, star Z. Repeat for each element
# in the matrix.
for i, j in zip(*np.where(state.C == 0)):
if state.col_uncovered[j] and state.row_uncovered[i]:
state.marked[i, j] = 1
state.col_uncovered[j] = False
state.row_uncovered[i] = False
state._clear_covers()
return _step3
def _step3(state):
"""
Cover each column containing a starred zero. If n columns are covered,
the starred zeros describe a complete set of unique assignments.
In this case, Go to DONE, otherwise, Go to Step 4.
"""
marked = (state.marked == 1)
state.col_uncovered[np.any(marked, axis=0)] = False
if marked.sum() < state.C.shape[0]:
return _step4
def _step4(state):
"""
Find a noncovered zero and prime it. If there is no starred zero
in the row containing this primed zero, Go to Step 5. Otherwise,
cover this row and uncover the column containing the starred
zero. Continue in this manner until there are no uncovered zeros
left. Save the smallest uncovered value and Go to Step 6.
"""
# We convert to int as numpy operations are faster on int
C = (state.C == 0).astype(int)
covered_C = C * state.row_uncovered[:, np.newaxis]
covered_C *= np.asarray(state.col_uncovered, dtype=int)
n = state.C.shape[0]
m = state.C.shape[1]
while True:
# Find an uncovered zero
row, col = np.unravel_index(np.argmax(covered_C), (n, m))
if covered_C[row, col] == 0:
return _step6
else:
state.marked[row, col] = 2
# Find the first starred element in the row
star_col = np.argmax(state.marked[row] == 1)
if state.marked[row, star_col] != 1:
# Could not find one
state.Z0_r = row
state.Z0_c = col
return _step5
else:
col = star_col
state.row_uncovered[row] = False
state.col_uncovered[col] = True
covered_C[:, col] = C[:, col] * (
np.asarray(state.row_uncovered, dtype=int))
covered_C[row] = 0
def _step5(state):
"""
Construct a series of alternating primed and starred zeros as follows.
Let Z0 represent the uncovered primed zero found in Step 4.
Let Z1 denote the starred zero in the column of Z0 (if any).
Let Z2 denote the primed zero in the row of Z1 (there will always be one).
Continue until the series terminates at a primed zero that has no starred
zero in its column. Unstar each starred zero of the series, star each
primed zero of the series, erase all primes and uncover every line in the
matrix. Return to Step 3
"""
count = 0
path = state.path
path[count, 0] = state.Z0_r
path[count, 1] = state.Z0_c
while True:
# Find the first starred element in the col defined by
# the path.
row = | np.argmax(state.marked[:, path[count, 1]] == 1) | numpy.argmax |
import zerorpc
import pandas as pd
import logging
import numpy as np
logging.basicConfig()
def rand_weights(n):
k = | np.random.rand(n) | numpy.random.rand |
import numpy as np
import itertools
from scipy.sparse.linalg import cg, LinearOperator
from functions import material_coef_at_grid_points, get_matinc, square_weights
# PARAMETERS
dim = 2 # dimension (works for 2D and 3D)
N = 5*np.ones(dim, dtype=int) # number of grid points
phase = 10. # material contrast
assert(np.array_equal(N % 2, np.ones(dim, dtype=int)))
dN = 2*N-1 # grid value
vec_shape=(dim,)+tuple(dN) # shape of the vector for storing DOFs
# OPERATORS
Agani = material_coef_at_grid_points(N, phase)
dot = lambda A, B: np.einsum('ij...,j...->i...', A, B)
fft = lambda x, N: np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(x), N)) / np.prod(N)
ifft = lambda x, N: np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(x), N)) * np.prod(N)
freq = [np.arange(np.fix(-n/2.), np.fix(n/2.+0.5)) for n in dN]
# SYSTEM MATRIX for Galerkin approximation with exact integration (FFTH-Ga)
mat, inc = get_matinc(dim, phase)
h = 0.6*np.ones(dim) # size of square (rectangle) / cube
char_square = ifft(square_weights(h, dN, freq), dN).real
Aga = np.einsum('ij...,...->ij...', mat+inc, char_square) \
+ np.einsum('ij...,...->ij...', mat, 1.-char_square)
# PROJECTION
Ghat = np.zeros((dim,dim)+ tuple(dN)) # zero initialize
indices = [range(int((dN[k]-N[k])/2), int((dN[k]-N[k])/2+N[k])) for k in range(dim)]
for i,j in itertools.product(range(dim),repeat=2):
for ind in itertools.product(*indices):
q = np.array([freq[ii][ind[ii]] for ii in range(dim)]) # frequency vector
if not q.dot(q) == 0: # zero freq. -> mean
Ghat[(i,j)+ind] = -(q[i]*q[j])/(q.dot(q))
# OPERATORS
G_fun = lambda X: np.real(ifft(dot(Ghat, fft(X, dN)), dN)).reshape(-1)
A_fun = lambda x: dot(Aga, x.reshape(vec_shape))
GA_fun = lambda x: G_fun(A_fun(x))
# CONJUGATE GRADIENT SOLVER
X = np.zeros((dim,) + tuple(dN), dtype=float)
E = | np.zeros(vec_shape) | numpy.zeros |
from __future__ import division, print_function, absolute_import
import os
import datetime
from timeit import time
import warnings
import cv2
import numpy as np
import argparse
from PIL import Image
from yolo import YOLO
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
from deep_sort.detection import Detection as ddet
from collections import deque
from keras import backend
backend.clear_session()
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--input",help="path to input video", default = "test_video/video.avi")
ap.add_argument("-c", "--class",help="name of class",default = "person")
args = vars(ap.parse_args())
pts = [deque(maxlen=30) for _ in range(9999)]
warnings.filterwarnings('ignore')
# initialize a list of colors to represent each possible class label
np.random.seed(100)
COLORS = np.random.randint(0, 255, size=(200, 3),
dtype="uint8")
def main(yolo):
start = time.time()
#Definition of the parameters
max_cosine_distance = 0.9
nn_budget = None
    nms_max_overlap = 0.3  # non-maximum suppression threshold
counter1 = []
counter2 = []
counter3 = []
counter4 = []
counter5 = []
counter6 = []
counter7 = []
counter8 = []
counter9 = []
counter10 = []
counter1x = 0
counter2x = 0
counter3x = 0
counter4x = 0
counter5x = 0
counter6x = 0
counter7x = 0
counter8x = 0
counter9x = 0
counter10x = 0
#deep_sort
model_filename = 'model_data/market1501.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
tracker = Tracker(metric)
writeVideo_flag = True
#video_path = "../../yolo_dataset/t1_video/test_video/det_t1_video_00025_test.avi"
video_capture = cv2.VideoCapture(args["input"])
if writeVideo_flag:
# Define the codec and create VideoWriter object
w = int(video_capture.get(3))
h = int(video_capture.get(4))
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter('output/output.avi', fourcc, 30, (w, h))
list_file = open('detection.txt', 'w')
frame_index = -1
fps = 0.0
while True:
ret, frame = video_capture.read() # frame shape 640*480*3
if ret != True:
break
t1 = time.time()
# image = Image.fromarray(frame)
image = Image.fromarray(frame[...,::-1]) #bgr to rgb
boxs,class_names = yolo.detect_image(image)
features = encoder(frame,boxs)
# score to 1.0 here).
detections = [Detection(bbox, 1.0, feature) for bbox, feature in zip(boxs, features)]
# Run non-maxima suppression.
boxes = np.array([d.tlwh for d in detections])
scores = | np.array([d.confidence for d in detections]) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 17:29:25 2020
@author: <NAME>
"""
import numpy as np
import AlgoritmiAlgebraLineare as al
# -------- Test of the backward substitution method ------
print('\n TESTING BACKWARD SUBSTITUTION')
print(' -------------------------------------')
print(' Dimension: 5x5')
matrix = np.array([[1, 2, 3, 5, 8],
[0, 1, 5, 1, 7],
[0, 0, 2, 5, 2],
[0, 0, 0, 5, 2],
[0, 0, 0, 0, 2]])
# Fix the solutions of the system to one
xsol = np.ones(5)
# Compute the vector of known terms (right-hand side)
b = np.dot(matrix,xsol)
# Apply backwardSubstition to matrix and b, expecting
# to recover xsol
findSol = al.backwardSubstition(matrix, b)
print(' Solution of linear system:\n ', findSol)
print('\n TESTING BACKWARD SUBSTITUTION')
print(' -------------------------------------')
print(' Dimension: 50x50')
# Matrix dimension
n = 50
M = 10
# Create a 50x50 matrix with values between 0 and 20
matrix = np.random.random((n, n))*2*M
# Convert the coefficient type to float
matrix = matrix.astype(float)
# Transform the matrix into an upper triangular matrix
matrix = | np.triu(matrix) | numpy.triu |
################################################################################
# Copyright (c) 2009-2021, National Research Foundation (SARAO)
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Target object used for pointing and flux density calculation."""
from __future__ import print_function, division, absolute_import
from builtins import object, range
from past.builtins import basestring
import numpy as np
import ephem
from .timestamp import Timestamp
from .flux import FluxDensityModel
from .ephem_extra import (StationaryBody, NullBody, is_iterable, lightspeed,
deg2rad, rad2deg, angle_from_degrees, angle_from_hours)
from .conversion import azel_to_enu
from .projection import sphere_to_plane, sphere_to_ortho, plane_to_sphere
class NonAsciiError(ValueError):
"""Exception when non-ascii characters are found."""
pass
class Target(object):
"""A target which can be pointed at by an antenna.
This is a wrapper around a PyEphem :class:`ephem.Body` that adds flux
density, alternate names and descriptive tags. For convenience, a default
antenna and flux frequency can be set, to simplify the calling of pointing
and flux density methods. These are not stored as part of the target object,
however.
The object can be constructed from its constituent components or from a
description string. The description string contains up to five
comma-separated fields, with the format::
<name list>, <tags>, <longitudinal>, <latitudinal>, <flux model>
The <name list> contains a pipe-separated list of alternate names for the
target, with the preferred name either indicated by a prepended asterisk or
assumed to be the first name in the list. The names may contain spaces, and
the list may be empty. The <tags> field contains a space-separated list of
descriptive tags for the target. The first tag is mandatory and indicates
the body type of the target, which should be one of (*azel*, *radec*, *gal*,
*tle*, *special*, *star*, *xephem*).
The longitudinal and latitudinal fields are only relevant to *azel*, *radec*
and *gal* targets, in which case they contain the relevant coordinates. The
following angle string formats are supported::
- Decimal, always in degrees (e.g. '12.5')
- Sexagesimal, in hours for right ascension and degrees for the rest,
with a colon or space separator (e.g. '12:30:00' or '12 30')
- Decimal or sexagesimal with explicit unit suffix 'd' or 'h',
e.g. '12.5h' (hours, not degrees!) or '12:30d'
The <flux model> is a space-separated list of numbers used to represent the
flux density of the target. The first two numbers specify the frequency
range for which the flux model is valid (in MHz), and the rest of the numbers
are model coefficients. The <flux model> may be enclosed in parentheses to
distinguish it from the other fields. An example string is::
name1 | *name 2, radec cal, 12:34:56.7, -04:34:34.2, (1000.0 2000.0 1.0)
For *special* and *star* body types, only the target name is required. The
*special* body name is assumed to be a PyEphem class name, and is typically
one of the major solar system objects. Alternatively, it could be "Nothing",
which indicates a dummy target with no position (useful as a placeholder but
not much else). The *star* name is looked up in the PyEphem star database,
which contains a modest list of bright stars.
For *tle* bodies, the final field in the description string should contain
the three lines of the TLE. If the name list is empty, the target name is
taken from the TLE instead. The *xephem* body contains a string in XEphem
EDB database format as the final field, with commas replaced by tildes. If
the name list is empty, the target name is taken from the XEphem string
instead.
When specifying a description string, the rest of the target parameters are
ignored, except for the default antenna and flux frequency (which do not
form part of the description string).
Parameters
----------
body : :class:`ephem.Body` object or :class:`Target` object or string
Pre-constructed PyEphem Body object to embed in target object, or
existing target object or description string
tags : list of strings, or whitespace-delimited string, optional
Descriptive tags associated with target, starting with its body type
aliases : list of strings, optional
Alternate names of target
flux_model : :class:`FluxDensity` object, optional
Object encapsulating spectral flux density model
antenna : :class:`Antenna` object, optional
Default antenna to use for position calculations
flux_freq_MHz : float, optional
Default frequency at which to evaluate flux density, in MHz
Arguments
---------
name : string
Name of target
Raises
------
ValueError
If description string has the wrong format
"""
def __init__(self, body, tags=None, aliases=None, flux_model=None, antenna=None, flux_freq_MHz=None):
if isinstance(body, Target):
body = body.description
# If the first parameter is a description string, extract the relevant target parameters from it
if isinstance(body, basestring):
body, tags, aliases, flux_model = construct_target_params(body)
self.body = body
self.name = self.body.name
self.tags = []
self.add_tags(tags)
if aliases is None:
self.aliases = []
else:
self.aliases = aliases
self.flux_model = flux_model
self.antenna = antenna
self.flux_freq_MHz = flux_freq_MHz
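    # Usage sketch (not part of the original module), using the example
    # description string from the class docstring:
    #   t = Target('name1 | *name 2, radec cal, 12:34:56.7, -04:34:34.2, '
    #              '(1000.0 2000.0 1.0)')
    #   t.name       -> 'name 2'   (the asterisk marks the preferred name)
    #   t.body_type  -> 'radec'
    #   t.flux_model -> model valid between 1000 and 2000 MHz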
def __str__(self):
"""Verbose human-friendly string representation of target object."""
descr = str(self.name)
if self.aliases:
descr += ' (%s)' % (', '.join(self.aliases),)
descr += ', tags=%s' % (' '.join(self.tags),)
if 'radec' in self.tags:
descr += ', %s %s' % (self.body._ra, self.body._dec)
if self.body_type == 'azel':
descr += ', %s %s' % (self.body.az, self.body.el)
if self.body_type == 'gal':
l, b = ephem.Galactic(ephem.Equatorial(self.body._ra, self.body._dec)).get()
descr += ', %.4f %.4f' % (rad2deg(l), rad2deg(b))
if self.flux_model is None:
descr += ', no flux info'
else:
descr += ', flux defined for %g - %g MHz' % (self.flux_model.min_freq_MHz, self.flux_model.max_freq_MHz)
if self.flux_freq_MHz is not None:
flux = self.flux_model.flux_density(self.flux_freq_MHz)
if not np.isnan(flux):
descr += ', flux=%.1f Jy @ %g MHz' % (flux, self.flux_freq_MHz)
return descr
def __repr__(self):
"""Short human-friendly string representation of target object."""
sub_type = (' (%s)' % self.tags[1]) if (self.body_type == 'xephem') and (len(self.tags) > 1) else ''
return "<katpoint.Target '%s' body=%s at 0x%x>" % (self.name, self.body_type + sub_type, id(self))
def __reduce__(self):
"""Custom pickling routine based on description string."""
return (self.__class__, (self.description,))
def __eq__(self, other):
"""Equality comparison operator."""
return self.description == (other.description if isinstance(other, Target) else other)
def __ne__(self, other):
"""Inequality comparison operator."""
return not (self == other)
def __lt__(self, other):
"""Less-than comparison operator (needed for sorting and np.unique)."""
return self.description < (other.description if isinstance(other, Target) else other)
def __hash__(self):
"""Base hash on description string, just like equality operator."""
return hash(self.description)
def format_katcp(self):
"""String representation if object is passed as parameter to KATCP command."""
return self.description
def _set_timestamp_antenna_defaults(self, timestamp, antenna):
"""Set defaults for timestamp and antenna, if they are unspecified.
If *timestamp* is None, it is replaced by the current time. If *antenna*
is None, it is replaced by the default antenna for the target.
Parameters
----------
timestamp : :class:`Timestamp` object or equivalent, or sequence, or None
Timestamp(s) in UTC seconds since Unix epoch (None means now)
antenna : :class:`Antenna` object, or None
Antenna which points at target
Returns
-------
timestamp : :class:`Timestamp` object or equivalent, or sequence
Timestamp(s) in UTC seconds since Unix epoch
antenna : :class:`Antenna` object
Antenna which points at target
Raises
------
ValueError
If no antenna is specified, and no default antenna was set either
"""
if timestamp is None:
timestamp = Timestamp()
if antenna is None:
antenna = self.antenna
if antenna is None:
raise ValueError('Antenna object needed to calculate target position')
return timestamp, antenna
@property
def body_type(self):
"""Type of target body, as a string tag."""
return self.tags[0].lower()
@property
def description(self):
"""Complete string representation of target object, sufficient to reconstruct it."""
names = ' | '.join([self.name] + self.aliases)
tags = ' '.join(self.tags)
fluxinfo = self.flux_model.description if self.flux_model is not None else None
fields = [names, tags]
if self.body_type == 'azel':
# Check if it's an unnamed target with a default name
if names.startswith('Az:'):
fields = [tags]
fields += [str(self.body.az), str(self.body.el)]
if fluxinfo:
fields += [fluxinfo]
elif self.body_type == 'radec':
# Check if it's an unnamed target with a default name
if names.startswith('Ra:'):
fields = [tags]
fields += [str(self.body._ra), str(self.body._dec)]
if fluxinfo:
fields += [fluxinfo]
elif self.body_type == 'gal':
# Check if it's an unnamed target with a default name
if names.startswith('Galactic l:'):
fields = [tags]
l, b = ephem.Galactic(ephem.Equatorial(self.body._ra, self.body._dec)).get()
fields += ['%.4f' % (rad2deg(l),), '%.4f' % (rad2deg(b),)]
if fluxinfo:
fields += [fluxinfo]
elif self.body_type == 'tle':
# Switch body type to xephem, as XEphem only saves bodies in xephem edb format (no TLE output)
tags = tags.replace(tags.partition(' ')[0], 'xephem tle')
edb_string = self.body.writedb().replace(',', '~')
# Suppress name if it's the same as in the xephem db string
edb_name = edb_string[:edb_string.index('~')]
if edb_name == names:
fields = [tags, edb_string]
else:
fields = [names, tags, edb_string]
elif self.body_type == 'xephem':
# Replace commas in xephem string with tildes, to avoid clashing with main string structure
# Also remove extra spaces added into string by writedb
edb_string = '~'.join([edb_field.strip() for edb_field in self.body.writedb().split(',')])
# Suppress name if it's the same as in the xephem db string
edb_name = edb_string[:edb_string.index('~')]
if edb_name == names:
fields = [tags]
fields += [edb_string]
return ', '.join(fields)
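    # Example (illustrative, not from the original source): for a stationary
    # target the description property above yields a comma-separated string
    # such as 'Zenith, azel, 0:00:00.0, 90:00:00.0', i.e. names, tags and the
    # body-specific coordinate fields joined by ', '.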
def add_tags(self, tags):
"""Add tags to target object.
This adds tags to a target, while checking the sanity of the tags. It
also prevents duplicate tags without resorting to a tag set, which would
be problematic since the tag order is meaningful (tags[0] is the body
type). Since tags should not contain whitespace, any string consisting of
whitespace-delimited words will be split into separate tags.
Parameters
----------
tags : string, list of strings, or None
Tag or list of tags to add (strings will be split on whitespace)
Returns
-------
target : :class:`Target` object
Updated target object
"""
if tags is None:
tags = []
if isinstance(tags, basestring):
tags = [tags]
for tag_str in tags:
for tag in tag_str.split():
if tag not in self.tags:
self.tags.append(tag)
return self
def azel(self, timestamp=None, antenna=None):
"""Calculate target (az, el) coordinates as seen from antenna at time(s).
Parameters
----------
timestamp : :class:`Timestamp` object or equivalent, or sequence, optional
Timestamp(s) in UTC seconds since Unix epoch (defaults to now)
antenna : :class:`Antenna` object, optional
Antenna which points at target (defaults to default antenna)
Returns
-------
az : :class:`ephem.Angle` object, or array of same shape as *timestamp*
Azimuth angle(s), in radians
el : :class:`ephem.Angle` object, or array of same shape as *timestamp*
Elevation angle(s), in radians
Raises
------
ValueError
If no antenna is specified, and no default antenna was set either
"""
if self.body_type == 'azel':
if is_iterable(timestamp):
return np.tile(self.body.az, len(timestamp)), np.tile(self.body.el, len(timestamp))
else:
return self.body.az, self.body.el
timestamp, antenna = self._set_timestamp_antenna_defaults(timestamp, antenna)
def _scalar_azel(t):
"""Calculate (az, el) coordinates for a single time instant."""
antenna.observer.date = Timestamp(t).to_ephem_date()
self.body.compute(antenna.observer)
return self.body.az, self.body.alt
if is_iterable(timestamp):
azel = np.array([_scalar_azel(t) for t in timestamp])
return azel[:, 0], azel[:, 1]
else:
return _scalar_azel(timestamp)
def apparent_radec(self, timestamp=None, antenna=None):
"""Calculate target's apparent (ra, dec) coordinates as seen from antenna at time(s).
This calculates the *apparent topocentric position* of the target for
the epoch-of-date in equatorial coordinates. Take note that this is
*not* the "star-atlas" position of the target, but the position as is
actually seen from the antenna at the given times. The difference is on
the order of a few arcminutes. These are the coordinates that a telescope
with an equatorial mount would use to track the target. Some targets are
unable to provide this (due to a limitation of pyephem), notably
stationary (*azel*) targets, and provide the *astrometric geocentric
position* instead.
Parameters
----------
timestamp : :class:`Timestamp` object or equivalent, or sequence, optional
Timestamp(s) in UTC seconds since Unix epoch (defaults to now)
antenna : :class:`Antenna` object, optional
Antenna which points at target (defaults to default antenna)
Returns
-------
ra : :class:`ephem.Angle` object, or array of same shape as *timestamp*
Right ascension, in radians
dec : :class:`ephem.Angle` object, or array of same shape as *timestamp*
Declination, in radians
Raises
------
ValueError
If no antenna is specified, and no default antenna was set either
"""
timestamp, antenna = self._set_timestamp_antenna_defaults(timestamp, antenna)
def _scalar_radec(t):
"""Calculate (ra, dec) coordinates for a single time instant."""
antenna.observer.date = Timestamp(t).to_ephem_date()
self.body.compute(antenna.observer)
return self.body.ra, self.body.dec
if is_iterable(timestamp):
radec = np.array([_scalar_radec(t) for t in timestamp])
return radec[:, 0], radec[:, 1]
else:
return _scalar_radec(timestamp)
def astrometric_radec(self, timestamp=None, antenna=None):
"""Calculate target's astrometric (ra, dec) coordinates as seen from antenna at time(s).
This calculates the J2000 *astrometric geocentric position* of the
target, in equatorial coordinates. This is its star atlas position for
the epoch of J2000.
Parameters
----------
timestamp : :class:`Timestamp` object or equivalent, or sequence, optional
Timestamp(s) in UTC seconds since Unix epoch (defaults to now)
antenna : :class:`Antenna` object, optional
Antenna which points at target (defaults to default antenna)
Returns
-------
ra : :class:`ephem.Angle` object, or array of same shape as *timestamp*
Right ascension, in radians
dec : :class:`ephem.Angle` object, or array of same shape as *timestamp*
Declination, in radians
Raises
------
ValueError
If no antenna is specified, and no default antenna was set either
"""
if self.body_type == 'radec':
# Convert to J2000 equatorial coordinates
original_radec = ephem.Equatorial(self.body._ra, self.body._dec, epoch=self.body._epoch)
ra, dec = ephem.Equatorial(original_radec, epoch=ephem.J2000).get()
if is_iterable(timestamp):
return np.tile(ra, len(timestamp)), np.tile(dec, len(timestamp))
else:
return ra, dec
timestamp, antenna = self._set_timestamp_antenna_defaults(timestamp, antenna)
def _scalar_radec(t):
"""Calculate (ra, dec) coordinates for a single time instant."""
antenna.observer.date = Timestamp(t).to_ephem_date()
self.body.compute(antenna.observer)
return self.body.a_ra, self.body.a_dec
if is_iterable(timestamp):
radec = np.array([_scalar_radec(t) for t in timestamp])
return radec[:, 0], radec[:, 1]
else:
return _scalar_radec(timestamp)
# The default (ra, dec) coordinates are the astrometric ones
radec = astrometric_radec
def galactic(self, timestamp=None, antenna=None):
"""Calculate target's galactic (l, b) coordinates as seen from antenna at time(s).
This calculates the galactic coordinates of the target, based on the
J2000 *astrometric* equatorial coordinates. This is its position relative
to the Galactic reference frame for the epoch of J2000.
Parameters
----------
timestamp : :class:`Timestamp` object or equivalent, or sequence, optional
Timestamp(s) in UTC seconds since Unix epoch (defaults to now)
antenna : :class:`Antenna` object, optional
Antenna which points at target (defaults to default antenna)
Returns
-------
l : :class:`ephem.Angle` object, or array of same shape as *timestamp*
Galactic longitude, in radians
b : :class:`ephem.Angle` object, or array of same shape as *timestamp*
Galactic latitude, in radians
Raises
------
ValueError
If no antenna is specified, and no default antenna was set either
"""
if self.body_type == 'gal':
l, b = ephem.Galactic(ephem.Equatorial(self.body._ra, self.body._dec)).get()
if is_iterable(timestamp):
return np.tile(l, len(timestamp)), np.tile(b, len(timestamp))
else:
return l, b
ra, dec = self.astrometric_radec(timestamp, antenna)
if is_iterable(ra):
lb = np.array([ephem.Galactic(ephem.Equatorial(ra[n], dec[n])).get()
for n in range(len(ra))])
return lb[:, 0], lb[:, 1]
else:
return ephem.Galactic(ephem.Equatorial(ra, dec)).get()
def parallactic_angle(self, timestamp=None, antenna=None):
"""Calculate parallactic angle on target as seen from antenna at time(s).
This calculates the *parallactic angle*, which is the position angle of
the observer's vertical on the sky, measured from north toward east.
This is the angle between the great-circle arc connecting the celestial
North pole to the target position, and the great-circle arc connecting
the zenith above the antenna to the target, or the angle between the
*hour circle* and *vertical circle* through the target, at the given
timestamp(s).
Parameters
----------
timestamp : :class:`Timestamp` object or equivalent, or sequence, optional
Timestamp(s) in UTC seconds since Unix epoch (defaults to now)
antenna : :class:`Antenna` object, optional
Antenna which points at target (defaults to default antenna)
Returns
-------
parangle : float, or array of same shape as *timestamp*
Parallactic angle, in radians
Raises
------
ValueError
If no antenna is specified, and no default antenna was set either
Notes
-----
The formula can be found in the `AIPS++ glossary`_ or in the SLALIB
source code (file pa.f, function sla_PA) which is part of the now
defunct `Starlink project`_.
.. _`AIPS++ Glossary`: http://www.astron.nl/aips++/docs/glossary/p.html
.. _`Starlink Project`: http://www.starlink.rl.ac.uk
"""
timestamp, antenna = self._set_timestamp_antenna_defaults(timestamp, antenna)
# Get apparent hour angle and declination
ra, dec = self.apparent_radec(timestamp, antenna)
ha = antenna.local_sidereal_time(timestamp) - ra
return np.arctan2(np.sin(ha), np.tan(antenna.observer.lat) * np.cos(dec) - np.sin(dec) * np.cos(ha))
def geometric_delay(self, antenna2, timestamp=None, antenna=None):
"""Calculate geometric delay between two antennas pointing at target.
An incoming plane wavefront travelling along the direction from the
target to the reference antenna *antenna* arrives at this antenna at the
given timestamp(s), and *delay* seconds later (or earlier, if *delay* is
negative) at the second antenna, *antenna2*. This delay is known as the
*geometric delay*, also represented by the symbol :math:`\tau_g`, and is
associated with the *baseline* vector from the reference antenna to the
second antenna. Additionally, the rate of change of the delay at the
given timestamp(s) is estimated from the change in delay during a short
interval spanning the timestamp(s).
Parameters
----------
antenna2 : :class:`Antenna` object
Second antenna of baseline pair (baseline vector points toward it)
timestamp : :class:`Timestamp` object or equivalent, or sequence, optional
Timestamp(s) in UTC seconds since Unix epoch (defaults to now)
antenna : :class:`Antenna` object, optional
First (reference) antenna of baseline pair, which also serves as
pointing reference (defaults to default antenna)
Returns
-------
delay : float, or array of same shape as *timestamp*
Geometric delay, in seconds
delay_rate : float, or array of same shape as *timestamp*
Rate of change of geometric delay, in seconds per second
Raises
------
ValueError
If no reference antenna is specified and no default antenna was set
Notes
-----
This is a straightforward dot product between the unit vector pointing
from the reference antenna to the target, and the baseline vector
pointing from the reference antenna to the second antenna, all in local
ENU coordinates relative to the reference antenna.
"""
timestamp, antenna = self._set_timestamp_antenna_defaults(timestamp, antenna)
# Obtain baseline vector from reference antenna to second antenna
baseline_m = antenna.baseline_toward(antenna2)
# Obtain direction vector(s) from reference antenna to target
az, el = self.azel(timestamp, antenna)
targetdir = azel_to_enu(az, el)
# Dot product of vectors is w coordinate, and delay is time taken by EM wave to traverse this
delay = - np.dot(baseline_m, targetdir) / lightspeed
# Numerically estimate delay rate from difference across 1-second interval spanning timestamp(s)
targetdir_before = azel_to_enu(*self.azel(np.array(timestamp) - 0.5, antenna))
targetdir_after = azel_to_enu(*self.azel(np.array(timestamp) + 0.5, antenna))
        delay_rate = - (np.dot(baseline_m, targetdir_after) - np.dot(baseline_m, targetdir_before)) / lightspeed
        return delay, delay_rate
import numpy as np
import matplotlib.pyplot as plt
import stanpy as stp
np.set_printoptions(precision=5, linewidth=500)
def assembly(*s_list, deg_freedom=3):
number_elements = len(s_list)
nodes = np.zeros((2 * number_elements, 3))
nodes[0::2, :] = np.array([s["Xi"] for s in s_list]).astype(int)
nodes[1::2, :] = np.array([s["Xk"] for s in s_list]).astype(int)
global_nodes = np.unique(nodes, axis=0)
num_global_nodes = global_nodes.shape[0]
indices = (np.arange(num_global_nodes) * deg_freedom).astype(int)
a = np.zeros((number_elements, 2, num_global_nodes))
a_full = np.zeros((number_elements, 2 * deg_freedom, num_global_nodes * deg_freedom))
for i, node in enumerate(nodes.reshape(number_elements, -1, 3)):
a[i, 0] = (global_nodes == node[0]).all(axis=1).astype(int)
a[i, 1] = (global_nodes == node[1]).all(axis=1).astype(int)
mask = a[i, 0] == 1
a_full[i, 0:deg_freedom, indices[mask].item() : indices[mask].item() + deg_freedom] = np.eye(
deg_freedom, deg_freedom
)
mask = a[i, 1] == 1
a_full[
i,
deg_freedom : 2 * deg_freedom,
indices[mask].item() : indices[mask].item() + deg_freedom,
] = np.eye(deg_freedom, deg_freedom)
return a_full
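# Example (illustrative note): for two bar elements that share one node and
# deg_freedom=3, assembly(s1, s2) returns an array of shape (2, 6, 9) -- one
# (2*3 x 3*3) incidence matrix per element that maps its local dofs onto the
# dofs of the 3 global nodes.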
# def assembly_univ(*elements, deg_freedom=3):
# number_elements = len(elements)
# nodes = np.zeros((2 * number_elements, 3)) # 3Dimensional
# nodes[0::2, :] = np.array([s["Xi"] for s in elements])
# nodes[1::2, :] = np.array([s["Xk"] for s in elements])
# global_nodes = np.unique(nodes, axis=0)
# num_global_nodes = global_nodes.shape[0]
# indices = (np.arange(num_global_nodes) * deg_freedom).astype(int)
# a = np.zeros((number_elements, 2, num_global_nodes))
# a_full = np.zeros((number_elements, 2 * deg_freedom, num_global_nodes * deg_freedom))
# for i, node in enumerate(nodes.reshape(number_elements, -1, 3)):
# a[i, 0] = (global_nodes == node[0]).all(axis=1).astype(int)
# a[i, 1] = (global_nodes == node[1]).all(axis=1).astype(int)
# mask = a[i, 0] == 1
# a_full[i, 0:deg_freedom, indices[mask].item() : indices[mask].item() + deg_freedom] = np.eye(
# deg_freedom, deg_freedom
# )
# mask = a[i, 1] == 1
# a_full[
# i,
# deg_freedom : 2 * deg_freedom,
# indices[mask].item() : indices[mask].item() + deg_freedom,
# ] = np.eye(deg_freedom, deg_freedom)
# return a_full
def element_stiffness_matrix(**s):
R = rotation_matrix(**s)
vec_i = np.array(s["Xi"])
vec_k = np.array(s["Xk"])
vec_R = vec_k - vec_i
QeT = np.array([[1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]).dot(R)
l = np.linalg.norm(vec_R).item()
K = s["EA"] / l * QeT.T.dot(np.array([[1, -1], [-1, 1]])).dot(QeT)
return K
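# Note (worked check, not from the original source): for a bar along the global
# x-axis with EA = 1 and length l = 1, QeT reduces to the two axial rows of R,
# so the 6x6 element matrix K carries +/-1 entries only in the x-dofs of its
# two end nodes.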
def system_stiffness_matrix(*s_list):
a = assembly(*s_list)
K = a[0].T.dot(element_stiffness_matrix(**s_list[0])).dot(a[0])
for i, s in enumerate(s_list):
if i == 0:
pass
else:
K += a[i].T.dot(element_stiffness_matrix(**s_list[i])).dot(a[i])
K[0, :] = 0
diag = np.copy(np.diag(K))
diag[diag == 0] = 1
np.fill_diagonal(K, diag)
return K
def rotation_matrix(**s):
vec_i = np.array(s["Xi"])
vec_k = np.array(s["Xk"])
vec_R = vec_k - vec_i
norm_R = np.linalg.norm(vec_R)
theta = np.radians(90)
    c, sn = np.cos(theta), np.sin(theta)  # 'sn' avoids shadowing the kwargs dict 's'
    rot_mat = np.array(((c, -sn, 0), (sn, c, 0), (0, 0, 1)))
e1 = vec_R / norm_R
e2 = rot_mat.dot(e1)
e3 = np.cross(e1, e2)
e_global = np.eye(3, 3)
R = np.zeros((6, 6))
R[0, :3] = e1.dot(e_global)
R[1, :3] = e2.dot(e_global)
R[2, :3] = e3.dot(e_global)
R[3, 3:] = e1.dot(e_global)
R[4, 3:] = e2.dot(e_global)
R[5, 3:] = e3.dot(e_global)
return R
def cosine_alpha(nod):
pass
if __name__ == "__main__":
L = 1
roller = {"w": 0, "M": 0, "H": 0}
hinged = {"w": 0, "M": 0}
node1 = (0, 0, 0)
node2 = (L, 0, 0)
node3 = (2 * L, 0, 0)
node4 = (3 * L, 0, 0)
s1 = {"EA": 1, "Xi": node1, "Xk": node2, "bc_i": hinged}
s2 = {"EA": 1, "Xi": node2, "Xk": node3, "bc_i": roller}
s3 = {"EA": 1, "Xi": node3, "Xk": node4, "bc_i": roller, "bc_k": roller}
s = [s1, s2, s3]
R = rotation_matrix(**s1)
K = system_stiffness_matrix(*s)
    P = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0])
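    # Assumed continuation (the original example ends at the load vector P):
    # solve the assembled system K u = P for the global nodal displacements.
    U = np.linalg.solve(K, P)
    print(U)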
""" Matrix profile anomaly detection.
Reference:
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. (2016, December).
Matrix profile I: all pairs similarity joins for time series: a unifying view that includes motifs, discords and shapelets.
In Data Mining (ICDM), 2016 IEEE 16th International Conference on (pp. 1317-1322). IEEE.
"""
# Authors: <NAME>, 2018.
import math
import numpy as np
import pandas as pd
import scipy.signal as sps
from tqdm import tqdm
from .BaseDetector import BaseDetector
# -------------
# CLASSES
# -------------
class MatrixProfileAD(BaseDetector):
""" Anomaly detection in time series using the matrix profile
Parameters
----------
m : int (default=10)
Window size.
contamination : float (default=0.1)
Estimate of the expected percentage of anomalies in the data.
Comments
--------
- This only works on time series data.
"""
def __init__(self, m=10, contamination=0.1,
tol=1e-8, verbose=False):
super(MatrixProfileAD, self).__init__()
self.m = int(m)
self.contamination = float(contamination)
self.tol = float(tol)
self.verbose = bool(verbose)
def ab_join(self, T, split):
""" Compute the ABjoin and BAjoin side-by-side,
where `split` determines the splitting point.
"""
# algorithm options
excZoneLen = int(np.round(self.m * 0.5))
radius = 1.1
dataLen = len(T)
proLen = dataLen - self.m + 1
# change Nan and Inf to zero
T = np.nan_to_num(T)
# precompute the mean, standard deviation
s = pd.Series(T)
dataMu = s.rolling(self.m).mean().values[self.m-1:dataLen]
dataSig = s.rolling(self.m).std().values[self.m-1:dataLen]
matrixProfile = np.ones(proLen) * np.inf
idxOrder = excZoneLen + np.arange(0, proLen, 1)
idxOrder = idxOrder[np.random.permutation(len(idxOrder))]
# construct the matrixprofile
for i, idx in enumerate(idxOrder):
# query
query = T[idx:idx+self.m-1]
# distance profile
distProfile = self._diagonal_dist(T, idx, dataLen, self.m, proLen, dataMu, dataSig)
distProfile = abs(distProfile)
distProfile = np.sqrt(distProfile)
# position magic
pos1 = np.arange(idx, proLen, 1)
pos2 = np.arange(0, proLen-idx+1, 1)
# split magic
distProfile = distProfile[np.where((pos2 <= split) & (pos1 > split))[0]]
pos1Split = pos1[np.where((pos2 <= split) & (pos1 > split))[0]]
pos2Split = pos2[np.where((pos2 <= split) & (pos1 > split))[0]]
pos1 = pos1Split
pos2 = pos2Split
# update magic
updatePos = np.where(matrixProfile[pos1] > distProfile)[0]
matrixProfile[pos1[updatePos]] = distProfile[updatePos]
updatePos = np.where(matrixProfile[pos2] > distProfile)[0]
matrixProfile[pos2[updatePos]] = distProfile[updatePos]
return matrixProfile
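        # Example (illustrative): for a series T of length 500 and m = 10 the
        # returned profile has 500 - 10 + 1 = 491 entries; entry i is the
        # distance from subsequence i to its nearest neighbour on the other
        # side of `split`.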
def fit_predict(self, T):
""" Fit the model to the time series T.
:param T : np.array(), shape (n_samples)
The time series data for which to compute the matrix profile.
:returns y_score : np.array(), shape (n_samples)
Anomaly score for the samples in T.
:returns y_pred : np.array(), shape (n_samples)
Returns -1 for inliers and +1 for anomalies/outliers.
"""
return self.fit(np.array([])).predict(T)
def fit(self, T=np.array([])):
""" Fit the model to the time series T.
:param T : np.array(), shape (n_samples)
The time series data for which to compute the matrix profile.
:returns self : object
"""
self.T_train = T
return self
    def predict(self, T=np.array([])):
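        """ Predict anomaly scores and labels for the time series T.
        NOTE: the body of this method was truncated in the source file. The code
        below is an assumed sketch (score each sample by its matrix profile value
        and flag the top `contamination` fraction), not the author's verified code.
        :param T : np.array(), shape (n_samples)
            The time series data for which to compute the matrix profile.
        :returns y_score : np.array(), shape (n_samples)
            Anomaly score for the samples in T.
        :returns y_pred : np.array(), shape (n_samples)
            Returns -1 for inliers and +1 for anomalies/outliers.
        """
        T = np.nan_to_num(np.asarray(T, dtype=float))
        n = len(T)
        if len(self.T_train) > 0:
            # AB-join between the training series and the new data
            T_full = np.concatenate((self.T_train, T))
            split = len(self.T_train)
        else:
            # no training data (as in fit_predict): fall back to a self-join
            T_full = T
            split = n // 2
        profile = np.asarray(self.ab_join(T_full, split))
        # the profile has len(T_full) - m + 1 entries; keep the tail that
        # corresponds to subsequences starting inside T and pad back to n samples
        tail = profile[-max(n - self.m + 1, 1):]
        tail[~np.isfinite(tail)] = 0.0
        y_score = np.zeros(n)
        y_score[:len(tail)] = tail
        # flag the highest-scoring `contamination` fraction as anomalies (+1)
        threshold = np.quantile(y_score, 1.0 - self.contamination)
        y_pred = np.where(y_score > threshold, 1, -1)
        return y_score, y_pred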
# <NAME>
# python 3.7
"""
To calculate the extremes of the carbon fluxes based on carbon flux anomalies in gC.
The code is fairly flexible to pass multiple filters to the code.
Output:
* Saving the binarys of extremes
* Saving the TCE binaries at multiple lags [0-4 months)
"""
import os
import netCDF4 as nc4
import numpy as np
import pandas as pd
import datetime as dt
import seaborn as sns
import argparse
from scipy import stats
from functions import time_dim_dates, index_and_dates_slicing, norm, geo_idx, patch_with_gaps_and_eventsize
""" Arguments to input while running the python file
--percentile (-per) : percentile under consideration
looking at the negative/positive tail of gpp events: {eg.1,5,10,90,95,99}
--th_type : Thresholds can be computed at each tail i.e. 'ind' or 'common'.
'common' means that total number of events greater that the modulus of anomalies represent 'per' percentile
--sources (-src) : the models that you want to analyze, separated by hyphens or 'all' for all the models
--variable (-var) : the variable to analyze gpp/nep/npp/nbp
--window (wsize) : time window size in years
# Running: run calc_extremes.py -src cesm -var gpp
"""
print ("Last edit on May 08, 2020")
# The abriviation of the models that will be analyzed:
source_code = { 'cesm' : 'CESM2',
'can' : 'CanESM5',
'ipsl' : 'IPSL-CM6A-LR',
'bcc' : 'BCC-CSM2-MR',
'cnrn-e': 'CNRM-ESM2-1',
'cnrn-c': 'CNRM-CM6-1' }
parser = argparse.ArgumentParser()
parser.add_argument('--percentile' ,'-per' , help = "Threshold Percentile?" , type= int, default= 5 )
parser.add_argument('--th_type' ,'-th' , help = "Threshold Percentile?" , type= str, default= 'common' )
parser.add_argument('--sources' ,'-src' , help = "Which model(s) to analyse?" , type= str, default= 'all' )
parser.add_argument('--variable' ,'-var' , help = "variable? gpp/npp/nep/nbp,,,," , type= str, default= 'gpp' )
parser.add_argument('--window' ,'-wsize' , help = "window size (25 years)?" , type= int, default= 25 )
args = parser.parse_args()
# The inputs:
per = int (args.percentile)
th_type = str (args.th_type)
src = str (args.sources)
variable_run= str (args.variable)
window = int (args.window)
# Model(s) to analyze:
# --------------------
source_selected = []
if len(src.split('-')) >1:
source_selected = src.split('-')
elif src in ['all', 'a']:
source_selected = list(source_code.values() )
elif len(src.split('-')) == 1:
if src in source_code.keys():
source_selected = [source_code[src]]
else:
print (" Enter a valid source id")
#running : run calc_extremes.py -per 5 -var nbp -src cesm
# Reading the dataframe of the selected files
# -------------------------------------------
cori_scratch = '/global/cscratch1/sd/bharat/' # where the anomalies per slave rank are saved
in_path = '/global/homes/b/bharat/results/data_processing/' # to read the filters
#cmip6_filepath_head = '/global/homes/b/bharat/cmip6_data/CMIP6/'
cmip6_filepath_head = '/global/cfs/cdirs/m3522/cmip6/CMIP6/'
#web_path = '/project/projectdirs/m2467/www/bharat/'
web_path = '/global/homes/b/bharat/results/web/'
# exp is actually 'historical + ssp585' but saved as 'ssp585'
exp = 'ssp585'
# Common members per model
# ------------------------
common_members = {}
for source_run in source_selected:
common_members [source_run] = pd.read_csv (cori_scratch + 'add_cmip6_data/common_members/%s_%s_common_members.csv'%(source_run,exp),
header=None).iloc[:,0]
# The spreadsheet with all the available data of cmip 6
# -----------------------------------------------------
df_files = pd.read_csv(in_path + 'df_data_selected.csv')
temp = df_files.copy(deep = True)
# Saving the path of area and lf
filepath_areacella = {}
filepath_sftlf = {}
for s_idx, source_run in enumerate(source_selected):
filters = (temp['source_id'] == source_run) & (temp['variable_id'] == variable_run) # original Variable
filters_area = (temp['source_id'] == source_run) & (temp['variable_id'] == 'areacella') # areacella
filters_lf = (temp['source_id'] == source_run) & (temp['variable_id'] == 'sftlf') # land fraction
#passing the filters to the dataframe
df_tmp = temp[filters]
df_tmp_area = temp[filters_area]
df_tmp_lf = temp[filters_lf]
for member_run in common_members [source_run]:
if source_run == 'BCC-CSM2-MR':
filepath_area = "/global/homes/b/bharat/extra_cmip6_data/areacella_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc"
filepath_lf = "/global/homes/b/bharat/extra_cmip6_data/sftlf_fx_BCC-CSM2-MR_hist-resIPO_r1i1p1f1_gn.nc"
else:
filters_area = (temp['variable_id'] == 'areacella') & (temp['source_id'] == source_run)
filters_lf = (temp['variable_id'] == 'sftlf') & (temp['source_id'] == source_run)
filepath_area = cmip6_filepath_head + "/".join(np.array(temp[filters_area].iloc[-1]))
filepath_lf = cmip6_filepath_head + "/".join(np.array(temp[filters_lf].iloc[-1]))
filepath_areacella [source_run] = filepath_area
filepath_sftlf [source_run] = filepath_lf
# Extracting the area and land fractions of different models
# ==========================================================
data_area = {}
data_lf = {}
for source_run in source_selected:
data_area [source_run] = nc4.Dataset (filepath_areacella[source_run]) . variables['areacella']
data_lf [source_run] = nc4.Dataset (filepath_sftlf [source_run]) . variables['sftlf']
# Saving the paths of anomalies
# hier. : source_id > member_id
# ------------------------------------
paths = {}
for source_run in source_selected:
paths[source_run] = {}
for source_run in source_selected:
for member_run in common_members [source_run]:
saved_ano = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s/'%(source_run,exp,member_run,variable_run)
paths[source_run][member_run] = saved_ano
del saved_ano
# Reading and saving the data:
# ----------------------------
nc_ano = {}
for source_run in source_selected:
nc_ano[source_run] = {}
for source_run in source_selected:
for member_run in common_members [source_run]:
nc_ano[source_run][member_run] = nc4.Dataset(paths[source_run][member_run] + '%s_%s_%s_%s_anomalies_gC.nc'%(source_run,exp,member_run,variable_run))
# Arranging Time Array for plotting and calling
# --------------------------------------------
win_len = 12 * window #number of months in window years
total_years = 251 #years from 1850 to 2100
total_months= total_years * 12
dates_ar = time_dim_dates( base_date = dt.date(1850,1,1),
total_timestamps = 3012 )
start_dates = np.array( [dates_ar[i*win_len] for i in range(int(total_months/win_len))]) #list of start dates of 25 year window
end_dates = np.array( [dates_ar[i*win_len+win_len -1] for i in range(int(total_months/win_len))]) #list of end dates of the 25 year window
idx_yr_2100 = 3012 # upper open index 2100 from the year 1850 if the data is monthly i.e. for complete TS write ts[:3012]
idx_yr_2014 = 1980 # upper open index 2014 from the year 1850 if the data is monthly i.e. for complete TS write ts[:1980]
idx_yr_2099 = 3000 # upper open index 2099 from the year 1850 if the data is monthly i.e. for complete TS write ts[:3000]
# Initiation:
# -----------
def TS_Dates_and_Index (dates_ar = dates_ar,start_dates = start_dates, end_dates=end_dates ):
"""
Returns the TS of the dates and index of consecutive windows of len 25 years
Parameters:
-----------
dates_ar : an array of dates in datetime.date format
the dates are chosen from this array
start_dates: an array of start dates, the start date will decide the dates and index of the first entry for final time series for that window
end_dates: similar to start_dates but for end date
Returns:
--------
dates_win: a 2-d array with len of start dates/ total windows and each row containing the dates between start and end date
idx_dates_win : a 2-d array with len of start dates/ total windows and each row containing the index of dates between start and end date
"""
idx_dates_win = [] #indicies of time in 25yr windows
dates_win = [] #sel dates from time variables in win_len windows
for i in range(len(start_dates)):
idx_loc, dates_loc = index_and_dates_slicing(dates_ar,start_dates[i],end_dates[i]) # see functions.py
idx_dates_win . append (idx_loc)
dates_win . append (dates_loc)
return np.array(dates_win), np.array(idx_dates_win)
# Calling the function "ts_dates_and_index"; Universal for rest of the code
dates_win, idx_dates_win = TS_Dates_and_Index ()
# The saving the results in a dictionary
# --------------------------------------
Results = {}
for source_run in source_selected:
Results[source_run] = {}
for member_run in common_members [source_run]:
Results[source_run][member_run] = {}
# Calculation of thresholds (rth percentile at each tail):
# ------------------------------------------------------------
def Threshold_and_Binary_Ar(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per):
"""
    In this method the rth percentile threshold is calculated at both tails of the pdf of anomalies...
    i.e. the same number of values is selected on either tail.
returns the global percentile based thresholds and binary arrays of consecutive windows
Parameters:
-----------
data : The anomalies whose threshold you want to calculate
Universal:
---------
start_dates, idx_dates_win, per
Returns:
--------
threshold_neg: the threshold for negative extremes; size = # windows
threshold_pos: the threshold for positive extremes; size = # windows
bin_ext_neg: the binary array 1's are extremes based on the threshold_neg; shape = same as data
bin_ext_pos: the binary array 1's are extremes based on the threshold_pos; shape = same as data
"""
thresholds_1= [] #thresholds for consecutive windows of defined size for a 'per' percentile
thresholds_2= [] #thresholds for consecutive windows of defined size for a '100-per' percentile
    bin_ext_neg = np.ma.zeros((data.shape)) #3d array to capture the binary extremes w.r.t. gpp loss events
    bin_ext_pos = np.ma.zeros((data.shape)) #3d array to capture the binary extremes w.r.t. gpp gain events
for i in range(len(start_dates)):
ano_loc = data[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:]
threshold_loc_1 = np.percentile(ano_loc[ano_loc.mask == False],per) # calculation of threshold for the local anomalies
thresholds_1 . append(threshold_loc_1)
threshold_loc_2 = np.percentile(ano_loc[ano_loc.mask == False],(100-per))
thresholds_2 . append(threshold_loc_2)
# Binary arrays:
if per <=50:
bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < threshold_loc_1
bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc_2
else:
bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc_1
bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < threshold_loc_2
# Thresholds for consecutive windows:
if per < 50:
threshold_neg = np.ma.array(thresholds_1)
threshold_pos = np.ma.array(thresholds_2)
elif per > 50:
threshold_neg = np.ma.array(thresholds_2)
threshold_pos = np.ma.array(thresholds_1)
return threshold_neg, threshold_pos, bin_ext_neg, bin_ext_pos
# Calculation of thresholds (rth percentile combines for both tails):
# ------------------------------------------------------------
def Threshold_and_Binary_Ar_Common(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per ):
"""
    In this method the rth percentile threshold is calculated over both tails of the pdf of anomalies combined...
    i.e. the total number of elements on the left and right tails together makes up the rth percentile (Jakob 2014, annex A2)...
    This is done by taking the modulus of the anomalies and then calculating the rth percentile threshold q
Negative extremes: anomalies < -q
Positive extremes: anomalies > q
Returns the global percentile based thresholds and binary arrays of consecutive windows
Parameters:
-----------
data : The anomalies whose threshold you want to calculate
Universal:
---------
start_dates, idx_dates_win, per
Returns:
--------
threshold_neg: the threshold for negative extremes; size = # windows
threshold_pos: the threshold for positive extremes; size = # windows
bin_ext_neg: the binary array 1's are extremes based on the threshold_neg; shape = same as data
bin_ext_pos: the binary array 1's are extremes based on the threshold_pos; shape = same as data
"""
thresholds_p= [] #thresholds for consecutive windows of defined size for a 'per' percentile
thresholds_n= [] #thresholds for consecutive windows of defined size for a '100-per' percentile
    bin_ext_neg = np.ma.zeros((data.shape)) #3d array to capture the binary extremes w.r.t. gpp loss events
    bin_ext_pos = np.ma.zeros((data.shape)) #3d array to capture the binary extremes w.r.t. gpp gain events
assert per <50, "Percentile must be less than 50"
for i in range(len(start_dates)):
ano_loc = data[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:]
threshold_loc = np.percentile(np.abs(ano_loc[ano_loc.mask == False]), (100-per) ) # calculation of threshold for the local anomalies
# The (100-per) is used because after taking the modulus negative extremes fall along positive on the right hand
thresholds_p . append(threshold_loc)
thresholds_n . append(-threshold_loc)
# Binary arrays:
# --------------
bin_ext_neg[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc < -threshold_loc
bin_ext_pos[idx_dates_win[i][0]:idx_dates_win[i][-1]+1,:,:] = ano_loc > threshold_loc
# Thresholds for consecutive windows:
# -----------------------------------
threshold_neg = np.ma.array(thresholds_n)
threshold_pos = np.ma.array(thresholds_p)
return threshold_neg, threshold_pos, bin_ext_neg, bin_ext_pos
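# Quick standalone illustration (synthetic data, not part of the pipeline) of
# the 'common' rule used above: a single threshold q bounds both tails of
# |anomalies|, so negative and positive extremes together cover `per` percent:
# _demo = np.random.randn(10000)
# _q = np.percentile(np.abs(_demo), 100 - per)
# _neg_ext = _demo < -_q # roughly per/2 percent of points (left tail)
# _pos_ext = _demo > _q # roughly per/2 percent of points (right tail)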
limits = {}
limits ['min'] = {}
limits ['max'] = {}
limits ['min']['th_pos'] = 0
limits ['max']['th_pos'] = 0
limits ['min']['th_neg'] = 0
limits ['max']['th_neg'] = 0
p =0
for source_run in source_selected:
for member_run in common_members [source_run]:
p = p+1
# threshold at each tail
if th_type == 'ind':
A,B,C,D = Threshold_and_Binary_Ar(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per )
if th_type == 'common':
A,B,C,D = Threshold_and_Binary_Ar_Common(data = nc_ano[source_run][member_run].variables[variable_run][...], per = per )
Results[source_run][member_run]['th_neg'] = A
Results[source_run][member_run]['th_pos'] = B
Results[source_run][member_run]['bin_ext_neg'] = C
Results[source_run][member_run]['bin_ext_pos'] = D
Results[source_run][member_run]['ts_th_neg'] = np.array([np.array([A[i]]*win_len) for i in range(len(A))]).flatten()
Results[source_run][member_run]['ts_th_pos'] = np.array([np.array([B[i]]*win_len) for i in range(len(B))]).flatten()
# Checking
if p%3 == 0: print ("Calculating Thresholds ......")
elif p%3 == 1: print ("Calculating Thresholds ....")
else: print ("Calculating Thresholds ..")
del A,B,C,D
# Saving the binary data
# ----------------------
save_binary_common = 'n'
if save_binary_common in ['y','yy','Y','yes']:
"""
    To save the binary matrix of the extremes so that the location and duration of the extremes can be identified.
    If you want to save the binary matrix of extremes as nc files:
    this was done so that it could be used as input to the attribution analysis
"""
for source_run in source_selected:
for member_run in common_members [source_run]:
path_TCE = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s_TCE/'%(source_run,exp,member_run,variable_run)
# Check if the directory 'path_TCE' already exists? If not, then create one:
if os.path.isdir(path_TCE) == False:
os.makedirs(path_TCE)
for ext_type in ['neg','pos']:
print("Saving the binary matrix for %s,%s,%s"%(source_run,member_run,ext_type))
with nc4.Dataset( path_TCE + '%s_%s_bin_%s.nc'%(source_run,member_run,ext_type), mode = 'w') as dset:
dset .createDimension( "time" ,size = nc_ano[source_run][member_run].variables['time'].size)
dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size)
dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size)
t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36)
x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36)
y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36)
z = dset.createVariable(varname = variable_run +'_bin' ,datatype = float, dimensions = ("time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext
t.axis = "T"
x.axis = "X"
y.axis = "Y"
t[...] = nc_ano[source_run][member_run].variables['time'] [...]
x[...] = nc_ano[source_run][member_run].variables['lon'][...]
y[...] = nc_ano[source_run][member_run].variables['lat'][...]
z[...] = Results[source_run][member_run]['bin_ext_%s'%ext_type]
z.missing_value = 1e+36
                    z.standard_name = variable_run + " binaries for %s extremes based on %dth percentile"%(ext_type,per)
z.units = "0,1"
x.units = nc_ano[source_run][member_run].variables['lon'].units
x.missing_value = 1e+36
x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name)
y.units = nc_ano[source_run][member_run].variables['lat'].units
y.missing_value = 1e+36
y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name)
t.units = nc_ano[source_run][member_run].variables['time'].units
t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar)
t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name)
t.missing_value = 1e+36
# TCE: Calculations:
# ------------------
lags_TCE = np.asarray([0,1,2,3,4], dtype = int)
def Binary_Mat_TCE_Win (bin_ar, win_start_year=2000,lags = lags_TCE, land_frac= data_lf [source_run]):
"""
Aim:
----
To save the binary matrix of the Time Continuous Extremes(TCEs) so that the location and duration of the extremes can be identified.
Returns:
--------
    bin_TCE_01s: the binary values of extreme values in a TCE, only at qualified locations, with gaps kept as value 0 [highlights the extreme months]
    bin_TCE_1s : the binary values of extreme values in a TCE, only at qualified locations, with gaps replaced by value 1 [selects the full TCE with only 1s]
    bin_TCE_len : the length of the TCE extreme events, captured at the trigger locations
    shape : these matrices are of shape (5,300,192,288) i.e. lags (0-4 months), time (300 months or 25 years {2000-24}), lat (192) and lon (288).
"""
from functions import create_seq_mat
for i,date in enumerate(start_dates):
if date.year in [win_start_year]:
start_yr_idx = i
data = bin_ar[start_yr_idx*win_len: (start_yr_idx+1)*win_len]
del bin_ar
bin_TCE_1s = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2]))
bin_TCE_01s = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2]))
bin_TCE_len = np.ma.zeros((len(lags), data.shape[0],data.shape[1],data.shape[2]))
for lag in lags:
for lat_i in range( data.shape[1] ):
for lon_i in range( data.shape[2] ):
if land_frac[...][lat_i,lon_i] != 0:
#print lag, lat_i, lon_i
try:
tmp = patch_with_gaps_and_eventsize (data[:,lat_i,lon_i], max_gap =2, min_cont_event_size=3, lag=lag)
for idx, trig in enumerate (tmp[1]):
bin_TCE_01s [lag, trig:trig+len(tmp[0][idx]), lat_i, lon_i] = tmp[0][idx]
bin_TCE_1s [lag, trig:trig+len(tmp[0][idx]), lat_i, lon_i] = np.ones(tmp[0][idx].shape)
bin_TCE_len [lag, trig, lat_i, lon_i] = np.sum(np.ones(tmp[0][idx].shape))
except:
bin_TCE_01s[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
bin_TCE_1s [lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
bin_TCE_len[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
else:
bin_TCE_01s[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
bin_TCE_1s [lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
bin_TCE_len[lag, :, lat_i, lon_i] = np.ma.masked_all(data.shape[0])
return bin_TCE_01s, bin_TCE_1s, bin_TCE_len
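# Illustration of the assumed behaviour of patch_with_gaps_and_eventsize (see
# functions.py) as it is used above: for a pixel whose binary series contains
# the run [1,1,1,0,0,1] (>= 3 extreme months, internal gaps <= 2 months) starting
# at month t, tmp[0] holds that 0/1 pattern and tmp[1] holds the trigger month t,
# so bin_TCE_01s keeps the pattern, bin_TCE_1s marks the whole span with 1s and
# bin_TCE_len stores the span length at month t.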
all_win_start_years = np.arange(1850,2100,25)
# To do TCE analysis for all windows
win_start_years = np.arange(1850,2100,25)
# To check only for win starting at 2000
#win_start_years = [2000] # Testing with the year 2000-24 dataset first
save_TCE_binary = 'n'
if save_TCE_binary in ['y','yy','Y','yes']:
"""
To save the binary matrix of the Time Continuous Extremes(TCEs) so that the location and duration of the extremes can be identified.
If you want to save the binary matrix of extremes as nc files
    this was done so that it could be used as input to the attribution analysis
"""
for start_yr in win_start_years:
win_idx = np.where( all_win_start_years == start_yr)[0][0]
for source_run in source_selected:
for member_run in common_members [source_run]:
Binary_Data_TCE = {} # Dictionary to save negative and positive Binary TCEs
Binary_Data_TCE ['neg'] = {}
Binary_Data_TCE ['pos'] = {}
bin_neg = Results[source_run][member_run]['bin_ext_neg']
bin_pos = Results[source_run][member_run]['bin_ext_pos']
# Starting with Negative TCEs first
# ---------------------------------
Binary_Data_TCE ['neg']['bin_TCE_01s'], Binary_Data_TCE ['neg']['bin_TCE_1s'], Binary_Data_TCE ['neg']['bin_TCE_len'] = Binary_Mat_TCE_Win (bin_ar = bin_neg, win_start_year = start_yr,lags = lags_TCE, land_frac= data_lf [source_run])
Binary_Data_TCE ['pos']['bin_TCE_01s'], Binary_Data_TCE ['pos']['bin_TCE_1s'], Binary_Data_TCE ['pos']['bin_TCE_len'] = Binary_Mat_TCE_Win (bin_ar = bin_pos, win_start_year = start_yr,lags = lags_TCE, land_frac= data_lf [source_run])
path_TCE = cori_scratch + 'add_cmip6_data/%s/%s/%s/%s_TCE/'%(source_run,exp,member_run,variable_run)
# Check if the directory 'path_TCE' already exists? If not, then create one:
if os.path.isdir(path_TCE) == False:
os.makedirs(path_TCE)
for ext_type in ['neg','pos']:
print("Saving the 01 TCE for %s,%s,%d,%s"%(source_run,member_run,start_yr,ext_type))
with nc4.Dataset( path_TCE + 'bin_TCE_01s_'+ext_type+'_%d.nc'%start_yr, mode = 'w') as dset:
dset .createDimension( "lag",size = lags_TCE.size)
dset .createDimension( "time",size = win_len)
dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size)
dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size)
w = dset.createVariable(varname = "lag" ,datatype = float, dimensions = ("lag") , fill_value = 1e+36)
t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36)
x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36)
y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36)
z = dset.createVariable(varname = variable_run +'_TCE_01s' ,datatype = float, dimensions = ("lag","time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext
w.axis = "T"
t.axis = "T"
x.axis = "X"
y.axis = "Y"
w[...] = lags_TCE
t[...] = nc_ano[source_run][member_run].variables['time'] [...][win_idx * win_len : (win_idx+1)*win_len]
x[...] = nc_ano[source_run][member_run].variables['lon'][...]
y[...] = nc_ano[source_run][member_run].variables['lat'][...]
z[...] = Binary_Data_TCE [ext_type]['bin_TCE_01s']
z.missing_value = 1e+36
                        z.standard_name = variable_run + " binary TCE (01s) matrix for 25 years starting at the year %d"%start_yr
z.units = "0,1"
x.units = nc_ano[source_run][member_run].variables['lon'].units
x.missing_value = 1e+36
x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name)
y.units = nc_ano[source_run][member_run].variables['lat'].units
y.missing_value = 1e+36
y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name)
t.units = nc_ano[source_run][member_run].variables['time'].units
t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar)
t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name)
t.missing_value = 1e+36
w.units = "month"
w.setncattr ("standard_name","lags in months")
w.missing_value = 1e+36
print("Saving the 1s TCE for %s,%s,%d,%s"%(source_run,member_run,start_yr,ext_type))
with nc4.Dataset( path_TCE + 'bin_TCE_1s_'+ext_type+'_%d.nc'%start_yr, mode = 'w') as dset:
dset .createDimension( "lag",size = lags_TCE.size)
dset .createDimension( "time",size = win_len)
dset .createDimension( "lat" ,size = nc_ano[source_run][member_run].variables['lat'].size)
dset .createDimension( "lon" ,size = nc_ano[source_run][member_run].variables['lon'].size)
w = dset.createVariable(varname = "lag" ,datatype = float, dimensions = ("lag") , fill_value = 1e+36)
t = dset.createVariable(varname = "time" ,datatype = float, dimensions = ("time"), fill_value = 1e+36)
x = dset.createVariable(varname = "lon" ,datatype = float, dimensions = ("lon") , fill_value = 1e+36)
y = dset.createVariable(varname = "lat" ,datatype = float, dimensions = ("lat") , fill_value = 1e+36)
z = dset.createVariable(varname = variable_run+'_TCE_1s' ,datatype = float, dimensions = ("lag","time","lat","lon"),fill_value = 1e+36) #varible = gpp_bin_ext
w.axis = "T"
t.axis = "T"
x.axis = "X"
y.axis = "Y"
w[...] = lags_TCE
t[...] = nc_ano[source_run][member_run].variables['time'] [...][win_idx * win_len : (win_idx+1)*win_len]
x[...] = nc_ano[source_run][member_run].variables['lon'][...]
y[...] = nc_ano[source_run][member_run].variables['lat'][...]
z[...] = Binary_Data_TCE [ext_type]['bin_TCE_1s']
z.missing_value = 1e+36
                        z.standard_name = variable_run + " binary TCE (1s) matrix for 25 years starting at the year %d"%start_yr
z.units = "0,1"
x.units = nc_ano[source_run][member_run].variables['lon'].units
x.missing_value = 1e+36
x.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lon'].standard_name)
y.units = nc_ano[source_run][member_run].variables['lat'].units
y.missing_value = 1e+36
y.setncattr ("standard_name",nc_ano[source_run][member_run].variables['lat'].standard_name)
t.units = nc_ano[source_run][member_run].variables['time'].units
t.setncattr ("calendar", nc_ano[source_run][member_run].variables['time'].calendar)
t.setncattr ("standard_name", nc_ano[source_run][member_run].variables['time'].standard_name)
t.missing_value = 1e+36
w.units = "month"
w.setncattr ("standard_name","lags in months")
w.missing_value = 1e+36
# Calculation of TS of gain or loss of carbon uptake
# --------------------------------------------------
def Global_TS_of_Extremes(bin_ar, ano_gC, area = 0, lf = 0):
"""
Returns the global TS of :
1. total carbon loss/gain associated neg/pos extremes
2. total freq of extremes
3. total area affected by extremes
Parameters:
-----------
bin_ar : the binary array of extremes (pos/neg)
ano_gC : the array which will use the mask or binary arrays to calc the carbon loss/gain
Universal:
----------
2-d area array (nlat, nlon), dates_win (# wins, win_size)
Returns:
--------
1d array of length # wins x win_size for all : ext_gC_ts, ext_freq_ts, ext_area_ts
"""
print (" Calculating Extremes ... " )
ext_ar = bin_ar * ano_gC # extremes array
    if np.isscalar(area) or np.isscalar(lf):
        print ("The area under extreme will not be calculated... \nGrid area input and land fraction is not provided ... \nThe returned area is 0 (zeros)")
        ext_area_ar = np.ma.zeros(bin_ar.shape) # no grid area / land fraction given, so the affected-area TS stays 0
    else:
        ext_area_ar = bin_ar * area[...] * lf[...] # area array of extremes
ext_gC_ts = []
ext_freq_ts = []
ext_area_ts = []
for i in range(dates_win.flatten().size):
ext_gC_ts . append(np.ma.sum(ext_ar[i]))
ext_freq_ts . append(np.ma.sum(bin_ar[i]))
ext_area_ts . append(np.ma.sum(ext_area_ar[i]))
return np.ma.array(ext_gC_ts), np.ma.array(ext_freq_ts),np.ma.array(ext_area_ts)
# Calculating the slopes of GPP extremes
# --------------------------------------
def Slope_Intercept_Pv_Trend_Increase ( time, ts, until_idx1=2100, until_idx2=None):
"""
Returns the slope, intercept, r value , p value and trend line points for time period 1850-2100 (as '_21') and 2101-2300 ('_23')
Parameters:
-----------
One dimentional time series of len 5400 from 1850 through 2299
Returns:
--------
single values for slope, intercept, r value , p value, increase percentage**
    1d array of the same length as 'ts' for 'trend'
    ** it returns the percent increase of the trend line relative to the year 1850 (mean trend line value)
"""
until_idx1 = int (until_idx1)
if until_idx2 != None:
until_idx2 = int (until_idx2)
# calculation of the magnitudes of global gpp loss and trend from 1850- until idx-1
slope_1, intercept_1,rv_1,pv_1,std_e1 = stats.linregress(time[...][:until_idx1],ts[:until_idx1])
trend_1 = slope_1*time[...][:until_idx1]+intercept_1
increase_1 = (trend_1[-1]-trend_1[0])*100/trend_1[0]
# calculation of the magnitudes of global gpp loss and trend from index-1 to until-idx2
if until_idx2 != None:
        slope_2, intercept_2, rv_2, pv_2, std_e2 = stats.linregress(time[...][until_idx1:until_idx2], ts[until_idx1:until_idx2])
        trend_2 = slope_2*time[...][until_idx1:until_idx2] + intercept_2
        increase_2 = (trend_2[-1]-trend_2[0])*100/trend_2[0]
        increase_2_r1850 = (trend_2[-1]-trend_1[0])*100/trend_1[0]
        return slope_1,intercept_1,pv_1,trend_1,increase_1,slope_2,intercept_2,pv_2,trend_2,increase_2,increase_2_r1850
else:
return slope_1,intercept_1,pv_1,trend_1,increase_1
# Saving the results of TS carbon loss/gain
for source_run in source_selected:
for member_run in common_members [source_run]:
Results[source_run][member_run]['ts_global_gC'] = {}
Results[source_run][member_run]['ts_global_area'] = {}
Results[source_run][member_run]['ts_global_freq'] = {}
Results[source_run][member_run]['ts_global_gC']['neg_ext'] = {}
Results[source_run][member_run]['ts_global_gC']['pos_ext'] = {}
Results[source_run][member_run]['ts_global_area']['neg_ext']= {}
Results[source_run][member_run]['ts_global_area']['pos_ext']= {}
Results[source_run][member_run]['ts_global_freq']['neg_ext']= {}
Results[source_run][member_run]['ts_global_freq']['pos_ext']= {}
for source_run in source_selected:
print ("Calculating the global TS of Extremes for %s"%source_run)
for member_run in common_members [source_run]:
# Negative Extremes:
# ------------------
ts_ext , ts_freq, ts_area = Global_TS_of_Extremes(bin_ar = Results[source_run][member_run]['bin_ext_neg'],
ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
area = data_area [source_run],
lf = data_lf [source_run])
Results[source_run][member_run]['ts_global_gC' ]['neg_ext']['ts'] = ts_ext
Results[source_run][member_run]['ts_global_area']['neg_ext']['ts'] = ts_area
Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'] = ts_freq
del ts_ext , ts_freq, ts_area
# Positive Extremes:
# -----------------
ts_ext , ts_freq, ts_area = Global_TS_of_Extremes(bin_ar = Results[source_run][member_run]['bin_ext_pos'],
ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
area = data_area [source_run],
lf = data_lf [source_run])
Results[source_run][member_run]['ts_global_gC' ]['pos_ext']['ts'] = ts_ext
Results[source_run][member_run]['ts_global_area']['pos_ext']['ts'] = ts_area
Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'] = ts_freq
del ts_ext , ts_freq, ts_area
# -----------------
for source_run in source_selected:
for member_run in common_members [source_run]:
# Negative Extremes gC:
# ---------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_gC']['neg_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_gC']['neg_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_gC']['neg_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_gC']['neg_ext']['trend_21'] = trend
Results[source_run][member_run]['ts_global_gC']['neg_ext']['inc_21' ] = increase
del slope,intercept,pv,trend,increase
# Positive Extremes gC:
# ---------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_gC']['pos_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_gC']['pos_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_gC']['pos_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_gC']['pos_ext']['trend_21'] = trend
Results[source_run][member_run]['ts_global_gC']['pos_ext']['inc_21' ] = increase
del slope,intercept,pv,trend,increase
# -----------------------------------
# -----------------------------------
# Negative Extremes freq:
# -----------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_freq']['neg_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_freq']['neg_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_freq']['neg_ext']['trend_21']= trend
Results[source_run][member_run]['ts_global_freq']['neg_ext']['inc_21' ]= increase
del slope,intercept,pv,trend,increase
# Positive Extremes freq:
# -----------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_freq']['pos_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_freq']['pos_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_freq']['pos_ext']['trend_21']= trend
Results[source_run][member_run]['ts_global_freq']['pos_ext']['inc_21' ]= increase
del slope,intercept,pv,trend,increase
# -----------------------------------
# -----------------------------------
# Negative Extremes area:
# -----------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_area']['neg_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_area']['neg_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_area']['neg_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_area']['neg_ext']['trend_21']= trend
Results[source_run][member_run]['ts_global_area']['neg_ext']['inc_21' ]= increase
del slope,intercept,pv,trend,increase
# Positive Extremes area:
# -----------------------
slope,intercept,pv,trend,increase = Slope_Intercept_Pv_Trend_Increase (
time = nc_ano[source_run][member_run].variables['time'],
ts = Results[source_run][member_run]['ts_global_area']['pos_ext']['ts'],
until_idx1 = idx_yr_2099)
Results[source_run][member_run]['ts_global_area']['pos_ext']['s21' ] = slope
Results[source_run][member_run]['ts_global_area']['pos_ext']['pv21' ] = pv
Results[source_run][member_run]['ts_global_area']['pos_ext']['trend_21']= trend
Results[source_run][member_run]['ts_global_area']['pos_ext']['inc_21' ]= increase
del slope,intercept,pv,trend,increase
# -----------------------------------
def Sum_and_Diff_of_Fluxes_perWin(ano_gC, bin_ar = None, data_type = 'ext', diff_ref_yr = 1850):
"""
returns a 2-d array sum of fluxes and difference of the sum of fluxes with reference to the ref yr
Parameters:
----------
bin_ar: the binary array of extremes (pos/neg)
ano_gC : the array which will use the mask or binary arrays to calc the carbon loss/gain
diff_ref_yr : the starting year of the reference time window for differencing
data_type : do you want to calculate the sum and difference of extremes or original fluxes? ...
'ext' is for extremes and will mask based on the 'bin_ar' in calculation ...
otherwise it will not multiply by bin_ar and the original flux difference will be calculated.
    'ext' will calculate the extremes; anything else will calculate the difference of the original fluxes
Universal:
----------
start_dates : the start_dates of every 25 year window, size = # wins
Returns:
--------
sum_flux : shape (# wins, nlat,nlon), sum of fluxes per window
diff_flux : shape (# wins, nlat,nlon), difference of sum of fluxes per window and reference window
"""
if data_type != 'ext': bin_ar = np.ma.ones(ano_gC.shape)
sum_ext = []
for i in range(len(start_dates)):
ext_gC = bin_ar[idx_dates_win[i][0] : idx_dates_win [i][-1]+1,:,:] * ano_gC[idx_dates_win[i][0] : idx_dates_win [i][-1]+1,:,:]
sum_ext . append (np.ma.sum(ext_gC, axis = 0))
sum_ext = np.ma.asarray(sum_ext)
#to calculate the index of the reference year starting window:
for i,date in enumerate(start_dates):
if date.year in [diff_ref_yr]:
diff_yr_idx = i
diff_ext = []
for i in range(len(start_dates)):
diff = sum_ext[i] - sum_ext[diff_yr_idx]
diff_ext . append (diff)
diff_ext = np.ma.asarray(diff_ext)
return sum_ext , diff_ext
# ----------------------------------------------------------
# Preparing the storage
# ----------------------------------------------------------
for source_run in source_selected:
for member_run in common_members [source_run]:
# Negative Extremes:
sum_neg_ext , diff_neg_ext = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = Results[source_run][member_run]['bin_ext_neg'],
ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
data_type = 'ext',
diff_ref_yr = 1850)
Results[source_run][member_run]['sum_neg_ext'] = sum_neg_ext
Results[source_run][member_run]['diff_neg_ext'] = diff_neg_ext
# Positive extremes:
sum_pos_ext , diff_pos_ext = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = Results[source_run][member_run]['bin_ext_pos'],
ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
data_type = 'ext',
diff_ref_yr = 1850)
Results[source_run][member_run]['sum_pos_ext'] = sum_pos_ext
Results[source_run][member_run]['diff_pos_ext'] = diff_pos_ext
del sum_neg_ext , diff_neg_ext, sum_pos_ext , diff_pos_ext
#Negative Flux/Ori
#sum_neg_ori , diff_neg_ori = Sum_and_Diff_of_Fluxes_perWin ( bin_ar = None,
# ano_gC = nc_ano[source_run][member_run].variables[variable_run][...],
# data_type = 'ori',
# diff_ref_yr = 1850)
# Results[source_run][member_run]['sum_neg_ori'] = sum_neg_ori
# Results[source_run][member_run]['diff_neg_ori'] = diff_neg_ori
# Results[source_run][member_run]['sum_pos_ext'] = {}
# Results[source_run][member_run]['diff_neg_ext'] = {}
# Results[source_run][member_run]['diff_pos_ext'] = {}
# Regional analysis
# -----------------
import regionmask
# Selection the member_run manually
member_run = common_members[source_run] [0]
lon = nc_ano[source_run][member_run].variables ['lon']
lat = nc_ano[source_run][member_run].variables ['lat']
# for the plotting
lon_bounds = nc_ano[source_run][member_run].variables [lon.bounds]
lat_bounds = nc_ano[source_run][member_run].variables [lat.bounds]
lon_edges = np.hstack (( lon_bounds[:,0], lon_bounds[-1,-1]))
lat_edges = np.hstack (( lat_bounds[:,0], lat_bounds[-1,-1]))
# Creating mask of the regions based on the resolution of the model
mask = regionmask.defined_regions.srex.mask(lon[...], lat[...]).values
# important information:
srex_abr = regionmask.defined_regions.srex.abbrevs
srex_names = regionmask.defined_regions.srex.names
srex_nums = regionmask.defined_regions.srex.numbers
srex_centroids = regionmask.defined_regions.srex.centroids
srex_polygons = regionmask.defined_regions.srex.polygons
mask_ma = np.ma.masked_invalid(mask)
import matplotlib.pyplot as plt
import os
"""
Basemaps not working anymore
===========================
#1- Hack to fix missing PROJ4 env var
import os
import conda
conda_file_dir = conda.__file__
conda_dir = conda_file_dir.split('lib')[0]
proj_lib = os.path.join(os.path.join(conda_dir, 'share'), 'proj')
os.environ["PROJ_LIB"] = proj_lib
#-1 Hack end
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
"""
"""
Regional Plots
--------------
#fig = plt.figure()
#ax = plt.subplot(111, projection=ccrs.PlateCarree())
fig,ax = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400)
bmap = Basemap( projection = 'eck4',
lon_0 = 0.,
resolution = 'c')
LON,LAT = np.meshgrid(lon_edges,lat_edges)
ax = bmap.pcolormesh(LON,LAT, mask_ma, cmap ='viridis')
bmap .drawparallels(np.arange(-90., 90., 30.),fontsize=14, linewidth = .2)
bmap .drawmeridians(np.arange(0., 360., 60.),fontsize=14, linewidth = .2)
bmap .drawcoastlines(linewidth = .25,color='lightgrey')
plt.colorbar(ax, orientation='horizontal', pad=0.04)
fig.savefig (web_path + "SREX_regions.pdf")
# Cartopy Plotting
# ----------------
import cartopy.crs as ccrs
from shapely.geometry.polygon import Polygon
import cartopy.feature as cfeature
# Fixing the error {'GeoAxesSubplot' object has no attribute '_hold'}
from matplotlib.axes import Axes
from cartopy.mpl.geoaxes import GeoAxes
GeoAxes._pcolormesh_patched = Axes.pcolormesh
proj_trans = ccrs.PlateCarree()
fig = plt.figure(figsize = (9,5))
ax = fig.add_subplot(111, projection=ccrs.PlateCarree())
mask_ma = np.ma.masked_invalid(mask)
h = ax.pcolormesh(lon_edges[...], lat_edges[...], mask_ma, transform = proj_trans)#, cmap='viridis')
ax.coastlines()
plt.colorbar(h, orientation='horizontal', pad=0.04)
# Plot the abs at the centroids
for idx, abr in enumerate(srex_abr):
plt.text ( srex_centroids[idx][0], srex_centroids[idx][-1], srex_abr[idx],
horizontalalignment='center',
transform = proj_trans)
ax.add_geometries([srex_polygons[idx]], crs = proj_trans, facecolor='none', edgecolor='red', alpha=0.8)
fig.savefig (web_path + "SREX_regions_cpy.pdf")
plt.close(fig)
"""
# =================================================================================================
# =================================================================================================
## # ## ########
# # # ## ## ##
## # # # ##
# # ## ## ##
# ##### ## ##
# =================================================================================================
# =================================================================================================
# Creating a list of unique colors for multiple models:
# ---------------------------------------------------
NUM_COLORS = len(source_selected)
LINE_STYLES = ['solid', 'dashed', 'dashdot', 'dotted']
NUM_STYLES = len(LINE_STYLES)
sns.reset_orig() # get default matplotlib styles back
clrs = sns.color_palette('husl', n_colors=NUM_COLORS)
# Creating the ticks for x axis (every 25 years):
# ----------------------------------------------
tmp_idx = np.arange(0, 3013, 300) #for x ticks
tmp_idx[-1]=tmp_idx[-1]-1
dates_ticks = []
years_ticks = []
for i in tmp_idx:
a = dates_win.flatten()[i]
dates_ticks.append(a)
years_ticks.append(a.year)
# Creating the x-axis years (Monthly)
# -----------------------------------
x_years = [d.year for d in dates_win.flatten()]
# Caption (optional): This dictionary could be used to save the captions of the figures
# -------------------------------------------------------------------------------------
Captions = {}
# PLOTTING THE THRESHOLD FOR QUALIFICATION OF EXTREME EVENTS: fig[1-9]
# ===================================================================
if th_type == 'ind':
fig1,ax2 = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400)
ymin = 400
ymax = 8000
for s_idx, source_run in enumerate(source_selected):
for m_idx, member_run in enumerate(common_members [source_run]):
# ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
# 'r', label = "Th$-$ %s"%source_run, alpha = .7)
# ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
# clrs[s_idx], ls='--', label = "Th$-$ %s"%source_run, alpha = .7)
ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
'r', ls='--', label = "Th$-$ %s"%source_run, alpha = .3)
ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14)
ax2.set_xlabel("Time", fontsize = 14)
ax2.set_ylim([ymin,ymax])
#ax2.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax2.set_yticklabels(-np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax2.tick_params(axis='y', colors='red')
# ax2.set_xticks(dates_ticks)
ax2.grid(which='major', linestyle=':', linewidth='0.3', color='gray')
ax1=ax2.twinx()
# ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9,
# 'g', label = "Th+ %s"%source_run, alpha = .7)
ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9,
'g', label = "Th+ %s"%source_run, alpha = .3)
ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14)
ax1.set_ylim([ymin,ymax])
#ax1.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax1.tick_params(axis='y', colors='green')
# ax1.set_xticks(dates_ticks)
# ax1.grid(which='major', linestyle=':', linewidth='0.3', color='gray')
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
labels, ids = np.unique(labels, return_index=True)
labels2, ids2 = np.unique(labels2, return_index=True)
lines = [lines[i] for i in ids]
lines2 = [lines2[i] for i in ids2]
# ax2.legend(lines + lines2, labels + labels2, loc= 'best',fontsize =12)
#continue
fig1.savefig(web_path + 'Threshold/ts_threshold_all_scenario_%s_per_%s.pdf'%(variable_run,int(per)))
plt.close(fig1)
del fig1
# Threshold per model for the 'th_type' == 'ind' and per = 1.0
# -------------------------------------------------------------
for source_run in source_selected:
fig2,ax2 = plt.subplots(tight_layout = True, figsize = (9,5), dpi = 400)
pd.plotting.deregister_matplotlib_converters()
if source_run == 'CESM2' : ymin = 400 ; ymax = 700
if source_run == 'CanESM5' : ymin = 2000 ; ymax = 8000
if source_run == 'IPSL-CM6A-LR' : ymin = 1700 ; ymax = 2900
if source_run == 'BCC-CSM2-MR' : ymin = 400 ; ymax = 1000
if source_run == 'CNRM-ESM2-1' : ymin = 1000 ; ymax = 1500
if source_run == 'CNRM-CM6-1' : ymin = 1000 ; ymax = 1800
for m_idx, member_run in enumerate(common_members [source_run]):
L1= ax2.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
'r', label = "Th$-$ %s"%member_run, linewidth = 0.3, alpha = .7)
L1[0].set_linestyle(LINE_STYLES[m_idx%NUM_STYLES])
ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14)
ax2.set_xlabel("Time", fontsize = 14)
#ax2.set_xlim([dates_ticks[0],dates_ticks[-1]])
#ax2.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax2.set_yticklabels(-np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax2.tick_params(axis='y', colors='red')
ax2.grid(which='major', linestyle='--', linewidth='0.3', color='gray')
ax1=ax2.twinx()
for m_idx, member_run in enumerate(common_members [source_run]):
L2= ax1.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_pos'])/10**9,
'g', label = "Th+ %s"%member_run, linewidth = 0.3, alpha = .7)
L2[0].set_linestyle(LINE_STYLES[m_idx%NUM_STYLES])
ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14)
#ax1.set_yticklabels([])
#ax1.set_yticks(np.arange(int(np.floor(ymin/100)*100),int(np.ceil(ymax/100)*100),25))
#ax1.tick_params(axis='y', colors='green')
# ax1.grid(which='major', linestyle='--', linewidth='0.3', color='gray')
ax2.set_ylabel("Negative Extremes (GgC)", {'color': 'r'},fontsize =14)
ax2.set_xlabel("Time", fontsize = 14)
ax1.set_ylabel("Positive Extremes (GgC)", {'color': 'g'},fontsize =14)
ax2.set_ylim([ymin,ymax])
ax1.set_ylim([ymin,ymax])
ax1.set_xticks(dates_ticks)
ax1.set_xticklabels(years_ticks)
lines, labels = ax1.get_legend_handles_labels()
lines2, labels2 = ax2.get_legend_handles_labels()
ax2.legend(lines + lines2, labels + labels2, loc=0,fontsize =8)
fig2.savefig(web_path + 'Threshold/ts_threshold_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Threshold/"%(source_run,member_run, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig2.savefig(path_save + 'ts_threshold_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
plt.close(fig2)
del fig2,ax2
# Plotting thresholds when 'th_type' == 'common':
# -----------------------------------------------
if th_type == 'common':
fig3 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.title("TS of Thresholds for CMIP6 models for percentile = %d"%int(per))
pd.plotting.deregister_matplotlib_converters()
for s_idx, source_run in enumerate(source_selected):
for m_idx, member_run in enumerate(common_members [source_run]):
plt.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
color=clrs[s_idx], ls='-', label = "$q$ %s"%source_run, alpha = .8, linewidth = .7)
plt.ylabel("Thresholds (GgC)", {'color': 'k'},fontsize =14)
plt.xlabel("Time", fontsize = 14)
plt.grid(which='major', linestyle=':', linewidth='0.3', color='gray')
plt.legend()
break #Plotting only the first ensemble member
fig3.savefig(web_path + 'Threshold/ts_thresholdc_all_models_%s_per_%s.pdf'%(variable_run,int(per)))
plt.close(fig3)
del fig3
# Threshold per model for the 'th_type' == 'common' and per = 5.0
# ---------------------------------------------------------------
for s_idx, source_run in enumerate(source_selected):
fig4 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.title("TS of %d percentile Thresholds of %s for the model %s"%(per, variable_run.upper(), source_run))
pd.plotting.deregister_matplotlib_converters()
if variable_run == 'gpp':
if source_run == 'CESM2' : ymin = 250 ; ymax = 400
if source_run == 'CanESM5' : ymin = 1500 ; ymax = 4500
if source_run == 'IPSL-CM6A-LR' : ymin = 1200 ; ymax = 2100
if source_run == 'BCC-CSM2-MR' : ymin = 300 ; ymax = 600
if source_run == 'CNRM-ESM2-1' : ymin = 700 ; ymax = 900
if source_run == 'CNRM-CM6-1' : ymin = 600 ; ymax = 1100
if variable_run == 'nbp':
if source_run == 'CESM2' : ymin = 130 ; ymax = 230
if variable_run == 'ra':
if source_run == 'CESM2' : ymin = 180 ; ymax = 240
if variable_run == 'rh':
if source_run == 'CESM2' : ymin = 100 ; ymax = 170
for m_idx, member_run in enumerate(common_members [source_run]):
plt.plot( dates_win.flatten(), abs(Results[source_run][member_run]['ts_th_neg'])/10**9,
color=clrs[s_idx], ls='-', label = "$q$ %s"%source_run, alpha = 1, linewidth = 1)
break #Plotting only the first ensemble member
plt.ylim ((ymin,ymax))
plt.ylabel("Thresholds (GgC)", {'color': 'k'},fontsize =14)
plt.xlabel("Time", fontsize = 14)
plt.grid(which='major', linestyle=':', linewidth='0.4', color='gray')
plt.legend()
fig4.savefig(web_path + 'Threshold/ts_thresholdc_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Threshold/"%(source_run,member_run, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig4.savefig(path_save + 'ts_thresholdc_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
plt.close(fig4)
del fig4
# PLOTTING THE GLOBAL TIME SERIES OF THE EXTREME EVENTS : fig[11-19]
# ======================================================================================
for s_idx, source_run in enumerate(source_selected):
fig11 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.style.use("classic")
plt.title ("TS global %s extremes for %s when percentile is %d"%(variable_run.upper(), source_run, per))
pd.plotting.deregister_matplotlib_converters()
if variable_run == 'gpp':
if source_run == 'CESM2' : ymin = -1.2 ; ymax = 1.2
if source_run == 'CanESM5' : ymin = -1.5 ; ymax = 1.5
if source_run == 'IPSL-CM6A-LR' : ymin = -0.5 ; ymax = 0.5
if source_run == 'BCC-CSM2-MR' : ymin = -1.6 ; ymax = 1.6
if source_run == 'CNRM-ESM2-1' : ymin = -0.8 ; ymax = 0.8
if source_run == 'CNRM-CM6-1' : ymin = -1.7 ; ymax = 1.7
if variable_run == 'nbp':
if source_run == 'CESM2' : ymin = -.7 ; ymax = .7
if variable_run == 'ra':
if source_run == 'CESM2' : ymin = -.7 ; ymax = .7
if variable_run == 'rh':
if source_run == 'CESM2' : ymin = -.4 ; ymax = .4
for m_idx, member_run in enumerate(common_members [source_run]):
plt.plot( dates_win.flatten(), Results[source_run][member_run]['ts_global_gC']['neg_ext']['ts'] / 10**15,
'r', label = "Negative Extremes" , linewidth = 0.5, alpha=0.7 )
plt.plot( dates_win.flatten(), Results[source_run][member_run]['ts_global_gC']['pos_ext']['ts'] / 10**15,
'g', label = "Positive Extremes" , linewidth = 0.5, alpha=0.7 )
plt.plot( dates_win.flatten() [:idx_yr_2099], Results[source_run][member_run]['ts_global_gC']['neg_ext']['trend_21'] /10**15,
'k--', label = "Neg Trend 21", linewidth = 0.5, alpha=0.9 )
plt.plot( dates_win.flatten() [:idx_yr_2099], Results[source_run][member_run]['ts_global_gC']['pos_ext']['trend_21'] /10**15,
'k--', label = "Pos Trend 21", linewidth = 0.5, alpha=0.9 )
break #Plotting only the first ensemble member
plt.ylim ((ymin,ymax)) #| waiting for the first set of graphs to remove this comment
plt.xlabel( 'Time', fontsize = 14)
plt.xticks(ticks = dates_ticks, labels = years_ticks, fontsize = 12)
plt.ylabel( "Intensity of Extremes (PgC/mon)", fontsize = 14)
plt.grid( which='major', linestyle=':', linewidth='0.3', color='gray')
plt.text( dates_win.flatten()[900], ymin+0.2,"Slope = %d %s"%(int(Results[source_run][member_run]['ts_global_gC']['neg_ext']['s21']/10**6),
'MgC/month'), size =14, color = 'r' )
plt.text( dates_win.flatten()[900], ymax-0.2,"Slope = %d %s"%(int(Results[source_run][member_run]['ts_global_gC']['pos_ext']['s21']/10**6),
'MgC/month'), size =14, color = 'g' )
fig11.savefig(web_path + 'Intensity/ts_global_carbon_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Intensity/"%(source_run,member_run, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig11.savefig(path_save + 'ts_global_carbon_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
fig11.savefig(path_save + 'ts_global_carbon_%s_source_%s_per_%s.png'%(source_run,variable_run,int(per)))
plt.close(fig11)
del fig11
# Rolling mean of annual losses and gains
# ---------------------------------------
def RM_Nyearly_4m_Mon(ts, rm_years = 5):
"""
    The rolling mean is right-aligned (each value averages the current and the
    preceding rm_years-1 annual sums), so the first rm_years-1 years (4 for a
    5-year rolling mean) are not reported in the output.
"""
ts = np.array(ts)
    yr = np.array([np.sum(ts[12*i:12*(i+1)]) for i in range(ts.size//12)])   # annual sums from monthly data
yr_rm = pd.Series(yr).rolling(rm_years).mean()
return yr_rm[rm_years-1:]
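# Minimal sketch (illustrative only, not called anywhere): with 24 months of ones and
# rm_years = 2, the annual sums are [12, 12]; the 2-year rolling mean drops the first
# year and reports a single value of 12.0.
def _rm_nyearly_sketch():
    monthly = np.ones(24)
    return RM_Nyearly_4m_Mon(monthly, rm_years=2)   # pandas Series with one entry: 12.0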
# Plotting 5 Year Rolling Mean figures
# -----------------------------------
for s_idx, source_run in enumerate(source_selected):
fig12 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.title ("5yr RM of TS annual global %s for %s when percentile is %d"%(variable_run.upper(), source_run, per))
pd.plotting.deregister_matplotlib_converters()
for m_idx, member_run in enumerate(common_members [source_run]):
print (source_run,member_run)
plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_gC']['neg_ext']['ts'] / 10**15),
'r', label = "Negative Extremes" , linewidth = 0.5, alpha=0.7 )
plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_gC']['pos_ext']['ts'] / 10**15),
'g', label = "Positive Extremes" , linewidth = 0.5, alpha=0.7 )
break #Plotting only the first ensemble member
plt.xlabel( 'Time', fontsize = 14)
plt.ylabel( "Intensity of Extremes (PgC/mon)", fontsize = 14)
plt.grid( which='major', linestyle=':', linewidth='0.3', color='gray')
fig12.savefig(web_path + 'Intensity/ts_rm5yr_global_carbon_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Intensity/"%(source_run,member_run, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig12.savefig(path_save + 'ts_rm5yr_global_carbon_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
plt.close(fig12)
del fig12
# Frequency of extremes:
# ======================
# Plotting 5 Year Rolling Mean figures:
# ------------------------------------
for s_idx, source_run in enumerate(source_selected):
fig14 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.title ("5yr RM of annual TS of global frequency of %s extremes\nfor %s when percentile is %d"%(variable_run.upper(),source_run, per))
pd.plotting.deregister_matplotlib_converters()
for m_idx, member_run in enumerate(common_members [source_run]):
print (source_run,member_run)
plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'] ),
'r', label = "Negative Extremes" , linewidth = 0.5, alpha=0.7 )
plt.plot(np.arange(1854,2100), RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'] ),
'g', label = "Positive Extremes" , linewidth = 0.5, alpha=0.7 )
break #Plotting only the first ensemble member
plt.xlabel( 'Time', fontsize = 14)
plt.ylabel( "Frequency of Extremes (count/yr)", fontsize = 14)
plt.grid( which='major', linestyle=':', linewidth='0.3', color='gray')
fig14.savefig(web_path + 'Freq/ts_rm5yr_global_freq_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
# Saving the plots
path_save = cori_scratch + "add_cmip6_data/%s/ssp585/%s/%s/Global_Extremes/non-TCE/Freq/"%(source_run,member_run, variable_run)
if os.path.isdir(path_save) == False:
os.makedirs(path_save)
fig14.savefig(path_save + 'ts_rm5yr_global_freq_%s_source_%s_per_%s.pdf'%(source_run,variable_run,int(per)))
plt.close(fig14)
del fig14
Captions['fig14'] = " 5 year moving average of the frequency (counts) of positive and negative extremes.\
                     All ensemble members have the same values"
# Plotting 5 Year Rolling Mean figures (normalized) - pending:
# -------------------------------------------------
# Function to normalize positive and negative freq
def Norm_Two_TS(ts1, ts2):
ts = np.concatenate((ts1,ts2))
norm_ts = norm(ts)
norm_ts1 = norm_ts[:len(ts1)]
norm_ts2 = norm_ts[len(ts1):]
return norm_ts, norm_ts1, norm_ts2
# TEST
p = np.array([ 8, 6, 7, 8, 6, 5, 4, 6])
n = np.array([ 5, 6, 6, 4, 5, 7, 8, 6])
_,norm_p, norm_n = Norm_Two_TS(p,n)
norm_np = norm_n/norm_p
norm_pn = norm_p/norm_n
mask_np = np.ma.masked_greater(norm_np,1)
mask_pn = np.ma.masked_greater(norm_pn,1)
fig = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
#plt.plot( mask_np)
#plt.plot( -mask_pn)
#plt.plot(_)
#plt.plot(norm_np)
plt.plot(p/n)
fig.savefig(web_path + 'ratio_test.pdf')
# Dict to capture the ts of ratios pos to neg extremes of models
ts_ratio_freq = {}
for s_idx, source_run in enumerate(source_selected):
fig15 = plt.figure(tight_layout = True, figsize = (9,5), dpi = 400)
plt.title ("5yr Ratio of RM of TS annual global frequency (n/p) for %s when percentile is %d"%(source_run, per))
pd.plotting.deregister_matplotlib_converters()
for m_idx, member_run in enumerate(common_members [source_run]):
print (source_run,member_run)
ts_ratio = np.divide ( RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_freq']['neg_ext']['ts'],10) ,
RM_Nyearly_4m_Mon(Results[source_run][member_run]['ts_global_freq']['pos_ext']['ts'],10) )
ts_ratio_freq[source_run] = ts_ratio
plt.plot(np.arange(1859,2100)  # api: numpy.arange
if __name__ == "__main__":
#%%
import sys
import time
from sklearn.model_selection import StratifiedKFold, train_test_split
from tqdm import trange
sys.path.append('..')
import os
import torch
import pandas as pd
import numpy as np
from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
from lens.models.relu_nn import XReluNN
from lens.models.psi_nn import PsiNetwork
from lens.models.tree import XDecisionTreeClassifier
from lens.models.brl import XBRLClassifier
from lens.models.deep_red import XDeepRedClassifier
from lens.utils.base import set_seed, ClassifierNotTrainedError, IncompatibleClassifierError
from lens.utils.metrics import Accuracy, F1Score
from lens.models.general_nn import XGeneralNN
from lens.utils.datasets import StructuredDataset
from lens.logic.base import test_explanation
from lens.logic.metrics import complexity, fidelity, formula_consistency
from data import VDEM
from data.load_structured_datasets import load_vDem
# n_sample = 100
results_dir = f'results/vDem'
if not os.path.isdir(results_dir):
os.makedirs(results_dir)
#%% md
## Loading VDEM data
#%%
dataset_root = "../data/"
dataset_name = VDEM
print(dataset_root)
print(results_dir)
x, c, y, feature_names, concept_names, class_names = load_vDem(dataset_root)
y = y.argmax(dim=1)
n_features = x.shape[1]
n_concepts = c.shape[1]
n_classes = len(class_names)
dataset_low = StructuredDataset(x, c, dataset_name=dataset_name, feature_names=feature_names, class_names=concept_names)
print("Number of features", n_features)
print("Number of concepts", n_concepts)
print("Feature names", feature_names)
print("Concept names", concept_names)
print("Class names", class_names)
#%% md
## Define loss, metrics and methods
#%%
loss_low = BCEWithLogitsLoss()
loss_high = CrossEntropyLoss()
metric = Accuracy()
expl_metric = F1Score()
method_list = ['DTree', 'BRL', 'Psi', 'Relu', 'General'] # 'DeepRed']
print("Methods", method_list)
#%% md
## Training
#%%
epochs = 1000
n_processes = 4
timeout = 60 * 60 # 1 h timeout
l_r = 1e-3
lr_scheduler = False
top_k_explanations = None
simplify = True
seeds = [*range(5)]
print("Seeds", seeds)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print("Device", device)
for method in method_list:
methods = []
splits = []
model_explanations = []
model_accuracies = []
explanation_accuracies = []
elapsed_times = []
explanation_fidelities = []
explanation_complexities = []
skf = StratifiedKFold(n_splits=len(seeds), shuffle=True, random_state=0)
for seed, (trainval_index, test_index) in enumerate(skf.split(x.numpy(), y.numpy())):
set_seed(seed)
x_trainval, c_trainval, y_trainval = x[trainval_index], c[trainval_index], y[trainval_index]
x_test, c_test, y_test = x[test_index], c[test_index], y[test_index]
x_train, x_val, c_train, c_val, y_train, y_val = train_test_split(x_trainval, c_trainval, y_trainval,
test_size=0.3, random_state=0)
train_data_low = StructuredDataset(x_train, c_train, dataset_name, feature_names, concept_names)
val_data_low = StructuredDataset(x_val, c_val, dataset_name, feature_names, concept_names)
test_data_low = StructuredDataset(x_test, c_test, dataset_name, feature_names, concept_names)
data_low = StructuredDataset(x, c, dataset_name, feature_names, concept_names)
name_low = os.path.join(results_dir, f"{method}_{seed}_low")
name_high = os.path.join(results_dir, f"{method}_{seed}_high")
# Setting device
print(f"Training {name_low} classifier...")
start_time = time.time()
if method == 'DTree':
model_low = XDecisionTreeClassifier(name=name_low, n_classes=n_concepts,
n_features=n_features, max_depth=5)
try:
model_low.load(device)
print(f"Model {name_low} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_low.fit(train_data_low, val_data_low, metric=metric, save=True)
c_predicted_train, _ = model_low.predict(train_data_low, device=device)
c_predicted_val, _ = model_low.predict(val_data_low, device=device)
c_predicted_test, _ = model_low.predict(test_data_low, device=device)
accuracy_low = model_low.evaluate(test_data_low, metric=metric)
train_data_high = StructuredDataset(c_predicted_train, y_train, dataset_name, feature_names, concept_names)
val_data_high = StructuredDataset(c_predicted_val, y_val, dataset_name, feature_names, concept_names)
test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
model_high = XDecisionTreeClassifier(name=name_high, n_classes=n_classes, n_features=n_concepts, max_depth=5)
try:
model_high.load(device)
print(f"Model {name_high} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_high.fit(train_data_high, val_data_high, metric=metric, save=True)
outputs, labels = model_high.predict(test_data_high, device=device)
accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
for i in trange(n_classes):
explanation = model_high.get_global_explanation(i, concept_names)
class_output = torch.as_tensor((outputs > 0.5) == i)
class_label = torch.as_tensor(labels == i)
exp_fidelity = 100
exp_accuracy = expl_metric(class_output, class_label)
explanation_complexity = complexity(explanation)
explanations.append(explanation), exp_accuracies.append(exp_accuracy)
exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
elif method == 'BRL':
train_sample_rate = 1.0
model_low = XBRLClassifier(name=name_low, n_classes=n_concepts, n_features=n_features,
n_processes=n_processes, feature_names=feature_names, class_names=concept_names)
try:
model_low.load(device)
print(f"Model {name_low} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_low.fit(train_data_low, train_sample_rate=train_sample_rate,
verbose=True, eval=False)
c_predicted, _ = model_low.predict(data_low, device=device)
c_predicted_train, c_predicted_test = c_predicted[trainval_index], c_predicted[test_index]
accuracy_low = model_low.evaluate(test_data_low, metric=metric, outputs=c_predicted_test, labels=c_test)
train_data_high = StructuredDataset(c_predicted_train, y_trainval, dataset_name, feature_names, concept_names)
test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
model_high = XBRLClassifier(name=name_high, n_classes=n_classes, n_features=n_concepts,
n_processes=n_processes, feature_names=concept_names, class_names=class_names)
try:
model_high.load(device)
print(f"Model {name_high} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_high.fit(train_data_high, train_sample_rate=train_sample_rate, verbose=True,
eval=False)
outputs, labels = model_high.predict(test_data_high, device=device)
accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
for i in trange(n_classes):
explanation = model_high.get_global_explanation(i, concept_names)
exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test, metric=expl_metric,
concept_names=concept_names)
exp_fidelity = 100
explanation_complexity = complexity(explanation, to_dnf=True)
explanations.append(explanation), exp_accuracies.append(exp_accuracy)
exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
elif method == 'DeepRed':
train_sample_rate = 0.1
model_low = XDeepRedClassifier(name=name_low, n_classes=n_concepts, n_features=n_features)
model_low.prepare_data(dataset_low, dataset_name + "low", seed, trainval_index, test_index, train_sample_rate)
try:
model_low.load(device)
print(f"Model {name_low} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_low.fit(epochs, train_sample_rate=train_sample_rate, verbose=True, eval=False)
c_predicted_train, _ = model_low.predict(train=True, device=device)
c_predicted_test, _ = model_low.predict(train=False, device=device)
accuracy_low = model_low.evaluate(train=False, outputs=c_predicted_test, labels=c_test, metric=metric)
model_low.finish()
c_predicted = torch.vstack((c_predicted_train, c_predicted_test))
y = torch.vstack((y_train, y_test))
dataset_high = StructuredDataset(c_predicted, y, dataset_name, feature_names, concept_names)
model_high = XDeepRedClassifier(n_classes, n_features, name=name_high)
model_high.prepare_data(dataset_high, dataset_name + "high", seed, trainval_index, test_index, train_sample_rate)
try:
model_high.load(device)
print(f"Model {name_high} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
                    model_high.fit(epochs, train_sample_rate=train_sample_rate, verbose=True, eval=False)
outputs, labels = model_high.predict(train=False, device=device)
accuracy = model_high.evaluate(train=False, metric=metric, outputs=outputs, labels=labels)
explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
print("Extracting rules...")
t = time.time()
for i in trange(n_classes):
explanation = model_high.get_global_explanation(i, concept_names, simplify=simplify)
exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test,
metric=expl_metric,
concept_names=concept_names, inequalities=True)
exp_predictions = torch.as_tensor(exp_predictions)
class_output = torch.as_tensor(outputs.argmax(dim=1) == i)
exp_fidelity = fidelity(exp_predictions, class_output, expl_metric)
explanation_complexity = complexity(explanation)
explanations.append(explanation), exp_accuracies.append(exp_accuracy)
exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
print(f"{i + 1}/{n_classes} Rules extracted. Time {time.time() - t}")
elif method == 'Psi':
# Network structures
l1_weight = 1e-4
hidden_neurons = [10, 5]
fan_in = 3
lr_psi = 1e-2
print("L1 weight", l1_weight)
print("Hidden neurons", hidden_neurons)
print("Fan in", fan_in)
print("Learning rate", lr_psi)
name_low = os.path.join(results_dir, f"{method}_{seed}_{l1_weight}_{hidden_neurons}_{fan_in}_{lr_psi}_low")
name_high = os.path.join(results_dir, f"{method}_{seed}_{l1_weight}_{hidden_neurons}_{fan_in}_{lr_psi}_high")
model_low = PsiNetwork(n_concepts, n_features, hidden_neurons, loss_low, l1_weight, name=name_low,
fan_in=fan_in)
try:
model_low.load(device)
print(f"Model {name_low} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_low.fit(train_data_low, val_data_low, epochs=epochs, l_r=lr_psi,
metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
c_predicted_train = model_low.predict(train_data_low, device=device)[0].detach().cpu()
c_predicted_val = model_low.predict(val_data_low, device=device)[0].detach().cpu()
c_predicted_test = model_low.predict(test_data_low, device=device)[0].detach().cpu()
accuracy_low = model_low.evaluate(test_data_low, outputs=c_predicted_test, labels=c_test, metric=metric)
train_data_high = StructuredDataset(c_predicted_train, y_train, dataset_name, feature_names, concept_names)
val_data_high = StructuredDataset(c_predicted_val, y_val, dataset_name, feature_names, concept_names)
test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
model_high = PsiNetwork(n_classes, n_concepts, hidden_neurons, loss_high, l1_weight,
name=name_high, fan_in=fan_in)
try:
model_high.load(device)
print(f"Model {name_high} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_high.fit(train_data_high, val_data_high, epochs=epochs, l_r=lr_psi,
metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
outputs, labels = model_high.predict(test_data_high, device=device)
accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
for i in trange(n_classes):
explanation = model_high.get_global_explanation(i, concept_names, simplify=simplify, x_train=c_predicted_train)
exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test,
metric=expl_metric, concept_names=concept_names)
exp_predictions = torch.as_tensor(exp_predictions)
class_output = torch.as_tensor(outputs.argmax(dim=1) == i)
exp_fidelity = fidelity(exp_predictions, class_output, expl_metric)
explanation_complexity = complexity(explanation, to_dnf=True)
explanations.append(explanation), exp_accuracies.append(exp_accuracy)
exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
elif method == 'General':
# Network structures
l1_weight = 1e-3
hidden_neurons = [100, 30, 10]
fan_in = 5
top_k_explanations = None
name_low = os.path.join(results_dir, f"{method}_{seed}_{l1_weight}_{hidden_neurons}_{fan_in}_low")
name_high = os.path.join(results_dir, f"{method}_{seed}_{l1_weight}_{hidden_neurons}_{fan_in}_high")
model_low = XGeneralNN(n_concepts, n_features, hidden_neurons, fan_in=n_features,
loss=loss_low, name=name_low, l1_weight=l1_weight)
try:
model_low.load(device)
print(f"Model {name_low} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_low.fit(train_data_low, val_data_low, epochs=epochs, l_r=l_r,
metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
c_predicted_train = model_low.predict(train_data_low, device=device)[0].detach().cpu()
c_predicted_val = model_low.predict(val_data_low, device=device)[0].detach().cpu()
c_predicted_test = model_low.predict(test_data_low, device=device)[0].detach().cpu()
accuracy_low = model_low.evaluate(test_data_low, outputs=c_predicted_test, labels=c_test, metric=metric)
train_data_high = StructuredDataset(c_predicted_train, y_train, dataset_name, feature_names, concept_names)
val_data_high = StructuredDataset(c_predicted_val, y_val, dataset_name, feature_names, concept_names)
test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
model_high = XGeneralNN(n_classes, n_concepts, hidden_neurons, fan_in=fan_in,
loss=loss_high, name=name_high, l1_weight=l1_weight)
try:
model_high.load(device)
print(f"Model {name_high} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_high.fit(train_data_high, val_data_high, epochs=epochs, l_r=l_r*1e-1,
metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
outputs, labels = model_high.predict(test_data_high, device=device)
accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
for i in trange(n_classes):
explanation = model_high.get_global_explanation(c_predicted_train, y_train, i,
top_k_explanations=top_k_explanations,
concept_names=concept_names, simplify=simplify,
metric=expl_metric, x_val=c_predicted_val,
y_val=y_val)
exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test,
metric=expl_metric, concept_names=concept_names)
exp_predictions = torch.as_tensor(exp_predictions)
class_output = torch.as_tensor(outputs.argmax(dim=1) == i)
exp_fidelity = fidelity(exp_predictions, class_output, expl_metric)
explanation_complexity = complexity(explanation, to_dnf=True)
explanations.append(explanation), exp_accuracies.append(exp_accuracy)
exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
elif method == 'Relu':
# Network structures
l1_weight = 1e-4
hidden_neurons = [100, 50, 30, 10]
dropout_rate = 0.01
print("l1 weight", l1_weight)
print("hidden neurons", hidden_neurons)
model_low = XReluNN(n_classes=n_concepts, n_features=n_features, name=name_low, dropout_rate=dropout_rate,
hidden_neurons=hidden_neurons, loss=loss_low, l1_weight=l1_weight*1e-2)
try:
model_low.load(device)
print(f"Model {name_low} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_low.fit(train_data_low, val_data_low, epochs=epochs, l_r=l_r,
metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
c_predicted_train = model_low.predict(train_data_low, device=device)[0].detach().cpu()
c_predicted_val = model_low.predict(val_data_low, device=device)[0].detach().cpu()
c_predicted_test = model_low.predict(test_data_low, device=device)[0].detach().cpu()
accuracy_low = model_low.evaluate(test_data_low, outputs=c_predicted_test, labels=c_test, metric=metric)
train_data_high = StructuredDataset(c_predicted_train, y_train, dataset_name, feature_names, concept_names)
val_data_high = StructuredDataset(c_predicted_val, y_val, dataset_name, feature_names, concept_names)
test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
model_high = XReluNN(n_classes=n_classes, n_features=n_concepts, name=name_high, dropout_rate=dropout_rate,
hidden_neurons=hidden_neurons, loss=loss_high, l1_weight=l1_weight)
try:
model_high.load(device)
print(f"Model {name_high} already trained")
except (ClassifierNotTrainedError, IncompatibleClassifierError):
model_high.fit(train_data_high, val_data_high, epochs=epochs, l_r=l_r * 1e-1,
metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
outputs, labels = model_high.predict(test_data_high, device=device)
accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
for i in trange(n_classes):
explanation = model_high.get_global_explanation(c_predicted_train, y_train, i,
top_k_explanations=top_k_explanations,
concept_names=concept_names, simplify=simplify,
metric=expl_metric, x_val=c_predicted_val, y_val=y_val)
exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test,
metric=expl_metric, concept_names=concept_names)
exp_predictions = torch.as_tensor(exp_predictions)
class_output = torch.as_tensor(outputs.argmax(dim=1) == i)
exp_fidelity = fidelity(exp_predictions, class_output, expl_metric)
explanation_complexity = complexity(explanation, to_dnf=True)
explanations.append(explanation), exp_accuracies.append(exp_accuracy)
exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
else:
raise NotImplementedError(f"{method} not implemented")
if model_high.time is None:
elapsed_time = time.time() - start_time
# In DeepRed and BRL the training is parallelized to speed up operation
if method == "DeepRed" or method == "BRL":
elapsed_time = elapsed_time * n_processes
model_high.time = elapsed_time
# To save the elapsed time and the explanations
model_high.save(device)
else:
elapsed_time = model_high.time
# To restore the original folder
if method == "DeepRed":
model_high.finish()
methods.append(method)
splits.append(seed)
model_explanations.append(explanations[0])
model_accuracies.append(accuracy)
elapsed_times.append(elapsed_time)
explanation_accuracies.append(np.mean(exp_accuracies))
explanation_fidelities.append(np.mean(exp_fidelities))
            explanation_complexities.append(np.mean(exp_complexities))  # api: numpy.mean
import nengo
import numpy as np
import scipy.linalg
from scipy.special import legendre
import yaml
import os
#-----------------------------------------------------------------------------------------------------------------------
class NetInfo():
def __init__(self):
config = yaml.load(open(os.path.join('SI_Toolkit_ApplicationSpecificFiles', 'config.yml'), 'r'),
Loader=yaml.FullLoader)
        # self.ctrl_inputs = config['training_default']['control_inputs']  # For some reason this reads the full vector of [ctrl_in, state_in]
self.ctrl_inputs = ['Q'] # I force it, could not find a way to only read 'Q'
self.state_inputs = config['training_default']['state_inputs']
self.inputs = config['training_default']['control_inputs']
self.inputs.extend(self.state_inputs)
self.outputs = config['training_default']['outputs']
self.net_type = 'SNN'
        # This part is forced to read from a previous non-SNN network (should be changed once properly integrated into SI_Toolkit)
self.path_to_normalization_info = './SI_Toolkit_ApplicationSpecificFiles/Experiments/Pretrained-RNN-1/Models/GRU-6IN-32H1-32H2-5OUT-0/NI_2021-06-29_12-02-03.csv'
#self.parent_net_name = 'Network trained from scratch'
self.parent_net_name = 'GRU-6IN-32H1-32H2-5OUT-0'
#self.path_to_net = None
self.path_to_net = './SI_Toolkit_ApplicationSpecificFiles/Experiments/Pretrained-RNN-1/Models/GRU-6IN-32H1-32H2-5OUT-0'
#-----------------------------------------------------------------------------------------------------------------------
def minmax(invalues, bound):
'''Scale the input to [-1, 1]'''
out = 2 * (invalues + bound) / (2*bound) - 1
return out
#-----------------------------------------------------------------------------------------------------------------------
def scale_datasets(data, scales):
'''Scale inputs in a list of datasets to -1, 1'''
# Scale all datasets to [-1, 1] based on the maximum value found above
bounds = []
for var, bound in scales.items():
bounds.append(bound)
for ii in range(len(bounds)):
data[:,ii] = minmax(data[:,ii], bounds[ii])
return data
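# Minimal usage sketch (hypothetical bounds, not from the original config): scale a
# two-column dataset; column order is assumed to match the insertion order of `scales`.
def _scale_datasets_sketch():
    demo = np.array([[0.0, -0.5],
                     [1.0,  0.5]])
    scales = {'Q': 1.0, 'angle': 0.5}            # assumed per-input bounds
    return scale_datasets(demo.copy(), scales)   # each column mapped into [-1, 1] via minmax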
#-----------------------------------------------------------------------------------------------------------------------
class Delay:
def __init__(self, dimensions, timesteps=50):
        self.history = np.zeros((timesteps, dimensions))  # api: numpy.zeros
"""
Some functions are adapted from https://github.com/dnddnjs/mujoco-pg
"""
import time
import math
import torch
import matplotlib.pyplot as plt
import numpy as np
import csv
import datetime
def time_change(time_init):
"""
    Convert a duration in seconds into an 'Xh Ym' / 'Ym' / 'Xs' formatted string.
"""
time_list = []
if time_init / 3600 > 1:
time_h = int(time_init / 3600)
time_m = int((time_init - time_h * 3600) / 60)
time_s = int(time_init - time_h * 3600 - time_m * 60)
time_list.append(str(time_h))
time_list.append('h ')
time_list.append(str(time_m))
time_list.append('m ')
elif time_init / 60 > 1:
time_m = int(time_init / 60)
time_s = int(time_init - time_m * 60)
time_list.append(str(time_m))
time_list.append('m ')
else:
time_s = int(time_init)
time_list.append(str(time_s))
time_list.append('s')
time_str = ''.join(time_list)
return time_str
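# Illustrative examples (not called anywhere): 3750 s -> '1h 2m ', 90 s -> '1m ',
# 42 s -> '42s'. Note that seconds are dropped once the duration exceeds one minute.
def _time_change_examples():
    return time_change(3750), time_change(90), time_change(42)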
def print_time_information(start, GLOBAL_EP, MAX_GLOBAL_EP, train_round=0,):
"""
    Log the elapsed time and the estimated remaining time.
:param start:
:param GLOBAL_EP:
:param MAX_GLOBAL_EP:
:return:
"""
process = GLOBAL_EP * 1.00 / MAX_GLOBAL_EP
if process > 1:
process = 1
end = time.time()
use_time = end - start
all_time = use_time / (process + 1e-5)
res_time = all_time - use_time
if res_time < 1:
res_time = 1
str_ues_time = time_change(use_time)
str_res_time = time_change(res_time)
print("Round:%s Percentage of progress:%.2f%% Used time:%s Rest time:%s "
% (train_round + 1, process * 100, str_ues_time, str_res_time))
def plt_reward_step(
GLOBAL_RUNNING_R=[],
GLOBAL_RUNNING_STEP=[],
title="mountaincar"):
plt.subplot(2, 1, 1)
    plt.title(title)  # figure title
plt.plot(np.arange(len(GLOBAL_RUNNING_R)),
             GLOBAL_RUNNING_R)  # plot the reward curve after training
plt.xlabel('episode')
plt.ylabel('reward')
plt.subplot(2, 1, 2)
plt.plot(np.arange(len(GLOBAL_RUNNING_STEP)),
             GLOBAL_RUNNING_STEP)  # plot the step-count curve after training
plt.xlabel('episode')
plt.ylabel('step')
plt.show()
time = datetime.datetime.now()
statistical_data = rank_and_average(GLOBAL_RUNNING_R)
with open('TEST_RECORD.csv', 'a', newline='') as myFile:
Writer = csv.writer(myFile, dialect='excel')
Writer.writerow([title] + [str(time)])
Writer.writerow(GLOBAL_RUNNING_STEP)
Writer.writerow(GLOBAL_RUNNING_R)
Writer.writerow(statistical_data)
print("write over")
def plot_error_bar(data):
num_of_methods = data.shape[0]
average = data[:, 2]
average = list(map(float, average))
std_error = data[:, 3]
std_error = list(map(float, std_error))
fig, ax = plt.subplots()
    # Draw error bars: the x axis lists the methods, the y axis shows the average,
    # and the error bar is the standard error.
ax.errorbar(np.arange(num_of_methods), average,
yerr=std_error,
fmt="o", color="blue", ecolor='grey', elinewidth=2, capsize=4)
ax.set_xticks(np.arange(num_of_methods))
    ax.set_xticklabels(['method1', 'method2'])  # set the x-axis tick labels; rotate them 45 degrees if they overlap
plt.title("Comparison")
plt.ylabel("Average Reward")
plt.show()
def rank_and_average(GLOBAL_RUNNING_R=[]):
minInRecord = min(GLOBAL_RUNNING_R)
maxInRecord = max(GLOBAL_RUNNING_R)
average = np.mean(GLOBAL_RUNNING_R)
std_error = np.sqrt(np.var(GLOBAL_RUNNING_R)) / \
        np.sqrt(np.size(GLOBAL_RUNNING_R))  # api: numpy.size
import numpy as np
import scipy.optimize as optimization
import matplotlib.pyplot as plt
try:
from submm_python_routines.KIDs import calibrate
except:
from KIDs import calibrate
from numba import jit # to get working on python 2 I had to downgrade llvmlite pip install llvmlite==0.31.0
# module for fitting resonance curves for kinetic inductance detectors.
# written by <NAME> 12/21/16
# for example see test_fit.py in this directory
# To Do
# I think the error analysis on fit_nonlinear_iq_with_err probably needs some work
# add in step-by-step fitting i.e. first amplitude normalization, then cable delay, then i0,q0 subtraction, then phase rotation, then the rest of the fit.
# need to have a fit option that just specifies tau because that never really changes for your cryostat
# Change log
# JDW 2017-08-17 added in a keyword/function "amp_var" to allow gain variation to be taken out before fitting
# JDW 2017-08-30 added in fitting for the magnitude of resonators i.e. not in iq space
# JDW 2018-03-05 added a more clever function for guessing x0 for fits
# JDW 2018-08-23 added more clever guessing for resonators with large phi into separate guess functions
J=np.exp(2j*np.pi/3)
Jc=1/J
@jit(nopython=True)
def cardan(a,b,c,d):
'''
    Analytical cubic root finding; fast (using numba gives roughly a 10x speed up).
    Returns only the largest real root.
'''
u=np.empty(2,np.complex128)
z0=b/3/a
a2,b2 = a*a,b*b
p=-b2/3/a2 +c/a
q=(b/27*(2*b2/a2-9*c/a)+d)/a
D=-4*p*p*p-27*q*q
r=np.sqrt(-D/27+0j)
u=((-q-r)/2)**(1/3.)#0.33333333333333333333333
v=((-q+r)/2)**(1/3.)#0.33333333333333333333333
w=u*v
w0=np.abs(w+p/3)
w1=np.abs(w*J+p/3)
w2=np.abs(w*Jc+p/3)
if w0<w1:
if w2<w0 : v*=Jc
elif w2<w1 : v*=Jc
else: v*=J
roots = np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
#print(roots)
where_real = np.where(np.abs(np.imag(roots)) < 1e-15)
#if len(where_real)>1: print(len(where_real))
#print(D)
if D>0: return np.max(np.real(roots)) # three real roots
else: return np.real(roots[np.argsort(np.abs(np.imag(roots)))][0]) #one real root get the value that has smallest imaginary component
#return np.max(np.real(roots[where_real]))
#return np.asarray((u+v-z0, u*J+v*Jc-z0,u*Jc+v*J-z0))
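# Sanity-check sketch (not part of the original module): compare the analytic root
# against numpy's companion-matrix solver for one of the bifurcation cubics
# 4y^3 - 4*yg*y^2 + y - (yg + a) solved repeatedly below; the two values should agree.
def _check_cardan(yg=0.3, a=0.5):
    analytic = cardan(4.0, -4.0*yg, 1.0, -(yg + a))
    roots = np.roots((4.0, -4.0*yg, 1.0, -(yg + a)))
    real_roots = np.real(roots[np.abs(np.imag(roots)) < 1e-10])
    return analytic, np.max(real_roots)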
# function to descript the magnitude S21 of a non linear resonator
@jit(nopython=True)
def nonlinear_mag(x,fr,Qr,amp,phi,a,b0,b1,flin):
'''
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # a is the non-linearity parameter; bifurcation occurs at a = 0.77
    # b0 DC level of s21 away from the resonator
    # b1 frequency dependent gain variation
    # flin is probably the frequency of the resonator when a = 0
    #
    # This is based on fitting code from MUSIC
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parentheses and all other terms are familiar to me
    # but I am not sure where the last term comes from, though it does seem to be important for fitting
    #
    #                          /        (j phi)           (j phi)      \  2
    # |S21|^2 = (b0+b1 x_lin)* | 1 -amp*e^        + amp*(e^       -1)  |^
    #                          |     ------------        ----          |
    #                          \       (1+ 2jy)           2            /
    #
    # where the nonlinearity of y is described by the following equation taken from Response of superconducting microresonators
    # with nonlinear kinetic inductance
    # yg = y + a/(1+y^2)  where yg = Qr*xg and xg = (f-fr)/fr
    #
'''
xlin = (x - flin)/flin
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
#print(roots)
#roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#where_real = np.where(np.imag(roots) == 0)
#where_real = np.where(np.abs(np.imag(roots)) < 1e-10) #analytic version has some floating point error accumulation
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))#np.max(np.real(roots[where_real]))
z = (b0 +b1*xlin)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
return z
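# Usage sketch with made-up (not fitted) parameter values: evaluate |S21|^2 over a
# narrow sweep around a 1 GHz resonator; the minimum should sit close to fr, shifted
# slightly by the nonlinearity parameter a.
def _nonlinear_mag_sketch():
    f = np.linspace(0.9995e9, 1.0005e9, 1001)
    # arguments: fr, Qr, amp, phi, a, b0, b1, flin
    s21_sq = nonlinear_mag(f, 1e9, 10000., 0.5, 0.1, 0.2, 1.0, 0.0, 1e9)
    return f[np.argmin(s21_sq)]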
@jit(nopython=True)
def linear_mag(x,fr,Qr,amp,phi,b0):
'''
    # simpler version for quicker fitting when applicable
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # b0 DC level of s21 away from the resonator
    #
    # This is based on fitting code from MUSIC
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parentheses and all other terms are familiar to me
    # but I am not sure where the last term comes from, though it does seem to be important for fitting
    #
    #                 /        (j phi)           (j phi)      \  2
    # |S21|^2 = (b0)* | 1 -amp*e^        + amp*(e^       -1)  |^
    #                 |     ------------        ----          |
    #                 \      (1+ 2jxg*Qr)        2            /
    #
    # no y, just xg
    # i.e. with no nonlinear kinetic inductance
'''
if not np.isscalar(fr): #vectorize
x = np.reshape(x,(x.shape[0],1,1,1,1,1))
xg = (x-fr)/fr
z = (b0)*np.abs(1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*xg*Qr) + amp/2.*(np.exp(1.0j*phi) -1.0))**2
return z
# function to describe the i q loop of a nonlinear resonator
@jit(nopython=True)
def nonlinear_iq(x,fr,Qr,amp,phi,a,i0,q0,tau,f0):
'''
    # x is the frequencies your iq sweep covers
    # fr is the center frequency of the resonator
    # Qr is the quality factor of the resonator
    # amp is Qr/Qc
    # phi is a rotation parameter for an impedance mismatch between the resonator and the readout system
    # a is the non-linearity parameter; bifurcation occurs at a = 0.77
    # i0
    # q0 these are constants that describe an overall phase rotation of the iq loop + a DC gain offset
    # tau cable delay
    # f0 is also the center frequency; not sure why we include this as a secondary parameter, it should be the same as fr
    #
    # This is based on fitting code from MUSIC
    #
    # The idea is we are producing a model that is described by the equation below
    # the first two terms in the large parentheses and all other terms are familiar to me
    # but I am not sure where the last term comes from, though it does seem to be important for fitting
    #
    #             (-j 2 pi deltaf tau)  /        (j phi)           (j phi)      \
    #  (i0+j*q0)*e^                   * | 1 -amp*e^        + amp*(e^       -1)  |
    #                                   |     ------------        ----          |
    #                                   \       (1+ 2jy)           2            /
    #
    # where the nonlinearity of y is described by the following equation taken from Response of superconducting microresonators
    # with nonlinear kinetic inductance
    # yg = y + a/(1+y^2)  where yg = Qr*xg and xg = (f-fr)/fr
    #
'''
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
#find the roots of the y equation above
for i in range(0,x.shape[0]):
# 4y^3+ -4yg*y^2+ y -(yg+a)
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#roots = np.roots((16.,-16.*yg[i],8.,-8.*yg[i]+4*a*yg[i]/Qr-4*a,1.,-yg[i]+a*yg[i]/Qr-a+a**2/Qr)) #more accurate version that doesn't seem to change the fit at al
# only care about real roots
#where_real = np.where(np.imag(roots) == 0)
#y[i] = np.max(np.real(roots[where_real]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
return z
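# Usage sketch with made-up (not fitted) parameter values: the complex model traces
# the resonance loop in the IQ plane for the same kind of sweep as above.
def _nonlinear_iq_sketch():
    f = np.linspace(0.9995e9, 1.0005e9, 1001)
    # arguments: fr, Qr, amp, phi, a, i0, q0, tau, f0
    z = nonlinear_iq(f, 1e9, 10000., 0.5, 0.1, 0.2, 1.0, 0.0, 3e-7, 1e9)
    return np.real(z), np.imag(z)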
def nonlinear_iq_for_fitter(x,fr,Qr,amp,phi,a,i0,q0,tau,f0,**keywords):
'''
    when using a fitter that can't handle complex numbers,
    the real and imaginary components need to be returned separately
'''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
print("hello")
else:
use_given_tau = False
deltaf = (x - f0)
xg = (x-fr)/fr
yg = Qr*xg
y = np.zeros(x.shape[0])
for i in range(0,x.shape[0]):
#roots = np.roots((4.0,-4.0*yg[i],1.0,-(yg[i]+a)))
#where_real = np.where(np.imag(roots) == 0)
#y[i] = np.max(np.real(roots[where_real]))
y[i] = cardan(4.0,-4.0*yg[i],1.0,-(yg[i]+a))
z = (i0 +1.j*q0)* np.exp(-1.0j* 2* np.pi *deltaf*tau) * (1.0 - amp*np.exp(1.0j*phi)/ (1.0 +2.0*1.0j*y) + amp/2.*(np.exp(1.0j*phi) -1.0))
real_z = np.real(z)
imag_z = np.imag(z)
return np.hstack((real_z,imag_z))
def brute_force_linear_mag_fit(x,z,ranges,n_grid_points,error = None, plot = False,**keywords):
'''
    x frequencies Hz
    z complex or abs of s21
    ranges is the ranges for each parameter i.e. np.asarray(([f_low,Qr_low,amp_low,phi_low,b0_low],[f_high,Qr_high,amp_high,phi_high,b0_high]))
    n_grid_points is how finely to sample each parameter space;
    this can be very slow for n > 10 --
    an increase by a factor of 2 takes 2**5 times longer.
    To marginalize, minimize over the unwanted axes of sum_dev,
    e.g. for fr: np.min(np.min(np.min(np.min(fit['sum_dev'],axis = 4),axis = 3),axis = 2),axis = 1)
'''
if error is None:
error = np.ones(len(x))
fs = np.linspace(ranges[0][0],ranges[1][0],n_grid_points)
Qrs = np.linspace(ranges[0][1],ranges[1][1],n_grid_points)
amps = np.linspace(ranges[0][2],ranges[1][2],n_grid_points)
phis = np.linspace(ranges[0][3],ranges[1][3],n_grid_points)
b0s = np.linspace(ranges[0][4],ranges[1][4],n_grid_points)
evaluated_ranges = np.vstack((fs,Qrs,amps,phis,b0s))
a,b,c,d,e = np.meshgrid(fs,Qrs,amps,phis,b0s,indexing = "ij") #always index ij
evaluated = linear_mag(x,a,b,c,d,e)
data_values = np.reshape(np.abs(z)**2,(abs(z).shape[0],1,1,1,1,1))
error = np.reshape(error,(abs(z).shape[0],1,1,1,1,1))
sum_dev = np.sum(((np.sqrt(evaluated)-np.sqrt(data_values))**2/error**2),axis = 0) # comparing in magnitude space rather than magnitude squared
min_index = np.where(sum_dev == np.min(sum_dev))
index1 = min_index[0][0]
index2 = min_index[1][0]
index3 = min_index[2][0]
index4 = min_index[3][0]
index5 = min_index[4][0]
fit_values = np.asarray((fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5]))
fit_values_names = ('f0','Qr','amp','phi','b0')
fit_result = linear_mag(x,fs[index1],Qrs[index2],amps[index3],phis[index4],b0s[index5])
marginalized_1d = np.zeros((5,n_grid_points))
marginalized_1d[0,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 1)
marginalized_1d[1,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2),axis = 0)
marginalized_1d[2,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1),axis = 0)
marginalized_1d[3,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1),axis = 0)
marginalized_1d[4,:] = np.min(np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1),axis = 0)
marginalized_2d = np.zeros((5,5,n_grid_points,n_grid_points))
#0 _
#1 x _
#2 x x _
#3 x x x _
#4 x x x x _
# 0 1 2 3 4
marginalized_2d[0,1,:] = marginalized_2d[1,0,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 2)
marginalized_2d[2,0,:] = marginalized_2d[0,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 1)
marginalized_2d[2,1,:] = marginalized_2d[1,2,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 3),axis = 0)
marginalized_2d[3,0,:] = marginalized_2d[0,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 1)
marginalized_2d[3,1,:] = marginalized_2d[1,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 2),axis = 0)
marginalized_2d[3,2,:] = marginalized_2d[2,3,:] = np.min(np.min(np.min(sum_dev,axis = 4),axis = 1),axis = 0)
marginalized_2d[4,0,:] = marginalized_2d[0,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 1)
marginalized_2d[4,1,:] = marginalized_2d[1,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 2),axis = 0)
marginalized_2d[4,2,:] = marginalized_2d[2,4,:] = np.min(np.min(np.min(sum_dev,axis = 3),axis = 1),axis = 0)
marginalized_2d[4,3,:] = marginalized_2d[3,4,:] = np.min(np.min(np.min(sum_dev,axis = 2),axis = 1),axis = 0)
if plot:
levels = [2.3,4.61] #delta chi squared two parameters 68 90 % confidence
fig_fit = plt.figure(-1)
axs = fig_fit.subplots(5, 5)
for i in range(0,5): # y starting from top
for j in range(0,5): #x starting from left
if i > j:
#plt.subplot(5,5,i+1+5*j)
#axs[i, j].set_aspect('equal', 'box')
extent = [evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1],evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1]]
axs[i,j].imshow(marginalized_2d[i,j,:]-np.min(sum_dev),extent =extent,origin = 'lower', cmap = 'jet')
axs[i,j].contour(evaluated_ranges[j],evaluated_ranges[i],marginalized_2d[i,j,:]-np.min(sum_dev),levels = levels,colors = 'white')
axs[i,j].set_ylim(evaluated_ranges[i,0],evaluated_ranges[i,n_grid_points-1])
axs[i,j].set_xlim(evaluated_ranges[j,0],evaluated_ranges[j,n_grid_points-1])
axs[i,j].set_aspect((evaluated_ranges[j,0]-evaluated_ranges[j,n_grid_points-1])/(evaluated_ranges[i,0]-evaluated_ranges[i,n_grid_points-1]))
if j == 0:
axs[i, j].set_ylabel(fit_values_names[i])
if i == 4:
axs[i, j].set_xlabel("\n"+fit_values_names[j])
if i<4:
axs[i,j].get_xaxis().set_ticks([])
if j>0:
axs[i,j].get_yaxis().set_ticks([])
elif i < j:
fig_fit.delaxes(axs[i,j])
for i in range(0,5):
#axes.subplot(5,5,i+1+5*i)
axs[i,i].plot(evaluated_ranges[i,:],marginalized_1d[i,:]-np.min(sum_dev))
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*1.,color = 'k')
axs[i,i].plot(evaluated_ranges[i,:],np.ones(len(evaluated_ranges[i,:]))*2.7,color = 'k')
axs[i,i].yaxis.set_label_position("right")
axs[i,i].yaxis.tick_right()
axs[i,i].xaxis.set_label_position("top")
axs[i,i].xaxis.tick_top()
axs[i,i].set_xlabel(fit_values_names[i])
#axs[0,0].set_ylabel(fit_values_names[0])
#axs[4,4].set_xlabel(fit_values_names[4])
axs[4,4].xaxis.set_label_position("bottom")
axs[4,4].xaxis.tick_bottom()
#make a dictionary to return
fit_dict = {'fit_values': fit_values,'fit_values_names':fit_values_names, 'sum_dev': sum_dev, 'fit_result': fit_result,'marginalized_2d':marginalized_2d,'marginalized_1d':marginalized_1d,'evaluated_ranges':evaluated_ranges}#, 'x0':x0, 'z':z}
return fit_dict
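# A minimal sketch (not part of the original module): the chained np.min calls
# recommended in the docstring above are equivalent to a single np.min over a
# tuple of axes. The 5-D array below is synthetic stand-in data, not a real
# sum_dev from a fit.
def _example_marginalize_sum_dev():
    demo_sum_dev = np.random.rand(6, 6, 6, 6, 6)
    chained = np.min(np.min(np.min(np.min(demo_sum_dev, axis=4), axis=3), axis=2), axis=1)
    direct = np.min(demo_sum_dev, axis=(1, 2, 3, 4))
    assert np.allclose(chained, direct)
    return direct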
# function for fitting an iq sweep with the above equation
def fit_nonlinear_iq(x,z,**keywords):
'''
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude; useful when the transfer function of the cryostat is not flat
    # tau --- forces tau to a specific value
    # tau_guess --- fixes the guess for tau without having to specify all of x0
'''
if ('tau' in keywords):
use_given_tau = True
tau = keywords['tau']
else:
use_given_tau = False
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),50,.01,-np.pi,0,-np.inf,-np.inf,0,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
#fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear(x,z,verbose = True)
print(x0)
if ('fr_guess' in keywords):
x0[0] = keywords['fr_guess']
if ('tau_guess' in keywords):
x0[7] = keywords['tau_guess']
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
if use_given_tau == True:
del bounds[0][7]
del bounds[1][7]
del x0[7]
fit = optimization.curve_fit(lambda x_lamb,a,b,c,d,e,f,g,h: nonlinear_iq_for_fitter(x_lamb,a,b,c,d,e,f,g,tau,h), x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],tau,fit[0][7])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],tau,x0[7])
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
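# Usage sketch (not part of the original module) for fit_nonlinear_iq above.
# `freqs` and `s21` are hypothetical arrays: sweep frequencies in Hz and the
# matching complex transmission. Passing tau holds the cable delay fixed, as
# handled by the use_given_tau branch above.
def _example_fit_nonlinear_iq(freqs, s21):
    result = fit_nonlinear_iq(freqs, s21)                  # default guess and bounds
    result_fixed_tau = fit_nonlinear_iq(freqs, s21, tau=3 * 10**-9)
    return result['fit'][0], result_fixed_tau['fit'][0]    # best-fit parameter vectors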
def fit_nonlinear_iq_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
    # same as the above function but takes the fine and gain scans separately
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude; useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),500.,.01,-np.pi,0,-np.inf,-np.inf,1*10**-9,np.min(fine_x)],[np.max(fine_x),1000000,1,np.pi,5,np.inf,np.inf,1*10**-6,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
#fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.mean(np.real(z)),np.mean(np.imag(z)),3*10**-7,fr_guess]
x0 = guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
#print(x0)
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
if use_err:
z_err_stacked = np.hstack((np.real(z_err),np.imag(z_err)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,sigma = z_err_stacked,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
if use_err:
#only do it for fine data
#red_chi_sqr = np.sum(z_stacked-np.hstack((np.real(fit_result),np.imag(fit_result))))**2/z_err_stacked**2)/(len(z_stacked)-8.)
#only do it for fine data
red_chi_sqr = np.sum((np.hstack((np.real(fine_z),np.imag(fine_z)))-np.hstack((np.real(fit_result[0:len(fine_z)]),np.imag(fit_result[0:len(fine_z)]))))**2/np.hstack((np.real(fine_z_err),np.imag(fine_z_err)))**2)/(len(fine_z)*2.-8.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
# same function but fits twice so that it can get errors and a proper covariance matrix out
def fit_nonlinear_iq_with_err(x,z,**keywords):
'''
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude; useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),2000,.01,-np.pi,0,-5,-5,1*10**-9,np.min(x)],[np.max(x),200000,1,np.pi,5,5,5,1*10**-6,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
x0 = guess_x0_iq_nonlinear(x,z)
#Amplitude normalization?
do_amp_norm = 0
if ('amp_norm' in keywords):
amp_norm = keywords['amp_norm']
if amp_norm == True:
do_amp_norm = 1
elif amp_norm == False:
do_amp_norm = 0
else:
print("please specify amp_norm as True or False")
if do_amp_norm == 1:
z = amplitude_normalization(x,z)
z_stacked = np.hstack((np.real(z),np.imag(z)))
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
fit_result_stacked = nonlinear_iq_for_fitter(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
# get error
var = np.sum((z_stacked-fit_result_stacked)**2)/(z_stacked.shape[0] - 1)
err = np.ones(z_stacked.shape[0])*np.sqrt(var)
# refit
fit = optimization.curve_fit(nonlinear_iq_for_fitter, x, z_stacked,x0,err,bounds = bounds)
fit_result = nonlinear_iq(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7],fit[0][8])
x0_result = nonlinear_iq(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7],x0[8])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
# function for fitting an iq sweep with the above equation
def fit_nonlinear_mag(x,z,**keywords):
'''
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude; useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(x)],[np.max(x),200000,1,np.pi,5,np.inf,np.inf,np.max(x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
fr_guess = x[np.argmin(np.abs(z))]
#x0 = [fr_guess,10000.,0.5,0,0,np.abs(z[0])**2,np.abs(z[0])**2,fr_guess]
x0 = guess_x0_mag_nonlinear(x,z,verbose = True)
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#make a dictionary to return
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z}
return fit_dict
def fit_nonlinear_mag_sep(fine_x,fine_z,gain_x,gain_z,**keywords):
'''
    # same as above but the fine and gain scans are provided separately
    # keywords are
    # bounds ---- a 2d tuple of the low and high values to bound the problem by
    # x0 --- initial guess for the fit; this can be very important because the least-squares space over all the parameters is complex
    # amp_norm --- do a normalization for variable amplitude; useful when the transfer function of the cryostat is not flat
'''
if ('bounds' in keywords):
bounds = keywords['bounds']
else:
#define default bounds
print("default bounds used")
bounds = ([np.min(fine_x),100,.01,-np.pi,0,-np.inf,-np.inf,np.min(fine_x)],[np.max(fine_x),1000000,100,np.pi,5,np.inf,np.inf,np.max(fine_x)])
if ('x0' in keywords):
x0 = keywords['x0']
else:
#define default intial guess
print("default initial guess used")
x0 = guess_x0_mag_nonlinear_sep(fine_x,fine_z,gain_x,gain_z)
if (('fine_z_err' in keywords) & ('gain_z_err' in keywords)):
use_err = True
fine_z_err = keywords['fine_z_err']
gain_z_err = keywords['gain_z_err']
else:
use_err = False
#stack the scans for curvefit
x = np.hstack((fine_x,gain_x))
z = np.hstack((fine_z,gain_z))
if use_err:
z_err = np.hstack((fine_z_err,gain_z_err))
        z_err = np.sqrt(4*np.real(z_err)**2*np.real(z)**2+4*np.imag(z_err)**2*np.imag(z)**2) # propagation of errors; cross term left out
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,sigma = z_err,bounds = bounds)
else:
fit = optimization.curve_fit(nonlinear_mag, x, np.abs(z)**2 ,x0,bounds = bounds)
fit_result = nonlinear_mag(x,fit[0][0],fit[0][1],fit[0][2],fit[0][3],fit[0][4],fit[0][5],fit[0][6],fit[0][7])
x0_result = nonlinear_mag(x,x0[0],x0[1],x0[2],x0[3],x0[4],x0[5],x0[6],x0[7])
#compute reduced chi squared
print(len(z))
if use_err:
#red_chi_sqr = np.sum((np.abs(z)**2-fit_result)**2/z_err**2)/(len(z)-7.)
# only use fine scan for reduced chi squared.
red_chi_sqr = np.sum((np.abs(fine_z)**2-fit_result[0:len(fine_z)])**2/z_err[0:len(fine_z)]**2)/(len(fine_z)-7.)
#make a dictionary to return
if use_err:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x,'red_chi_sqr':red_chi_sqr}
else:
fit_dict = {'fit': fit, 'fit_result': fit_result, 'x0_result': x0_result, 'x0':x0, 'z':z,'fit_freqs':x}
return fit_dict
def amplitude_normalization(x,z):
'''
    # normalize the amplitude variation; requires a gain scan
    # flag frequencies to use in amplitude normalization
'''
index_use = np.where(np.abs(x-np.median(x))>100000) #100kHz away from resonator
poly = np.polyfit(x[index_use],np.abs(z[index_use]),2)
poly_func = np.poly1d(poly)
normalized_data = z/poly_func(x)*np.median(np.abs(z[index_use]))
return normalized_data
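# Sketch (not part of the original module) demonstrating amplitude_normalization
# on synthetic data: a quadratic gain ripple is imposed on a flat response and
# divided back out. The sweep spans +/-500 kHz so points more than 100 kHz from
# the median frequency exist, as required by the index selection above.
def _example_amplitude_normalization():
    freqs = 1.0 * 10**9 + np.linspace(-5 * 10**5, 5 * 10**5, 201)
    ripple = 1.0 + 2.0 * 10**-13 * (freqs - np.median(freqs))**2
    s21 = ripple * np.exp(1j * 0.3)
    return amplitude_normalization(freqs, s21)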
def amplitude_normalization_sep(gain_x,gain_z,fine_x,fine_z,stream_x,stream_z):
'''
    # normalize the amplitude variation; requires a gain scan
    # uses the gain scan to normalize; does not use the fine scan
    # flag frequencies to use in amplitude normalization
'''
index_use = np.where(np.abs(gain_x-np.median(gain_x))>100000) #100kHz away from resonator
poly = np.polyfit(gain_x[index_use],np.abs(gain_z[index_use]),2)
poly_func = np.poly1d(poly)
poly_data = poly_func(gain_x)
normalized_gain = gain_z/poly_data*np.median(np.abs(gain_z[index_use]))
normalized_fine = fine_z/poly_func(fine_x)*np.median(np.abs(gain_z[index_use]))
normalized_stream = stream_z/poly_func(stream_x)*np.median(np.abs(gain_z[index_use]))
amp_norm_dict = {'normalized_gain':normalized_gain,
'normalized_fine':normalized_fine,
'normalized_stream':normalized_stream,
'poly_data':poly_data}
return amp_norm_dict
def guess_x0_iq_nonlinear(x,z,verbose = False):
'''
    # this is less robust than guess_x0_iq_nonlinear_sep
    # below; it is recommended to use that instead
    # make sure data is sorted from low to high frequency
'''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = np.argmin(np.abs(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = np.argmin(np.abs(right))+fr_guess_index_fine
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4 # polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
    # might be able to guess this from the ratio of the min and max distance between iq points in the fine sweep
a_guess = 0
#i0 and iq guess
    if np.max(np.abs(fine_z))==np.max(np.abs(z)): # if the resonator has an impedance mismatch rotation that makes the fine scan greater than the cable delay
i0_guess = np.real(fine_z[np.argmax(np.abs(fine_z))])
q0_guess = np.imag(fine_z[np.argmax(np.abs(fine_z))])
else:
i0_guess = (np.real(fine_z[0])+np.real(fine_z[-1]))/2.
q0_guess = (np.imag(fine_z[0])+np.imag(fine_z[-1]))/2.
    # cable delay guess tau
#y = mx +b
#m = (y2 - y1)/(x2-x1)
#b = y-mx
if len(gain_z)>1: #is there a gain scan?
m = (gain_phase - np.roll(gain_phase,1))/(gain_x-np.roll(gain_x,1))
b = gain_phase -m*gain_x
m_best = np.median(m[~np.isnan(m)])
tau_guess = m_best/(2*np.pi)
else:
tau_guess = 3*10**-9
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("i0 guess = %.2f" %i0_guess)
print("q0 guess = %.2f" %q0_guess)
print("tau guess = %.2f x 10^-7" %(tau_guess/10**-7))
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,i0_guess,q0_guess,tau_guess,fr_guess]
return x0
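# Sketch (not part of the original module): the Q guess above is just the
# resonance frequency divided by the half-power (3 dB) bandwidth, so a 1 GHz
# resonator with a 10 kHz wide dip gives Q of about 1e5. Q_guess_Hz is that
# bandwidth in Hz despite its name.
def _example_q_guess():
    fr_guess = 1.0 * 10**9
    Q_guess_Hz = 10.0 * 10**3
    return fr_guess / Q_guess_Hz  # 1e5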
def guess_x0_mag_nonlinear(x,z,verbose = False):
'''
    # this is less robust than guess_x0_mag_nonlinear_sep
    # below; it is recommended to use that instead
    # make sure data is sorted from low to high frequency
'''
sort_index = np.argsort(x)
x = x[sort_index]
z = z[sort_index]
#extract just fine data
#this will probably break if there is no fine scan
df = np.abs(x-np.roll(x,1))
fine_df = np.min(df[np.where(df != 0)])
fine_z_index = np.where(df<fine_df*1.1)
fine_z = z[fine_z_index]
fine_x = x[fine_z_index]
#extract the gain scan
gain_z_index = np.where(df>fine_df*1.1)
gain_z = z[gain_z_index]
gain_x = x[gain_z_index]
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(z))
#fr_guess = x[fr_guess_index]
fr_guess_index_fine = np.argmin(np.abs(fine_z))
if fr_guess_index_fine == 0:
fr_guess_index_fine = len(fine_x)//2
elif fr_guess_index_fine == (len(fine_x)-1):
fr_guess_index_fine = len(fine_x)//2
fr_guess = fine_x[fr_guess_index_fine]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index_fine:-1]
left = half_distance[0:fr_guess_index_fine]
right_index = np.argmin(np.abs(right))+fr_guess_index_fine
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(z)))-np.min(20*np.log10(np.abs(z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4 # polynomial fit to amp versus depth
#guess impedance rotation phi
phi_guess = 0
#guess non-linearity parameter
    # might be able to guess this from the ratio of the min and max distance between iq points in the fine sweep
a_guess = 0
#b0 and b1 guess
if len(gain_z)>1:
xlin = (gain_x - fr_guess)/fr_guess
b1_guess = (np.abs(gain_z)[-1]**2-np.abs(gain_z)[0]**2)/(xlin[-1]-xlin[0])
else:
xlin = (fine_x - fr_guess)/fr_guess
b1_guess = (np.abs(fine_z)[-1]**2-np.abs(fine_z)[0]**2)/(xlin[-1]-xlin[0])
b0_guess = np.median(np.abs(gain_z)**2)
if verbose == True:
print("fr guess = %.2f MHz" %(fr_guess/10**6))
print("Q guess = %.2f kHz, %.1f" % ((Q_guess_Hz/10**3),Q_guess))
print("amp guess = %.2f" %amp_guess)
print("phi guess = %.2f" %phi_guess)
print("b0 guess = %.2f" %b0_guess)
print("b1 guess = %.2f" %b1_guess)
x0 = [fr_guess,Q_guess,amp_guess,phi_guess,a_guess,b0_guess,b1_guess,fr_guess]
return x0
def guess_x0_iq_nonlinear_sep(fine_x,fine_z,gain_x,gain_z,verbose = False):
'''
    # this is the same as guess_x0_iq_nonlinear except that it
    # takes the fine scan and the gain scan as separate variables
    # this runs into fewer issues when trying to sort out which part of
    # the data is fine and which part is gain for the guessing
    # make sure data is sorted from low to high frequency
'''
#gain phase
gain_phase = np.arctan2(np.real(gain_z),np.imag(gain_z))
#guess f0
fr_guess_index = np.argmin(np.abs(fine_z))
# below breaks if there is not a right and left side in the fine scan
if fr_guess_index == 0:
fr_guess_index = len(fine_x)//2
elif fr_guess_index == (len(fine_x)-1):
fr_guess_index = len(fine_x)//2
fr_guess = fine_x[fr_guess_index]
#guess Q
mag_max = np.max(np.abs(fine_z)**2)
mag_min = np.min(np.abs(fine_z)**2)
mag_3dB = (mag_max+mag_min)/2.
half_distance = np.abs(fine_z)**2-mag_3dB
right = half_distance[fr_guess_index:-1]
left = half_distance[0:fr_guess_index]
right_index = np.argmin(np.abs(right))+fr_guess_index
left_index = np.argmin(np.abs(left))
Q_guess_Hz = fine_x[right_index]-fine_x[left_index]
Q_guess = fr_guess/Q_guess_Hz
#guess amp
d = np.max(20*np.log10(np.abs(gain_z)))-np.min(20*np.log10(np.abs(fine_z)))
    amp_guess = 0.0037848547850284574+0.11096782437821565*d-0.0055208783469291173*d**2+0.00013900471000261687*d**3+-1.3994861426891861e-06*d**4 # polynomial fit to amp versus depth
#guess impedance rotation phi
#phi_guess = 0
#guess impedance rotation phi
#fit a circle to the iq loop
xc, yc, R, residu = calibrate.leastsq_circle(np.real(fine_z),np.imag(fine_z))
    # compute angle between (off_res,off_res),(0,0) and (off_res,off_res),(xc,yc) of the fitted circle
    off_res_i,off_res_q = (np.real(fine_z[0])
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import math
import numpy as np
from singa import net
from singa import layer
from singa import tensor
from singa import loss
layer.engine = 'singacpp'
# net.verbose = True
class TestFeedForwardNet(unittest.TestCase):
def test_single_input_output(self):
ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
ffn.add(layer.Activation('relu1', input_sample_shape=(2,)))
ffn.add(layer.Activation('relu2'))
x = np.array([[-1, 1], [1, 1], [-1, -2]], dtype=np.float32)
x = tensor.from_numpy(x)
y = tensor.Tensor((3,))
y.set_value(0)
out, _ = ffn.evaluate(x, y)
self.assertAlmostEqual(out * 3,
- math.log(1.0/(1+math.exp(1))) -
math.log(0.5) - math.log(0.5),
5)
def test_mult_inputs(self):
ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
s1 = ffn.add(layer.Activation('relu1', input_sample_shape=(2,)), [])
s2 = ffn.add(layer.Activation('relu2', input_sample_shape=(2,)), [])
ffn.add(layer.Merge('merge', input_sample_shape=(2,)), [s1, s2])
x1 = tensor.Tensor((2, 2))
x1.set_value(1.1)
x2 = tensor.Tensor((2, 2))
x2.set_value(0.9)
out = ffn.forward(False, {'relu1': x1, 'relu2': x2})
out = tensor.to_numpy(out)
        self.assertAlmostEqual(np.average(out)
import numpy as np
import pyart
import scipy.ndimage.filters
def J_function(winds, parameters):
"""
Calculates the total cost function. This typically does not need to be
called directly as get_dd_wind_field is a wrapper around this function and
:py:func:`pydda.cost_functions.grad_J`.
In order to add more terms to the cost function, modify this
function and :py:func:`pydda.cost_functions.grad_J`.
Parameters
----------
winds: 1-D float array
The wind field, flattened to 1-D for f_min. The total size of the
array will be a 1D array of 3*nx*ny*nz elements.
parameters: DDParameters
The parameters for the cost function evaluation as specified by the
:py:func:`pydda.retrieval.DDParameters` class.
Returns
-------
J: float
The value of the cost function
"""
winds = np.reshape(winds,
(3, parameters.grid_shape[0], parameters.grid_shape[1],
parameters.grid_shape[2]))
Jvel = calculate_radial_vel_cost_function(
parameters.vrs, parameters.azs, parameters.els,
winds[0], winds[1], winds[2], parameters.wts, rmsVr=parameters.rmsVr,
weights=parameters.weights, coeff=parameters.Co)
if(parameters.Cm > 0):
Jmass = calculate_mass_continuity(
winds[0], winds[1], winds[2], parameters.z,
parameters.dx, parameters.dy, parameters.dz,
coeff=parameters.Cm)
else:
Jmass = 0
if(parameters.Cx > 0 or parameters.Cy > 0 or parameters.Cz > 0):
Jsmooth = calculate_smoothness_cost(
winds[0], winds[1], winds[2], Cx=parameters.Cx,
Cy=parameters.Cy, Cz=parameters.Cz)
else:
Jsmooth = 0
if(parameters.Cb > 0):
Jbackground = calculate_background_cost(
winds[0], winds[1], winds[2], parameters.bg_weights,
parameters.u_back, parameters.v_back, parameters.Cb)
else:
Jbackground = 0
if(parameters.Cv > 0):
Jvorticity = calculate_vertical_vorticity_cost(
winds[0], winds[1], winds[2], parameters.dx,
parameters.dy, parameters.dz, parameters.Ut,
parameters.Vt, coeff=parameters.Cv)
else:
Jvorticity = 0
if(parameters.Cmod > 0):
Jmod = calculate_model_cost(
winds[0], winds[1], winds[2],
parameters.model_weights, parameters.u_model,
parameters.v_model,
parameters.w_model, coeff=parameters.Cmod)
else:
Jmod = 0
if parameters.Cpoint > 0:
Jpoint = calculate_point_cost(
winds[0], winds[1], parameters.x, parameters.y, parameters.z,
parameters.point_list, Cp=parameters.Cpoint, roi=parameters.roi)
else:
Jpoint = 0
if(parameters.print_out is True):
print(('| Jvel | Jmass | Jsmooth | Jbg | Jvort | Jmodel | Jpoint |' +
' Max w '))
print(('|' + "{:9.4f}".format(Jvel) + '|' +
"{:9.4f}".format(Jmass) + '|' +
"{:9.4f}".format(Jsmooth) + '|' +
"{:9.4f}".format(Jbackground) + '|' +
"{:9.4f}".format(Jvorticity) + '|' +
"{:9.4f}".format(Jmod) + '|' +
"{:9.4f}".format(Jpoint)) + '|' +
"{:9.4f}".format(np.ma.max(np.ma.abs(winds[2]))))
return Jvel + Jmass + Jsmooth + Jbackground + Jvorticity + Jmod + Jpoint
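# Sketch (not a PyDDA function): the flatten/reshape convention shared by
# J_function and grad_J. The optimizer sees a 1-D vector of length
# 3*nz*ny*nx, which is reshaped to (3, nz, ny, nx) with u, v, w stacked
# along the first axis. grid_shape below is hypothetical.
def _example_wind_flattening():
    grid_shape = (4, 5, 6)
    u = np.zeros(grid_shape)
    v = np.ones(grid_shape)
    w = 2 * np.ones(grid_shape)
    flat = np.stack([u, v, w], axis=0).flatten()
    back = np.reshape(flat, (3,) + grid_shape)
    assert np.array_equal(back[1], v) and np.array_equal(back[2], w)
    return flat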
def grad_J(winds, parameters):
"""
Calculates the gradient of the cost function. This typically does not need
to be called directly as get_dd_wind_field is a wrapper around this
function and :py:func:`pydda.cost_functions.J_function`.
In order to add more terms to the cost function,
modify this function and :py:func:`pydda.cost_functions.grad_J`.
Parameters
----------
winds: 1-D float array
The wind field, flattened to 1-D for f_min
parameters: DDParameters
The parameters for the cost function evaluation as specified by the
        :py:func:`pydda.retrieval.DDParameters` class.
Returns
-------
grad: 1D float array
Gradient vector of cost function
"""
winds = np.reshape(winds,
(3, parameters.grid_shape[0],
parameters.grid_shape[1], parameters.grid_shape[2]))
grad = calculate_grad_radial_vel(
parameters.vrs, parameters.els, parameters.azs,
winds[0], winds[1], winds[2], parameters.wts, parameters.weights,
parameters.rmsVr, coeff=parameters.Co, upper_bc=parameters.upper_bc)
if(parameters.Cm > 0):
grad += calculate_mass_continuity_gradient(
winds[0], winds[1], winds[2], parameters.z,
parameters.dx, parameters.dy, parameters.dz,
coeff=parameters.Cm, upper_bc=parameters.upper_bc)
if(parameters.Cx > 0 or parameters.Cy > 0 or parameters.Cz > 0):
grad += calculate_smoothness_gradient(
winds[0], winds[1], winds[2], Cx=parameters.Cx,
Cy=parameters.Cy, Cz=parameters.Cz, upper_bc=parameters.upper_bc)
if(parameters.Cb > 0):
grad += calculate_background_gradient(
winds[0], winds[1], winds[2], parameters.bg_weights,
parameters.u_back, parameters.v_back, parameters.Cb,
upper_bc=parameters.upper_bc)
if(parameters.Cv > 0):
grad += calculate_vertical_vorticity_gradient(
winds[0], winds[1], winds[2], parameters.dx,
parameters.dy, parameters.dz, parameters.Ut,
parameters.Vt, coeff=parameters.Cv)
if(parameters.Cmod > 0):
grad += calculate_model_gradient(
winds[0], winds[1], winds[2],
parameters.model_weights, parameters.u_model, parameters.v_model,
parameters.w_model, coeff=parameters.Cmod)
if parameters.Cpoint > 0:
grad += calculate_point_gradient(
winds[0], winds[1], parameters.x, parameters.y, parameters.z,
parameters.point_list, Cp=parameters.Cpoint, roi=parameters.roi)
if(parameters.print_out is True):
print('Norm of gradient: ' + str(np.linalg.norm(grad, np.inf)))
return grad
def calculate_radial_vel_cost_function(vrs, azs, els, u, v,
w, wts, rmsVr, weights, coeff=1.0):
"""
Calculates the cost function due to difference of the wind field from
radar radial velocities. For more information on this cost function, see
Potvin et al. (2012) and Shapiro et al. (2009).
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
vrs: List of float arrays
List of radial velocities from each radar
els: List of float arrays
List of elevations from each radar
azs: List of float arrays
List of azimuths from each radar
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
wts: List of float arrays
Float array containing fall speed from radar.
rmsVr: float
        The sum of squares of velocity/num_points. Used for normalization
        of the data weighting coefficient
weights: n_radars x_bins x y_bins float array
Data weights for each pair of radars
coeff: float
Constant for cost function
Returns
-------
J_o: float
Observational cost function
References
-----------
<NAME>., <NAME>, and <NAME>, 2012: Impact of a Vertical Vorticity
Constraint in Variational Dual-Doppler Wind Analysis: Tests with Real and
Simulated Supercell Data. J. Atmos. Oceanic Technol., 29, 32–49,
https://doi.org/10.1175/JTECH-D-11-00019.1
<NAME>., <NAME>, and <NAME>, 2009: Use of a Vertical Vorticity
Equation in Variational Dual-Doppler Wind Analysis. J. Atmos. Oceanic
Technol., 26, 2089–2106, https://doi.org/10.1175/2009JTECHA1256.1
"""
J_o = 0
lambda_o = coeff / (rmsVr * rmsVr)
for i in range(len(vrs)):
v_ar = (np.cos(els[i])*np.sin(azs[i])*u +
np.cos(els[i])*np.cos(azs[i])*v +
np.sin(els[i])*(w - np.abs(wts[i])))
the_weight = weights[i]
the_weight[els[i].mask] = 0
the_weight[azs[i].mask] = 0
the_weight[vrs[i].mask] = 0
the_weight[wts[i].mask] = 0
J_o += lambda_o*np.sum(np.square(vrs[i] - v_ar)*the_weight)
return J_o
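# Sketch (not a PyDDA function): the radial projection used above. A purely
# zonal wind observed at 90 degree azimuth and zero elevation projects
# entirely onto the radial direction, so v_r equals u.
def _example_radial_projection():
    u, v, w, wt = 10.0, 0.0, 0.0, 0.0
    az, el = np.deg2rad(90.0), np.deg2rad(0.0)
    v_r = (np.cos(el) * np.sin(az) * u +
           np.cos(el) * np.cos(az) * v +
           np.sin(el) * (w - np.abs(wt)))
    assert np.isclose(v_r, u)
    return v_r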
def calculate_grad_radial_vel(vrs, els, azs, u, v, w,
wts, weights, rmsVr, coeff=1.0, upper_bc=True):
"""
Calculates the gradient of the cost function due to difference of wind
field from radar radial velocities.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
vrs: List of float arrays
List of radial velocities from each radar
els: List of float arrays
List of elevations from each radar
    azs: List of float arrays
List of azimuths from each radar
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
coeff: float
Constant for cost function
vel_name: str
Background velocity field name
weights: n_radars x_bins x y_bins float array
Data weights for each pair of radars
Returns
-------
y: 1-D float array
Gradient vector of observational cost function.
More information
----------------
The gradient is calculated by taking the functional derivative of the
cost function. For more information on functional derivatives, see the
Euler-Lagrange Equation:
https://en.wikipedia.org/wiki/Euler%E2%80%93Lagrange_equation
"""
# Use zero for all masked values since we don't want to add them into
# the cost function
p_x1 = np.zeros(vrs[0].shape)
p_y1 = np.zeros(vrs[0].shape)
p_z1 = np.zeros(vrs[0].shape)
lambda_o = coeff / (rmsVr * rmsVr)
for i in range(len(vrs)):
v_ar = (np.cos(els[i])*np.sin(azs[i])*u +
np.cos(els[i])*np.cos(azs[i])*v +
np.sin(els[i])*(w - np.abs(wts[i])))
x_grad = (2*(v_ar - vrs[i]) * np.cos(els[i]) *
np.sin(azs[i]) * weights[i]) * lambda_o
y_grad = (2*(v_ar - vrs[i]) * np.cos(els[i]) *
np.cos(azs[i]) * weights[i]) * lambda_o
z_grad = (2*(v_ar - vrs[i]) * np.sin(els[i]) * weights[i]) * lambda_o
x_grad[els[i].mask] = 0
y_grad[els[i].mask] = 0
z_grad[els[i].mask] = 0
x_grad[azs[i].mask] = 0
y_grad[azs[i].mask] = 0
z_grad[azs[i].mask] = 0
x_grad[els[i].mask] = 0
x_grad[azs[i].mask] = 0
x_grad[vrs[i].mask] = 0
x_grad[wts[i].mask] = 0
y_grad[els[i].mask] = 0
y_grad[azs[i].mask] = 0
y_grad[vrs[i].mask] = 0
y_grad[wts[i].mask] = 0
z_grad[els[i].mask] = 0
z_grad[azs[i].mask] = 0
z_grad[vrs[i].mask] = 0
z_grad[wts[i].mask] = 0
p_x1 += x_grad
p_y1 += y_grad
p_z1 += z_grad
# Impermeability condition
p_z1[0, :, :] = 0
if(upper_bc is True):
p_z1[-1, :, :] = 0
y = np.stack((p_x1, p_y1, p_z1), axis=0)
return y.flatten()
def calculate_smoothness_cost(u, v, w, Cx=1e-5, Cy=1e-5, Cz=1e-5):
"""
Calculates the smoothness cost function by taking the Laplacian of the
wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
Cx: float
Constant controlling smoothness in x-direction
Cy: float
Constant controlling smoothness in y-direction
Cz: float
Constant controlling smoothness in z-direction
Returns
-------
Js: float
value of smoothness cost function
"""
du = np.zeros(w.shape)
dv = np.zeros(w.shape)
dw = np.zeros(w.shape)
scipy.ndimage.filters.laplace(u, du, mode='wrap')
scipy.ndimage.filters.laplace(v, dv, mode='wrap')
scipy.ndimage.filters.laplace(w, dw, mode='wrap')
return np.sum(Cx*du**2 + Cy*dv**2 + Cz*dw**2)
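# Sketch (not a PyDDA function): a spatially uniform wind field has zero
# Laplacian everywhere (even with the wrap-around boundaries used above), so
# the smoothness cost vanishes.
def _example_smoothness_cost_zero():
    shape = (4, 5, 6)
    u = np.full(shape, 2.0)
    v = np.full(shape, -3.0)
    w = np.full(shape, 0.5)
    assert np.isclose(calculate_smoothness_cost(u, v, w), 0.0)
    return 0.0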
def calculate_smoothness_gradient(u, v, w, Cx=1e-5, Cy=1e-5, Cz=1e-5,
upper_bc=True):
"""
Calculates the gradient of the smoothness cost function
by taking the Laplacian of the Laplacian of the wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
Cx: float
Constant controlling smoothness in x-direction
Cy: float
Constant controlling smoothness in y-direction
Cz: float
Constant controlling smoothness in z-direction
Returns
-------
y: float array
value of gradient of smoothness cost function
"""
du = np.zeros(w.shape)
dv = np.zeros(w.shape)
dw = np.zeros(w.shape)
grad_u = np.zeros(w.shape)
grad_v = np.zeros(w.shape)
grad_w = np.zeros(w.shape)
scipy.ndimage.filters.laplace(u, du, mode='wrap')
scipy.ndimage.filters.laplace(v, dv, mode='wrap')
scipy.ndimage.filters.laplace(w, dw, mode='wrap')
scipy.ndimage.filters.laplace(du, grad_u, mode='wrap')
scipy.ndimage.filters.laplace(dv, grad_v, mode='wrap')
scipy.ndimage.filters.laplace(dw, grad_w, mode='wrap')
# Impermeability condition
grad_w[0, :, :] = 0
if(upper_bc is True):
grad_w[-1, :, :] = 0
y = np.stack([grad_u*Cx*2, grad_v*Cy*2, grad_w*Cz*2], axis=0)
return y.flatten()
def calculate_point_cost(u, v, x, y, z, point_list, Cp=1e-3, roi=500.0):
"""
Calculates the cost function related to point observations. A mean square error cost
function term is applied to points that are within the sphere of influence
whose radius is determined by *roi*.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
x: Float array
X coordinates of grid centers
y: Float array
Y coordinates of grid centers
z: Float array
Z coordinated of grid centers
point_list: list of dicts
List of point constraints.
Each member is a dict with keys of "u", "v", to correspond
to each component of the wind field and "x", "y", "z"
to correspond to the location of the point observation.
In addition, "site_id" gives the METAR code (or name) to the station.
Cp: float
The weighting coefficient of the point cost function.
roi: float
Radius of influence of observations
Returns
-------
J: float
The cost function related to the difference between wind field and points.
"""
J = 0.0
for the_point in point_list:
# Instead of worrying about whole domain, just find points in radius of influence
# Since we know that the weight will be zero outside the sphere of influence anyways
the_box = np.where(np.logical_and.reduce(
(np.abs(x - the_point["x"]) < roi, np.abs(y - the_point["y"]) < roi,
np.abs(z - the_point["z"]) < roi)))
J += np.sum(((u[the_box] - the_point["u"])**2 + (v[the_box] - the_point["v"])**2))
return J * Cp
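# Sketch (not part of PyDDA) of the point_list structure consumed by
# calculate_point_cost and calculate_point_gradient: each entry holds the
# observed u, v components and the observation location; "site_id" is
# informational only. The station code below is hypothetical.
_example_point_list = [
    {"u": 5.0, "v": -2.0, "x": 1000.0, "y": 2000.0, "z": 100.0,
     "site_id": "KXYZ"},
]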
def calculate_point_gradient(u, v, x, y, z, point_list, Cp=1e-3, roi=500.0):
"""
Calculates the gradient of the cost function related to point observations.
A mean square error cost function term is applied to points that are within the sphere of influence
whose radius is determined by *roi*.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
x: Float array
X coordinates of grid centers
y: Float array
Y coordinates of grid centers
z: Float array
Z coordinated of grid centers
point_list: list of dicts
List of point constraints. Each member is a dict with keys of "u", "v",
to correspond to each component of the wind field and "x", "y", "z"
to correspond to the location of the point observation.
In addition, "site_id" gives the METAR code (or name) to the station.
Cp: float
The weighting coefficient of the point cost function.
roi: float
Radius of influence of observations
Returns
-------
gradJ: float array
The gradient of the cost function related to the difference between wind field and points.
"""
gradJ_u = np.zeros_like(u)
gradJ_v = np.zeros_like(v)
gradJ_w = np.zeros_like(u)
for the_point in point_list:
the_box = np.where(np.logical_and.reduce(
(np.abs(x - the_point["x"]) < roi, np.abs(y - the_point["y"]) < roi,
np.abs(z - the_point["z"]) < roi)))
gradJ_u[the_box] += 2 * (u[the_box] - the_point["u"])
gradJ_v[the_box] += 2 * (v[the_box] - the_point["v"])
gradJ = np.stack([gradJ_u, gradJ_v, gradJ_w], axis=0).flatten()
return gradJ * Cp
def calculate_mass_continuity(u, v, w, z, dx, dy, dz, coeff=1500.0, anel=1):
"""
Calculates the mass continuity cost function by taking the divergence
of the wind field.
All arrays in the given lists must have the same dimensions and represent
the same spatial coordinates.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
dx: float
Grid spacing in x direction.
dy: float
Grid spacing in y direction.
dz: float
Grid spacing in z direction.
z: Float array (1D)
1D Float array with heights of grid
coeff: float
Constant controlling contribution of mass continuity to cost function
anel: int
= 1 use anelastic approximation, 0=don't
Returns
-------
J: float
value of mass continuity cost function
"""
dudx = np.gradient(u, dx, axis=2)
dvdy = np.gradient(v, dy, axis=1)
dwdz = np.gradient(w, dz, axis=0)
if(anel == 1):
rho = np.exp(-z/10000.0)
drho_dz = np.gradient(rho, dz, axis=0)
anel_term = w/rho*drho_dz
else:
anel_term = np.zeros(w.shape)
return coeff*np.sum(np.square(dudx + dvdy + dwdz + anel_term))/2.0
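# Sketch (not a PyDDA function): with anel=0 a spatially uniform wind field is
# divergence free, so the mass continuity cost is exactly zero.
def _example_mass_continuity_zero():
    shape = (4, 5, 6)
    u = np.full(shape, 3.0)
    v = np.full(shape, -1.0)
    w = np.zeros(shape)
    z = np.arange(shape[0]) * 500.0
    J = calculate_mass_continuity(u, v, w, z, 1000.0, 1000.0, 500.0, anel=0)
    assert np.isclose(J, 0.0)
    return J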
def calculate_mass_continuity_gradient(u, v, w, z, dx,
dy, dz, coeff=1500.0, anel=1,
upper_bc=True):
"""
Calculates the gradient of mass continuity cost function. This is done by
taking the negative gradient of the divergence of the wind field.
All grids must have the same grid specification.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
z: Float array (1D)
1D Float array with heights of grid
dx: float
Grid spacing in x direction.
dy: float
Grid spacing in y direction.
dz: float
Grid spacing in z direction.
coeff: float
Constant controlling contribution of mass continuity to cost function
anel: int
= 1 use anelastic approximation, 0=don't
Returns
-------
y: float array
value of gradient of mass continuity cost function
"""
dudx = np.gradient(u, dx, axis=2)
dvdy = np.gradient(v, dy, axis=1)
dwdz = np.gradient(w, dz, axis=0)
if(anel == 1):
rho = np.exp(-z/10000.0)
drho_dz = np.gradient(rho, dz, axis=0)
anel_term = w/rho*drho_dz
else:
anel_term = 0
div2 = dudx + dvdy + dwdz + anel_term
grad_u = -np.gradient(div2, dx, axis=2)*coeff
grad_v = -np.gradient(div2, dy, axis=1)*coeff
grad_w = -np.gradient(div2, dz, axis=0)*coeff
# Impermeability condition
grad_w[0, :, :] = 0
if(upper_bc is True):
grad_w[-1, :, :] = 0
y = np.stack([grad_u, grad_v, grad_w], axis=0)
return y.flatten()
def calculate_fall_speed(grid, refl_field=None, frz=4500.0):
"""
Estimates fall speed based on reflectivity.
Uses methodology of <NAME> and <NAME>
Parameters
----------
Grid: Py-ART Grid
Py-ART Grid containing reflectivity to calculate fall speed from
refl_field: str
String containing name of reflectivity field. None will automatically
determine the name.
frz: float
Height of freezing level in m
Returns
-------
3D float array:
Float array of terminal velocities
"""
# Parse names of velocity field
if refl_field is None:
refl_field = pyart.config.get_field_name('reflectivity')
refl = grid.fields[refl_field]['data']
grid_z = grid.point_z['data']
term_vel = np.zeros(refl.shape)
A = np.zeros(refl.shape)
B = np.zeros(refl.shape)
rho = np.exp(-grid_z/10000.0)
A[np.logical_and(grid_z < frz, refl < 55)] = -2.6
B[np.logical_and(grid_z < frz, refl < 55)] = 0.0107
A[np.logical_and(grid_z < frz,
np.logical_and(refl >= 55, refl < 60))] = -2.5
B[np.logical_and(grid_z < frz,
np.logical_and(refl >= 55, refl < 60))] = 0.013
A[np.logical_and(grid_z < frz, refl > 60)] = -3.95
B[np.logical_and(grid_z < frz, refl > 60)] = 0.0148
A[np.logical_and(grid_z >= frz, refl < 33)] = -0.817
B[np.logical_and(grid_z >= frz, refl < 33)] = 0.0063
A[np.logical_and(grid_z >= frz,
np.logical_and(refl >= 33, refl < 49))] = -2.5
B[np.logical_and(grid_z >= frz,
np.logical_and(refl >= 33, refl < 49))] = 0.013
A[np.logical_and(grid_z >= frz, refl > 49)] = -3.95
B[np.logical_and(grid_z >= frz, refl > 49)] = 0.0148
fallspeed = A*np.power(10, refl*B)*np.power(1.2/rho, 0.4)
del A, B, rho
return fallspeed
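# Sketch (not a PyDDA function): the empirical power law applied above,
# evaluated at a single point with 30 dBZ well below the freezing level
# (A=-2.6, B=0.0107) and near-surface density, gives a fall speed of roughly
# -6 m/s.
def _example_fall_speed_point():
    refl, grid_z = 30.0, 1000.0
    A, B = -2.6, 0.0107
    rho = np.exp(-grid_z / 10000.0)
    return A * np.power(10, refl * B) * np.power(1.2 / rho, 0.4)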
def calculate_background_cost(u, v, w, weights, u_back, v_back, Cb=0.01):
"""
Calculates the background cost function. The background cost function is
simply the sum of the squared differences between the wind field and the
background wind field multiplied by the weighting coefficient.
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
weights: Float array
Weights for each point to consider into cost function
u_back: 1D float array
Zonal winds vs height from sounding
    v_back: 1D float array
Meridional winds vs height from sounding
Cb: float
Weight of background constraint to total cost function
Returns
-------
cost: float
value of background cost function
"""
the_shape = u.shape
cost = 0
for i in range(the_shape[0]):
cost += (Cb*np.sum(np.square(u[i]-u_back[i])*(weights[i]) +
np.square(v[i]-v_back[i])*(weights[i])))
return cost
def calculate_background_gradient(u, v, w, weights, u_back, v_back, Cb=0.01):
"""
Calculates the gradient of the background cost function. For each u, v
    this is given as 2*coefficient*(analysis wind - background wind).
Parameters
----------
u: Float array
Float array with u component of wind field
v: Float array
Float array with v component of wind field
w: Float array
Float array with w component of wind field
weights: Float array
Weights for each point to consider into cost function
u_back: 1D float array
Zonal winds vs height from sounding
    v_back: 1D float array
Meridional winds vs height from sounding
Cb: float
Weight of background constraint to total cost function
Returns
-------
y: float array
value of gradient of background cost function
"""
the_shape = u.shape
    u_grad = np.zeros(the_shape)
"""Test schmidt_decomposition."""
import numpy as np
from toqito.state_ops import schmidt_decomposition
from toqito.states import max_entangled
def test_schmidt_decomp_max_ent():
"""Schmidt decomposition of the 3-D maximally entangled state."""
singular_vals, u_mat, vt_mat = schmidt_decomposition(max_entangled(3))
expected_u_mat = np.identity(3)
expected_vt_mat = np.identity(3)
expected_singular_vals = 1 / np.sqrt(3) * np.array([[1], [1], [1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
def test_schmidt_decomp_dim_list():
"""Schmidt decomposition with list specifying dimension."""
singular_vals, u_mat, vt_mat = schmidt_decomposition(max_entangled(3), dim=[3, 3])
expected_u_mat = np.identity(3)
expected_vt_mat = np.identity(3)
expected_singular_vals = 1 / np.sqrt(3) * np.array([[1], [1], [1]])
bool_mat = np.isclose(expected_u_mat, u_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_vt_mat, vt_mat)
np.testing.assert_equal(np.all(bool_mat), True)
bool_mat = np.isclose(expected_singular_vals, singular_vals)
np.testing.assert_equal(np.all(bool_mat), True)
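# Sketch (not part of the original tests): a Schmidt decomposition can be
# recombined as sum_i s_i * kron(u_i, v_i). For the maximally entangled state
# used above (u_mat and vt_mat both identity) the ordering of the Kronecker
# factors does not matter, and the recombination reproduces the state.
def _example_schmidt_recombine():
    state = max_entangled(3)
    singular_vals, u_mat, vt_mat = schmidt_decomposition(state)
    rebuilt = sum(
        singular_vals[i, 0] * np.kron(u_mat[:, [i]], vt_mat[:, [i]])
        for i in range(len(singular_vals))
    )
    assert np.allclose(rebuilt, state)
    return rebuilt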
if __name__ == "__main__":
    np.testing.run_module_suite()
import os
import argparse
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
from network import SIGNET_static
def get_LF_val(u, v, width=1024, height=1024):
x = np.linspace(0, width-1, width)
y = np.linspace(0, height-1, height)
xv, yv = np.meshgrid(y, x)
img_grid = torch.from_numpy(np.stack([yv, xv], axis=-1))
uv_grid = torch.ones_like(img_grid)
uv_grid[:, :, 0], uv_grid[:, :, 1] = u, v
val_inp_t = torch.cat([uv_grid, img_grid], dim = -1).float()
val_inp_t[..., :2] /= 17
val_inp_t[..., 2] /= width
val_inp_t[..., 3] /= height
del img_grid, xv, yv
return val_inp_t.view(-1, val_inp_t.shape[-1])
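# Sketch (not part of the original script): get_LF_val returns one row per
# pixel containing the normalized (u, v, y, x) coordinate, so for a
# width x height image the result has shape (width*height, 4). The division
# by 17 above presumably normalizes a 17x17 angular grid.
def _example_lf_grid_shape():
    inp = get_LF_val(u=3, v=7, width=8, height=8)
    assert inp.shape == (8 * 8, 4)
    return inp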
def eval_im(val_inp_t, batches, device):
b_size = val_inp_t.shape[0] // batches
with torch.no_grad():
out = []
for b in range(batches):
out.append(model(val_inp_t[b_size*b:b_size*(b+1)].to(device)))
out = torch.cat(out, dim = 0)
out = torch.clamp(out, 0, 1)
out_np = out.view(1024, 1024, 3).cpu().numpy() * 255
return out_np
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-u", type=int, default=0, help="angular dimension u")
parser.add_argument("-v", type=int, default=0, help="angular dimension v")
parser.add_argument("-b", type=int, default=4, help="batch size in inference")
parser.add_argument("--exp_dir", type=str, help="directory to trained weights")
args = parser.parse_args()
OUT_DIR = f'./{args.exp_dir}/eval_output'
if not os.path.exists(OUT_DIR):
os.makedirs(OUT_DIR)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SIGNET_static(hidden_layers=8, alpha=0.5, skips=[], hidden_features=512, with_norm=True, with_res=True)
m_state_dict = torch.load(f'{args.exp_dir}/model.pth')
model.load_state_dict(m_state_dict, strict=False)
model.eval()
model = model.to(device)
val_inp_t = get_LF_val(u=args.u, v=args.v).to(device)
out_np = eval_im(val_inp_t, args.b, device)
    Image.fromarray(np.uint8(out_np)
import pickle
import pytest
import numpy as np
import scipy.sparse as sp
import joblib
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_raises_regexp
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.fixes import parse_version
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone, is_classifier
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.linear_model import _sgd_fast as sgd_fast
from sklearn.model_selection import RandomizedSearchCV
def _update_kwargs(kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
if "tol" not in kwargs:
kwargs["tol"] = None
if "max_iter" not in kwargs:
kwargs["max_iter"] = 5
class _SparseSGDClassifier(linear_model.SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super().fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super().partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super().decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super().predict_proba(X)
class _SparseSGDRegressor(linear_model.SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
# XXX untested as of v0.22
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.decision_function(self, X, *args,
**kw)
def SGDClassifier(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDClassifier(**kwargs)
def SGDRegressor(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDRegressor(**kwargs)
def SparseSGDClassifier(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDClassifier(**kwargs)
def SparseSGDRegressor(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDRegressor(**kwargs)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
###############################################################################
# Common Test Case to classification and regression
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if klass in (SparseSGDClassifier, SparseSGDRegressor):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
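# Sketch (not part of the test suite): the in-place update used in asgd above
# is the standard incremental mean, so after processing i+1 samples
# average_weights equals the arithmetic mean of all weight iterates seen so far.
def _example_running_average():
    iterates = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
    avg = np.zeros(2)
    for i, w_t in enumerate(iterates):
        avg *= i
        avg += w_t
        avg /= i + 1.0
    assert np.allclose(avg, np.mean(iterates, axis=0))
    return avg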
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_alpha(klass):
# Check whether expected ValueError on bad alpha
with pytest.raises(ValueError):
klass(alpha=-.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_penalty(klass):
# Check whether expected ValueError on bad penalty
with pytest.raises(ValueError):
klass(penalty='foobar', l1_ratio=0.85)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_loss(klass):
# Check whether expected ValueError on bad loss
with pytest.raises(ValueError):
klass(loss="foobar")
def _test_warm_start(klass, X, Y, lr):
# Test that explicit warm restart...
clf = klass(alpha=0.01, eta0=0.01, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = klass(alpha=0.001, eta0=0.01, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = klass(alpha=0.01, eta0=0.01, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert clf3.t_ == clf.t_
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert clf3.t_ == clf2.t_
assert_array_almost_equal(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize('lr',
["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start(klass, lr):
_test_warm_start(klass, X, Y, lr)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_input_format(klass):
# Input format tests.
clf = klass(alpha=0.01, shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
with pytest.raises(ValueError):
clf.fit(X, Y_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_clone(klass):
# Test whether clone works ok.
clf = klass(alpha=0.01, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = klass(alpha=0.01, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_plain_has_no_average_attr(klass):
clf = klass(average=True, eta0=.01)
clf.fit(X, Y)
assert hasattr(clf, '_average_coef')
assert hasattr(clf, '_average_intercept')
assert hasattr(clf, '_standard_intercept')
assert hasattr(clf, '_standard_coef')
clf = klass()
clf.fit(X, Y)
assert not hasattr(clf, '_average_coef')
assert not hasattr(clf, '_average_intercept')
assert not hasattr(clf, '_standard_intercept')
assert not hasattr(clf, '_standard_coef')
# TODO: remove in 1.0
@pytest.mark.parametrize('klass', [SGDClassifier, SGDRegressor])
def test_sgd_deprecated_attr(klass):
est = klass(average=True, eta0=.01)
est.fit(X, Y)
msg = "Attribute {} was deprecated"
for att in ['average_coef_', 'average_intercept_',
'standard_coef_', 'standard_intercept_']:
with pytest.warns(FutureWarning, match=msg.format(att)):
getattr(est, att)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_late_onset_averaging_not_reached(klass):
clf1 = klass(average=600)
clf2 = klass()
for _ in range(100):
if is_classifier(clf1):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_late_onset_averaging_reached(klass):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = klass(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, max_iter=2, shuffle=False)
clf2 = klass(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, max_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
asgd(klass, X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_alpha_for_optimal_learning_rate(klass):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
with pytest.raises(ValueError):
klass(alpha=0, learning_rate="optimal")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_early_stopping(klass):
X = iris.data[iris.target > 0]
Y = iris.target[iris.target > 0]
for early_stopping in [True, False]:
max_iter = 1000
clf = klass(early_stopping=early_stopping, tol=1e-3,
max_iter=max_iter).fit(X, Y)
assert clf.n_iter_ < max_iter
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_adaptive_longer_than_constant(klass):
clf1 = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3,
max_iter=100)
clf1.fit(iris.data, iris.target)
clf2 = klass(learning_rate="constant", eta0=0.01, tol=1e-3,
max_iter=100)
clf2.fit(iris.data, iris.target)
assert clf1.n_iter_ > clf2.n_iter_
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_validation_set_not_used_for_training(klass):
X, Y = iris.data, iris.target
validation_fraction = 0.4
seed = 42
shuffle = False
max_iter = 10
clf1 = klass(early_stopping=True,
random_state=np.random.RandomState(seed),
validation_fraction=validation_fraction,
learning_rate='constant', eta0=0.01,
tol=None, max_iter=max_iter, shuffle=shuffle)
clf1.fit(X, Y)
assert clf1.n_iter_ == max_iter
clf2 = klass(early_stopping=False,
random_state=np.random.RandomState(seed),
learning_rate='constant', eta0=0.01,
tol=None, max_iter=max_iter, shuffle=shuffle)
if is_classifier(clf2):
cv = StratifiedShuffleSplit(test_size=validation_fraction,
random_state=seed)
else:
cv = ShuffleSplit(test_size=validation_fraction,
random_state=seed)
idx_train, idx_val = next(cv.split(X, Y))
idx_train = np.sort(idx_train) # remove shuffling
clf2.fit(X[idx_train], Y[idx_train])
assert clf2.n_iter_ == max_iter
assert_array_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_n_iter_no_change(klass):
X, Y = iris.data, iris.target
# test that n_iter_ increases monotonically with n_iter_no_change
for early_stopping in [True, False]:
n_iter_list = [klass(early_stopping=early_stopping,
n_iter_no_change=n_iter_no_change,
tol=1e-4, max_iter=1000
).fit(X, Y).n_iter_
for n_iter_no_change in [2, 3, 10]]
assert_array_equal(n_iter_list, sorted(n_iter_list))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_not_enough_sample_for_early_stopping(klass):
# test an error is raised if the training or validation set is empty
clf = klass(early_stopping=True, validation_fraction=0.99)
with pytest.raises(ValueError):
clf.fit(X3, Y3)
###############################################################################
# Classification Test Case
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_clf(klass):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = klass(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, max_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_bad_l1_ratio(klass):
# Check whether expected ValueError on bad l1_ratio
with pytest.raises(ValueError):
klass(l1_ratio=1.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_bad_learning_rate_schedule(klass):
# Check whether expected ValueError on bad learning_rate
with pytest.raises(ValueError):
klass(learning_rate="<unknown>")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_bad_eta0(klass):
# Check whether expected ValueError on bad eta0
with pytest.raises(ValueError):
klass(eta0=0, learning_rate="constant")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_max_iter_param(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(max_iter=-10000)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_shuffle_param(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(shuffle="false")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_early_stopping_param(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(early_stopping="false")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_validation_fraction(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(validation_fraction=-.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_n_iter_no_change(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(n_iter_no_change=0)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_argument_coef(klass):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset
with pytest.raises(TypeError):
klass(coef_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_provide_coef(klass):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
with pytest.raises(ValueError):
klass().fit(X, Y, coef_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_intercept(klass):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
with pytest.raises(ValueError):
klass().fit(X, Y, intercept_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_early_stopping_with_partial_fit(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(early_stopping=True).partial_fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_binary(klass):
# Checks intercept_ shape for the warm starts in binary case
klass().fit(X5, Y5, intercept_init=0)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_average_binary_computed_correctly(klass):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = klass(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_to_intercept(klass):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = klass().fit(X5, Y5)
klass().fit(X5, Y5, intercept_init=clf.intercept_)
clf = klass().fit(X, Y)
klass().fit(X, Y, intercept_init=clf.intercept_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_at_least_two_labels(klass):
# Target must have at least two labels
clf = klass(alpha=0.01, max_iter=20)
with pytest.raises(ValueError):
clf.fit(X2, np.ones(9))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_weight_class_balanced(klass):
    # partial_fit with class_weight='balanced' not supported
regex = (r"class_weight 'balanced' is not supported for "
r"partial_fit\. In order to use 'balanced' weights, "
r"use compute_class_weight\('balanced', classes=classes, y=y\). "
r"In place of y you can us a large enough sample "
r"of the full training set target to properly "
r"estimate the class frequency distributions\. "
r"Pass the resulting weights as the class_weight "
r"parameter\.")
assert_raises_regexp(ValueError,
regex,
klass(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass(klass):
# Multi-class test case
clf = klass(alpha=0.01, max_iter=20).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_average(klass):
eta = .001
alpha = .01
# Multi-class average test case
clf = klass(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_with_init_coef(klass):
# Multi-class test case
clf = klass(alpha=0.01, max_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert clf.coef_.shape == (3, 2)
    assert clf.intercept_.shape == (3,)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_njobs(klass):
# Multi-class test case with multi-core support
clf = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_coef_multiclass(klass):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = klass()
with pytest.raises(ValueError):
clf.fit(X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = klass().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = klass()
with pytest.raises(ValueError):
clf.fit(X2, Y2, intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = klass().fit(X2, Y2, intercept_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_predict_proba_method_access(klass):
# Checks that SGDClassifier predict_proba and predict_log_proba methods
# can either be accessed or raise an appropriate error message
# otherwise. See
# https://github.com/scikit-learn/scikit-learn/issues/10938 for more
# details.
for loss in linear_model.SGDClassifier.loss_functions:
clf = SGDClassifier(loss=loss)
if loss in ('log', 'modified_huber'):
assert hasattr(clf, 'predict_proba')
assert hasattr(clf, 'predict_log_proba')
else:
message = ("probability estimates are not "
"available for loss={!r}".format(loss))
assert not hasattr(clf, 'predict_proba')
assert not hasattr(clf, 'predict_log_proba')
with pytest.raises(AttributeError,
match=message):
clf.predict_proba
with pytest.raises(AttributeError,
match=message):
clf.predict_log_proba
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_proba(klass):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01,
max_iter=10, tol=None).fit(X, Y)
assert not hasattr(clf, "predict_proba")
assert not hasattr(clf, "predict_log_proba")
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = klass(loss=loss, alpha=0.01, max_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert p[0, 1] > 0.5
p = clf.predict_proba([[-1, -1]])
assert p[0, 1] < 0.5
p = clf.predict_log_proba([[3, 2]])
assert p[0, 1] > p[0, 0]
p = clf.predict_log_proba([[-1, -1]])
assert p[0, 1] < p[0, 0]
# log loss multiclass probability estimates
clf = klass(loss="log", alpha=0.01, max_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert np.all(p[0] >= 0)
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
lp = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), lp)
lp = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), lp)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = klass(loss="modified_huber", alpha=0.01, max_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if klass != SparseSGDClassifier:
assert np.argmax(d, axis=1) == np.argmax(p, axis=1)
else: # XXX the sparse test gets a different X2 (?)
assert np.argmin(d, axis=1) == np.argmin(p, axis=1)
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_l1(klass):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = klass(penalty='l1', alpha=.2, fit_intercept=False,
max_iter=2000, tol=None, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert sp.issparse(clf.coef_)
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert sp.issparse(clf.coef_)
pred = clf.predict(X)
assert_array_equal(pred, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_class_weights(klass):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_equal_class_weight(klass):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = klass(alpha=0.1, max_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = klass(alpha=0.1, max_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_label(klass):
# ValueError due to not existing class label.
clf = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5})
with pytest.raises(ValueError):
clf.fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_format(klass):
# ValueError due to wrong class_weight argument type.
clf = klass(alpha=0.1, max_iter=1000, class_weight=[0.5])
with pytest.raises(ValueError):
clf.fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_weights_multiplied(klass):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
rng = np.random.RandomState(0)
sample_weights = rng.random_sample(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = klass(alpha=0.1, max_iter=20, class_weight=class_weights)
clf2 = klass(alpha=0.1, max_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_balanced_weight(klass):
    # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = klass(alpha=0.0001, max_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
f1 = metrics.f1_score(y, clf.predict(X), average='weighted')
assert_almost_equal(f1, 0.96, decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = klass(alpha=0.0001, max_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
f1 = metrics.f1_score(y, clf_balanced.predict(X), average='weighted')
assert_almost_equal(f1, 0.96, decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
    # build a very, very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = klass(max_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred, average='weighted') < 0.96
# fit a model with balanced class_weight enabled
clf = klass(max_iter=1000, class_weight="balanced",
shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred, average='weighted') > 0.96
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sample_weights(klass):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give small weights to the samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_wrong_sample_weights(klass):
# Test if ValueError is raised if sample_weight has wrong shape
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
# provided sample_weight too long
with pytest.raises(ValueError):
clf.fit(X, Y, sample_weight=np.arange(7))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_exception(klass):
clf = klass(alpha=0.01)
# classes was not specified
with pytest.raises(ValueError):
clf.partial_fit(X3, Y3)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_binary(klass):
third = X.shape[0] // 3
clf = klass(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert clf.coef_.shape == (1, X.shape[1])
assert clf.intercept_.shape == (1,)
assert clf.decision_function([[0, 0]]).shape == (1, )
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
    # check that coef_ hasn't been re-allocated
    assert id1 == id2
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass(klass):
third = X2.shape[0] // 3
clf = klass(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
    # check that coef_ hasn't been re-allocated
    assert id1 == id2
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass_average(klass):
third = X2.shape[0] // 3
clf = klass(alpha=0.01, average=X2.shape[0])
classes = | np.unique(Y2) | numpy.unique |
import os
import numpy as np
from os.path import join as pjoin
from nltk.tokenize import word_tokenize as wt
import torch
from torch.autograd import Variable
from textworld.utils import maybe_mkdir
class SlidingAverage(object):
def __init__(self, name, steps=100):
self.name = name
self.steps = steps
self.t = 0
self.ns = []
self.avgs = []
def add(self, n):
self.ns.append(n)
if len(self.ns) > self.steps:
self.ns.pop(0)
self.t += 1
if self.t % self.steps == 0:
self.avgs.append(self.value)
@property
def value(self):
if len(self.ns) == 0: return 0
return sum(self.ns) / len(self.ns)
def __str__(self):
return "%s=%.4f" % (self.name, self.value)
def __gt__(self, value): return self.value > value
def __lt__(self, value): return self.value < value
def state_dict(self):
return {'t': self.t,
'ns': tuple(self.ns),
'avgs': tuple(self.avgs)}
def load_state_dict(self, state):
self.t = state["t"]
self.ns = list(state["ns"])
self.avgs = list(state["avgs"])
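# Editorial usage sketch (not part of the original module), assuming the class above:
#   avg = SlidingAverage('reward', steps=3)
#   for r in [1.0, 2.0, 3.0, 4.0]:
#       avg.add(r)
#   avg.value    # mean of the last 3 values -> (2.0 + 3.0 + 4.0) / 3 = 3.0
#   str(avg)     # 'reward=3.0000'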
def to_np(x):
if isinstance(x, np.ndarray):
return x
return x.data.cpu().numpy()
def to_pt(np_matrix, enable_cuda=False, type='long'):
if type == 'long':
if enable_cuda:
return torch.autograd.Variable(torch.from_numpy(np_matrix).type(torch.LongTensor).cuda())
else:
return torch.autograd.Variable(torch.from_numpy(np_matrix).type(torch.LongTensor))
elif type == 'float':
if enable_cuda:
return torch.autograd.Variable(torch.from_numpy(np_matrix).type(torch.FloatTensor).cuda())
else:
return torch.autograd.Variable(torch.from_numpy(np_matrix).type(torch.FloatTensor))
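# Editorial sketch (not in the original; assumes the older torch.autograd.Variable
# API that this module uses):
#   a = np.array([[1, 2], [3, 4]])
#   v = to_pt(a, enable_cuda=False, type='long')   # Variable wrapping a (2, 2) LongTensor
#   to_np(v)                                       # converts back to a numpy array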
def get_experiment_dir(config):
env_id = config['general']['env_id']
exps_dir = config['general']['experiments_dir']
exp_tag = config['general']['experiment_tag']
exp_dir = pjoin(exps_dir, env_id + "_" + exp_tag)
return maybe_mkdir(exp_dir)
def dict2list(id2w_dict):
res = []
for item in id2w_dict:
res.append(id2w_dict[item])
return res
def _words_to_ids(words, word2id):
ids = []
for word in words:
try:
ids.append(word2id[word])
except KeyError:
ids.append(1)
return ids
def preproc(s, str_type='None', lower_case=False):
s = s.replace("\n", ' ')
if s.strip() == "":
return ["nothing"]
if str_type == 'description':
s = s.split("=-")[1]
elif str_type == 'inventory':
s = s.split("carrying")[1]
if s[0] == ':':
s = s[1:]
elif str_type == 'feedback':
if "Welcome to Textworld" in s:
s = s.split("Welcome to Textworld")[1]
if "-=" in s:
s = s.split("-=")[0]
s = s.strip()
if len(s) == 0:
return ["nothing"]
tokens = wt(s)
if lower_case:
tokens = [t.lower() for t in tokens]
return tokens
def max_len(list_of_list):
return max(map(len, list_of_list))
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='pre', truncating='pre', value=0.):
'''
FROM KERAS
Pads each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
# Arguments
sequences: list of lists where each element is a sequence
maxlen: int, maximum length
dtype: type to cast the resulting sequence.
padding: 'pre' or 'post', pad either before or after each sequence.
truncating: 'pre' or 'post', remove values from sequences larger than
maxlen either in the beginning or in the end of the sequence
        value: float, padding value used to fill the sequences.
# Returns
x: numpy array with dimensions (number_of_sequences, maxlen)
'''
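    # Editorial example of the behaviour described above (not in the original):
    #   pad_sequences([[1, 2], [3, 4, 5, 6]], maxlen=3)
    #   -> [[0, 1, 2],    # 'pre' padding inserts zeros at the front
    #       [4, 5, 6]]    # 'pre' truncating drops values from the front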
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = | np.asarray(s) | numpy.asarray |
from pathlib import Path
import click
import numpy as np
import torch
from taskspec.data import Vocab
from taskspec.model import MultiClassModel
from taskspec.utils import Config
def compute_accuracy(x, y):
return torch.mean((x == y).float())
@click.command()
@click.argument('model-dir', type=click.Path(exists=True))
@click.option('--ckpt', type=str, default='best.model')
def main(model_dir, ckpt):
model_dir = Path(model_dir)
config = Config(model_dir / 'config.json')
with open(config.vocab.words) as fwords, open(config.vocab.labels) as flabels:
vocab = Vocab.load(fwords, flabels)
model = MultiClassModel(vocab, config.model)
model.load_state_dict(torch.load(model_dir / ckpt, map_location='cpu'))
vecs = | np.array(model._embed.weight.data) | numpy.array |
import torch
import torchvision
import numpy as np
from PIL import Image
import seaborn as sns
import matplotlib.pyplot as plt
from torchvision.transforms import Compose, Resize, ToTensor, transforms, functional as TF
MEAN = np.array([0.48145466, 0.4578275, 0.40821073]).reshape(-1, 1, 1)
STD = np.array([0.26862954, 0.26130258, 0.27577711]).reshape(-1, 1, 1)
def get_image_grid(images):
# preprocess images
image_size = (224, 224)
image_preprocess = Compose([
Resize(image_size, interpolation=Image.BICUBIC),
ToTensor()
])
images = [image_preprocess(img) for img in images]
# stack into a grid and return
image_stack = torch.tensor(np.stack(images))
image_grid = torchvision.utils.make_grid(image_stack, nrow=5)
transform = transforms.ToPILImage()
image_grid = transform(image_grid)
return image_grid
def get_similarity_heatmap(scores, images, text, transpose_flag):
count_images = len(images)
count_text = len(text)
scores = np.round(scores, 2)
scores = scores.T if transpose_flag else scores
# create the figure
fig = plt.figure()
for i, image in enumerate(images):
plt.imshow( | np.asarray(image) | numpy.asarray |
import os
import copy
import math
import numpy as np
import gurobipy as gp
from gurobipy import GRB
def save_checkpoint(model, where):
try:
model_check_point = np.array([abs(var.x) for var in model.getVars()])
np.save(os.path.join("sol", model.ModelName), model_check_point)
except:
pass
class Model:
def __init__(self, model_name, input, output, problem_cnt):
print("Creating model: {}".format(model_name))
self.m = gp.Model(name=model_name)
self.problem = problem_cnt.split(".")[0]
self.data = copy.deepcopy(input)
self.L_cnt = len(self.data.sets.L)
self.N_cnt = len(self.data.sets.N)
self.M_cnt = max(self.data.sets.M)
self.T_cnt = self.data.parameters.T
self.output = copy.deepcopy(output)
def __cal_obj_numpy(self, x):
return (
np.max(np.max(x + self.data.parameters.D - 1, axis=1))
+ np.sum(
self.data.parameters.W * np.max(x + self.data.parameters.D - 1, axis=1)
)
* self.window
)
def __get_sol_result_params(self, path):
try:
saved_model_params = np.load(path)
x_saved = np.empty((self.N_cnt, self.M_cnt)).astype("int")
y_saved = np.zeros((self.N_cnt, self.M_cnt, self.T_cnt)).astype("int")
tmp_T_cnt = (
int(
(len(saved_model_params) - (1 + self.N_cnt))
/ self.N_cnt
/ self.M_cnt
)
- 1
)
npy_idx = 1 + self.N_cnt
for n in range(self.N_cnt):
for m in range(self.M_cnt):
x_saved[n][m] = saved_model_params[npy_idx]
npy_idx += 1
for n in range(self.N_cnt):
for m in range(self.M_cnt):
for t in range(tmp_T_cnt):
y_saved[n][m][t] = saved_model_params[npy_idx]
npy_idx += 1
return x_saved, y_saved
except:
return None, None
def gen_operations_order(self, problem_prefix):
x_saved, _ = self.__get_sol_result_params(
os.path.join("sol", "{}.sol.npy".format(problem_prefix))
)
results = []
for n in range(self.N_cnt):
for m in range(self.M_cnt):
if self.data.parameters.S[n][m]:
results.append(
[
n,
m,
x_saved[n][m],
self.data.parameters.S[n][m],
self.data.parameters.D[n][m],
[],
]
)
return results
def pre_solve(self, window, sort_num=1):
print("Running presolve...")
print("window = {}".format(window))
self.window = window
self.data.parameters.D = (
np.ceil( | np.array(self.data.parameters.D) | numpy.array |
################################################################################
# SSGP: Sparse Spectrum Gaussian Process
# Github: https://github.com/MaxInGaussian/SSGP
# Author: <NAME> (<EMAIL>)
################################################################################
import math
import random
import numpy as np
import scipy.linalg as la
from .SMORMS3 import SMORMS3
class SSGP(object):
""" Sparse Spectrum Gaussian Process """
hashed_name = ""
m, n, d = -1, -1, -1
freq_noisy = True
y_noise, sigma, lengthscales, S = None, None, None, None
X_train, y_train = None, None
X_valid, y_valid = None, None
X_scaler, y_scaler = None, None
# ADDED [!]
nmse, mnlp = None, None
def __init__(self, m=-1, freq_noisy=True):
self.m = m
self.freq_noisy = freq_noisy
self.hashed_name = random.choice("ABCDEF")+str(hash(self)&0xffff)
def transform(self, X=None, y=None):
_X, _y = None, None
if(X is not None):
_X = 3.*(X-self.X_scaler[0])/self.X_scaler[1]
if(y is not None):
_y = (y-self.y_scaler[0])/self.y_scaler[1]
return _X, _y
def inverse_transform(self, X=None, y=None):
_X, _y = None, None
if(X is not None):
_X = X/3.*self.X_scaler[1]+self.X_scaler[0]
if(y is not None):
_y = y*self.y_scaler[1]+self.y_scaler[0]
return _X, _y
def init_params(self, rand_num=100):
if(self.freq_noisy):
log_y_noise = np.random.randn(self.m)*1e-1
else:
log_y_noise = np.random.randn(1)*1e-1
log_sigma = np.random.randn(1)*1e-1
ranges = np.max(self.X_train, 0)-np.min(self.X_train, 0)
log_lengthscales = np.log(ranges/2.)
best_nlml = np.Infinity
best_rand_params = np.zeros(self.d+1+self.m*(1+self.d))
kern_params = np.concatenate((log_y_noise, log_sigma, log_lengthscales))
for _ in range(rand_num):
spectrum_params = np.random.randn(self.m*self.d)
rand_params = np.concatenate((kern_params, spectrum_params))
self.set_params(rand_params)
nlml = self.get_nlml()
if(nlml < best_nlml):
best_nlml = nlml
best_rand_params = rand_params
self.set_params(best_rand_params)
def get_params(self):
sn = 1
if(self.freq_noisy):
sn = self.m
params = np.zeros(self.d+1+self.m*self.d+sn)
params[:sn] = np.log(self.y_noise)/2.
params[sn] = np.log(self.sigma)/2.
log_lengthscales = np.log(self.lengthscales)
params[sn+1:sn+self.d+1] = log_lengthscales
spectrum = self.S*np.tile(self.lengthscales[None, :], (self.m, 1))
params[sn+self.d+1:] = np.reshape(spectrum, (self.m*self.d,))
return params
def set_params(self, params):
sn = 1
if(self.freq_noisy):
sn = self.m
self.y_noise = np.exp(2*params[:sn])
self.sigma = np.exp(2*params[sn])
self.lengthscales = np.exp(params[sn+1:sn+self.d+1])
self.S = np.reshape(params[sn+self.d+1:], (self.m, self.d))
self.S /= np.tile(self.lengthscales[None, :], (self.m, 1))
self.Phi = self.X_train.dot(self.S.T)
cosX = np.cos(self.Phi)
sinX = np.sin(self.Phi)
self.Phi = np.concatenate((cosX, sinX), axis=1)
A = self.sigma/self.m*self.Phi.T.dot(self.Phi)
if(self.freq_noisy):
noise_diag = np.diag(np.concatenate((self.y_noise, self.y_noise)))
else:
noise_diag = np.double(self.y_noise)*np.eye(2*self.m)
self.R = la.cho_factor(A+noise_diag)[0]
self.PhiRi = la.solve_triangular(self.R, self.Phi.T, trans=1).T
self.RtiPhit = self.PhiRi.T
self.Rtiphity = self.RtiPhit.dot(self.y_train)
self.alpha = la.solve_triangular(self.R, self.Rtiphity)
self.alpha *= self.sigma/self.m
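    # Editorial note (not in the original): set_params builds the sparse spectrum
    # feature map Phi(x) = [cos(x S^T), sin(x S^T)] from the m spectral points S,
    # so the kernel is approximated by k(x, x') ~= (sigma / m) * Phi(x) . Phi(x');
    # R is the Cholesky factor of A + noise and alpha holds the posterior weights.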
def get_nlml(self):
sn = self.m
if(self.freq_noisy):
sn = 1
L1 = np.sum(self.y_train**2)-self.sigma/self.m*np.sum(self.Rtiphity**2.)
L2 = np.sum(np.log(np.diag(self.R)))
L3 = self.n/2*np.log(np.mean(self.y_noise))
L3 -= np.sum(np.log(self.y_noise))*sn
L4 = self.n/2* | np.log(2*np.pi) | numpy.log |
import numpy as np
def max_triangle_1d(width, time_stamps, heights):
"""
:param float width:
:param np.ndarray time_stamps:
:param np.ndarray heights:
"""
distances = np.outer(time_stamps, np.ones(shape=[len(time_stamps)], dtype=np.float32))
distances = np.abs(distances - time_stamps)
effective_distances = width - distances
effective_distances[effective_distances < 0] = 0.0
gain = heights / width
values = (effective_distances * gain).sum(axis=1)
highest_idx = np.argmax(values)
highest_value = values[highest_idx]
return int(highest_idx), highest_value
def max_triangle_2d(width, time_stamps, heights, lens):
"""
:param float width:
:param np.ndarray time_stamps: shape=[n, m], dtype=np.float32
:param np.ndarray heights: shape=[n], dtype=np.float32
:param np.ndarray lens: shape=[n], dtype=np.int32
"""
n, m = time_stamps.shape
flatten_time_stamps = time_stamps.reshape(n * m)
distances = np.outer(flatten_time_stamps, np.ones(shape=[n*m], dtype=np.float32))
distances = np.abs(distances - flatten_time_stamps)
distances = width - distances
distances[distances < 0] = 0.0
# shape=[n1, m1, n2, m2]
distances = distances.reshape(n, m, n, m)
# shape=[n1, m1, m2, n2]
distances = distances.transpose([0, 1, 3, 2])
gain = heights / width
distances = distances * gain
# shape=[n1, m1, n2, m2]
distances = distances.transpose([0, 1, 3, 2])
# shape=[m, n]
len_mask = np.outer(np.arange(m, dtype=np.int32), np.ones(shape=[n], dtype=np.int32))
len_mask = len_mask < lens
# shape=[n, m]
len_mask = len_mask.transpose([1, 0])
distances[:, :, ~len_mask] = 0.0
# shape=[n1, m1, n2]
distances_each = np.max(distances, axis=3)
# shape=[n1, m1]
distances_sum = distances_each.sum(axis=2)
distances_sum[~len_mask] = 0.0
# shape=[n1 * m1]
distances_sum = distances_sum.reshape(n * m)
max_idx = np.argmax(distances_sum)
max_value = distances_sum[max_idx]
max_idx1 = max_idx // m
max_idx2 = max_idx % m
# shape=[n2, m2]
distances_sub_mat = distances[max_idx1, max_idx2]
choices = distances_sub_mat.argmax(axis=1)
choice_mask = distances_sub_mat.max(axis=1) <= 0.0
choices[choice_mask] = -1
return [max_idx1, max_idx2], max_value, choices
def concat_pad_mat(a, pad=0):
"""
:param list[np.ndarray] a:
:param float pad:
:rtype: np.ndarray
"""
max_len = max([len(item) for item in a])
n = len(a)
rst = | np.full(shape=[n, max_len], fill_value=pad, dtype=a[0].dtype) | numpy.full |
#!/usr/bin/env python
#
# This file is part of the Emotions project. The complete source code is
# available at https://github.com/luigivieira/emotions.
#
# Copyright (c) 2016-2017, <NAME> (http://www.luiz.vieira.nom.br)
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import cv2
from collections import OrderedDict
import numpy as np
#=============================================
class FaceData:
"""
Represents the data of a face detected on an image.
"""
_jawLine = [i for i in range(17)]
"""
Indexes of the landmarks at the jaw line.
"""
_rightEyebrow = [i for i in range(17,22)]
"""
Indexes of the landmarks at the right eyebrow.
"""
_leftEyebrow = [i for i in range(22,27)]
"""
Indexes of the landmarks at the left eyebrow.
"""
_noseBridge = [i for i in range(27,31)]
"""
Indexes of the landmarks at the nose bridge.
"""
_lowerNose = [i for i in range(30,36)]
"""
Indexes of the landmarks at the lower nose.
"""
_rightEye = [i for i in range(36,42)]
"""
Indexes of the landmarks at the right eye.
"""
_leftEye = [i for i in range(42,48)]
"""
Indexes of the landmarks at the left eye.
"""
_outerLip = [i for i in range(48,60)]
"""
Indexes of the landmarks at the outer lip.
"""
_innerLip = [i for i in range(60,68)]
"""
Indexes of the landmarks at the inner lip.
"""
#---------------------------------------------
def __init__(self, region = (0, 0, 0, 0),
landmarks = [0 for i in range(136)]):
"""
Class constructor.
Parameters
----------
region: tuple
Left, top, right and bottom coordinates of the region where the face
is located in the image used for detection. The default is all 0's.
landmarks: list
List of x, y coordinates of the 68 facial landmarks in the image
used for detection. The default is all 0's.
"""
self.region = region
"""
Region where the face is found in the image used for detection. This is
a tuple of int values describing the region in terms of the top-left and
bottom-right coordinates where the face is located.
"""
self.landmarks = landmarks
"""
Coordinates of the landmarks on the image. This is a numpy array of
pair of values describing the x and y positions of each of the 68 facial
landmarks.
"""
#---------------------------------------------
def copy(self):
"""
Deep copies the data of the face.
Deep copying means that no mutable attribute (like tuples or lists) in
the new copy will be shared with this instance. In that way, the two
copies can be changed independently.
Returns
-------
ret: FaceData
New instance of the FaceDate class deep copied from this instance.
"""
return FaceData(self.region, self.landmarks.copy())
#---------------------------------------------
def isEmpty(self):
"""
Check if the FaceData object is empty.
An empty FaceData object have region and landmarks with all 0's.
Returns
------
response: bool
Indication on whether this object is empty.
"""
return all(v == 0 for v in self.region) or \
all(vx == 0 and vy == 0 for vx, vy in self.landmarks)
#---------------------------------------------
def crop(self, image):
"""
Crops the given image according to this instance's region and landmarks.
This function creates a subregion of the original image according to the
face region coordinates, and also a new instance of FaceDate object with
the region and landmarks adjusted to the cropped image.
Parameters
----------
image: numpy.array
Image that contains the face.
Returns
-------
croppedImage: numpy.array
Subregion in the original image that contains only the face. This
image is shared with the original image (i.e. its data is not
copied, and changes to either the original image or this subimage
will affect both instances).
croppedFace: FaceData
New instance of FaceData with the face region and landmarks adjusted
to the croppedImage.
"""
left = self.region[0]
top = self.region[1]
right = self.region[2]
bottom = self.region[3]
croppedImage = image[top:bottom+1, left:right+1]
croppedFace = self.copy()
croppedFace.region = (0, 0, right - left, bottom - top)
croppedFace.landmarks = [[p[0]-left, p[1]-top] for p in self.landmarks]
return croppedImage, croppedFace
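    # Editorial usage sketch (hypothetical variables, not in the original):
    #   croppedImage, croppedFace = face.crop(image)
    # croppedFace.region is rebased to start at (0, 0) and the landmark
    # coordinates are shifted into the cropped frame.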
#---------------------------------------------
def draw(self, image, drawRegion = None, drawFaceModel = None):
"""
Draws the face data over the given image.
This method draws the facial landmarks (in red) to the image. It can
also draw the region where the face was detected (in blue) and the face
model used by dlib to do the prediction (i.e., the connections between
the landmarks, in magenta). This drawing is useful for visual inspection
of the data - and it is fun! :)
Parameters
------
image: numpy.array
Image data where to draw the face data.
drawRegion: bool
Optional value indicating if the region area should also be drawn.
The default is True.
drawFaceModel: bool
Optional value indicating if the face model should also be drawn.
The default is True.
Returns
------
drawnImage: numpy.array
Image data with the original image received plus the face data
drawn. If this instance of Face is empty (i.e. it has no region
and no landmarks), the original image is simply returned with
nothing drawn on it.
"""
if self.isEmpty():
raise RuntimeError('Can not draw the contents of an empty '
'FaceData object')
# Check default arguments
if drawRegion is None:
drawRegion = True
if drawFaceModel is None:
drawFaceModel = True
# Draw the region if requested
if drawRegion:
cv2.rectangle(image, (self.region[0], self.region[1]),
(self.region[2], self.region[3]),
(0, 0, 255), 2)
# Draw the positions of landmarks
color = (0, 255, 255)
for i in range(68):
cv2.circle(image, tuple(self.landmarks[i]), 1, color, 2)
# Draw the face model if requested
if drawFaceModel:
c = (0, 255, 255)
p = | np.array(self.landmarks) | numpy.array |
import numpy as np
import math
def get_square(hashdot):
l = [
list(x) for x in (
row.replace('#', '1').replace('.', '0')
for row in hashdot.strip().split('/')
)
]
return np.array(l, int)
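# Editorial worked example (not in the original):
#   get_square('#./..') -> array([[1, 0],
#                                 [0, 0]])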
def breakup(square):
height = square.shape[0]
if height != 2 and (height % 2) == 0:
nrows = 2
ncols = 2
elif height != 3 and (height % 3) == 0:
nrows = 3
ncols = 3
else:
return square
return (square.reshape(height // nrows, nrows, -1, ncols)
.swapaxes(1,2)
.reshape(-1, nrows, ncols))
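# Editorial sketch (not in the original): a 4x4 grid splits row-major into 2x2 blocks,
#   breakup(np.arange(16).reshape(4, 4)).shape == (4, 2, 2)
#   # first block: [[0, 1], [4, 5]]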
def get_rule_variations(rule):
if ( | np.count_nonzero(rule[0]) | numpy.count_nonzero |
import numpy as np
import healpy as hp
from plancklens import utils as ut, utils_spin as uspin
class qeleg:
def __init__(self, spin_in, spin_out, cl):
self.spin_in = spin_in
self.spin_ou = spin_out
self.cl = cl
def __eq__(self, leg):
        if self.spin_in != leg.spin_in or self.spin_ou != leg.spin_ou or self.get_lmax() != leg.get_lmax():
return False
return | np.all(self.cl == leg.cl) | numpy.all |
# coding=utf-8
import argparse
import os
import re
import _pickle as cpickle
import numpy as np
from nltk import ngrams, sent_tokenize, word_tokenize
from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction
from collections import Counter
from nltk import bigrams, FreqDist
from tqdm import tqdm
from math import inf
def _response_tokenize(response):
"""
    Function: tokenize each response
Return: [token1, token2, ......]
"""
response_tokens = []
# valid_tokens = set(word2vec.keys())
for token in response.strip().split(' '):
# if token in valid_tokens:
response_tokens.append(token)
# response_tokens = ["__"+token for token in response_tokens]
return response_tokens
def _response_tokenize_reduce_stopwords(response):
from nltk.corpus import stopwords
response_tokens = []
for token in response.strip().split(' '):
if token not in set(stopwords.words('english')):
response_tokens.append(token)
return response_tokens
class NormalMetrics():
def __init__(self, file_path, vocab, word2vec, model_path):
"""
Function: 初始化以下变量
contexts: [context1, context2, ...]
true_responses: [true_response1, true_response2, ...]
gen_responses: [gen_response1, gen_response2, ...]
"""
self.vocab = vocab
self.word2vec = word2vec
self.model_path = model_path
contexts, true_responses, generate_responses = \
self._extract_data(file_path)
self._stasticAndcleanData([contexts, true_responses, generate_responses])
def _extract_data(self, path):
true_responses = []
generate_responses = []
contexts = []
with open(path, 'r', encoding='utf-8') as f:
sentences = f.readlines()
for i in range(len(sentences)):
if sentences[i] == 'sample:\n':
contexts.append(sentences[i + 1].rstrip('\n'))
true_responses.append(sentences[i + 2].rstrip('\n'))
generate_responses.append(sentences[i + 3].rstrip('\n'))
else:
pass
return contexts, true_responses, generate_responses
def _stasticAndcleanData(self, data):
[contexts, true_responses, generated_responses] = data
data_count = len(contexts)
tmp1 = []
tmp2 = []
tmp3 = []
for context, true_response, gen_response in zip(contexts, true_responses,
generated_responses):
if (len(_response_tokenize(true_response)) != 0 and
len(_response_tokenize(gen_response)) > 1 and
len(_response_tokenize(context.replace(' EOT ', ' '))) != 0):
tmp1.append(true_response)
tmp2.append(gen_response)
tmp3.append(context)
self.true_responses = tmp1
self.gen_responses = tmp2
self.contexts = tmp3
valid_data_count = len(self.contexts)
average_len_in_contexts = sum([len(_response_tokenize(sentence))
for sentence in self.contexts]) / valid_data_count
average_len_in_true_response = sum([len(_response_tokenize(sentence))
for sentence in self.true_responses]) / valid_data_count
average_len_in_generated_response = sum([len(_response_tokenize(sentence))
for sentence in self.gen_responses]) / valid_data_count
self.datamsg = [data_count, valid_data_count,
average_len_in_contexts, average_len_in_true_response,
average_len_in_generated_response]
def _consine(self, v1, v2):
"""
        Function: compute the cosine similarity between two vectors
        Return: the cosine similarity
"""
return np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
def get_dp_gan_metrics(self, mode='gen_response'):
"""
        Function: count the token_gram, unigram, bigram, trigram and sent_gram
        items of all true_responses / gen_responses
        Return: the counts of token_gram, unigram, bigram, trigram and sent_gram
"""
if mode == 'true_response':
responses = self.true_responses
else:
responses = self.gen_responses
token_gram = []
unigram = []
bigram = []
trigram = []
sent_gram = []
for response in responses:
tokens = _response_tokenize(response)
token_gram.extend(tokens)
unigram.extend([element for element in ngrams(tokens, 1)])
bigram.extend([element for element in ngrams(tokens, 2)])
trigram.extend([element for element in ngrams(tokens, 3)])
sent_gram.append(response)
return len(token_gram), len(set(unigram)), len(set(bigram)), \
len(set(trigram)), len(set(sent_gram))
def get_distinct(self, n, mode='gen_responses'):
"""
        Function: compute the n-gram type-token ratio over all true_responses / gen_responses
Return: ngrams-based type-token ratio
"""
ngrams_list = []
if mode == 'true_responses':
responses = self.true_responses
else:
responses = self.gen_responses
for response in responses:
tokens = _response_tokenize(response)
ngrams_list.extend([element for element in ngrams(tokens, n)])
if len(ngrams_list) == 0:
return 0
else:
return len(set(ngrams_list)) / len(ngrams_list)
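    # Editorial example (not in the original): if gen_responses held the single
    # response "the cat the cat", distinct-1 would be 2 unique / 4 total = 0.5
    # and distinct-2 would be 2 / 3.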
def get_batch_distinct(self, n, batch_size, mode='gen_responses'):
"""
        Function: compute the per-batch n-gram type-token ratio of true_responses / gen_responses
Return: ngrams-based type-token ratio
"""
ngrams_list = []
if mode == 'true_responses':
responses = self.true_responses
else:
responses = self.gen_responses
batch_distinct = []
for idx, response in enumerate(responses):
if idx and idx%batch_size == 0:
if len(ngrams_list) == 0:
batch_distinct.append(0)
else:
batch_distinct.append(len(set(ngrams_list)) / len(ngrams_list))
ngrams_list = []
tokens = _response_tokenize(response)
ngrams_list.extend([element for element in ngrams(tokens, n)])
if len(batch_distinct) == 0:
return 0
else:
return sum(batch_distinct) / len(batch_distinct)
def get_response_length(self):
""" Reference:
        1. paper: <NAME> et al., A Deep Reinforcement Learning Chatbot
"""
response_lengths = []
for gen_response in self.gen_responses:
response_lengths.append(len(_response_tokenize(gen_response)))
if len(response_lengths) == 0:
return 0
else:
return sum(response_lengths) / len(response_lengths)
def get_bleu(self, n_gram):
"""
        Function: compute the n-gram BLEU between all true_responses and gen_responses
parameters:
n_gram : calculate BLEU-n,
calculate the cumulative 4-gram BLEU score, also called BLEU-4.
The weights for the BLEU-4 are 1/4 (25%) or 0.25 for each of the 1-gram, 2-gram, 3-gram and 4-gram scores.
Reference:
1. https://machinelearningmastery.com/calculate-bleu-score-for-text-python/
2. https://cloud.tencent.com/developer/article/1042161
Return: bleu score BLEU-n
"""
weights = {1: (1.0, 0.0, 0.0, 0.0),
2: (1 / 2, 1 / 2, 0.0, 0.0),
3: (1 / 3, 1 / 3, 1 / 3, 0.0),
4: (1 / 4, 1 / 4, 1 / 4, 1 / 4)}
total_score = []
for true_response, gen_response in zip(self.true_responses, self.gen_responses):
score = sentence_bleu(
[_response_tokenize(true_response)],
_response_tokenize(gen_response),
weights[n_gram],
smoothing_function=SmoothingFunction().method7)
total_score.append(score)
if len(total_score) == 0:
return 0
else:
return sum(total_score) / len(total_score)
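    # Editorial note (not in the original): get_bleu(n) averages sentence-level
    # cumulative BLEU-n (equal weights over 1..n-grams, method7 smoothing) across
    # all (true_response, gen_response) pairs.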
def get_greedy_matching(self):
"""
        Function: compute the greedy matching score between all true_responses and gen_responses
        Return: greedy_matching
"""
model = self.word2vec
total_cosine = []
for true_response, gen_response in zip(self.true_responses, self.gen_responses):
true_response_token_wv = np.array([model[item] for item in
_response_tokenize(true_response)])
gen_response_token_wv = np.array([model[item] for item in
_response_tokenize(gen_response)])
true_gen_cosine = np.array([[self._consine(gen_token_vec, true_token_vec)
for gen_token_vec in gen_response_token_wv] for true_token_vec
in true_response_token_wv])
gen_true_cosine = np.array([[self._consine(true_token_vec, gen_token_vec)
for true_token_vec in true_response_token_wv] for gen_token_vec
in gen_response_token_wv])
true_gen_cosine = np.max(true_gen_cosine, 1)
gen_true_cosine = np.max(gen_true_cosine, 1)
cosine = (np.sum(true_gen_cosine) / len(true_gen_cosine) + np.sum(gen_true_cosine) / len(
gen_true_cosine)) / 2
total_cosine.append(cosine)
if len(total_cosine) == 0:
return 0
else:
return sum(total_cosine) / len(total_cosine)
def get_embedding_average(self):
model = self.word2vec
total_cosine = []
for true_response, gen_response in zip(self.true_responses, self.gen_responses):
true_response_token_wv = np.array([model[item] for item in
_response_tokenize(true_response)])
gen_response_token_wv = np.array([model[item] for item in
_response_tokenize(gen_response)])
true_response_sentence_wv = np.sum(true_response_token_wv, 0)
gen_response_sentence_wv = np.sum(gen_response_token_wv, 0)
true_response_sentence_wv = true_response_sentence_wv / np.linalg.norm(true_response_sentence_wv)
gen_response_sentence_wv = gen_response_sentence_wv / np.linalg.norm(gen_response_sentence_wv)
cosine = self._consine(true_response_sentence_wv,
gen_response_sentence_wv)
total_cosine.append(cosine)
if len(total_cosine) == 0:
return 0
else:
return sum(total_cosine) / len(total_cosine)
def get_vector_extrema(self):
model = self.word2vec
total_cosine = []
for true_response, gen_response in zip(self.true_responses, self.gen_responses):
true_response_token_wv = np.array([model[item] for item in
_response_tokenize(true_response)])
gen_response_token_wv = np.array([model[item] for item in
_response_tokenize(gen_response)])
true_sent_max_vec = np.max(true_response_token_wv, 0)
true_sent_min_vec = np.min(true_response_token_wv, 0)
true_sent_vec = []
for max_dim, min_dim in zip(true_sent_max_vec, true_sent_min_vec):
if max_dim > | np.abs(min_dim) | numpy.abs |
from __future__ import print_function
import numpy as np
from scipy.io import FortranFile
from scipy.interpolate import griddata
import os
import warnings
np.seterr(all='warn')
def progenitor_probability(density=None, sfr=None, mass=None, redshift=None):
"""
Return the progenitor fraction for input values.
>>> progenitor_probability(redshift=0.4, mass=10.8)
0.266751184855
"""
density = np.nan if density is None else density
sfr = np.nan if sfr is None else sfr
mass = np.nan if mass is None else mass
redshift = np.nan if redshift is None else redshift
values = [density, sfr, mass, redshift]
if values.count(np.nan) > 3:
raise ValueError('Incorrect number of arguments')
# Read datacube
f = FortranFile(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'fractions.dat'))
dims = f.read_record(dtype=np.int32)
data = f.read_record(dtype=np.float32)
data_size = np.product(dims)
n_galaxies = np.reshape(data[0:data_size], dims, order='F')
n_spiral_progenitors = np.reshape(data[data_size:2*data_size], dims, order='F')
bins = np.stack([np.reshape(f.read_record(dtype=np.float32), dims, order='F') for _ in range(dims.size)], axis=0)
# Marginalise over dimensions that are not specified
while np.nan in values:
i = values.index(np.nan)
dims = np.delete(dims, i)
values.pop(i)
weights = n_galaxies
n_galaxies = | np.sum(n_galaxies, axis=i) | numpy.sum |
import numpy as np
import warnings
import scipy.optimize as op
pi = np.pi
#####
__all__ = ["H", "D", "C", "Cmax"]
def H(p, normalize_output=True):
"""
Calculates Shannon information (in nats) from a probability vector.
Parameters
----------
p : array-like
vector of probabilities; will be normalized if not done so already
normalize_output: bool
boolean flag to normalize output to range (0,1); default=True
Returns
-------
Hout : Shannon information
"""
# check probabilities normalization
if np.isclose(np.sum(p),1.0) != True:
warnings.warn('Input probability vector was not normalized...fixing automatically')
p = p/ | np.sum(p) | numpy.sum |
def test_add():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = cle.push(np.asarray([[4, 5, 6]]))
reference = cle.push(np.asarray([[5, 7, 9]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_add_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[6, 7, 8]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_add_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[6, 4, -6]]))
output = input1 + input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_iadd():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = cle.push(np.asarray([[4, 5, 6]]))
reference = cle.push(np.asarray([[5, 7, 9]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_iadd_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[1, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[6, 7, 8]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_iadd_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[6, 4, -6]]))
input1 += input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_subtract():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = cle.push(np.asarray([[1, 5, 6]]))
reference = cle.push(np.asarray([[3, -3, -3]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_subtract_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[-1, -3, -2]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_subtract_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = np.asarray([[1, 5, 6]])
reference = cle.push(np.asarray([[3, -3, -3]]))
output = input1 - input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_isubtract():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = cle.push(np.asarray([[1, 5, 6]]))
reference = cle.push(np.asarray([[3, -3, -3]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_isubtract_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = 5
reference = cle.push(np.asarray([[-1, -3, -2]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_isubtract_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, 3]]))
input2 = np.asarray([[1, 5, 6]])
reference = cle.push(np.asarray([[3, -3, -3]]))
input1 -= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_divide():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_divide_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_divide_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[2, 1, -4]]))
output = input1 / input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_idivide():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_idivide_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_idivide_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[2, 1, -4]]))
input1 /= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_multiply():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_multiply_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_multiply_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[8, 4, -16]]))
output = input1 * input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_imultiply():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_imultiply_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_imultiply_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[8, 4, -16]]))
input1 *= input2
result = cle.pull(input1)
print(result)
assert np.array_equal(result, reference)
def test_gt():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[1, 0, 0]]))
output = input1 > input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_gt_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[1, 0, 0]]))
output = input1 > input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_gt_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[1, 0, 0]]))
output = input1 > input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ge():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[1, 1, 0]]))
output = input1 >= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ge_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[1, 1, 0]]))
output = input1 >= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ge_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[1, 1, 0]]))
output = input1 >= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_lt():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[0, 0, 1]]))
output = input1 < input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_lt_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[0, 0, 1]]))
output = input1 < input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_lt_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[0, 0, 1]]))
output = input1 < input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_le():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[0, 1, 1]]))
output = input1 <= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_le_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[0, 1, 1]]))
output = input1 <= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_le_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[0, 1, 1]]))
output = input1 <= input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_eq():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[0, 1, 0]]))
output = input1 == input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_eq_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[0, 1, 0]]))
output = input1 == input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_eq_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[0, 1, 0]]))
output = input1 == input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ne():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = cle.push(np.asarray([[2, 2, 2]]))
reference = cle.push(np.asarray([[1, 0, 1]]))
output = input1 != input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ne_with_scalar():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = 2
reference = cle.push(np.asarray([[1, 0, 1]]))
output = input1 != input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_ne_with_np():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
input2 = np.asarray([[2, 2, 2]])
reference = cle.push(np.asarray([[1, 0, 1]]))
output = input1 != input2
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_pos():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
reference = cle.push(np.asarray([[4, 2, -8]]))
output = +input1
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_neg():
import numpy as np
import pyclesperanto_prototype as cle
input1 = cle.push(np.asarray([[4, 2, -8]]))
reference = cle.push(np.asarray([[-4, -2, 8]]))
output = -input1
result = cle.pull(output)
print(result)
assert np.array_equal(result, reference)
def test_power():
import numpy as np
import pyclesperanto_prototype as cle
    input1 = cle.push(np.asarray([[4, 2, -8]]))
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import pytest
from ..association import Association, AssociationPair, AssociationSet, \
SingleTimeAssociation, TimeRangeAssociation
from ..detection import Detection
from ..time import TimeRange
def test_association():
with pytest.raises(TypeError):
Association()
objects = {Detection(np.array([[1], [2]])),
Detection(np.array([[3], [4]])),
Detection(np.array([[5], [6]]))}
assoc = Association(objects)
assert assoc.objects == objects
def test_associationpair():
with pytest.raises(TypeError):
AssociationPair()
objects = [Detection(np.array([[1], [2]])),
Detection(np.array([[3], [4]])),
               Detection(np.array([[5], [6]]))]
#equations for constraints
import numpy as np
from calcModuleTP import ATransformMatrixTHETA as A_Theta, link2index
from calcModuleTP import ATransformMatrix as A_i
def constraintEquation(r1A, r1B, r2B, r2C, r3C):
constraintVector = np.zeros((6,1))
# Pin joint A
constraintPinA = -r1A
for i in range(np.size(constraintPinA)):
# Equation 1-2
constraintVector[i] = constraintPinA[i]
# Pin joint B
constraintPinB = revolutJoint(r1B, r2B)
for i in range(np.size(constraintPinB)):
# Equation 3-4
constraintVector[i+2] = constraintPinB[i]
# Pin joint C
constraintPinC = revolutJoint(r2C, r3C)
for i in range(np.size(constraintPinC)):
        # Equation 5-6
constraintVector[i+4] = constraintPinC[i]
return constraintVector
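# The revolutJoint helper used above is not defined in this fragment. As a hedged
# sketch of what it presumably computes: a planar pin (revolute) joint forces the
# two bodies to share the joint point, so the constraint is the difference of the
# two global position vectors of that point -- a 2x1 vector that vanishes when the
# joint is satisfied. The name and sign convention below are assumptions.
def _revolut_joint_sketch(r_iP, r_jP):
    return r_iP - r_jP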
def jacobianMatrix(qi, u_bar_1A, u_bar_1B, u_bar_2B, u_bar_2C, u_bar_3C):
    genCoor = np.size(qi)
from numpy import loadtxt
import streamlit as st
import numpy as np
import pandas as pd
import altair as alt
n = 25
particle = ['NO2', 'O3', 'NO', 'CO', 'PM1', 'PM2.5', 'PM10']
def actual_vs_predictedpj():
select_model = st.sidebar.radio(
"Choose Model ?", ('Xgboost', 'Randomforest', 'KNN', 'Linear Regression', 'Lasso'))
select_particle = st.sidebar.radio(
"Choose Particle ?", ('NO2', 'O3', 'NO', 'CO', 'PM2.5', 'PM10'))
if select_particle == 'NO2':
loc = 0
if select_particle == 'O3':
loc = 1
if select_particle == 'NO':
loc = 2
if select_particle == 'CO':
loc = 3
# if select_particle == 'PM1':
# loc = 4
if select_particle == 'PM2.5':
loc = 4
if select_particle == 'PM10':
loc = 5
if select_model == 'Xgboost':
get_xgboost(loc)
if select_model == 'KNN':
get_knn(loc)
if select_model == 'Randomforest':
get_randomforest(loc)
if select_model == 'Linear Regression':
get_linear_regression(loc)
if select_model == 'Lasso':
get_lasso(loc)
def get_knn(loc):
knn_y_test = loadtxt('ModelsPJ/knn_y_test.csv', delimiter=',')
knn_y_test_pred = loadtxt('ModelsPJ/knn_y_test_pred.csv', delimiter=',')
l1 = list()
l1.append(['Y_Actual']*n)
l1.append(np.round(knn_y_test[:n, loc], 9))
l1.append(list(range(1, n+1)))
temp1 = np.array(l1).transpose()
x1 = list(range(1, n+1))
chart_data1 = pd.DataFrame(temp1, x1, columns=['Data', particle[loc], 'x'])
l2 = list()
l2.append(['Y_Predicted']*n)
l2.append(np.round(knn_y_test_pred[:n, loc], 9))
l2.append(list(range(1, n+1)))
temp2 = np.array(l2).transpose()
x2 = list(range(n+1, 2*n+1))
chart_data2 = pd.DataFrame(temp2, x2, columns=['Data', particle[loc], 'x'])
frames = [chart_data1, chart_data2]
results = pd.concat(frames)
chart = alt.Chart(results).mark_line().encode(
x='x',
y=particle[loc],
color='Data',
strokeDash='Data',
).properties(
title='Plot of Actual vs Predicted for KNN model for ' +
particle[loc]+' particle'
)
st.altair_chart(chart, use_container_width=True)
def get_xgboost(loc):
xgboost_y_test = loadtxt('ModelsPJ/xgboost_y_test.csv', delimiter=',')
xgboost_y_test_pred = loadtxt(
'ModelsPJ/xgboost_y_test_pred.csv', delimiter=',')
l1 = list()
l1.append(['Y_Actual']*n)
l1.append(np.round(xgboost_y_test[:n, loc], 9))
l1.append(list(range(1, n+1)))
temp1 = np.array(l1).transpose()
x1 = list(range(1, n+1))
chart_data1 = pd.DataFrame(temp1, x1, columns=['Data', particle[loc], 'X'])
l2 = list()
l2.append(['Y_Predicted']*n)
    l2.append(np.round(xgboost_y_test_pred[:n, loc], 9))
""" Helper methods for loading and parsing KITTI data.
Author: <NAME>, <NAME>
Date: September 2017/2018
https://github.com/kuixu/kitti_object_vis
"""
import numpy as np
cbox = np.array([[0,70],[-40,40],[-2.5,1]])
class Calibration(object):
''' Calibration matrices and utils
3d XYZ in <label>.txt are in rect camera coord.
2d box xy are in image2 coord
Points in <lidar>.bin are in Velodyne coord.
y_image2 = P^2_rect * x_rect
y_image2 = P^2_rect * R0_rect * Tr_velo_to_cam * x_velo
x_ref = Tr_velo_to_cam * x_velo
x_rect = R0_rect * x_ref
P^2_rect = [f^2_u, 0, c^2_u, -f^2_u b^2_x;
0, f^2_v, c^2_v, -f^2_v b^2_y;
0, 0, 1, 0]
= K * [1|t]
image2 coord:
----> x-axis (u)
|
|
v y-axis (v)
velodyne coord:
front x, left y, up z
rect/ref camera coord:
right x, down y, front z
Ref (KITTI paper): http://www.cvlibs.net/publications/Geiger2013IJRR.pdf
TODO(rqi): do matrix multiplication only once for each projection.
'''
def __init__(self, calib_filepath, from_video=False):
calibs = self.read_calib_file(calib_filepath)
# Projection matrix from rect camera coord to image2 coord
self.P = calibs['P2']
self.P = np.reshape(self.P, [3,4])
# Rigid transform from Velodyne coord to reference camera coord
self.V2C = calibs['Tr_velo_to_cam']
self.V2C = np.reshape(self.V2C, [3,4])
self.C2V = inverse_rigid_trans(self.V2C)
# Rotation from reference camera coord to rect camera coord
self.R0 = calibs['R0_rect']
        self.R0 = np.reshape(self.R0, [3,3])
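# Illustrative sketch only (not a method of the original Calibration class):
# projecting one velodyne point into image2 pixel coordinates by following the
# chain documented above, y_image2 = P^2_rect * R0_rect * Tr_velo_to_cam * x_velo.
# The function name and usage are hypothetical.
def _project_velo_point_sketch(calib, pt_velo):
    xyz1 = np.append(pt_velo, 1.0)               # homogeneous velodyne coordinates, shape (4,)
    x_ref = calib.V2C.dot(xyz1)                  # reference camera frame, shape (3,)
    x_rect = calib.R0.dot(x_ref)                 # rectified camera frame, shape (3,)
    uvw = calib.P.dot(np.append(x_rect, 1.0))    # homogeneous image coordinates, shape (3,)
    return uvw[:2] / uvw[2]                      # pixel coordinates (u, v)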
from astropy.cosmology import Planck15
from multiprocessing import Lock, Pool
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from scipy.spatial import cKDTree
class PairMaker(object):
"""Class for computing distance weighted correlations of a reference sample
with known redshift against a sample with unknown redshifts.
Parameters
----------
r_mins : `list` of `float`s
List of bin edge minimums in Mpc.
r_maxes : `list` of `float`s
List of bin edge maximums in Mpc.
z_min : `float`
Minimum redshift of the reference sample to consider.
z_max : `float`
Maximum redshift of the reference sample to consider.
weight_power : `float`
Expected power-law slope of the projected correlation function. Used
for signal matched weighting.
distance_metric : `astropy.cosmology.LambdaCDM.<distance>`
Cosmological distance metric to use for all calculations. Should be
either comoving_distance or angular_diameter_distance. Defaults to
the Planck15 cosmology and comoving metric.
output_pairs : `string`
Name of a directory to write raw pair counts and distances to. Spawns
a multiprocessing child task to write out data.
n_write_proc : `int`
If an output file name is specified, this sets the number of
subprocesses to spawn to write the data to disk.
n_write_clean_up : `int`
        If an output file name is specified, this sets the number of reference
        objects to process before cleaning up the subprocess queue. Controls
        the amount of memory used by the main process.
"""
def __init__(self,
r_mins,
r_maxes,
z_min=0.01,
z_max=5.00,
weight_power=-0.8,
distance_metric=None,
output_pairs=None,
n_write_proc=2,
n_write_clean_up=10000,
n_z_bins=64):
self.r_mins = r_mins
self.r_maxes = r_maxes
        self.r_min = np.min(r_mins)
import sys
import os
sys.path.insert(0, os.path.abspath('..\\diffpy'))
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from scipy.ndimage.interpolation import rotate
from scipy.spatial import ConvexHull
import msds as msds
def anomalous(x, D, alpha, dt):
# The anomalous diffusion model
return 4*D*(x*dt)**alpha
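# Illustrative sketch only (not part of the original module): anomModels below fits
# this model per particle; for a single synthetic MSD profile the fit with curve_fit
# looks like the following. The helper name and the numbers chosen for D, alpha and
# dt are arbitrary example values.
def _example_anomalous_fit(dt=0.1):
    n = np.linspace(1, 100, 100)                     # lag times in frames
    msd = anomalous(n, D=0.5, alpha=0.9, dt=dt)      # synthetic MSD profile
    popt, _ = curve_fit(lambda x, D, a: anomalous(x, D, a, dt), n, msd, p0=[1.0, 1.0])
    return popt                                      # fitted (D, alpha), close to (0.5, 0.9)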
def anomModels(MSD, dt, skips=1):
# Calculates the anomalous diffusion model parameters for a single particle
# from its MSD profile over multiple timespans
# By default, fits the model to all possible MSD profiles of at least 10 frames
def anom(x, D, alpha):
return anomalous(x, D, alpha, dt=dt)
steps = MSD.shape[0] + 1
N = np.linspace(1, steps-1, steps-1)
alphas = np.zeros(steps-1)
    Ds = np.zeros(steps-1)
import numpy as np
from abc import ABC, abstractmethod
from pathlib import Path
import subprocess
import numpy.ma as ma
import scipy.constants as const
from multiprocessing import Pool
from scipy.interpolate import interp1d
from dans_pymodules import Vector2D
import matplotlib.pyplot as plt
# from scipy import meshgrid
from scipy.special import iv as bessel1
from scipy.optimize import root
# import pickle
# import scipy.constants as const
# import numpy as np
# import platform
# import matplotlib.pyplot as plt
# import gc
import datetime
import time
import copy
import os
import sys
import shutil
from matplotlib.patches import Arc as Arc
load_previous = False
# Check if we can connect to a display, if not disable all plotting and windowed stuff (like gmsh)
# TODO: This does not remotely cover all cases!
if "DISPLAY" in os.environ.keys():
x11disp = True
else:
x11disp = False
# --- Try importing BEMPP
HAVE_BEMPP = False
try:
import bempp.api
from bempp.api.shapes.shapes import __generate_grid_from_geo_string as generate_from_string
HAVE_BEMPP = True
except ImportError:
print("Couldn't import BEMPP, no meshing or BEM field calculation will be possible.")
bempp = None
generate_from_string = None
# --- Try importing mpi4py, if it fails, we fall back to single processor
try:
from mpi4py import MPI
COMM = MPI.COMM_WORLD
RANK = COMM.Get_rank()
SIZE = COMM.Get_size()
HOST = MPI.Get_processor_name()
print("Process {} of {} on host {} started!".format(RANK + 1, SIZE, HOST))
sys.stdout.flush()
except ImportError:
MPI = None
COMM = None
RANK = 0
SIZE = 1
import socket
HOST = socket.gethostname()
print("Could not import mpi4py, falling back to single core (and python multiprocessing in some instances)!")
# --- Try importing pythonocc-core
HAVE_OCC = False
try:
from OCC.Extend.DataExchange import read_stl_file
from OCC.Display.SimpleGui import init_display
from OCC.Core.BRepPrimAPI import BRepPrimAPI_MakeBox, BRepPrimAPI_MakeTorus, BRepPrimAPI_MakeSweep
from OCC.Core.BRepTools import breptools_Write
from OCC.Core.BRepBndLib import brepbndlib_Add
from OCC.Core.Bnd import Bnd_Box
from OCC.Core.gp import gp_Pnt, gp_Pnt2d
from OCC.Core.BRepClass3d import BRepClass3d_SolidClassifier
from OCC.Core.TopAbs import TopAbs_ON, TopAbs_OUT, TopAbs_IN
from OCC.Core.GeomAPI import GeomAPI_Interpolate, GeomAPI_PointsToBSpline
from OCC.Core.Geom import Geom_BSplineCurve
from OCC.Core.Geom2d import Geom2d_BSplineCurve
from OCC.Core.TColgp import TColgp_HArray1OfPnt, TColgp_Array1OfPnt
from OCC.Core.TColStd import TColStd_Array1OfInteger, TColStd_Array1OfReal
from OCC.Core.GeomAbs import GeomAbs_C1, GeomAbs_C2, GeomAbs_G1
from OCC.Core.Geom2dAPI import Geom2dAPI_Interpolate, Geom2dAPI_PointsToBSpline
from OCC.Core.TColgp import TColgp_HArray1OfPnt2d, TColgp_Array1OfPnt2d
from OCCUtils.Common import *
from py_electrodes import ElectrodeObject
HAVE_OCC = True
except ImportError:
ElectrodeObject = None
print("Something went wrong during OCC import. No CAD support possible!")
USE_MULTIPROC = True # In case we are not using mpi or only using 1 processor, fall back on multiprocessing
GMSH_EXE = "/home/daniel/src/gmsh-4.0.6-Linux64/bin/gmsh"
# GMSH_EXE = "E:/gmsh4/gmsh.exe"
HAVE_TEMP_FOLDER = False
np.set_printoptions(threshold=10000)
HAVE_GMSH = True
# Quick test if gmsh path is correct
if not Path(GMSH_EXE).is_file():
print("Gmsh path seems to be wrong! No meshing will be possible!")
HAVE_GMSH = False
# For now, everything involving the pymodules with be done on master proc (RANK 0)
if RANK == 0:
from dans_pymodules import *
colors = MyColors()
else:
colors = None
decimals = 12
__author__ = "<NAME>, <NAME>"
__doc__ = """Calculate RFQ fields from loaded cell parameters"""
# Initialize some global constants
amu = const.value("atomic mass constant energy equivalent in MeV")
echarge = const.value("elementary charge")
clight = const.value("speed of light in vacuum")
# Define the axis directions and vane rotations:
X = 0
Y = 1
Z = 2
XYZ = range(3)
AXES = {"X": 0, "Y": 1, "Z": 2}
rot_map = {"yp": 0.0,
"ym": 180.0,
"xp": 270.0,
"xm": 90.0}
class Polygon2D(object):
"""
Simple class to handle polygon operations such as point in polygon or
orientation of rotation (cw or ccw), area, etc.
"""
def add_point(self, p=None):
"""
Append a point to the polygon
"""
if p is not None:
if isinstance(p, tuple) and len(p) == 2:
self.poly.append(p)
else:
print("Error in add_point of Polygon: p is not a 2-tuple!")
else:
print("Error in add_point of Polygon: No p given!")
return 0
def add_polygon(self, poly=None):
"""
Append a polygon object to the end of this polygon
"""
if poly is not None:
if isinstance(poly, Polygon2D):
self.poly.extend(poly.poly)
# if isinstance(poly.poly, list) and len(poly.poly) > 0:
#
# if isinstance(poly.poly[0], tuple) and len(poly.poly[0]) == 2:
# self.poly.extend(poly.poly)
return 0
def area(self):
"""
Calculates the area of the polygon. only works if there are no crossings
Taken from http://paulbourke.net, algorithm written by <NAME>, 1998
If area is positive -> polygon is given clockwise
If area is negative -> polygon is given counter clockwise
"""
area = 0
poly = self.poly
npts = len(poly)
j = npts - 1
i = 0
for _ in poly:
p1 = poly[i]
p2 = poly[j]
area += (p1[0] * p2[1])
area -= p1[1] * p2[0]
j = i
i += 1
area /= 2
return area
def centroid(self):
"""
Calculate the centroid of the polygon
Taken from http://paulbourke.net, algorithm written by <NAME>, 1998
"""
poly = self.poly
npts = len(poly)
x = 0
y = 0
j = npts - 1
i = 0
for _ in poly:
p1 = poly[i]
p2 = poly[j]
f = p1[0] * p2[1] - p2[0] * p1[1]
x += (p1[0] + p2[0]) * f
y += (p1[1] + p2[1]) * f
j = i
i += 1
f = self.area() * 6
return x / f, y / f
def clockwise(self):
"""
Returns True if the polygon points are ordered clockwise
If area is positive -> polygon is given clockwise
If area is negative -> polygon is given counter clockwise
"""
if self.area() > 0:
return True
else:
return False
def closed(self):
"""
Checks whether the polygon is closed (i.e first point == last point)
"""
if self.poly[0] == self.poly[-1]:
return True
else:
return False
def nvertices(self):
"""
Returns the number of vertices in the polygon
"""
return len(self.poly)
def point_in_poly(self, p=None):
"""
Check if a point p (tuple of x,y) is inside the polygon
This is called the "ray casting method": If a ray cast from p crosses
the polygon an even number of times, it's outside, otherwise inside
From: http://www.ariel.com.au/a/python-point-int-poly.html
Note: Points directly on the edge or identical with a vertex are not
considered "inside" the polygon!
"""
if p is None:
return None
poly = self.poly
x = p[0]
y = p[1]
n = len(poly)
inside = False
p1x, p1y = poly[0]
for i in range(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x, p1y = p2x, p2y
return inside
def remove_last(self):
"""
        Remove the last tuple in the polygon
"""
self.poly.pop(-1)
return 0
def reverse(self):
"""
Reverses the ordering of the polygon (from cw to ccw or vice versa)
"""
temp_poly = []
nv = self.nvertices()
for i in range(self.nvertices() - 1, -1, -1):
temp_poly.append(self.poly[i])
self.poly = temp_poly
return temp_poly
def rotate(self, index):
"""
        Rotates the polygon so that the point previously at index 'index'
        becomes the new index 0
"""
if index > self.nvertices() - 1:
return 1
for i in range(index):
self.poly.append(self.poly.pop(0))
return 0
def __init__(self, poly=None):
"""
construct a polygon object
If poly is not specified, an empty polygon is created
if poly is specified, it has to be a list of 2-tuples!
"""
self.poly = []
if poly is not None:
if isinstance(poly, list) and len(poly) > 0:
if isinstance(poly[0], tuple) and len(poly[0]) == 2:
self.poly = poly
def __getitem__(self, index):
return self.poly[index]
def __setitem__(self, index, value):
if isinstance(value, tuple) and len(value) == 2:
self.poly[index] = value
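# Brief illustration (not part of the original module) of the sign convention noted
# in Polygon2D.area(): a unit square listed counter-clockwise has negative signed
# area, so clockwise() is False, and the interior point (0.5, 0.5) is reported as
# inside by point_in_poly().
def _polygon2d_convention_example():
    ccw_square = Polygon2D([(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)])
    assert ccw_square.area() == -1.0
    assert not ccw_square.clockwise()
    assert ccw_square.point_in_poly((0.5, 0.5))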
class PyRFQCell(object):
def __init__(self,
cell_type,
prev_cell=None,
next_cell=None,
debug=False,
**kwargs):
"""
:param cell_type:
STA: Start cell without length (necessary at beginning of RMS if there are no previous cells)
RMS: Radial Matching Section.
NCS: Normal Cell. A regular RFQ cell
TCS: Transition Cell.
DCS: Drift Cell. No modulation.
TRC: Trapezoidal cell (experimental, for re-bunching only!).
:param prev_cell:
:param next_cell:
:param debug:
Keyword Arguments (mostly from Parmteq Output File):
V: Intervane voltage in V
Wsyn: Energy of the synchronous particle in MeV
Sig0T: Transverse zero-current phase advance in degrees per period
Sig0L: Longitudinal zero-current phase advance in degrees per period
A10: Acceleration term [first theta-independent term in expansion]
Phi: Synchronous phase in degrees
a: Minimum radial aperture in m
m: Modulation (dimensionless)
B: Focusing parameter (dimensionless) B = q V lambda^2/(m c^2 r0^2)
L: Cell length in cm
A0: Quadrupole term [first z-independent term in expansion]
RFdef: RF defocusing term
Oct: Octupole term
A1: Duodecapole term [second z-independent term in expansion]
"""
assert cell_type in ["start", "rms", "regular",
"transition", "transition_auto", "drift", "trapezoidal"], \
"cell_type not recognized!"
self._type = cell_type
self._params = {"voltage": None,
"Wsyn": None,
"Sig0T": None,
"Sig0L": None,
"A10": None,
"Phi": None,
"a": None,
"m": None,
"B": None,
"L": None,
"A0": None,
"RFdef": None,
"Oct": None,
"A1": None,
"flip_z": False,
"shift_cell_no": False,
"fillet_radius": None
}
self._prev_cell = prev_cell
self._next_cell = next_cell
self._debug = debug
for key, item in self._params.items():
if key in kwargs.keys():
self._params[key] = kwargs[key]
if self.initialize() != 0:
print("Cell failed self-check! Aborting.")
exit(1)
self._profile_itp = None # Interpolation of the cell profile
def __str__(self):
return "Type: '{}', Aperture: {:.6f}, Modulation: {:.4f}, " \
"Length: {:.6f}, flip: {}, shift: {}".format(self._type,
self._params["a"],
self._params["m"],
self._params["L"],
self._params["flip_z"],
self._params["shift_cell_no"])
@property
def length(self):
return self._params["L"]
@property
def aperture(self):
return self._params["a"]
@property
def avg_radius(self):
return 0.5 * (self._params["a"] + self._params["m"] * self._params["a"])
@property
def cell_type(self):
return self._type
@property
def modulation(self):
return self._params["m"]
@property
def prev_cell(self):
return self._prev_cell
@property
def next_cell(self):
return self._next_cell
def calculate_transition_cell_length(self):
le = self._params["L"]
m = self._params["m"]
a = self._params["a"]
r0 = self.avg_radius
k = np.pi / np.sqrt(3.0) / le
def eta(kk):
return bessel1(0.0, kk * r0) / (3.0 * bessel1(0.0, 3.0 * kk * r0))
def func(kk):
return (bessel1(0.0, kk * m * a) + eta(kk) * bessel1(0.0, 3.0 * kk * m * a)) / \
(bessel1(0.0, kk * a) + eta(kk) * bessel1(0.0, 3.0 * kk * a)) \
+ ((m * a / r0) ** 2.0 - 1.0) / ((a / r0) ** 2.0 - 1.0)
k = root(func, k).x[0]
tcs_length = np.pi / 2.0 / k
print("Transition cell has length {} which is {} * cell length, ".format(tcs_length, tcs_length / le), end="")
assert tcs_length <= le, "Numerical determination of transition cell length " \
"yielded value larger than cell length parameter!"
        if tcs_length < le:
print("the remainder will be filled with a drift.")
return tcs_length
def initialize(self):
# TODO: Refactor this maybe? seems overly complicated...
# Here we check the different cell types for consistency and minimum necessary parameters
if self._type in ["transition", "transition_auto"]:
            assert self.prev_cell is not None, "A transition cell needs a preceding cell."
assert self.prev_cell.cell_type == "regular", "Currently a transition cell must follow a regular cell."
# Aperture:
assert self._params["a"] is not None, "No aperture given for {} cell".format(self._type)
if self._params["a"] == 'auto':
assert self._type in ["drift", "trapezoidal", "transition", "transition_auto"], \
"Unsupported cell type '{}' for auto-aperture".format(self._type)
            assert self.prev_cell is not None, "Need a preceding cell for auto aperture!"
if self.prev_cell.cell_type in ["transition", "transition_auto"]:
self._params["a"] = self.prev_cell.avg_radius
else:
self._params["a"] = self.prev_cell.aperture
self._params["a"] = np.round(self._params["a"], decimals)
# Modulation:
if self._type in ["start", "rms", "drift"]:
self._params["m"] = 1.0
assert self._params["m"] is not None, "No modulation given for {} cell".format(self._type)
if self._params["m"] == 'auto':
assert self._type in ["transition", "transition_auto"], \
"Only transition cell can have 'auto' modulation at the moment!"
self._params["m"] = self.prev_cell.modulation
self._params["m"] = np.round(self._params["m"], decimals)
# Length:
if self._type == "start":
self._params["L"] = 0.0
assert self._params["L"] is not None, "No length given for {} cell".format(self._type)
if self._params["L"] == "auto":
assert self._type == "transition_auto", "Only transition_auto cells allow auto-length!"
            self._params["L"] = self.prev_cell.length  # use preceding cell length L for calculation of L'
self._params["L"] = self.calculate_transition_cell_length()
self._params["L"] = np.round(self._params["L"], decimals)
if self._type == "trapezoidal":
assert self._params["fillet_radius"] is not None, "For 'TRC' cell a fillet radius must be given!"
return 0
def set_prev_cell(self, prev_cell):
assert isinstance(prev_cell, PyRFQCell), "You are trying to set a PyRFQCell with a non-cell object!"
self._prev_cell = prev_cell
def set_next_cell(self, next_cell):
assert isinstance(next_cell, PyRFQCell), "You are trying to set a PyRFQCell with a non-cell object!"
self._next_cell = next_cell
def calculate_profile_rms(self, vane_type, cell_no):
        # Assemble RMS section by finding adjacent RMS cells and getting their apertures
cc = self
pc = cc.prev_cell
rms_cells = [cc]
shift = 0.0
while pc is not None and pc.cell_type == "rms":
rms_cells = [pc] + rms_cells
shift += pc.length
cc = pc
pc = cc.prev_cell
cc = self
nc = cc._next_cell
while nc is not None and nc.cell_type == "rms":
rms_cells = rms_cells + [nc]
cc = nc
nc = cc.next_cell
# Check for starting cell
        assert rms_cells[0].prev_cell is not None, "Cannot assemble RMS section without a preceding cell! " \
                                                   "At the beginning of the RFQ consider using a start (STA) cell."
a = [0.5 * rms_cells[0].prev_cell.aperture * (1.0 + rms_cells[0].prev_cell.modulation)]
z = [0.0]
for _cell in rms_cells:
a.append(_cell.aperture)
z.append(z[-1] + _cell.length)
self._profile_itp = interp1d(np.array(z) - shift, np.array(a), kind='cubic')
return 0
def calculate_profile_transition(self, vane_type, cell_no):
le = self._params["L"]
m = self._params["m"]
a = self._params["a"]
k = np.pi / np.sqrt(3.0) / le # Initial guess
r0 = 0.5 * (a + m * a)
if self.cell_type == "transition_auto":
tcl = le
else:
tcl = self.calculate_transition_cell_length()
z = np.linspace(0.0, le, 200)
idx = np.where(z <= tcl)
        vane = np.ones(z.shape)
"""
test_distance.py
Tests distance module.
Copyright 2018 Spectre Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from unittest.mock import patch
import numpy as np
import spdivik.distance as dist
class IntradistanceCall(NotImplementedError):
pass
class InterdistanceCall(NotImplementedError):
pass
class DummyDistanceMetric(dist.DistanceMetric):
def _intradistance(self, *_):
raise IntradistanceCall(self._intradistance.__name__)
def _interdistance(self, *_):
raise InterdistanceCall(self._interdistance.__name__)
# noinspection PyCallingNonCallable
class DistanceMetricTest(unittest.TestCase):
def setUp(self):
self.metric = DummyDistanceMetric()
self.first = np.array([[1], [2], [3]])
self.second = np.array([[4], [5]])
def test_throws_when_input_is_not_an_array(self):
with self.assertRaises(ValueError):
self.metric([[1]], np.array([[1]]))
with self.assertRaises(ValueError):
self.metric(np.array([[1]]), [[1]])
def test_throws_when_input_is_not_2D(self):
with self.assertRaises(ValueError):
self.metric(np.array([1]), np.array([[1]]))
with self.assertRaises(ValueError):
            self.metric(np.array([[1]]), np.array([1]))
import numpy as np
import matplotlib as mpl
mpl.use("agg", warn=False) # noqa
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics.pairwise
import scipy.cluster.hierarchy as sch
import scipy.sparse as spsp
import scedar.eda as eda
import pytest
class TestSampleDistanceMatrix(object):
"""docstring for TestSampleDistanceMatrix"""
x_3x2 = [[0, 0], [1, 1], [2, 2]]
x_2x4_arr = np.array([[0, 1, 2, 3], [1, 2, 0, 6]])
def test_valid_init(self):
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric='euclidean')
dist_mat = np.array([[0, np.sqrt(2), np.sqrt(8)],
[np.sqrt(2), 0, np.sqrt(2)],
[np.sqrt(8), np.sqrt(2), 0]])
np.testing.assert_allclose(sdm.d, dist_mat)
sdm2 = eda.SampleDistanceMatrix(
self.x_2x4_arr, metric='euclidean', nprocs=5)
sdm2_d1 = np.sqrt(
np.power(self.x_2x4_arr[0] - self.x_2x4_arr[1], 2).sum())
np.testing.assert_allclose(sdm2.d,
np.array([[0, sdm2_d1], [sdm2_d1, 0]]))
sdm3 = eda.SampleDistanceMatrix(
self.x_2x4_arr, metric='correlation', nprocs=5)
sdm3_corr_d = (1 - np.dot(
self.x_2x4_arr[0] - self.x_2x4_arr[0].mean(),
self.x_2x4_arr[1] - self.x_2x4_arr[1].mean()) /
(np.linalg.norm(self.x_2x4_arr[0] - self.x_2x4_arr[0].mean(),
2) *
np.linalg.norm(self.x_2x4_arr[1] - self.x_2x4_arr[1].mean(),
2)))
np.testing.assert_allclose(sdm3.d,
np.array([[0, 0.3618551],
[0.3618551, 0]]))
np.testing.assert_allclose(sdm3.d,
np.array([[0, sdm3_corr_d],
[sdm3_corr_d, 0]]))
sdm4 = eda.SampleDistanceMatrix(self.x_3x2, dist_mat)
sdm5 = eda.SampleDistanceMatrix(
self.x_3x2, dist_mat, metric='euclidean')
sdm5 = eda.SampleDistanceMatrix([[1, 2]], metric='euclidean')
assert sdm5.tsne(n_iter=250).shape == (1, 2)
def test_empty_init(self):
with pytest.raises(ValueError) as excinfo:
eda.SampleDistanceMatrix(np.empty(0), metric='euclidean')
sdm = eda.SampleDistanceMatrix(np.empty((0, 0)), metric='euclidean')
assert len(sdm.sids) == 0
assert len(sdm.fids) == 0
assert sdm._x.shape == (0, 0)
assert sdm._d.shape == (0, 0)
assert sdm._col_sorted_d.shape == (0, 0)
assert sdm._col_argsorted_d.shape == (0, 0)
assert sdm.tsne(n_iter=250).shape == (0, 0)
def test_init_wrong_metric(self):
# when d is None, metric cannot be precomputed
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric='precomputed')
# lazy load d
eda.SampleDistanceMatrix(self.x_3x2, metric='unknown')
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric='unknown').d
eda.SampleDistanceMatrix(self.x_3x2, metric=1)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=1).d
eda.SampleDistanceMatrix(self.x_3x2, metric=1.)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=1.).d
eda.SampleDistanceMatrix(self.x_3x2, metric=('euclidean', ))
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=('euclidean', )).d
eda.SampleDistanceMatrix(self.x_3x2, metric=['euclidean'])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, metric=['euclidean']).d
def test_init_wrong_d_type(self):
d_3x3 = np.array([[0, np.sqrt(2), np.sqrt(8)],
['1a1', 0, np.sqrt(2)],
[np.sqrt(8), np.sqrt(2), 0]])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_3x3)
def test_init_wrong_d_size(self):
d_2x2 = np.array([[0, np.sqrt(2)],
[np.sqrt(2), 0]])
d_2x2 = np.array([[0, np.sqrt(2)],
[np.sqrt(2), 0]])
d_1x6 = np.arange(6)
d_3x2 = np.array([[0, np.sqrt(2)],
[np.sqrt(2), 0],
[1, 2]])
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_2x2)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_3x2)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_3x2)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix(self.x_3x2, d_1x6)
def test_to_classified(self):
sdm = eda.SampleDistanceMatrix(np.arange(100).reshape(50, -1),
metric='euclidean')
# initialize cached results
sdm.tsne_plot()
sdm.pca_plot()
sdm.s_knn_graph(2)
sdm.s_ith_nn_d(1)
sdm.s_ith_nn_ind(1)
labs = [0]*10 + [1]*20 + [0]*10 + [2]*10
slcs = sdm.to_classified(labs)
assert slcs.labs == labs
assert slcs._lazy_load_d is sdm._lazy_load_d
assert slcs._lazy_load_d is not None
assert slcs._metric == sdm._metric
assert slcs._nprocs == sdm._nprocs
assert slcs.sids == sdm.sids
assert slcs.fids == sdm.fids
# tsne
assert slcs._tsne_lut is not None
assert slcs._tsne_lut == sdm._tsne_lut
assert slcs._lazy_load_last_tsne is not None
assert slcs._lazy_load_last_tsne is sdm._lazy_load_last_tsne
# knn
assert slcs._lazy_load_col_sorted_d is not None
assert slcs._lazy_load_col_sorted_d is sdm._lazy_load_col_sorted_d
assert slcs._lazy_load_col_argsorted_d is not None
assert (slcs._lazy_load_col_argsorted_d is
sdm._lazy_load_col_argsorted_d)
assert slcs._knn_ng_lut is not None
assert slcs._knn_ng_lut == sdm._knn_ng_lut
# pca
assert slcs._pca_n_components is not None
assert slcs._lazy_load_skd_pca is not None
assert slcs._lazy_load_pca_x is not None
assert slcs._pca_n_components == sdm._pca_n_components
assert slcs._lazy_load_skd_pca is sdm._lazy_load_skd_pca
assert slcs._lazy_load_pca_x is sdm._lazy_load_pca_x
def test_sort_x_by_d(self):
x1 = np.array([[0, 5, 30, 10],
[1, 5, 30, 10],
[0, 5, 33, 10],
[2, 5, 30, 7],
[2, 5, 30, 9]])
x2 = x1.copy()
opt_inds = eda.HClustTree.sort_x_by_d(
x=x2.T, metric='euclidean', optimal_ordering=True)
assert opt_inds == [2, 3, 1, 0]
np.testing.assert_equal(x1, x2)
x3 = np.array([[0, 0, 30, 10],
[1, 2, 30, 10],
[0, 3, 33, 10],
[2, 4, 30, 7],
[2, 5, 30, 9]])
x4 = x3.copy()
opt_inds = eda.HClustTree.sort_x_by_d(
x=x4.T, metric='euclidean', optimal_ordering=True)
assert opt_inds == [2, 3, 1, 0]
np.testing.assert_equal(x3, x4)
def test_sort_features(self):
x = np.array([[0, 2, 30, 10],
[1, 2, 30, 10],
[0, 3, 33, 10],
[2, 5, 30, 7],
[2, 5, 30, 9]])
sdm = eda.SampleDistanceMatrix(
x, metric='euclidean')
sdm2 = eda.SampleDistanceMatrix(
x, metric='euclidean')
sdm2.sort_features(fdist_metric='euclidean', optimal_ordering=True)
assert sdm2.fids == [2, 3, 1, 0]
def test_get_tsne_kv(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
assert sdm.get_tsne_kv(1) is None
assert sdm.get_tsne_kv(1) is None
assert sdm.get_tsne_kv(0) is None
assert sdm.get_tsne_kv(2) is None
def test_get_tsne_kv_wrong_args(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
with pytest.raises(ValueError) as excinfo:
sdm.get_tsne_kv([1, 2, 3])
with pytest.raises(ValueError) as excinfo:
sdm.get_tsne_kv({1: 2})
def test_put_tsne_wrong_args(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
with pytest.raises(ValueError) as excinfo:
sdm.put_tsne(1, [1, 2, 3])
with pytest.raises(ValueError) as excinfo:
sdm.put_tsne({1: 2}, [1, 2, 3])
def test_tsne(self):
tmet = 'euclidean'
tsne_kwargs = {'metric': tmet, 'n_iter': 250,
'random_state': 123}
ref_tsne = eda.tsne(self.x_3x2, **tsne_kwargs)
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
assert sdm.tsne_lut == {}
tsne1 = sdm.tsne(n_iter=250, random_state=123)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 1
tsne2 = sdm.tsne(store_res=False, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne2)
assert len(sdm.tsne_lut) == 1
with pytest.raises(Exception) as excinfo:
wrong_metric_kwargs = tsne_kwargs.copy()
wrong_metric_kwargs['metric'] = 'correlation'
sdm.tsne(**wrong_metric_kwargs)
assert len(sdm.tsne_lut) == 1
tsne3 = sdm.tsne(store_res=True, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne3)
# (param, ind) as key, so same params get an extra entry.
assert len(sdm.tsne_lut) == 2
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(2)[1])
assert tsne1 is not sdm.get_tsne_kv(1)[1]
assert tsne3 is not sdm.get_tsne_kv(2)[1]
tsne4 = sdm.tsne(store_res=True, n_iter=250, random_state=123)
np.testing.assert_allclose(ref_tsne, tsne4)
np.testing.assert_allclose(sdm.get_tsne_kv(3)[1], tsne4)
assert len(sdm.tsne_lut) == 3
tsne5 = sdm.tsne(store_res=True, n_iter=251, random_state=123)
tsne6 = sdm.tsne(store_res=True, n_iter=251, random_state=123)
np.testing.assert_allclose(tsne6, tsne5)
np.testing.assert_allclose(tsne5, sdm.get_tsne_kv(4)[1])
np.testing.assert_allclose(tsne6, sdm.get_tsne_kv(5)[1])
assert len(sdm.tsne_lut) == 5
def test_par_tsne(self):
tmet = 'euclidean'
param_list = [{'metric': tmet, 'n_iter': 250, 'random_state': 123},
{'metric': tmet, 'n_iter': 250, 'random_state': 125},
{'metric': tmet, 'n_iter': 250, 'random_state': 123}]
ref_tsne = eda.tsne(self.x_3x2, **param_list[0])
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
# If not store, should not update lut
sdm.par_tsne(param_list, store_res=False)
assert sdm._lazy_load_last_tsne is None
assert sdm.tsne_lut == {}
# store results
tsne1, tsne2, tsne3 = sdm.par_tsne(param_list)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, tsne3)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 3
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne2, sdm.get_tsne_kv(2)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(3)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(1)[1])
def test_par_tsne_mp(self):
tmet = 'euclidean'
param_list = [{'metric': tmet, 'n_iter': 250, 'random_state': 123},
{'metric': tmet, 'n_iter': 250, 'random_state': 125},
{'metric': tmet, 'n_iter': 250, 'random_state': 123}]
ref_tsne = eda.tsne(self.x_3x2, **param_list[0])
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
# If not store, should not update lut
sdm.par_tsne(param_list, store_res=False, nprocs=3)
assert sdm._lazy_load_last_tsne is None
assert sdm.tsne_lut == {}
# store results
tsne1, tsne2, tsne3 = sdm.par_tsne(param_list, nprocs=3)
np.testing.assert_allclose(ref_tsne, tsne1)
np.testing.assert_allclose(ref_tsne, tsne3)
np.testing.assert_allclose(ref_tsne, sdm._last_tsne)
assert tsne1.shape == (3, 2)
assert len(sdm.tsne_lut) == 3
np.testing.assert_allclose(tsne1, sdm.get_tsne_kv(1)[1])
np.testing.assert_allclose(tsne2, sdm.get_tsne_kv(2)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(3)[1])
np.testing.assert_allclose(tsne3, sdm.get_tsne_kv(1)[1])
def test_tsne_default_init(self):
tmet = 'euclidean'
tsne_kwargs = {'metric': tmet, 'n_iter': 250,
'random_state': 123}
ref_tsne = eda.tsne(self.x_3x2, **tsne_kwargs)
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
init_tsne = sdm._last_tsne
assert init_tsne.shape == (3, 2)
assert len(sdm.tsne_lut) == 1
tsne2 = sdm.tsne(store_res=True, **tsne_kwargs)
np.testing.assert_allclose(ref_tsne, tsne2)
assert len(sdm.tsne_lut) == 2
def test_ind_x(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
# select sf
ss_sdm = sdm.ind_x([0, 5], list(range(9)))
assert ss_sdm._x.shape == (2, 9)
assert ss_sdm.sids == ['a', 'f']
assert ss_sdm.fids == list(range(10, 19))
np.testing.assert_equal(
ss_sdm.d, sdm._d[np.ix_((0, 5), (0, 5))])
# select with Default
ss_sdm = sdm.ind_x()
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select with None
ss_sdm = sdm.ind_x(None, None)
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select non-existent inds
with pytest.raises(IndexError) as excinfo:
sdm.ind_x([6])
with pytest.raises(IndexError) as excinfo:
sdm.ind_x(None, ['a'])
def test_ind_x_empty(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
empty_s = sdm.ind_x([])
assert empty_s._x.shape == (0, 10)
assert empty_s._d.shape == (0, 0)
assert empty_s._sids.shape == (0,)
assert empty_s._fids.shape == (10,)
empty_f = sdm.ind_x(None, [])
assert empty_f._x.shape == (6, 0)
assert empty_f._d.shape == (6, 6)
assert empty_f._sids.shape == (6,)
assert empty_f._fids.shape == (0,)
empty_sf = sdm.ind_x([], [])
assert empty_sf._x.shape == (0, 0)
assert empty_sf._d.shape == (0, 0)
assert empty_sf._sids.shape == (0,)
assert empty_sf._fids.shape == (0,)
def test_id_x(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
# select sf
ss_sdm = sdm.id_x(['a', 'f'], list(range(10, 15)))
assert ss_sdm._x.shape == (2, 5)
assert ss_sdm.sids == ['a', 'f']
assert ss_sdm.fids == list(range(10, 15))
np.testing.assert_equal(
ss_sdm.d, sdm._d[np.ix_((0, 5), (0, 5))])
# select with Default
ss_sdm = sdm.id_x()
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select with None
ss_sdm = sdm.id_x(None, None)
assert ss_sdm._x.shape == (6, 10)
assert ss_sdm.sids == list("abcdef")
assert ss_sdm.fids == list(range(10, 20))
np.testing.assert_equal(ss_sdm.d, sdm._d)
# select non-existent inds
# id lookup raises ValueError
with pytest.raises(ValueError) as excinfo:
sdm.id_x([6])
with pytest.raises(ValueError) as excinfo:
sdm.id_x(None, ['a'])
def test_id_x_empty(self):
sids = list("abcdef")
fids = list(range(10, 20))
sdm = eda.SampleDistanceMatrix(
np.random.ranf(60).reshape(6, -1), sids=sids, fids=fids)
empty_s = sdm.id_x([])
assert empty_s._x.shape == (0, 10)
assert empty_s._d.shape == (0, 0)
assert empty_s._sids.shape == (0,)
assert empty_s._fids.shape == (10,)
empty_f = sdm.id_x(None, [])
assert empty_f._x.shape == (6, 0)
assert empty_f._d.shape == (6, 6)
assert empty_f._sids.shape == (6,)
assert empty_f._fids.shape == (0,)
empty_sf = sdm.id_x([], [])
assert empty_sf._x.shape == (0, 0)
assert empty_sf._d.shape == (0, 0)
assert empty_sf._sids.shape == (0,)
assert empty_sf._fids.shape == (0,)
def test_getter(self):
tmet = 'euclidean'
sdm = eda.SampleDistanceMatrix(self.x_3x2, metric=tmet)
dist_mat = np.array([[0, np.sqrt(2), np.sqrt(8)],
[np.sqrt(2), 0, np.sqrt(2)],
[np.sqrt(8), np.sqrt(2), 0]])
np.testing.assert_allclose(sdm.d, dist_mat)
assert sdm.d is not sdm._d
assert sdm.metric == tmet
assert sdm.tsne_lut == {}
assert sdm.tsne_lut is not sdm._tsne_lut
assert sdm.tsne_lut == sdm._tsne_lut
sdm.tsne(n_iter=250)
assert sdm.tsne_lut is not sdm._tsne_lut
for k in sdm.tsne_lut:
np.testing.assert_equal(sdm.tsne_lut[k], sdm._tsne_lut[k])
def test_num_correct_dist_mat(self):
tdmat = np.array([[0, 1, 2],
[0.5, 0, 1.5],
[1, 1.6, 0.5]])
# upper triangle is assgned with lower triangle values
ref_cdmat = np.array([[0, 0.5, 1],
[0.5, 0, 1.6],
[1, 1.6, 0]])
with pytest.warns(UserWarning):
cdmat = eda.SampleDistanceMatrix.num_correct_dist_mat(tdmat)
np.testing.assert_equal(cdmat, ref_cdmat)
ref_cdmat2 = np.array([[0, 0.5, 1],
[0.5, 0, 1],
[1, 1, 0]])
# with upper bound
cdmat2 = eda.SampleDistanceMatrix.num_correct_dist_mat(tdmat, 1)
np.testing.assert_equal(cdmat2, ref_cdmat2)
# wrong shape
tdmat3 = np.array([[0, 0.5],
[0.5, 0],
[1, 1]])
# with upper bound
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix.num_correct_dist_mat(tdmat3, 1)
with pytest.raises(Exception) as excinfo:
eda.SampleDistanceMatrix.num_correct_dist_mat(tdmat3)
def test_s_ith_nn_d(self):
nn_sdm = eda.SampleDistanceMatrix([[0], [1], [5], [6], [10], [20]],
metric='euclidean')
np.testing.assert_allclose([0, 0, 0, 0, 0, 0],
nn_sdm.s_ith_nn_d(0))
np.testing.assert_allclose([1, 1, 1, 1, 4, 10],
nn_sdm.s_ith_nn_d(1))
np.testing.assert_allclose([5, 4, 4, 4, 5, 14],
nn_sdm.s_ith_nn_d(2))
def test_s_ith_nn_ind(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean')
np.testing.assert_allclose([0, 1, 2, 3, 4, 5],
nn_sdm.s_ith_nn_ind(0))
np.testing.assert_allclose([1, 0, 3, 2, 3, 4],
nn_sdm.s_ith_nn_ind(1))
np.testing.assert_allclose([2, 2, 1, 4, 2, 3],
nn_sdm.s_ith_nn_ind(2))
# Because summary dist plot calls hist_dens_plot immediately after
# obtaining the summary statistics vector, the correctness of summary
# statistics vector and hist_dens_plot implies the correctness of the
# plots.
@pytest.mark.filterwarnings("ignore:The 'normed' kwarg is depreca")
def test_s_ith_nn_d_dist(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean')
nn_sdm.s_ith_nn_d_dist(1)
def test_knn_ind_lut(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean')
assert nn_sdm.s_knn_ind_lut(0) == dict(zip(range(6), [[]]*6))
assert (nn_sdm.s_knn_ind_lut(1) ==
dict(zip(range(6), [[1], [0], [3], [2], [3], [4]])))
assert (nn_sdm.s_knn_ind_lut(2) ==
dict(zip(range(6), [[1, 2], [0, 2], [3, 1],
[2, 4], [3, 2], [4, 3]])))
assert (nn_sdm.s_knn_ind_lut(3) ==
dict(zip(range(6), [[1, 2, 3], [0, 2, 3], [3, 1, 0],
[2, 4, 1], [3, 2, 1], [4, 3, 2]])))
nn_sdm.s_knn_ind_lut(5)
def test_knn_ind_lut_wrong_args(self):
nn_sdm = eda.SampleDistanceMatrix([[0, 0, 0], [1, 1, 1], [5, 5, 5],
[6, 6, 6], [10, 10, 10],
[20, 20, 20]],
metric='euclidean')
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(-1)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(-0.5)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(6)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(6.5)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(7)
with pytest.raises(ValueError) as excinfo:
nn_sdm.s_knn_ind_lut(7)
@pytest.mark.mpl_image_compare
def test_sdm_tsne_feature_gradient_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.tsne_feature_gradient_plot(
'5', figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_tsne_feature_gradient_plus10_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.tsne_feature_gradient_plot(
'5', transform=lambda x: x + 10, figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_tsne_feature_gradient_plot_sslabs(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
transform=lambda x: np.log(x+1),
figsize=(10, 10), s=50)
fig = sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_tsne_feature_gradient_plot_sslabs_empty(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[],
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
def test_sdm_tsne_feature_gradient_plot_sslabs_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
# Mismatch labels
with pytest.raises(ValueError) as excinfo:
sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[11],
figsize=(10, 10), s=50)
with pytest.raises(ValueError) as excinfo:
sdm.tsne_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=['i'],
figsize=(10, 10), s=50)
# labels not provided
with pytest.raises(ValueError) as excinfo:
sdm.tsne_feature_gradient_plot(
'5', selected_labels=[11], figsize=(10, 10), s=50)
def test_sdm_tsne_feature_gradient_plot_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(x, sids=sids, fids=fids)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('5', transform=2)
# wrong labels size
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[])
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[1])
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[2])
# wrong gradient length
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot([0, 1])
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot(-1)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot(5)
with pytest.raises(ValueError):
sdm.tsne_feature_gradient_plot('123')
@pytest.mark.mpl_image_compare
def test_sdm_tsne_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
g = x_sorted[:, 5]
sdm = eda.SampleDistanceMatrix(x_sorted, sids=sids, fids=fids)
return sdm.tsne_plot(g, figsize=(10, 10), s=50)
@pytest.mark.mpl_image_compare
def test_sdm_pca_feature_gradient_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.pca_feature_gradient_plot(
'5', figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_pca_feature_gradient_plus10_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.pca_feature_gradient_plot(
'5', transform=lambda x: x + 10, figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_pca_feature_gradient_plot_sslabs(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
transform=lambda x: np.log(x+1),
figsize=(10, 10), s=50)
fig = sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels='a',
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
@pytest.mark.mpl_image_compare
def test_sdm_pca_feature_gradient_plot_sslabs_empty(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
fig = sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[],
figsize=(10, 10), s=50)
np.testing.assert_equal(sdm._x, x_sorted)
np.testing.assert_equal(sdm._sids, sids)
np.testing.assert_equal(sdm._fids, fids)
return fig
def test_sdm_pca_feature_gradient_plot_sslabs_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(
x_sorted, sids=sids, fids=fids)
# Mismatch labels
with pytest.raises(ValueError) as excinfo:
sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=[11],
figsize=(10, 10), s=50)
with pytest.raises(ValueError) as excinfo:
sdm.pca_feature_gradient_plot(
'5', labels=list('abcdefgh'), selected_labels=['i'],
figsize=(10, 10), s=50)
# labels not provided
with pytest.raises(ValueError) as excinfo:
sdm.pca_feature_gradient_plot(
'5', selected_labels=[11], figsize=(10, 10), s=50)
def test_sdm_pca_feature_gradient_plot_wrong_args(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
x = np.random.ranf(80).reshape(8, -1)
x_sorted = x[np.argsort(x[:, 5])]
sdm = eda.SampleDistanceMatrix(x, sids=sids, fids=fids)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('5', transform=2)
# wrong labels size
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[])
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[1])
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('5', figsize=(10, 10),
s=50, labels=[2])
# wrong gradient length
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot([0, 1])
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot(11)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot(-1)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot(5)
with pytest.raises(ValueError):
sdm.pca_feature_gradient_plot('123')
@pytest.mark.mpl_image_compare
def test_sdm_pca_plot(self):
sids = list(range(8))
fids = [str(i) for i in range(10)]
np.random.seed(123)
        x = np.random.ranf(80).reshape(8, -1)
# have to import PySide2 first so the matplotlib backend figures out we want PySide2
from HSTB.kluster.gui.backends._qt import QtCore, QtGui, QtWidgets, Signal
# apparently there is some problem with PySide2 + Pyinstaller, for future reference
# https://stackoverflow.com/questions/56182256/figurecanvas-not-interpreted-as-qtwidget-after-using-pyinstaller/62055972#62055972
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT
from matplotlib.figure import Figure
from matplotlib.widgets import RectangleSelector
from matplotlib.backend_bases import MouseEvent
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import sys
import numpy as np
# for rendering surfaces as block plots, see cartopy's advanced plotting examples:
# https://scitools.org.uk/cartopy/docs/v0.13/matplotlib/advanced_plotting.html
class MapView(FigureCanvasQTAgg):
"""
Map view using cartopy/matplotlib to view multibeam tracklines and surfaces with a map context.
"""
box_select = Signal(float, float, float, float)
def __init__(self, parent=None, width: int = 5, height: int = 4, dpi: int = 100, map_proj=ccrs.PlateCarree(), settings=None):
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.map_proj = map_proj
self.axes = self.fig.add_subplot(projection=map_proj)
# self.axes.coastlines(resolution='10m')
self.fig.add_axes(self.axes)
#self.fig.subplots_adjust(left=0, right=1, bottom=0, top=1)
self.axes.gridlines(draw_labels=True, crs=self.map_proj)
self.axes.add_feature(cfeature.LAND)
self.axes.add_feature(cfeature.COASTLINE)
self.line_objects = {} # dict of {line name: [lats, lons, lineplot]}
self.surface_objects = {} # nested dict {surfname: {layername: [lats, lons, surfplot]}}
self.active_layers = {} # dict of {surfname: [layername1, layername2]}
self.data_extents = {'min_lat': 999, 'max_lat': -999, 'min_lon': 999, 'max_lon': -999}
self.selected_line_objects = []
super(MapView, self).__init__(self.fig)
self.navi_toolbar = NavigationToolbar2QT(self.fig.canvas, self)
self.rs = RectangleSelector(self.axes, self._line_select_callback, drawtype='box', useblit=False,
button=[1], minspanx=5, minspany=5, spancoords='pixels', interactive=True)
self.set_extent(90, -90, 100, -100)
def set_background(self, layername: str, transparency: float, surf_transparency: float):
"""
A function for rendering different background layers in QGIS. Disabled for cartopy
"""
pass
def set_extent(self, max_lat: float, min_lat: float, max_lon: float, min_lon: float, buffer: bool = True):
"""
Set the extent of the 2d window
Parameters
----------
max_lat
set the maximum latitude of the displayed map
min_lat
set the minimum latitude of the displayed map
max_lon
set the maximum longitude of the displayed map
min_lon
set the minimum longitude of the displayed map
buffer
if True, will extend the extents by half the current width/height
"""
self.data_extents['min_lat'] = np.min([min_lat, self.data_extents['min_lat']])
self.data_extents['max_lat'] = np.max([max_lat, self.data_extents['max_lat']])
self.data_extents['min_lon'] = np.min([min_lon, self.data_extents['min_lon']])
self.data_extents['max_lon'] = np.max([max_lon, self.data_extents['max_lon']])
if self.data_extents['min_lat'] != 999 and self.data_extents['max_lat'] != -999 and self.data_extents[
'min_lon'] != 999 and self.data_extents['max_lon'] != -999:
if buffer:
lat_buffer = np.max([(max_lat - min_lat) * 0.5, 0.5])
lon_buffer = np.max([(max_lon - min_lon) * 0.5, 0.5])
else:
lat_buffer = 0
lon_buffer = 0
self.axes.set_extent([np.clip(min_lon - lon_buffer, -179.999999999, 179.999999999), np.clip(max_lon + lon_buffer, -179.999999999, 179.999999999),
np.clip(min_lat - lat_buffer, -90, 90), np.clip(max_lat + lat_buffer, -90, 90)],
crs=ccrs.Geodetic())
def add_line(self, line_name: str, lats: np.ndarray, lons: np.ndarray, refresh: bool = False):
"""
Draw a new multibeam trackline on the cartopy display, unless it is already there
Parameters
----------
line_name
name of the multibeam line
lats
numpy array of latitude values to plot
lons
numpy array of longitude values to plot
refresh
            set to True to draw the line immediately after adding it here; otherwise Kluster will redraw the
            screen itself after it finishes adding lines
"""
if line_name in self.line_objects:
return
# this is about 3x slower, use transform_points instead
# lne = self.axes.plot(lons, lats, color='blue', linewidth=2, transform=ccrs.Geodetic())
ret = self.axes.projection.transform_points(ccrs.Geodetic(), lons, lats)
x = ret[..., 0]
y = ret[..., 1]
lne = self.axes.plot(x, y, color='blue', linewidth=2)
self.line_objects[line_name] = [lats, lons, lne[0]]
if refresh:
self.refresh_screen()
def remove_line(self, line_name, refresh=False):
"""
Remove a multibeam line from the cartopy display
Parameters
----------
line_name
name of the multibeam line
refresh
            optional screen refresh; usually True, unless you are removing multiple lines and want to refresh
            only once at the end
"""
if line_name in self.line_objects:
lne = self.line_objects[line_name][2]
lne.remove()
self.line_objects.pop(line_name)
if refresh:
self.refresh_screen()
def add_surface(self, surfname: str, lyrname: str, surfx: np.ndarray, surfy: np.ndarray, surfz: np.ndarray,
surf_crs: int):
"""
Add a new surface/layer with the provided data
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
surfx
1 dim numpy array for the grid x values
surfy
1 dim numpy array for the grid y values
surfz
2 dim numpy array for the grid values (depth, uncertainty, etc.)
surf_crs
integer epsg code
"""
try:
addlyr = True
if lyrname in self.active_layers[surfname]:
addlyr = False
except KeyError:
addlyr = True
if addlyr:
self._add_surface_layer(surfname, lyrname, surfx, surfy, surfz, surf_crs)
self.refresh_screen()
def hide_surface(self, surfname: str, lyrname: str):
"""
Hide the surface layer that corresponds to the given names.
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
"""
try:
hidelyr = True
if lyrname not in self.active_layers[surfname]:
hidelyr = False
except KeyError:
hidelyr = False
if hidelyr:
self._hide_surface_layer(surfname, lyrname)
return True
else:
return False
def show_surface(self, surfname: str, lyrname: str):
"""
        The cartopy backend currently just deletes/re-adds surface data rather than truly hiding or showing it.
        Return False here to signal that the surface was not shown (the caller must re-add it).
"""
return False
def remove_surface(self, surfname: str):
"""
Remove the surface from memory by removing the name from the surface_objects dict
Parameters
----------
surfname
path to the surface that is used as a name
"""
if surfname in self.surface_objects:
for lyr in self.surface_objects[surfname]:
self.hide_surface(surfname, lyr)
surf = self.surface_objects[surfname][lyr][2]
surf.remove()
self.surface_objects.pop(surfname)
self.refresh_screen()
def _add_surface_layer(self, surfname: str, lyrname: str, surfx: np.ndarray, surfy: np.ndarray, surfz: np.ndarray,
surf_crs: int):
"""
Add a new surface/layer with the provided data
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
surfx
1 dim numpy array for the grid x values
surfy
1 dim numpy array for the grid y values
surfz
2 dim numpy array for the grid values (depth, uncertainty, etc.)
surf_crs
integer epsg code
"""
try:
makelyr = True
if lyrname in self.surface_objects[surfname]:
makelyr = False
except KeyError:
makelyr = True
if makelyr:
desired_crs = self.map_proj
lon2d, lat2d = np.meshgrid(surfx, surfy)
xyz = desired_crs.transform_points(ccrs.epsg(int(surf_crs)), lon2d, lat2d)
lons = xyz[..., 0]
lats = xyz[..., 1]
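            # Added note: the color limits below use the full data range for non-depth layers;
            # for depth they use median +/- one standard deviation (despite the 'twostd' name)
            # so that a few outlier soundings do not wash out the colormap.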
if lyrname != 'depth':
vmin, vmax = np.nanmin(surfz), np.nanmax(surfz)
else: # need an outlier resistant min max depth range value
twostd = np.nanstd(surfz)
med = np.nanmedian(surfz)
vmin, vmax = med - twostd, med + twostd
# print(vmin, vmax)
surfplt = self.axes.pcolormesh(lons, lats, surfz.T, vmin=vmin, vmax=vmax, zorder=10)
setextents = False
            if not self.line_objects and not self.surface_objects:  # if this is the first thing you are loading, jump to its extents
setextents = True
self._add_to_active_layers(surfname, lyrname)
self._add_to_surface_objects(surfname, lyrname, [lats, lons, surfplt])
if setextents:
self.set_extents_from_surfaces()
else:
surfplt = self.surface_objects[surfname][lyrname][2]
newsurfplt = self.axes.add_artist(surfplt)
# update the object with the newly added artist
self.surface_objects[surfname][lyrname][2] = newsurfplt
self._add_to_active_layers(surfname, lyrname)
def _hide_surface_layer(self, surfname: str, lyrname: str):
"""
Hide the surface layer that corresponds to the given names.
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
"""
surfplt = self.surface_objects[surfname][lyrname][2]
surfplt.remove()
self._remove_from_active_layers(surfname, lyrname)
self.refresh_screen()
def _add_to_active_layers(self, surfname: str, lyrname: str):
"""
Add the surface layer to the active layers dict
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
"""
if surfname in self.active_layers:
self.active_layers[surfname].append(lyrname)
else:
self.active_layers[surfname] = [lyrname]
def _add_to_surface_objects(self, surfname: str, lyrname: str, data: list):
"""
Add the surface layer data to the surface objects dict
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
data
list of [2dim y values for the grid, 2dim x values for the grid, matplotlib.collections.QuadMesh]
"""
if surfname in self.surface_objects:
self.surface_objects[surfname][lyrname] = data
else:
self.surface_objects[surfname] = {lyrname: data}
def _remove_from_active_layers(self, surfname: str, lyrname: str):
"""
Remove the surface layer from the active layers dict
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
"""
if surfname in self.active_layers:
if lyrname in self.active_layers[surfname]:
self.active_layers[surfname].remove(lyrname)
def _remove_from_surface_objects(self, surfname, lyrname):
"""
Remove the surface layer from the surface objects dict
Parameters
----------
surfname
path to the surface that is used as a name
lyrname
band layer name for the provided data
"""
if surfname in self.surface_objects:
if lyrname in self.surface_objects[surfname]:
self.surface_objects[surfname].pop(lyrname)
def change_line_colors(self, line_names: list, color: str):
"""
Change the provided line names to the provided color
Parameters
----------
line_names
list of line names to use as keys in the line objects dict
color
string color identifier, ex: 'r' or 'red'
"""
for line in line_names:
lne = self.line_objects[line][2]
lne.set_color(color)
self.selected_line_objects.append(lne)
self.refresh_screen()
def reset_line_colors(self):
"""
Reset all lines back to the default color
"""
for lne in self.selected_line_objects:
lne.set_color('b')
self.selected_line_objects = []
self.refresh_screen()
def _line_select_callback(self, eclick: MouseEvent, erelease: MouseEvent):
"""
        Handle the callback from the Matplotlib RectangleSelector, which provides one event with the location
        of the initial click and another with the location of the release
Parameters
----------
eclick
MouseEvent with the position of the initial click
erelease
MouseEvent with the position of the final release of the mouse button
"""
x1, y1 = eclick.xdata, eclick.ydata
x2, y2 = erelease.xdata, erelease.ydata
self.rs.set_visible(False)
# set the visible property back to True so that the next move event shows the box
self.rs.visible = True
# signal with min lat, max lat, min lon, max lon
self.box_select.emit(y1, y2, x1, x2)
# print("(%3.2f, %3.2f) --> (%3.2f, %3.2f)" % (x1, y1, x2, y2))
def set_extents_from_lines(self):
"""
Set the maximum extent based on the line_object coordinates
"""
lats = []
lons = []
for ln in self.line_objects:
lats.append(self.line_objects[ln][0])
lons.append(self.line_objects[ln][1])
if not lats or not lons:
self.set_extent(90, -90, 100, -100)
else:
lats = np.concatenate(lats)
            lons = np.concatenate(lons)
            self.set_extent(np.max(lats), np.min(lats), np.max(lons), np.min(lons))
import numpy as np
import tflearn
import tensorflow as tf
import random
import json
import pickle
import nltk
from nltk import word_tokenize,sent_tokenize
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
from pathlib import Path
file_dir = Path(__file__).resolve().parent.parent
file_dir = str(file_dir) + '\\Discord_Model\\'
def set_model(name):
if name=='No Model':
with open(file_dir+"intents.json") as file:
data = json.load(file)
try:
with open(file_dir+"data.pickle","rb") as f:
words, labels, training, output = pickle.load(f)
except:
            pass
with open(file_dir+"intents.json") as file:
data = json.load(file)
try:
with open(file_dir+"data.pickle","rb") as f:
words, labels, training, output = pickle.load(f)
except:
        pass
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)
model = tflearn.DNN(net)
try:
model.load(file_dir+"model.tflearn")
except:
print('Model not found')
def bag_of_words(s, words):
bag = [0 for _ in range(len(words))]
s_words = nltk.word_tokenize(s)
s_words = [stemmer.stem(word.lower()) for word in s_words]
for se in s_words:
for i, w in enumerate(words):
if w == se:
bag[i] = 1
    return np.array(bag)
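# Illustrative sketch (not part of the original module): how bag_of_words() encodes a
# sentence against a vocabulary. The vocabulary here is hypothetical; in the real bot the
# `words` list comes from the pickled training data loaded in set_model().
def _example_bag_of_words():
    vocab = [stemmer.stem(w.lower()) for w in ["hello", "how", "are", "you"]]
    encoded = bag_of_words("Hello, how are things?", vocab)
    # each entry is 1 where the stemmed sentence contains the matching vocabulary stem, else 0
    return encoded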
from __future__ import division
import os
import sys
import time
import numpy as np
from math import pi
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import style
from scipy import interpolate
from sklearn.preprocessing import MinMaxScaler
import multiprocessing as mp
from multiprocessing import Pool
import string
import warnings
warnings.filterwarnings("ignore")
mpl.use('Agg')
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
################## Fourier #######################
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def random_fourier(seed):
np.random.seed(seed)
Coeffs = np.random.rand(2,fmax)
y = np.multiply(Template,Coeffs)
y = np.sum(y,axis=(1,2))
l,h=np.sort(np.random.rand(2))
y = MinMaxScaler(feature_range=(l,h)).fit_transform(y.reshape(-1, 1)).reshape(-1)
# y = MinMaxScaler(feature_range=(l,h)).fit_transform(y)
return y
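# Note (added): `fmax` and `Template` are module-level globals defined elsewhere in this
# script (not shown here). From the usage above, `Template` is expected to broadcast against
# the (2, fmax) random coefficient array, i.e. to have shape (n_samples, 2, fmax) holding
# sine/cosine basis waveforms, so that summing over the last two axes yields one signal.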
################## Lines #######################
def line_family(seed):
np.random.seed(seed)
y1 = np.random.random()
    y2 = np.random.random()
import numpy as np
def decompLU(A):
    L = np.eye(np.shape(A)[0])
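# Illustrative sketch (assumption): the body of decompLU is truncated above, so the
# standalone function below shows one common way such an LU factorization (Doolittle,
# no pivoting) is completed. It is not the original implementation.
def decompLU_sketch(A):
    A = np.asarray(A, dtype=float)
    n = A.shape[0]
    L = np.eye(n)
    U = A.copy()
    for k in range(n - 1):
        for i in range(k + 1, n):
            m = U[i, k] / U[k, k]   # elimination multiplier, stored in L
            L[i, k] = m
            U[i, k:] = U[i, k:] - m * U[k, k:]
    return L, U  # A == L @ U for matrices that need no row exchanges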
# Routines for general quantum chemistry (no particular software package)
# Python3 and pandas
# <NAME>
#
import re, sys
#import string, copy
import copy
import numpy as np
import pandas as pd
import quaternion
from scipy.spatial.distance import cdist
from scipy import interpolate
from scipy import optimize
import matplotlib.pyplot as plt
#
# CODATA 2018 constants from physics.nist.gov, retrieved 7/13/2020
AVOGADRO = 6.02214076e23 # mol^-1 (exact, defined value)
BOLTZMANN = 1.380649e-23 # J/K (exact, defined value)
RGAS = AVOGADRO * BOLTZMANN # J/mol/K (exact)
PLANCK = 6.62607015e-34 # J s (exact, defined value)
CLIGHT = 299792458. # m/s (exact, defined value)
CM2KJ = PLANCK * AVOGADRO * CLIGHT / 10 # convert from cm^-1 to kJ/mol
CM2K = 100 * CLIGHT * PLANCK / BOLTZMANN # convert from cm^-1 to Kelvin
AMU = 1.66053906660e-27 # kg/u
HARTREE = 4.3597447222071e-18 # J; uncertainty is 85 in last two digits
AU2CM = 2.1947463136320e05 # Hartree in cm^-1; unc. is 43 in last two digits
AU2KJMOL = HARTREE * AVOGADRO / 1000. # Hartree in kJ/mol
AU2EV = 27.211386245988 # Hartree in eV; unc. is 53 in last two digits
CALORIE = 4.184 # multipy cal * CALORIE to get J
ATM_KPA = 101.325 # convert pressure in atm to kPa
EMASS = 9.1093837015e-31 # electron mass in kg; unc. is 28 in last two digits
BOHR = 0.529177210903 # Bohr radius in Angstrom; unc. is 80 in last two digits
AMU2AU = AMU / EMASS # amu expressed in a.u. (viz., electron masses)
EV2CM = AU2CM / AU2EV # eV expressed in cm^-1
EPS0 = 8.8541878128e-12 # vacuum permittivity in F/m
PI = np.pi
#
GOLD = (1 + np.sqrt(5))/2 # golden ratio
def isotopic_mass(atlabel):
# Given a label like '1-H' or 'pt195', return the atomic mass
# Data from from https://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl
    rxn = re.compile(r'\d+')
    rxsym = re.compile(r'[a-zA-Z]+')
n = int(rxn.search(atlabel).group(0))
sym = rxsym.search(atlabel).group(0)
Z = elz(sym)
# table of masses; major index = Z, minor = n
mtable = {1: {1: 1.00782503223, 2: 2.01410177812, 3: 3.0160492779},
2: {3: 3.0160293201, 4: 4.00260325413},
3: {6: 6.0151228874, 7: 7.0160034366},
4: {9: 9.012183065},
5: {10: 10.01293695, 11: 11.00930536},
6: {12: 12., 13: 13.00335483507, 14: 14.0032419884},
7: {14: 14.00307400443, 15: 15.00010889888},
8: {16: 15.99491461957, 17: 16.99913175650, 18: 17.99915961286},
9: {19: 18.99840316273},
16: {32: 31.9720711744, 33: 32.9714589098, 34: 33.967867004, 36: 35.96708071},
17: {35: 34.968852682, 37: 36.965902602},
35: {79: 78.9183376, 81: 80.9162897},
53: {127: 126.9044719},
78: {190: 189.9599297, 192: 191.9610387, 194: 193.9626809, 195: 194.9647917, 196: 195.96495209, 198: 197.9678949},
}
try:
m = mtable[Z][n]
except KeyError:
# invalid or just not typed here yet
m = np.nan
return m
##
def dominant_isotope(el):
# given element symbol or atomic number,
# return the mass of the most abundant isotope
# source: https://www.chem.ualberta.ca/~massspec/atomic_mass_abund.pdf,
# which cites mass data from Audi & Wapstra, Nucl. Phys. A (1993 & 1995)
# and abundance data from 1997 IUPAC report [Rosman & Taylor,
# Pure Appl. Chem. (1999)]
try:
Z = int(el)
except:
Z = elz(el)
mtable = [0, 1.007825, 4.002603, 7.016004, 9.012182, 11.009305, 12., # C
14.003074, 15.994915, 18.998403, 19.992440, 22.989770, # Na
23.985042, 26.981538, 27.976927, 30.973762, 31.972071, # S
34.968853, 39.962383, 38.963707, 39.962591, 44.955910, # Sc
47.947947, 50.943964, 51.940512, 54.938050, 55.934942, # Fe
58.933200, 57.935348, 62.929601, 63.929147, 68.925581, # Ga
73.921178, 74.921596, 79.916522, 78.918338, 83.911507, # Kr
84.911789, 87.905614, 88.905848, 89.904704, 92.906378, # Nb
97.905408, 97.907216, 101.904350, 102.905504, 105.903483, # Pd
106.905093, 113.903358, 114.903878, 119.902197, # Sn
120.903818, 129.906223, 126.904468, 131.904154, # Xe
132.905447, 137.905241, 138.906348, 139.905434, # Ce
140.907648, 141.907719, 144.912744, 151.919728, # Sm
152.921226, 157.924101, 158.925343, 163.929171, # Dy
164.930319, 165.930290, 168.934211, 173.938858, # Yb
174.940768, 179.946549, 180.947996, 183.950933, # W
186.955751, 191.961479, 192.962924, 194.964774, # Pt
196.966552, 201.970626, 204.974412, 207.976636, # Pb
208.980383, 208.982416, 209.987131, 222.017570, # Rn
223.019731, 226.025403, 227.027747, 232.038050, # Th
231.035879, 238.050783, 237.048167, 244.064198] # Pu
return mtable[Z]
##
def RRHO_symmtop(freqs, Emax, binwidth, ABC_GHz, Bunit='GHz'):
# RRHO with symmetric-top approximation.
# Use Stein-Rabinovitch counting method (less roundoff error than
# with Beyer-Swinehart)
# ** Does not account for any symmetry **
n = int(Emax/binwidth) # number of bins
nos = np.zeros(n) # number of states in each bin
nos[0] = 1 # the zero-point level
for freq in freqs:
Eladder = np.arange(freq, Emax+binwidth, freq)
iladder = np.rint(Eladder / binwidth).astype(int)
miyo = nos.copy() # temporary copy of 'nos'
# add each value in ladder to existing count in 'nos'
for irung in iladder:
for ibin in range(irung, n):
miyo[ibin] += nos[ibin - irung]
nos = miyo.copy()
# Do similar thing for the rotational levels.
E_rot, g_rot = rotational_levels_symmtop(ABC_GHz, Emax, Bunit=Bunit)
ilist = np.rint(E_rot / binwidth).astype(int).reshape(-1)
miyo = nos.copy()
for idx in range(1, len(ilist)):
# Loop over this index, instead of the 'iladder' values,
# to find the matching rotational degeneracies.
# Start from 1 instead of 0 to skip the (non-degenerate) J=0
irung = ilist[idx]
degen = g_rot[idx]
# vectorized version
binrange = np.arange(irung, n).astype(int)
miyo[binrange] = miyo[binrange] + nos[binrange - irung] * degen
nos = miyo.copy()
# find centers of energy bins
centers = binwidth * (0.5 + np.arange(n))
return nos, centers
##
def rotational_levels_symmtop(ABC, Emax, Bunit='cm-1'):
# Rigid-rotor levels for a symmetric top
# Return two arrays: energies (in cm^-1) and degeneracies
# 'ABC' are the three rotational constants, either in GHz or cm^-1
# 'Emax' is the upper bound on energy, in cm^-1
ABC = np.array(ABC)
ABC[::-1].sort() # sort in descending order
if Bunit.lower() == 'ghz':
# convert ABC to cm^-1
ABC *= 1.0e7 / CLIGHT
if (ABC[0]-ABC[1] > ABC[1]-ABC[2]):
# call it prolate
B = np.sqrt(ABC[1]*ABC[2]) # geometric mean; "perpendicular"
A = ABC[0]
Jmax = int(-0.5 + 0.5 * np.sqrt(1 + 4*Emax/B))
else:
# call it oblate
B = np.sqrt(ABC[1]*ABC[0]) # geometric mean; "perpendicular"
A = ABC[2]
Jmax = int( (-B + np.sqrt(B*B+4*A*Emax)) / (2*A) )
J = np.arange(Jmax+1) # all allowed values of J, including Jmax
# K = 0 cases
E = B * J * (J + 1)
degen = 2*J + 1
# K != 0 cases
C = A-B
for J in range(1,Jmax+1):
# now J is a scalar
K = np.arange(1, J+1)
Kstack = B*J*(J+1) + C * K * K
g = 2 * (2*J+1) * np.ones_like(K)
E = np.concatenate((E, Kstack))
degen = np.concatenate((degen, g))
# sort by increasing energy
idx = np.argsort(E)
E = E[idx]
degen = degen[idx]
# filter out energies that exceed Emax
idx = np.argwhere(E <= Emax)
return E[idx], degen[idx]
##
def rotational_levels_spherical(B, Emax, Bunit='cm-1'):
# Rigid-rotor levels for a spherical top
# Return two arrays: energies (in cm^-1) and degeneracies
# 'B' is the rotational constant, either in GHz or cm^-1
# 'Emax' is the upper bound on energy, in cm^-1
if Bunit.lower() == 'ghz':
# convert B to cm^-1
B *= 1.0e7 / CLIGHT
Jmax = int(-0.5 + 0.5 * np.sqrt(1 + 4*Emax/B))
J = np.arange(Jmax+1) # all allowed values of J, including Jmax
E = B * J * (J+1)
degen = 2*J + 1
degen *= degen # this line is the only difference from the linear case
return E, degen
##
def rotational_levels_linear(B, Emax, Bunit='cm-1'):
# Rigid-rotor levels for a linear molecule
# Return two arrays: energies (in cm^-1) and degeneracies
# 'B' is the rotational constant, either in GHz or cm^-1
# 'Emax' is the upper bound on energy, in cm^-1
if Bunit.lower() == 'ghz':
# convert B to cm^-1
B *= 1.0e7 / CLIGHT
Jmax = int(-0.5 + 0.5 * np.sqrt(1 + 4*Emax/B))
J = np.arange(Jmax+1) # all allowed values of J, including Jmax
E = B * J * (J+1)
degen = 2*J + 1
return E, degen
##
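# Worked example (added, illustrative): for a rigid linear rotor with B = 2 cm^-1 and
# Emax = 13 cm^-1, the allowed levels are E_J = B*J*(J+1) for J = 0, 1, 2, i.e.
# E = [0, 4, 12] cm^-1 with degeneracies g = 2J+1 = [1, 3, 5]:
#
#   E, g = rotational_levels_linear(2.0, 13.0)
#   # E -> array([ 0.,  4., 12.]),  g -> array([1, 3, 5])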
def Beyer_Swinehart(freqs, Emax, binwidth):
# Return a harmonic vibrational density of states (numpy array)
# whose index is the energy bin number.
# Also return an array of the bin center energies.
# Not vectorized
n = int(Emax/binwidth) # number of bins
nos = np.zeros(n) # number of states in each bin
nos[0] = 1 # the zero-point level
for freq in freqs:
# outer loop in BS paper
ifreq = np.rint(freq/binwidth).astype(int)
for ibin in range(ifreq, n):
# inner loop
nos[ibin] += nos[ibin - ifreq]
# find centers of energy bins
centers = binwidth * (0.5 + np.arange(n))
return nos, centers
##
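# Quick check (added, illustrative): a single 1000 cm^-1 oscillator counted up to
# Emax = 3500 cm^-1 with 10 cm^-1 bins has exactly four states (v = 0, 1, 2, 3):
#
#   nos, centers = Beyer_Swinehart([1000.0], 3500.0, 10.0)
#   # nos.sum() -> 4.0; the nonzero bins sit near 0, 1000, 2000 and 3000 cm^-1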
def thermo_RRHO(T, freqs, symno, ABC_GHz, mass, pressure=1.0e5, deriv=0):
# Return S, Cp, and [H(T)-H(0)] at the specified temperature
lnQ = lnQvrt(T, freqs, symno, ABC_GHz, mass)
d = lnQvrt(T, freqs, symno, ABC_GHz, mass, deriv=1) # derivative of lnQ
deriv = T * d + lnQ # derivative of TlnQ
S = RGAS * (deriv - np.log(AVOGADRO) + 1)
d2 = lnQvrt(T, freqs, symno, ABC_GHz, mass, deriv=2) # 2nd derivative of lnQ
deriv2 = 2 * d + T * d2 # 2nd derivative of TlnQ
Cp = RGAS + RGAS * T * deriv2
ddH = RGAS * T * (1 + T * d) / 1000
return (S, Cp, ddH)
##
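# Usage sketch (added, illustrative; the molecular constants below are approximate,
# water-like values and are not validated against any reference):
#
#   freqs = np.array([1600.0, 3650.0, 3750.0])   # harmonic frequencies in cm^-1
#   S, Cp, ddH = thermo_RRHO(T=298.15, freqs=freqs, symno=2,
#                            ABC_GHz=[835.0, 435.0, 278.0], mass=18.01)
#   # S and Cp in J/mol/K, and H(T)-H(0) in kJ/mol, per the comments above.
#   # Note that `freqs` must be a numpy array because lnQvib multiplies it by 100.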
def lnQvrt(T, freqs, symno, ABC_GHz, mass, pressure=1.0e5, deriv=0):
# Return the total (vib + rot + transl) ln(Q) partition function
# or a derivative. RRHO approximation
lnQv = lnQvib(T, freqs, deriv=deriv)
lnQr = lnQrot(T, symno, ABC_GHz, deriv=deriv)
lnQt = lnQtrans(T, mass, pressure=pressure, deriv=deriv)
lnQ = lnQv + lnQr + lnQt
return lnQ
##
def lnQtrans(T, mass, pressure=1.0e5, deriv=0):
# Given a temperature (in K), a molecular mass (in amu),
# and optionally a pressure (in Pa), return ln(Q), where
# Q is the ideal-gas translational partition function.
# If deriv > 0, return a (1st or 2nd) derivative of TlnQ
# instead of lnQ.
if deriv == 1:
# return (d/dT)lnQ = (3/2T)
return (1.5 / T)
if deriv == 2:
# return (d2/dT2)lnQ = -(3/2T**2)
return (-1.5 / (T*T))
kT = BOLTZMANN * T # in J
m = mass * AMU # in kg
V = RGAS * T / pressure # in m**3
lnQ = 1.5 * np.log(2 * PI * m * kT)
lnQ -= 3 * np.log(PLANCK)
lnQ += np.log(V)
return lnQ
##
def lnQrot(T, symno, ABC_GHz, deriv=0):
# Given a temperature (in K), symmetry number, and list of
# rotational constants (in GHz), return ln(Q), where Q is
# the rigid-rotor partition function.
n = len(ABC_GHz)
if n == 0:
# atom; no rotations possible
return 0.
if deriv == 1:
# first derivative of lnQ depends only on temperature
if n < 3:
# linear case
return (1/T)
else:
# non-linear
return (1.5/T)
if deriv == 2:
# second derivative of lnQ
if n < 3:
# linear case
return (-1 / (T*T))
else:
# non-linear
return (-1.5 / (T*T))
ln_kTh = np.log(T) + np.log(BOLTZMANN) - np.log(PLANCK) # ln(kT/h) expressed in ln(Hz)
if n < 3:
# linear molecule
B = ABC_GHz[0] * 1.0e9 # convert to Hz
lnQ = ln_kTh - np.log(symno * B)
else:
# polyatomic molecule with 3 constants
lnQ = 1.5 * ln_kTh + 0.5 * np.log(PI) - np.log(symno)
for c in ABC_GHz:
B = c * 1.0e9 # convert to Hz
lnQ -= 0.5 * np.log(B)
return lnQ
##
def lnQvib(T, freqs, deriv=0):
# Given a temperature (in K) and array of vibrational
# frequencies (in cm^-1), return ln(Q) where Q is
# the harmonic-oscillator partition function.
kTh = T * BOLTZMANN / PLANCK # kT/h expressed in Hz
lnQ = 0.
nu = freqs * 100 # convert to m^-1 (as array)
nu = nu * CLIGHT # convert to Hz
fred = nu / kTh # reduced frequencies
x = np.exp(-fred) # exponentiated, reduced frequencies
xm1 = 1 - x
if deriv == 1:
# derivative of lnQ
term = nu * x / xm1
d = term.sum()
return (d / (kTh*T))
if deriv == 2:
# 2nd derivative of lnQ
t1 = nu * (1/xm1 - 1)
sum1 = -2 * t1.sum() / (kTh * T * T)
t2 = nu * nu * x / (xm1 * xm1)
sum2 = t2.sum() / (kTh * kTh * T * T)
return (sum1 + sum2)
# return lnQ itself
lnq = np.log(xm1)
lnQ = -1 * lnq.sum()
return lnQ
##
def typeCoord(crds):
# 'Geometry' (a Geometry object)
# 'cartesian' (a list of elements and list/array of cartesians)
# 'ZMatrix' (a ZMatrix object)
if isinstance(crds, Geometry):
intype = 'Geometry'
elif isinstance(crds, ZMatrix):
intype = 'ZMatrix'
elif isinstance(crds, list) and (len(crds) == 2) and (
(len(crds[0]) == len(crds[1])) or (len(crds[0]) * 3 == len(crds[1])) ):
# 'cartesian' is plausible
intype = 'cartesian'
else:
print_err('autodetect')
return intype
##
def parse_ZMatrix(zlist, unitR='angstrom', unitA='degree'):
# Given a list of all the lines of a z-matrix,
# return a ZMatrix object
el = []
refat = []
var = []
val = {}
intop = True
maxlen = 0 # keep track of max number of words in line,
# because its decrease will signal the beginning of the
# second section of the z-matrix (if any)
    regexSplit = re.compile(r'[\s,=]+')
for line in zlist:
words = regexSplit.split(line) # split on whitespace, comma, or equals
nwords = len(words)
if nwords < 1:
continue # ignore blank line
maxlen = max(maxlen, nwords)
if nwords < maxlen:
intop = False
if intop:
# list of atoms and variable names (or floats)
# add element symbol
el.append(words[0])
# add variable (str|float)'s
var.append([])
for i in range(2, nwords, 2):
try:
var[-1].append(float(words[i]))
except:
# symbolic z-matrix variable (str type)
var[-1].append(words[i])
# add list of atoms to which variables refer
refat.append([])
for i in range(1, nwords, 2):
refat[-1].append(int(words[i]) - 1) # subtract one from user-viewed index
else:
# values of any z-matrix variables
val[words[0]] = float(words[1])
ZM = ZMatrix(el, refat, var, val, unitR=unitR, unitA=unitA)
return ZM
##
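# Usage sketch (added, illustrative): parsing a minimal symbolic z-matrix for water.
#
#   zlines = ["O",
#             "H 1 r1",
#             "H 1 r1 2 a1",
#             "r1 0.96",
#             "a1 104.5"]
#   ZM = parse_ZMatrix(zlines, unitR='angstrom', unitA='degree')
#   # ZM.el -> ['O', 'H', 'H'];  ZM.val -> {'r1': 0.96, 'a1': 104.5}
#   # ZM.vtype -> {'r1': 'distance', 'a1': 'angle'} (assigned by vtypeBuild)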
class ZMatrix(object):
# symbolic or numerical z-matrix
# initialize empty and then add to it
# indices are zero-based but user will be one-based
def __init__(self, el=[], refat=[], var=[], val={}, vtype={}, unitR='angstrom', unitA='radian'):
# this structure corresponds with the usual way of writing
# a z-matrix, with one atom defined per line
self.el = el # element symbols; should be in correct order
self.refat = refat # list of [list of ref. atoms that define position of this atom]
self.var = var # list of [list of z-matrix vars/constants that define this atom pos.]
self.val = val # dict of float values of any symbolic z-matrix variables
self.vtype = vtype # dict of names of variable types ('distance', 'angle', 'dihedral')
self.unitR = unitR # for distances
self.unitA = unitA # for angles and dihedrals ('radian' or 'degree')
self.coordtype = 'ZMatrix'
self.charge = None # optional
self.spinmult = None # optional
if len(val) != len(vtype):
# generate the vtype's automatically
self.vtypeBuild()
def vtypeBuild(self):
# categorize the variables
# this is important because they have different units
category = ['distance', 'angle', 'dihedral']
for iat in range(self.natom()):
# loop over atoms
for ivar in range(len(self.var[iat])):
# loop over names of z-matrix variables for this atom
# it's left-to-right, so vars are in the order in 'category'
v = self.var[iat][ivar] # name of a variable
if ivar > 2:
self.vtype[v] = 'unknown'
else:
self.vtype[v] = category[ivar]
return
def varMask(self, varlist):
        # given a list of z-matrix variable names, return a boolean numpy array
        # showing which coordinate indices (in the sorted order used by ZMatrix.toVector()/fromVector()) correspond
blist = []
for var in sorted(self.val):
blist.append(var in varlist)
return np.array(blist)
def canonical_angles(self):
# shift all dihedral angles into the range (-pi, pi]
for varname in self.val:
if self.vtype[varname] == 'dihedral':
self.val[varname] = angle_canon(self.val[varname], unit=self.unitA)
return
def cap_angles(self):
# force all bond angles to be in the range (0, pi)
for varname in self.val:
if self.vtype[varname] == 'angle':
if self.unitA == 'degree':
if self.val[varname] >= 180.:
self.val[varname] = 179.9
if self.val[varname] < 0.:
self.val[varname] = 0.1
else:
# radian
if self.val[varname] >= PI:
self.val[varname] = PI - 0.0002
if self.val[varname] < 0.:
self.val[varname] = 0.0002
return
def adjust_dTau(self, dX):
# given a vector of coordinate differences, move
# dihedral angle differences into the range (-pi, pi]
i = 0
for k in sorted(self.val):
if self.vtype[k] == 'dihedral':
dX[i] = angle_canon(dX[i], unit=self.unitA)
i += 1
return dX
def toRadian(self):
# make sure all angles/dihedrals are in radian
if self.unitA == 'degree':
for v in self.val:
if self.vtype[v] in ['angle', 'dihedral']:
self.val[v] = np.deg2rad(self.val[v])
self.unitA = 'radian'
return
def toDegree(self):
# make sure all angles/dihedrals are in degree
if self.unitA == 'radian':
for v in self.val:
if self.vtype[v] in ['angle', 'dihedral']:
self.val[v] = np.rad2deg(self.val[v])
self.unitA = 'degree'
return
def toAngstrom(self):
# make sure all distances are in angstrom
if self.unitR == 'bohr':
for v in self.val:
if self.vtype[v] == 'distance':
self.val[v] *= BOHR
self.unitR = 'angstrom'
return
def toBohr(self):
# make sure all distances are in bohr
if self.unitR == 'angstrom':
for v in self.val:
if self.vtype[v] == 'distance':
self.val[v] /= BOHR
self.unitR = 'bohr'
return
def unitX(self):
# return (tuple) of units
return (self.unitR, self.unitA)
def toUnits(self, unitS):
# given (unitR, unitA), in either order, convert to those units
if 'angstrom' in unitS:
self.toAngstrom()
if 'bohr' in unitS:
self.toBohr()
if 'degree' in unitS:
self.toDegree()
if 'radian' in unitS:
self.toRadian()
return
def varlist(self):
# return a list of the variable names in standard (sorted) order
vlist = [k for k in sorted(self.val)]
return vlist
def toVector(self):
# return a numpy array containing the values of the coordinates
# they are sorted according to their names
vec = [self.val[k] for k in sorted(self.val)]
        return np.array(vec)
""" Unit tests for neural.py"""
# Author: <NAME>
# License: BSD 3 clause
import unittest
import numpy as np
# The following functions/classes are not automatically imported at
# initialization, so must be imported explicitly from neural.py and
# activation.py.
from mlrose.neural import (flatten_weights, unflatten_weights,
gradient_descent, NetworkWeights, ContinuousOpt,
NeuralNetwork, LogisticRegression, LinearRegression)
from mlrose.activation import identity, sigmoid, softmax
class TestNeural(unittest.TestCase):
"""Tests for neural.py functions."""
@staticmethod
def test_flatten_weights():
"""Test flatten_weights function"""
x = | np.arange(12) | numpy.arange |
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal, assert_raises
from statsmodels.tsa.stattools import acovf
from statsmodels.tsa.innovations.arma_innovations import arma_innovations
from statsmodels.tsa.arima.datasets.brockwell_davis_2002 import dowj, lake
from statsmodels.tsa.arima.estimators.yule_walker import yule_walker
@pytest.mark.low_precision('Test against Example 5.1.1 in Brockwell and Davis'
' (2016)')
def test_brockwell_davis_example_511():
# Make the series stationary
endog = dowj.diff().iloc[1:]
# Should have 77 observations
assert_equal(len(endog), 77)
# Autocovariances
desired = [0.17992, 0.07590, 0.04885]
assert_allclose(acovf(endog, fft=True, nlag=2), desired, atol=1e-5)
# Yule-Walker
yw, _ = yule_walker(endog, ar_order=1, demean=True)
assert_allclose(yw.ar_params, [0.4219], atol=1e-4)
assert_allclose(yw.sigma2, 0.1479, atol=1e-4)
@pytest.mark.low_precision('Test against Example 5.1.4 in Brockwell and Davis'
' (2016)')
def test_brockwell_davis_example_514():
# Note: this example is primarily tested in
# test_burg::test_brockwell_davis_example_514.
# Get the lake data, demean
endog = lake.copy()
# Yule-Walker
res, _ = yule_walker(endog, ar_order=2, demean=True)
    assert_allclose(res.ar_params, [1.0538, -0.2668], atol=1e-4)
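# Added illustrative cross-check (not part of the original test module): for an AR(1)
# model the Yule-Walker equations reduce to phi = gamma(1)/gamma(0) and
# sigma2 = gamma(0) * (1 - phi**2). Plugging in the dowj autocovariances quoted in
# test_brockwell_davis_example_511 reproduces the published estimates.
def _ar1_yule_walker_closed_form_sketch():
    gamma = np.array([0.17992, 0.07590, 0.04885])
    phi = gamma[1] / gamma[0]
    sigma2 = gamma[0] * (1 - phi**2)
    assert_allclose(phi, 0.4219, atol=1e-4)
    assert_allclose(sigma2, 0.1479, atol=1e-4)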
import torch
from torch.utils.data import TensorDataset, Dataset
from torch.utils.data import DataLoader
import numpy as np
class Dataset_V5(Dataset):
def __init__(self,X_signal,X_ci,Y,Y_mask,final_len_x,diff_window_ppg,device):
self.X_signal = X_signal
self.X_ci = X_ci
self.Y = Y
self.Y_mask = Y_mask
self.final_len_x = final_len_x
self.diff_window_ppg = diff_window_ppg
self.device = device
def __getitem__(self, index):
x = self.X_signal[index]
x_ci = self.X_ci[index]
y = self.Y[index]
y_mask = self.Y_mask[index]
        # Randomly shift (crop) the signal window
idx_roll = np.random.randint(0, self.diff_window_ppg)
x = x[:,idx_roll:idx_roll+self.final_len_x]
x_s = x[0:1,:]
        # Finally, min-max normalize the signal channel to [0, 1]
a_max = np.amax(x_s, axis=1)
a_min = np.amin(x_s, axis=1)
x_s = (x_s - a_min[None,:]) / (a_max[None,:] - a_min[None,:])
x_d1 = x[1:2,:]
a_max = np.amax(x_d1, axis=1)
a_min = np.amin(x_d1, axis=1)
x_d1 = (x_d1 - a_min[None,:]) / (a_max[None,:] - a_min[None,:])
x = np.concatenate((x_s,x_d1))
x = torch.from_numpy(x).float().to(self.device)
x_ci = torch.from_numpy(x_ci).float().to(self.device)
y = torch.from_numpy(y).float().to(self.device)
y_mask = torch.from_numpy(y_mask).float().to(self.device)
return (x,x_ci,y,y_mask)
def __len__(self):
return len(self.X_signal)
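# Usage sketch (added, illustrative; the array inputs and sizes are hypothetical):
# wrapping Dataset_V5 in a DataLoader. Each item returns (signal window, per-window
# features, target, mask); the window start is jittered by up to `diff_window_ppg`
# samples, and both the raw channel and the second (derivative-like `x_d1`) channel
# are min-max scaled to [0, 1] independently.
#
#   ds = Dataset_V5(X_signal, X_ci, Y, Y_mask,
#                   final_len_x=1000, diff_window_ppg=125, device='cpu')
#   loader = DataLoader(ds, batch_size=32, shuffle=True)
#   x, x_ci, y, y_mask = next(iter(loader))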
class Dataset_V6(Dataset):
def __init__(self,X_signal,Y,Y_mask,final_len_x,diff_window_ppg,device):
self.X_signal = X_signal
self.Y = Y
self.Y_mask = Y_mask
self.final_len_x = final_len_x
self.diff_window_ppg = diff_window_ppg
self.device = device
def __getitem__(self, index):
x = self.X_signal[index]
y = self.Y[index]
y_mask = self.Y_mask[index]
        # Randomly shift (crop) the signal window
idx_roll = np.random.randint(0, self.diff_window_ppg)
x = x[:,idx_roll:idx_roll+self.final_len_x]
x_s = x[0:1,:]
        # Finally, min-max normalize the signal channel to [0, 1]
        a_max = np.amax(x_s, axis=1)
import unittest
import numpy
import test_utils
class TestBasicAddition(unittest.TestCase):
# Test basic addition of all combinations of all types, not checking for any edge cases specifically.
ZERO = numpy.float32(0)
ONE = numpy.float32(1)
MIN_SUBNORM = numpy.float32(1e-45)
MAX_SUBNORM = numpy.float32(1.1754942e-38)
MIN_NORM = numpy.float32(1.1754944e-38)
MAX_NORM = numpy.float32(3.4028235e38)
INF = numpy.float32(numpy.inf)
NAN = numpy.float32(numpy.nan)
# Initialise the tester object used to run the assembled code.
@classmethod
def setUpClass(cls):
cls.tester = test_utils.SubroutineTester("test_addition.s")
cls.tester.initialise()
# Run a test to compare the expected sum of two floats to the actual sum.
def run_test(self, float1: numpy.float32, float2: numpy.float32):
expected = float1 + float2
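        # NaN never compares equal to anything (including itself), so NaN results are
        # checked with numpy.isnan rather than assertEqual.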
if numpy.isnan(expected):
self.assertTrue(numpy.isnan(TestBasicAddition.tester.run_test(float1, float2)))
else:
self.assertEqual(float1 + float2,
TestBasicAddition.tester.run_test(float1, float2))
def test_zero(self):
# Test that ±0 + x = x for all types of x.
self.run_test(self.ZERO, self.ZERO)
self.run_test(self.ZERO, -self.ZERO)
self.run_test(-self.ZERO, self.ZERO)
self.run_test(-self.ZERO, -self.ZERO)
self.run_test(self.ZERO, self.ONE)
self.run_test(self.ZERO, -self.ONE)
self.run_test(-self.ZERO, self.ONE)
self.run_test(-self.ZERO, -self.ONE)
self.run_test(self.ZERO, self.MIN_SUBNORM)
self.run_test(self.ZERO, -self.MIN_SUBNORM)
self.run_test(-self.ZERO, self.MIN_SUBNORM)
self.run_test(-self.ZERO, -self.MIN_SUBNORM)
self.run_test(self.ZERO, numpy.float32(9.060464e-39))
self.run_test(self.ZERO, -numpy.float32(9.060464e-39))
self.run_test(-self.ZERO, numpy.float32(9.060464e-39))
self.run_test(-self.ZERO, -numpy.float32(9.060464e-39))
self.run_test(self.ZERO, self.MAX_SUBNORM)
self.run_test(self.ZERO, -self.MAX_SUBNORM)
self.run_test(-self.ZERO, self.MAX_SUBNORM)
self.run_test(-self.ZERO, -self.MAX_SUBNORM)
self.run_test(self.ZERO, self.MIN_NORM)
self.run_test(self.ZERO, -self.MIN_NORM)
self.run_test(-self.ZERO, self.MIN_NORM)
self.run_test(-self.ZERO, -self.MIN_NORM)
self.run_test(self.ZERO, numpy.float32(395.6166))
self.run_test(self.ZERO, -numpy.float32(395.6166))
self.run_test(-self.ZERO, numpy.float32(395.6166))
self.run_test(-self.ZERO, -numpy.float32(395.6166))
self.run_test(self.ZERO, self.MAX_NORM)
self.run_test(self.ZERO, -self.MAX_NORM)
self.run_test(-self.ZERO, self.MAX_NORM)
self.run_test(-self.ZERO, -self.MAX_NORM)
self.run_test(self.ZERO, self.INF)
self.run_test(self.ZERO, -self.INF)
self.run_test(-self.ZERO, self.INF)
self.run_test(-self.ZERO, -self.INF)
self.run_test(self.ZERO, self.NAN)
self.run_test(-self.ZERO, self.NAN)
def test_one(self):
# Test ±1 + x for all types of x.
self.run_test(self.ONE, self.ZERO)
self.run_test(self.ONE, -self.ZERO)
self.run_test(-self.ONE, self.ZERO)
self.run_test(-self.ONE, -self.ZERO)
self.run_test(self.ONE, self.ONE)
self.run_test(self.ONE, -self.ONE)
self.run_test(-self.ONE, self.ONE)
self.run_test(-self.ONE, -self.ONE)
self.run_test(self.ONE, self.MIN_SUBNORM)
self.run_test(self.ONE, -self.MIN_SUBNORM)
self.run_test(-self.ONE, self.MIN_SUBNORM)
self.run_test(-self.ONE, -self.MIN_SUBNORM)
self.run_test(self.ONE, numpy.float32(1.902965e-39))
self.run_test(self.ONE, -numpy.float32(1.902965e-39))
self.run_test(-self.ONE, numpy.float32(1.902965e-39))
self.run_test(-self.ONE, -numpy.float32(1.902965e-39))
self.run_test(self.ONE, self.MAX_SUBNORM)
self.run_test(self.ONE, -self.MAX_SUBNORM)
self.run_test(-self.ONE, self.MAX_SUBNORM)
self.run_test(-self.ONE, -self.MAX_SUBNORM)
self.run_test(self.ONE, self.MIN_NORM)
self.run_test(self.ONE, -self.MIN_NORM)
self.run_test(-self.ONE, self.MIN_NORM)
self.run_test(-self.ONE, -self.MIN_NORM)
self.run_test(self.ONE, numpy.float32(7918.158))
self.run_test(self.ONE, -numpy.float32(7918.158))
self.run_test(-self.ONE, numpy.float32(7918.158))
self.run_test(-self.ONE, -numpy.float32(7918.158))
self.run_test(self.ONE, self.MAX_NORM)
self.run_test(self.ONE, -self.MAX_NORM)
self.run_test(-self.ONE, self.MAX_NORM)
self.run_test(-self.ONE, -self.MAX_NORM)
self.run_test(self.ONE, self.INF)
self.run_test(self.ONE, -self.INF)
self.run_test(-self.ONE, self.INF)
self.run_test(-self.ONE, -self.INF)
self.run_test(self.ONE, self.NAN)
self.run_test(-self.ONE, self.NAN)
def test_min_subnorm(self):
# Test ±MIN_SUBNORM + x for all types of x.
self.run_test(self.MIN_SUBNORM, self.ZERO)
self.run_test(self.MIN_SUBNORM, -self.ZERO)
self.run_test(-self.MIN_SUBNORM, self.ZERO)
self.run_test(-self.MIN_SUBNORM, -self.ZERO)
self.run_test(self.MIN_SUBNORM, self.ONE)
self.run_test(self.MIN_SUBNORM, -self.ONE)
self.run_test(-self.MIN_SUBNORM, self.ONE)
self.run_test(-self.MIN_SUBNORM, -self.ONE)
self.run_test(self.MIN_SUBNORM, self.MIN_SUBNORM)
self.run_test(self.MIN_SUBNORM, -self.MIN_SUBNORM)
self.run_test(-self.MIN_SUBNORM, self.MIN_SUBNORM)
self.run_test(-self.MIN_SUBNORM, -self.MIN_SUBNORM)
self.run_test(self.MIN_SUBNORM, numpy.float32(6.927885e-39))
self.run_test(self.MIN_SUBNORM, -numpy.float32(6.927885e-39))
self.run_test(-self.MIN_SUBNORM, numpy.float32(6.927885e-39))
self.run_test(-self.MIN_SUBNORM, -numpy.float32(6.927885e-39))
self.run_test(self.MIN_SUBNORM, self.MAX_SUBNORM)
self.run_test(self.MIN_SUBNORM, -self.MAX_SUBNORM)
self.run_test(-self.MIN_SUBNORM, self.MAX_SUBNORM)
self.run_test(-self.MIN_SUBNORM, -self.MAX_SUBNORM)
self.run_test(self.MIN_SUBNORM, self.MIN_NORM)
self.run_test(self.MIN_SUBNORM, -self.MIN_NORM)
self.run_test(-self.MIN_SUBNORM, self.MIN_NORM)
self.run_test(-self.MIN_SUBNORM, -self.MIN_NORM)
self.run_test(self.MIN_SUBNORM, numpy.float32(466603.3))
self.run_test(self.MIN_SUBNORM, -numpy.float32(466603.3))
self.run_test(-self.MIN_SUBNORM, numpy.float32(466603.3))
self.run_test(-self.MIN_SUBNORM, -numpy.float32(466603.3))
self.run_test(self.MIN_SUBNORM, self.MAX_NORM)
self.run_test(self.MIN_SUBNORM, -self.MAX_NORM)
self.run_test(-self.MIN_SUBNORM, self.MAX_NORM)
self.run_test(-self.MIN_SUBNORM, -self.MAX_NORM)
self.run_test(self.MIN_SUBNORM, self.INF)
self.run_test(self.MIN_SUBNORM, -self.INF)
self.run_test(-self.MIN_SUBNORM, self.INF)
self.run_test(-self.MIN_SUBNORM, -self.INF)
self.run_test(self.MIN_SUBNORM, self.NAN)
self.run_test(-self.MIN_SUBNORM, self.NAN)
def test_subnorm(self):
# Test ±x + y for subnormal x and all types of y.
self.run_test(numpy.float32(7.518523e-39), self.ZERO)
self.run_test(numpy.float32(7.518523e-39), -self.ZERO)
self.run_test(-numpy.float32(7.518523e-39), self.ZERO)
self.run_test(-numpy.float32(7.518523e-39), -self.ZERO)
self.run_test(numpy.float32(2.028916e-39), self.ONE)
self.run_test(numpy.float32(2.028916e-39), -self.ONE)
self.run_test(-numpy.float32(2.028916e-39), self.ONE)
self.run_test(-numpy.float32(2.028916e-39), -self.ONE)
self.run_test(numpy.float32(4.042427e-39), self.MIN_SUBNORM)
self.run_test(numpy.float32(4.042427e-39), -self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.042427e-39), self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.042427e-39), -self.MIN_SUBNORM)
self.run_test(numpy.float32(9.636327e-39), numpy.float32(1.0185049e-38))
self.run_test(numpy.float32(9.636327e-39), -numpy.float32(1.0185049e-38))
self.run_test(-numpy.float32(9.636327e-39), numpy.float32(1.0185049e-38))
self.run_test(-numpy.float32(9.636327e-39), -numpy.float32(1.0185049e-38))
self.run_test(numpy.float32(1.989006e-39), self.MAX_SUBNORM)
self.run_test(numpy.float32(1.989006e-39), -self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.989006e-39), self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.989006e-39), -self.MAX_SUBNORM)
self.run_test(numpy.float32(2.952435e-39), self.MIN_NORM)
self.run_test(numpy.float32(2.952435e-39), -self.MIN_NORM)
self.run_test(-numpy.float32(2.952435e-39), self.MIN_NORM)
self.run_test(-numpy.float32(2.952435e-39), -self.MIN_NORM)
self.run_test(numpy.float32(1.154907e-38), numpy.float32(4.0687437e-36))
self.run_test(numpy.float32(1.154907e-38), -numpy.float32(4.0687437e-36))
self.run_test(-numpy.float32(1.154907e-38), numpy.float32(4.0687437e-36))
self.run_test(-numpy.float32(1.154907e-38), -numpy.float32(4.0687437e-36))
self.run_test(numpy.float32(9.79494e-39), self.MAX_NORM)
self.run_test(numpy.float32(9.79494e-39), -self.MAX_NORM)
self.run_test(-numpy.float32(9.79494e-39), self.MAX_NORM)
self.run_test(-numpy.float32(9.79494e-39), -self.MAX_NORM)
self.run_test(numpy.float32(1.54569e-39), self.INF)
self.run_test(numpy.float32(1.54569e-39), -self.INF)
self.run_test(-numpy.float32(1.54569e-39), self.INF)
self.run_test(-numpy.float32(1.54569e-39), -self.INF)
self.run_test(numpy.float32(3.974073e-39), self.NAN)
self.run_test(-numpy.float32(3.974073e-39), self.NAN)
def test_max_subnorm(self):
# Test ±MAX_SUBNORM + x for all types of x.
self.run_test(self.MAX_SUBNORM, self.ZERO)
self.run_test(self.MAX_SUBNORM, -self.ZERO)
self.run_test(-self.MAX_SUBNORM, self.ZERO)
self.run_test(-self.MAX_SUBNORM, -self.ZERO)
self.run_test(self.MAX_SUBNORM, self.ONE)
self.run_test(self.MAX_SUBNORM, -self.ONE)
self.run_test(-self.MAX_SUBNORM, self.ONE)
self.run_test(-self.MAX_SUBNORM, -self.ONE)
self.run_test(self.MAX_SUBNORM, self.MIN_SUBNORM)
self.run_test(self.MAX_SUBNORM, -self.MIN_SUBNORM)
self.run_test(-self.MAX_SUBNORM, self.MIN_SUBNORM)
self.run_test(-self.MAX_SUBNORM, -self.MIN_SUBNORM)
self.run_test(self.MAX_SUBNORM, numpy.float32(2.736488e-39))
self.run_test(self.MAX_SUBNORM, -numpy.float32(2.736488e-39))
self.run_test(-self.MAX_SUBNORM, numpy.float32(2.736488e-39))
self.run_test(-self.MAX_SUBNORM, -numpy.float32(2.736488e-39))
self.run_test(self.MAX_SUBNORM, self.MAX_SUBNORM)
self.run_test(self.MAX_SUBNORM, -self.MAX_SUBNORM)
self.run_test(-self.MAX_SUBNORM, self.MAX_SUBNORM)
self.run_test(-self.MAX_SUBNORM, -self.MAX_SUBNORM)
self.run_test(self.MAX_SUBNORM, self.MIN_NORM)
self.run_test(self.MAX_SUBNORM, -self.MIN_NORM)
self.run_test(-self.MAX_SUBNORM, self.MIN_NORM)
self.run_test(-self.MAX_SUBNORM, -self.MIN_NORM)
self.run_test(self.MAX_SUBNORM, numpy.float32(8.027242e-35))
self.run_test(self.MAX_SUBNORM, -numpy.float32(8.027242e-35))
self.run_test(-self.MAX_SUBNORM, numpy.float32(8.027242e-35))
self.run_test(-self.MAX_SUBNORM, -numpy.float32(8.027242e-35))
self.run_test(self.MAX_SUBNORM, self.MAX_NORM)
self.run_test(self.MAX_SUBNORM, -self.MAX_NORM)
self.run_test(-self.MAX_SUBNORM, self.MAX_NORM)
self.run_test(-self.MAX_SUBNORM, -self.MAX_NORM)
self.run_test(self.MAX_SUBNORM, self.INF)
self.run_test(self.MAX_SUBNORM, -self.INF)
self.run_test(-self.MAX_SUBNORM, self.INF)
self.run_test(-self.MAX_SUBNORM, -self.INF)
self.run_test(self.MAX_SUBNORM, self.NAN)
self.run_test(-self.MAX_SUBNORM, self.NAN)
def test_min_norm(self):
# Test ±MIN_NORM + x for all types of x.
self.run_test(self.MIN_NORM, self.ZERO)
self.run_test(self.MIN_NORM, -self.ZERO)
self.run_test(-self.MIN_NORM, self.ZERO)
self.run_test(-self.MIN_NORM, -self.ZERO)
self.run_test(self.MIN_NORM, self.ONE)
self.run_test(self.MIN_NORM, -self.ONE)
self.run_test(-self.MIN_NORM, self.ONE)
self.run_test(-self.MIN_NORM, -self.ONE)
self.run_test(self.MIN_NORM, self.MIN_SUBNORM)
self.run_test(self.MIN_NORM, -self.MIN_SUBNORM)
self.run_test(-self.MIN_NORM, self.MIN_SUBNORM)
self.run_test(-self.MIN_NORM, -self.MIN_SUBNORM)
self.run_test(self.MIN_NORM, numpy.float32(7.235862e-39))
self.run_test(self.MIN_NORM, -numpy.float32(7.235862e-39))
self.run_test(-self.MIN_NORM, numpy.float32(7.235862e-39))
self.run_test(-self.MIN_NORM, -numpy.float32(7.235862e-39))
self.run_test(self.MIN_NORM, self.MAX_SUBNORM)
self.run_test(self.MIN_NORM, -self.MAX_SUBNORM)
self.run_test(-self.MIN_NORM, self.MAX_SUBNORM)
self.run_test(-self.MIN_NORM, -self.MAX_SUBNORM)
self.run_test(self.MIN_NORM, self.MIN_NORM)
self.run_test(self.MIN_NORM, -self.MIN_NORM)
self.run_test(-self.MIN_NORM, self.MIN_NORM)
self.run_test(-self.MIN_NORM, -self.MIN_NORM)
self.run_test(self.MIN_NORM, numpy.float32(3.0655702e-37))
self.run_test(self.MIN_NORM, -numpy.float32(3.0655702e-37))
self.run_test(-self.MIN_NORM, numpy.float32(3.0655702e-37))
self.run_test(-self.MIN_NORM, -numpy.float32(3.0655702e-37))
self.run_test(self.MIN_NORM, self.MAX_NORM)
self.run_test(self.MIN_NORM, -self.MAX_NORM)
self.run_test(-self.MIN_NORM, self.MAX_NORM)
self.run_test(-self.MIN_NORM, -self.MAX_NORM)
self.run_test(self.MIN_NORM, self.INF)
self.run_test(self.MIN_NORM, -self.INF)
self.run_test(-self.MIN_NORM, self.INF)
self.run_test(-self.MIN_NORM, -self.INF)
self.run_test(self.MIN_NORM, self.NAN)
self.run_test(-self.MIN_NORM, self.NAN)
def test_norm(self):
# Test ±x + y for normal x and all types of y.
self.run_test(numpy.float32(3.2528998e8), self.ZERO)
self.run_test(numpy.float32(3.2528998e8), -self.ZERO)
self.run_test(-numpy.float32(3.2528998e8), self.ZERO)
self.run_test(-numpy.float32(3.2528998e8), -self.ZERO)
self.run_test(numpy.float32(5781.5137), self.ONE)
self.run_test(numpy.float32(5781.5137), -self.ONE)
self.run_test(-numpy.float32(5781.5137), self.ONE)
self.run_test(-numpy.float32(5781.5137), -self.ONE)
self.run_test(numpy.float32(4.0233208e-35), self.MIN_SUBNORM)
self.run_test(numpy.float32(4.0233208e-35), -self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.0233208e-35), self.MIN_SUBNORM)
self.run_test(-numpy.float32(4.0233208e-35), -self.MIN_SUBNORM)
self.run_test(numpy.float32(3.4244755e-37), numpy.float32(7.951416e-39))
self.run_test(numpy.float32(3.4244755e-37), -numpy.float32(7.951416e-39))
self.run_test(-numpy.float32(3.4244755e-37), numpy.float32(7.951416e-39))
self.run_test(-numpy.float32(3.4244755e-37), -numpy.float32(7.951416e-39))
self.run_test(numpy.float32(1.772688e-35), self.MAX_SUBNORM)
self.run_test(numpy.float32(1.772688e-35), -self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.772688e-35), self.MAX_SUBNORM)
self.run_test(-numpy.float32(1.772688e-35), -self.MAX_SUBNORM)
self.run_test(numpy.float32(9.7266296e-36), self.MIN_NORM)
self.run_test(numpy.float32(9.7266296e-36), -self.MIN_NORM)
self.run_test(-numpy.float32(9.7266296e-36), self.MIN_NORM)
self.run_test(-numpy.float32(9.7266296e-36), -self.MIN_NORM)
self.run_test(numpy.float32(9.964942e17), numpy.float32(3.0321312e16))
self.run_test(numpy.float32(9.964942e17), -numpy.float32(3.0321312e16))
self.run_test(-numpy.float32(9.964942e17), numpy.float32(3.0321312e16))
self.run_test(-numpy.float32(9.964942e17), -numpy.float32(3.0321312e16))
self.run_test(numpy.float32(3.3541464e35), self.MAX_NORM)
self.run_test(numpy.float32(3.3541464e35), -self.MAX_NORM)
self.run_test(-numpy.float32(3.3541464e35), self.MAX_NORM)
self.run_test(-numpy.float32(3.3541464e35), -self.MAX_NORM)
self.run_test(numpy.float32(1.8177568e25), self.INF)
self.run_test(numpy.float32(1.8177568e25), -self.INF)
        self.run_test(-numpy.float32(1.8177568e25), self.INF)
        self.run_test(-numpy.float32(1.8177568e25), -self.INF)
from .helpers import quat_inv_trans, quat_trans, check_filepath, import_value, quat_mult, quat_conj, quat_to_euler, euler_to_quat
from .airplane import Airplane
from .standard_atmosphere import StandardAtmosphere
from .exceptions import SolverNotConvergedError
import json
import time
import copy
import warnings
import numpy as np
import math as m
import scipy.interpolate as sinterp
import scipy.optimize as sopt
import matplotlib.pyplot as plt
from stl import mesh
from mpl_toolkits.mplot3d import Axes3D
from airfoil_db import DatabaseBoundsError
class Scene:
"""A class defining a scene containing one or more aircraft.
Parameters
----------
scene_input : string or dict, optional
Dictionary or path to the JSON object specifying the scene parameters (see
'Creating Input Files for MachUp'). If not specified, all default values are chosen.
Raises
------
IOError
If input filepath or filename is invalid
"""
def __init__(self, scene_input={}):
# Initialize basic storage objects
self._airplanes = {}
self._N = 0
self._num_aircraft = 0
# Track whether the scene in its current state has been solved
# Should be set to False any time any state variable is changed without immediately thereafter calling solve_forces()
self._solved = False
# Import information from the input
self._load_params(scene_input)
# Set the error handling state
self.set_err_state()
def _load_params(self, scene_input):
# Loads JSON object and stores input parameters and aircraft
# File
if isinstance(scene_input, str):
check_filepath(scene_input,".json")
with open(scene_input) as input_json_handle:
self._input_dict = json.load(input_json_handle)
# Dictionary
elif isinstance(scene_input, dict):
self._input_dict = copy.deepcopy(scene_input)
# Input format not recognized
else:
raise IOError("Input to Scene class initializer must be a file path or Python dictionary, not type {0}.".format(type(scene_input)))
# Store solver parameters
solver_params = self._input_dict.get("solver", {})
self._solver_type = solver_params.get("type", "nonlinear")
self._solver_convergence = solver_params.get("convergence", 1e-10)
self._solver_relaxation = solver_params.get("relaxation", 1.0)
self._max_solver_iterations = solver_params.get("max_iterations", 100)
self._use_swept_sections = solver_params.get("use_swept_sections", True)
self._use_total_velocity = solver_params.get("use_total_velocity", True)
self._use_in_plane = solver_params.get("use_in_plane", True)
self._match_machup_pro = solver_params.get("match_machup_pro", False)
self._impingement_threshold = solver_params.get("impingement_threshold", 1e-10)
# Store unit system
self._unit_sys = self._input_dict.get("units", "English")
# Setup atmospheric property getter functions
scene_dict = self._input_dict.get("scene", {})
atmos_dict = scene_dict.get("atmosphere", {})
self._std_atmos = StandardAtmosphere(unit_sys=self._unit_sys)
self._get_density = self._initialize_density_getter(**atmos_dict)
self._get_wind = self._initialize_wind_getter(**atmos_dict)
self._get_viscosity = self._initialize_viscosity_getter(**atmos_dict)
self._get_sos = self._initialize_sos_getter(**atmos_dict)
# Initialize aircraft geometries
aircraft_dict = scene_dict.get("aircraft", {})
for key in aircraft_dict:
# Get inputs
airplane_file = self._input_dict["scene"]["aircraft"][key]["file"]
state = self._input_dict["scene"]["aircraft"][key].get("state",{})
control_state = self._input_dict["scene"]["aircraft"][key].get("control_state",{})
# Instantiate
self.add_aircraft(key, airplane_file, state=state, control_state=control_state)
def _initialize_density_getter(self, **kwargs):
# Load value from dictionary
default_density = self._std_atmos.rho(0.0)
rho = import_value("rho", kwargs, self._unit_sys, default_density)
# Constant value
if isinstance(rho, float):
self._constant_rho = rho
def density_getter(position):
return self._constant_rho
# Atmospheric table name
elif isinstance(rho, str):
# Profile
if not rho in ["standard"]:
raise IOError("{0} is not an allowable profile name.".format(rho))
def density_getter(position):
pos = position.T
return self._std_atmos.rho(-pos[2])
# Array
elif isinstance(rho, np.ndarray):
self._density_data = rho
# Create getters
if self._density_data.shape[1] == 2: # Density profile
def density_getter(position):
pos = position.T
return np.interp(-pos[2], self._density_data[:,0], self._density_data[:,1])
elif self._density_data.shape[1] == 4: # Density field
self._density_field_interpolator = sinterp.LinearNDInterpolator(self._density_data[:,:3],self._density_data[:,3])
def density_getter(position):
return self._density_field_interpolator(position)
# Improper specification
else:
raise IOError("Density improperly specified as {0}.".format(rho))
return density_getter
def _initialize_wind_getter(self, **kwargs):
# Load value from dict
default_wind = [0.0, 0.0, 0.0]
V_wind = import_value("V_wind", kwargs, self._unit_sys, default_wind)
# Store wind
if isinstance(V_wind, np.ndarray):
if V_wind.shape == (3,): # Constant wind vector
self._constant_wind = V_wind
def wind_getter(position):
return self._constant_wind*np.ones(position.shape)
else: # Array
self._wind_data = V_wind
# Create getters
if self._wind_data.shape[1] == 6: # Wind field
self._wind_field_x_interpolator = sinterp.LinearNDInterpolator(self._wind_data[:,:3], self._wind_data[:,3], fill_value=0.0)
self._wind_field_y_interpolator = sinterp.LinearNDInterpolator(self._wind_data[:,:3], self._wind_data[:,4], fill_value=0.0)
self._wind_field_z_interpolator = sinterp.LinearNDInterpolator(self._wind_data[:,:3], self._wind_data[:,5], fill_value=0.0)
def wind_getter(position):
single = len(position.shape)==1
Vx = self._wind_field_x_interpolator(position)
Vy = self._wind_field_y_interpolator(position)
Vz = self._wind_field_z_interpolator(position)
if single:
return np.array([Vx.item(), Vy.item(), Vz.item()])
else:
return np.array([Vx, Vy, Vz]).T
elif self._wind_data.shape[1] == 4: # wind profile
def wind_getter(position):
single = len(position.shape)==1
pos_T = position.T
Vx = np.interp(-pos_T[2], self._wind_data[:,0], self._wind_data[:,1])
Vy = np.interp(-pos_T[2], self._wind_data[:,0], self._wind_data[:,2])
Vz = np.interp(-pos_T[2], self._wind_data[:,0], self._wind_data[:,3])
if single:
return np.array([Vx.item(), Vy.item(), Vz.item()])
else:
return np.array([Vx, Vy, Vz]).T
else:
raise IOError("Wind array has the wrong number of columns.")
else:
raise IOError("Wind velocity improperly specified as {0}".format(V_wind))
return wind_getter
def _initialize_viscosity_getter(self, **kwargs):
# Load value from dictionary
default_visc = self._std_atmos.nu(0.0)
nu = import_value("viscosity", kwargs, self._unit_sys, default_visc)
# Constant value
if isinstance(nu, float):
self._constant_nu = nu
def viscosity_getter(position):
return self._constant_nu*np.ones((position.shape[:-1]))
# Atmospheric profile name
elif isinstance(nu, str):
# Check we have that profile
if not nu in ["standard"]:
raise IOError("{0} is not an allowable profile name.".format(nu))
def viscosity_getter(position):
pos = np.transpose(position)
return self._std_atmos.nu(-pos[2])
return viscosity_getter
def _initialize_sos_getter(self, **kwargs):
# Load value from dictionary
default_sos = self._std_atmos.a(0.0)
a = import_value("speed_of_sound", kwargs, self._unit_sys, default_sos)
# Constant value
if isinstance(a, float):
self._constant_a = a
def sos_getter(position):
return self._constant_a*np.ones((position.shape[:-1]))
# Atmospheric profile name
elif isinstance(a, str):
# Check we have that profile
if not a in ["standard"]:
raise IOError("{0} is not an allowable profile name.".format(a))
def sos_getter(position):
pos = np.transpose(position)
return self._std_atmos.a(-pos[2])
return sos_getter
def add_aircraft(self, airplane_name, airplane_input, state={}, control_state={}):
"""Inserts an aircraft into the scene. Note if an aircraft was specified
in the input object, it has already been added to the scene.
Parameters
----------
airplane_name : str
Name of the airplane to be added.
airplane_input : str or dict
JSON object (path) or dictionary describing the airplane.
state : dict
Dictionary describing the state of the airplane.
control_state : dict
Dictionary describing the state of the controls.
"""
# Determine the local wind vector for setting the state of the aircraft
aircraft_position = np.array(state.get("position", [0.0, 0.0, 0.0]))
v_wind = self._get_wind(aircraft_position)
# Create and store the aircraft object
self._airplanes[airplane_name] = Airplane(airplane_name, airplane_input, self._unit_sys, self, init_state=state, init_control_state=control_state, v_wind=v_wind)
# Update member variables
self._N += self._airplanes[airplane_name].N
self._num_aircraft += 1
# Update geometry
self._initialize_storage_arrays()
self._store_aircraft_properties()
self._perform_geometry_and_atmos_calcs()
def remove_aircraft(self, airplane_name):
"""Removes an aircraft from the scene.
Parameters
----------
airplane_name : str
Name of the airplane to be removed.
"""
# Remove aircraft from dictionary
try:
deleted_aircraft = self._airplanes.pop(airplane_name)
except KeyError:
raise RuntimeError("The scene has no aircraft named {0}.".format(airplane_name))
# Update quantities
self._N -= deleted_aircraft.get_num_cps()
self._num_aircraft -= 1
# Reinitialize arrays
if self._num_aircraft != 0:
self._initialize_storage_arrays()
self._store_aircraft_properties()
self._perform_geometry_and_atmos_calcs()
def _initialize_storage_arrays(self):
# Initialize arrays
# Section geometry
self._c_bar = np.zeros(self._N) # Average chord
self._dS = np.zeros(self._N) # Differential planform area
self._PC = np.zeros((self._N,3)) # Control point location
self._r_CG = np.zeros((self._N,3)) # Radii from airplane CG to control points
self._dl = np.zeros((self._N,3)) # Differential LAC elements
self._section_sweep = np.zeros(self._N)
# Node locations
self._P0 = np.zeros((self._N,self._N,3)) # Inbound vortex node location; takes into account effective LAC where appropriate
self._P0_joint = np.zeros((self._N,self._N,3)) # Inbound vortex joint node location
self._P1 = np.zeros((self._N,self._N,3)) # Outbound vortex node location
self._P1_joint = np.zeros((self._N,self._N,3)) # Outbound vortex joint node location
# Spatial node vectors and magnitudes
self._r_0 = np.zeros((self._N,self._N,3))
self._r_1 = np.zeros((self._N,self._N,3))
self._r_0_joint = np.zeros((self._N,self._N,3))
self._r_1_joint = np.zeros((self._N,self._N,3))
self._r_0_mag = np.zeros((self._N,self._N))
self._r_0_joint_mag = np.zeros((self._N,self._N))
self._r_1_mag = np.zeros((self._N,self._N))
self._r_1_joint_mag = np.zeros((self._N,self._N))
# Spatial node vector magnitude products
self._r_0_r_0_joint_mag = np.zeros((self._N,self._N))
self._r_0_r_1_mag = np.zeros((self._N,self._N))
self._r_1_r_1_joint_mag = np.zeros((self._N,self._N))
# Section unit vectors
self._u_a = np.zeros((self._N,3))
self._u_n = np.zeros((self._N,3))
self._u_s = np.zeros((self._N,3))
# Control point atmospheric properties
self._rho = np.zeros(self._N) # Density
self._nu = np.zeros(self._N) # Viscosity
self._a = np.ones(self._N) # Speed of sound
# Airfoil parameters
self._Re = np.zeros(self._N) # Reynolds number
self._M = np.zeros(self._N) # Mach number
self._aL0 = np.zeros(self._N) # Zero-lift angle of attack
self._CLa = np.zeros(self._N) # Lift slope
self._CL = np.zeros(self._N) # Lift coefficient
self._CD = np.zeros(self._N) # Drag coefficient
self._Cm = np.zeros(self._N) # Moment coefficient
# Velocities
self._v_wind = np.zeros((self._N,3))
self._v_inf = np.zeros((self._N,3)) # Control point freestream vector
if self._match_machup_pro:
self._v_inf_w_o_rotation = np.zeros((self._N,3)) # Control point freestream vector minus influence of aircraft rotation
self._P0_joint_v_inf = np.zeros((self._N,3))
self._P1_joint_v_inf = np.zeros((self._N,3))
# Misc
self._diag_ind = np.diag_indices(self._N)
self._gamma = np.zeros(self._N)
self._solved = False
def _store_aircraft_properties(self):
# Get properties of the aircraft that don't change with state
index = 0
self._airplane_objects = []
self._airplane_slices = []
# Loop through airplanes
for _, airplane_object in self._airplanes.items():
# Store airplane objects to make sure they are always accessed in the same order
self._airplane_objects.append(airplane_object)
# Section of the arrays belonging to this airplane
airplane_N = airplane_object.N
airplane_slice = slice(index, index+airplane_N)
self._airplane_slices.append(airplane_slice)
# Get properties
self._c_bar[airplane_slice] = airplane_object.c_bar
self._dS[airplane_slice] = airplane_object.dS
self._section_sweep[airplane_slice] = airplane_object.section_sweep
index += airplane_N
# Swept section corrections based on thin airfoil theory
if self._use_swept_sections:
C_lambda = np.cos(self._section_sweep)
self._c_bar *= C_lambda
self._C_sweep_inv = 1.0/C_lambda
self._solved = False
def _perform_geometry_and_atmos_calcs(self):
# Performs calculations necessary for solving NLL which are only dependent on geometry.
# This speeds up repeated calls to _solve(). This method should be called any time the
# geometry is updated, an aircraft is added to the scene, or the position or orientation
# of an aircraft changes. Note that all calculations occur in the Earth-fixed frame.
# Loop through airplanes
for airplane_object, airplane_slice in zip(self._airplane_objects, self._airplane_slices):
# Get airplane
q = airplane_object.q
p = airplane_object.p_bar
# Get geometries
PC = quat_inv_trans(q, airplane_object.PC)
self._r_CG[airplane_slice,:] = quat_inv_trans(q, airplane_object.PC_CG)
self._PC[airplane_slice,:] = p+PC
self._dl[airplane_slice,:] = quat_inv_trans(q, airplane_object.dl)
# Get section vectors
if self._use_swept_sections:
self._u_a[airplane_slice,:] = quat_inv_trans(q, airplane_object.u_a)
self._u_n[airplane_slice,:] = quat_inv_trans(q, airplane_object.u_n)
self._u_s[airplane_slice,:] = quat_inv_trans(q, airplane_object.u_s)
else:
self._u_a[airplane_slice,:] = quat_inv_trans(q, airplane_object.u_a_unswept)
self._u_n[airplane_slice,:] = quat_inv_trans(q, airplane_object.u_n_unswept)
self._u_s[airplane_slice,:] = quat_inv_trans(q, airplane_object.u_s_unswept)
# Node locations
# Note the first index indicates which control point this is the effective LAC for
self._P0[airplane_slice,airplane_slice,:] = p+quat_inv_trans(q, airplane_object.P0_eff)
self._P1[airplane_slice,airplane_slice,:] = p+quat_inv_trans(q, airplane_object.P1_eff)
self._P0_joint[airplane_slice,airplane_slice,:] = p+quat_inv_trans(q, airplane_object.P0_joint_eff)
self._P1_joint[airplane_slice,airplane_slice,:] = p+quat_inv_trans(q, airplane_object.P1_joint_eff)
# Get node locations for other aircraft from this aircraft
# This does not need to take the effective LAC into account
if self._num_aircraft > 1:
this_ind = range(airplane_slice.start, airplane_slice.stop)
other_ind = [i for i in range(self._N) if i not in this_ind] # control point indices for other airplanes
self._P0[other_ind,airplane_slice,:] = p+quat_inv_trans(q, airplane_object.P0)
self._P1[other_ind,airplane_slice,:] = p+quat_inv_trans(q, airplane_object.P1)
self._P0_joint[other_ind,airplane_slice,:] = p+quat_inv_trans(q, airplane_object.P0_joint)
self._P1_joint[other_ind,airplane_slice,:] = p+quat_inv_trans(q, airplane_object.P1_joint)
# Spatial node vectors
self._r_0[airplane_slice,airplane_slice,:] = quat_inv_trans(q, airplane_object.r_0)
self._r_1[airplane_slice,airplane_slice,:] = quat_inv_trans(q, airplane_object.r_1)
self._r_0_joint[airplane_slice,airplane_slice,:] = quat_inv_trans(q, airplane_object.r_0_joint)
self._r_1_joint[airplane_slice,airplane_slice,:] = quat_inv_trans(q, airplane_object.r_1_joint)
# Spatial node vector magnitudes
self._r_0_mag[airplane_slice,airplane_slice] = airplane_object.r_0_mag
self._r_0_joint_mag[airplane_slice,airplane_slice] = airplane_object.r_0_joint_mag
self._r_1_mag[airplane_slice,airplane_slice] = airplane_object.r_1_mag
self._r_1_joint_mag[airplane_slice,airplane_slice] = airplane_object.r_1_joint_mag
# Spatial node vector magnitude products
self._r_0_r_0_joint_mag[airplane_slice,airplane_slice] = airplane_object.r_0_r_0_joint_mag
self._r_0_r_1_mag[airplane_slice,airplane_slice] = airplane_object.r_0_r_1_mag
self._r_1_r_1_joint_mag[airplane_slice,airplane_slice] = airplane_object.r_1_r_1_joint_mag
# Fill in spatial node vectors between airplanes
if self._num_aircraft > 1:
for airplane_slice in self._airplane_slices:
this_ind = range(airplane_slice.start, airplane_slice.stop)
other_ind = [i for i in range(self._N) if i not in this_ind] # control point indices for other airplanes
# Spatial node vectors
self._r_0[airplane_slice,other_ind,:] = self._PC[airplane_slice,np.newaxis,:]-self._P0[airplane_slice,other_ind,:]
self._r_1[airplane_slice,other_ind,:] = self._PC[airplane_slice,np.newaxis,:]-self._P1[airplane_slice,other_ind,:]
self._r_0_joint[airplane_slice,other_ind,:] = self._PC[airplane_slice,np.newaxis,:]-self._P0_joint[airplane_slice,other_ind,:]
self._r_1_joint[airplane_slice,other_ind,:] = self._PC[airplane_slice,np.newaxis,:]-self._P1_joint[airplane_slice,other_ind,:]
# Calculate spatial node vector magnitudes
self._r_0_mag[airplane_slice,other_ind] = np.sqrt(np.einsum('ijk,ijk->ij', self._r_0[airplane_slice,other_ind,:], self._r_0[airplane_slice,other_ind,:]))
self._r_0_joint_mag[airplane_slice,other_ind] = np.sqrt(np.einsum('ijk,ijk->ij', self._r_0_joint[airplane_slice,other_ind,:], self._r_0_joint[airplane_slice,other_ind,:]))
self._r_1_mag[airplane_slice,other_ind] = np.sqrt(np.einsum('ijk,ijk->ij', self._r_1[airplane_slice,other_ind,:], self._r_1[airplane_slice,other_ind,:]))
self._r_1_joint_mag[airplane_slice,other_ind] = np.sqrt(np.einsum('ijk,ijk->ij', self._r_1_joint[airplane_slice,other_ind,:], self._r_1_joint[airplane_slice,other_ind,:]))
# Calculate magnitude products
self._r_0_r_0_joint_mag[airplane_slice,other_ind] = self._r_0_mag[airplane_slice,other_ind]*self._r_0_joint_mag[airplane_slice,other_ind]
self._r_0_r_1_mag[airplane_slice,other_ind] = self._r_0_mag[airplane_slice,other_ind]*self._r_1_mag[airplane_slice,other_ind]
self._r_1_r_1_joint_mag[airplane_slice,other_ind] = self._r_1_mag[airplane_slice,other_ind]*self._r_1_joint_mag[airplane_slice,other_ind]
# In-plane projection matrices
if self._use_in_plane:
self._P_in_plane = np.repeat(np.identity(3)[np.newaxis,:,:], self._N, axis=0)-np.matmul(self._u_s[:,:,np.newaxis], self._u_s[:,np.newaxis,:])
# Influence of bound and jointed vortex segments
with np.errstate(divide='ignore', invalid='ignore'):
# Bound
numer = ((self._r_0_mag+self._r_1_mag)[:,:,np.newaxis]*np.cross(self._r_0, self._r_1))
denom = self._r_0_r_1_mag*(self._r_0_r_1_mag+np.einsum('ijk,ijk->ij', self._r_0, self._r_1))
V_ji_bound = np.true_divide(numer, denom[:,:,np.newaxis])
V_ji_bound[np.diag_indices(self._N)] = 0.0 # Ensure this actually comes out to be zero
# Jointed 0
numer = (self._r_0_joint_mag+self._r_0_mag)[:,:,np.newaxis]*np.cross(self._r_0_joint, self._r_0)
denom = self._r_0_r_0_joint_mag*(self._r_0_r_0_joint_mag+np.einsum('ijk,ijk->ij', self._r_0_joint, self._r_0))
V_ji_joint_0 = | np.true_divide(numer, denom[:,:,np.newaxis]) | numpy.true_divide |
#!/usr/bin/env python3
import numpy as np
class AZQuiz:
actions = 28
N = 7
C = 4
def __init__(self, randomized):
self._board = | np.zeros([self.N, self.N], dtype=np.uint8) | numpy.zeros |
import os
import numpy as np
from sklearn.cluster import KMeans
from scipy.stats import norm
from matplotlib import pyplot as plt
import pickle as pkl
class NDB:
def __init__(self, training_data=None, number_of_bins=100, significance_level=0.05, z_threshold=None,
whitening=False, max_dims=None, cache_folder=None):
"""
NDB Evaluation Class
:param training_data: Optional - the training samples - array of m x d floats (m samples of dimension d)
:param number_of_bins: Number of bins (clusters) default=100
:param significance_level: The statistical significance level for the two-sample test
:param z_threshold: Allow defining a threshold in terms of difference/SE for defining a bin as statistically different
:param whitening: Perform data whitening - subtract mean and divide by per-dimension std
:param max_dims: Max dimensions to use in K-means. By default derived automatically from d
:param cache_folder: Optional - folder used to cache the clusters and results (to avoid re-calculation)
"""
self.number_of_bins = number_of_bins
self.significance_level = significance_level
self.z_threshold = z_threshold
self.whitening = whitening
self.ndb_eps = 1e-6
self.training_mean = 0.0
self.training_std = 1.0
self.max_dims = max_dims
self.cache_folder = cache_folder
self.bin_centers = None
self.bin_proportions = None
self.ref_sample_size = None
self.used_d_indices = None
self.results_file = None
self.test_name = 'ndb_{}_bins_{}'.format(self.number_of_bins, 'whiten' if self.whitening else 'orig')
self.cached_results = {}
if self.cache_folder:
self.results_file = os.path.join(cache_folder, self.test_name+'_results.pkl')
if os.path.isfile(self.results_file):
# print('Loading previous results from', self.results_file, ':')
self.cached_results = pkl.load(open(self.results_file, 'rb'))
# print(self.cached_results.keys())
if training_data is not None or cache_folder is not None:
bins_file = None
if cache_folder:
os.makedirs(cache_folder, exist_ok=True)
bins_file = os.path.join(cache_folder, self.test_name+'.pkl')
self.construct_bins(training_data, bins_file)
def construct_bins(self, training_samples, bins_file):
"""
Performs K-means clustering of the training samples
:param training_samples: An array of m x d floats (m samples of dimension d)
"""
if self.__read_from_bins_file(bins_file):
return
n, d = training_samples.shape
k = self.number_of_bins
if self.whitening:
self.training_mean = np.mean(training_samples, axis=0)
self.training_std = np.std(training_samples, axis=0) + self.ndb_eps
if self.max_dims is None and d > 1000:
# To run faster, perform binning on a sampled subset of the data dimensions (i.e. don't use all channels of all pixels)
self.max_dims = d//6
whitened_samples = (training_samples-self.training_mean)/self.training_std
d_used = d if self.max_dims is None else min(d, self.max_dims)
self.used_d_indices = np.random.choice(d, d_used, replace=False)
print('Performing K-Means clustering of {} samples in dimension {} / {} to {} clusters ...'.format(n, d_used, d, k))
print('Can take a couple of minutes...')
if n//k > 1000:
print('Training data size should be ~500 times the number of bins (for reasonable speed and accuracy)')
clusters = KMeans(n_clusters=k, max_iter=100, n_jobs=-1).fit(whitened_samples[:, self.used_d_indices])
bin_centers = np.zeros([k, d])
for i in range(k):
bin_centers[i, :] = np.mean(whitened_samples[clusters.labels_ == i, :], axis=0)
# Organize bins by size
label_vals, label_counts = np.unique(clusters.labels_, return_counts=True)
bin_order = np.argsort(-label_counts)
self.bin_proportions = label_counts[bin_order] / np.sum(label_counts)
self.bin_centers = bin_centers[bin_order, :]
self.ref_sample_size = n
self.__write_to_bins_file(bins_file)
print('Done.')
def evaluate(self, query_samples, model_label=None):
"""
Assign each sample to the nearest bin center (in L2), pre-whitening if required, and calculate the NDB
(Number of statistically Different Bins) and JS divergence scores.
:param query_samples: An array of m x d floats (m samples of dimension d)
:param model_label: optional label string for the evaluated model, allows plotting results of multiple models
:return: results dictionary containing NDB and JS scores and array of labels (assigned bin for each query sample)
"""
n = query_samples.shape[0]
query_bin_proportions, query_bin_assignments = self.__calculate_bin_proportions(query_samples)
# print(query_bin_proportions)
different_bins = NDB.two_proportions_z_test(self.bin_proportions, self.ref_sample_size, query_bin_proportions,
n, significance_level=self.significance_level,
z_threshold=self.z_threshold)
ndb = np.count_nonzero(different_bins)
js = NDB.jensen_shannon_divergence(self.bin_proportions, query_bin_proportions)
results = {'NDB': ndb,
'JS': js,
'Proportions': query_bin_proportions,
'N': n,
'Bin-Assignment': query_bin_assignments,
'Different-Bins': different_bins}
if model_label:
print('Results for {} samples from {}: '.format(n, model_label), end='')
self.cached_results[model_label] = results
if self.results_file:
# print('Storing result to', self.results_file)
pkl.dump(self.cached_results, open(self.results_file, 'wb'))
print('NDB =', ndb, 'NDB/K =', ndb/self.number_of_bins, ', JS =', js)
return results
def print_results(self):
print('NDB results (K={}{}):'.format(self.number_of_bins, ', data whitening' if self.whitening else ''))
for model in sorted(list(self.cached_results.keys())):
res = self.cached_results[model]
print('%s: NDB = %d, NDB/K = %.3f, JS = %.4f' % (model, res['NDB'], res['NDB']/self.number_of_bins, res['JS']))
def plot_results(self, models_to_plot=None):
"""
Plot the binning proportions of different methods
:param models_to_plot: optional list of model labels to plot
"""
K = self.number_of_bins
w = 1.0 / (len(self.cached_results)+1)
assert K == self.bin_proportions.size
assert self.cached_results
# Used for plotting only
def calc_se(p1, n1, p2, n2):
p = (p1 * n1 + p2 * n2) / (n1 + n2)
return np.sqrt(p * (1 - p) * (1/n1 + 1/n2))
if not models_to_plot:
models_to_plot = sorted(list(self.cached_results.keys()))
# Visualize the standard errors using the train proportions, the train sample size and the query sample size
train_se = calc_se(self.bin_proportions, self.ref_sample_size,
self.bin_proportions, self.cached_results[models_to_plot[0]]['N'])
plt.bar(np.arange(0, K)+0.5, height=train_se*2.0, bottom=self.bin_proportions-train_se,
width=1.0, label='Train$\pm$SE', color='gray')
ymax = 0.0
for i, model in enumerate(models_to_plot):
results = self.cached_results[model]
label = '%s (%i : %.4f)' % (model, results['NDB'], results['JS'])
ymax = max(ymax, np.max(results['Proportions']))
if K <= 70:
plt.bar(np.arange(0, K)+(i+1.0)*w, results['Proportions'], width=w, label=label)
else:
plt.plot(np.arange(0, K)+0.5, results['Proportions'], '--*', label=label)
plt.legend(loc='best')
plt.ylim((0.0, min(ymax, np.max(self.bin_proportions)*4.0)))
plt.grid(True)
plt.title('Binning Proportions Evaluation Results for {} bins (NDB : JS)'.format(K))
plt.show()
def __calculate_bin_proportions(self, samples):
if self.bin_centers is None:
print('First run construct_bins on samples from the reference training data')
assert samples.shape[1] == self.bin_centers.shape[1]
n, d = samples.shape
k = self.bin_centers.shape[0]
D = np.zeros([n, k], dtype=samples.dtype)
print('Calculating bin assignments for {} samples...'.format(n))
whitened_samples = (samples-self.training_mean)/self.training_std
for i in range(k):
print('.', end='', flush=True)
D[:, i] = np.linalg.norm(whitened_samples[:, self.used_d_indices] - self.bin_centers[i, self.used_d_indices],
ord=2, axis=1)
print()
labels = np.argmin(D, axis=1)
probs = np.zeros([k])
label_vals, label_counts = np.unique(labels, return_counts=True)
probs[label_vals] = label_counts / n
return probs, labels
def __read_from_bins_file(self, bins_file):
if bins_file and os.path.isfile(bins_file):
print('Loading binning results from', bins_file)
bins_data = pkl.load(open(bins_file,'rb'))
self.bin_proportions = bins_data['proportions']
self.bin_centers = bins_data['centers']
self.ref_sample_size = bins_data['n']
self.training_mean = bins_data['mean']
self.training_std = bins_data['std']
self.used_d_indices = bins_data['d_indices']
return True
return False
def __write_to_bins_file(self, bins_file):
if bins_file:
print('Caching binning results to', bins_file)
bins_data = {'proportions': self.bin_proportions,
'centers': self.bin_centers,
'n': self.ref_sample_size,
'mean': self.training_mean,
'std': self.training_std,
'd_indices': self.used_d_indices}
pkl.dump(bins_data, open(bins_file, 'wb'))
@staticmethod
def two_proportions_z_test(p1, n1, p2, n2, significance_level, z_threshold=None):
# Per http://stattrek.com/hypothesis-test/difference-in-proportions.aspx
# See also http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/binotest.htm
p = (p1 * n1 + p2 * n2) / (n1 + n2)
se = np.sqrt(p * (1 - p) * (1/n1 + 1/n2))
z = (p1 - p2) / se
# Allow defining a threshold in terms of Z (the difference relative to the SE) rather than in p-values.
if z_threshold is not None:
return abs(z) > z_threshold
p_values = 2.0 * norm.cdf(-1.0 * np.abs(z)) # Two-tailed test
return p_values < significance_level
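# Worked example (hypothetical counts): with p1=0.10, n1=10000, p2=0.12, n2=5000 the pooled
# proportion is p ~= 0.1067, se ~= 0.0053 and z ~= -3.74, giving a two-tailed p-value of ~1.8e-4,
# so at significance_level=0.05 that bin would be flagged as statistically different.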
@staticmethod
def jensen_shannon_divergence(p, q):
"""
Calculates the symmetric Jensen–Shannon divergence between the two PDFs
"""
m = (p + q) * 0.5
return 0.5 * (NDB.kl_divergence(p, m) + NDB.kl_divergence(q, m))
@staticmethod
def kl_divergence(p, q):
"""
The Kullback–Leibler divergence.
Defined only if q != 0 whenever p != 0.
"""
assert np.all(np.isfinite(p))
assert np.all( | np.isfinite(q) | numpy.isfinite |
from Node3D.base.node import GeometryNode
from Node3D.opengl import Mesh
from Node3D.vendor.NodeGraphQt.constants import *
import numpy as np
from Node3D.base.mesh.base_primitives import generate_tube, generate_sphere, \
generate_cylinder, generate_cone, generate_torus
import open3d as o3d
class Tube(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Tube'
def __init__(self):
super(Tube, self).__init__()
params = [{'name': 'Bottom center', 'type': 'vector3', 'value': [0, 0, 0]},
{'name': 'Top center', 'type': 'vector3', 'value': [0, 1, 0]},
{'name': 'Outer radius', 'type': 'vector2', 'value': [0.5, 0.5]},
{'name': 'Inner radius', 'type': 'vector2', 'value': [0.3, 0.3]},
{'name': 'Segments', 'type': 'int', 'value': 10, 'limits': (3, 30)},
{'name': 'Quad', 'type': 'bool', 'value': True}]
self.set_parameters(params)
self.cook()
def run(self):
outer = self.get_property("Outer radius")
inner = self.get_property("Inner radius")
s = self.get_property("Segments")
if s < 3:
self.geo = None
return
vertices, faces = generate_tube(self.get_property("Bottom center"), self.get_property("Top center"),
outer[0], outer[1], inner[0], inner[1], s,
self.get_property("Quad"))
self.geo = Mesh()
self.geo.addVertices(vertices)
self.geo.addFaces(faces)
class Box(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Box'
def __init__(self):
super(Box, self).__init__()
self.create_property("Size", value=[1, 1, 1], widget_type=NODE_PROP_VECTOR3)
self.cook()
def run(self):
size = self.get_property("Size")
x = size[0] * 0.5
y = size[1] * 0.5
z = size[2] * 0.5
self.geo = Mesh()
v1 = self.geo.addVertex([x, -y, -z])
v2 = self.geo.addVertex([x, -y, z])
v3 = self.geo.addVertex([x, y, z])
v4 = self.geo.addVertex([x, y, -z])
v5 = self.geo.addVertex([-x, -y, -z])
v6 = self.geo.addVertex([-x, -y, z])
v7 = self.geo.addVertex([-x, y, z])
v8 = self.geo.addVertex([-x, y, -z])
self.geo.addFace([v1, v2, v3, v4])
self.geo.addFace([v2, v6, v7, v3])
self.geo.addFace([v6, v5, v8, v7])
self.geo.addFace([v5, v1, v4, v8])
self.geo.addFace([v4, v3, v7, v8])
self.geo.addFace([v5, v6, v2, v1])
self.geo.mesh.update_normals()
class Grid(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Grid'
def __init__(self):
super(Grid, self).__init__()
params = [{'name': 'Size', 'type': 'vector2', 'value': [10, 10]},
{'name': 'Resolution', 'type': 'vector2i', 'value': [10, 10]}]
self.set_parameters(params)
self.cook()
def run(self):
size = self.get_property("Size")
resolution = self.get_property("Resolution")
x = size[0] * 0.5
z = size[1] * 0.5
fx = resolution[0]
fz = resolution[1]
if fx < 2 or fz < 2:
self.geo = None
return
x_range = np.linspace(-x, x, fx)
z_range = np.linspace(-z, z, fz)
# lay the grid points out on a regular fx-by-fz lattice (the third coordinate is a dummy 0, remapped below)
vertices = np.dstack(np.meshgrid(x_range, z_range, np.array([0.0]))).reshape(-1, 3)
# a[i, j] is the vertex index of column i in row j, so each quad face is (a, a+1, a+fx+1, a+fx)
a = np.add.outer(np.array(range(fx - 1)), fx * np.array(range(fz - 1)))
faces = np.dstack([a, a + 1, a + fx + 1, a + fx]).reshape(-1, 4)
nms = np.zeros((vertices.shape[0], 3), dtype=float)
nms[..., 1] = 1 # all vertex normals point straight up (+y after the axis swap below)
self.geo = Mesh()
self.geo.addVertices(vertices[:, [0, 2, 1]])
self.geo.addFaces(faces)
self.geo.setVertexAttribData('normal', nms, attribType='vector3', defaultValue=[0, 0, 0])
class Arrow(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Arrow'
def __init__(self):
super(Arrow, self).__init__()
params = [{'name': 'Radius', 'type': 'vector2', 'value': [1, 1.5]},
{'name': 'Height', 'type': 'vector2', 'value': [2, 4]},
{'name': 'Cylinder split', 'type': 'int', 'value': 1, 'limits': (1, 10)},
{'name': 'Cone split', 'type': 'int', 'value': 1, 'limits': (1, 10)},
{'name': 'Resolution', 'type': 'int', 'value': 20, 'limits': (3, 30)}]
self.set_parameters(params)
self.cook()
def run(self):
radius = self.get_property("Radius")
height = self.get_property("Height")
tri = o3d.geometry.TriangleMesh.create_arrow(radius[0], radius[1], height[0], height[1],
self.get_property("Resolution"),
self.get_property("Cylinder split"),
self.get_property("Cone split"))
self.geo = Mesh()
self.geo.addVertices(np.array(tri.vertices)[:, [0, 2, 1]])
self.geo.addFaces(np.array(tri.triangles))
class Cone(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Cone'
def __init__(self):
super(Cone, self).__init__()
params = [{'name': 'Radius', 'type': 'float', 'value': 1.0},
{'name': 'Height', 'type': 'float', 'value': 2.0},
{'name': 'Split', 'type': 'int', 'value': 1, 'limits': (1, 10)},
{'name': 'Resolution', 'type': 'int', 'value': 20, 'limits': (3, 30)},
{'name': 'Cap', 'type': 'bool', 'value': True}]
self.set_parameters(params)
self.cook()
def run(self):
s = self.get_property("Resolution")
if s < 3:
self.geo = None
return
tri, quad, vt = generate_cone(self.get_property("Radius"),
self.get_property("Height"),
s,
self.get_property("Split"))
self.geo = Mesh()
self.geo.addVertices(vt)
self.geo.addFaces(quad)
self.geo.addFaces(tri)
if not self.get_property("Cap"):
self.geo.removeVertex(0, True)
class CoordinateFrame(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Coordinate Frame'
def __init__(self):
super(CoordinateFrame, self).__init__()
params = [{'name': 'Size', 'type': 'float', 'value': 1.0},
{'name': 'Origin', 'type': 'vector3', 'value': [0, 0, 0]}]
self.set_parameters(params)
self.cook()
def run(self):
size = self.get_property("Size")
if size == 0:
size = 0.0001
tri = o3d.geometry.TriangleMesh.create_coordinate_frame(size, self.get_property("Origin"))
self.geo = Mesh()
self.geo.addVertices(np.array(tri.vertices)[:, [0, 2, 1]])
self.geo.addFaces(np.array(tri.triangles))
class Cylinder(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Cylinder'
def __init__(self):
super(Cylinder, self).__init__()
params = [{'name': 'Radius', 'type': 'float', 'value': 1.0},
{'name': 'Height', 'type': 'float', 'value': 2.0},
{'name': 'Split', 'type': 'int', 'value': 4, 'limits': (1, 10)},
{'name': 'Resolution', 'type': 'int', 'value': 20, 'limits': (3, 30)},
{'name': 'Cap', 'type': 'bool', 'value': True}]
self.set_parameters(params)
self.cook()
def run(self):
s = self.get_property("Resolution")
if s < 3:
self.geo = None
return
tri, quad, vt = generate_cylinder(self.get_property("Radius"),
self.get_property("Height"),
s,
self.get_property("Split"))
self.geo = Mesh()
self.geo.addVertices(vt)
self.geo.addFaces(quad)
if self.get_property("Cap"):
self.geo.addFaces(tri)
else:
self.geo.removeVertices([0, 1])
class Icosahedron(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Icosahedron'
def __init__(self):
super(Icosahedron, self).__init__()
params = [{'name': 'Radius', 'type': 'float', 'value': 1.0}]
self.set_parameters(params)
self.cook()
def run(self):
rad = self.get_property("Radius")
if rad == 0:
rad = 0.0001
tri = o3d.geometry.TriangleMesh.create_icosahedron(rad)
self.geo = Mesh()
self.geo.addVertices(np.array(tri.vertices)[:, [0, 2, 1]])
self.geo.addFaces(np.array(tri.triangles))
class Moebius(GeometryNode):
__identifier__ = 'Primitives'
NODE_NAME = 'Moebius'
def __init__(self):
super(Moebius, self).__init__()
params = [{'name': 'Length Split', 'type': 'int', 'value': 70, 'limits': (1, 100)},
{'name': 'Width Split', 'type': 'int', 'value': 15, 'limits': (1, 100)},
{'name': 'Twists', 'type': 'int', 'value': 1, 'limits': (0, 10)},
{'name': 'Radius', 'type': 'float', 'value': 1, 'limits': (0, 10)},
{'name': 'Flatness', 'type': 'float', 'value': 1, 'limits': (0, 30)},
{'name': 'Width', 'type': 'float', 'value': 1, 'limits': (0, 10)},
{'name': 'Scale', 'type': 'float', 'value': 1, 'limits': (0, 30)}]
self.set_parameters(params)
self.cook()
def run(self):
tri = o3d.geometry.TriangleMesh.create_moebius(self.get_property('Length Split'),
self.get_property('Width Split'),
self.get_property("Twists"),
self.get_property("Raidus"),
self.get_property("Flatness"),
self.get_property("Width"),
self.get_property("Scale"))
self.geo = Mesh()
self.geo.addVertices( | np.array(tri.vertices) | numpy.array |
# -*- coding: utf-8 -*-
import math
import os
import torch
import random
import wandb
import pandas as pd
import numpy as np
from timeit import default_timer as timer
from sklearn.utils import shuffle
def build_nfs_scenarios(n_s: int, x: np.array, y_scaler, flow, conditioner_args, max:int=1, gpu:bool=True, tag:str= 'pv', non_null_indexes:list=[]):
"""
Build scenarios for a NFs multi-output.
Scenarios are generated into an array (n_periods, n_s) where n_periods = 24 * n_days
:return: scenarios (n_periods, n_s)
"""
# to assign the data to the GPU, call .to(device) on the data
if gpu:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
device = "cpu"
flow.to(device)
if tag == 'pv':
n_periods_before = non_null_indexes[0]
n_periods_after = 24 - non_null_indexes[-1] - 1
print(n_periods_after, n_periods_before)
n_days = len(x)
nb_output, cond_in = conditioner_args['in_size'], conditioner_args['cond_in']
time_tot = 0.
scenarios = []
for i in range(n_days):
start = timer()
# sample nb_scenarios per day
predictions = flow.invert(z=torch.randn(n_s, nb_output).to(device), context=torch.tensor(np.tile(x[i, :], n_s).reshape(n_s, cond_in)).to(device).float()).cpu().detach().numpy()
predictions = y_scaler.inverse_transform(predictions)
# corrections -> clip so that generation is always >= 0 and <= max capacity
predictions[predictions < 0] = 0
predictions[predictions > max] = max
if tag == 'pv':
# the time periods where PV is non-zero are given by non_null_indexes
# for instance it could be [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
# in that case zeros must be added for the periods [0, 1, 2, 3] and [20, 21, 22, 23]
scenarios_tmp = np.concatenate((np.zeros((predictions.shape[0], n_periods_before)), predictions, np.zeros((predictions.shape[0], n_periods_after))), axis=1) # shape = (n_s, 24)
else:
scenarios_tmp = predictions
scenarios.append(scenarios_tmp.transpose()) # list of arrays of shape (24, n_s)
end = timer()
time_tot += end - start
print("day {:.0f} Approximate time left : {:2f} min".format(i, time_tot / (i + 1) * (n_days - (i + 1))/60), end="\r",flush=True)
# if i % 20 == 0:
# print("day {:.0f} Approximate time left : {:2f} min".format(i, time_tot / (i + 1) * (nb_days - (i + 1)) / 60))
print('Scenario generation time_tot %.1f min' % (time_tot / 60))
return | np.concatenate(scenarios,axis=0) | numpy.concatenate |
import numpy as np
import h5py
from deepposekit.models import load_model
from videoreader import VideoReader
import leap_utils as lu
import xarray_behave as xb
from leap_utils import preprocessing
from pathlib import Path
import xarray as xr
import pandas as pd
import logging
import zarr
import os
def deepposekit(tracksfilename: str, savename:str, modelname: str):
datename = os.path.split(os.path.split(tracksfilename)[0])[1]
root = os.getcwd()
logging.info(f'using model from {modelname}')
model = load_model(modelname)
logging.info(f'assembling data for {datename}')
dataset = xb.assemble(datename, root, dat_path='dat', res_path='res')
logging.info(dataset)
logging.info(f'will save to {savename}')
vr = VideoReader(dataset.attrs['video_filename'])
logging.info(vr)
frame_numbers, frame_indices = np.unique(dataset.nearest_frame, return_index=True)
frame_indices = frame_indices
CENTER = 1
box_centers = dataset.body_positions[frame_indices, :, CENTER, :].values
nb_frames = len(frame_numbers)
logging.info(f'loading boxes from {nb_frames} frames.')
skeleton = pd.read_csv(os.path.dirname(modelname) + '/skeleton_initialized.csv')
body_parts_skeleton = skeleton['name'].tolist()
logging.info('body parts from skeleton: {}.'.format(body_parts_skeleton))
body_parts = ['head', 'neck', 'front_left_leg', 'middle_left_leg', 'back_left_leg', 'front_right_leg', 'middle_right_leg', 'back_right_leg', 'thorax', 'left_wing', 'right_wing', 'tail']
logging.info('overriding with: {}.'.format(body_parts))
nb_flies = len(dataset.flies)
nb_parts = len(body_parts)
frame_start = min(frame_numbers)
frame_stop = max(frame_numbers)
batch_size = 100
box_size = (64, 64) if 'scaled' in modelname else (96, 96)
batch_idx = list(range(0, nb_frames, batch_size))
nb_batches = len(batch_idx) - 1
poses = | np.full((frame_stop, nb_flies, nb_parts, 2), np.nan) | numpy.full |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
''' A hyperboloid class, based on prolate spheroid coordinates.
The hyperboloid is here defined by
the distance where it is closest to the plane (minimum z)
and its radius at the tip,
as well as its x and y positions.
The methods of the class compensate for its x and y positions
when e.g. converting between coordinate systems.
'''
# General imports
import numpy as np
import logging
# Import from project files
from .geometrical import cart2ps
from .geometrical import ps2cart
from .coordinate_functions import find_nearest
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
ooo = np.array([0, 0, 0]).reshape(3, -1)
iio = np.array([1, 1, 0]).reshape(3, -1)
iii = np.array([1, 1, 1]).reshape(3, -1)
eps = np.finfo(float).eps # 2.22e-16 for double
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# #
# DEFINE HYPERBOLOID #
# #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
class GeometricalHyperboloid():
'''Geometrical hyperboloid.
Parameters
----------
pos : array
position, (3, 1)
rp : float
tip radius
Properties
----------
d : float
distance to plane
a : float
distance
nu0 : float
ps coordinate
r : float
distance from z-axis
'''
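# Usage sketch (hypothetical numbers): a tip 5 units above the plane with a 0.1 tip radius
# hyp = GeometricalHyperboloid(pos=[0.0, 0.0, 5.0], rp=0.1)
# hyp.nu0 # asymptotic prolate-spheroid angle
# hyp.is_inside(np.array([[0.0], [0.0], [6.0]])) # the on-axis point (0, 0, 6) lies inside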
def __init__(self, pos, rp=None):
if rp is None:
rp = eps # sharp hyperboloid
# Note the setters of `rp` and `pos` invokes `_update`
self._rp = rp # hack-set this one to prevent `_update`
self.pos = pos # set this one properly to `_update`
def _update(self):
# updates properties after pos or rp is changed
# Coordinates
self.x = self.pos[0]
self.y = self.pos[1]
self.z = self.pos[2]
if self.z < 0:
logger.warning(f'Hyperboloid below zero {self.pos[2, 0]}')
# Geometric
self.d = np.asscalar(self.z) # the minimum z-value of the hyperbole
self.a = self.d + self.rp / 2 # the distance to the focus
self.nu0 = np.arccos(self.d / self.a) # asymptotic angle
self.cos_nu0 = self.d / self.a
self.r = np.asscalar(np.sqrt(self.x**2 + self.y**2))
@property
def pos(self):
''' The position of the tip of the hyperbole. '''
return self._pos
@pos.setter
def pos(self, pos):
pos = np.array(pos).reshape(3, 1) # force error if wrong
self._pos = pos
self._update() # the reason for having a getter and setter
@property
def rp(self):
''' The hyperbole tip radius. '''
return self._rp
@rp.setter
def rp(self, rp):
self._rp = np.asscalar(np.array(rp)) # ensure scalar
self._update() # the reason for having a getter and setter
def __repr__(self):
return f'GeometricalHyperboloid(pos={self.pos}, rp={self.rp})'
def copy(self):
''' Return a copy of self. '''
return eval(repr(self))
# note: to_dict is mainly used for saving data
# it is easier to manage later than repr
def to_dict(self):
''' Return a dict with the instance parameters. '''
return {'rp': self.rp, 'pos': self.pos}
@classmethod
def from_dict(cls, d):
''' Return a class instance from a dictionary. '''
return cls(**d)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Methods using positions #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def ps2cart(self, pos_ps, to_calc=['x', 'y', 'z']):
''' Return Cartesian position corresponding to `pos_ps`.
The origin of the hyperbole is added to the output.
Parameters
----------
pos_ps : array(3, -1)
prolate spheroid positions
to_calc : lst(char)
list of coordinates to calculate.
['x', 'y', 'z'] is default
Returns
-------
pos_cart : array(3, -1)
Cartesian positions
'''
pos_ps = np.array(pos_ps).reshape(3, -1)
pos_cart = np.zeros_like(pos_ps)
if ('x' in to_calc) or ('y' in to_calc):
sinh_sin = np.sinh(pos_ps[0]) * np.sin(pos_ps[1])
if 'x' in to_calc:
pos_cart[0] = self.a * sinh_sin * np.cos(pos_ps[2])
pos_cart[0] += self.x
if 'y' in to_calc:
pos_cart[1] = self.a * sinh_sin * np.sin(pos_ps[2])
pos_cart[1] += self.y
if 'z' in to_calc:
pos_cart[2] = self.a * np.cosh(pos_ps[0]) * np.cos(pos_ps[1])
# todo: assure that this is required.
pos_cart[np.isnan(pos_cart)] = 0
return pos_cart
def cart2ps(self, pos_in, to_calc=['mu', 'nu', 'phi']):
''' Return prolate spheroid position corresponding to `pos_in`
relative to the position of the hyperbole.
Parameters
----------
pos_in : array(3, -1)
Cartesian positions
to_calc : lst(char)
list of coordinates to calculate.
['mu', 'nu', 'phi'] is default
Returns
-------
pos_cart : array(3, -1)
prolate spheroid positions
'''
pos_cart = pos_in - self.pos * iio # change origin
return cart2ps(pos_cart, ps_a=self.a, to_calc=to_calc)
def is_inside(self, pos, nu0=None):
''' Return True where the position inside the hyperbole.
Parameters
----------
pos : array(3, -1)
Cartesian positions
nu0 : float or array
defaults to nu0
Returns
-------
array[bool]
True for positions where nu < nu0, i.e. inside the hyperboloid.
'''
assert pos.shape[0] == 3, 'pos.shape[0] =! 3'
if nu0 is None:
nu0 = self.nu0
# pre-calculation
# marginally faster, and much more readable
# than inserting the expressions directly
x2 = (pos[0, :] - self.x)**2
y2 = (pos[1, :] - self.y)**2
zp = (pos[2, :] + self.a)**2
zm = (pos[2, :] - self.a)**2
cos_nu_2a = np.sqrt(x2 + y2 + zp) - np.sqrt(x2 + y2 + zm)
cos_nu0_2a = 2 * self.a * np.cos(nu0)
# Note: for nu < nu_0, cos nu > cos nu_0.
return cos_nu_2a > cos_nu0_2a
def find_nearest(self, pos_j=None, no=None):
''' Find the pos_j which is closest to the tip of the hyperbole.'''
return find_nearest(self.pos, pos_j=pos_j, no=no)
def dist_to(self, pos):
''' Find the distance from the tip of the hyperbole to each position.
'''
return np.linalg.norm(self.pos - pos, axis=0)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Methods giving positions #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def trace_nu(self, nu0=None, xdir='xz', mu_lim=0.5, num=50):
''' Calculate the z values for a constant nu0 line
in xz or yz direction.
Parameters
----------
nu0 : float
self.nu0 is default
xdir : str
plane to plot
mu_lim : float
mu limit
num : int
number of points
Returns
-------
array[float]
x or y values
array[float]
z values
'''
assert xdir in ['xz', 'yz'], 'wrong xdir'
if nu0 is None:
nu0 = self.nu0
# Create linspace and calculate values
mu = np.linspace(-mu_lim, mu_lim, num=num)
x = self.a * np.sinh(mu) * np.sin(nu0)
z = self.a * np.cosh(mu) * np.cos(nu0)
# Adjust for origin
if xdir == 'xz':
x += self.x
elif xdir == 'yz':
x += self.y
# fixme:
if __debug__ and False:
if xdir == 'xz':
poses = np.vstack((x, 0 * x, z))
elif xdir == 'yz':
poses = np.vstack((0 * x, x, z))
pos_ps = self.cart2ps(poses)
# Tolerance due to mu=0 returning mu=1e-8
assert np.allclose(np.absolute(mu), pos_ps[0, :], atol=1e-7)
# Assert all neighbors are equal
assert np.allclose(pos_ps[1, :], np.roll(pos_ps[1, :], 1))
return x, z
def trace_nu_2D(self, nu=None, xdir='xz', offset=None, xlim=None, num=100):
''' Calculate the z values for a constant nu line
in xz or yz direction.
Parameters
----------
nu : float
self.nu0 is default
xdir : str
plane to plot
offset: float
None: through self,
0: through origin
x_lim : float
x limit
num : int
number of points
Returns
-------
array[float]
x or y values
array[float]
z values
'''
assert xdir in ['xz', 'yz'], 'wrong xdir'
if nu is None:
nu = self.nu0
if xlim is None:
xlim = self.rp * 20
if offset is None:
offset = 0
else:
if xdir == 'xz':
offset -= self.y
elif xdir == 'yz':
offset -= self.x
# Create linspace and calculate
x = np.linspace(-xlim, xlim, num=num)
r2 = x**2 + offset**2
a2_snu2_inv = 1 / (self.a**2 * np.sin(nu)**2)
z = self.a * np.cos(nu) * np.sqrt(1 + r2 * a2_snu2_inv)
# Adjust for origin
if xdir == 'xz':
x += self.x
elif xdir == 'yz':
x += self.y
debug = False
if debug and __debug__:
if xdir == 'xz':
poses = | np.vstack((x, 0 * x + offset, z)) | numpy.vstack |
import pickle as pkl
import numpy as np
import scipy.sparse as sp
import torch
import networkx as nx
from sklearn.metrics import roc_auc_score, average_precision_score, accuracy_score, f1_score, log_loss
import matplotlib.pyplot as plt
## Miscellaneous useful functions ##
def load_graph_to_numpy(path_to_edgelist):
pass
def load_graph(path_to_edgelist, subgraph=None, weighted=False, seed=0):
"""
:param subgraph: None or int - number of nodes to sample
"""
# nx_graph = nx.from_edgelist([l.split() for l in open(path_to_edgelist)])
if weighted:
nx_graph = nx.read_weighted_edgelist(path_to_edgelist)
else:
nx_graph = nx.read_edgelist(path_to_edgelist)
print('The graph has {} nodes and {} edges'.format(nx_graph.number_of_nodes(),
nx_graph.number_of_edges()))
if subgraph is None:
return nx_graph
if seed:
np.random.seed(seed)
print('Sampling {}-node subgraph from original graph'.format(subgraph))
return nx_graph.subgraph(np.random.choice(nx_graph.nodes(),
size=subgraph, replace=False))
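# e.g. (hypothetical edgelist path): g = load_graph('data/edges.txt', subgraph=500, seed=42)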
def get_dual(graph, sparse=True):
# graph is a networkx Graph
L = nx.line_graph(graph)
nodelist = sorted(L.nodes())
# may wrap sp.csr around numpy
if sparse:
return nx.to_scipy_sparse_matrix(L, nodelist), nodelist
return nx.to_numpy_matrix(L, nodelist), nodelist
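# Sanity-check example: the line graph of a triangle is again a triangle, so
# get_dual(nx.cycle_graph(3), sparse=False)[0] is a 3x3 adjacency with ones everywhere off the diagonal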
# def get_dual(adj):
# # adj is a networkx Graph
# adj = nx.from_numpy_array(adj)
# L = nx.line_graph(adj)
# nodelist = sorted(L.nodes())
# return nx.to_numpy_matrix(L, nodelist), {i: n for i, n in enumerate(nodelist)}
def get_features(adj, sparse=True):
if sparse:
return sp.eye(adj.shape[0])
return np.identity(adj.shape[0])
############### VGAE-specific ################
########################################################################################
# ------------------------------------
# Some functions borrowed from:
# https://github.com/tkipf/pygcn and
# https://github.com/tkipf/gae
# ------------------------------------
class dotdict(dict):
"""dot.notation access to dictionary attributes"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def eval_gae_lp(edges_pos, edges_neg, emb, adj_orig, threshold=0.5, verbose=False):
"""
Evaluate VGAE learned embeddings on Link Prediction task.
"""
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Predict on test set of edges
emb = emb.data.numpy()
adj_rec = np.dot(emb, emb.T)
preds = []
pos = []
for e in edges_pos:
preds.append(sigmoid(adj_rec[e[0], e[1]]))
pos.append(adj_orig[e[0], e[1]])
preds_neg = []
neg = []
for e in edges_neg:
preds_neg.append(sigmoid(adj_rec[e[0], e[1]]))
neg.append(adj_orig[e[0], e[1]])
preds_all = np.hstack([preds, preds_neg])
labels_all = np.hstack([np.ones(len(preds)), np.zeros(len(preds))])
if verbose:
print("EVAL GAE")
p = np.random.choice(range(len(preds_all)), replace=False, size=min([len(preds_all), 100]))
print(preds_all[p])
print(labels_all[p])
accuracy = accuracy_score((preds_all > threshold).astype(float), labels_all)
roc_score = roc_auc_score(labels_all, preds_all)
ap_score = average_precision_score(labels_all, preds_all)
f1score = f1_score(labels_all, (preds_all > threshold).astype(float))
logloss = log_loss(labels_all, preds_all)
return accuracy, roc_score, ap_score, f1score, logloss
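# Usage sketch (hypothetical variables): emb is the (n_nodes, d) latent torch tensor from the encoder,
# adj_orig the dense original adjacency, and the edge lists hold held-out (i, j) index pairs:
# acc, roc, ap, f1, ll = eval_gae_lp(test_edges, test_edges_false, emb, adj_orig)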
def make_sparse(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(np.vstack((sparse_mx.row,
sparse_mx.col))).long()
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def load_data(dataset):
# load the data: x, tx, allx, graph
names = ['x', 'tx', 'allx', 'graph']
objects = []
for i in range(len(names)):
objects.append(
pkl.load(open("data/ind.{}.{}".format(dataset, names[i]), 'rb'), encoding='latin1'))
x, tx, allx, graph = tuple(objects)
test_idx_reorder = parse_index_file(
"data/ind.{}.test.index".format(dataset))
test_idx_range = np.sort(test_idx_reorder)
if dataset == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(
min(test_idx_reorder), max(test_idx_reorder) + 1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - min(test_idx_range), :] = tx
tx = tx_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
return adj, features
# Subsample sparse variables
def get_subsampler(variable):
data = variable.view(1, -1).data.numpy()
edges = np.where(data == 1)[1]
nonedges = np.where(data == 0)[1]
def sampler():
idx = np.random.choice(
nonedges.shape[0], edges.shape[0], replace=False)
return torch.LongTensor(np.append(edges, nonedges[idx]))
return sampler
def plot_results(results, test_freq, path='results.png'):
# Init
plt.close('all')
fig = plt.figure(figsize=(8, 8))
x_axis_train = range(len(results['train_elbo']))
x_axis_test = range(0, len(x_axis_train), test_freq)
# Elbo
ax = fig.add_subplot(2, 2, 1)
ax.plot(x_axis_train, results['train_elbo'])
ax.set_ylabel('Loss (ELBO)')
ax.set_title('Loss (ELBO)')
ax.legend(['Train'], loc='upper right')
# Accuracy
ax = fig.add_subplot(2, 2, 2)
ax.plot(x_axis_train, results['accuracy_train'])
ax.plot(x_axis_test, results['accuracy_test'])
ax.set_ylabel('Accuracy')
ax.set_title('Accuracy')
ax.legend(['Train', 'Test'], loc='lower right')
# ROC
ax = fig.add_subplot(2, 2, 3)
ax.plot(x_axis_train, results['roc_train'])
ax.plot(x_axis_test, results['roc_test'])
ax.set_xlabel('Epoch')
ax.set_ylabel('ROC AUC')
ax.set_title('ROC AUC')
ax.legend(['Train', 'Test'], loc='lower right')
# Precision
ax = fig.add_subplot(2, 2, 4)
ax.plot(x_axis_train, results['ap_train'])
ax.plot(x_axis_test, results['ap_test'])
ax.set_xlabel('Epoch')
ax.set_ylabel('Precision')
ax.set_title('Precision')
ax.legend(['Train', 'Test'], loc='lower right')
# Save
fig.tight_layout()
plt.show()
# fig.savefig(path)
def load_multiclass(path="../data/cora/", dataset="cora"):
"""Load citation network dataset (cora only for now)"""
print('Loading {} dataset...'.format(dataset))
if 'cora' in path:
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)} # node_name -> number
edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(labels.shape[0], labels.shape[0]),
dtype=np.float32)
# idx_train = range(140)
# idx_val = range(200, 500)
# idx_test = range(500, 1500)
rng = list(range(1500))
np.random.shuffle(rng)
idx_train = rng[:1000]
idx_val = rng[1000:1200]
idx_test = rng[1200:1500]
# idx_train = np.random.choice(range(1500), replace=False, size=1000)
# idx_val = np.random.choice(range(1500), replace=False, size=300)
# idx_test = np.random.choice(range(1500), replace=False, size=200)
labels = torch.LongTensor( | np.where(labels) | numpy.where |
"""Generate a diffusion map embedding
"""
import numpy as np
def compute_diffusion_map(L, alpha=0.5, n_components=None, diffusion_time=0,
skip_checks=False, overwrite=False):
"""Compute the diffusion maps of a symmetric similarity matrix
L : matrix N x N
L is symmetric and L(x, y) >= 0
alpha: float [0, 1]
Setting alpha=1, the diffusion operator approximates the
Laplace-Beltrami operator. We then recover the Riemannian geometry
of the data set regardless of the distribution of the points. To
describe the long-term behavior of the point distribution of a
system of stochastic differential equations, we can use alpha=0.5
and the resulting Markov chain approximates the Fokker-Planck
diffusion. With alpha=0, it reduces to the classical graph Laplacian
normalization.
n_components: int
The number of diffusion map components to return. Due to the
spectrum decay of the eigenvalues, only a few terms are necessary to
achieve a given relative accuracy in the sum M^t.
diffusion_time: float >= 0
use the diffusion_time (t) step transition matrix M^t
t not only serves as a time parameter, but also has the dual role of
scale parameter. One of the main ideas of diffusion framework is
that running the chain forward in time (taking larger and larger
powers of M) reveals the geometric structure of X at larger and
larger scales (the diffusion process).
t = 0 empirically provides a reasonable balance from a clustering
perspective. Specifically, the notion of a cluster in the data set
is quantified as a region in which the probability of escaping this
region is low (within a certain time t).
skip_checks: bool
Avoid expensive pre-checks on input data. The caller has to make
sure that input data is valid or results will be undefined.
overwrite: bool
Optimize memory usage by re-using input matrix L as scratch space.
References
----------
[1] https://en.wikipedia.org/wiki/Diffusion_map
[2] <NAME>.; <NAME>. (2006). "Diffusion maps". Applied and
Computational Harmonic Analysis 21: 5-30. doi:10.1016/j.acha.2006.04.006
"""
import numpy as np
import scipy.sparse as sps
use_sparse = False
if sps.issparse(L):
use_sparse = True
if not skip_checks:
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
if not _graph_is_connected(L):
raise ValueError('Graph is disconnected')
ndim = L.shape[0]
if overwrite:
L_alpha = L
else:
L_alpha = L.copy()
if alpha > 0:
# Step 2
d = np.array(L_alpha.sum(axis=1)).flatten()
d_alpha = np.power(d, -alpha)
if use_sparse:
L_alpha.data *= d_alpha[L_alpha.indices]
L_alpha = sps.csr_matrix(L_alpha.transpose().toarray())
L_alpha.data *= d_alpha[L_alpha.indices]
L_alpha = sps.csr_matrix(L_alpha.transpose().toarray())
else:
L_alpha = d_alpha[:, np.newaxis] * L_alpha
L_alpha = L_alpha * d_alpha[np.newaxis, :]
# Step 3
d_alpha = np.power(np.array(L_alpha.sum(axis=1)).flatten(), -1)
if use_sparse:
L_alpha.data *= d_alpha[L_alpha.indices]
else:
L_alpha = d_alpha[:, np.newaxis] * L_alpha
M = L_alpha
from scipy.sparse.linalg import eigsh, eigs
# Step 4
func = eigs
if n_components is not None:
lambdas, vectors = func(M, k=n_components + 1)
else:
lambdas, vectors = func(M, k=max(2, int( | np.sqrt(ndim) | numpy.sqrt |
# coding: utf-8
# # Mask R-CNN - Train on Shapes Dataset
#
#
# This notebook shows how to train Mask R-CNN on your own dataset. To keep things simple we use a synthetic dataset of shapes (squares, triangles, and circles) which enables fast training. You'd still need a GPU, though, because the network backbone is a Resnet101, which would be too slow to train on a CPU. On a GPU, you can start to get okay-ish results in a few minutes, and good results in less than an hour.
#
# The code of the *Shapes* dataset is included below. It generates images on the fly, so it doesn't require downloading any data. And it can generate images of any size, so we pick a small image size to train faster.
# In[1]:
import os
import pickle
import traceback
import tensorflow as tf
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
set_session(tf.Session(config=config))
import sys
import random
import skimage.io
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
from config import Config
import utils
import model as modellib
import visualize
from model import log
from USDataset import USDataset
# get_ipython().run_line_magic('matplotlib', 'inline')
# Root directory of the project
ROOT_DIR = os.getcwd()
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
dirs = os.listdir(MODEL_DIR)
dirs = [os.path.join(MODEL_DIR, filename) for filename in dirs]
ctimes = [os.path.getctime(filename) for filename in dirs]
MODEL_DIR = dirs[ctimes.index(max(ctimes))]
# Path to COCO trained weights
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# ## Configurations
# In[2]:
class ShapesConfig(Config):
"""Configuration for training on the toy shapes dataset.
Derives from the base Config class and overrides values specific
to the toy shapes dataset.
"""
# Give the configuration a recognizable name
NAME = "ultar_sound"
# Train on 1 GPU and 8 images per GPU. We can put multiple images on each
# GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
GPU_COUNT = 1
IMAGES_PER_GPU = 1
# Number of classes (including background)
    NUM_CLASSES = 1 + 1  # background + 1 object class
    # Use smaller anchors because our image and objects are small
    # (note: the second RPN_ANCHOR_SCALES assignment below overrides the first)
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels
    RPN_ANCHOR_SCALES = (32, 64, 128)  # anchor side in pixels
# # Reduce training ROIs per image because the images are small and have
# # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
TRAIN_ROIS_PER_IMAGE = 320
#
# # Use a small epoch since the data is simple
STEPS_PER_EPOCH = 100
#
# # use small validation steps since the epoch is small
# VALIDATION_STEPS = 5
config = ShapesConfig()
# config.display()
# ## Notebook Preferences
# In[3]:
def get_ax(rows=1, cols=1, size=8):
"""Return a Matplotlib Axes array to be used in
all visualizations in the notebook. Provide a
central point to control graph sizes.
Change the default size attribute to control the size
of rendered images
"""
_, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
return ax
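# Example usage (commented out; get_ax is a thin wrapper around plt.subplots):
# ax = get_ax(rows=1, cols=2, size=8)   # e.g. ground truth vs. prediction side by side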
# In[5]:
# Training dataset
dataset_train = USDataset('train.txt')
dataset_train.prepare()
# Validation dataset
dataset_val = USDataset('data/label.txt')
dataset_val.prepare()
# In[6]:
# Load and display random samples
# image_ids = np.random.choice(dataset_train.image_ids, 4)
# for image_id in image_ids:
# image = dataset_train.load_image(image_id)
# mask, class_ids = dataset_train.load_mask(image_id)
# visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)
# ## Create Model
# In[ ]:
# ## Detection
# In[11]:
class InferenceConfig(ShapesConfig):
GPU_COUNT = 1
IMAGES_PER_GPU = 1
inference_config = InferenceConfig()
print(MODEL_DIR)
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
config=inference_config,
model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
# model_path = os.path.join(MODEL_DIR, "mask_rcnn_ultar_sound_000%s.h5" % sys.argv[-1])
model_path = os.path.join(MODEL_DIR, "mask_rcnn_ultar_sound_000%s.h5" % 8)
# model_path = os.path.join("mask_rcnn_shapes.h5")
# model_path = model.find_last()[1]
# Load trained weights (fill in path to trained weights here)
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# In[12]:
# Test on a random image
# image_id = random.choice(dataset_val.image_ids)
# original_image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, inference_config,
# image_id, use_mini_mask=False)
#
# log("original_image", original_image)
# log("image_meta", image_meta)
# log("gt_class_id", gt_bbox)
# log("gt_bbox", gt_bbox)
# log("gt_mask", gt_mask)
#
# visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
# dataset_train.class_names, figsize=(8, 8))
# In[13]:
# results = model.detect([original_image], verbose=1)
#
# r = results[0]
# visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
# dataset_val.class_names, r['scores'], ax=get_ax())
# ## Evaluation
# In[14]:
# Compute VOC-Style mAP @ IoU=0.5
# Running on 10 images. Increase for better accuracy.
# image_ids = np.random.choice(dataset_val.image_ids, 330)
image_ids = dataset_val.image_ids
APs = []
for image_id in image_ids:
try:
# Load image and ground truth data
image, image_meta, gt_class_id, gt_bbox, gt_mask = modellib.load_image_gt(dataset_val, inference_config,
image_id, use_mini_mask=False)
# print(image.shape)
molded_images = np.expand_dims(modellib.mold_image(image, inference_config), 0)
# Run object detection
results = model.detect([image], verbose=0)
r = results[0]
rois = r['rois']
area = (rois[:, 2] - rois[:, 0]) * (rois[:, 3] - rois[:, 1])
ids = np.transpose(np.asarray(np.where(area > 1000)), (0, 1))[0]
rois = | np.asarray(r['rois']) | numpy.asarray |
### This script combines position data from multiple cameras.
### It also corrects frame time offset errors in PosLog.csv files
### It also removes bad position data lines
### Use as follows:
### import CombineTrackingData as combPos
### combPos.combdata(Path-To-Recording-Folder)
### By <NAME>, May 2017, UCL
from itertools import combinations
import numpy as np
from scipy.spatial.distance import euclidean
from openEPhys_DACQ import NWBio
from tqdm import tqdm
def combineCamerasData(cameraPos, lastCombPos, cameraIDs, CameraSettings, arena_size):
# This outputs position data based on which camera is closest to tracking target.
    # cameraPos - list of numpy vectors with 4 elements (x1,y1,x2,y2) for each camera
    # lastCombPos - Last known output from this function. If None, the function will attempt to locate the animal.
    # cameraIDs - list of CameraSettings.keys() in corresponding order to cameraPos and lastCombPos
# CameraSettings - settings dictionary created by CameraSettings.CameraSettingsApp
# arena_size - 2 element numpy array with arena height and width.
# Output - numpy vector (x1,y1,x2,y2) with data from closest camera
    # Animal detection method in case lastCombPos=None:
    # the target must be detected simultaneously by at least 2 cameras whose readings are separated by less than half of CameraSettings['General']['camera_transfer_radius']
    # If successful, the mean coordinate of the best-matching camera pair is set as output
    # If unsuccessful, output is None
N_RPis = len(cameraPos)
cameraPos = np.array(cameraPos, dtype=np.float32)
camera_locations = []
for cameraID in cameraIDs:
camera_locations.append(CameraSettings['CameraSpecific'][cameraID]['location_xy'])
camera_locations = np.array(camera_locations, dtype=np.float32)
    # Only work with camera data from inside the environment
# Find bad pos data lines
idxBad = np.zeros(cameraPos.shape[0], dtype=bool)
# Points beyond arena size
x_too_big = cameraPos[:,0] > arena_size[0] + 20
y_too_big = cameraPos[:,1] > arena_size[1] + 20
idxBad = np.logical_or(idxBad, np.logical_or(x_too_big, y_too_big))
x_too_small = cameraPos[:,0] < -20
y_too_small = cameraPos[:,1] < -20
idxBad = np.logical_or(idxBad, np.logical_or(x_too_small, y_too_small))
# Only keep camera data from within the environment
N_RPis = np.sum(np.logical_not(idxBad))
# Only continue if at least one RPi data remains
if N_RPis > 0:
cameraPos = cameraPos[np.logical_not(idxBad),:]
camera_locations = camera_locations[np.logical_not(idxBad),:]
if np.any(lastCombPos):
# Check which cameras provide data close enough to lastCombPos
RPi_correct = []
for nRPi in range(N_RPis):
lastCombPos_distance = euclidean(cameraPos[nRPi, :2], lastCombPos[:2])
RPi_correct.append(lastCombPos_distance < CameraSettings['General']['camera_transfer_radius'])
RPi_correct = np.array(RPi_correct, dtype=bool)
            # If none were found to be within the search radius, set output to None
if not np.any(RPi_correct):
combPos = None
else:
                # Among the cameras with a correct detection, use the reading from the one physically closest to the mean detected position
if np.sum(RPi_correct) > 1:
# Only use correct cameras
N_RPis = np.sum(RPi_correct)
cameraPos = cameraPos[RPi_correct, :]
camera_locations = camera_locations[RPi_correct, :]
meanPos = np.mean(cameraPos[:, :2], axis=0)
# Find mean position distance from all cameras
cam_distances = []
for nRPi in range(N_RPis):
camera_loc = camera_locations[nRPi, :]
cam_distances.append(euclidean(camera_loc, meanPos))
# Find closest distance camera and output its location coordinates
closest_camera = np.argmin(np.array(cam_distances))
combPos = cameraPos[closest_camera, :]
else:
# If target only detected close enough to lastCombPos in a single camera, use it as output
combPos = cameraPos[np.where(RPi_correct)[0][0], :]
else:
# If no lastCombPos provided, check if position can be verified from more than one camera
#### NOTE! This solution breaks down if more than two cameras incorrectly identify the same object
            #### as the brightest spot, instead of the target LED.
cameraPairs = []
pairDistances = []
for c in combinations(range(N_RPis), 2):
pairDistances.append(euclidean(cameraPos[c[0], :2], cameraPos[c[1], :2]))
cameraPairs.append(np.array(c))
cameraPairs = np.array(cameraPairs)
cameraPairs_Match = np.array(pairDistances) < (CameraSettings['General']['camera_transfer_radius'] / 2.0)
# If position can not be verified from multiple cameras, set output to none
if not np.any(cameraPairs_Match):
combPos = None
else:
# Otherwise, set output to mean of two cameras with best matching detected locations
pairToUse = np.argmin(pairDistances)
camerasToUse = np.array(cameraPairs[pairToUse, :])
combPos = np.mean(cameraPos[camerasToUse, :2], axis=0)
# Add NaN values for second LED
combPos = np.append(combPos, np.empty(2) * np.nan)
else:
combPos = None
return combPos
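# A minimal usage sketch for combineCamerasData (hypothetical values; in practice cameraPos,
# CameraSettings and arena_size come from the tracking pipeline / settings loaded elsewhere):
#
#   cameraPos = [np.array([10.0, 20.0, np.nan, np.nan]),
#                np.array([11.0, 21.0, np.nan, np.nan])]
#   cameraIDs = list(CameraSettings['CameraSpecific'].keys())
#   combPos = combineCamerasData(cameraPos, None, cameraIDs, CameraSettings,
#                                arena_size=np.array([100.0, 100.0]))
#
# With lastCombPos=None a position is only returned if at least two cameras agree to within
# half of CameraSettings['General']['camera_transfer_radius']; otherwise the output is None.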
def remove_tracking_data_outside_boundaries(posdata, arena_size, max_error=20):
NotNaN = np.where(np.logical_not(np.isnan(posdata[:,1])))[0]
idxBad = np.zeros(NotNaN.size, dtype=bool)
x_too_big = posdata[NotNaN,1] > arena_size[0] + max_error
y_too_big = posdata[NotNaN,2] > arena_size[1] + max_error
idxBad = np.logical_or(idxBad, | np.logical_or(x_too_big, y_too_big) | numpy.logical_or |
# -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from data import read_marcap, load_datas_scaled
from model import build_model_rnn
data_dir = './data'
if not os.path.exists(data_dir):
data_dir = '../data'
print(os.listdir(data_dir))
marcap_dir = os.path.join(data_dir, 'marcap')
marcap_data = os.path.join(marcap_dir, 'data')
os.listdir(marcap_data)
train_start = pd.to_datetime('2000-01-01')
train_end = pd.to_datetime('2020-06-30')
test_start = pd.to_datetime('2020-08-01')
test_end = pd.to_datetime('2020-10-31')
train_start, test_end
# Samsung Electro-Mechanics, stock code '009150'
df_sem = read_marcap(train_start, test_end, ['009150'], marcap_data)
df_sem.drop(df_sem[df_sem['Marcap'] == 0].index, inplace=True)
df_sem.drop(df_sem[df_sem['Amount'] == 0].index, inplace=True)
df_sem['LogMarcap'] = np.log(df_sem['Marcap'])
df_sem['LogAmount'] = np.log(df_sem['Amount'])
df_sem
n_seq = 10
x_cols = ['LogMarcap', 'LogAmount', 'Open', 'High', 'Low', 'Close']
y_col = 'LogMarcap'
train_inputs, train_labels, test_inputs, test_labels, scaler_dic = load_datas_scaled(df_sem, x_cols, y_col, train_start, train_end, test_start, test_end, n_seq)
train_inputs.shape, train_labels.shape, test_inputs.shape, test_labels.shape
model = build_model_rnn(n_seq, len(x_cols))
model.summary()
model.compile(loss=tf.losses.MeanSquaredError(), optimizer=tf.optimizers.Adam())
# early stopping
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
# save weights
save_weights = tf.keras.callbacks.ModelCheckpoint(os.path.join('1corp_scaled_logmarcap_logamount_prices.hdf5'), monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_freq='epoch', save_weights_only=True)
# csv logger
csv_log = tf.keras.callbacks.CSVLogger(os.path.join('1corp_scaled_logmarcap_logamount_prices.csv'), separator=',', append=False)
# train
history = model.fit(train_inputs, train_labels, epochs=100, batch_size=32, validation_data=(test_inputs, test_labels), callbacks=[early_stopping, save_weights, csv_log])
model = build_model_rnn(n_seq, len(x_cols))
model.summary()
model.load_weights(save_weights.filepath)
#
# train eval
#
n_batch = 32
train_preds = []
for i in range(0, len(train_inputs), n_batch):
batch_inputs = train_inputs[i:i + n_batch]
y_pred = model.predict(batch_inputs)
y_pred = y_pred.squeeze(axis=-1)
train_preds.extend(y_pred)
train_preds = np.array(train_preds)
assert len(train_labels) == len(train_preds)
train_labels.shape, train_preds.shape
scaler = scaler_dic[y_col]
train_labels_scaled = [scaler.inv_scale_value(v) for v in train_labels]
train_preds_scaled = [scaler.inv_scale_value(v) for v in train_preds]
train_labels_log = np.array(train_labels_scaled)
train_preds_log = np.array(train_preds_scaled)
plt.figure(figsize=(16, 4))
plt.plot(train_labels_log, 'b-', label='y_true')
plt.plot(train_preds_log, 'r--', label='y_pred')
plt.legend()
plt.show()
plt.figure(figsize=(16, 4))
plt.plot(train_labels_log - train_preds_log, 'g-', label='y_diff')
plt.legend()
plt.show()
# https://bkshin.tistory.com/entry/%EB%A8%B8%EC%8B%A0%EB%9F%AC%EB%8B%9D-17-%ED%9A%8C%EA%B7%80-%ED%8F%89%EA%B0%80-%EC%A7%80%ED%91%9C
# https://m.blog.naver.com/PostView.nhn?blogId=limitsinx&logNo=221578145366&proxyReferer=https:%2F%2Fwww.google.com%2F
rmse = tf.sqrt(tf.keras.losses.MSE(train_labels_log, train_preds_log))
mae = tf.keras.losses.MAE(train_labels_log, train_preds_log)
mape = tf.keras.losses.MAPE(train_labels_log, train_preds_log)
print(pd.DataFrame([rmse, mae, mape], index=['RMSE', 'MAE', 'MAPE']).head())
#
# test eval
#
test_preds = []
for i in range(0, len(test_inputs), n_batch):
batch_inputs = test_inputs[i:i + n_batch]
y_pred = model.predict(batch_inputs)
y_pred = y_pred.squeeze(axis=-1)
test_preds.extend(y_pred)
test_preds = np.array(test_preds)
assert len(test_labels) == len(test_preds)
test_labels.shape, test_preds.shape
scaler = scaler_dic[y_col]
test_labels_scaled = [scaler.inv_scale_value(v) for v in test_labels]
test_preds_scaled = [scaler.inv_scale_value(v) for v in test_preds]
test_labels_log = np.array(test_labels_scaled)
test_preds_log = | np.array(test_preds_scaled) | numpy.array |
import numpy as np
from matplotlib import pyplot
# %matplotlib inline
# Defines how "wide" RBFs are (higher = RBF extends further)
BETA = 1
# Proportion of test data of all the data
PROPORTION_TEST = 0.3
# Prepare the data
# Note: For simplicity, we do not do grid-space over x/y,
# even though task could be interpreted so
x = np.linspace(0, 2 * np.pi, 30)
y = np.linspace(0, 2 * np.pi, 30)
z = np.sin(x) * np.cos(y)
# Combine x and y into one matrix
xy = | np.vstack([x, y]) | numpy.vstack |
from keras.optimizers import Adam
from keras.layers import Dense, Input, Activation
import random
import keras.backend as K
from keras.models import Sequential, Model, load_model
import numpy as np
import tensorflow as tf
from time import time
from keras.callbacks import History
from collections import deque
class PG():
def __init__(self, load_network = False, load_weight = False, load_file = None):
self.learning_rate = 0.0001
self.discount_factor = 0.99
self.state_memory = []
self.reward_memory = []
self.action_memory = []
self.num_actions = 4
self.curr_disc_rewards = None
self.policy_n, self.predict_n = self.create_network(load_network, load_weight, load_file)
def create_network(self, load_network = False, load_weight = False, load_file = None):
if load_network is True:
model = load_model(load_file)
return (None, model)
input = Input(shape = (363,))
disc_rewards = Input(shape = [1])
dense1 = Dense(128, activation = 'relu')(input)
dense2 = Dense(64, activation = 'relu')(dense1)
prob_output = Dense(self.num_actions, activation = 'softmax')(dense2)
opt = Adam(self.learning_rate)
def custom_loss(y_true, y_pred):
clip_out = K.clip(y_pred,1e-8, 1-1e-8)
log_lik = y_true * K.log(clip_out)
return K.sum(-log_lik * disc_rewards)
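        # custom_loss implements the REINFORCE objective: the one-hot action (y_true) selects
        # log pi(a|s) from the clipped softmax output, and the sum of -log pi(a|s) * G_t
        # (G_t supplied through the `disc_rewards` input) is minimized, which is equivalent
        # to maximizing the expected discounted return.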
policy_n = Model(inputs = [input, disc_rewards], outputs = [prob_output])
policy_n.compile(loss = custom_loss, optimizer=opt)
predict_n = Model(inputs= [input], outputs = [prob_output])
if load_weight is True:
predict_n.load_weights(load_file)
return (None, predict_n)
return policy_n, predict_n
def predict_action(self, state):
predicted_probs = self.predict_n.predict(state)[0]
pred_action = np.random.choice(range(self.num_actions), p = predicted_probs)
return pred_action
def remember(self, state, action, reward):
self.state_memory.append(state.reshape((363,)))
self.action_memory.append(action)
self.reward_memory.append(reward)
def update_policy(self):
state_mem = np.array(self.state_memory)
action_mem = np.array(self.action_memory)
reward_mem = | np.array(self.reward_memory) | numpy.array |
#!/usr/bin/python
"""
Python implementation of the icc method implemented originally by <NAME> in MATLAB from
<NAME>, <NAME>. Agreement and reliability statistics for shapes. PLoS One. 2018;
13(8):e0202087. Published 2018 Aug 23. doi:10.1371/journal.pone.0202087
and extended to include F-Value and lower and upper bounds.
"""
#MIT License
#
#Copyright (c) <NAME> 2018-2020
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import numpy as np
import scipy.io as sio
import scipy.stats as sstats
def icc(M):
"""ICC Intraclass correlation coefficient.
    Calculates the ICC for a two-way, fully crossed random effects model.
This type of ICC is appropriate to describe the absolute agreement
among shape measurements from a group of k raters, randomly selected
from the population of all raters, made on a set of n items.
Shrout and Fleiss: ICC(2,1)
<NAME> Wong: ICC(A,1)
M is the array of measurements
The dimensions of M are n x k, where
n is the # of subjects / groups
k is the # of raters """
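    # For reference, the ICC(2,1) described above can be written in mean-square form as
    #   ICC(2,1) = (MSR - MSE) / (MSR + (k-1)*MSE + (k/n)*(MSC - MSE))
    # where MSR, MSC and MSE are the mean squares for rows (subjects), columns (raters) and error.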
if not isinstance(M, np.ndarray):
raise TypeError("Input must be a numpy array")
n, k = M.shape
u1 = np.mean(M, axis = 0)
u2 = np.mean(M, axis = 1)
u = np.mean(M[:])
SS = | np.sum((M - u) ** 2) | numpy.sum |
import numpy as np
import pandas as pd
import os
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d
from hyperparameters import load_params
from tqdm import tqdm
import torch
import os
def plot_kde(params, vae_name, device):
os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
vae = torch.load('Model_pkl/' + vae_name)
dataset = torch.load('Data/vae_dataset.pkl')
index = torch.utils.data.sampler.SubsetRandomSampler(np.random.choice(range(len(dataset)), params['KDE_samples']))
sample = []
for i in index:
sample.append(torch.cat((dataset[i][0], dataset[i][1]), dim=0).numpy())
sample = torch.tensor( | np.array(sample) | numpy.array |
## Program: VMTK
## Language: Python
## Date: February 12, 2018
## Version: 1.4
## Copyright (c) <NAME>, <NAME>, All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
## Note: this code was contributed by
## <NAME> (Github @rlizzo)
## University at Buffalo
import pytest
import vmtk.vmtkbranchsections as branchsections
from vtk.numpy_interface import dataset_adapter as dsa
import numpy as np
@pytest.fixture(scope='module')
def branch_sections_one_sphere(aorta_centerline_branches ,aorta_surface_branches):
sections = branchsections.vmtkBranchSections()
sections.Surface = aorta_surface_branches
sections.Centerlines = aorta_centerline_branches
sections.NumberOfDistanceSpheres = 1
sections.ReverseDirection = 0
sections.RadiusArrayName = 'MaximumInscribedSphereRadius'
sections.GroupIdsArrayName = 'GroupIds'
sections.CenterlineIdsArrayName = 'CenterlineIds'
sections.TractIdsArrayName = 'TractIds'
sections.BlankingArrayName = 'Blanking'
sections.Execute()
return sections.BranchSections
@pytest.fixture(scope='module')
def branch_sections_two_spheres(aorta_centerline_branches ,aorta_surface_branches):
sections = branchsections.vmtkBranchSections()
sections.Surface = aorta_surface_branches
sections.Centerlines = aorta_centerline_branches
sections.NumberOfDistanceSpheres = 2
sections.ReverseDirection = 0
sections.RadiusArrayName = 'MaximumInscribedSphereRadius'
sections.GroupIdsArrayName = 'GroupIds'
sections.CenterlineIdsArrayName = 'CenterlineIds'
sections.TractIdsArrayName = 'TractIds'
sections.BlankingArrayName = 'Blanking'
sections.Execute()
return sections.BranchSections
def test_number_of_cells_one_sphere(branch_sections_one_sphere):
numberOfCells = branch_sections_one_sphere.GetNumberOfCells()
assert numberOfCells == 24
def test_number_of_cells_two_sphere(branch_sections_two_spheres):
numberOfCells = branch_sections_two_spheres.GetNumberOfCells()
assert numberOfCells == 13
@pytest.mark.parametrize("branch_sections,expectedValue",[
(branch_sections_one_sphere, np.array([0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3])),
(branch_sections_two_spheres, np.array([0, 0, 0, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3]))
])
def test_branch_section_group_ids_correct(aorta_centerline_branches ,aorta_surface_branches,
branch_sections, expectedValue):
wrapped_bifur_section = dsa.WrapDataObject(branch_sections(aorta_centerline_branches ,aorta_surface_branches))
assert np.allclose(wrapped_bifur_section.CellData.GetArray('BranchSectionGroupIds'), expectedValue) == True
@pytest.mark.parametrize("branch_sections,expectedValue",[
(branch_sections_one_sphere, np.array([ 195.53117732, 182.23256278, 186.21267186, 187.0400059, 177.20153242,
176.25756012, 85.82336158, 73.02829417, 63.50445732, 62.40968092,
62.22208797, 60.62570948, 60.78477703, 60.01402702, 63.08210028,
99.06819265, 80.5269763, 64.12766266, 64.57327767, 67.13289619,
60.67602206, 59.98268905, 58.48300609, 58.6038296 ])),
(branch_sections_two_spheres, np.array([ 195.53117732, 186.21267186, 177.20153242, 85.82336158, 63.50445732,
62.22208797, 60.78477703, 63.08210028, 99.06819265, 64.12766266,
67.13289619, 59.98268905, 58.6038296 ]))
])
def test_branch_section_area_correct(aorta_centerline_branches ,aorta_surface_branches,
branch_sections, expectedValue):
wrapped_bifur_section = dsa.WrapDataObject(branch_sections(aorta_centerline_branches ,aorta_surface_branches))
assert np.allclose(wrapped_bifur_section.CellData.GetArray('BranchSectionArea'), expectedValue) == True
@pytest.mark.parametrize("branch_sections,expectedValue",[
(branch_sections_one_sphere, np.array([ 15.25387687, 14.25260369, 14.66768231, 15.25974257, 14.6356421,
13.64498788, 10.89010723, 9.18671219, 8.86926931, 8.74859368,
8.56866816, 8.61375309, 8.58205574, 8.49087216, 8.73891524,
11.33372646, 9.5789813, 8.91067727, 8.55769926, 8.87761983,
8.63328033, 8.53398992, 8.28912586, 8.73934951])),
(branch_sections_two_spheres, np.array([ 15.25387687, 14.66768231, 14.6356421, 10.89010723, 8.86926931,
8.56866816, 8.58205574, 8.73891524, 11.33372646, 8.91067727,
8.87761983, 8.53398992, 8.73934951]))
])
def test_branch_section_min_size_correct(aorta_centerline_branches ,aorta_surface_branches,
branch_sections, expectedValue):
wrapped_bifur_section = dsa.WrapDataObject(branch_sections(aorta_centerline_branches ,aorta_surface_branches))
assert np.allclose(wrapped_bifur_section.CellData.GetArray('BranchSectionMinSize'), expectedValue) == True
@pytest.mark.parametrize("branch_sections,expectedValue",[
(branch_sections_one_sphere, np.array([ 17.08821628, 16.06283909, 16.22629607, 15.95819134, 16.01361226,
16.17715589, 11.69644525, 10.22110037, 9.35342472, 9.36595157,
9.21275981, 9.20696121, 9.04795408, 9.16998689, 9.37937275,
12.45697059, 10.97796263, 9.27120319, 9.39964383, 9.83837421,
9.22775579, 9.13391134, 8.9179931, 8.86614888])),
(branch_sections_two_spheres, np.array([ 17.08821628, 16.22629607, 16.01361226, 11.69644525, 9.35342472,
9.21275981, 9.04795408, 9.37937275, 12.45697059, 9.27120319,
9.83837421, 9.13391134, 8.86614888]))
])
def test_branch_section_max_size_correct(aorta_centerline_branches ,aorta_surface_branches,
branch_sections, expectedValue):
wrapped_bifur_section = dsa.WrapDataObject(branch_sections(aorta_centerline_branches ,aorta_surface_branches))
assert np.allclose(wrapped_bifur_section.CellData.GetArray('BranchSectionMaxSize'), expectedValue) == True
@pytest.mark.parametrize("branch_sections,expectedValue",[
(branch_sections_one_sphere, np.array([ 0.89265472, 0.8873029, 0.90394519, 0.95623259, 0.91395007, 0.84347261,
0.93106127, 0.89879875, 0.94823763, 0.93408487, 0.930087, 0.93556961,
0.94850788, 0.92594158, 0.93171638, 0.90983007, 0.87256458, 0.96111336,
0.91042804, 0.90234622, 0.93557746, 0.93431933, 0.92948332, 0.98569848])),
(branch_sections_two_spheres, np.array([ 0.89265472, 0.90394519, 0.91395007, 0.93106127, 0.94823763, 0.930087,
0.94850788, 0.93171638, 0.90983007, 0.96111336, 0.90234622, 0.93431933,
0.98569848]))
])
def test_branch_section_shape_correct(aorta_centerline_branches ,aorta_surface_branches,
branch_sections, expectedValue):
wrapped_bifur_section = dsa.WrapDataObject(branch_sections(aorta_centerline_branches ,aorta_surface_branches))
assert np.allclose(wrapped_bifur_section.CellData.GetArray('BranchSectionShape'), expectedValue) == True
@pytest.mark.parametrize("branch_sections,expectedValue",[
(branch_sections_one_sphere, np.array([1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1])),
(branch_sections_two_spheres, np.array([1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1]))
])
def test_branch_section_closed_correct(aorta_centerline_branches ,aorta_surface_branches,
branch_sections, expectedValue):
wrapped_bifur_section = dsa.WrapDataObject(branch_sections(aorta_centerline_branches ,aorta_surface_branches))
assert np.allclose(wrapped_bifur_section.CellData.GetArray('BranchSectionClosed'), expectedValue) == True
@pytest.mark.parametrize("branch_sections,expectedValue",[
(branch_sections_one_sphere, np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 1, 2, 3, 4, 5, 6, 7, 8])),
(branch_sections_two_spheres, np.array([0, 2, 4, 0, 2, 4, 6, 8, 0, 2, 4, 6, 8]))
])
def test_branch_section_distance_to_spheres_correct(aorta_centerline_branches ,aorta_surface_branches,
branch_sections, expectedValue):
wrapped_bifur_section = dsa.WrapDataObject(branch_sections(aorta_centerline_branches ,aorta_surface_branches))
assert np.allclose(wrapped_bifur_section.CellData.GetArray('BranchSectionDistanceSpheres'), expectedValue) == True
@pytest.mark.parametrize("expectedvalue,paramid",[
(0, 0),
(129, 1),
(246, 2),
(369, 3),
(491, 4),
(610, 5),
(712, 6),
(784, 7),
(864, 8),
(938, 9),
(1012, 10),
(1084, 11),
(1157, 12),
(1227, 13),
(1294, 14),
(1362, 15),
(1440, 16),
(1530, 17),
(1604, 18),
(1685, 19),
(1760, 20),
(1838, 21),
(1916, 22),
(1986, 23)
])
def test_cell_data_pointId_start_indices_one_sphere(branch_sections_one_sphere, expectedvalue, paramid):
bcx = branch_sections_one_sphere.GetCell(paramid)
pointIdStart = bcx.GetPointId(0)
assert pointIdStart == expectedvalue
@pytest.mark.parametrize("expectedvalue,paramid",[
(0, 0),
(129, 1),
(252, 2),
(371, 3),
(443, 4),
(517, 5),
(589, 6),
(659, 7),
(727, 8),
(805, 9),
(879, 10),
(954, 11),
(1032, 12),
])
def test_cell_data_pointId_start_indices_two_sphere(branch_sections_two_spheres, expectedvalue, paramid):
bcx = branch_sections_two_spheres.GetCell(paramid)
pointIdStart = bcx.GetPointId(0)
assert pointIdStart == expectedvalue
@pytest.mark.parametrize("expectedvalue,numberofpoints,paramid",[
(128, 129, 0),
(245, 117, 1),
(368, 123, 2),
(490, 122, 3),
(609, 119, 4),
(711, 102, 5),
(783, 72, 6),
(863, 80, 7),
(937, 74, 8),
(1011, 74, 9),
(1083, 72, 10),
(1156, 73, 11),
(1226, 70, 12),
(1293, 67, 13),
(1361, 68, 14),
(1439, 78, 15),
(1529, 90, 16),
(1603, 74, 17),
(1684, 81, 18),
(1759, 75, 19),
(1837, 78, 20),
(1915, 78, 21),
(1985, 70, 22),
(2064, 79, 23)
])
def test_cell_data_pointId_end_indices_one_sphere(branch_sections_one_sphere, expectedvalue, numberofpoints, paramid):
bcx = branch_sections_one_sphere.GetCell(paramid)
pointIdEnd = bcx.GetPointId(numberofpoints - 1)
assert pointIdEnd == expectedvalue
@pytest.mark.parametrize("expectedvalue,numberofpoints,paramid",[
(128, 129, 0),
(251, 123, 1),
(370, 119, 2),
(442, 72, 3),
(516, 74, 4),
(588, 72, 5),
(658, 70, 6),
(726, 68, 7),
(804, 78, 8),
(878, 74, 9),
(953, 75, 10),
(1031, 78, 11),
(1110, 79, 12),
])
def test_cell_data_pointId_end_indices_two_spheres(branch_sections_two_spheres, expectedvalue, numberofpoints, paramid):
bcx = branch_sections_two_spheres.GetCell(paramid)
pointIdEnd = bcx.GetPointId(numberofpoints - 1)
assert pointIdEnd == expectedvalue
@pytest.mark.parametrize("pointidstart,numberofpoints,expectedlocationstart,expectedlocationend,paramid",[
(0, 129, np.array([217.96841430664062, 173.62118530273438, 13.255617141723633]), np.array([218.3564910888672, 173.5325927734375, 13.078214645385742]), 0),
(129, 117, np.array([220.60513305664062, 168.85765075683594, 14.689851760864258]), np.array([220.7154998779297, 168.8603515625, 14.662433624267578]), 1),
(246, 123, np.array([220.60513305664062, 161.35403442382812, 14.91808795928955]), np.array([221.1270294189453, 161.36172485351562, 14.869494438171387]), 2),
(369, 122, np.array([217.96841430664062, 153.799560546875, 16.24483871459961]), np.array([218.7977752685547, 153.759033203125, 15.950116157531738]), 3),
(491, 119, np.array([220.60513305664062, 147.16163635253906, 16.457529067993164]), np.array([220.84371948242188, 147.17149353027344, 16.445602416992188]), 4),
(610, 102, np.array([214.2646026611328, 138.3168182373047, 20.781129837036133]), np.array([215.2498321533203, 139.0978546142578, 28.59796714782715]), 5),
(712, 72, np.array([221.03619384765625, 133.0021209716797, 19.41550064086914]), np.array([223.09197998046875, 133.60549926757812, 29.31968116760254]), 6),
(784, 80, np.array([225.22320556640625, 128.92987060546875, 19.44786834716797]), np.array([225.2543182373047, 128.94456481933594, 19.4281005859375]), 7),
(864, 74, np.array([224.99966430664062, 125.61617279052734, 20.987215042114258]), np.array([225.03799438476562, 125.61742401123047, 20.96105194091797]), 8),
(938, 74, np.array([226.9380340576172, 121.55354309082031, 20.983095169067383]), np.array([227.32139587402344, 121.60404968261719, 20.776121139526367]), 9),
(1012, 72, np.array([230.27310180664062, 117.59968566894531, 20.985858917236328]), np.array([230.3741455078125, 117.61669921875, 20.980459213256836]), 10),
(1084, 73, np.array([229.39419555664062, 112.8595962524414, 22.42799949645996]), np.array([229.47718811035156, 112.87579345703125, 22.390670776367188]), 11),
(1157, 70, np.array([230.27310180664062, 108.89535522460938, 23.66469383239746]), np.array([230.82315063476562, 108.984375, 23.356685638427734]), 12),
(1227, 67, np.array([232.90982055664062, 105.00150299072266, 23.84995460510254]), np.array([232.92904663085938, 105.00645446777344, 23.845577239990234]), 13),
(1294, 68, np.array([232.12461853027344, 101.83521270751953, 25.46320915222168]), np.array([232.1511993408203, 101.83283996582031, 25.44672393798828]), 14),
(1362, 78, np.array([221.04122924804688, 132.8127899169922, 19.44769287109375]), np.array([223.05599975585938, 133.10491943359375, 29.166749954223633]), 15),
(1440, 90, np.array([221.40382385253906, 128.67816162109375, 22.391569137573242]), np.array([221.80430603027344, 128.6477508544922, 24.464847564697266]), 16),
(1530, 74, np.array([216.21060180664062, 125.59019470214844, 21.670682907104492]), np.array([216.93014526367188, 125.52423095703125, 21.99653434753418]), 17),
(1604, 81, np.array([214.45278930664062, 121.28471374511719, 22.488676071166992]), np.array([214.6112518310547, 121.27181243896484, 22.50135040283203]), 18),
(1685, 75, np.array([212.6949920654297, 117.73933410644531, 23.282371520996094]), np.array([213.42185974121094, 117.62139892578125, 23.06829071044922]), 19),
(1760, 78, np.array([213.5738983154297, 113.08839416503906, 23.80427360534668]), | np.array([214.01332092285156, 112.93942260742188, 23.97063446044922]) | numpy.array |
import sys
sys.path.append('../')
import cosmopy as cosmo
import numpy as np
from scipy import sparse
from math import sqrt, exp
# Unit Test
import unittest
import numpy.testing as nptest
class basic_tests(unittest.TestCase):
def test_QP(self):
P = sparse.csc_matrix([[4., 1], [1, 2]])
q = np.array([1., 1])
A = sparse.csc_matrix([[1., 1], [1, 0], [0, 1], [-1., -1], [-1, 0], [0, -1]])
b = np.array([1, 0.7, 0.7, -1, 0, 0])
cone = {"l" : 6 }
model = cosmo.Model()
model.setup(P, q, A, b, cone, eps_abs = 1e-5, eps_rel = 1e-5)
self.assertTrue(model.setup_complete)
model.optimize()
self.assertTrue(model.solved)
obj_val, x, y, s = model.get_sol()
self.assertEqual(model.get_status(), 'Solved')
nptest.assert_array_almost_equal(x, np.array([0.3, 0.7]), decimal = 3)
nptest.assert_array_almost_equal(y, np.array([0.0033, 0., 0.2, 2.903, 0., 0.]), decimal = 3)
nptest.assert_array_almost_equal(s, np.array([0., 0.4, 0., 0., 0.3, 0.7]), decimal = 3)
nptest.assert_almost_equal(obj_val, 1.88)
def test_wrong_dims(self):
model = cosmo.Model()
q = np.array([1.,0.])
A = sparse.eye(3, format = "csc")
b = np.zeros(3)
cone = {"f" : 3}
with self.assertRaises(ValueError):
model.setup(q = q, A = A, b = b, cone = cone)
q = np.array([1.,0., 0.])
cone = {"f" : 2}
with self.assertRaises(ValueError):
model.setup(q = q, A = A, b = b, cone = cone)
def test_wrong_order(self):
model = cosmo.Model()
self.assertWarns(Warning, model.optimize)
self.assertWarns(Warning, model.get_objective_value)
self.assertWarns(Warning, model.get_x)
def test_reset(self):
P = sparse.csc_matrix([[4., 1], [1, 2]])
q = np.array([1., 1])
A = sparse.csc_matrix([[1., 1], [1, 0], [0, 1], [-1., -1], [-1, 0], [0, -1]])
b = np.array([1, 0.7, 0.7, -1, 0, 0])
cone = {"l" : 6 }
model = cosmo.Model()
model.setup(P, q, A, b, cone, eps_abs = 1e-5, eps_rel = 1e-5, check_termination = 1)
model.optimize()
model.reset()
self.assertTrue(model.result == None)
self.assertTrue(model.solved == False)
self.assertTrue(model.setup_complete == False)
def test_warm_start(self):
P = sparse.csc_matrix([[4., 1], [1, 2]])
q = np.array([1., 1])
A = sparse.csc_matrix([[1., 1], [1, 0], [0, 1], [-1., -1], [-1, 0], [0, -1]])
b = np.array([1, 0.7, 0.7, -1, 0, 0])
cone = {"l" : 6 }
model = cosmo.Model()
model.setup(P, q, A, b, cone, eps_abs = 1e-5, eps_rel = 1e-5, check_termination = 1)
model.optimize()
obj_val, xopt, yopt, sopt = model.get_sol()
iter_cold = model.get_iter()
# now run again but warm start the variables
model = cosmo.Model()
model.setup(P, q, A, b, cone, eps_abs = 1e-5, eps_rel = 1e-5, check_termination = 1)
model.warm_start( x= xopt, y = yopt)
model.optimize()
iter_warm = model.get_iter()
self.assertTrue(iter_warm < iter_cold)
def test_SDP(self):
n = 6
q = np.array([1., 4, 9, 6, 0, 7]) #upper(C)
b = np.hstack((np.array([11., 19.]), np.zeros(6)))
A1_t = sparse.csc_matrix([1.0, 0, 3, 2, 14, 5])
A2_t = sparse.csc_matrix([0.0, 4, 6, 16, 0, 4])
Is = -sparse.eye(n, format = "csc")
Is[1, 1] = Is[3, 3] = Is[4, 4] = -sqrt(2.)
A = sparse.vstack([A1_t, A2_t, Is], format = "csc" )
cone = {"f" : 2, "s" : [6]}
model = cosmo.Model()
model.setup(q = q, A = A, b = b, cone = cone, eps_abs = 1e-5, eps_rel = 1e-5)
model.optimize()
obj_val = model.get_objective_value()
self.assertEqual(model.get_status(), 'Solved')
nptest.assert_almost_equal(obj_val, 13.902, decimal = 3)
def test_EXP(self):
# # max x
# # s.t. y * exp(x / y) <= z
# # y == 1, z == exp(5)
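        # With y == 1 the constraint reduces to exp(x) <= exp(5), so the optimal x is 5;
        # the solver minimizes q'x = -x, hence the expected objective value of -5 asserted below.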
q = np.array([-1., 0, 0])
A1 = sparse.csc_matrix([[0., 1, 0], [0., 0, 1]])
b1 = np.array([1., exp(5)])
A2 = -sparse.eye(3, format = "csc")
b2 = np.zeros(3)
A = sparse.vstack([A1, A2], format = "csc" )
b = np.hstack((b1, b2))
cone = {"f" : 2, "ep" : 1}
model = cosmo.Model()
model.setup(q = q, A = A, b = b, cone = cone, eps_abs = 1e-5, eps_rel = 1e-5)
model.optimize()
obj_val = model.get_objective_value()
status = model.get_status()
self.assertEqual(status, 'Solved')
nptest.assert_almost_equal(obj_val, -5, decimal = 3)
def test_POW(self):
# max x1^0.6 y^0.4 + x2^0.1
# s.t. x1, y, x2 >= 0
# x1 + 2y + 3x2 == 3
# which is equivalent to
# max z1 + z2
# s.t. (x1, y, z1) in K_pow(0.6)
# (x2, 1, z2) in K_pow(0.1)
# x1 + 2y + 3x2 == 3
# x = (x1, y, z1, x2, y2, z2)
q = np.array([0, 0, -1., 0, 0, -1.])
# x1 + 2y + 3x2 == 3
A1 = sparse.csc_matrix([1., 2, 0, 3., 0, 0])
b1 = np.array([3.])
# y2 == 1
A2 = sparse.csc_matrix([0, 0, 0, 0, 1.0, 0])
b2 = np.array([1.])
# (x1, y, z1) in K_pow(0.6)
A3 = sparse.hstack([-sparse.eye(3), np.zeros((3, 3))], format = "csc")
b3 = np.zeros(3)
# (x2, y2, z2) in K_pow(0.1)
A4 = sparse.hstack([np.zeros((3, 3)), -sparse.eye(3)], format = "csc")
b4 = np.zeros(3)
A = sparse.vstack([A1, A2, A3, A4], format = "csc" )
b = | np.hstack((b1, b2, b3, b4)) | numpy.hstack |
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from svgpathtools import svg2paths, wsvg
import xml.etree.ElementTree as ET  # used by plot_svg below (NAMESPACE is assumed to be defined elsewhere)
import re
import numpy as np
import glob
def plot_heatmap(data,
xlabel,
ylabel,
minimization=False,
savefig_path=None,
):
ax = sns.heatmap(data)
ax.invert_yaxis()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
# get figure to save to file
if savefig_path:
ht_figure = ax.get_figure()
ht_figure.savefig(savefig_path+"/heatmap_"+xlabel+"_"+ylabel, dpi=400)
plt.clf()
plt.cla()
plt.close()
def plot_svg(xml, filename):
root = ET.fromstring(xml)
svg_path = root.find(NAMESPACE + 'path').get('d')
wsvg(svg_path, filename=filename+'.svg')
def getImage(path):
return OffsetImage(plt.imread(path))
def plot_fives(dir_path, xlabel, ylabel):
paths = glob.glob(dir_path + "/"+xlabel+"_"+ylabel+"/*.png")
x=[]
y=[]
for a in paths:
        pattern = re.compile(r'([\d\.]+),([\d\.]+)')
segments = pattern.findall(a)
for se in segments:
x.append(int(se[0]))
y.append(int(se[1]))
plt.cla()
fig, ax = plt.subplots(figsize=(10,10))
#ax.scatter(x, y)
for x0, y0, path in zip(x, y,paths):
ab = AnnotationBbox(getImage(path), (y0, x0), frameon=False)
ax.add_artist(ab)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.xticks( | np.arange(-1, 25, 1) | numpy.arange |
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from glssm import *
#################################################
#A constant, dim y = 1, dim x = 1, randomwalk(estPHI=False)
xdict = dict()
ydict = dict()
nsubj = 20
ns = np.zeros(nsubj, dtype=np.int)
mu0 = np.array([0.0])
SIGMA0 = np.array([1.0]).reshape(1,1)
R = np.array([2.5]).reshape(1,1)
PHI = np.array([1.0]).reshape(1,1)
Q = np.array([1.5]).reshape(1,1)
for i in range(nsubj):
ns[i] = int(np.random.uniform(200,400,1))
A = np.ones([ns[i],1,1])
s1 = dlm(mu0,SIGMA0,A,R,PHI,Q)
tempx, tempy = s1.simulate(ns[i])
ydict.update({i: tempy})
xdict.update({i: tempx})
bigxmat = xdict[0]
bigns = [ns[0]]
bigymat = ydict[0]
for i in range(1,nsubj):
bigxmat = np.vstack((bigxmat, xdict[i]))
bigymat = np.vstack((bigymat, ydict[i]))
bigns.append(ns[i])
ntimes = np.array(bigns)
#check the first series
tempy = ydict[0]
tempx = xdict[0]
tempy[50:70] = None
A = np.ones([ns[0],1,1])
s1 = dlm(mu0,SIGMA0,A,R,PHI,Q)
xp,xf,pp,pf,K_last = s1.filtering(tempy)
xs,ps,pcov,xp,pp = s1.smoothing(tempy)
#plots
fig, ax = plt.subplots()
plt.plot(tempx, label="x")
plt.plot(tempy, label="y")
plt.plot(xp, label="xp")
plt.plot(xf, label="xf")
plt.plot(xf, label="xs")
legend = ax.legend(loc='upper left')
plt.show()
s2 = dlm(mu0,SIGMA0,A,R,PHI,Q)
s2.EM(tempy, np.array([ns[0]]),estPHI=True,maxit=100, tol=1e-4,verbose=False)
s2.mu0, s2.SIGMA0, s2.R, s2.PHI, s2.Q
#filter and forecast
x_filter, P_filter = s2.onestep_filter(tempy[ns[0]-1,:],xp[ns[0]-1,:],pp[ns[0]-1,:,:],A[ns[0]-1,:,:])
x_forecast, P_forecast = s2.onestep_forecast(x_filter,P_filter)
#EM fitting
bigA = np.ones([bigymat.shape[0], 1, 1])
dlm1 = dlm(mu0,SIGMA0,bigA,R,PHI,Q)
dlm1.EM(bigymat, ntimes, estPHI=True, maxit=100, tol=1e-4,verbose=True)
dlm1.mu0, dlm1.SIGMA0, dlm1.R, dlm1.PHI, dlm1.Q
dlm1 = dlm(mu0,SIGMA0,bigA,R,PHI,Q)
#dlm1._EM(yy,A,mu0,SIGMA0,PHI0,Q0,R0,ntimes,maxit=40, tol=1e-4, estPHI=True)
dlm1.EM(bigymat, ntimes, estPHI=False, maxit=100, tol=1e-4,verbose=True)
dlm1.mu0, dlm1.SIGMA0, dlm1.R, dlm1.PHI, dlm1.Q
####################################################################################
#real data testing
ana1 = pd.read_csv("analysis.csv")
ana1.head()
ana1.shape
#units: ppb both
#reporting prediction errors
ana1.describe()
yy = ana1["sqrty"].values.reshape(8784,1)
zz = ana1["sqrtz"].values.reshape(8784,1)
plt.plot(yy)
plt.plot(zz)
sum(pd.isnull(yy))
indices = [k for k,val in enumerate(yy) if pd.isnull(yy[k])]
train = 6588
trainid = np.arange(train) #%75
testid = np.arange(train,8784)
##############################################
#DLM1: random walk + noise
#y_t = mu_t + v_t
#mu_t = mu_{t-1} + w_t
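#In state-space form this is a 1-dimensional model: observation matrix A_t = [1],
#transition PHI = [1] (held fixed via estPHI=False), with scalar observation variance R
#and state variance Q estimated by EM below.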
train = 6588
trainid = np.arange(train) #%75
testid = np.arange(train,8784)
fullA = np.repeat(1.0,8784).reshape(8784,1,1)
A = np.repeat(1.0,train).reshape(train,1,1)
mu0 = np.array([5.0])
SIGMA0 = np.array([2.0]).reshape(1,1)
R0 = np.array([2.0]).reshape(1,1)
PHI0 = np.array([1.0]).reshape(1,1)
Q0 = np.array([1.0]).reshape(1,1)
dlm1 = dlm(mu0,SIGMA0,A,R0,PHI0,Q0)
ntimes = np.array([train])
dlm1.EM(yy[:train,:], ntimes, estPHI=False, maxit=30)
dlm1.mu0, dlm1.SIGMA0, dlm1.R, dlm1.PHI, dlm1.Q
#loglik = -5786
xp,xf,pp,pf,K_last = dlm1.filtering(yy[:train,:])
diff = 0
thisxf = xf[train-1,:]
thispf = pf[train-1,:,:]
for i in range(len(testid)):
thisx,thisp = dlm1.onestep_forecast(thisxf,thispf)
if not pd.isnull(yy[train]):
diff += np.sum(np.abs(yy[train,:] - np.dot(fullA[train,:,:],thisx) ))
thisxf,thispf = dlm1.onestep_filter(yy[train,:],thisx,thisp,fullA[train,:,:])
train += 1
diff / len(testid)
#error: 0.3980
#####################################
#DLM2: local linear trend
#y_t = mu_t + v_t
#mu_t = mu_{t-1} + b_{t-1} + w1_t
#b_t = b_{t-1} + w2_t
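#In state-space form the state is x_t = (mu_t, b_t)' with PHI = [[1, 1], [0, 1]] and
#observation matrix A_t = [1, 0]: only the level mu_t is observed, while the slope b_t
#evolves as a random walk.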
train = 6588
trainid = np.arange(train) #%75
testid = np.arange(train,8784)
fullA = np.ones([8784,1,2])
fullA[:,:,1] = 0.0
A = np.ones([train,1,2])
A[:,:,0] = 1.0
A[:,:,1] = 0.0
mu0 = np.array([5.0,1.0])
SIGMA0 = np.array([1.0,0.0,0.0,1.0]).reshape(2,2)
PHI0 = np.array([1.0,1.0,0.0,1.0]).reshape(2,2)
Q0 = np.array([0.3,0,0,0.3]).reshape(2,2)
R0 = np.array([2.0]).reshape(1,1)
ntimes = np.array([train])
dlm2 = dlm(mu0,SIGMA0,A,R0,PHI0,Q0)
dlm2.EM(yy[trainid,:], ntimes, estPHI=False, maxit=30, tol=1e-3)
dlm2.R, dlm2.PHI, dlm2.Q
#loglik = 1644947
xp,xf,pp,pf,K_last = dlm2.filtering(yy[trainid,:])
diff = 0
thisxf = xf[train-1,:]
thispf = pf[train-1,:]
for i in range(len(testid)):
thisx,thisp = dlm2.onestep_forecast(thisxf,thispf)
if not pd.isnull(yy[train]):
diff += np.sum(np.abs(yy[train,:] - np.dot(fullA[train,:,:],thisx) ))
thisxf,thispf = dlm2.onestep_filter(yy[train,:],thisx,thisp,fullA[train,:,:])
train += 1
diff / len(testid)
#error: 0.467
##########################
#can be viewed as a regression with time-varying coefficients.
#y_t = a_1t + a_2t x_t + e_t
#(a_1t,a_2t) = G_t %*% (a_{1,t-1},a_{2,t-1})+w_t
#################################
#DLM3: seasonal trend ( 1 harmonic )
#y_t = [1 1 0] %*% [mu_t, s1_t, s1*_t] + v_t
'''
PHI = [1    0     0   ]
      [0  cosw   sinw ]
      [0 -sinw   cosw ]
'''
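#Here w = 2*pi/24, i.e. a single harmonic with a 24-hour period for the hourly series;
#the rotation block advances the seasonal pair (s1_t, s1*_t) by one hour per step, and
#only mu_t + s1_t enters the observation (A_t = [1, 1, 0]).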
train = 6588
trainid = np.arange(train) #%75
testid = np.arange(train,8784)
fullA = np.ones([8784,1,3])
fullA[:,:,2] = 0.0
A = np.ones([train,1,3])
A[:,:,2] = 0.0
mu0 = np.array([10.0,0.0,0.0])
SIGMA0 = np.array([1.0,0.0,0.0,\
0.0,1.0,0.0,\
0.0,0.0,1.0]).reshape(3,3)
PHI0 = np.array([1.0,0.0,0.0,\
0.0,np.cos(2*np.pi/24),np.sin(2*np.pi/24),\
0.0,-np.sin(2*np.pi/24),np.cos(2*np.pi/24)]).reshape(3,3)
Q0 = np.array([1.0,0.0,0.0,\
0.0,1.0,0.0,\
0.0,0.0,1.0]).reshape(3,3)
R0 = np.array([1.0]).reshape(1,1)
ntimes = np.array([train])
dlm3 = dlm(mu0,SIGMA0,A,R0,PHI0,Q0)
dlm3.EM(yy[trainid,:],ntimes,maxit=30, tol=1e-4,estPHI=False)
dlm3.R,dlm3.PHI,dlm3.Q
#loglik: 5603
xp,xf,pp,pf,K_last = dlm3.filtering(yy[trainid,:])
diff = 0
thisxf = xf[train-1,:]
thispf = pf[train-1,:]
for i in range(len(testid)):
thisx,thisp = dlm3.onestep_forecast(thisxf,thispf)
if not pd.isnull(yy[train]):
diff += np.sum(np.abs(yy[train,:] - np.dot(fullA[train,:,:],thisx) ))
thisxf,thispf = dlm3.onestep_filter(yy[train,:],thisx,thisp,fullA[train,:,:])
train += 1
diff / len(testid)
#error: 0.4008
#################################
#DLM4: WITH EXOGENOUS INPUT
#exogenous input with time-varying coefficient
##y_t = [1 1 0 no2_t] %*% [mu_t, s1_t, s1*_t, beta] + v_t
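#The sqrt-NO2 regressor enters through the time-varying observation matrix (A[:, :, 3] = zz
#below), so its coefficient beta is carried as a fourth state component and updated by the
#Kalman filter rather than estimated as a fixed regression weight.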
train = 6588
trainid = np.arange(train) #%75
testid = np.arange(train,8784)
fullA = np.ones([8784,1,4])
fullA[:,:,2] = 0.0
fullA[:,:,3] = zz
A = np.ones([train,1,4])
A[:,:,2] = 0.0
A[:,:,3] = zz[:train,:]
mu0 = np.array([10.0,0.0,0.0,0.0])
SIGMA0 = np.array([1.0,0.0,0.0,0.0,\
0.0,1.0,0.0,0.0,\
0.0,0.0,1.0,0.0,\
0.0,0.0,0.0,1.0]).reshape(4,4)
PHI0 = np.array([1.0,0.0,0.0,0.0,\
0.0,np.cos(2*np.pi/24),np.sin(2*np.pi/24),0.0,\
0.0,-np.sin(2*np.pi/24),np.cos(2*np.pi/24),0.0,\
0.0,0.0,0.0,1.0]).reshape(4,4)
Q0 = np.array([1.0,0.0,0.0,0.0,\
0.0,1.0,0.0,0.0,\
0.0,0.0,1.0,0.0,\
0.0,0.0,0.0,1.0]).reshape(4,4)
R0 = np.array([2.0]).reshape(1,1)
#BETA0 = np.array([0.1]).reshape(1,1)
#BETA IS INCORPORATED AS A TIME-VARYING COEFFICIENT
ntimes = np.array([train])
dlm4 = dlm(mu0,SIGMA0,A,R0,PHI0,Q0)
dlm4.EM(yy[trainid,:],ntimes,maxit=30)
dlm4.R,dlm4.PHI,dlm4.Q
#loglik = 9314
xp,xf,pp,pf,K_last = dlm4.filtering(yy[trainid,:])
diff = 0
thisxf = xf[train-1,:]
thispf = pf[train-1,:]
for i in range(len(testid)):
thisx,thisp = dlm4.onestep_forecast(thisxf,thispf)
if not pd.isnull(yy[train]):
diff += np.sum(np.abs(yy[train,:] - np.dot(fullA[train,:,:],thisx) ))
thisxf,thispf = dlm4.onestep_filter(yy[train,:],thisx,thisp,fullA[train,:,:])
train += 1
diff / len(testid)
#0.2694
#save the results for plot in R
final = dlm(mu0,SIGMA0,fullA,R0,PHI0,Q0)
ntimes = np.array([8784])
final.EM(yy, ntimes, estPHI=False,maxit=30)
final.R,final.PHI,final.Q
#smooth the series
xs,ps,pcov,xp,pp = final.smoothing(yy)
series_smooth = xs[:,0] + xs[:,1] + zz.ravel() * xs[:,3]
series_upper = np.zeros(8784)
series_lower = np.zeros(8784)
multvec = np.array([1,1,0,0.5])
for i in range(8784):
multvec[3] = zz[i]
thiscov = np.dot(np.dot(multvec.T, ps[i,:,:]),multvec)
series_upper[i] = series_smooth[i] + 1.96 * thiscov
series_lower[i] = np.maximum(series_smooth[i] - 1.96 * thiscov,0)
plt.plot(yy)
plt.plot(series_smooth)
plt.plot(series_upper)
plt.plot(series_lower)
#smooth the trends
trendmu = xs[:,0]
trendmu_upper = np.zeros(8784)
trendmu_lower = np.zeros(8784)
for i in range(8784):
thisvar = ps[i,0,0]
trendmu_upper[i] = trendmu[i] + 1.96 * thisvar
trendmu_lower[i] = np.maximum(trendmu[i] - 1.96 * thisvar,0)
plt.plot(trendmu)
plt.plot(trendmu_upper)
plt.plot(trendmu_lower)
trendseason = xs[:,1]
trendseason_upper = np.zeros(8784)
trendseason_lower = np.zeros(8784)
for i in range(8784):
thisvar = ps[i,1,1]
trendseason_upper[i] = trendseason[i] + 1.96 * thisvar
trendseason_lower[i] = trendseason[i] - 1.96 * thisvar
plt.plot(trendseason)
plt.plot(trendseason_upper)
plt.plot(trendseason_lower)
trendno2 = xs[:,3]
trendno2_upper = np.zeros(8784)
trendno2_lower = np.zeros(8784)
for i in range(8784):
thisvar = ps[i,3,3]
trendno2_upper[i] = trendno2[i] + 1.96 * thisvar
trendno2_lower[i] = trendno2[i] - 1.96 * thisvar
plt.plot(trendno2)
plt.plot(trendno2_upper)
plt.plot(trendno2_lower)
dictionary = {'date':ana1['date'],
'time':ana1['time'],
'rawozone':ana1['y'],
'rawno2':ana1['z'],
'series_smooth':series_smooth,
'series_upper':series_upper,
'series_lower':series_lower,
'trendmu':trendmu,
'trendmu_upper':trendmu_upper,
'trendmu_lower':trendmu_lower,
'trendseason':trendseason,
'trendseason_upper':trendseason_upper,
'trendseason_lower':trendseason_lower,
'trendno2':trendno2,
'trendno2_upper':trendno2_upper,
'trendno2_lower':trendno2_lower}
data = pd.DataFrame(dictionary)
#########################################################################
#################################################################
#bivariate y, univariate x
xdict = dict()
ydict = dict()
nsubj = 20
ns = np.zeros(nsubj, dtype=np.int)
mu0 = np.array([0.0])
SIGMA0 = np.array([1.0]).reshape(1,1)
R = np.array([2.5,0,0,1.5]).reshape(2,2)
PHI = np.array([0.1]).reshape(1,1)
Q = np.array([0.8]).reshape(1,1)
for i in range(nsubj):
ns[i] = int(np.random.uniform(200,400,1))
A = np.ones([ns[i],2,1])
s1 = dlm(mu0,SIGMA0,A,R,PHI,Q)
tempx, tempy = s1.simulate(ns[i])
ydict.update({i: tempy})
xdict.update({i: tempx})
bigxmat = xdict[0]
bigns = [ns[0]]
bigymat = ydict[0]
for i in range(1,nsubj):
bigxmat = np.vstack((bigxmat, xdict[i]))
bigymat = np.vstack((bigymat, ydict[i]))
bigns.append(ns[i])
ntimes = np.array(bigns)
#check the first series
tempy = ydict[0]
tempx = xdict[0]
tempy[50:55,:] = None
A = np.ones([ns[0],2,1])
s1 = dlm(mu0,SIGMA0,A,R,PHI,Q)
xp,xf,pp,pf,K_last = s1.filtering(tempy)
xs,ps,pcov,xp,pp = s1.smoothing(tempy)
#plots
fig, ax = plt.subplots()
plt.plot(tempx, label="x")
plt.plot(tempy, label="y")
plt.plot(xp, label="xp")
plt.plot(xf, label="xf")
plt.plot(xf, label="xs")
legend = ax.legend(loc='upper left')
plt.show()
s2 = dlm(mu0,SIGMA0,A,R,PHI,Q)
s2.EM(tempy, np.array([ns[0]]),estPHI=True,maxit=100, tol=1e-4,verbose=False)
s2.mu0, s2.SIGMA0, s2.R, s2.PHI, s2.Q
#filter and forecast
x_filter, P_filter = s2.onestep_filter(tempy[ns[0]-1,:],xp[ns[0]-1,:],pp[ns[0]-1,:,:],A[ns[0]-1,:,:])
x_forecast, P_forecast = s2.onestep_forecast(x_filter,P_filter)
#EM fitting
bigA = np.ones([bigymat.shape[0], 2, 1])
dlm1 = dlm(mu0,SIGMA0,bigA,R,PHI,Q)
dlm1.EM(bigymat, ntimes, estPHI=True, maxit=100, tol=1e-4,verbose=True)
dlm1.mu0, dlm1.SIGMA0, dlm1.R, dlm1.PHI, dlm1.Q
#################################################################
#bivariate y, bivariate x
xdict = dict()
ydict = dict()
nsubj = 20
ns = np.zeros(nsubj, dtype=np.int)
mu0 = np.array([0.0,0.0])
SIGMA0 = np.array([1.0,0,0,1.0]).reshape(2,2)
R = np.array([2.5,0,0,1.5]).reshape(2,2)
PHI = np.array([0.1,0,0,0.2]).reshape(2,2)
Q = np.array([0.8,0,0,0.5]).reshape(2,2)
for i in range(nsubj):
ns[i] = int(np.random.uniform(200,400,1))
A = np.ones([ns[i],2,2])
for j in range(ns[i]):
A[j,:,:] = np.diag([1,1])
s1 = dlm(mu0,SIGMA0,A,R,PHI,Q)
tempx, tempy = s1.simulate(ns[i])
ydict.update({i: tempy})
xdict.update({i: tempx})
bigxmat = xdict[0]
bigns = [ns[0]]
bigymat = ydict[0]
for i in range(1,nsubj):
bigxmat = np.vstack((bigxmat, xdict[i]))
bigymat = np.vstack((bigymat, ydict[i]))
bigns.append(ns[i])
ntimes = np.array(bigns)
#check the first series
tempy = ydict[0]
tempx = xdict[0]
tempy[50:55,:] = None
A = np.ones([ns[0],2,2])
for j in range(ns[0]):
A[j,:,:] = | np.diag([1,1]) | numpy.diag |