import numpy as np
def optionPriceMCGeneral(type_option,S,K,T,r):
# S is a vector of Monte Carlo samples at T
result = np.zeros([len(K),1])
if type_option == 'c' or type_option == 1:
for (idx,k) in enumerate(K):
result[idx] = np.exp(-r*T)*np.mean(np.maximum(S-k,0.0))
elif type_option == 'p' or type_option == -1:
for (idx,k) in enumerate(K):
result[idx] = np.exp(-r*T)*np.mean(np.maximum(k-S,0.0))
return result.T[0]
def hestonEuler(NumberPaths,N,T,r,s0,kappa,gamma,rho,vbar,v0):
Z1 = np.random.normal(0.0,1.0,[NumberPaths,N])
Z2 = np.random.normal(0.0,1.0,[NumberPaths,N])
W1 = np.zeros([NumberPaths, N + 1])
W2 = np.zeros([NumberPaths, N + 1])
V = np.zeros([NumberPaths, N + 1])
X = np.zeros([NumberPaths, N + 1])
V[:,0]=v0
X[:,0]=np.log(s0)
time = np.zeros([N+1])
dt = T / float(N)
for i in range(0, N):
Z2[:,i] = rho * Z1[:,i] + np.sqrt(1.0 - rho**2)*Z2[:,i]
V[:,i+1] = V[:,i] + kappa*(vbar - V[:,i]) * dt + gamma * np.sqrt(V[:,i]) * np.sqrt(dt) * Z1[:,i]
V[:,i+1] = np.maximum(V[:,i+1],0.0)
X[:,i+1] = X[:,i] + (r - 0.5*V[:,i])*dt + np.sqrt(V[:,i]) * np.sqrt(dt) * Z2[:,i]
time[i+1] = time[i] +dt
S = np.exp(X)
return time, S
def hestonMilstein(NumberPaths, N, T, r, s0, kappa, gamma, rho, vbar, v0):
Z1 = np.random.normal(0.0,1.0,[NumberPaths,N])
Z2 = np.random.normal(0.0,1.0,[NumberPaths,N])
W1 = np.zeros([NumberPaths, N+1])
W2 = np.zeros([NumberPaths, N+1])
V = np.zeros([NumberPaths, N+1])
X = np.zeros([NumberPaths, N+1])
V[:,0]=v0
X[:,0]=s0
time = np.zeros([N+1])
dt = T / float(N)
for i in range(0,N):
Z2[:,i] = rho * Z1[:,i] + np.sqrt(1.0 - rho**2)*Z2[:,i]
V[:,i+1] = V[:,i] + kappa * (vbar - V[:,i]) * dt + gamma * np.sqrt(V[:,i]) * np.sqrt(dt) * Z1[:,i] +\
0.25 * gamma**2 * ((np.sqrt(dt)*Z1[:,i])**2 - dt)  # Milstein correction built from the Brownian increment sqrt(dt)*Z1
V[:,i+1] = np.maximum(V[:,i+1],0.0)
X[:, i+1] = X[:, i] + (r * X[:, i]) * dt + np.sqrt(V[:, i]) * X[:, i] * np.sqrt(dt) * Z2[:,i]+\
0.5 * V[:, i] * X[:, i] * ((np.sqrt(dt) * Z2[:,i])**2 - dt)
time[i+1] = time[i] + dt
S = X
return time, S
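if __name__ == "__main__":
    # Hypothetical usage sketch (not part of the original snippet). All parameter
    # values below are illustrative assumptions; the Euler and Milstein schemes
    # above should give similar Monte Carlo call prices via optionPriceMCGeneral.
    np.random.seed(1)
    paths, steps, T, r = 2000, 250, 1.0, 0.05
    s0, kappa, gamma, rho, vbar, v0 = 100.0, 1.5, 0.3, -0.7, 0.04, 0.04
    K = np.array([80.0, 100.0, 120.0])
    _, S_euler = hestonEuler(paths, steps, T, r, s0, kappa, gamma, rho, vbar, v0)
    _, S_milstein = hestonMilstein(paths, steps, T, r, s0, kappa, gamma, rho, vbar, v0)
    print("Euler   :", optionPriceMCGeneral('c', S_euler[:, -1], K, T, r))
    print("Milstein:", optionPriceMCGeneral('c', S_milstein[:, -1], K, T, r))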
def heston_stoch_corr(NumberPaths, N, T, r, v0, s0, rho0, rho1, rho2, kappa, gamma, rho, vbar, kappa_rho, mu_rho, sigma_rho):
Z1 = np.random.normal(0.0,1.0, [NumberPaths,N])
Z2 = np.random.normal(0.0,1.0, [NumberPaths,N])
Z3 = np.random.normal(0.0, 1.0, [NumberPaths,N])
W1 = np.zeros([NumberPaths, N + 1])
W2 = np.zeros([NumberPaths, N + 1])
W3 = np.zeros([NumberPaths, N + 1])
V = np.zeros([NumberPaths, N+1])
X = np.zeros([NumberPaths, N+1])
import numpy as np
import numpy.ma as ma
def rot_matrix(theta):
r = np.array(( (np.cos(theta), -np.sin(theta)),
(np.sin(theta), np.cos(theta)) ))
return r
def angle_between(vector1, vector2):
""" Returns the angle in radians between given vectors"""
v1_u = unit_vector(vector1)
v2_u = unit_vector(vector2)
minor = np.linalg.det(
np.stack((v1_u[-2:], v2_u[-2:]))
)
if minor == 0:
sign = 1
else:
sign = -np.sign(minor)
dot_p = np.dot(v1_u, v2_u)
dot_p = min(max(dot_p, -1.0), 1.0)
angle = sign * np.arccos(dot_p)
return angle
# @njit(cache=True, nogil=True)
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
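# Hypothetical quick check (not part of the original snippet). With the sign
# convention implemented above, rotating vector2 by the returned angle aligns
# its direction with vector1; the vectors below are illustrative.
v1_demo = np.array([1.0, 0.0])
v2_demo = np.array([0.6, 0.8])
theta_demo = angle_between(v1_demo, v2_demo)
assert np.allclose(rot_matrix(theta_demo) @ unit_vector(v2_demo), unit_vector(v1_demo))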
def get_trajs_data(labels_traj,X,vecsize):
trajs_state=[]
len_trajs_state = []
for idx in np.unique(labels_traj.compressed()):
mask = labels_traj==idx
segments = np.where(np.abs(np.diff(np.concatenate([[False], mask, [False]]))))[0].reshape(-1, 2)
segments = segments[segments[:,0]>int(vecsize/2)]
trajs = []
len_trajs = []
for t0,tf in segments:
traj = X[t0-int(vecsize/2):tf]
traj = traj-traj[0]
trajs.append(traj)
len_trajs.append(tf-t0)
trajs_state.append(trajs)
len_trajs_state.append(np.hstack(len_trajs))
return trajs_state,len_trajs_state
def get_data_indices(cluster_traj):
data_indices = []
t0 = 0
tf=0
while tf < len(cluster_traj):
if tf>=len(cluster_traj)-1:
break
if ma.count_masked(cluster_traj[tf])==0:
while cluster_traj[tf+1]==cluster_traj[tf]:
if tf+1>=len(cluster_traj)-1 and tf-t0>0:
data_indices = np.vstack(data_indices)
"""
Function :
Title :
Written by:
Email : <EMAIL>
Date :
Last edit :
Language : Python 3.8 or >
Aeronautical Institute of Technology - Airbus Brazil
Description:
-
Inputs:
-
Outputs:
-
TODO's:
-
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from numpy import linalg as LA
from framework.Stability.Dynamic.skew import skew
# =============================================================================
# CLASSES
# =============================================================================
# =============================================================================
# FUNCTIONS
# =============================================================================
def Cmat(n, angle_rad):
nn = n
# if len(nn)>1:
# nn = 4
if nn == 1: # Gamma rotation
C = np.array([[1, 0, 0],
[0, np.cos(angle_rad), np.sin(angle_rad)],
[0, -np.sin(angle_rad), np.cos(angle_rad)]])
elif nn == 2: # Theta rotations
C = np.array([[np.cos(angle_rad), 0, -np.sin(angle_rad)],
[0, 1, 0],
[np.sin(angle_rad), 0, np.cos(angle_rad)]])
elif nn == 3: # Phi rotation
C = np.array([[np.cos(angle_rad), np.sin(angle_rad), 0],
[-np.sin(angle_rad), np.cos(angle_rad), 0],
[0, 0, 1]])
else:
n = n/LA.norm(n)
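# Hypothetical usage sketch (not part of the original snippet; the general-axis
# branch above is truncated here, so only the single-axis cases nn in {1, 2, 3}
# are exercised). The 3-2-1 (psi -> theta -> phi) composition is an assumed convention.
phi_demo, theta_demo, psi_demo = np.deg2rad([10.0, 5.0, 30.0])
C_demo = Cmat(1, phi_demo) @ Cmat(2, theta_demo) @ Cmat(3, psi_demo)
assert np.allclose(C_demo @ C_demo.T, np.eye(3))  # rotation matrices are orthogonal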
import numpy as np
from math import ceil
import copy
import random
from imblearn.under_sampling import NearMiss
from imblearn.over_sampling import SMOTE, ADASYN
from sklearn.ensemble import RandomForestClassifier
try: # < version 1.1.0
from art.attacks import BoundaryAttack, ZooAttack, HopSkipJump
except ImportError: # >= version 1.1.0
from art.attacks.evasion import BoundaryAttack, ZooAttack, HopSkipJump
from art.classifiers import SklearnClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.metrics.pairwise import euclidean_distances
from drift_dac.features_utils import is_integer_feature
from drift_dac.perturbation_shared_utils import Shift, sample_random_indices, PerturbationConstants, any_other_label
__all__ = ['GaussianNoise', 'SwitchCategorical', 'SubsampleJoint', 'SubsampleNumeric', 'SubsampleCategorical',
'UnderSample', 'OverSample', 'Adversarial', 'ConstantNumeric', 'ConstantCategorical', 'MissingValues',
'PlusMinusSomePercent', 'FlipSign', 'Outliers', 'Scaling', 'SwappedValues', 'ErrorBasedSampling',
'NearestNeighbors']
class GaussianNoise(Shift):
""" Apply Gaussian noise to a portion of data.
Args:
noise_key (str): value in ['small', 'medium', 'large] indicating the level of noise.
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
clip (bool): flag to clip the value of a perturbed feature to the maximum value in the feature range.
ceil_int (bool): flag to ceil the value of a perturbed feature for integer features.
noise (float): level of noise
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
clip (bool): flag to clip the value of a perturbed feature to the maximum value in the feature range.
ceil_int (bool): flag to ceil the value of a perturbed feature for integer features.
noise (float): amount of noise
"""
def __init__(self, noise_key='small', samples_fraction=1.0, features_fraction=1.0, clip=True, ceil_int=True,
noise=None):
super(GaussianNoise, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.clip = clip
self.ceil_int = ceil_int
if noise is None:
noise_levels = {
'large': 100.0,
'medium': 10.0,
'small': 1.0
}
self.noise = noise_levels[noise_key]
else:
self.noise = noise
self.name = noise_key + '_gn_shift_%.2f_%.2f' % (samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt, yt, self.shifted_indices, self.shifted_features = gaussian_noise_shift(Xt, yt,
self.noise, self.samples_fraction,
self.features_fraction, self.clip,
self.ceil_int)
return Xt, yt
class SwitchCategorical(Shift):
""" Assign a random category of categorical variables to a portion of data.
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=0.5, features_fraction=1.0):
super(SwitchCategorical, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.name = 'switch_categorical_features_shift_%.2f_%.2f' % (samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.CATEGORICAL
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt, yt, self.shifted_indices, self.shifted_features = switch_categorical_features_shift(Xt, yt,
self.samples_fraction,
self.features_fraction)
return Xt, yt
class SubsampleJoint(Shift):
""" Keep an observation with a probability which decreases as points are further away from the samples mean.
Args:
gamma (float): fraction of samples close to the mean to remove.
Attributes:
gamma (float): fraction of samples close to the mean to remove.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, gamma=0.2):
super(SubsampleJoint, self).__init__()
self.gamma = 1 - gamma # fraction of samples to keep
self.name = 'subsample_joint_shift_%.2f' % gamma
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt, yt, self.shifted_features = subsample_joint_shift(Xt, yt, gamma=self.gamma)
return Xt, yt
class SubsampleNumeric(Shift):
""" Subsample with probability p when feature value less than the feature median value. If one_side=False, this also
subsamples with probability 1-p when the feature value is larger than the feature median value.
Args:
features_fraction (float): proportion of features to perturb.
p (float): probability of sampling small values of features
one_side (bool): flag to subsample only samples with small feature values (True, default) or also large values.
Attributes:
features_fraction (float): proportion of features to perturb.
p (float): probability of sampling small values of features
one_side (bool): flag to subsample only samples with small feature values (True, default) or also large values.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, p=0.5, features_fraction=0.5, one_side=True):
super(SubsampleNumeric, self).__init__()
self.features_fraction = features_fraction
self.p = p
self.one_side = one_side
self.name = 'subsample_feature_shift_%.2f' % features_fraction
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt, yt, self.shifted_features = subsample_feature_shift(Xt, yt, feat_delta=self.features_fraction, p=self.p,
one_side=self.one_side)
return Xt, yt
class SubsampleCategorical(Shift):
""" Subsample with probability p when feature value falls in a sub list of categories (randomly defined).
If one_side=False, this also subsamples with probability 1-p when the feature value in the remaining categories.
This perturbation is applied to all categorical features.
Args:
p (float): probability of sampling subset of values of features
features_fraction (float): proportion of features to perturb.
one_side (bool): flag to subsample only samples whose feature value falls in the selected categories (True, default) or also the remaining ones.
Attributes:
p (float): probability of sampling subset of values of features
features_fraction (float): proportion of features to perturb.
one_side (bool): flag to subsample only samples whose feature value falls in the selected categories (True, default) or also the remaining ones.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, p=0.5, features_fraction=1.0, one_side=True):
super(SubsampleCategorical, self).__init__()
self.p = p
self.features_fraction = features_fraction
self.one_side = one_side
self.name = 'subsample_categorical_feature_shift_%.2f' % p
self.feature_type = PerturbationConstants.CATEGORICAL
def transform(self, X, y):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
n_categorical = Xt.shape[1]
feat_indices = np.random.choice(n_categorical, ceil(n_categorical * self.features_fraction), replace=False)
for f in feat_indices:
Xt, yt = subsample_categorical_feature_shift(Xt, yt, f, p=self.p, one_side=self.one_side)
return Xt, yt
class UnderSample(Shift):
""" Subsample selecting samples close to the minority class (NearMiss3 heuristics).
Args:
samples_fraction (float): proportion of samples to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=0.5):
super(UnderSample, self).__init__()
self.samples_fraction = samples_fraction
self.name = 'under_sample_shift_%.2f' % samples_fraction
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt = SimpleImputer(strategy='median').fit_transform(Xt)
Xt, yt = under_sampling_shift(Xt, yt, self.samples_fraction)
return Xt, yt
class OverSample(Shift):
""" First subsample selecting samples close to the minority class (NearMiss3 heuristics), then add interpolated
samples via SMOTE technique.
Args:
samples_fraction (float): proportion of samples to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=0.5):
super(OverSample, self).__init__()
self.samples_fraction = samples_fraction
self.name = 'over_sample_shift_%.2f' % samples_fraction
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt = SimpleImputer(strategy='median').fit_transform(Xt)
Xt, yt = over_sampling_shift(Xt, yt, self.samples_fraction)
return Xt, yt
class Adversarial(Shift):
""" Perturb a portion of samples and features via black-box adversarial perturbations attempting to make a
classifier mis-predict the samples class.
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
attack_type (str): name of the desired adversarial attack in ['zoo', 'boundary', 'hop-skip-jump'].
model (sklearn.BaseEstimator): unfitted sklearn classifier against which to perform the adversarial attack.
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
attack_type (str): name of the desired adversarial attack in ['zoo', 'boundary', 'hop-skip-jump'].
model (sklearn.BaseEstimator): unfitted sklearn classifier against which to perform the adversarial attack.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0, features_fraction=1.0, attack_type='boundary',
model=RandomForestClassifier()):
super(Adversarial, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.attack_type = attack_type
self.model = model
self.name = 'adversarial_attack_shift_%s_%.2f_%.2f' % (attack_type, samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt, yt, self.shifted_indices, self.shifted_features = adversarial_attack_shift(Xt, yt, self.samples_fraction,
self.model,
self.attack_type,
self.features_fraction)
return Xt, yt
class ConstantNumeric(Shift):
""" Assign a constant value (median) to a portion of data.
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0, features_fraction=1.0):
super(ConstantNumeric, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.name = 'constant_feature_shift_%.2f_%.2f' % (samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt, yt, self.shifted_indices, self.shifted_features = constant_value_shift(Xt, yt, self.samples_fraction,
self.features_fraction)
return Xt, yt
class ConstantCategorical(Shift):
""" Assign a random constant category to a portion of data.
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0, features_fraction=1.0):
super(ConstantCategorical, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.name = 'constant_feature_shift_%.2f_%.2f' % (samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.CATEGORICAL
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = copy.deepcopy(y)
Xt, yt, self.shifted_indices, self.shifted_features = constant_categorical_shift(Xt, yt, self.samples_fraction,
self.features_fraction)
return Xt, yt
class MissingValues(Shift):
""" Insert missing values into a portion of data.
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
value_to_put_in (float): desired representation of the missing value
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
value_to_put_in (float): desired representation of the missing value
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0, features_fraction=1.0, value_to_put_in=None):
super(MissingValues, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.value_to_put_in = value_to_put_in
self.name = 'missing_value_shift_%.2f_%.2f' % (samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.ANY
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = y
self.shifted_indices = sample_random_indices(Xt.shape[0], self.samples_fraction)
self.shifted_features = sample_random_indices(Xt.shape[1], self.features_fraction)
if self.value_to_put_in is None and np.issubdtype(Xt[:, self.shifted_features].dtype, np.integer):
self.value_to_put_in = -9999
Xt[np.transpose(np.array(self.shifted_indices)[np.newaxis]), np.array(self.shifted_features)[
np.newaxis]] = self.value_to_put_in
return Xt, yt
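if __name__ == '__main__':
    # Hypothetical usage sketch (synthetic, illustrative data; relies on the package's
    # sample_random_indices helper used by transform above). np.nan is passed
    # explicitly as the missing-value marker for this float matrix.
    X_demo = np.random.randn(100, 4)
    mv = MissingValues(samples_fraction=0.2, features_fraction=0.5, value_to_put_in=np.nan)
    X_missing, _ = mv.transform(X_demo)
    print('cells set to NaN:', int(np.isnan(X_missing).sum()))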
class FlipSign(Shift):
""" Flip the sign of a portion of data.
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0, features_fraction=1.0):
super(FlipSign, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.name = 'flip_sign_shift_%.2f_%.2f' % (samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = y
self.shifted_indices = sample_random_indices(Xt.shape[0], self.samples_fraction)
numerical_features = np.array(range(Xt.shape[1]))
n_feats = len(numerical_features)
self.shifted_features = list(
np.array(numerical_features)[sample_random_indices(n_feats, self.features_fraction)])
Xt[np.transpose(np.array(self.shifted_indices)[np.newaxis]), np.array(self.shifted_features)[np.newaxis]] *= -1
return Xt, yt
class PlusMinusSomePercent(Shift):
""" Increment the feature values by a percentage for a portion of data.
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
percentage (float): percentage of the value to add/subtract.
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
percentage (float): percentage of the value to add/subtract.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0, features_fraction=1.0, percentage=0.1):
super(PlusMinusSomePercent, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.percentage = percentage
self.name = 'plus_minus_perc_shift_%.2f_%.2f_%.2f' % (samples_fraction, features_fraction, percentage)
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = y
self.shifted_indices = sample_random_indices(Xt.shape[0], self.samples_fraction)
numerical_features = np.array(range(Xt.shape[1]))
n_feats = len(numerical_features)
self.shifted_features = list(
np.array(numerical_features)[sample_random_indices(n_feats, self.features_fraction)])
row_indices, col_indices = np.transpose(np.array(self.shifted_indices)[np.newaxis]), \
np.array(self.shifted_features)[np.newaxis]
Xt[row_indices, col_indices] += \
Xt[row_indices, col_indices] * np.random.uniform(-self.percentage, self.percentage,
size=(len(self.shifted_indices),
len(self.shifted_features)))
return Xt, yt
class Outliers(Shift):
""" Replace a portion of data with outliers, obtained by adding random Gaussian noise.
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0, features_fraction=1.0):
super(Outliers, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.name = 'outlier_shift_%.2f_%.2f' % (samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = y
self.shifted_indices = sample_random_indices(Xt.shape[0], self.samples_fraction)
numerical_features = np.array(range(Xt.shape[1]))
n_feats = len(numerical_features)
self.shifted_features = list(
np.array(numerical_features)[sample_random_indices(n_feats, self.features_fraction)])
stddevs = {column: np.std(Xt[:, column]) for column in self.shifted_features}
scales = {column: random.uniform(1, 5) for column in self.shifted_features}
for column in self.shifted_features:
noise = np.random.normal(0, scales[column] * stddevs[column], size=Xt[self.shifted_indices, column].shape)
Xt[self.shifted_indices, column] += noise
return Xt, yt
class Scaling(Shift):
""" Scale a portion of samples and features by a random value in [10, 100, 1000].
Args:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
features_fraction (float): proportion of features to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0, features_fraction=1.0):
super(Scaling, self).__init__()
self.samples_fraction = samples_fraction
self.features_fraction = features_fraction
self.name = 'scaling_shift_%.2f_%.2f' % (samples_fraction, features_fraction)
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = y
self.shifted_indices = sample_random_indices(Xt.shape[0], self.samples_fraction)
numerical_features = np.array(range(Xt.shape[1]))
n_feats = len(numerical_features)
self.shifted_features = list(
np.array(numerical_features)[sample_random_indices(n_feats, self.features_fraction)])
row_indices, col_indices = np.transpose(np.array(self.shifted_indices)[np.newaxis]), \
np.array(self.shifted_features)[np.newaxis]
scale_factor = np.random.choice([10, 100, 1000])
Xt[row_indices, col_indices] *= scale_factor
return Xt, yt
class SwappedValues(Shift):
""" Swap the values of two random features for a desired portion of samples.
Args:
samples_fraction (float): proportion of samples to perturb.
Attributes:
samples_fraction (float): proportion of samples to perturb.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, samples_fraction=1.0):
super(SwappedValues, self).__init__()
self.samples_fraction = samples_fraction
self.name = 'swapped_values_shift_%.2f' % samples_fraction
self.feature_type = PerturbationConstants.NUMERIC
def transform(self, X, y=None):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
Xt = copy.deepcopy(X)
yt = y
self.shifted_indices = sample_random_indices(Xt.shape[0], self.samples_fraction)
self.shifted_features = sorted(list(np.random.choice(Xt.shape[1], size=2, replace=False)))
(column_a, column_b) = self.shifted_features[0], self.shifted_features[1]
values_of_column_a = copy.deepcopy(Xt[self.shifted_indices, column_a])
values_of_column_b = Xt[self.shifted_indices, column_b]
Xt[self.shifted_indices, column_a] = values_of_column_b
Xt[self.shifted_indices, column_b] = values_of_column_a
return Xt, yt
class ErrorBasedSampling(Shift):
""" Sample the observations to have a desired amount of wrongly predicted samples from a reference model.
Args:
error_fraction (float): proportion of wrongly predicted samples.
model (sklearn.BaseEstimator): classifier with respect to which errors are computed.
labelenc (sklearn.BaseEncoder): label encoder for the classifier.
Attributes:
error_fraction (float): proportion of wrongly predicted samples.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
model (sklearn.BaseEstimator): classifier with respect to which errors are computed.
labelenc (sklearn.BaseEncoder): label encoder for the classifier.
"""
def __init__(self, error_fraction=1.0, model=None, labelenc=None):
super(ErrorBasedSampling, self).__init__()
self.error_fraction = error_fraction
self.name = 'error_sampling_shift_%.2f' % error_fraction
self.feature_type = PerturbationConstants.ANY
self.model = model
self.labelenc = labelenc
def transform(self, X, y):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
if self.model is None:
raise NotImplementedError('You need to input a model. Reference model not implemented yet.')
y_pred = self.model.predict(X)
if self.labelenc is None:
y_enc = y
else:
y_enc = self.labelenc.transform(y)
error_indices = np.where(y_pred != y_enc)[0]
correct_indices = np.where(y_pred == y_enc)[0]
if error_indices.size == 0:
raise ValueError('The model has 0 error on the dataset. ErrorBasedSampling cannot be built.')
n_samples = X.shape[0]
n_errors = int(np.ceil(self.error_fraction * n_samples))
n_correct = n_samples - n_errors
error_row_indices = np.random.choice(error_indices, size=n_errors, replace=True)
correct_row_indices = np.random.choice(correct_indices, size=n_correct, replace=True)
row_indices = np.r_[error_row_indices, correct_row_indices]
np.random.shuffle(row_indices)
Xt = X[row_indices, :]
yt = y[row_indices]
return Xt, yt
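if __name__ == '__main__':
    # Hypothetical usage sketch (synthetic, illustrative data). A deliberately weak
    # classifier is fitted so that some training errors exist, as transform requires.
    X_demo = np.random.randn(300, 4)
    y_demo = np.random.randint(0, 2, size=300)
    clf = RandomForestClassifier(n_estimators=10, max_depth=2, random_state=0).fit(X_demo, y_demo)
    sampler = ErrorBasedSampling(error_fraction=0.5, model=clf)
    X_err, y_err = sampler.transform(X_demo, y_demo)
    print('accuracy on shifted sample:', np.mean(clf.predict(X_err) == y_err))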
class NearestNeighbors(Shift):
""" Simulate a particular demographic either appearing, or disappearing from production traffic.
It does so by sampling a data point and then uses nearest neighbors to identify other data points that are similar
(nearest neighbors) or dissimilar (furthest neighbors) and remove them from the dataset.
Ref: https://arxiv.org/abs/2012.08625
Args:
fraction_to_remove (float): proportion of samples to remove from the nearest/furthest set.
near_far_probability (float): probability of nearest or furthest bias.
Attributes:
fraction_to_remove (float): proportion of samples to remove from the nearest/furthest set.
near_far_probability (float): probability of nearest or furthest bias.
name (str): name of the perturbation
feature_type (int): identifier of the type of feature for which this perturbation is valid
(see PerturbationConstants).
"""
def __init__(self, fraction_to_remove=0.5, near_far_probability=0.5):
super(NearestNeighbors, self).__init__()
self.fraction_to_remove = fraction_to_remove
self.name = 'nearest_neighbors_shift_%.2f_%.2f' % (fraction_to_remove, near_far_probability)
self.feature_type = PerturbationConstants.NUMERIC
self.near_far_probability = near_far_probability
def transform(self, X, y):
""" Apply the perturbation to a dataset.
Args:
X (numpy.ndarray): feature data.
y (numpy.ndarray): target data.
"""
initial_size = X.shape[0]
# choose random point p
p_idx = np.random.choice(X.shape[0], size=1)
# sort samples by distance to p
dist_to_p = euclidean_distances(X, X[p_idx])[:, 0]
idx_by_distance = np.argsort(dist_to_p)
remove_size = int(np.ceil(self.fraction_to_remove * initial_size))
keep_size = X.shape[0] - remove_size
if random.random() < self.near_far_probability:
# remove nearest
keep_idx = idx_by_distance[-keep_size:]
else:
# remove farthest
keep_idx = idx_by_distance[:keep_size]
# resample to restore initial size (not in the original algorithm; introduces duplicates)
keep_idx = np.append(keep_idx, np.random.choice(keep_idx, size=remove_size))
Xt = X[keep_idx, :]
yt = y[keep_idx]
return Xt, yt
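if __name__ == '__main__':
    # Hypothetical usage sketch (synthetic, illustrative data): the resampling step
    # above restores the original number of rows, so only the composition changes.
    X_demo = np.random.randn(500, 3)
    y_demo = np.random.randint(0, 2, size=500)
    nn_shift = NearestNeighbors(fraction_to_remove=0.4, near_far_probability=0.5)
    X_shifted, y_shifted = nn_shift.transform(X_demo, y_demo)
    assert X_shifted.shape == X_demo.shape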
# Keeps an observation with a probability which decreases as points are further away from the samples mean
# gamma is the fraction of samples close to the mean we want to keep
def subsample_joint_shift(x, y, gamma=0.8):
shift_features = list(range(x.shape[1]))
n_rows = x.shape[0]
x_mean = np.nanmean(x, axis=0)
distance = np.sqrt(np.sum((x - x_mean) ** 2, axis=1))
gamma_quantile = np.quantile(distance[~np.isnan(distance)], gamma)
ln_prob_keep_far = np.log(0.5) # sample with probability 50% samples with distance after gamma quantile
probabilities = np.exp(ln_prob_keep_far / gamma_quantile * distance)
import math, copy
import param
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as mpl_colors
from matplotlib import ticker
from matplotlib.dates import date2num
from ...core import util
from ...core import (OrderedDict, NdOverlay, DynamicMap,
CompositeOverlay, Element3D, Element)
from ...core.options import abbreviated_exception
from ..plot import GenericElementPlot, GenericOverlayPlot
from ..util import dynamic_update
from .plot import MPLPlot, mpl_rc_context
from .util import wrap_formatter
from distutils.version import LooseVersion
class ElementPlot(GenericElementPlot, MPLPlot):
apply_ticks = param.Boolean(default=True, doc="""
Whether to apply custom ticks.""")
aspect = param.Parameter(default='square', doc="""
The aspect ratio mode of the plot. By default, a plot may
select its own appropriate aspect ratio but sometimes it may
be necessary to force a square aspect ratio (e.g. to display
the plot as an element of a grid). The modes 'auto' and
'equal' correspond to the axis modes of the same name in
matplotlib, a numeric value may also be passed.""")
invert_zaxis = param.Boolean(default=False, doc="""
Whether to invert the plot z-axis.""")
labelled = param.List(default=['x', 'y'], doc="""
Whether to plot the 'x' and 'y' labels.""")
logz = param.Boolean(default=False, doc="""
Whether to apply log scaling to the z-axis of the Chart.""")
zaxis = param.Boolean(default=True, doc="""
Whether to display the z-axis.""")
zrotation = param.Integer(default=0, bounds=(0, 360), doc="""
Rotation angle of the zticks.""")
zticks = param.Parameter(default=None, doc="""
Ticks along z-axis specified as an integer, explicit list of
tick locations, list of tuples containing the locations and
labels or a matplotlib tick locator object. If set to None
default matplotlib ticking behavior is applied.""")
# Element Plots should declare the valid style options for matplotlib call
style_opts = []
# Whether plot has axes, disables setting axis limits, labels and ticks
_has_axes = True
def __init__(self, element, **params):
super(ElementPlot, self).__init__(element, **params)
check = self.hmap.last
if isinstance(check, CompositeOverlay):
check = check.values()[0] # Should check if any are 3D plots
if isinstance(check, Element3D):
self.projection = '3d'
for hook in self.initial_hooks:
try:
hook(self, element)
except Exception as e:
self.warning("Plotting hook %r could not be applied:\n\n %s" % (hook, e))
def _finalize_axis(self, key, element=None, title=None, dimensions=None, ranges=None, xticks=None,
yticks=None, zticks=None, xlabel=None, ylabel=None, zlabel=None):
"""
Applies all the axis settings before the axis or figure is returned.
Only plots with zorder 0 get to apply their settings.
When the number of the frame is supplied as n, this method looks
up and computes the appropriate title, axis labels and axis bounds.
"""
if element is None:
element = self._get_frame(key)
self.current_frame = element
if not dimensions and element and not self.subplots:
el = element.traverse(lambda x: x, [Element])
if el: dimensions = el[0].dimensions()
axis = self.handles['axis']
subplots = list(self.subplots.values()) if self.subplots else []
if self.zorder == 0 and key is not None:
if self.bgcolor:
if LooseVersion(mpl.__version__) <= '1.5.9':
axis.set_axis_bgcolor(self.bgcolor)
else:
axis.set_facecolor(self.bgcolor)
# Apply title
title = self._format_title(key)
if self.show_title and title is not None:
fontsize = self._fontsize('title')
if 'title' in self.handles:
self.handles['title'].set_text(title)
else:
self.handles['title'] = axis.set_title(title, **fontsize)
# Apply subplot label
self._subplot_label(axis)
# Apply axis options if axes are enabled
if element and not any(not sp._has_axes for sp in [self] + subplots):
# Set axis labels
if dimensions:
self._set_labels(axis, dimensions, xlabel, ylabel, zlabel)
if not subplots:
legend = axis.get_legend()
if legend:
legend.set_visible(self.show_legend)
self.handles["bbox_extra_artists"] += [legend]
axis.xaxis.grid(self.show_grid)
axis.yaxis.grid(self.show_grid)
# Apply log axes
if self.logx:
axis.set_xscale('log')
if self.logy:
axis.set_yscale('log')
if not self.projection == '3d':
self._set_axis_position(axis, 'x', self.xaxis)
self._set_axis_position(axis, 'y', self.yaxis)
# Apply ticks
if self.apply_ticks:
self._finalize_ticks(axis, dimensions, xticks, yticks, zticks)
# Set axes limits
self._set_axis_limits(axis, element, subplots, ranges)
# Apply aspects
if self.aspect is not None and self.projection != 'polar' and not self.adjoined:
self._set_aspect(axis, self.aspect)
if not subplots and not self.drawn:
self._finalize_artist(element)
for hook in self.finalize_hooks:
try:
hook(self, element)
except Exception as e:
self.warning("Plotting hook %r could not be applied:\n\n %s" % (hook, e))
return super(ElementPlot, self)._finalize_axis(key)
def _finalize_ticks(self, axis, dimensions, xticks, yticks, zticks):
"""
Finalizes the ticks on the axes based on the supplied ticks
and Elements. Sets the axes position as well as tick positions,
labels and fontsize.
"""
ndims = len(dimensions) if dimensions else 0
xdim = dimensions[0] if ndims else None
ydim = dimensions[1] if ndims > 1 else None
# Tick formatting
if xdim:
self._set_axis_formatter(axis.xaxis, xdim)
if ydim:
self._set_axis_formatter(axis.yaxis, ydim)
if self.projection == '3d':
zdim = dimensions[2] if ndims > 2 else None
if zdim:
self._set_axis_formatter(axis.zaxis, zdim)
xticks = xticks if xticks else self.xticks
self._set_axis_ticks(axis.xaxis, xticks, log=self.logx,
rotation=self.xrotation)
yticks = yticks if yticks else self.yticks
self._set_axis_ticks(axis.yaxis, yticks, log=self.logy,
rotation=self.yrotation)
if self.projection == '3d':
zticks = zticks if zticks else self.zticks
self._set_axis_ticks(axis.zaxis, zticks, log=self.logz,
rotation=self.zrotation)
for ax, ax_obj in zip('xy', [axis.xaxis, axis.yaxis]):
tick_fontsize = self._fontsize('%sticks' % ax,'labelsize',common=False)
if tick_fontsize: ax_obj.set_tick_params(**tick_fontsize)
def _finalize_artist(self, element):
"""
Allows extending the _finalize_axis method with Element
specific options.
"""
pass
def _set_labels(self, axes, dimensions, xlabel=None, ylabel=None, zlabel=None):
"""
Sets the labels of the axes using the supplied list of dimensions.
Optionally explicit labels may be supplied to override the dimension
label.
"""
xlabel, ylabel, zlabel = self._get_axis_labels(dimensions, xlabel, ylabel, zlabel)
if self.invert_axes:
xlabel, ylabel = ylabel, xlabel
if xlabel and self.xaxis and 'x' in self.labelled:
axes.set_xlabel(xlabel, **self._fontsize('xlabel'))
if ylabel and self.yaxis and 'y' in self.labelled:
axes.set_ylabel(ylabel, **self._fontsize('ylabel'))
if zlabel and self.zaxis and 'z' in self.labelled:
axes.set_zlabel(zlabel, **self._fontsize('zlabel'))
def _set_axis_formatter(self, axis, dim):
"""
Set axis formatter based on dimension formatter.
"""
if isinstance(dim, list): dim = dim[0]
formatter = None
if dim.value_format:
formatter = dim.value_format
elif dim.type in dim.type_formatters:
formatter = dim.type_formatters[dim.type]
if formatter:
axis.set_major_formatter(wrap_formatter(formatter))
def _set_aspect(self, axes, aspect):
"""
Set the aspect on the axes based on the aspect setting.
"""
if isinstance(aspect, util.basestring) and aspect != 'square':
axes.set_aspect(aspect)
return
(x0, x1), (y0, y1) = axes.get_xlim(), axes.get_ylim()
xsize = np.log(x1) - np.log(x0) if self.logx else x1-x0
ysize = np.log(y1) - np.log(y0) if self.logy else y1-y0
xsize = max(abs(xsize), 1e-30)
ysize = max(abs(ysize), 1e-30)
data_ratio = 1./(ysize/xsize)
if aspect != 'square':
data_ratio = data_ratio/aspect
axes.set_aspect(float(data_ratio))
def _set_axis_limits(self, axis, view, subplots, ranges):
"""
Compute extents for current view and apply as axis limits
"""
# Extents
scalex, scaley = True, True
extents = self.get_extents(view, ranges)
if extents and not self.overlaid:
coords = [coord if np.isreal(coord) or isinstance(coord, np.datetime64) else np.NaN for coord in extents]
coords = [date2num(util.dt64_to_dt(c)) if isinstance(c, np.datetime64) else c
for c in coords]
valid_lim = lambda c: util.isnumeric(c) and not np.isnan(c)
if self.projection == '3d' or len(extents) == 6:
l, b, zmin, r, t, zmax = coords
if self.invert_zaxis or any(p.invert_zaxis for p in subplots):
zmin, zmax = zmax, zmin
if zmin != zmax:
if valid_lim(zmin):
axis.set_zlim(bottom=zmin)
if valid_lim(zmax):
axis.set_zlim(top=zmax)
else:
l, b, r, t = coords
if self.invert_axes:
l, b, r, t = b, l, t, r
if self.invert_xaxis or any(p.invert_xaxis for p in subplots):
r, l = l, r
if l != r:
lims = {}
if valid_lim(l):
lims['left'] = l
scalex = False
if valid_lim(r):
lims['right'] = r
scalex = False
if lims:
axis.set_xlim(**lims)
if self.invert_yaxis or any(p.invert_yaxis for p in subplots):
t, b = b, t
if b != t:
lims = {}
if valid_lim(b):
lims['bottom'] = b
scaley = False
if valid_lim(t):
lims['top'] = t
scaley = False
if lims:
axis.set_ylim(**lims)
axis.autoscale_view(scalex=scalex, scaley=scaley)
def _set_axis_position(self, axes, axis, option):
"""
Set the position and visibility of the xaxis or yaxis by
supplying the axes object, the axis to set, i.e. 'x' or 'y'
and an option to specify the position and visibility of the axis.
The option may be None, 'bare' or positional, i.e. 'left' and
'right' for the yaxis and 'top' and 'bottom' for the xaxis.
May also combine positional and 'bare' into for example 'left-bare'.
"""
positions = {'x': ['bottom', 'top'], 'y': ['left', 'right']}[axis]
axis = axes.xaxis if axis == 'x' else axes.yaxis
if option in [None, False]:
axis.set_visible(False)
for pos in positions:
axes.spines[pos].set_visible(False)
else:
if option is True:
option = positions[0]
if 'bare' in option:
axis.set_ticklabels([])
axis.set_label_text('')
if option != 'bare':
option = option.split('-')[0]
axis.set_ticks_position(option)
axis.set_label_position(option)
if not self.overlaid and not self.show_frame and self.projection != 'polar':
pos = (positions[1] if (option and (option == 'bare' or positions[0] in option))
else positions[0])
axes.spines[pos].set_visible(False)
def _set_axis_ticks(self, axis, ticks, log=False, rotation=0):
"""
Allows setting the ticks for a particular axis either with
a tuple of ticks, a tick locator object, an integer number
of ticks, a list of tuples containing positions and labels
or a list of positions. Also supports enabling log ticking
if an integer number of ticks is supplied and setting a
rotation for the ticks.
"""
if isinstance(ticks, (list, tuple)) and all(isinstance(l, list) for l in ticks):
axis.set_ticks(ticks[0])
axis.set_ticklabels(ticks[1])
elif isinstance(ticks, ticker.Locator):
axis.set_major_locator(ticks)
elif not ticks and ticks is not None:
axis.set_ticks([])
elif isinstance(ticks, int):
if log:
locator = ticker.LogLocator(numticks=ticks,
subs=range(1,10))
else:
locator = ticker.MaxNLocator(ticks)
axis.set_major_locator(locator)
elif isinstance(ticks, (list, tuple)):
labels = None
if all(isinstance(t, tuple) for t in ticks):
ticks, labels = zip(*ticks)
axis.set_ticks(ticks)
if labels:
axis.set_ticklabels(labels)
for tick in axis.get_ticklabels():
tick.set_rotation(rotation)
@mpl_rc_context
def update_frame(self, key, ranges=None, element=None):
"""
Set the plot(s) to the given frame number. Operates by
manipulating the matplotlib objects held in the self._handles
dictionary.
If n is greater than the number of available frames, update
using the last available frame.
"""
reused = isinstance(self.hmap, DynamicMap) and self.overlaid
if not reused and element is None:
element = self._get_frame(key)
elif element is not None:
self.current_key = key
self.current_frame = element
if element is not None:
self.set_param(**self.lookup_options(element, 'plot').options)
axis = self.handles['axis']
axes_visible = element is not None or self.overlaid
axis.xaxis.set_visible(axes_visible and self.xaxis)
axis.yaxis.set_visible(axes_visible and self.yaxis)
axis.patch.set_alpha(np.min([int(axes_visible), 1]))
for hname, handle in self.handles.items():
hideable = hasattr(handle, 'set_visible')
if hname not in ['axis', 'fig'] and hideable:
handle.set_visible(element is not None)
if element is None:
return
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = util.match_spec(element, ranges)
label = element.label if self.show_legend else ''
style = dict(label=label, zorder=self.zorder, **self.style[self.cyclic_index])
axis_kwargs = self.update_handles(key, axis, element, ranges, style)
self._finalize_axis(key, element=element, ranges=ranges,
**(axis_kwargs if axis_kwargs else {}))
@mpl_rc_context
def initialize_plot(self, ranges=None):
element = self.hmap.last
ax = self.handles['axis']
key = list(self.hmap.data.keys())[-1]
dim_map = dict(zip((d.name for d in self.hmap.kdims), key))
key = tuple(dim_map.get(d.name, None) for d in self.dimensions)
ranges = self.compute_ranges(self.hmap, key, ranges)
ranges = util.match_spec(element, ranges)
style = dict(zorder=self.zorder, **self.style[self.cyclic_index])
if self.show_legend:
style['label'] = element.label
plot_data, plot_kwargs, axis_kwargs = self.get_data(element, ranges, style)
with abbreviated_exception():
handles = self.init_artists(ax, plot_data, plot_kwargs)
self.handles.update(handles)
return self._finalize_axis(self.keys[-1], element=element, ranges=ranges,
**axis_kwargs)
def init_artists(self, ax, plot_args, plot_kwargs):
"""
Initializes the artist based on the plot method declared on
the plot.
"""
plot_method = self._plot_methods.get('batched' if self.batched else 'single')
plot_fn = getattr(ax, plot_method)
artist = plot_fn(*plot_args, **plot_kwargs)
return {'artist': artist[0] if isinstance(artist, list) and
len(artist) == 1 else artist}
def update_handles(self, key, axis, element, ranges, style):
"""
Update the elements of the plot.
"""
self.teardown_handles()
plot_data, plot_kwargs, axis_kwargs = self.get_data(element, ranges, style)
with abbreviated_exception():
handles = self.init_artists(axis, plot_data, plot_kwargs)
self.handles.update(handles)
return axis_kwargs
def teardown_handles(self):
"""
If no custom update_handles method is supplied this method
is called to tear down any previous handles before replacing
them.
"""
if 'artist' in self.handles:
self.handles['artist'].remove()
class ColorbarPlot(ElementPlot):
colorbar = param.Boolean(default=False, doc="""
Whether to draw a colorbar.""")
clipping_colors = param.Dict(default={}, doc="""
Dictionary to specify colors for clipped values, allows
setting color for NaN values and for values above and below
the min and max value. The min, max or NaN color may specify
an RGB(A) color as a color hex string of the form #FFFFFF or
#FFFFFFFF or a length 3 or length 4 tuple specifying values in
the range 0-1 or a named HTML color.""")
cbar_padding = param.Number(default=0.01, doc="""
Padding between colorbar and other plots.""")
cbar_ticks = param.Parameter(default=None, doc="""
Ticks along colorbar-axis specified as an integer, explicit
list of tick locations, list of tuples containing the
locations and labels or a matplotlib tick locator object. If
set to None default matplotlib ticking behavior is
applied.""")
cbar_width = param.Number(default=0.05, doc="""
Width of the colorbar as a fraction of the main plot""")
symmetric = param.Boolean(default=False, doc="""
Whether to make the colormap symmetric around zero.""")
_colorbars = {}
def __init__(self, *args, **kwargs):
super(ColorbarPlot, self).__init__(*args, **kwargs)
self._cbar_extend = 'neither'
def _adjust_cbar(self, cbar, label, dim):
noalpha = math.floor(self.style[self.cyclic_index].get('alpha', 1)) == 1
if (cbar.solids and noalpha):
cbar.solids.set_edgecolor("face")
cbar.set_label(label)
if isinstance(self.cbar_ticks, ticker.Locator):
cbar.ax.yaxis.set_major_locator(self.cbar_ticks)
elif self.cbar_ticks == 0:
cbar.set_ticks([])
elif isinstance(self.cbar_ticks, int):
locator = ticker.MaxNLocator(self.cbar_ticks)
cbar.ax.yaxis.set_major_locator(locator)
elif isinstance(self.cbar_ticks, list):
if all(isinstance(t, tuple) for t in self.cbar_ticks):
ticks, labels = zip(*self.cbar_ticks)
else:
ticks, labels = zip(*[(t, dim.pprint_value(t))
for t in self.cbar_ticks])
cbar.set_ticks(ticks)
cbar.set_ticklabels(labels)
def _finalize_artist(self, element):
artist = self.handles.get('artist', None)
if artist and self.colorbar:
self._draw_colorbar()
def _draw_colorbar(self, dim=None, redraw=True):
element = self.hmap.last
artist = self.handles.get('artist', None)
fig = self.handles['fig']
axis = self.handles['axis']
ax_colorbars, position = ColorbarPlot._colorbars.get(id(axis), ([], None))
specs = [spec[:2] for _, _, spec, _ in ax_colorbars]
spec = util.get_spec(element)
if position is None or not redraw:
if redraw:
fig.canvas.draw()
bbox = axis.get_position()
l, b, w, h = bbox.x0, bbox.y0, bbox.width, bbox.height
else:
l, b, w, h = position
# Get colorbar label
dim = element.get_dimension(dim)
if dim:
label = dim.pprint_label
elif element.vdims:
label = element.vdims[0].pprint_label
elif dim is None:
label = ''
padding = self.cbar_padding
width = self.cbar_width
if spec[:2] not in specs:
offset = len(ax_colorbars)
scaled_w = w*width
cax = fig.add_axes([l+w+padding+(scaled_w+padding+w*0.15)*offset,
b, scaled_w, h])
cbar = fig.colorbar(artist, cax=cax, ax=axis, extend=self._cbar_extend)
self._adjust_cbar(cbar, label, dim)
self.handles['cax'] = cax
self.handles['cbar'] = cbar
ylabel = cax.yaxis.get_label()
self.handles['bbox_extra_artists'] += [cax, ylabel]
ax_colorbars.append((artist, cax, spec, label))
for i, (artist, cax, spec, label) in enumerate(ax_colorbars):
scaled_w = w*width
cax.set_position([l+w+padding+(scaled_w+padding+w*0.15)*i,
b, scaled_w, h])
ColorbarPlot._colorbars[id(axis)] = (ax_colorbars, (l, b, w, h))
def _norm_kwargs(self, element, ranges, opts, vdim, prefix=''):
"""
Returns valid color normalization kwargs
to be passed to matplotlib plot function.
"""
clim = opts.pop(prefix+'clims', None)
if clim is None:
cs = element.dimension_values(vdim)
if not isinstance(cs, np.ndarray):
cs = np.array(cs)
# Copyright (c) 2017-2021 <NAME> (<EMAIL>)
"""
@author: <NAME>
CG model selection via MAP-estimation using mean parameters.
"""
import numpy as np
from numpy import ix_
from cgmodsel.base_solver import BaseCGSolver
# pylint: disable=W0511 # todos
# pylint: disable=R0914 # too many locals
class MAP(BaseCGSolver):
"""
A class for estimating MAP models for CG distributions (mean parameters).
Attributes:
cat_format_required (str): specifier for format - constant.
name: name of the class.
"""
def __init__(self):
"""pass a dictionary that provides with keys dg, dc, and L"""
super().__init__()
self.name = 'MAP'
self.cat_format_required = 'flat'
def _fit_gaussian(self,
mat_v_inv,
deg_freedom: int,
vec_mu0,
n_art_cg: int):
""" fit Gaussian MAP-estimate
Wishart prior for precision matrix
Args:
mat_v_inv: inverse of V for the prior W(Lambda| V, nu).
deg_freedom: degrees of freedom (=nu).
vec_mu0: mean of Gaussian prior N(mu | mu0, (n_art_cg * Lambda)^{-1})
n_art_cg: number of artificial observations for Gaussian prior.
Note:
Setting n_art_cg=0 (and nu = #Gaussians) produces ML estimate.
Returns:
tuple: MAP estimates (vec_mu, mat_sigma)
"""
assert self.meta['n_cat'] == 0, 'use for continuous variables only'
assert self.meta['n_cg'] > 0
vec_mu = np.sum(self.cont_data, axis=0) # sum over rows
vec_mu = n_art_cg * vec_mu0 + vec_mu
vec_mu /= n_art_cg + self.meta['n_data']
mat_sigma = mat_v_inv # this is V^{-1} from the doc
for i in range(self.meta['n_data']):
# add 'scatter matrix' of the evidence
diff_yi_mumap = self.cont_data[i, :] - vec_mu
mat_sigma += np.outer(diff_yi_mumap, diff_yi_mumap)
mudiff = vec_mu - vec_mu0
mat_sigma += n_art_cg * np.outer(mudiff, mudiff)
mat_sigma /= self.meta['n_data'] + deg_freedom - self.meta['n_cg']
return vec_mu, mat_sigma
def fit_fixed_covariance(self,
n_art_cat: int = 1,
n_art_cg: int = 1,
deg_freedom: int = None,
vec_mu0=None,
mat_sigma0=None):
"""fit MAP model with single covariance matrix + individual CG means.
Warning:
This method of model estimation uses
sums over the whole discrete state space.
Args:
n_art_cat (int): Laplacian smoothing parameter
= artificial observations per discrete variable
Dirichlet prior parameters (prior for discrete distribution).
n_art_cg (int): number of 'artificial' data points per CG
(prior parameter for means of conditional Gaussians).
vec_mu0: the value of artificial CG data points.
deg_freedom (int): degrees of freedom
(prior parameter for Wishart prior
for the shared precision matrix of CG distributions).
mat_sigma0: initial guess for the covariance matrix.
Note:
Complete prior is a Dirichlet-Normal-Wishart prior.
Returns:
tuple: MAP-estimate (p(x)_x, mu(x)_x, mat_sigma), where x
are the discrete outcomes.
"""
# TODO(franknu): publish doc
# future ideas(franknu):
# (1) iter only over observed discrete examples,
# all other outcomes default to 'prior'
# (often times much less than the whole discrete state space)
# use dictionary + counts?
# (2) use flag to indicate if cov is fixed or variable
# (avoid code redundancy)
n_cg = self.meta['n_cg']
assert self.meta['n_data'] > 0, 'No data loaded.. use method dropdata'
## defaults for smoothing parameters
if vec_mu0 is None:
# reasonable choice when using standardized data Y
vec_mu0 = np.zeros(n_cg)
assert vec_mu0.shape == (n_cg,)
if deg_freedom is None:
deg_freedom = n_cg # least informative non-degenerate prior
assert deg_freedom >= n_cg, 'need deg >= n_cg for non-degenerate prior'
if mat_sigma0 is None:
mat_sigma0 = np.eye(n_cg) # standardized data --> variances are 1
assert mat_sigma0.shape == (n_cg, n_cg)
# choose V = 1/deg_freedom * mat_sigma0 for the Wishart prior
mat_v_inv = deg_freedom * np.linalg.inv(mat_sigma0)
# note: formerly used n_cg instead of deg_freedom here
## MAP-estimate Gaussians only (with unknown mean and covariance)
if self.meta['n_cat'] == 0:
vec_mu, mat_sigma = self._fit_gaussian(mat_v_inv, deg_freedom,
vec_mu0, n_art_cg)
return np.array([]), vec_mu, mat_sigma
## MAP-estimation in the presence of discrete variables
n_discrete_states = int(np.prod(self.meta['sizes']))
probs_cat = np.zeros(n_discrete_states)
mus = np.zeros((n_discrete_states, n_cg))
mat_sigma = np.zeros((n_cg, n_cg))
## mu and p
for i, state in enumerate(self.cat_data):
probs_cat[state] += 1
mus[state, :] += self.cont_data[i, :]
## MAP-estimates of mu(x)
for state in range(n_discrete_states):
# MAP estimator for mu(x)
mus[state, :] = (n_art_cg * vec_mu0 + mus[state, :])
mus[state, :] /= n_art_cg + probs_cat[state]
## MAP-estimate of mat_sigma
mat_sigma = mat_v_inv # this is V^{-1} from the doc
for i, state in enumerate(self.cat_data):
# add 'scatter matrix' of the evidence
diff_yi_mumap = self.cont_data[i, :] - mus[state, :]
mat_sigma += np.outer(diff_yi_mumap, diff_yi_mumap)
for state in range(n_discrete_states):
# add scatter part of artificial observations mu0
mudiff = mus[state, :] - vec_mu0
mat_sigma += n_art_cg * np.outer(mudiff, mudiff)
        mat_sigma /= (deg_freedom + self.meta['n_data']
                      - n_cg - 1 + n_discrete_states)
## MAP-estimate of p
probs_cat = probs_cat + n_art_cat
probs_cat /= probs_cat.sum()
# note: without smoothing would yield p = p/n
## reshape to the correct shapes
probs_cat = probs_cat.reshape(self.meta['sizes'])
mus = mus.reshape(self.meta['sizes'] + [n_cg])
return probs_cat, mus, mat_sigma
def fit_variable_covariance(self,
n_art_cat: int = 1,
n_art_cg: int = 1,
deg_freedom: int = None,
vec_mu0=None,
mat_sigma0=None):
"""fit MAP model with individual covariance matrices and means.
Warning:
This method of model estimation uses
sums over the whole discrete state space.
Args:
n_art_cat (int): Laplacian smoothing parameter
= artificial observations per discrete variable
Dirichlet prior parameters (prior for discrete distribution).
n_art_cg (int): number of 'artificial' data points per CG
(prior parameter for means of conditional Gaussians).
vec_mu0: the value of artificial CG data points.
deg_freedom (int): degrees of freedom
(prior parameter for Wishart prior
for the shared precision matrix of CG distributions).
mat_sigma0: initial guess for the covariance matrices.
Note:
Complete prior is a Dirichlet-Normal-Wishart prior.
Returns:
tuple: MAP-estimate (p(x)_x, mu(x)_x, mat_sigma_x), where x
are the discrete outcomes.
"""
# TODO(franknu): publish doc
        assert self.meta['n_data'] > 0, 'No data loaded... use method drop_data'
n_cg = self.meta['n_cg']
## defaults for smoothing parameters
if vec_mu0 is None:
vec_mu0 = np.zeros(n_cg) # reasonable when using standardized data
assert vec_mu0.shape == (n_cg,)
if deg_freedom is None:
deg_freedom = n_cg + 1
# yields mat_sigma(x)=mat_sigma_0 if x not observed
        string = 'need deg_freedom >= n_cg + 1 for a non-degenerate '
        string += 'prior and to handle unobserved discrete outcomes'
        assert deg_freedom >= n_cg + 1, string
if mat_sigma0 is None:
mat_sigma0 = np.eye(n_cg)
assert mat_sigma0.shape == (n_cg, n_cg)
# choose V = 1/deg_freedom * mat_sigma0 for the Wishart prior
# -> prior mean of W(Lambda(x)|V, deg_freedom) is
# deg_freedom*V= mat_sigma0
mat_v_inv = deg_freedom * np.linalg.inv(mat_sigma0)
# note: formerly used n_cg instead of nu here
## MAP-estimate Gaussians only (with unknown mean and covariance)
if self.meta['n_cat'] == 0:
vec_mu, mat_sigma = self._fit_gaussian(mat_v_inv, deg_freedom,
vec_mu0, n_art_cg)
return np.array([]), vec_mu, mat_sigma
## initialization
n_discrete_states = int(np.prod(self.meta['sizes']))
probs_cat = np.zeros(n_discrete_states)
mus = np.zeros((n_discrete_states, n_cg))
sigmas = np.zeros((n_discrete_states, n_cg, n_cg))
## mu and p
for i, state in enumerate(self.cat_data):
probs_cat[state] += 1
mus[state, :] += self.cont_data[i, :]
## MAP-estimates of mu(state)
for state in range(n_discrete_states):
mus[state, :] = (n_art_cg * vec_mu0 + mus[state, :]) / \
(n_art_cg + probs_cat[state]) # MAP estimator for mu(state)
## MAP-estimate of mat_sigma(state)
for i, state in enumerate(self.cat_data):
# scatter matrix of the evidence
diff_yi_mumap = self.cont_data[i, :] - mus[state, :]
sigmas[state, :, :] += np.outer(diff_yi_mumap, diff_yi_mumap)
for state in range(n_discrete_states):
mudiff = mus[state, :] - vec_mu0
sigmas[state, :, :] += mat_v_inv + \
n_art_cg * np.outer(mudiff, mudiff)
sigmas[state, :, :] /= probs_cat[state] - n_cg + deg_freedom
# note: divisor is > 0 since deg_freedom > n_cg
## MAP-estimate of p
probs_cat = probs_cat + n_art_cat
probs_cat /= probs_cat.sum()
## reshape to the correct shapes
probs_cat = probs_cat.reshape(self.meta['sizes'])
mus = mus.reshape(self.meta['sizes'] + [n_cg])
sigmas = sigmas.reshape(self.meta['sizes'] + [n_cg, n_cg])
return probs_cat, mus, sigmas
def get_plhvalue(self, mean_params):
"""Wrapper for crossvalidate method.
Args:
mean_params: tuple (p, mus, sigmas).
Returns:
pseudo-likelihood value of current data set
"""
_, _, lval = self.crossvalidate(mean_params)
return lval
def crossvalidate(self, mean_params):
"""Perform crossvalidation by node-wise predictions.
Note:
This uses the current data that has been dropped using method
drop_data.
Args:
mean_params: tuple (p, mus, sigmas), where
p has shape sizes, mus has shape sizes + (n_cg),
sigmas has shape (n_cg, n_cg) if independent of discrete vars x,
else has shape sizes + (n_cg, n_cg) if dependent on x.
Returns:
tuple: prediction errors for discrete variables (np.array),
MSE errors for continuous variables (np.array),
pseudo-likelihood value (float)
"""
n_cat = self.meta['n_cat']
n_cg = self.meta['n_cg']
sizes = self.meta['sizes']
probs_cat, mus, sigmas = mean_params
if len(sigmas.shape) == 2:
cov_variable = 0
# use zero to scale indices and toggle between modes of covariance
else:
cov_variable = 1
## initialization
n_discrete_states = int(np.prod(self.meta['sizes']))
# n_discrete_states is 1 if dc==0 since np.prod([])=1
n_covmats = (n_discrete_states - 1) * cov_variable + 1
# number of different covariance matrices
dis_errors = np.zeros(n_cat)
cts_errors = np.zeros(n_cg)
lval_testdata = 0 # likelihood value
## reshape by collapsing discrete dimensions
shapes = (probs_cat.shape, mus.shape, sigmas.shape) # store shapes
probs_cat = probs_cat.reshape(-1)
mus = mus.reshape((n_discrete_states, n_cg))
sigmas = sigmas.reshape((n_covmats, n_cg, n_cg))
## discrete only models
if n_cg == 0:
assert n_cat > 0
for i in range(self.meta['n_data']):
x = self.cat_data[i] # flat index, or empty list if dc==0
cat = list(np.unravel_index(x, sizes))
# cat is multiindex of discrete outcome
for r in range(n_cat):
probs = np.empty(sizes[r])
tmp = cat[r]
for k in range(sizes[r]):
# calculate conditional probs
cat[r] = k
ind = np.ravel_multi_index(tuple(cat), sizes)
probs[k] = probs_cat[ind] # not yet unnormalized
cat[r] = tmp
prob_xr = probs[tmp] / np.sum(probs) # normalize
dis_errors[r] += 1 - prob_xr
lval_testdata -= np.log(prob_xr)
# note: log(x) ~ x-1 around 1
return dis_errors, cts_errors, lval_testdata
## setting with Gaussian variables
## precalculate determinants, inverse matrices
# (full with dim recuced by 1)
dets = np.empty(n_covmats)
sigmas_inv = np.empty((n_covmats, n_cg, n_cg))
sigmas_red_inv = np.empty((n_covmats, n_cg, n_cg - 1, n_cg - 1))
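        # for the node-wise predictions we need, for every covariance matrix, its inverse and
        # the inverses of all submatrices with one CG variable (row and column) removed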
for x in range(n_covmats):
dets[x] = np.linalg.det(sigmas[x, :, :])**(-0.5)
sigmas_inv[x, :, :] = np.linalg.inv(sigmas[x, :, :])
# for each x, s: store mat_sigma[x]_{-s, -s}^{-1}
cond_inds = list(range(1, n_cg)) # indices to keep
for s in range(n_cg):
# reduced det of cov with rows and col s deleted
mat_s = sigmas[x, :, :][ix_(cond_inds, cond_inds)] # not a view
                sigmas_red_inv[x, s, :, :] = np.linalg.inv(mat_s)
from scipy.spatial import distance
import numpy as np
import utils4knets
import APKnet
import CSKnet
from sklearn.metrics import pairwise_distances
'''
Brief K-nets description.
K-networks (K-nets): an exemplar-based clustering algorithm that can be operated as a deterministic or a stochastic
process. The basic K-nets parameter is k, an integer resolution parameter: the smaller the value of k, the larger the
number of clusters extracted. K-nets operates in two sequential phases. In the first (Construction/Selection) phase, an
initial partition is determined by selecting a number of exemplars from the overall dataset, while in the second
(Assignment) phase the samples are assigned to their nearest exemplar, or a number of k-means-like iterations is
performed until convergence.
Input Parameters:
Required:
X: Input data that can be in the form of 1) a data matrix (NxD, N: number of samples, D: number of features),
2) a similarity matrix (NxN), or 3) NNs and DNNs lists (both of dimensionality NxK, with K being the K NNs of every
sample)
k: resolution parameter (1 <= k <~= N/2, with N being the number of samples)
Optional:
c: The number of requested clusters.
sims: if sims=1, a similarity matrix is given as input instead of a data matrix (default: 0).
Output Values:
'''
# <NAME>
# (c) 2021
# Initialize the parameters for single-layer K-nets in a K-net structure (kns), which is currently a dictionary,
# and set any user-defined values. kns is used to pass the parameter values to the various K-net functions.
def handle_input_arguments(k, **in_args):
# kns = {'k': 1, 'exact': [], 'rsv': 1, 'metric': 'euclidean'}
kns = {'k': 1}
kns['c'] = 0
kns['metric'] = 'euclidean'
kns['iters'] = 20
kns['rsv'] = 1 # Random sampling value (rsv=1: utilize all data for CSPhase)
kns['sims'] = 0 # Similarities passed as data source instead of data matrix
kns['info'] = 0 # Print operational msgs (default: 0 - do not print)
kns['min_max'] = 1 # minimize or maximize clustering criterion
kns['dthres'] = 10000 # threshold that if exceeded the single-layer Knets is not activated
kns['ecs'] = 1000 # Exemplars Components Size: The exemplars are divided into components of size ecs
kns['scs'] = 1000 # Samples Components Size: The samples in a dataset are divided into components of size scs
    kns['ENE'] = 50  # Number of an Exemplar's Nearest Exemplars that will be considered for the new assignment of the samples
    kns['ENM'] = 50  # Number of an Exemplar's Nearest cluster Members that will be considered for detecting the new exemplar
if isinstance(k, dict):
struct = k
else:
struct = in_args
kns['k'] = k
    # Set the user-defined parameters in the K-net structure
for key, value in struct.items():
kns[key] = value
# print(kns)
return kns
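# A minimal usage sketch (illustrative only; the helper name below is not part of the API):
# user keyword arguments override the defaults collected above, while untouched keys keep
# their default values.
def _example_handle_input_arguments():
    kns = handle_input_arguments(25, metric='cosine', iters=10)
    assert kns['k'] == 25 and kns['metric'] == 'cosine' and kns['iters'] == 10
    assert kns['rsv'] == 1  # default kept
    return kns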
"""input: X, kns.
X: is the data type . It can be one of the followings:
(1). Data matrix, (2): Similarity Matrix, (3): Nearest Neighbors and corresponding distances sets/arrays.
kns: is a structure (i.e. dictionary) with the parametes of the model.
"""
def initialize_knet(X, kns):
# Xtype is the input type. It can be: (1) Data Matrix, (2) Similarity
# matrix, (3): Nearest Neighbors and corresponding distances sets/arrays.
Xtype = 1
if kns['sims'] == 1:
Xtype = 2
n = np.shape(X)[0]
elif isinstance(X, list):
Xtype = 3
NNs = X[0]
DNNs = X[1]
n = np.shape(NNs)[0]
else:
n = np.shape(X)[0]
kns['Xtype'] = Xtype
# Minimize or maximize the clustering criterion of the most central points
# to have maximum or minimum distances....
if kns['min_max'] == 1:
sorting_option = 'ascend'
else:
sorting_option = 'descend'
# If a random sampling value has been provided, randomly
# select a fraction (CSinds) of the input dataset for the Construction /
# Selection Phase. In this version rsv is activated if the input is a data
# matrix.
# Number of samples (samples4CS) that will be considered during the CSPhase
if kns['rsv'] != 1:
if kns['rsv'] > 1: # If rsv is in the form of samples number
samples4CS = np.random.randint(n, size=kns['rsv'])
else: # If rsv is in the form of percentage of the total samples
samples4CS = np.random.randint(n, size=int(kns['rsv'] * n))
else:
samples4CS = np.arange(n) # The whole dataset will be considered for CSPhase
kns['samples4CS'] = samples4CS
if isinstance(X, list):
# The samples that are part of the NNs lists have to be extracted as well
NNs = NNs[samples4CS, :]
DNNs = DNNs[samples4CS, :]
Similarities = []
else:
if np.shape(samples4CS)[0] < kns['dthres']:
if kns['sims'] == 0:
# Similarities = distance.cdist(X[samples4CS, :], X[samples4CS, :],
# kns['metric']) # Calculate Similarity Matrix
Similarities = pairwise_distances(X[samples4CS, :], metric=kns['metric'])
else:
                Similarities = X[np.ix_(samples4CS, samples4CS)]
import argparse
from networks.transforms import trimap_transform, groupnorm_normalise_image
from networks.models import build_model
import numpy as np
import cv2
import torch
from dataloader import AlphaTestDataset
from interaction import robot_click, jaccard, remove_non_fg_connected
def NOCS(ious, thresh):
''' Number of clicks to reach threshold'''
nocs = []
for i in range(ious.shape[0]):
for j in range(20):
if(ious[i, j] >= thresh):
nocs.append(j + 1)
break
if(len(nocs) == i):
nocs.append(20)
return nocs
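# A small illustrative check (hypothetical numbers): an IoU curve that first reaches the
# threshold at the third click yields a NOC value of 3 for that image.
def _example_nocs():
    ious = np.zeros((1, 20))
    ious[0, 2:] = 0.95
    assert NOCS(ious, 0.9) == [3]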
def np_to_torch(x):
return torch.from_numpy(x).permute(2, 0, 1)[None, :, :, :].float().cuda()
def scale_input(x: np.ndarray, scale_type) -> np.ndarray:
    ''' Scales the input so that both side lengths are rounded up to the nearest multiple of 32 '''
h, w = x.shape[:2]
h1 = int(np.ceil(h / 32) * 32)
w1 = int(np.ceil(w / 32) * 32)
x_scale = cv2.resize(x, (w1, h1), interpolation=scale_type)
return x_scale
def pred(image_np: np.ndarray, trimap_np: np.ndarray, alpha_old_np: np.ndarray, model) -> np.ndarray:
''' Predict segmentation
Parameters:
image_np -- the image in rgb format between 0 and 1. Dimensions: (h, w, 3)
trimap_np -- two channel trimap/Click map, first background then foreground. Dimensions: (h, w, 2)
Returns:
alpha: alpha matte/non-binary segmentation image between 0 and 1. Dimensions: (h, w)
'''
# return trimap_np[:,:,1] + (1-np.sum(trimap_np,-1))/2
alpha_old_np = remove_non_fg_connected(alpha_old_np, trimap_np[:, :, 1])
h, w = trimap_np.shape[:2]
image_scale_np = scale_input(image_np, cv2.INTER_LANCZOS4)
trimap_scale_np = scale_input(trimap_np, cv2.INTER_NEAREST)
alpha_old_scale_np = scale_input(alpha_old_np, cv2.INTER_LANCZOS4)
with torch.no_grad():
image_torch = np_to_torch(image_scale_np)
trimap_torch = np_to_torch(trimap_scale_np)
alpha_old_torch = np_to_torch(alpha_old_scale_np[:, :, None])
trimap_transformed_torch = np_to_torch(trimap_transform(trimap_scale_np))
image_transformed_torch = groupnorm_normalise_image(image_torch.clone(), format='nchw')
alpha = model(image_transformed_torch, trimap_transformed_torch, alpha_old_torch, trimap_torch)
alpha = cv2.resize(alpha[0].cpu().numpy().transpose((1, 2, 0)), (w, h), cv2.INTER_LANCZOS4)
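    # hard constraints from the trimap / click map: definite background pixels are forced
    # to 0 and definite foreground pixels to 1 before pruning unconnected foreground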
alpha[trimap_np[:, :, 0] == 1] = 0
alpha[trimap_np[:, :, 1] == 1] = 1
alpha = remove_non_fg_connected(alpha, trimap_np[:, :, 1])
return alpha
def test(model, args):
test_dset = AlphaTestDataset(args.dataset_dir)
ious = np.zeros((test_dset.__len__(), args.num_clicks))
for i in range(ious.shape[0]):
item_dict = test_dset.__getitem__(i)
image = item_dict['image']
gt = item_dict['alpha']
name = item_dict['name']
h, w = gt.shape
trimap = np.zeros((h, w, 2))
alpha = np.zeros((h, w))
for j in range(ious.shape[1]):
trimap, click_region, [y, x], click_cat = robot_click(alpha >= 0.5, gt, trimap)
alpha = pred(image, trimap, alpha, model)
            # third argument assumed: mask of mixed-alpha pixels (0 < gt < 1) excluded from the IoU
            ious[i, j] = jaccard(gt == 1, alpha >= 0.5, np.abs(gt - 0.5) < 0.5)
# ezk.py
# <NAME>, 2021
#
# Algorithm for estimating the Wasserstein metric
#
########################################
import numpy as np
from scipy.optimize import linprog
from hiton_ezk.data_structures import CpnVar, CpnSub
# final argument used to define the distance between points
# for our 1-d cases this is used to calculate the bounds on constraint equations
def estimate_wasserstein(samples, n1, n2, example_var: CpnVar):
# Define transform matrices
n: int = n1 + n2
Ag = np.zeros((n+1,n))
Bg = np.ones((n,n)) # a "pseudoinverse" of Ag
# TODO: Calculate Ag & Bg for the R1 topology (formulas below only apply to S1 / T1)
for i in range(n):
Ag[i,i] = 1
Ag[i,np.mod(i+1,n)] = -1
Ag[n,i] = 1
for j in range(n):
Bg[i,np.mod(i+j,n)] = (n-j-1)/n
# construct objective function
lf_a = (1.0 / n1) * np.ones((1, n1))
lf_b = -(1.0 / n2) * np.ones((1, n2))
linfunct = np.hstack((lf_a, lf_b))[0]
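    # the objective gives weight +1/n1 to samples from the first set and -1/n2 to samples
    # from the second, so that its inner product with the potential values equals the
    # difference of the two empirical means (the Kantorovich-Rubinstein dual objective)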
# reorder sample values and coefficients to ascending order
ordering = np.argsort(samples)
samples = samples[ordering]
f = linfunct[ordering]
g = np.matmul(Bg.transpose(), f)
dists = np.zeros(n)
idists = np.zeros(n)
# convert to a unit cube with hyperplanar constraint
if example_var.topology == 'S1':
for i in range(n):
j = np.mod(i+1, n)
d = example_var.dist(samples[i],samples[j])
dists[i] = d
idists[i] = 1/d
elif example_var.topology == 'R1':
for i in range(n-1):
j = i+1
d = example_var.dist(samples[i],samples[j])
dists[i] = d
idists[i] = 1/d
else:
raise Exception("Error: `estimate_wasserstein` method does not \
support topology {}!".format(example_var.topology))
# maximize dot product with this new vector subject to unit cube & constraint dot product
h = np.matmul(np.diag(dists.flatten()), g)
# solution alpha must satisfy alpha * dists = 0.
# find projection of objective vector
h_tilde = h - (np.dot(h,dists)/np.dot(dists,dists)) * dists
# arrange system axes so the components reach the boundary in component order
order_by_mag = np.argsort(-np.abs(h_tilde)) # largest to smallest
h = h[order_by_mag]
h_tilde = h_tilde[order_by_mag]
dists = dists[order_by_mag]
    # sign_vec would be the obvious choice within the cube, but it violates the orthogonality constraint
sign_vec = np.sign(h_tilde)
# working heuristic to avoid saturating too many coordinates leaving no solution set
cum_alpha_dot_dist = np.abs( np.cumsum(sign_vec * dists) )
    cum_ordered_dists = np.sum(dists) - np.cumsum(dists)
import cv2
import numpy as np
from PIL import Image
class DefectDetector(object):
def __init__(self,**kwargs):
self.thred_dyn =kwargs["thred_dyn"]
self.ksize_dyn =kwargs["ksize_dyn"]
self.ksize_close = kwargs["ksize_close"]
self.ksize_open = kwargs["ksize_open"]
self.thred_residual=kwargs.get("thred_residual",10)
@staticmethod
def cv_dyn_threshold(img, thred=15, ksize=21):
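        # dynamic threshold: compare each pixel against the mean of its ksize x ksize
        # neighbourhood and flag pixels that exceed this local mean by more than `thred`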
img_blur = cv2.blur(img, ksize=(ksize, ksize))
        arr_blur = np.array(img_blur, dtype=np.float64)
        arr = np.array(img, dtype=np.float64)
mask = np.where(arr - arr_blur > thred, 1, 0)
return mask.astype(np.uint8)
@staticmethod
def high_pass_fft(img, filter_size=None, power_thred=None):
assert filter_size != None or power_thred != None
if (filter_size != None and power_thred != None):
raise Exception("filter_size and power_thred are incompatible!")
img_float32 = np.float32(img)
dft = cv2.dft(img_float32, flags=cv2.DFT_COMPLEX_OUTPUT)
        # shift the zero-frequency (low-frequency) components to the center of the spectrum
dft_shift = np.fft.fftshift(dft)
if power_thred != None:
            # magnitude spectrum, normalised by the image size
features = cv2.magnitude(dft_shift[:, :, 0], dft_shift[:, :, 1]) / np.sqrt(img.shape[0] * img.shape[1])
mask = np.where(features > power_thred, 1, 0)[:, :, np.newaxis]
if filter_size != None:
            crow, ccol = int(img.shape[0] / 2), int(img.shape[1] / 2)  # center of the image
mask = np.zeros((img.shape[0], img.shape[1], 2), np.uint8)
mask[crow - filter_size:crow + filter_size, ccol - filter_size:ccol + filter_size] = 1
        # multiply the mask with the shifted spectrum element-wise to zero out the masked frequencies
        fshift = dft_shift * mask
        # the low frequencies were shifted to the image center earlier; shift them back now
        f_ishift = np.fft.ifftshift(fshift)
        # inverse Fourier transform
img_back = cv2.idft(f_ishift)
img_back = cv2.magnitude(img_back[:, :, 0], img_back[:, :, 1])
img_back = (img_back - np.min(img_back)) / (np.max(img_back) - np.min(img_back)) * 255
return mask[:, :, 0], img_back
@staticmethod
def concatImage(images, mode="Adapt", scale=0.5, offset=None):
"""
:param images: 图片列表
:param mode: 图片排列方式["Row" ,"Col","Adapt"]
:param scale:
:param offset: 图片间距
:return:
"""
if not isinstance(images, list):
raise Exception('images must be a list ')
if mode not in ["Row", "Col", "Adapt"]:
raise Exception('mode must be "Row" ,"Adapt",or "Col"')
images = [np.uint8(img) for img in images] # if Gray [H,W] else if RGB [H,W,3]
images = [img.squeeze(2) if len(img.shape) > 2 and img.shape[2] == 1 else img for img in images]
count = len(images)
img_ex = Image.fromarray(images[0])
size = img_ex.size # [W,H]
if mode == "Adapt":
mode = "Row" if size[0] <= size[1] else "Col"
if offset is None: offset = int(np.floor(size[0] * 0.02))
if mode == "Row":
target = Image.new(img_ex.mode, (size[0] * count + offset * (count - 1), size[1] * 1), 100)
for i in range(count):
image = Image.fromarray(images[i]).resize(size, Image.BILINEAR).convert(img_ex.mode)
target.paste(image, (i * (size[0] + offset), 0))
# target.paste(image, (i * (size[0] + offset), 0, i * (size[0] + offset) + size[0], size[1]))
return target
if mode == "Col":
target = Image.new(img_ex.mode, (size[0], size[1] * count + offset * (count - 1)), 100)
for i in range(count):
image = Image.fromarray(images[i]).resize(size, Image.BILINEAR).convert(img_ex.mode)
target.paste(image, (0, i * (size[1] + offset)))
# target.paste(image, (0, i * (size[1] + offset), size[0], i * (size[1] + offset) + size[1]))
return target
@staticmethod
def cv_open(mask, ksize=5, struct="ellipse"):
assert struct in ["rect", "ellipse"]
if struct == "rect": struct = cv2.MORPH_RECT
if struct == "ellipse": struct = cv2.MORPH_ELLIPSE
elment = cv2.getStructuringElement(struct, (ksize, ksize))
mask_open = cv2.morphologyEx(mask, cv2.MORPH_OPEN, elment)
return mask_open
@staticmethod
def cv_close(mask, ksize=5, struct="ellipse"):
assert struct in ["rect", "ellipse"]
if struct == "rect": struct = cv2.MORPH_RECT
if struct == "ellipse": struct = cv2.MORPH_ELLIPSE
elment = cv2.getStructuringElement(struct, (ksize, ksize))
mask_open = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, elment)
return mask_open
@staticmethod
def to255(array,flag=1):
if flag==1:
array= array * 127.5 + 127.5
elif flag==0:
array= array * 255
else:
raise Exception("got an wrong param “flag")
return np.array(array,np.uint8)
def detect_on_image(self,img):
mask=self.cv_dyn_threshold(img,thred=self.thred_dyn,ksize=self.ksize_dyn)
close_mask = self.cv_close(mask, ksize=self.ksize_close)
open_mask = self.cv_open(close_mask, ksize=self.ksize_open)
num_rois, rois = cv2.connectedComponents(open_mask)
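        # rois is a label image: label 0 is the background, labels 1..num_rois-1 are the
        # connected candidate defect regions extracted from the cleaned mask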
ROI = np.zeros_like(mask)
if len(rois) == 0:
return ROI , mask
for roi_idx in range(1, num_rois):
# 求ROI区域的坐标
Cols, Rows = np.nonzero(np.where(rois == roi_idx, 1, 0))
# 求ROI区域的外接矩形大小
h1, h2, w1, w2 = | np.min(Cols) | numpy.min |
#!/bin/python
# This files supplements the transforms3D module
# by augmenting it with the following operations:
# quaternion to rotate v1 to v2.
# quaternion to transform between the coordinate axes.
from math import acos, sin
import transforms3d.quaternions as qops
import numpy as np
import random
import sys
def decompose_quat(q, axis=-1, bReshape=False):
"""
Dynamic decomposition of quaternions into their w-component and v-component while preserving input axes.
If bReshape is True, then the shape of q_w is augmented to have the same dimensionality as before
with a value of 1 for broadcasting purposes.
The reshaping apparently does not survive the function return....!
"""
q=np.array(q)
if axis==-1:
q_w = q[...,0]
q_v = q[...,1:4]
elif axis==0:
q_w = q[0,...]
q_v = q[1:4,...]
else:
        print( "= = = ERROR: decompose_quat does not support arbitrary axis definitions.", file=sys.stderr )
sys.exit(1)
if bReshape and len(q_w.shape)>1:
newShape=list(q_w.shape)
if axis==-1:
newShape.append(1)
elif axis==0:
newShape.insert(0,1)
q_w = q_w.reshape(newShape)
return q_w, q_v
def vecnorm_NDarray(v, axis=-1):
"""
Vector normalisation performed along an arbitrary dimension, which by default is the last one.
Comes with workaround by casting that to zero instead of keeping np.nan or np.inf.
"""
# = = = need to protect against 0/0 errors when the vector is (0,0,0)
if len(v.shape)>1:
# = = = Obey broadcasting rules by applying 1 to the axis that is being reduced.
sh=list(v.shape)
sh[axis]=1
return np.nan_to_num( v / np.linalg.norm(v,axis=axis).reshape(sh) )
else:
return np.nan_to_num( v/np.linalg.norm(v) )
def axangle2quat_simd(ax, th, bNormalised=False):
"""
Local Version that can take additional dimension data with the axis at the end. Base maths from transforms3d.quaternions:
t2 = theta / 2.0
st2 = math.sin(t2)
return np.concatenate(([math.cos(t2)],
vector * st2))
"""
if not bNormalised:
ax = vecnorm_NDarray(ax)
half = th / 2.0
sine = np.sin(half)
if len(ax.shape)>1:
return np.concatenate( ( np.cos(half)[...,None], np.multiply(ax,sine[...,None]) ), axis=-1 )
else:
return np.concatenate( ([np.cos(half)],ax*sine) )
def quat_v1v2(v1, v2):
"""
Return the minimum-angle quaternion that rotates v1 to v2.
Non-SIMD version for clarity of maths.
"""
th=acos(np.dot(v1,v2))
ax=np.cross(v1,v2)
if all( np.isnan(ax) ):
# = = = This happens when the two vectors are identical
return qops.qeye()
else:
# = = = Do normalisation within the next function
return qops.axangle2quat(ax, th)
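# A minimal sanity check (illustrative helper only): the quaternion returned by quat_v1v2
# rotates the x-axis onto the y-axis.
def _example_quat_v1v2():
    q = quat_v1v2((1.0, 0.0, 0.0), (0.0, 1.0, 0.0))
    v = qops.rotate_vector((1.0, 0.0, 0.0), q)
    assert np.allclose(v, (0.0, 1.0, 0.0))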
def quat_v1v2_simd(v1, v2, bNormalised=False):
"""
Return the minimum-angle quaternion that rotates v1 so as to align it with v2.
In this SIMD version, v1 and v2 can be single 3-vectors or 2D arrays with shape (N,3)
Values clipped to [-1,1] in-case of a float error.
"""
v1=np.array(v1)
v2=np.array(v2)
dim1=len(v1.shape)
dim2=len(v2.shape)
if not bNormalised:
v1=vecnorm_NDarray(v1)
v2=vecnorm_NDarray(v2)
if dim1==2 and dim2==2:
#th=np.arccos( np.clip( np.diag( np.matmul(v1,v2.T) ), -1.0, 1.0 ) )
th=np.arccos(np.clip( np.einsum('ij,ij->i', v1,v2),-1.0,1.0) )
else:
th=np.arccos( np.clip( np.dot(v1,v2.T), -1.0, 1.0 ) )
ax = np.cross(v1,v2)
#ax = np.nan_to_num( np.cross(v1,v2) )
return axangle2quat_simd(ax, th, bNormalised=False)
# This function returns the quaternions
# required to transform the given axes vectors
# to match the coordinate frame vectors, this is a COORDINATE transform.
# It is equivalent to a FRAME transform from the coordinate frame
# to the given frame vectors.
# Note that it does not test for orthogonality
# of length of the given axes.
# So, may give rubbish if you don't watch out.
# NB: it actually calculates the rotation of the vectors onto the
# coordinate axes, which is the same as rotating the frame in the
# opposite direction.
def quat_frame_transform(axes):
ref=np.array( ((1,0,0),(0,1,0),(0,0,1)) )
# Define two rotations, first to match z with Z
# then to match x/y with X/Y
q1=quat_v1v2(axes[2],ref[2])
arot=[ qops.rotate_vector(axes[i],q1) for i in range(3)]
q2a=quat_v1v2(arot[0],ref[0])
#Weak test for orthogonality here. Doesn't work since the second axes can be +-Y
#q2b=quat_v1v2(arot[1],ref[1])
#if qops.nearly_equivalent(q2a, q2b) == False:
# print( "= = = ERROR in quat_frame_transform, found disagreement between " )
# print( " second rotations to bring x/y to X/Y!" )
# print( q1, q2a, q2b )
# return -1
return qops.qmult(q2a,q1)
# Returns the minimum version that has the smallest cosine angle
# to the positive or negative axis.
def quat_frame_transform_min(axes):
ref=np.array( ((1,0,0),(0,1,0),(0,0,1)) )
q1a=quat_v1v2(axes[2],(0,0, 1))
q1b=quat_v1v2(axes[2],(0,0,-1))
q1 = q1a if q1a[0]>q1b[0] else q1b
arot=[ qops.rotate_vector(axes[i],q1) for i in range(3)]
q2a=quat_v1v2(arot[0],( 1,0,0))
q2b=quat_v1v2(arot[0],(-1,0,0))
q2 = q2a if q2a[0]>q2b[0] else q2b
return qops.qmult(q2,q1)
# The opposite function to bring an frame back to the coordinate axes.
# Although the elements of the input axes are given in the final coordinate axes... -_-
# Note that the quaternion rotation is identical in the two frames.
def quat_frame_transform_inv(axes):
return qops.qconjugate(quat_frame_transform(axes))
def quat_mult(q1,q2):
q=np.zeros_like(q1)
q[0] = q1[0]*q2[0] - np.einsum('...i,...i',q1[1:4],q2[1:4])
q[1:4]= q1[0]*q2[1:4] + q2[0]*q1[1:4] + np.cross(q1[1:4],q2[1:4])
return q
def quat_mult_simd(q1, q2):
"""
SIMD-version of transforms3d.quaternions.qmult using vector operations.
when at least one of these is N-dimensional, it expects that q1 is the N-dimensional quantity.
The axis is assumed to be the last one.
To do this faster, we'll need a C-code ufunc.
w1, x1, y1, z1 = q1
w2, x2, y2, z2 = q2
w = w1*w2 - x1*x2 - y1*y2 - z1*z2
x = w1*x2 + x1*w2 + y1*z2 - z1*y2
y = w1*y2 + y1*w2 + z1*x2 - x1*z2
z = w1*z2 + z1*w2 + x1*y2 - y1*x2
return np.array([w, x, y, z])
"""
#w1, v1 = decompose_quat(q1)
#w2, v2 = decompose_quat(q2)
#del v1 ; del v2
out=np.zeros_like(q1)
out[...,0] = q1[...,0]*q2[...,0] - np.einsum('...i,...i',q1[...,1:4],q2[...,1:4])
out[...,1:4]= q1[...,0,None]*q2[...,1:4] + q2[...,0,None]*q1[...,1:4] + np.cross(q1[...,1:4],q2[...,1:4])
return out
def quat_invert(q):
return q*[1.0,-1.0,-1.0,-1.0]
def quat_negate(q):
"""
return -q.
"""
return q*-1.0
def quat_normalise(q):
"""
Return q-hat, which has unit length. Use vecnorm_NDarray for q matrices with components as the last axis.
"""
return q/np.linalg.norm(q)
def quat_rand(n=1, dtype=np.float64, bReduce=True, qref=(1,0,0,0)):
"""
    Return N randomised quaternions in base float64. Calls random.uniform 3*N times and converts to quaternion form.
    See Shoemake, K. "Uniform random rotations", 1992.
    Returned shape is (n, 4) for n>1.
"""
r = np.reshape( [ random.uniform(0,1) for x in range(3*n) ], (3,n) )
q = np.zeros( (4,n), dtype=dtype)
    q[0] = np.sqrt(1.0-r[0])
import numpy as np
list=[[1,2,3,4,5],[6,7,8,9,10]]
print(type(list))
d = np.array(list)
print(type(d))
print(d.dtype)
print(d.shape)  # returns a tuple with the length of each dimension, here (2, 5)
print(d.ndim)
print(type(d.shape))
e = np.zeros((3,3))
print(e.dtype)
print(e.shape)  # returns the tuple (3, 3)
print(e.ndim)
e = np.ones((2,3))
print(e.dtype)
print(e.shape)  # returns the tuple (2, 3)
print(e.ndim)
print(e)
x = np.array([1,2.6,3],dtype = np.int64)
print(x)
x = np.array([1,2,3],dtype =np.float64)
print(x)
x = np.array([1,2.6,3],dtype =np.float64)
print(x)
y = x.astype(np.int32)
print(y)
z = y.astype(np.float64)
print(z)
x = np.array(['12','2','3'],dtype =np.string_)
print(x)
y = x.astype(np.int32)
print(y)
x = np.array([1,2,3])
y = x * 2
print(x+y)
y = x>2
print(y)
x = np.array([[1,2],[3,4],[5,6]])
print(x[0])
print(x[0][1])
y = x[0,1]
print(y)
x = np.array([[[1, 2], [3,4]], [[5, 6], [7,8]]])
print(x[0])
y = x[0].copy()
print(y[0][0])
x = np.array([1,2,3,4,5])
print(x[1:3])
print(x[:3])
print(x[1:])
print(x[0:4:2])
x = np.array([[1,2],[3,4],[5,6]])
print(x[:2])
print(x[:2,:1])
print(x[:2][0])
x[:2,:1]=0
print(x)
#https://www.jianshu.com/p/83c8ef18a1e8
# create a 10x10 array of random numbers
x = np.random.rand(10, 10)
print(x)
print(x.dtype)
print(x.shape)  # returns the tuple (10, 10)
print(x.ndim)
# draw a single float from the given range
x = np.random.uniform(0, 100)
print(x)
# draw a single integer from the given range
x = np.random.randint(0, 100)
print(x)
# generate a 4x5 two-dimensional array from a normal distribution
arr = np.random.normal(1.75, 0.1, (4, 5))
print(arr)
# slice rows 1 to 2 and columns 2 to 3 (counting from row 0)
after_arr = arr[1:3, 2:4]
print(after_arr)
print("reshape函数的使用!")
one_20 = np.ones([20])
print("-->1行20列<--")
print (one_20)
one_4_5 = one_20.reshape([4, 5])
print("-->4行5列<--")
print (one_4_5)
stus_score = np.array([[80, 88], [82, 81], [84, 75], [86, 83], [75, 81]])
stus_score = stus_score > 80
print(stus_score)
stus_score = np.array([[80, 88], [82, 81], [84, 75], [86, 83], [75, 81]])
stus_score = np.where(stus_score < 80, 0, 90)
print(stus_score)
stus_score = np.array([[80, 88], [82, 81], [84, 75], [86, 83], [75, 81]])
import time
from enum import IntEnum
from collections import OrderedDict
import numpy as np
from AnyQt.QtWidgets import (
QGraphicsView,
QGraphicsScene,
QGraphicsItem,
QGraphicsSimpleTextItem,
QGraphicsTextItem,
QGraphicsLineItem,
QGraphicsWidget,
QGraphicsRectItem,
QGraphicsEllipseItem,
QGraphicsLinearLayout,
QGridLayout,
QLabel,
QFrame,
QSizePolicy,
QApplication,
QDesktopWidget,
)
from AnyQt.QtGui import QColor, QPainter, QFont, QPen, QBrush
from AnyQt.QtCore import Qt, QRectF, QSize
from Orange.data import Table, Domain
from Orange.statistics.util import nanmin, nanmax, nanmean, unique
from Orange.classification import Model
from Orange.classification.naive_bayes import NaiveBayesModel
from Orange.classification.logistic_regression import LogisticRegressionClassifier
from Orange.widgets.settings import Setting, ContextSetting, ClassValuesContextHandler
from Orange.widgets.widget import OWWidget, Msg, Input
from Orange.widgets import gui
def collides(item, items):
return any(item.collidesWithItem(i) for i in items)
class SortBy(IntEnum):
NO_SORTING, NAME, ABSOLUTE, POSITIVE, NEGATIVE = 0, 1, 2, 3, 4
@staticmethod
def items():
return [
"No sorting",
"Name",
"Absolute importance",
"Positive influence",
"Negative influence",
]
class MovableToolTip(QLabel):
def __init__(self):
super().__init__()
self.setFrameShape(QFrame.StyledPanel)
self.setWindowFlags(Qt.ToolTip)
self.hide()
def show(self, pos, text, change_y=True):
self.setText(text)
self.adjustSize()
x, y = pos.x(), (pos.y() + 15 if change_y else self.y())
avail = QDesktopWidget().availableGeometry(self)
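        # keep the tooltip inside the available screen geometry: flip it to the left of the
        # cursor or above it when it would overflow the right or bottom edge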
if x + self.width() > avail.right():
x -= self.width()
if y + self.height() > avail.bottom():
y = pos.y() - 10 - self.height() if change_y else self.y() - self.height()
self.move(x, y)
super().show()
class DotItem(QGraphicsEllipseItem):
TOOLTIP_STYLE = """ul {margin-top: 1px; margin-bottom: 1px;}"""
TOOLTIP_TEMPLATE = """<html><head><style type="text/css">{}</style>
</head><body><b>{}</b><hr/>{}</body></html>
"""
def __init__(self, radius, scale, offset, min_x, max_x):
super().__init__(0, 0, radius, radius)
self._min_x = min_x * scale - radius / 2 + offset
self._max_x = max_x * scale - radius / 2 + offset
self._scale = scale
self._offset = offset
self.setPos(0, -radius / 2)
self.setFlag(QGraphicsItem.ItemIsMovable)
self.setBrush(QColor(170, 220, 255, 255))
self.setPen(QPen(QBrush(QColor(20, 130, 250, 255)), 2))
self.setZValue(100)
self.tool_tip = MovableToolTip()
self.setAcceptHoverEvents(True)
@property
def value(self):
return (self.x() + self.rect().width() / 2 - self._offset) / self._scale
def move(self, x):
self.setX(x)
def move_to_val(self, val):
x = np.clip(
self._scale * val - self.rect().width() / 2 + self._offset,
self._min_x,
self._max_x,
)
self.move(x)
def hoverEnterEvent(self, event):
self.tool_tip.show(event.screenPos(), self.get_tooltip_text())
def hoverLeaveEvent(self, event):
self.tool_tip.hide()
def mouseMoveEvent(self, _):
# Prevent click-moving of these items
return
class ProbabilitiesDotItem(DotItem):
def __init__(self, radius, scale, offset, min_x, max_x, title, get_probabilities):
self.title = title
self.get_probabilities = get_probabilities
self.movable_dot_items = []
self._invisible_sum = 0
super().__init__(radius, scale, offset, min_x, max_x)
self.setBrush(QColor(150, 150, 150, 255))
self.setPen(QPen(QBrush(QColor(75, 75, 75, 255)), 2))
def move_to_sum(self, invisible_sum: float = None):
total = sum(item.value for item in self.movable_dot_items)
if invisible_sum is not None:
self._invisible_sum = invisible_sum
total += self._invisible_sum
self.move_to_val(total)
self.parentItem().rescale()
def get_tooltip_text(self):
text = "Total: {} <br/>Probability: {:.0%}".format(
np.round(self.value, 2), np.round(self.get_probabilities(self.value), 2)
)
return self.TOOLTIP_TEMPLATE.format(self.TOOLTIP_STYLE, self.title, text)
class MovableDotItem(DotItem):
def __init__(self, radius, scale, offset, min_x, max_x):
self.tooltip_labels = []
self.tooltip_values = []
super().__init__(radius, scale, offset, min_x, max_x)
self._x = min_x * scale - radius / 2 + offset
self._point_dot = None
self._total_dot = None
self._probs_dot = None
self._vertical_line = None
@property
def vertical_line(self):
return self._vertical_line
@vertical_line.setter
def vertical_line(self, line):
line.setVisible(False)
self._vertical_line = line
@property
def point_dot(self):
return self._point_dot
@point_dot.setter
def point_dot(self, dot):
dot.setVisible(False)
self._point_dot = dot
@property
def total_dot(self):
return self._total_dot
@total_dot.setter
def total_dot(self, dot):
self._total_dot = dot
self._total_dot.movable_dot_items.append(self)
@property
def probs_dot(self):
return self._probs_dot
@probs_dot.setter
def probs_dot(self, dot):
self._probs_dot = dot
self._probs_dot.movable_dot_items.append(self)
def mousePressEvent(self, event):
self.tool_tip.show(event.screenPos(), self.get_tooltip_text(), False)
self._x = event.pos().x()
self.setBrush(QColor(50, 180, 250, 255))
self._show_vertical_line_and_point_dot()
return super().mousePressEvent(event)
def mouseMoveEvent(self, event):
self.tool_tip.show(event.screenPos(), self.get_tooltip_text(), False)
delta_x = event.pos().x() - self._x
if self._min_x <= self.x() + delta_x <= self._max_x:
self.move(self.x() + delta_x)
mod_tooltip_values = [0] + list(self.tooltip_values)
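            # snap the dot to a labelled tick value once the drag brings it within
            # one-decimal rounding distance of that value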
if np.round(self.value, 1) in np.round(mod_tooltip_values, 1):
index = np.where(
np.round(mod_tooltip_values, 1) == np.round(self.value, 1)
)
time.sleep(0.05)
self.move_to_val(mod_tooltip_values[index[0][0]])
elif self.x() + delta_x < self._min_x:
self.move(self._min_x)
elif self.x() + delta_x > self._max_x:
self.move(self._max_x)
self._show_vertical_line_and_point_dot()
self.probs_dot.move_to_sum()
def mouseReleaseEvent(self, event):
self.tool_tip.hide()
self.setBrush(QColor(170, 220, 255, 255))
self.point_dot.setVisible(False)
self.vertical_line.setVisible(False)
        return super().mouseReleaseEvent(event)
def _show_vertical_line_and_point_dot(self):
self.vertical_line.setX(self.x() + self.rect().width() / 2 - self._offset)
self.vertical_line.setVisible(True)
self.point_dot.move_to_val(self.value)
self.point_dot.setVisible(True)
class DiscreteMovableDotItem(MovableDotItem):
def get_tooltip_text(self):
labels = self._get_tooltip_labels_with_percentages()
return self.TOOLTIP_TEMPLATE.format(
self.TOOLTIP_STYLE,
"Points: {}".format(np.round(self.value, 2)),
"".join("{}: {:.0%}<br/>".format(l, v) for l, v in labels)[:-5],
)
def _get_tooltip_labels_with_percentages(self):
if not len(self.tooltip_labels):
return []
for i, val in enumerate(self.tooltip_values):
if val > self.value:
break
diff = self.tooltip_values[i] - self.tooltip_values[i - 1]
p1 = 0 if diff < 1e-6 else (-self.value + self.tooltip_values[i]) / diff
return [
(self.tooltip_labels[i - 1].replace("<", "<"), abs(p1)),
(self.tooltip_labels[i].replace("<", "<"), abs(1 - p1)),
]
class ContinuousItemMixin:
def get_tooltip_text(self):
return self.TOOLTIP_TEMPLATE.format(
self.TOOLTIP_STYLE,
"Points: {}".format(np.round(self.value, 2)),
"Value: {}".format(np.round(self._get_tooltip_label_value(), 1)),
)
def _get_tooltip_label_value(self):
if not len(self.tooltip_labels):
return self.value
start = float(self.tooltip_labels[0])
stop = float(self.tooltip_labels[-1])
delta = self.tooltip_values[-1] - self.tooltip_values[0]
if not delta:
return np.nan
return start + self.value * (stop - start) / delta
class ContinuousMovableDotItem(MovableDotItem, ContinuousItemMixin):
pass
class Continuous2DMovableDotItem(MovableDotItem, ContinuousItemMixin):
def __init__(self, radius, scale, offset, min_x, max_x, min_y, max_y):
super().__init__(radius, scale, offset, min_x, max_x)
self._min_y = min_y
self._max_y = max_y
self._horizontal_line = None
@property
def horizontal_line(self):
return self._horizontal_line
@horizontal_line.setter
def horizontal_line(self, line):
line.setVisible(False)
self._horizontal_line = line
def move(self, x):
super().move(x)
diff_ = np.nan_to_num(self._max_x - self._min_x)
k = (x - self._min_x) / diff_ if diff_ else 0
self.setY(
self._min_y - self.rect().width() / 2 + (self._max_y - self._min_y) * k
)
def mousePressEvent(self, event):
self._show_horizontal_line()
return super().mousePressEvent(event)
def mouseMoveEvent(self, event):
super().mouseMoveEvent(event)
self._show_horizontal_line()
def mouseReleaseEvent(self, event):
self.horizontal_line.setVisible(False)
return super().mouseReleaseEvent(event)
def _show_horizontal_line(self):
self.horizontal_line.setY(
self.y() + self.rect().width() / 2 - abs(self._max_y - self._min_y) / 2
)
self.horizontal_line.setVisible(True)
class RulerItem(QGraphicsWidget):
tick_height = 6
tick_width = 0
DOT_RADIUS = 12
half_tick_height = 3
bold_label = True
DOT_ITEM_CLS = DotItem
def __init__(self, name, values, scale, name_offset, offset, labels=None):
super().__init__()
# leading label
font = name.document().defaultFont()
if self.bold_label:
font.setWeight(QFont.Bold)
name.setFont(font)
name.setPos(name_offset, -10)
name.setParentItem(self)
# prediction marker
self.dot = self.DOT_ITEM_CLS(
self.DOT_RADIUS, scale, offset, values[0], values[-1]
)
self.dot.setParentItem(self)
# pylint: disable=unused-variable
# line
line = QGraphicsLineItem(
min(values) * scale + offset, 0, max(values) * scale + offset, 0, self
)
if labels is None:
labels = [str(abs(v) if v == -0 else v) for v in values]
old_x_tick = None
shown_items = []
w = QGraphicsSimpleTextItem(labels[0]).boundingRect().width()
text_finish = values[0] * scale - w + offset - 10
for i, (label, value) in enumerate(zip(labels, values)):
text = QGraphicsSimpleTextItem(label)
x_text = value * scale - text.boundingRect().width() / 2 + offset
if text_finish > x_text - 10:
y_text, y_tick = self.DOT_RADIUS * 0.7, 0
text_finish = values[0] * scale + offset
else:
y_text = -text.boundingRect().height() - self.DOT_RADIUS * 0.7
y_tick = -self.tick_height
text_finish = x_text + text.boundingRect().width()
text.setPos(x_text, y_text)
if not collides(text, shown_items):
text.setParentItem(self)
shown_items.append(text)
x_tick = value * scale - self.tick_width / 2 + offset
tick = QGraphicsRectItem(
x_tick, y_tick, self.tick_width, self.tick_height, self
)
tick.setBrush(QColor(Qt.black))
if self.half_tick_height and i:
x = x_tick - (x_tick - old_x_tick) / 2
half_tick = QGraphicsLineItem(x, -self.half_tick_height, x, 0, self)
old_x_tick = x_tick
class ProbabilitiesRulerItem(QGraphicsWidget):
tick_height = 6
DOT_RADIUS = 14
y_diff = 4
def __init__(
self,
name,
values,
scale,
name_offset,
offset,
get_points,
title,
get_probabilities,
):
super().__init__()
self.scale = scale
self.offset = offset
self.get_points = get_points
self.min_val = min(values)
self.max_val = max(values)
# leading labels
font = name.document().defaultFont()
font.setWeight(QFont.Bold)
name_total = QGraphicsTextItem("Total", self)
name_total.setFont(font)
name_total.setPos(name_offset, -25)
name.setFont(font)
name.setPos(name_offset, 10)
name.setParentItem(self)
# prediction marker
self.dot = ProbabilitiesDotItem(
self.DOT_RADIUS,
scale,
offset,
values[0],
values[-1],
title,
get_probabilities,
)
self.dot.setPos(0, (-self.DOT_RADIUS + self.y_diff) / 2)
self.dot.setParentItem(self)
# pylint: disable=unused-variable
# two lines
t_line = QGraphicsLineItem(
self.min_val * scale + offset, 0, self.max_val * scale + offset, 0, self
)
p_line = QGraphicsLineItem(
self.min_val * scale + offset,
self.y_diff,
self.max_val * scale + offset,
self.y_diff,
self,
)
# ticks and labels
old_x_tick = values[0] * scale + offset
for i, value in enumerate(values[1:]):
x_tick = value * scale + offset
x = x_tick - (x_tick - old_x_tick) / 2
half_tick = QGraphicsLineItem(x, -self.tick_height / 2, x, 0, self)
old_x_tick = x_tick
if i == len(values) - 2:
break
text = QGraphicsTextItem(str(abs(value) if value == -0 else value), self)
x_text = value * scale - text.boundingRect().width() / 2 + offset
y_text = -text.boundingRect().height() - self.DOT_RADIUS * 0.7
text.setPos(x_text, y_text)
tick = QGraphicsLineItem(x_tick, -self.tick_height, x_tick, 0, self)
self.prob_items = [
(
i / 10,
QGraphicsTextItem(" " + str(i * 10) + " "),
QGraphicsLineItem(0, 0, 0, 0),
)
for i in range(1, 10)
]
def rescale(self):
shown_items = []
for prob, text, tick in self.prob_items:
pts = self.get_points(prob)
x = pts * self.scale - text.boundingRect().width() / 2 + self.offset
text.setPos(x, 10 + self.y_diff)
x = pts * self.scale + self.offset
tick.setLine(x, 0 + self.y_diff, x, self.tick_height + self.y_diff)
text.setParentItem(None)
tick.setParentItem(None)
text.setVisible(False)
tick.setVisible(False)
if self.min_val < pts < self.max_val:
tick.setParentItem(self)
tick.setVisible(True)
text.setParentItem(self)
if not collides(text, shown_items):
text.setVisible(True)
shown_items.append(text)
class DiscreteFeatureItem(RulerItem):
tick_height = 6
tick_width = 2
half_tick_height = 0
bold_label = False
DOT_ITEM_CLS = DiscreteMovableDotItem
def __init__(self, name, labels, values, scale, name_offset, offset):
indices = np.argsort(values)
labels, values = np.array(labels)[indices], values[indices]
super().__init__(name, values, scale, name_offset, offset, labels)
self.dot.tooltip_labels = labels
self.dot.tooltip_values = values
class ContinuousFeatureItem(RulerItem):
tick_height = 6
tick_width = 2
half_tick_height = 0
bold_label = False
DOT_ITEM_CLS = ContinuousMovableDotItem
def __init__(self, name, data_extremes, values, scale, name_offset, offset):
diff_ = np.nan_to_num(values[-1] - values[0])
k = (data_extremes[1] - data_extremes[0]) / diff_ if diff_ else 0
labels = [str(np.round(v * k + data_extremes[0], 1)) for v in values]
super().__init__(name, values, scale, name_offset, offset, labels)
self.dot.tooltip_labels = labels
self.dot.tooltip_values = values
class ContinuousFeature2DItem(QGraphicsWidget):
tick_height = 6
tick_width = 2
DOT_RADIUS = 12
y_diff = 80
n_tck = 4
def __init__(self, name, data_extremes, values, scale, name_offset, offset):
super().__init__()
data_start, data_stop = data_extremes[0], data_extremes[1]
labels = [
str(
np.round(
data_start + (data_stop - data_start) * i / (self.n_tck - 1), 1
)
)
for i in range(self.n_tck)
]
# leading label
font = name.document().defaultFont()
name.setFont(font)
name.setPos(name_offset, -10)
name.setParentItem(self)
# labels
ascending = data_start < data_stop
y_start, y_stop = (self.y_diff, 0) if ascending else (0, self.y_diff)
for i in range(self.n_tck):
text = QGraphicsSimpleTextItem(labels[i], self)
w = text.boundingRect().width()
y = y_start + (y_stop - y_start) / (self.n_tck - 1) * i
text.setPos(-5 - w, y - 8)
tick = QGraphicsLineItem(-2, y, 2, y, self)
# prediction marker
self.dot = Continuous2DMovableDotItem(
self.DOT_RADIUS, scale, offset, values[0], values[-1], y_start, y_stop
)
self.dot.tooltip_labels = labels
self.dot.tooltip_values = values
self.dot.setParentItem(self)
h_line = QGraphicsLineItem(
values[0] * scale + offset,
self.y_diff / 2,
values[-1] * scale + offset,
self.y_diff / 2,
self,
)
pen = QPen(Qt.DashLine)
pen.setBrush(QColor(Qt.red))
h_line.setPen(pen)
self.dot.horizontal_line = h_line
# pylint: disable=unused-variable
# line
line = QGraphicsLineItem(
values[0] * scale + offset,
y_start,
values[-1] * scale + offset,
y_stop,
self,
)
# ticks
for value in values:
diff_ = np.nan_to_num(values[-1] - values[0])
k = (value - values[0]) / diff_ if diff_ else 0
y_tick = (y_stop - y_start) * k + y_start - self.tick_height / 2
x_tick = value * scale - self.tick_width / 2 + offset
tick = QGraphicsRectItem(
x_tick, y_tick, self.tick_width, self.tick_height, self
)
tick.setBrush(QColor(Qt.black))
# rect
rect = QGraphicsRectItem(
values[0] * scale + offset,
-self.y_diff * 0.125,
values[-1] * scale + offset,
self.y_diff * 1.25,
self,
)
pen = QPen(Qt.DotLine)
pen.setBrush(QColor(50, 150, 200, 255))
rect.setPen(pen)
self.setPreferredSize(self.preferredWidth(), self.y_diff * 1.5)
class NomogramItem(QGraphicsWidget):
def __init__(self):
super().__init__()
self._items = []
self.setLayout(QGraphicsLinearLayout(Qt.Vertical))
def add_items(self, items):
self._items = items
for item in items:
self.layout().addItem(item)
class OWNomogram(OWWidget):
name = "Nomogram"
description = (
" Nomograms for Visualization of Naive Bayesian"
" and Logistic Regression Classifiers."
)
icon = "icons/Nomogram.svg"
priority = 2000
class Inputs:
classifier = Input("Classifier", Model)
data = Input("Data", Table)
MAX_N_ATTRS = 1000
POINT_SCALE = 0
ALIGN_LEFT = 0
ALIGN_ZERO = 1
ACCEPTABLE = (NaiveBayesModel, LogisticRegressionClassifier)
settingsHandler = ClassValuesContextHandler()
target_class_index = ContextSetting(0)
normalize_probabilities = Setting(False)
scale = Setting(1)
display_index = Setting(1)
n_attributes = Setting(10)
sort_index = Setting(SortBy.ABSOLUTE)
cont_feature_dim_index = Setting(0)
graph_name = "scene"
class Error(OWWidget.Error):
invalid_classifier = Msg(
"Nomogram accepts only Naive Bayes and " "Logistic Regression classifiers."
)
def __init__(self):
super().__init__()
self.instances = None
self.domain = None
self.data = None
self.classifier = None
self.align = OWNomogram.ALIGN_ZERO
self.log_odds_ratios = []
self.log_reg_coeffs = []
self.log_reg_coeffs_orig = []
self.log_reg_cont_data_extremes = []
self.p = None
self.b0 = None
self.points = []
self.feature_items = {}
self.feature_marker_values = []
self.scale_marker_values = lambda x: x
self.nomogram_main = None
self.vertical_line = None
self.hidden_vertical_line = None
self.old_target_class_index = self.target_class_index
self.repaint = False
# GUI
box = gui.vBox(self.controlArea, "Target class")
self.class_combo = gui.comboBox(
box,
self,
"target_class_index",
callback=self._class_combo_changed,
contentsLength=12,
)
self.norm_check = gui.checkBox(
box,
self,
"normalize_probabilities",
"Normalize probabilities",
hidden=True,
callback=self.update_scene,
tooltip="For multiclass data 1 vs. all probabilities do not"
" sum to 1 and therefore could be normalized.",
)
self.scale_radio = gui.radioButtons(
self.controlArea,
self,
"scale",
["Point scale", "Log odds ratios"],
box="Scale",
callback=self.update_scene,
)
box = gui.vBox(self.controlArea, "Display features")
grid = QGridLayout()
radio_group = gui.radioButtonsInBox(
box, self, "display_index", [], orientation=grid, callback=self.update_scene
)
radio_all = gui.appendRadioButton(radio_group, "All", addToLayout=False)
radio_best = gui.appendRadioButton(
radio_group, "Best ranked:", addToLayout=False
)
spin_box = gui.hBox(None, margin=0)
self.n_spin = gui.spin(
spin_box,
self,
"n_attributes",
1,
self.MAX_N_ATTRS,
label=" ",
controlWidth=60,
callback=self._n_spin_changed,
)
grid.addWidget(radio_all, 1, 1)
grid.addWidget(radio_best, 2, 1)
grid.addWidget(spin_box, 2, 2)
self.sort_combo = gui.comboBox(
box,
self,
"sort_index",
label="Rank by:",
items=SortBy.items(),
orientation=Qt.Horizontal,
callback=self.update_scene,
)
self.cont_feature_dim_combo = gui.comboBox(
box,
self,
"cont_feature_dim_index",
label="Numeric features: ",
items=["1D projection", "2D curve"],
orientation=Qt.Horizontal,
callback=self.update_scene,
)
gui.rubber(self.controlArea)
class _GraphicsView(QGraphicsView):
def __init__(self, scene, parent, **kwargs):
for k, v in dict(
verticalScrollBarPolicy=Qt.ScrollBarAlwaysOff,
horizontalScrollBarPolicy=Qt.ScrollBarAlwaysOff,
viewportUpdateMode=QGraphicsView.BoundingRectViewportUpdate,
renderHints=(
QPainter.Antialiasing
| QPainter.TextAntialiasing
| QPainter.SmoothPixmapTransform
),
alignment=(Qt.AlignTop | Qt.AlignLeft),
sizePolicy=QSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.MinimumExpanding
),
).items():
kwargs.setdefault(k, v)
super().__init__(scene, parent, **kwargs)
class GraphicsView(_GraphicsView):
def __init__(self, scene, parent):
super().__init__(
scene,
parent,
verticalScrollBarPolicy=Qt.ScrollBarAlwaysOn,
styleSheet="QGraphicsView {background: white}",
)
self.viewport().setMinimumWidth(
300
) # XXX: This prevents some tests failing
self._is_resizing = False
w = self
def resizeEvent(self, resizeEvent):
# Recompute main scene on window width change
if resizeEvent.size().width() != resizeEvent.oldSize().width():
self._is_resizing = True
self.w.update_scene()
self._is_resizing = False
return super().resizeEvent(resizeEvent)
def is_resizing(self):
return self._is_resizing
def sizeHint(self):
return QSize(400, 200)
class FixedSizeGraphicsView(_GraphicsView):
def __init__(self, scene, parent):
super().__init__(
scene,
parent,
sizePolicy=QSizePolicy(
QSizePolicy.MinimumExpanding, QSizePolicy.Minimum
),
)
def sizeHint(self):
return QSize(400, 85)
scene = self.scene = QGraphicsScene(self)
top_view = self.top_view = FixedSizeGraphicsView(scene, self)
mid_view = self.view = GraphicsView(scene, self)
bottom_view = self.bottom_view = FixedSizeGraphicsView(scene, self)
for view in (top_view, mid_view, bottom_view):
self.mainArea.layout().addWidget(view)
def _class_combo_changed(self):
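        # convert the current feature marker positions into the point scale of the newly
        # selected target class, scaling by the ratio between the two classes' point values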
with np.errstate(invalid="ignore"):
coeffs = [
np.nan_to_num(
p[self.target_class_index] / p[self.old_target_class_index]
)
for p in self.points
]
points = [p[self.old_target_class_index] for p in self.points]
self.feature_marker_values = [
self.get_points_from_coeffs(v, c, p)
for (v, c, p) in zip(self.feature_marker_values, coeffs, points)
]
self.feature_marker_values = np.asarray(self.feature_marker_values)
self.update_scene()
self.old_target_class_index = self.target_class_index
def _n_spin_changed(self):
self.display_index = 1
self.update_scene()
def update_controls(self):
self.class_combo.clear()
self.norm_check.setHidden(True)
self.cont_feature_dim_combo.setEnabled(True)
if self.domain is not None:
self.class_combo.addItems(self.domain.class_vars[0].values)
if len(self.domain.attributes) > self.MAX_N_ATTRS:
self.display_index = 1
if len(self.domain.class_vars[0].values) > 2:
self.norm_check.setHidden(False)
if not self.domain.has_continuous_attributes():
self.cont_feature_dim_combo.setEnabled(False)
self.cont_feature_dim_index = 0
model = self.sort_combo.model()
item = model.item(SortBy.POSITIVE)
item.setFlags(item.flags() | Qt.ItemIsEnabled)
item = model.item(SortBy.NEGATIVE)
item.setFlags(item.flags() | Qt.ItemIsEnabled)
self.align = OWNomogram.ALIGN_ZERO
if self.classifier and isinstance(
self.classifier, LogisticRegressionClassifier
):
self.align = OWNomogram.ALIGN_LEFT
@Inputs.data
def set_data(self, data):
self.instances = data
self.feature_marker_values = []
self.set_feature_marker_values()
self.update_scene()
@Inputs.classifier
def set_classifier(self, classifier):
self.closeContext()
self.classifier = classifier
self.Error.clear()
if self.classifier and not isinstance(self.classifier, self.ACCEPTABLE):
self.Error.invalid_classifier()
self.classifier = None
self.domain = self.classifier.domain if self.classifier else None
self.data = None
self.calculate_log_odds_ratios()
self.calculate_log_reg_coefficients()
self.update_controls()
self.target_class_index = 0
self.openContext(self.domain.class_var if self.domain is not None else None)
self.points = self.log_odds_ratios or self.log_reg_coeffs
self.feature_marker_values = []
self.old_target_class_index = self.target_class_index
self.update_scene()
def calculate_log_odds_ratios(self):
self.log_odds_ratios = []
self.p = None
if self.classifier is None or self.domain is None:
return
if not isinstance(self.classifier, NaiveBayesModel):
return
log_cont_prob = self.classifier.log_cont_prob
class_prob = self.classifier.class_prob
for i in range(len(self.domain.attributes)):
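            # ca: per-value class probabilities derived from the model; the log of ca's odds
            # relative to the prior class odds gives the attribute's nomogram point scores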
ca = np.exp(log_cont_prob[i]) * class_prob[:, None]
_or = (ca / (1 - ca)) / (class_prob / (1 - class_prob))[:, None]
self.log_odds_ratios.append(np.log(_or))
self.p = class_prob
def calculate_log_reg_coefficients(self):
self.log_reg_coeffs = []
self.log_reg_cont_data_extremes = []
self.b0 = None
if self.classifier is None or self.domain is None:
return
if not isinstance(self.classifier, LogisticRegressionClassifier):
return
self.domain = self.reconstruct_domain(
self.classifier.original_domain, self.domain
)
self.data = self.classifier.original_data.transform(self.domain)
attrs, ranges, start = self.domain.attributes, [], 0
for attr in attrs:
stop = start + len(attr.values) if attr.is_discrete else start + 1
ranges.append(slice(start, stop))
start = stop
self.b0 = self.classifier.intercept
coeffs = self.classifier.coefficients
if len(self.domain.class_var.values) == 2:
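            # binary case: a single coefficient row is stored (for the second class), so
            # prepend its negation to obtain an explicit row for each class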
self.b0 = np.hstack((self.b0 * (-1), self.b0))
coeffs = np.vstack((coeffs * (-1), coeffs))
self.log_reg_coeffs = [coeffs[:, ranges[i]] for i in range(len(attrs))]
self.log_reg_coeffs_orig = self.log_reg_coeffs.copy()
min_values = nanmin(self.data.X, axis=0)
max_values = nanmax(self.data.X, axis=0)
for i, min_t, max_t in zip(
range(len(self.log_reg_coeffs)), min_values, max_values
):
if self.log_reg_coeffs[i].shape[1] == 1:
coef = self.log_reg_coeffs[i]
self.log_reg_coeffs[i] = np.hstack((coef * min_t, coef * max_t))
self.log_reg_cont_data_extremes.append(
[sorted([min_t, max_t], reverse=(c < 0)) for c in coef]
)
else:
self.log_reg_cont_data_extremes.append([None])
def update_scene(self):
self.clear_scene()
if self.domain is None or not len(self.points[0]):
return
n_attrs = self.n_attributes if self.display_index else int(1e10)
attr_inds, attributes = zip(*self.get_ordered_attributes()[:n_attrs])
name_items = [QGraphicsTextItem(attr.name) for attr in attributes]
point_text = QGraphicsTextItem("Points")
probs_text = QGraphicsTextItem("Probabilities (%)")
all_items = name_items + [point_text, probs_text]
name_offset = -max(t.boundingRect().width() for t in all_items) - 10
w = self.view.viewport().rect().width()
max_width = w + name_offset - 30
points = [self.points[i][self.target_class_index] for i in attr_inds]
if self.align == OWNomogram.ALIGN_LEFT:
points = [p - p.min() for p in points]
max_ = np.nan_to_num(max(max(abs(p)) for p in points))
d = 100 / max_ if max_ else 1
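        # d maps the largest absolute point value across all shown attributes to 100,
        # which defines the "Points" ruler when the point scale is selected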
minimums = [p[self.target_class_index].min() for p in self.points]
if self.scale == OWNomogram.POINT_SCALE:
points = [p * d for p in points]
if self.align == OWNomogram.ALIGN_LEFT:
self.scale_marker_values = lambda x: (x - minimums) * d
else:
self.scale_marker_values = lambda x: x * d
else:
if self.align == OWNomogram.ALIGN_LEFT:
self.scale_marker_values = lambda x: x - minimums
else:
self.scale_marker_values = lambda x: x
point_item, nomogram_head = self.create_main_nomogram(
attributes,
attr_inds,
name_items,
points,
max_width,
point_text,
name_offset,
)
probs_item, nomogram_foot = self.create_footer_nomogram(
probs_text, d, minimums, max_width, name_offset
)
for item in self.feature_items.values():
item.dot.point_dot = point_item.dot
item.dot.probs_dot = probs_item.dot
item.dot.vertical_line = self.hidden_vertical_line
self.nomogram = nomogram = NomogramItem()
nomogram.add_items([nomogram_head, self.nomogram_main, nomogram_foot])
self.scene.addItem(nomogram)
self.set_feature_marker_values()
rect = QRectF(
self.scene.itemsBoundingRect().x(),
self.scene.itemsBoundingRect().y(),
self.scene.itemsBoundingRect().width(),
self.nomogram.preferredSize().height(),
).adjusted(10, 0, 20, 0)
self.scene.setSceneRect(rect)
# Clip the top and bottom (80 each) from the main view
self.view.setSceneRect(
rect.x(), rect.y() + 80, rect.width() - 10, rect.height() - 160
)
self.view.viewport().setMaximumHeight(rect.height() - 160)
# Clip main part from top/bottom views
# the offsets below are imprecise (slightly less/more than required) but
# this is not a problem because the clipped scene content is still drawn
self.top_view.setSceneRect(rect.x(), rect.y() + 3, rect.width() - 10, 20)
self.bottom_view.setSceneRect(
rect.x(), rect.height() - 110, rect.width() - 10, 30
)
def create_main_nomogram(
self,
attributes,
attr_inds,
name_items,
points,
max_width,
point_text,
name_offset,
):
cls_index = self.target_class_index
min_p = min(p.min() for p in points)
max_p = max(p.max() for p in points)
values = self.get_ruler_values(min_p, max_p, max_width)
min_p, max_p = min(values), max(values)
diff_ = np.nan_to_num(max_p - min_p)
scale_x = max_width / diff_ if diff_ else max_width
nomogram_header = NomogramItem()
point_item = RulerItem(
point_text, values, scale_x, name_offset, -scale_x * min_p
)
point_item.setPreferredSize(point_item.preferredWidth(), 35)
nomogram_header.add_items([point_item])
self.nomogram_main = NomogramItem()
cont_feature_item_class = (
ContinuousFeature2DItem
if self.cont_feature_dim_index
else ContinuousFeatureItem
)
feature_items = [
DiscreteFeatureItem(
name_item, attr.values, point, scale_x, name_offset, -scale_x * min_p
)
if attr.is_discrete
else cont_feature_item_class(
name_item,
self.log_reg_cont_data_extremes[i][cls_index],
self.get_ruler_values(
point.min(), point.max(), scale_x * point.ptp(), False
),
scale_x,
name_offset,
-scale_x * min_p,
)
for i, attr, name_item, point in zip(
attr_inds, attributes, name_items, points
)
]
self.nomogram_main.add_items(feature_items)
self.feature_items = OrderedDict(sorted(zip(attr_inds, feature_items)))
x = -scale_x * min_p
y = self.nomogram_main.layout().preferredHeight() + 10
self.vertical_line = QGraphicsLineItem(x, -6, x, y)
self.vertical_line.setPen(QPen(Qt.DotLine))
self.vertical_line.setParentItem(point_item)
self.hidden_vertical_line = QGraphicsLineItem(x, -6, x, y)
pen = QPen(Qt.DashLine)
pen.setBrush(QColor(Qt.red))
self.hidden_vertical_line.setPen(pen)
self.hidden_vertical_line.setParentItem(point_item)
return point_item, nomogram_header
def get_ordered_attributes(self):
"""Return (in_domain_index, attr) pairs, ordered by method in SortBy combo"""
if self.domain is None or not self.domain.attributes:
return []
attrs = self.domain.attributes
sort_by = self.sort_index
class_value = self.target_class_index
if sort_by == SortBy.NO_SORTING:
return list(enumerate(attrs))
elif sort_by == SortBy.NAME:
def key(x):
_, attr = x
return attr.name.lower()
elif sort_by == SortBy.ABSOLUTE:
def key(x):
i, attr = x
if attr.is_discrete:
ptp = self.points[i][class_value].ptp()
else:
coef = np.abs(self.log_reg_coeffs_orig[i][class_value]).mean()
ptp = coef * np.ptp(self.log_reg_cont_data_extremes[i][class_value])
return -ptp
elif sort_by == SortBy.POSITIVE:
def key(x):
i, attr = x
max_value = (
self.points[i][class_value].max()
if attr.is_discrete
else np.mean(self.log_reg_cont_data_extremes[i][class_value])
)
return -max_value
elif sort_by == SortBy.NEGATIVE:
def key(x):
i, attr = x
min_value = (
self.points[i][class_value].min()
if attr.is_discrete
else np.mean(self.log_reg_cont_data_extremes[i][class_value])
)
return min_value
return sorted(enumerate(attrs), key=key)
def create_footer_nomogram(self, probs_text, d, minimums, max_width, name_offset):
eps, d_ = 0.05, 1
k = -np.log(self.p / (1 - self.p)) if self.p is not None else -self.b0
min_sum = k[self.target_class_index] - np.log((1 - eps) / eps)
max_sum = k[self.target_class_index] - np.log(eps / (1 - eps))
if self.align == OWNomogram.ALIGN_LEFT:
max_sum = max_sum - sum(minimums)
min_sum = min_sum - sum(minimums)
for i in range(len(k)):
k[i] = k[i] - sum([min(q) for q in [p[i] for p in self.points]])
if self.scale == OWNomogram.POINT_SCALE:
min_sum *= d
max_sum *= d
d_ = d
values = self.get_ruler_values(min_sum, max_sum, max_width)
min_sum, max_sum = min(values), max(values)
diff_ = np.nan_to_num(max_sum - min_sum)
scale_x = max_width / diff_ if diff_ else max_width
cls_var, cls_index = self.domain.class_var, self.target_class_index
nomogram_footer = NomogramItem()
def get_normalized_probabilities(val):
if not self.normalize_probabilities:
return 1 / (1 + np.exp(k[cls_index] - val / d_))
totals = self.__get_totals_for_class_values(minimums)
p_sum = np.sum(1 / (1 + np.exp(k - totals / d_)))
return 1 / (1 + np.exp(k[cls_index] - val / d_)) / p_sum
def get_points(prob):
if not self.normalize_probabilities:
return (k[cls_index] - np.log(1 / prob - 1)) * d_
totals = self.__get_totals_for_class_values(minimums)
p_sum = np.sum(1 / (1 + np.exp(k - totals / d_)))
return (k[cls_index] - np.log(1 / (prob * p_sum) - 1)) * d_
probs_item = ProbabilitiesRulerItem(
probs_text,
values,
scale_x,
name_offset,
-scale_x * min_sum,
get_points=get_points,
title="{}='{}'".format(cls_var.name, cls_var.values[cls_index]),
get_probabilities=get_normalized_probabilities,
)
nomogram_footer.add_items([probs_item])
return probs_item, nomogram_footer
def __get_totals_for_class_values(self, minimums):
cls_index = self.target_class_index
marker_values = self.scale_marker_values(self.feature_marker_values)
totals = np.full(len(self.domain.class_var.values), np.nan)
totals[cls_index] = marker_values.sum()
for i in range(len(self.domain.class_var.values)):
if i == cls_index:
continue
coeffs = [np.nan_to_num(p[i] / p[cls_index]) for p in self.points]
points = [p[cls_index] for p in self.points]
total = sum(
[
self.get_points_from_coeffs(v, c, p)
for (v, c, p) in zip(self.feature_marker_values, coeffs, points)
]
)
if self.align == OWNomogram.ALIGN_LEFT:
points = [p - m for m, p in zip(minimums, points)]
total -= sum([min(p) for p in [p[i] for p in self.points]])
d = 100 / max(max(abs(p)) for p in points)
if self.scale == OWNomogram.POINT_SCALE:
total *= d
totals[i] = total
assert not np.any(np.isnan(totals))
return totals
def set_feature_marker_values(self):
if not (len(self.points) and len(self.feature_items)):
return
if not len(self.feature_marker_values):
self._init_feature_marker_values()
marker_values = self.scale_marker_values(self.feature_marker_values)
invisible_sum = 0
for i in range(len(marker_values)):
try:
item = self.feature_items[i]
except KeyError:
invisible_sum += marker_values[i]
else:
item.dot.move_to_val(marker_values[i])
item.dot.probs_dot.move_to_sum(invisible_sum)
def _init_feature_marker_values(self):
self.feature_marker_values = []
cls_index = self.target_class_index
instances = Table(self.domain, self.instances) if self.instances else None
values = []
for i, attr in enumerate(self.domain.attributes):
value, feature_val = 0, None
if len(self.log_reg_coeffs):
if attr.is_discrete:
ind, n = unique(self.data.X[:, i], return_counts=True)
feature_val = np.nan_to_num(ind[np.argmax(n)])
else:
feature_val = nanmean(self.data.X[:, i])
# If data is provided on a separate signal, use the first data
# instance to position the points instead of the mean
inst_in_dom = instances and attr in instances.domain
if inst_in_dom and not np.isnan(instances[0][attr]):
feature_val = instances[0][attr]
if feature_val is not None:
value = (
self.points[i][cls_index][int(feature_val)]
if attr.is_discrete
else self.log_reg_coeffs_orig[i][cls_index][0] * feature_val
)
values.append(value)
self.feature_marker_values = | np.asarray(values) | numpy.asarray |
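# A minimal standalone sketch of the per-attribute odds-ratio computation used in
# calculate_log_odds_ratios above. The arrays below are made-up stand-ins for a trained
# NaiveBayesModel's log_cont_prob and class_prob, not real model output.
import numpy as np

class_prob = np.array([0.4, 0.6])                      # prior P(class)
log_cont_prob = [np.log(np.array([[0.2, 0.8],          # log P(value | class) for one
                                  [0.5, 0.5]]))]       # discrete attribute with 2 values
log_odds_ratios = []
for attr_log_prob in log_cont_prob:
    ca = np.exp(attr_log_prob) * class_prob[:, None]   # P(value | class) * P(class)
    _or = (ca / (1 - ca)) / (class_prob / (1 - class_prob))[:, None]
    log_odds_ratios.append(np.log(_or))
print(log_odds_ratios[0])                              # one row per class, one column per value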
"""
Created on Sat Dec 5 21:39:37 2020
@author: T.Nishiyama
"""
import numpy as np
import matplotlib.pyplot as plt
#Definitions of parameters.
sigma=0.5*np.log(3)
v=0.5*np.log(0.75)/sigma
s=0.5*np.log(3)/sigma
alpha=4.5*s**2-sigma*(3*s-v)
beta=2*s**2
def collatz(n):
count=0
while (n > 1):
if (n % 2 == 0):
n = n // 2
else:
n = 3 * n + 1
count+=1
return count
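# A tiny usage check for collatz() (illustrative helper): 1 needs no steps at all,
# while 27 is the classic long orbit with a total stopping time of 111.
def _collatz_smoke_test():
    assert collatz(1) == 0
    assert collatz(27) == 111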
def calc_stopping_time(ns, ne, t_range):
hist=np.zeros(t_range)
t_max=0
for i in range(ns, ne+1):
count=collatz(i)
if count < t_range:
hist[count]+=1
if count > t_max:
t_max=count
return hist, t_max
#Approximation of the scaled complementary error function.
def erfcx(x):
a=1.98
b=1.135
return (1-np.exp(-a*x))/(b * np.sqrt(np.pi)*x)
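# Quick way to gauge the shortcut above: compare it with the exact scaled complementary
# error function exp(x**2) * erfc(x) from the standard library. (Illustrative helper;
# only meaningful for moderate x, since exp(x**2) overflows for very large x.)
def erfcx_rel_error(x):
    import math
    exact = math.exp(x * x) * math.erfc(x)
    return abs(erfcx(x) - exact) / exact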
#Function that estimates the total stopping time distribution of the Collatz problem.
def log_phi(r, p, T):
x=np.log(r)+p*np.log(10)
C=-s/np.sqrt(beta)-0.5*(3*s-v)/np.sqrt(alpha)
D=-s/np.sqrt(beta)+0.5*(3*s-v)/np.sqrt(alpha)
t=(x/sigma+2*s*T)/(3*s-v)
X=np.sqrt(alpha*t)+T*np.sqrt(beta/t)
Y=-np.sqrt(alpha*t)+T*np.sqrt(beta/t)
Z=-alpha*t-beta*T**2/t
g=(6*s**2-2*sigma*s)*T+Z
return g + np.log(np.sqrt(2)*sigma*s) + np.log(C*erfcx(X)+D*erfcx(Y))
#Estimate the total stopping time distribution for numbers in [ns=rs*10**ps, ne=re*10**pe].
def estimate_distribution(rs, ps, re, pe, t_range):
T=np.arange(t_range)
ps = max(ps, 1)
return np.exp(log_phi(re, pe, T)) - np.exp(log_phi(rs, ps, T))
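# A hedged end-to-end sketch (hypothetical helper): overlay the empirical total-stopping-time
# histogram for 1..10**4 on the analytic estimate above. The sample range and t_range are
# arbitrary illustrative choices.
def compare_distributions(t_range=300):
    hist, t_max = calc_stopping_time(1, 10**4, t_range)
    est = estimate_distribution(1, 0, 1, 4, t_range)
    plt.plot(hist, label="empirical")
    plt.plot(est, label="estimated")
    plt.legend()
    plt.show()
    return t_max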
#Estimate the longest total stopping time for the numbers in [1, n=r*10**p].
def estimate_max_stopping_time(r, p, t_range):
T=np.arange(t_range)
log_count=log_phi(r, p, T)
max_idx=np.argmax(log_count)
min_idx=np.argmin( | np.abs(log_count[max_idx:]) | numpy.abs |
import json
import spacy
from spacy.tokens import Doc
import re
from keras.models import Model, load_model
from keras.preprocessing import sequence
from keras.layers import Dense, Dropout, Embedding, GRU, TimeDistributed, Bidirectional, Input
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras import backend as K
from sklearn.metrics import classification_report, confusion_matrix
from gensim.models import keyedvectors
from unidecode import unidecode_expect_nonascii
import numpy as np
import pandas as pd
class TokenClassifier(object):
def __init__(self, nlp=None, seq_maxlen=100, fpath='data/ner_annotations_split.json'):
if nlp is None:
self.nlp = spacy.load('en_core_web_sm')
else:
self.nlp = nlp
self._seq_maxlen = seq_maxlen
self.token_classes = {
0: "null",
1: "nonrecipe-material",
2: "unspecified-material",
3: "material",
4: "precursor",
5: "solvent",
6: "gas",
7: "target",
8: "number",
9: "amount-unit",
10: "amount-misc",
11: "condition-unit",
12: "condition-misc",
13: "condition-type",
14: "property-unit",
15: "property-misc",
16: "property-type",
17: "synthesis-apparatus",
18: "apparatus-property-type",
19: "apparatus-descriptor",
20: "apparatus-unit",
21: "brand",
22: "reference",
23: "operation",
24: "meta",
25: "material-descriptor",
26: "characterization-apparatus"
}
self.inv_token_classes = {v: k for k, v in self.token_classes.items()}
self._load_embeddings()
annotated_data = json.loads(open(fpath, "r").read())
self.X_train, self.X_dev, self.X_test = [],[],[]
self.y_train, self.y_dev, self.y_test = [],[],[]
for ann_paper in annotated_data["data"]:
for i, (sent, labels) in enumerate(zip(ann_paper["tokens"], ann_paper["labels"])):
if i == 0: continue #skip titles
ft_vec = self.featurize(sent)
onehot_labels = np.zeros(shape=(self._seq_maxlen, len(self.token_classes)))
for j, label in enumerate(labels[:self._seq_maxlen]):
onehot_label = [0.0]*len(self.token_classes)
onehot_label[self.inv_token_classes[label]] = 1.0
onehot_labels[j] = onehot_label
if ann_paper["split"] == "train":
self.X_train.append(ft_vec)
self.y_train.append(onehot_labels)
elif ann_paper["split"] == "dev":
self.X_dev.append(ft_vec)
self.y_dev.append(onehot_labels)
else:
self.X_test.append(ft_vec)
self.y_test.append(onehot_labels)
self.X_train = np.asarray(sequence.pad_sequences(self.X_train, maxlen=self._seq_maxlen, padding='post', truncating='post'))
self.X_dev = np.asarray(sequence.pad_sequences(self.X_dev, maxlen=self._seq_maxlen, padding='post', truncating='post'))
self.X_test = np.asarray(sequence.pad_sequences(self.X_test, maxlen=self._seq_maxlen, padding='post', truncating='post'))
self.y_train, self.y_dev, self.y_test = np.asarray(self.y_train), np.asarray(self.y_dev), np.asarray(self.y_test)
print('Initialized Fasttext Token Model.....')
print('Train Set Shape: Input-', self.X_train.shape, ' Output-', self.y_train.shape)
print('Dev Set Shape: Input-', self.X_dev.shape, ' Output-', self.y_dev.shape)
print('Test Set Shape: Input-', self.X_test.shape, ' Output-', self.y_test.shape)
def build_nn_model(self, recurrent_dim=256):
try:
x = self.emb_vocab_w2v
except:
self._load_embeddings()
input_ft_ids = Input(shape=(self._seq_maxlen,))
emb_ft = Embedding(
input_dim=self.emb_weights_ft.shape[0],
output_dim=self.emb_weights_ft.shape[1],
input_length=self._seq_maxlen,
weights=[self.emb_weights_ft],
trainable=False,
mask_zero=True
)(input_ft_ids)
drop_1 = Dropout(0.1)(emb_ft)
rnn_1 = Bidirectional(GRU(recurrent_dim, return_sequences=True, dropout=0.2, recurrent_dropout=0.2))(drop_1)
dense_out = TimeDistributed(Dense(len(self.token_classes), activation="softmax"))(rnn_1)
model = Model(inputs=[input_ft_ids], outputs=[dense_out])
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False),
metrics=['accuracy'])
self.model = model
self.fast_predict = K.function(
self.model.inputs + [K.learning_phase()],
[self.model.layers[-1].output]
)
def featurize(self, words):
ft_vector = []
words = [self._normalize_string(w) for w in words]
spacy_doc = Doc(self.nlp.vocab, words=words)
self.nlp.tagger(spacy_doc)
self.nlp.parser(spacy_doc)
self.nlp.entity(spacy_doc)
spacy_tokens = spacy_doc[:self._seq_maxlen]
for word_tok in spacy_tokens:
word_string = word_tok.lemma_
if word_string in self.emb_vocab_ft:
ft_vector.append(self.emb_vocab_ft[word_string])
else:
ft_vector.append(1)
return np.array(ft_vector)
def train(self, batch_size=256, num_epochs=30, checkpt_filepath=None,
checkpt_period=5, stop_early=False, verbosity=1, val_split=0.0):
callbacks = []
if stop_early:
callbacks.append(
EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='auto')
)
self.model.fit(
x=self.X_train,
y=self.y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_split=val_split,
validation_data=(self.X_dev, self.y_dev),
callbacks= callbacks,
verbose=verbosity
)
def test(self, confusion_matrix_test=True):
raw_preds_test = self.model.predict(self.X_test)
train_labels, train_predictions, train_words = [],[],[]
test_labels, test_predictions, test_words = [],[],[]
for i, (labels, preds) in enumerate(zip(self.y_test, raw_preds_test)):
for label, pred in zip(labels, preds):
test_labels.append(self.token_classes[np.argmax(label)])
test_predictions.append(self.token_classes[np.argmax(pred)])
print('Test Set Results.....')
print(classification_report(test_labels, test_predictions))
if confusion_matrix_test:
print('---')
print(confusion_matrix(test_labels, test_predictions))
return self.model.predict(self.X_test)
def evaluate(self, batch_size=32):
return self.model.evaluate(self.X_test, self.y_test, batch_size=batch_size)
def predict_many(self, tokenized_sentences):
predictions = []
for sent in tokenized_sentences:
predictions.append(self.predict_one(sent))
return predictions
def predict_one(self, words):
num_words = len(words)
ft_feature_vector = self.featurize(words)
ft_feature_vector = sequence.pad_sequences([ft_feature_vector], maxlen=self._seq_maxlen,
padding='post', truncating='post')
return [self.token_classes[np.argmax(w)] for w in self.fast_predict([ft_feature_vector, 0])[0][0]][:num_words]
def save(self, filepath='bin/token_classifier.model'):
self.model.save(filepath)
def load(self, filepath='bin/token_classifier.model'):
self.model = load_model(filepath)
self._load_embeddings()
self.fast_predict = K.function(
self.model.inputs + [K.learning_phase()],
[self.model.layers[-1].output]
)
def _load_embeddings(self, ft_fpath='bin/fasttext_embeddings-MINIFIED.model'):
ft_embeddings = keyedvectors.KeyedVectors.load(ft_fpath)
self.emb_vocab_ft = dict([('<null>', 0), ('<oov>', 1)] +
[(k, v.index+2) for k, v in ft_embeddings.vocab.items()])
self.emb_weights_ft = np.vstack([np.zeros((1,100)), np.ones((1,100)), | np.array(ft_embeddings.syn0) | numpy.array |
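# A hedged usage sketch for the TokenClassifier above (illustrative only). It assumes the
# annotation JSON and fasttext embedding files referenced in __init__ and _load_embeddings()
# are available on disk; the sample sentence and epoch count are arbitrary choices.
if __name__ == "__main__":
    clf = TokenClassifier(seq_maxlen=100)
    clf.build_nn_model(recurrent_dim=256)
    clf.train(batch_size=256, num_epochs=10, stop_early=True)
    clf.test()
    clf.save("bin/token_classifier.model")
    tags = clf.predict_one(["The", "TiO2", "powder", "was", "annealed", "at", "500", "C", "."])
    print(tags)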
import os
import sys
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("Please declare the environment variable 'SUMO_HOME'")
import traci
import numpy as np
from gym import spaces
class TrafficSignal:
"""
This class represents a Traffic Signal of an intersection
It is responsible for retrieving information and changing the traffic phase using Traci API
"""
def __init__(self, env, ts_id, delta_time, yellow_time, min_green, begin_seconds):
self.id = ts_id
self.env = env
self.delta_time = delta_time
self.yellow_time = yellow_time
self.min_green = min_green
self.green_phase = 0
self.is_yellow = False
self.time_since_last_phase_change = 0
self.next_action_time = begin_seconds
self.last_measure = 0.0
self.last_reward = None
self.build_phases()
self.lanes = list(dict.fromkeys(traci.trafficlight.getControlledLanes(self.id))) # Remove duplicates and keep order
self.out_lanes = [link[0][1] for link in traci.trafficlight.getControlledLinks(self.id) if link]
self.out_lanes = list(set(self.out_lanes))
self.lanes_length = {lane: traci.lane.getLength(lane) for lane in self.lanes}
self.observation_space = spaces.Box(low=np.zeros(self.num_green_phases+1+2*len(self.lanes), dtype=np.float32), high=np.ones(self.num_green_phases+1+2*len(self.lanes), dtype=np.float32))
self.discrete_observation_space = spaces.Tuple((
spaces.Discrete(self.num_green_phases), # Green Phase
spaces.Discrete(2), # Binary variable active if min_green seconds already elapsed
*(spaces.Discrete(10) for _ in range(2*len(self.lanes))) # Density and stopped-density for each lane
))
self.action_space = spaces.Discrete(self.num_green_phases)
def build_phases(self):
phases = traci.trafficlight.getAllProgramLogics(self.id)[0].phases
self.green_phases = list()
self.yellow_dict = dict()
for phase in phases:
state = phase.state
if 'y' not in state and (state.count('r') + state.count('s') != len(state)):
self.green_phases.append(traci.trafficlight.Phase(60, state))
self.num_green_phases = len(self.green_phases)
self.all_phases = self.green_phases.copy()
for i, p1 in enumerate(self.green_phases):
for j, p2 in enumerate(self.green_phases):
if i == j:
continue
yellow_state = ''
for s in range(len(p1.state)):
if (p1.state[s] == 'G' or p1.state[s] == 'g') and (p2.state[s] == 'r' or p2.state[s] == 's'):
yellow_state += 'y'
else:
yellow_state += p1.state[s]
self.yellow_dict[(i,j)] = len(self.all_phases)
self.all_phases.append(traci.trafficlight.Phase(self.yellow_time, yellow_state))
programs = traci.trafficlight.getAllProgramLogics(self.id)
logic = programs[0]
logic.type = 0
logic.phases = self.all_phases
traci.trafficlight.setProgramLogic(self.id, logic)
traci.trafficlight.setRedYellowGreenState(self.id, self.all_phases[0].state)
@property
def phase(self):
return traci.trafficlight.getPhase(self.id)
@property
def time_to_act(self):
return self.next_action_time == self.env.sim_step
def update(self):
self.time_since_last_phase_change += 1
if self.is_yellow and self.time_since_last_phase_change == self.yellow_time:
#traci.trafficlight.setPhase(self.id, self.green_phase)
traci.trafficlight.setRedYellowGreenState(self.id, self.all_phases[self.green_phase].state)
self.is_yellow = False
def set_next_phase(self, new_phase):
"""
Sets what will be the next green phase and sets yellow phase if the next phase is different than the current
:param new_phase: (int) Number between [0..num_green_phases]
"""
if new_phase is not None:
new_phase = int(new_phase)
if new_phase is None or self.green_phase == new_phase or self.time_since_last_phase_change < self.yellow_time + self.min_green:
if self.time_since_last_phase_change < self.yellow_time + self.min_green:
self.next_action_time = max(
[self.env.sim_step + self.min_green + self.yellow_time - self.time_since_last_phase_change,
self.env.sim_step + self.delta_time]
)
else:
#traci.trafficlight.setPhase(self.id, self.green_phase)
traci.trafficlight.setRedYellowGreenState(self.id, self.all_phases[self.green_phase].state)
self.next_action_time = self.env.sim_step + self.delta_time
else:
#traci.trafficlight.setPhase(self.id, self.yellow_dict[(self.green_phase, new_phase)]) # turns yellow
traci.trafficlight.setRedYellowGreenState(self.id, self.all_phases[self.yellow_dict[(self.green_phase, new_phase)]].state)
self.green_phase = new_phase
self.next_action_time = self.env.sim_step + self.delta_time
self.is_yellow = True
self.time_since_last_phase_change = 0
def compute_observation(self):
time_info = self.compute_time_for_observation()
phase_id = [1 if self.phase//2 == i else 0 for i in range(self.num_green_phases)] # one-hot encoding
density = self.get_lanes_density()
queue = self.get_lanes_queue()
distance, speed = self.get_distance_and_speed()
observation = np.array(time_info + phase_id + density + queue + distance + speed, dtype=np.float32)
return observation
def compute_reward(self):
if self.env.reward_type == "waiting_time":
self.last_reward = self._waiting_time_reward()
elif self.env.reward_type == "vehicle_speed":
self.last_reward = self._vehicle_speed_reward()
elif self.env.reward_type == "vehicle_distance":
self.last_reward = self._vehicle_distance_reward()
return self.last_reward
def _pressure_reward(self):
return -self.get_pressure()
def _queue_average_reward(self):
new_average = np.mean(self.get_stopped_vehicles_num())
reward = self.last_measure - new_average
self.last_measure = new_average
return reward
def _queue_reward(self):
return - (sum(self.get_stopped_vehicles_num()))**2
def _waiting_time_reward(self):
ts_wait = sum(self.get_waiting_time_per_lane()) / 100.0
reward = self.last_measure - ts_wait
self.last_measure = ts_wait
return reward
def _waiting_time_reward2(self):
ts_wait = sum(self.get_waiting_time())
self.last_measure = ts_wait
if ts_wait == 0:
reward = 1.0
else:
reward = 1.0/ts_wait
return reward
def _waiting_time_reward3(self):
ts_wait = sum(self.get_waiting_time())
reward = -ts_wait
self.last_measure = ts_wait
return reward
def _vehicle_speed_reward(self):
veh_speed = list()
for lane in self.lanes:
veh_list = traci.lane.getLastStepVehicleIDs(lane)
for veh in veh_list:
speed = traci.vehicle.getSpeed(veh)
speed_norm = speed / 10.0
veh_speed.append(speed_norm)
if len(veh_speed) == 0:
veh_speed_mean = 0.0
else:
veh_speed_mean = np.mean(veh_speed).tolist()
return veh_speed_mean
def _vehicle_distance_reward(self):
veh_dist = list()
for lane in self.lanes:
veh_list = traci.lane.getLastStepVehicleIDs(lane)
for veh in veh_list:
leader = traci.vehicle.getLeader(veh)
if leader is None:
continue
else:
dist_norm = leader[1] / 10.0
veh_dist.append(dist_norm)
if len(veh_dist) == 0:
veh_dist_mean = 0.0
else:
veh_dist_mean = np.mean(veh_dist).tolist()
return veh_dist_mean
def get_waiting_time_per_lane(self):
wait_time_per_lane = []
for lane in self.lanes:
veh_list = traci.lane.getLastStepVehicleIDs(lane)
wait_time = 0.0
for veh in veh_list:
veh_lane = traci.vehicle.getLaneID(veh)
acc = traci.vehicle.getAccumulatedWaitingTime(veh)
if veh not in self.env.vehicles:
self.env.vehicles[veh] = {veh_lane: acc}
else:
self.env.vehicles[veh][veh_lane] = acc - sum([self.env.vehicles[veh][lane] for lane in self.env.vehicles[veh].keys() if lane != veh_lane])
wait_time += self.env.vehicles[veh][veh_lane]
wait_time_per_lane.append(wait_time)
return wait_time_per_lane
def get_pressure(self):
return abs(sum(traci.lane.getLastStepVehicleNumber(lane) for lane in self.lanes) - sum(traci.lane.getLastStepVehicleNumber(lane) for lane in self.out_lanes))
def get_out_lanes_density(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepVehicleNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.out_lanes]
def get_lanes_density(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepVehicleNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.lanes]
def get_lanes_queue(self):
vehicle_size_min_gap = 7.5 # 5(vehSize) + 2.5(minGap)
return [min(1, traci.lane.getLastStepHaltingNumber(lane) / (traci.lane.getLength(lane) / vehicle_size_min_gap)) for lane in self.lanes]
def get_total_queued(self):
return sum([traci.lane.getLastStepHaltingNumber(lane) for lane in self.lanes])
def _get_veh_list(self):
veh_list = []
for lane in self.lanes:
veh_list += traci.lane.getLastStepVehicleIDs(lane)
return veh_list
def get_distance_and_speed(self):
veh_dist_mean = list()
veh_speed_mean = list()
for lane in self.lanes:
veh_dist = list()
veh_speed = list()
veh_list = traci.lane.getLastStepVehicleIDs(lane)
for veh in veh_list:
speed = traci.vehicle.getSpeed(veh)
max_speed = traci.vehicle.getMaxSpeed(veh)
speed_norm = speed / max_speed
veh_speed.append(speed_norm)
leader = traci.vehicle.getLeader(veh)
if leader is None:
continue
else:
standard_len = traci.lane.getLength(lane)
dist_norm = leader[1] / standard_len
if abs(dist_norm) > 1.0:
dist_norm = 1.0
veh_dist.append(dist_norm)
if len(veh_dist) == 0:
veh_dist_mean.append(1.0)
else:
veh_dist_mean.append( | np.mean(veh_dist) | numpy.mean |
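# A standalone sketch of the yellow-state construction in build_phases() above: every signal
# index that goes from 'G'/'g' in the first green phase to 'r'/'s' in the second one shows 'y'
# during the transition, while all other indices keep their state from the first phase.
def yellow_state_between(state_from, state_to):
    out = []
    for a, b in zip(state_from, state_to):
        if a in "Gg" and b in "rs":
            out.append("y")
        else:
            out.append(a)
    return "".join(out)

# e.g. yellow_state_between("GGrr", "rrGG") -> "yyrr"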
# Copyright (c) 2017 The Verde Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
#
# This code is part of the Fatiando a Terra project (https://www.fatiando.org)
#
"""
Functions for generating and manipulating coordinates.
"""
import warnings
import numpy as np
from sklearn.utils import check_random_state
from .base.utils import n_1d_arrays, check_coordinates
from .utils import kdtree
def check_region(region):
"""
Check that the given region dimensions are valid.
For example, the west limit should not be greater than the east and there
must be exactly 4 values given.
Parameters
----------
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
Raises
------
ValueError
If the region doesn't have exactly 4 entries, W > E, or S > N.
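Examples
--------
A valid region passes silently, while an inverted one raises:

>>> check_region((0, 10, -10, -5))
>>> try:
...     check_region((10, 0, -10, -5))
... except ValueError:
...     print("W > E was rejected")
W > E was rejected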
"""
if len(region) != 4:
raise ValueError("Invalid region '{}'. Only 4 values allowed.".format(region))
w, e, s, n = region
if w > e:
raise ValueError(
"Invalid region '{}' (W, E, S, N). Must have W =< E. ".format(region)
+ "If working with geographic coordinates, don't forget to match geographic"
+ " region with coordinates using 'verde.longitude_continuity'."
)
if s > n:
raise ValueError(
"Invalid region '{}' (W, E, S, N). Must have S =< N.".format(region)
)
def get_region(coordinates):
"""
Get the bounding region of the given coordinates.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...). Only easting and
northing will be used, all subsequent coordinates will be ignored.
Returns
-------
region : tuple = (W, E, S, N)
The boundaries of a given region in Cartesian or geographic
coordinates.
Examples
--------
>>> coords = grid_coordinates((0, 1, -10, -6), shape=(10, 10))
>>> print(get_region(coords))
(0.0, 1.0, -10.0, -6.0)
"""
easting, northing = coordinates[:2]
region = (np.min(easting), np.max(easting), np.min(northing), np.max(northing))
return region
def pad_region(region, pad):
"""
Extend the borders of a region by the given amount.
Parameters
----------
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
pad : float or tuple = (pad_north, pad_east)
The amount of padding to add to the region. If it's a single number,
add this to all boundaries of region equally. If it's a tuple of
numbers, then will add different padding to the North-South and
East-West dimensions.
Returns
-------
padded_region : list = [W, E, S, N]
The padded region.
Examples
--------
>>> pad_region((0, 1, -5, -3), 1)
(-1, 2, -6, -2)
>>> pad_region((0, 1, -5, -3), (3, 2))
(-2, 3, -8, 0)
"""
if np.isscalar(pad):
pad = (pad, pad)
w, e, s, n = region
padded = (w - pad[1], e + pad[1], s - pad[0], n + pad[0])
return padded
def scatter_points(region, size, random_state=None, extra_coords=None):
"""
Generate the coordinates for a random scatter of points.
The points are drawn from a uniform distribution.
Parameters
----------
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
size : int
The number of points to generate.
random_state : numpy.random.RandomState or an int seed
A random number generator used to define the state of the random
permutations. Use a fixed seed to make sure computations are
reproducible. Use ``None`` to choose a seed automatically (resulting in
different numbers with each run).
extra_coords : None, scalar, or list
If not None, then value(s) of extra coordinate arrays to be generated.
These extra arrays will have the same *size* as the others but will
contain a constant value. Will generate an extra array per value given
in *extra_coords*. Use this to generate arrays of constant heights or
times, for example, that might be needed to evaluate a gridder.
Returns
-------
coordinates : tuple of arrays
Arrays with coordinates of each point in the grid. Each array contains
values for a dimension in the order: easting, northing, vertical, and
any extra dimensions given in *extra_coords*. All arrays will have the
specified *size*.
Examples
--------
>>> # We'll use a seed value to ensure that the same points are generated
>>> # every time.
>>> easting, northing = scatter_points((0, 10, -2, -1), 4, random_state=0)
>>> print(', '.join(['{:.4f}'.format(i) for i in easting]))
5.4881, 7.1519, 6.0276, 5.4488
>>> print(', '.join(['{:.4f}'.format(i) for i in northing]))
-1.5763, -1.3541, -1.5624, -1.1082
>>> easting, northing, height = scatter_points(
... (0, 10, -2, -1), 4, random_state=0, extra_coords=12
... )
>>> print(height)
[12. 12. 12. 12.]
>>> easting, northing, height, time = scatter_points(
... (0, 10, -2, -1), 4, random_state=0, extra_coords=[12, 1986])
>>> print(height)
[12. 12. 12. 12.]
>>> print(time)
[1986. 1986. 1986. 1986.]
See also
--------
grid_coordinates : Generate coordinates for each point on a regular grid
profile_coordinates : Coordinates for a profile between two points
"""
check_region(region)
random = check_random_state(random_state)
coordinates = []
for lower, upper in np.array(region).reshape((len(region) // 2, 2)):
coordinates.append(random.uniform(lower, upper, size))
if extra_coords is not None:
for value in np.atleast_1d(extra_coords):
coordinates.append(np.ones_like(coordinates[0]) * value)
return tuple(coordinates)
def grid_coordinates(
region,
shape=None,
spacing=None,
adjust="spacing",
pixel_register=False,
extra_coords=None,
):
"""
Generate the coordinates for each point on a regular grid.
The grid can be specified by either the number of points in each dimension
(the *shape*) or by the grid node spacing.
If the given region is not divisible by the desired spacing, either the
region or the spacing will have to be adjusted. By default, the spacing
will be rounded to the nearest multiple. Optionally, the East and North
boundaries of the region can be adjusted to fit the exact spacing given.
See the examples below.
Parameters
----------
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
shape : tuple = (n_north, n_east) or None
The number of points in the South-North and West-East directions,
respectively.
spacing : float, tuple = (s_north, s_east), or None
The grid spacing in the South-North and West-East directions,
respectively. A single value means that the spacing is equal in both
directions.
adjust : {'spacing', 'region'}
Whether to adjust the spacing or the region if required. Ignored if
*shape* is given instead of *spacing*. Defaults to adjusting the
spacing.
pixel_register : bool
If True, the coordinates will refer to the center of each grid pixel
instead of the grid lines. In practice, this means that there will be
one less element per dimension of the grid when compared to grid line
registered (only if given *spacing* and not *shape*). Default is False.
extra_coords : None, scalar, or list
If not None, then value(s) of extra coordinate arrays to be generated.
These extra arrays will have the same *shape* as the others but will
contain a constant value. Will generate an extra array per value given
in *extra_coords*. Use this to generate arrays of constant heights or
times, for example, that might be needed to evaluate a gridder.
Returns
-------
coordinates : tuple of arrays
Arrays with coordinates of each point in the grid. Each array contains
values for a dimension in the order: easting, northing, vertical, and
any extra dimensions given in *extra_coords*. All arrays will have the
specified *shape*.
Examples
--------
>>> east, north = grid_coordinates(region=(0, 5, 0, 10), shape=(5, 3))
>>> print(east.shape, north.shape)
(5, 3) (5, 3)
>>> # Lower printing precision to shorten this example
>>> import numpy as np; np.set_printoptions(precision=1, suppress=True)
>>> print(east)
[[0. 2.5 5. ]
[0. 2.5 5. ]
[0. 2.5 5. ]
[0. 2.5 5. ]
[0. 2.5 5. ]]
>>> print(north)
[[ 0. 0. 0. ]
[ 2.5 2.5 2.5]
[ 5. 5. 5. ]
[ 7.5 7.5 7.5]
[10. 10. 10. ]]
The grid can also be specified using the spacing between points instead of
the shape:
>>> east, north = grid_coordinates(region=(0, 5, 0, 10), spacing=2.5)
>>> print(east.shape, north.shape)
(5, 3) (5, 3)
>>> print(east)
[[0. 2.5 5. ]
[0. 2.5 5. ]
[0. 2.5 5. ]
[0. 2.5 5. ]
[0. 2.5 5. ]]
>>> print(north)
[[ 0. 0. 0. ]
[ 2.5 2.5 2.5]
[ 5. 5. 5. ]
[ 7.5 7.5 7.5]
[10. 10. 10. ]]
The spacing can be different for northing and easting, respectively:
>>> east, north = grid_coordinates(region=(-5, 1, 0, 10), spacing=(2.5, 1))
>>> print(east.shape, north.shape)
(5, 7) (5, 7)
>>> print(east)
[[-5. -4. -3. -2. -1. 0. 1.]
[-5. -4. -3. -2. -1. 0. 1.]
[-5. -4. -3. -2. -1. 0. 1.]
[-5. -4. -3. -2. -1. 0. 1.]
[-5. -4. -3. -2. -1. 0. 1.]]
>>> print(north)
[[ 0. 0. 0. 0. 0. 0. 0. ]
[ 2.5 2.5 2.5 2.5 2.5 2.5 2.5]
[ 5. 5. 5. 5. 5. 5. 5. ]
[ 7.5 7.5 7.5 7.5 7.5 7.5 7.5]
[10. 10. 10. 10. 10. 10. 10. ]]
If the region can't be divided into the desired spacing, the spacing will
be adjusted to conform to the region:
>>> east, north = grid_coordinates(region=(-5, 0, 0, 5), spacing=2.6)
>>> print(east.shape, north.shape)
(3, 3) (3, 3)
>>> print(east)
[[-5. -2.5 0. ]
[-5. -2.5 0. ]
[-5. -2.5 0. ]]
>>> print(north)
[[0. 0. 0. ]
[2.5 2.5 2.5]
[5. 5. 5. ]]
>>> east, north = grid_coordinates(region=(-5, 0, 0, 5), spacing=2.4)
>>> print(east.shape, north.shape)
(3, 3) (3, 3)
>>> print(east)
[[-5. -2.5 0. ]
[-5. -2.5 0. ]
[-5. -2.5 0. ]]
>>> print(north)
[[0. 0. 0. ]
[2.5 2.5 2.5]
[5. 5. 5. ]]
You can choose to adjust the East and North boundaries of the region
instead:
>>> east, north = grid_coordinates(region=(-5, 0, 0, 5), spacing=2.6,
... adjust='region')
>>> print(east.shape, north.shape)
(3, 3) (3, 3)
>>> print(east)
[[-5. -2.4 0.2]
[-5. -2.4 0.2]
[-5. -2.4 0.2]]
>>> print(north)
[[0. 0. 0. ]
[2.6 2.6 2.6]
[5.2 5.2 5.2]]
>>> east, north = grid_coordinates(region=(-5, 0, 0, 5), spacing=2.4,
... adjust='region')
>>> print(east.shape, north.shape)
(3, 3) (3, 3)
>>> print(east)
[[-5. -2.6 -0.2]
[-5. -2.6 -0.2]
[-5. -2.6 -0.2]]
>>> print(north)
[[0. 0. 0. ]
[2.4 2.4 2.4]
[4.8 4.8 4.8]]
We can optionally generate coordinates for the center of each grid pixel
instead of the corner (default):
>>> east, north = grid_coordinates(region=(0, 5, 0, 10), spacing=2.5,
... pixel_register=True)
>>> # Raise the printing precision for this example
>>> np.set_printoptions(precision=2, suppress=True)
>>> # Notice that the shape is 1 less than when pixel_register=False
>>> print(east.shape, north.shape)
(4, 2) (4, 2)
>>> print(east)
[[1.25 3.75]
[1.25 3.75]
[1.25 3.75]
[1.25 3.75]]
>>> print(north)
[[1.25 1.25]
[3.75 3.75]
[6.25 6.25]
[8.75 8.75]]
>>> east, north = grid_coordinates(region=(0, 5, 0, 10), shape=(4, 2),
... pixel_register=True)
>>> print(east)
[[1.25 3.75]
[1.25 3.75]
[1.25 3.75]
[1.25 3.75]]
>>> print(north)
[[1.25 1.25]
[3.75 3.75]
[6.25 6.25]
[8.75 8.75]]
Generate arrays for other coordinates that have a constant value:
>>> east, north, height = grid_coordinates(
... region=(0, 5, 0, 10), spacing=2.5, extra_coords=57
... )
>>> print(east.shape, north.shape, height.shape)
(5, 3) (5, 3) (5, 3)
>>> print(height)
[[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]]
>>> east, north, height, time = grid_coordinates(
... region=(0, 5, 0, 10), spacing=2.5, extra_coords=[57, 0.1]
... )
>>> print(east.shape, north.shape, height.shape, time.shape)
(5, 3) (5, 3) (5, 3) (5, 3)
>>> print(height)
[[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]
[57. 57. 57.]]
>>> print(time)
[[0.1 0.1 0.1]
[0.1 0.1 0.1]
[0.1 0.1 0.1]
[0.1 0.1 0.1]
[0.1 0.1 0.1]]
See also
--------
scatter_points : Generate the coordinates for a random scatter of points
profile_coordinates : Coordinates for a profile between two points
"""
check_region(region)
if shape is not None and spacing is not None:
raise ValueError("Both grid shape and spacing provided. Only one is allowed.")
if shape is None and spacing is None:
raise ValueError("Either a grid shape or a spacing must be provided.")
if spacing is not None:
shape, region = spacing_to_shape(region, spacing, adjust)
elif pixel_register:
# Start by generating grid-line registered coordinates and then shift
# them to the center of each pixel. When given a shape, we need 1 extra
# point per dimension because the last point is discarded during the
# shift.
shape = tuple(i + 1 for i in shape)
east_lines = np.linspace(region[0], region[1], shape[1])
north_lines = np.linspace(region[2], region[3], shape[0])
if pixel_register:
east_lines = east_lines[:-1] + (east_lines[1] - east_lines[0]) / 2
north_lines = north_lines[:-1] + (north_lines[1] - north_lines[0]) / 2
coordinates = list(np.meshgrid(east_lines, north_lines))
if extra_coords is not None:
for value in np.atleast_1d(extra_coords):
coordinates.append(np.ones_like(coordinates[0]) * value)
return tuple(coordinates)
def spacing_to_shape(region, spacing, adjust):
"""
Convert the grid spacing to a grid shape.
Adjusts the spacing or the region if the desired spacing is not a multiple
of the grid dimensions.
Parameters
----------
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
spacing : float, tuple = (s_north, s_east), or None
The grid spacing in the South-North and West-East directions,
respectively. A single value means that the spacing is equal in both
directions.
adjust : {'spacing', 'region'}
Whether to adjust the spacing or the region if required. Ignored if
*shape* is given instead of *spacing*. Defaults to adjusting the
spacing.
Returns
-------
shape, region : tuples
The calculated shape and region that best fits the desired spacing.
Spacing or region may be adjusted.
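Examples
--------
A short sketch of the two adjustment modes (the values follow from the
rounding rules above):

>>> shape, region = spacing_to_shape((0, 10, -5, 0), 2.5, "spacing")
>>> print(shape, region)
(3, 5) (0, 10, -5, 0)
>>> shape, region = spacing_to_shape((0, 10, -5, 0), 2.6, "region")
>>> print(shape)
(3, 5)
>>> print(", ".join("{:.1f}".format(i) for i in region))
0.0, 10.4, -5.0, 0.2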
"""
if adjust not in ["spacing", "region"]:
raise ValueError(
"Invalid value for *adjust* '{}'. Should be 'spacing' or 'region'".format(
adjust
)
)
spacing = np.atleast_1d(spacing)
if len(spacing) == 1:
deast = dnorth = spacing[0]
elif len(spacing) == 2:
dnorth, deast = spacing
else:
raise ValueError(
"Only two values allowed for grid spacing: {}".format(str(spacing))
)
w, e, s, n = region
# Add 1 to get the number of nodes, not segments
nnorth = int(round((n - s) / dnorth)) + 1
neast = int(round((e - w) / deast)) + 1
if adjust == "region":
# The shape is the same but we adjust the region so that the spacing
# isn't altered when we do the linspace.
n = s + (nnorth - 1) * dnorth
e = w + (neast - 1) * deast
return (nnorth, neast), (w, e, s, n)
def shape_to_spacing(region, shape, pixel_register=False):
"""
Calculate the spacing of a grid given region and shape.
Parameters
----------
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
shape : tuple = (n_north, n_east) or None
The number of points in the South-North and West-East directions,
respectively.
pixel_register : bool
If True, the coordinates will refer to the center of each grid pixel
instead of the grid lines. In practice, this means that there will be
one less element per dimension of the grid when compared to grid line
registered (only if given *spacing* and not *shape*). Default is False.
Returns
-------
spacing : tuple = (s_north, s_east)
The grid spacing in the South-North and West-East directions,
respectively.
Examples
--------
>>> spacing = shape_to_spacing([0, 10, -5, 1], (7, 11))
>>> print("{:.1f}, {:.1f}".format(*spacing))
1.0, 1.0
>>> spacing = shape_to_spacing([0, 10, -5, 1], (14, 11))
>>> print("{:.1f}, {:.1f}".format(*spacing))
0.5, 1.0
>>> spacing = shape_to_spacing([0, 10, -5, 1], (7, 21))
>>> print("{:.1f}, {:.1f}".format(*spacing))
1.0, 0.5
>>> spacing = shape_to_spacing(
... [-0.5, 10.5, -5.5, 1.5], (7, 11), pixel_register=True,
... )
>>> print("{:.1f}, {:.1f}".format(*spacing))
1.0, 1.0
>>> spacing = shape_to_spacing(
... [-0.25, 10.25, -5.5, 1.5], (7, 21), pixel_register=True,
... )
>>> print("{:.1f}, {:.1f}".format(*spacing))
1.0, 0.5
"""
spacing = []
for i, n_points in enumerate(reversed(shape)):
if not pixel_register:
n_points -= 1
spacing.append((region[2 * i + 1] - region[2 * i]) / n_points)
return tuple(reversed(spacing))
def profile_coordinates(point1, point2, size, extra_coords=None):
"""
Coordinates for a profile along a straight line between two points.
Parameters
----------
point1 : tuple or list
``(easting, northing)`` West-East and South-North coordinates of the
first point, respectively.
point2 : tuple or list
``(easting, northing)`` West-East and South-North coordinates of the
second point, respectively.
size : int
Number of points to sample along the line.
extra_coords : None, scalar, or list
If not None, then value(s) of extra coordinate arrays to be generated.
These extra arrays will have the same *size* as the others but will
contain a constant value. Will generate an extra array per value given
in *extra_coords*. Use this to generate arrays of constant heights or
times, for example, that might be needed to evaluate a gridder.
Returns
-------
coordinates, distances : tuple and 1d array
The coordinates of points along the straight line and the distances
from the first point.
Examples
--------
>>> (east, north), dist = profile_coordinates((1, 10), (1, 20), size=11)
>>> print('easting:', ', '.join('{:.1f}'.format(i) for i in east))
easting: 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
>>> print('northing:', ', '.join('{:.1f}'.format(i) for i in north))
northing: 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0
>>> print('distance:', ', '.join('{:.1f}'.format(i) for i in dist))
distance: 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0
>>> (east, north, height), dist = profile_coordinates(
... (1, 10), (1, 20), size=11, extra_coords=35)
>>> print(height)
[35. 35. 35. 35. 35. 35. 35. 35. 35. 35. 35.]
>>> (east, north, height, time), dist = profile_coordinates(
... (1, 10), (1, 20), size=11, extra_coords=[35, 0.1])
>>> print(height)
[35. 35. 35. 35. 35. 35. 35. 35. 35. 35. 35.]
>>> print(time)
[0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1]
See also
--------
scatter_points : Generate the coordinates for a random scatter of points
grid_coordinates : Generate coordinates for each point on a regular grid
"""
if size <= 0:
raise ValueError("Invalid profile size '{}'. Must be > 0.".format(size))
diffs = [i - j for i, j in zip(point2, point1)]
separation = np.hypot(*diffs)
distances = np.linspace(0, separation, size)
angle = np.arctan2(*reversed(diffs))
coordinates = [
point1[0] + distances * np.cos(angle),
point1[1] + distances * np.sin(angle),
]
if extra_coords is not None:
for value in np.atleast_1d(extra_coords):
coordinates.append(np.ones_like(coordinates[0]) * value)
return tuple(coordinates), distances
def inside(coordinates, region):
"""
Determine which points fall inside a given region.
Points at the boundary are counted as being inside.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...). Only easting and
northing will be used, all subsequent coordinates will be ignored.
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates.
Returns
-------
are_inside : array of booleans
An array of booleans with the same shape as the input coordinate
arrays. Will be ``True`` if the respective coordinates fall inside the
area, ``False`` otherwise.
Examples
--------
>>> import numpy as np
>>> east = np.array([1, 2, 3, 4, 5, 6])
>>> north = np.array([10, 11, 12, 13, 14, 15])
>>> region = [2.5, 5.5, 12, 15]
>>> print(inside((east, north), region))
[False False True True True False]
>>> # This also works for 2D-arrays
>>> east = np.array([[1, 1, 1],
... [2, 2, 2],
... [3, 3, 3]])
>>> north = np.array([[5, 7, 9],
... [5, 7, 9],
... [5, 7, 9]])
>>> region = [0.5, 2.5, 6, 9]
>>> print(inside((east, north), region))
[[False True True]
[False True True]
[False False False]]
Geographic coordinates are also supported using
:func:`verde.longitude_continuity`:
>>> from verde import longitude_continuity
>>> east, north = grid_coordinates([0, 350, -20, 20], spacing=10)
>>> region = [-10, 10, -10, 10]
>>> are_inside = inside(*longitude_continuity([east, north], region))
>>> print(east[are_inside])
[ 0. 10. 350. 0. 10. 350. 0. 10. 350.]
>>> print(north[are_inside])
[-10. -10. -10. 0. 0. 0. 10. 10. 10.]
"""
check_region(region)
w, e, s, n = region
easting, northing = coordinates[:2]
# Allocate temporary arrays to minimize memory allocation overhead
out = np.empty_like(easting, dtype=bool)
tmp = tuple(np.empty_like(easting, dtype=bool) for i in range(4))
# Using the logical functions is a lot faster than & > < for some reason
# Plus, this way avoids repeated allocation of intermediate arrays
in_we = np.logical_and(
np.greater_equal(easting, w, out=tmp[0]),
np.less_equal(easting, e, out=tmp[1]),
out=tmp[2],
)
in_ns = np.logical_and(
np.greater_equal(northing, s, out=tmp[0]),
np.less_equal(northing, n, out=tmp[1]),
out=tmp[3],
)
are_inside = np.logical_and(in_we, in_ns, out=out)
return are_inside
def block_split(coordinates, spacing=None, adjust="spacing", region=None, shape=None):
"""
Split a region into blocks and label points according to where they fall.
The labels are integers corresponding to the index of the block. Also
returns the coordinates of the center of each block (following the same
index as the labels).
The size of the blocks can be specified by the *spacing* parameter.
Alternatively, the number of blocks in the South-North and West-East
directions can be specified using the *shape* parameter.
.. note::
If installed, package ``pykdtree`` will be used instead of
:class:`scipy.spatial.cKDTree` for better performance.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...). Only easting and
northing will be used, all subsequent coordinates will be ignored.
shape : tuple = (n_north, n_east) or None
The number of blocks in the South-North and West-East directions,
respectively.
spacing : float, tuple = (s_north, s_east), or None
The block size in the South-North and West-East directions,
respectively. A single value means that the size is equal in both
directions.
adjust : {'spacing', 'region'}
Whether to adjust the spacing or the region if required. Ignored if
*shape* is given instead of *spacing*. Defaults to adjusting the
spacing.
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates. If no region is given, will use the bounding region of
the given points.
Returns
-------
block_coordinates : tuple of arrays
(easting, northing) arrays with the coordinates of the center of each
block.
labels : array
integer label for each data point. The label is the index of the block
to which that point belongs.
See also
--------
BlockReduce : Apply a reduction operation to the data in blocks (windows).
rolling_window : Select points on a rolling (moving) window.
expanding_window : Select points on windows of changing size.
Examples
--------
>>> from verde import grid_coordinates
>>> coords = grid_coordinates((-5, 0, 5, 10), spacing=1)
>>> block_coords, labels = block_split(coords, spacing=2.5)
>>> for coord in block_coords:
... print(', '.join(['{:.2f}'.format(i) for i in coord]))
-3.75, -1.25, -3.75, -1.25
6.25, 6.25, 8.75, 8.75
>>> print(labels.reshape(coords[0].shape))
[[0 0 0 1 1 1]
[0 0 0 1 1 1]
[0 0 0 1 1 1]
[2 2 2 3 3 3]
[2 2 2 3 3 3]
[2 2 2 3 3 3]]
>>> # Use the shape instead of the block size
>>> block_coords, labels = block_split(coords, shape=(4, 2))
>>> for coord in block_coords:
... print(', '.join(['{:.3f}'.format(i) for i in coord]))
-3.750, -1.250, -3.750, -1.250, -3.750, -1.250, -3.750, -1.250
5.625, 5.625, 6.875, 6.875, 8.125, 8.125, 9.375, 9.375
>>> print(labels.reshape(coords[0].shape))
[[0 0 0 1 1 1]
[0 0 0 1 1 1]
[2 2 2 3 3 3]
[4 4 4 5 5 5]
[6 6 6 7 7 7]
[6 6 6 7 7 7]]
"""
# Select the coordinates after checking to make sure indexing will still
# work on the ignored coordinates.
coordinates = check_coordinates(coordinates)[:2]
if region is None:
region = get_region(coordinates)
block_coords = grid_coordinates(
region, spacing=spacing, shape=shape, adjust=adjust, pixel_register=True
)
tree = kdtree(block_coords)
labels = tree.query(np.transpose(n_1d_arrays(coordinates, 2)))[1]
return n_1d_arrays(block_coords, len(block_coords)), labels
def rolling_window(
coordinates, size, spacing=None, shape=None, region=None, adjust="spacing"
):
"""
Select points on a rolling (moving) window.
A window of the given size is moved across the region at a given step
(specified by *spacing* or *shape*). Returns the indices of points falling
inside each window step. You can use the indices to select points falling
inside a given window.
The size of the step when moving the windows can be specified by the
*spacing* parameter. Alternatively, the number of windows in the
South-North and West-East directions can be specified using the *shape*
parameter. **One of the two must be given.**
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...). Only easting and
northing will be used, all subsequent coordinates will be ignored.
size : float
The size of the windows. Units should match the units of *coordinates*.
spacing : float, tuple = (s_north, s_east), or None
The step between window centers in the South-North and West-East
directions, respectively. A single value means that the step is equal
in both directions.
shape : tuple = (n_north, n_east) or None
The number of windows in the South-North and West-East directions,
respectively.
region : list = [W, E, S, N]
The boundaries of a given region in Cartesian or geographic
coordinates. If no region is given, will use the bounding region of
the given points.
adjust : {'spacing', 'region'}
Whether to adjust the spacing or the region if required. Ignored if
*shape* is given instead of *spacing*. Defaults to adjusting the
spacing.
Returns
-------
window_coordinates : tuple of arrays
Coordinate arrays for the center of each window.
indices : array
Each element of the array corresponds the indices of points falling
inside a window. The array will have the same shape as the
*window_coordinates*. Use the array elements to index the coordinates
for each window. The indices will depend on the number of dimensions in
the input coordinates. For example, if the coordinates are 2D arrays,
each window will contain indices for 2 dimensions (row, column).
See also
--------
block_split : Split a region into blocks and label points accordingly.
expanding_window : Select points on windows of changing size.
Examples
--------
Generate a set of sample coordinates on a grid and determine the indices
of points for each rolling window:
>>> from verde import grid_coordinates
>>> coords = grid_coordinates((-5, -1, 6, 10), spacing=1)
>>> print(coords[0])
[[-5. -4. -3. -2. -1.]
[-5. -4. -3. -2. -1.]
[-5. -4. -3. -2. -1.]
[-5. -4. -3. -2. -1.]
[-5. -4. -3. -2. -1.]]
>>> print(coords[1])
[[ 6. 6. 6. 6. 6.]
[ 7. 7. 7. 7. 7.]
[ 8. 8. 8. 8. 8.]
[ 9. 9. 9. 9. 9.]
[10. 10. 10. 10. 10.]]
>>> # Get the rolling window indices
>>> window_coords, indices = rolling_window(coords, size=2, spacing=2)
>>> # Window coordinates will be 2D arrays. Their shape is the number of
>>> # windows in each dimension
>>> print(window_coords[0].shape, window_coords[1].shape)
(2, 2) (2, 2)
>>> # These are the easting and northing coordinates for the center of
>>> # each rolling window
>>> for coord in window_coords:
... print(coord)
[[-4. -2.]
[-4. -2.]]
[[7. 7.]
[9. 9.]]
>>> # The indices of points falling on each window will have the same shape
>>> # as the window center coordinates
>>> print(indices.shape)
(2, 2)
>>> # The points in the first window. Indices are 2D positions because the
>>> # coordinate arrays are 2D.
>>> print(len(indices[0, 0]))
2
>>> for dimension in indices[0, 0]:
... print(dimension)
[0 0 0 1 1 1 2 2 2]
[0 1 2 0 1 2 0 1 2]
>>> for dimension in indices[0, 1]:
... print(dimension)
[0 0 0 1 1 1 2 2 2]
[2 3 4 2 3 4 2 3 4]
>>> for dimension in indices[1, 0]:
... print(dimension)
[2 2 2 3 3 3 4 4 4]
[0 1 2 0 1 2 0 1 2]
>>> for dimension in indices[1, 1]:
... print(dimension)
[2 2 2 3 3 3 4 4 4]
[2 3 4 2 3 4 2 3 4]
>>> # To get the coordinates for each window, use indexing
>>> print(coords[0][indices[0, 0]])
[-5. -4. -3. -5. -4. -3. -5. -4. -3.]
>>> print(coords[1][indices[0, 0]])
[6. 6. 6. 7. 7. 7. 8. 8. 8.]
If the coordinates are 1D, the indices will also be 1D:
>>> coords1d = [coord.ravel() for coord in coords]
>>> window_coords, indices = rolling_window(coords1d, size=2, spacing=2)
>>> print(len(indices[0, 0]))
1
>>> print(indices[0, 0][0])
[ 0 1 2 5 6 7 10 11 12]
>>> print(indices[0, 1][0])
[ 2 3 4 7 8 9 12 13 14]
>>> print(indices[1, 0][0])
[10 11 12 15 16 17 20 21 22]
>>> print(indices[1, 1][0])
[12 13 14 17 18 19 22 23 24]
>>> # The returned indices can be used in the same way as before
>>> print(coords1d[0][indices[0, 0]])
[-5. -4. -3. -5. -4. -3. -5. -4. -3.]
>>> print(coords1d[1][indices[0, 0]])
[6. 6. 6. 7. 7. 7. 8. 8. 8.]
By default, the windows will span the entire data region. You can also
control the specific region you'd like the windows to cover:
>>> # Coordinates on a larger region but with the same spacing as before
>>> coords = grid_coordinates((-10, 5, 0, 20), spacing=1)
>>> # Get the rolling window indices but limited to the region from before
>>> window_coords, indices = rolling_window(
... coords, size=2, spacing=2, region=(-5, -1, 6, 10),
... )
>>> # The windows should still be in the same place as before
>>> for coord in window_coords:
... print(coord)
[[-4. -2.]
[-4. -2.]]
[[7. 7.]
[9. 9.]]
>>> # And indexing the coordinates should also provide the same result
>>> print(coords[0][indices[0, 0]])
[-5. -4. -3. -5. -4. -3. -5. -4. -3.]
>>> print(coords[1][indices[0, 0]])
[6. 6. 6. 7. 7. 7. 8. 8. 8.]
Only the first 2 coordinates are considered (assumed to be the horizontal
ones). All others will be ignored by the function.
>>> coords = grid_coordinates((-5, -1, 6, 10), spacing=1, extra_coords=20)
>>> print(coords[2])
[[20. 20. 20. 20. 20.]
[20. 20. 20. 20. 20.]
[20. 20. 20. 20. 20.]
[20. 20. 20. 20. 20.]
[20. 20. 20. 20. 20.]]
>>> window_coords, indices = rolling_window(coords, size=2, spacing=2)
>>> # The windows would be the same in this case since coords[2] is ignored
>>> for coord in window_coords:
... print(coord)
[[-4. -2.]
[-4. -2.]]
[[7. 7.]
[9. 9.]]
>>> print(indices.shape)
(2, 2)
>>> for dimension in indices[0, 0]:
... print(dimension)
[0 0 0 1 1 1 2 2 2]
[0 1 2 0 1 2 0 1 2]
>>> for dimension in indices[0, 1]:
... print(dimension)
[0 0 0 1 1 1 2 2 2]
[2 3 4 2 3 4 2 3 4]
>>> for dimension in indices[1, 0]:
... print(dimension)
[2 2 2 3 3 3 4 4 4]
[0 1 2 0 1 2 0 1 2]
>>> for dimension in indices[1, 1]:
... print(dimension)
[2 2 2 3 3 3 4 4 4]
[2 3 4 2 3 4 2 3 4]
>>> # The indices can still be used with the third coordinate
>>> print(coords[0][indices[0, 0]])
[-5. -4. -3. -5. -4. -3. -5. -4. -3.]
>>> print(coords[1][indices[0, 0]])
[6. 6. 6. 7. 7. 7. 8. 8. 8.]
>>> print(coords[2][indices[0, 0]])
[20. 20. 20. 20. 20. 20. 20. 20. 20.]
"""
# Check if shape or spacing were passed
if shape is None and spacing is None:
raise ValueError("Either a shape or a spacing must be provided.")
# Select the coordinates after checking to make sure indexing will still
# work on the ignored coordinates.
coordinates = check_coordinates(coordinates)[:2]
if region is None:
region = get_region(coordinates)
# Check if window size is bigger than the minimum dimension of the region
region_min_width = min(region[1] - region[0], region[3] - region[2])
if region_min_width < size:
raise ValueError(
"Window size '{}' is larger ".format(size)
+ "than dimensions of the region '{}'.".format(region)
)
# Calculate the region spanning the centers of the rolling windows
window_region = [
dimension + (-1) ** (i % 2) * size / 2 for i, dimension in enumerate(region)
]
_check_rolling_window_overlap(window_region, size, shape, spacing)
centers = grid_coordinates(
window_region, spacing=spacing, shape=shape, adjust=adjust
)
# pykdtree doesn't support query_ball_point yet and we need that
tree = kdtree(coordinates, use_pykdtree=False)
# Coordinates must be transposed because the kd-tree wants them as columns
# of a matrix
# Use p=inf (infinity norm) to get square windows instead of circular ones
indices1d = tree.query_ball_point(
np.transpose(n_1d_arrays(centers, 2)), r=size / 2, p=np.inf
)
# Make the indices array the same shape as the center coordinates array.
# That preserves the information of the number of windows in each
# dimension. Need to first create an empty array of object type because
# otherwise numpy tries to use the index tuples as dimensions (even if
# given ndim=1 explicitly). Can't make it 1D and then reshape because the
# reshape is ignored for some reason. The workaround is to create the array
# with the correct shape and assign the values to a raveled view of the
# array.
indices = np.empty(centers[0].shape, dtype="object")
# Need to convert the indices to int arrays because unravel_index doesn't
# like empty lists but can handle empty integer arrays in case a window has
# no points inside it.
indices.ravel()[:] = [
np.unravel_index(np.array(i, dtype="int"), shape=coordinates[0].shape)
for i in indices1d
]
return centers, indices
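# Illustrative sketch (not part of the original module): the object-dtype
# workaround described in the comments above, shown on toy data with an
# explicit loop for clarity. Each cell of the 2D holder keeps a whole tuple of
# index arrays, instead of numpy trying to turn the tuples into extra
# dimensions.
def _demo_object_index_array():
    window_indices = [
        (np.array([0, 1]), np.array([2, 3])),
        (np.array([4]), np.array([5])),
        (np.array([], dtype="int"), np.array([], dtype="int")),
        (np.array([6, 7, 8]), np.array([0, 1, 2])),
    ]
    holder = np.empty((2, 2), dtype="object")
    flat = holder.ravel()
    for position, index_tuple in enumerate(window_indices):
        flat[position] = index_tuple
    return holder  # holder[i, j] is the index tuple for window (i, j)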
def _check_rolling_window_overlap(region, size, shape, spacing):
"""
Warn the user if there is no overlap between neighboring windows.
"""
if shape is not None:
ndims = len(shape)
dimensions = [region[i * ndims + 1] - region[i * ndims] for i in range(ndims)]
# The - 1 is because we need to divide by the number of intervals, not
# the number of nodes.
spacing = tuple(dim / (n - 1) for dim, n in zip(dimensions, shape))
spacing = np.atleast_1d(spacing)
if np.any(spacing > size):
warnings.warn(
f"Rolling windows do not overlap (size '{size}' and spacing '{spacing}'). "
"Some data points may not be included in any window. "
"Increase size or decrease spacing to avoid this."
)
def expanding_window(coordinates, center, sizes):
"""
Select points on windows of changing size around a center point.
Returns the indices of points falling inside each window.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...). Only easting and
northing will be used, all subsequent coordinates will be ignored.
center : tuple
The coordinates of the center of the window. Should be in the
following order: (easting, northing, vertical, ...).
sizes : array
The sizes of the windows. Does not have to be in any particular order.
The order of indices returned will match the order of window sizes
given. Units should match the units of *coordinates* and *center*.
Returns
-------
indices : list
Each element of the list corresponds to the indices of points falling
inside a window. Use them to index the coordinates for each window. The
indices will depend on the number of dimensions in the input
coordinates. For example, if the coordinates are 2D arrays, each window
will contain indices for 2 dimensions (row, column).
See also
--------
block_split : Split a region into blocks and label points accordingly.
rolling_window : Select points on a rolling (moving) window.
Examples
--------
Generate a set of sample coordinates on a grid and determine the indices
of points for each expanding window:
>>> from verde import grid_coordinates
>>> coords = grid_coordinates((-5, -1, 6, 10), spacing=1)
>>> print(coords[0])
[[-5. -4. -3. -2. -1.]
[-5. -4. -3. -2. -1.]
[-5. -4. -3. -2. -1.]
[-5. -4. -3. -2. -1.]
[-5. -4. -3. -2. -1.]]
>>> print(coords[1])
[[ 6. 6. 6. 6. 6.]
[ 7. 7. 7. 7. 7.]
[ 8. 8. 8. 8. 8.]
[ 9. 9. 9. 9. 9.]
[10. 10. 10. 10. 10.]]
>>> # Get the expanding window indices
>>> indices = expanding_window(coords, center=(-3, 8), sizes=[1, 2, 4])
>>> # There is one index per window
>>> print(len(indices))
3
>>> # The points in the first window. Indices are 2D positions because the
>>> # coordinate arrays are 2D.
>>> print(len(indices[0]))
2
>>> for dimension in indices[0]:
... print(dimension)
[2]
[2]
>>> for dimension in indices[1]:
... print(dimension)
[1 1 1 2 2 2 3 3 3]
[1 2 3 1 2 3 1 2 3]
>>> for dimension in indices[2]:
... print(dimension)
[0 0 0 0 0 1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4]
[0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4]
>>> # To get the coordinates for each window, use indexing
>>> print(coords[0][indices[0]])
[-3.]
>>> print(coords[1][indices[0]])
[8.]
>>> print(coords[0][indices[1]])
[-4. -3. -2. -4. -3. -2. -4. -3. -2.]
>>> print(coords[1][indices[1]])
[7. 7. 7. 8. 8. 8. 9. 9. 9.]
If the coordinates are 1D, the indices will also be 1D:
>>> coords1d = [coord.ravel() for coord in coords]
>>> indices = expanding_window(coords1d, center=(-3, 8), sizes=[1, 2, 4])
>>> print(len(indices))
3
>>> # Since coordinates are 1D, there is only one index
>>> print(len(indices[0]))
1
>>> print(indices[0][0])
[12]
>>> print(indices[1][0])
[ 6 7 8 11 12 13 16 17 18]
>>> # The returned indices can be used in the same way as before
>>> print(coords1d[0][indices[0]])
[-3.]
>>> print(coords1d[1][indices[0]])
[8.]
Only the first 2 coordinates are considered (assumed to be the horizontal
ones). All others will be ignored by the function.
>>> coords = grid_coordinates((-5, -1, 6, 10), spacing=1, extra_coords=15)
>>> print(coords[2])
[[15. 15. 15. 15. 15.]
[15. 15. 15. 15. 15.]
[15. 15. 15. 15. 15.]
[15. 15. 15. 15. 15.]
[15. 15. 15. 15. 15.]]
>>> indices = expanding_window(coords, center=(-3, 8), sizes=[1, 2, 4])
>>> # The returned indices should be the same as before, ignoring coords[2]
>>> print(len(indices[0]))
2
>>> for dimension in indices[0]:
... print(dimension)
[2]
[2]
>>> for dimension in indices[1]:
... print(dimension)
[1 1 1 2 2 2 3 3 3]
[1 2 3 1 2 3 1 2 3]
>>> for dimension in indices[2]:
... print(dimension)
[0 0 0 0 0 1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4]
[0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4]
>>> # The indices can be used to index all 3 coordinates
>>> print(coords[0][indices[0]])
[-3.]
>>> print(coords[1][indices[0]])
[8.]
>>> print(coords[2][indices[0]])
[15.]
"""
# Select the coordinates after checking to make sure indexing will still
# work on the ignored coordinates.
coordinates = check_coordinates(coordinates)[:2]
shape = coordinates[0].shape
center = np.atleast_2d(center)
# pykdtree doesn't support query_ball_point yet and we need that
tree = kdtree(coordinates, use_pykdtree=False)
indices = []
for size in sizes:
# Use p=inf (infinity norm) to get square windows instead of circular
index1d = tree.query_ball_point(center, r=size / 2, p=np.inf)[0]
# Convert indices to an array to avoid errors when the index is empty
# (no points in the window). unravel_index doesn't like empty lists.
indices.append(np.unravel_index(np.array(index1d, dtype="int"), shape=shape))
return indices
def longitude_continuity(coordinates, region):
"""
Modify coordinates and region boundaries to ensure longitude continuity.
Longitudinal boundaries of the region are moved to the ``[0, 360)`` or
    ``[-180, 180)`` degrees interval, depending on which one is better suited for
that specific region.
Parameters
----------
coordinates : list or array
Set of geographic coordinates that will be moved to the same degrees
interval as the one of the modified region.
region : list or array
List or array containing the boundary coordinates `w`, `e`, `s`, `n` of
the region in degrees.
Returns
-------
modified_coordinates : array
Modified set of extra geographic coordinates.
modified_region : array
        List containing the modified boundary coordinates `w`, `e`, `s`, `n` of
the region.
Examples
--------
>>> # Modify region with west > east
>>> w, e, s, n = 350, 10, -10, 10
>>> print(longitude_continuity(coordinates=None, region=[w, e, s, n]))
[-10 10 -10 10]
>>> # Modify region and extra coordinates
>>> from verde import grid_coordinates
>>> region = [-70, -60, -40, -30]
>>> coordinates = grid_coordinates([270, 320, -50, -20], spacing=5)
>>> [longitude, latitude], region = longitude_continuity(
... coordinates, region
... )
>>> print(region)
[290 300 -40 -30]
>>> print(longitude.min(), longitude.max())
270.0 320.0
>>> # Another example
>>> region = [-20, 20, -20, 20]
>>> coordinates = grid_coordinates([0, 350, -90, 90], spacing=10)
>>> [longitude, latitude], region = longitude_continuity(
... coordinates, region
... )
>>> print(region)
[-20 20 -20 20]
>>> print(longitude.min(), longitude.max())
-180.0 170.0
"""
# Get longitudinal boundaries and check region
w, e, s, n = region[:4]
# Run sanity checks for region
_check_geographic_region([w, e, s, n])
# Check if region is defined all around the globe
all_globe = np.allclose(abs(e - w), 360)
# Move coordinates to [0, 360)
interval_360 = True
w = w % 360
e = e % 360
    # Set west=0 and east=360 if the region longitudes go all the way around the globe
if all_globe:
w, e = 0, 360
# Check if the [-180, 180) interval is better suited
if w > e:
interval_360 = False
e = ((e + 180) % 360) - 180
w = ((w + 180) % 360) - 180
region = np.array(region)
region[:2] = w, e
# Modify extra coordinates if passed
if coordinates:
# Run sanity checks for coordinates
_check_geographic_coordinates(coordinates)
longitude = coordinates[0]
if interval_360:
longitude = longitude % 360
else:
longitude = ((longitude + 180) % 360) - 180
coordinates = np.array(coordinates)
coordinates[0] = longitude
return coordinates, region
return region
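# Quick illustrative sketch (not part of the original module): the two modular
# wraps used above. "lon % 360" maps longitudes into [0, 360), while
# "((lon + 180) % 360) - 180" maps them into [-180, 180).
def _demo_longitude_wraps():
    lons = np.array([-190.0, -10.0, 0.0, 190.0, 350.0])
    to_360 = lons % 360  # -> [170., 350., 0., 190., 350.]
    to_180 = ((lons + 180) % 360) - 180  # -> [170., -10., 0., -170., -10.]
    return to_360, to_180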
def _check_geographic_coordinates(coordinates):
"Check if geographic coordinates are within accepted degrees intervals"
longitude, latitude = coordinates[:2]
if np.any(longitude > 360) or np.any(longitude < -180):
raise ValueError(
"Invalid longitude coordinates. They should be < 360 and > -180 degrees."
)
if np.any(latitude > 90) or np.any(latitude < -90):
raise ValueError(
"Invalid latitude coordinates. They should be < 90 and > -90 degrees."
)
def _check_geographic_region(region):
"""
    Check if a region given in geographic coordinates is within the accepted intervals.
"""
w, e, s, n = region[:4]
# Check if coordinates are within accepted degrees intervals
    if np.any(np.array([w, e]) > 360) or np.any(np.array([w, e]) < -180):
        raise ValueError(
            "Invalid region longitude bounds. They should be < 360 and > -180 degrees."
        )
    if np.any(np.array([s, n]) > 90) or np.any(np.array([s, n]) < -90):
        raise ValueError(
            "Invalid region latitude bounds. They should be < 90 and > -90 degrees."
        )
# You are at the top. If you attempt to go any higher
# you will go beyond the known limits of the code
# universe where there are most certainly monsters
# might be able to get a speedup where I'm appending move and -move
# to do:
# use point raycaster to make a cloth_wrap option
# self collisions
# maybe do dynamic margins for when cloth is moving fast
# object collisions
# collisions need to properly exclude pinned and vertex pinned
# add bending springs
# add curl by shortening bending springs on one axis or diagonal
# independently scale bending springs and structural springs to create buckling
# option to cache animation?
# Custom Source shape option for animated shapes
# collisions:
# Only need to check one of the edges for groups connected to a vertex
# for edge to face intersections...
# figure out where the edge hit the face
# figure out which end of the edge is inside the face
# move along the face normal to the surface for the point inside.
# if I reflect by flipping the vel around the face normal
# if it collides on the bounce it will get caught on the next iteration
# Sewing
# Could create super sewing that doesn't use edges but uses scalars along the edge to place virtual points
# sort of a barycentric virtual spring. Could even use it to sew to faces if I can think of a ui for where on the face.
# On an all-triangle mesh, where sew edges come together there are long straight lines. This probably causes those edges to fold.
# in other words... creating diagonal springs between these edges will not solve the fold problem. Bend spring could do this.
# Bend springs:
# need to speed things up
# When faces have various sizes, the forces don't add up
# self collision
# where points are pinned, stuff is all jittery
'''??? Would it make sense to do self collisions with virtual edges ???'''
'''??? Could do dynamic collision margins for stuff moving fast ???'''
bl_info = {
"name": "Modeling Cloth",
"author": "<NAME> (<EMAIL>.com), <NAME> (@ucupumar)",
"version": (1, 0),
"blender": (2, 79, 0),
"location": "View3D > Extended Tools > Modeling Cloth",
"description": "Maintains the surface area of an object so it behaves like cloth",
"warning": "There might be an angry rhinoceros behind you",
"wiki_url": "",
"category": '3D View'}
import bpy
import bmesh
import numpy as np
from numpy import newaxis as nax
from bpy_extras import view3d_utils
from bpy.props import *
from bpy.app.handlers import persistent
from mathutils import *
import time, sys
#enable_numexpr = True
enable_numexpr = False
if enable_numexpr:
import numexpr as ne
you_have_a_sense_of_humor = False
#you_have_a_sense_of_humor = True
if you_have_a_sense_of_humor:
import antigravity
def get_co(ob, arr=None, key=None): # key
"""Returns vertex coords as N x 3"""
c = len(ob.data.vertices)
if arr is None:
arr = np.zeros(c * 3, dtype=np.float32)
if key is not None:
ob.data.shape_keys.key_blocks[key].data.foreach_get('co', arr.ravel())
arr.shape = (c, 3)
return arr
ob.data.vertices.foreach_get('co', arr.ravel())
arr.shape = (c, 3)
return arr
def get_proxy_co(ob, arr, me):
"""Returns vertex coords with modifier effects as N x 3"""
if arr is None:
arr = np.zeros(len(me.vertices) * 3, dtype=np.float32)
arr.shape = (arr.shape[0] //3, 3)
c = arr.shape[0]
me.vertices.foreach_get('co', arr.ravel())
arr.shape = (c, 3)
return arr
def triangulate(me, ob=None):
"""Requires a mesh. Returns an index array for viewing co as triangles"""
obm = bmesh.new()
obm.from_mesh(me)
bmesh.ops.triangulate(obm, faces=obm.faces)
#obm.to_mesh(me)
count = len(obm.faces)
#tri_idx = np.zeros(count * 3, dtype=np.int32)
#me.polygons.foreach_get('vertices', tri_idx)
tri_idx = np.array([[v.index for v in f.verts] for f in obm.faces])
# Identify bend spring groups. Each edge gets paired with two points on tips of tris around edge
# Restricted to edges with two linked faces on a triangulated version of the mesh
if ob is not None:
link_ed = [e for e in obm.edges if len(e.link_faces) == 2]
ob.bend_eidx = np.array([[e.verts[0].index, e.verts[1].index] for e in link_ed])
fv = np.array([[[v.index for v in f.verts] for f in e.link_faces] for e in link_ed])
fv.shape = (fv.shape[0],6)
ob.bend_tips = np.array([[idx for idx in fvidx if idx not in e] for e, fvidx in zip(ob.bend_eidx, fv)])
obm.free()
return tri_idx#.reshape(count, 3)
def tri_normals_in_place(col, tri_co):
"""Takes N x 3 x 3 set of 3d triangles and
returns non-unit normals and origins"""
col.origins = tri_co[:,0]
col.cross_vecs = tri_co[:,1:] - col.origins[:, nax]
col.normals = np.cross(col.cross_vecs[:,0], col.cross_vecs[:,1])
col.nor_dots = np.einsum("ij, ij->i", col.normals, col.normals)
col.normals /= np.sqrt(col.nor_dots)[:, nax]
def get_tri_normals(tr_co):
"""Takes N x 3 x 3 set of 3d triangles and
returns non-unit normals and origins"""
origins = tr_co[:,0]
cross_vecs = tr_co[:,1:] - origins[:, nax]
return cross_vecs, np.cross(cross_vecs[:,0], cross_vecs[:,1]), origins
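# Illustrative sketch (added for clarity, not original code): for a single
# right triangle in the XY plane, the cross product of its two edge vectors
# points along +Z, and np.einsum("ij,ij->i", n, n) gives the squared lengths
# used above to normalize the triangle normals.
def _demo_tri_normal():
    tri = np.array([[[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]]])
    cross_vecs, normals, origins = get_tri_normals(tri)
    sq_len = np.einsum("ij,ij->i", normals, normals)  # -> [1.0]
    return normals / np.sqrt(sq_len)[:, nax]  # -> [[0., 0., 1.]]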
def closest_points_edge(vec, origin, p):
'''Returns the location of the point on the edge'''
vec2 = p - origin
d = (vec2 @ vec) / (vec @ vec)
cp = vec * d[:, nax]
return cp, d
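# Illustrative usage sketch (not original code): project two points onto an
# edge that starts at the origin and runs along +X with length 2. "d" is the
# parameter along the (non-unit) edge vector; with the origin at zero, "cp"
# is the closest point itself (in general it is the offset from "origin").
def _demo_closest_points_edge():
    vec = np.array([2.0, 0.0, 0.0])
    origin = np.array([0.0, 0.0, 0.0])
    p = np.array([[1.0, 1.0, 0.0],
                  [3.0, -2.0, 0.0]])
    cp, d = closest_points_edge(vec, origin, p)
    # d -> [0.5, 1.5]; cp -> [[1., 0., 0.], [3., 0., 0.]]
    return cp, d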
def proxy_in_place(col, me):
"""Overwrite vert coords with modifiers in world space"""
me.vertices.foreach_get('co', col.co.ravel())
col.co = apply_transforms(col.ob, col.co)
def apply_rotation(col):
"""When applying vectors such as normals we only need
to rotate"""
m = np.array(col.ob.matrix_world)
mat = m[:3, :3].T
col.v_normals = col.v_normals @ mat
def proxy_v_normals_in_place(col, world=True, me=None):
"""Overwrite vert coords with modifiers in world space"""
me.vertices.foreach_get('normal', col.v_normals.ravel())
if world:
apply_rotation(col)
def proxy_v_normals(ob, me):
"""Overwrite vert coords with modifiers in world space"""
arr = np.zeros(len(me.vertices) * 3, dtype=np.float32)
me.vertices.foreach_get('normal', arr)
arr.shape = (arr.shape[0] //3, 3)
m = np.array(ob.matrix_world, dtype=np.float32)
mat = m[:3, :3].T # rotates backwards without T
return arr @ mat
def apply_transforms(ob, co):
"""Get vert coords in world space"""
m = np.array(ob.matrix_world, dtype=np.float32)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
return co @ mat + loc
def apply_in_place(ob, arr, cloth):
"""Overwrite vert coords in world space"""
m = np.array(ob.matrix_world, dtype=np.float32)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
arr[:] = arr @ mat + loc
#cloth.co = cloth.co @ mat + loc
def applied_key_co(ob, arr=None, key=None):
"""Get vert coords in world space"""
c = len(ob.data.vertices)
if arr is None:
arr = np.zeros(c * 3, dtype=np.float32)
ob.data.shape_keys.key_blocks[key].data.foreach_get('co', arr)
arr.shape = (c, 3)
m = np.array(ob.matrix_world)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
    return arr @ mat + loc
def revert_transforms(ob, co):
"""Set world coords on object.
Run before setting coords to deal with object transforms
if using apply_transforms()"""
m = np.linalg.inv(ob.matrix_world)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
return co @ mat + loc
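# Illustrative sketch (not original code): the row-vector convention used by
# apply_transforms()/revert_transforms(), shown with a stand-in object that
# holds a plain 4x4 numpy matrix (a pure translation of +1 in Z) instead of a
# real Blender object.
def _demo_apply_revert_transforms():
    class _FakeObject:
        matrix_world = np.array([[1.0, 0.0, 0.0, 0.0],
                                 [0.0, 1.0, 0.0, 0.0],
                                 [0.0, 0.0, 1.0, 1.0],
                                 [0.0, 0.0, 0.0, 1.0]])
    ob = _FakeObject()
    co = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 3.0]], dtype=np.float32)
    world = apply_transforms(ob, co)      # z shifted up by 1
    local = revert_transforms(ob, world)  # back to the original co (float32 precision)
    return world, local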
def revert_in_place(ob, co):
"""Revert world coords to object coords in place."""
m = np.linalg.inv(ob.matrix_world)
mat = m[:3, :3].T # rotates backwards without T
loc = m[:3, 3]
co[:] = co @ mat + loc
def revert_rotation(ob, co):
"""When reverting vectors such as normals we only need
to rotate"""
#m = np.linalg.inv(ob.matrix_world)
m = np.array(ob.matrix_world)
mat = m[:3, :3] # rotates backwards without T
return co @ mat
def get_last_object():
"""Finds cloth objects for keeping settings active
while selecting other objects like pins"""
cloths = [i for i in bpy.data.objects if i.mclo.enable] # so we can select an empty and keep the settings menu up
if bpy.context.object.mclo.enable:
return cloths, bpy.context.object
if len(cloths) > 0:
ob = bpy.context.scene.mclo.last_object
return cloths, ob
return None, None
def get_poly_centers(ob, type=np.float32, mesh=None):
mod = False
m_count = len(ob.modifiers)
if m_count > 0:
show = np.zeros(m_count, dtype=np.bool)
ren_set = np.copy(show)
ob.modifiers.foreach_get('show_render', show)
ob.modifiers.foreach_set('show_render', ren_set)
mod = True
p_count = len(mesh.polygons)
center = np.zeros(p_count * 3, dtype=type)
mesh.polygons.foreach_get('center', center)
center.shape = (p_count, 3)
if mod:
ob.modifiers.foreach_set('show_render', show)
return center
def simple_poly_centers(ob, key=None):
if key is not None:
s_key = ob.data.shape_keys.key_blocks[key].data
return np.squeeze([[np.mean([ob.data.vertices[i].co for i in p.vertices], axis=0)] for p in ob.data.polygons])
def get_poly_normals(ob, type=np.float32, mesh=None):
mod = False
m_count = len(ob.modifiers)
if m_count > 0:
show = np.zeros(m_count, dtype=np.bool)
ren_set = np.copy(show)
ob.modifiers.foreach_get('show_render', show)
ob.modifiers.foreach_set('show_render', ren_set)
mod = True
p_count = len(mesh.polygons)
normal = np.zeros(p_count * 3, dtype=type)
mesh.polygons.foreach_get('normal', normal)
normal.shape = (p_count, 3)
if mod:
ob.modifiers.foreach_set('show_render', show)
return normal
def get_v_normals(ob, arr, mesh):
"""Since we're reading from a shape key we have to use
a proxy mesh."""
mod = False
m_count = len(ob.modifiers)
if m_count > 0:
show = np.zeros(m_count, dtype=np.bool)
ren_set = np.copy(show)
ob.modifiers.foreach_get('show_render', show)
ob.modifiers.foreach_set('show_render', ren_set)
mod = True
#v_count = len(mesh.vertices)
#normal = np.zeros(v_count * 3)#, dtype=type)
mesh.vertices.foreach_get('normal', arr.ravel())
#normal.shape = (v_count, 3)
if mod:
ob.modifiers.foreach_set('show_render', show)
def get_v_nor(ob, nor_arr):
ob.data.vertices.foreach_get('normal', nor_arr.ravel())
return nor_arr
def closest_point_edge(e1, e2, p):
'''Returns the location of the point on the edge'''
vec1 = e2 - e1
vec2 = p - e1
d = np.dot(vec2, vec1) / np.dot(vec1, vec1)
cp = e1 + vec1 * d
return cp
def create_vertex_groups(groups=['common', 'not_used'], weights=[0.0, 0.0], ob=None):
'''Creates vertex groups and sets weights. "groups" is a list of strings
for the names of the groups. "weights" is a list of weights corresponding
to the strings. Each vertex is assigned a weight for each vertex group to
avoid calling vertex weights that are not assigned. If the groups are
already present, the previous weights will be preserved. To reset weights
delete the created groups'''
if ob is None:
ob = bpy.context.object
vg = ob.vertex_groups
for g in range(0, len(groups)):
if groups[g] not in vg.keys(): # Don't create groups if there are already there
vg.new(groups[g])
vg[groups[g]].add(range(0,len(ob.data.vertices)), weights[g], 'REPLACE')
else:
vg[groups[g]].add(range(0,len(ob.data.vertices)), 0, 'ADD') # This way we avoid resetting the weights for existing groups.
def get_bmesh(obj=None):
ob = get_last_object()[1]
if ob is None:
ob = obj
obm = bmesh.new()
if ob.mode == 'OBJECT':
obm.from_mesh(ob.data)
elif ob.mode == 'EDIT':
obm = bmesh.from_edit_mesh(ob.data)
return obm
def get_minimal_edges(ob):
obm = get_bmesh(ob)
obm.edges.ensure_lookup_table()
obm.verts.ensure_lookup_table()
obm.faces.ensure_lookup_table()
# get sew edges:
sew = [i.index for i in obm.edges if len(i.link_faces)==0]
# so if I have a vertex with one or more sew edges attached
# I need to get the mean location of all verts shared by those edges
# every one of those verts needs to move towards the total mean
# get linear edges
e_count = len(obm.edges)
eidx = np.zeros(e_count * 2, dtype=np.int32)
e_bool = np.zeros(e_count, dtype=np.bool)
e_bool[sew] = True
ob.data.edges.foreach_get('vertices', eidx)
eidx.shape = (e_count, 2)
# get diagonal edges:
diag_eidx = []
start = 0
stop = 0
step_size = [len(i.verts) for i in obm.faces]
p_v_count = np.sum(step_size)
p_verts = np.ones(p_v_count, dtype=np.int32)
ob.data.polygons.foreach_get('vertices', p_verts)
    # can only be understood on a good day when the coffee flows (uses rolling and slicing)
    # creates unique diagonal edge sets (see the sketch after this function)
for f in obm.faces:
fv_count = len(f.verts)
stop += fv_count
if fv_count > 3: # triangles are already connected by linear springs
skip = 2
f_verts = p_verts[start:stop]
for fv in range(len(f_verts)):
if fv > 1: # as we go around the loop of verts in face we start overlapping
skip = fv + 1 # this lets us skip the overlap so we don't have mirror duplicates
roller = np.roll(f_verts, fv)
for r in roller[skip:-1]:
diag_eidx.append([roller[0], r])
start += fv_count
# eidx groups
sew_eidx = eidx[e_bool]
lin_eidx = eidx[~e_bool]
diag_eidx = np.array(diag_eidx)
# deal with sew verts connected to more than one edge
s_t_rav = sew_eidx.T.ravel()
s_uni, s_inv, s_counts = np.unique(s_t_rav,return_inverse=True, return_counts=True)
s_multi = s_counts > 1
multi_groups = None
if np.any(s_counts):
multi_groups = []
ls = sew_eidx[:,0]
rs = sew_eidx[:,1]
for i in s_uni[s_multi]:
gr = np.array([i])
gr = np.append(gr, ls[rs==i])
gr = np.append(gr, rs[ls==i])
multi_groups.append(gr)
return lin_eidx, diag_eidx, sew_eidx, multi_groups
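# Illustrative sketch (added for clarity, not original code): the
# roll-and-slice loop above, run on a single pentagon, produces each of its
# five diagonals exactly once and never a mirrored duplicate.
def _demo_ngon_diagonals():
    f_verts = np.arange(5)  # one pentagon: verts 0..4
    diagonals = []
    skip = 2
    for fv in range(len(f_verts)):
        if fv > 1:
            skip = fv + 1
        roller = np.roll(f_verts, fv)
        for r in roller[skip:-1]:
            diagonals.append([roller[0], r])
    return diagonals  # -> [[0, 2], [0, 3], [4, 1], [4, 2], [3, 1]]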
def add_remove_virtual_springs(remove=False):
ob = get_last_object()[1]
cloth = get_cloth_data(ob)
obm = get_bmesh()
obm.verts.ensure_lookup_table()
count = len(obm.verts)
idxer = np.arange(count, dtype=np.int32)
sel = np.array([v.select for v in obm.verts])
selected = idxer[sel]
virtual_springs = np.array([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])
if virtual_springs.shape[0] == 0:
virtual_springs.shape = (0, 2)
if remove:
ls = virtual_springs[:, 0]
in_sel = np.in1d(ls, idxer[sel])
deleter = np.arange(ls.shape[0], dtype=np.int32)[in_sel]
for i in reversed(deleter):
ob.mclo.virtual_springs.remove(i)
return
existing = np.append(cloth.eidx, virtual_springs, axis=0)
flip = existing[:, ::-1]
existing = np.append(existing, flip, axis=0)
ls = existing[:,0]
#springs = []
for i in idxer[sel]:
# to avoid duplicates:
# where this vert occurs on the left side of the existing spring list
v_in = existing[i == ls]
v_in_r = v_in[:,1]
not_in = selected[~np.in1d(selected, v_in_r)]
idx_set = not_in[not_in != i]
for sv in idx_set:
#springs.append([i, sv])
new_vs = ob.mclo.virtual_springs.add()
new_vs.vertex_id_1 = i
new_vs.vertex_id_2 = sv
# gets appended to eidx in the cloth_init function after calling get connected polys in case geometry changes
def generate_guide_mesh():
"""Makes the arrow that appears when creating pins"""
verts = [[0.0, 0.0, 0.0], [-0.01, -0.01, 0.1], [-0.01, 0.01, 0.1], [0.01, -0.01, 0.1], [0.01, 0.01, 0.1], [-0.03, -0.03, 0.1], [-0.03, 0.03, 0.1], [0.03, 0.03, 0.1], [0.03, -0.03, 0.1], [-0.01, -0.01, 0.2], [-0.01, 0.01, 0.2], [0.01, -0.01, 0.2], [0.01, 0.01, 0.2]]
edges = [[0, 5], [5, 6], [6, 7], [7, 8], [8, 5], [1, 2], [2, 4], [4, 3], [3, 1], [5, 1], [2, 6], [4, 7], [3, 8], [9, 10], [10, 12], [12, 11], [11, 9], [3, 11], [9, 1], [2, 10], [12, 4], [6, 0], [7, 0], [8, 0]]
faces = [[0, 5, 6], [0, 6, 7], [0, 7, 8], [0, 8, 5], [1, 3, 11, 9], [1, 2, 6, 5], [2, 4, 7, 6], [4, 3, 8, 7], [3, 1, 5, 8], [12, 10, 9, 11], [4, 2, 10, 12], [3, 4, 12, 11], [2, 1, 9, 10]]
name = 'ModelingClothPinGuide'
if 'ModelingClothPinGuide' in bpy.data.objects:
mesh_ob = bpy.data.objects['ModelingClothPinGuide']
else:
mesh = bpy.data.meshes.new('ModelingClothPinGuide')
mesh.from_pydata(verts, edges, faces)
mesh.update()
mesh_ob = bpy.data.objects.new(name, mesh)
bpy.context.scene.objects.link(mesh_ob)
mesh_ob.show_x_ray = True
return mesh_ob
def create_guide():
"""Spawns the guide"""
if 'ModelingClothPinGuide' in bpy.data.objects:
mesh_ob = bpy.data.objects['ModelingClothPinGuide']
return mesh_ob
mesh_ob = generate_guide_mesh()
bpy.context.scene.objects.active = mesh_ob
bpy.ops.object.material_slot_add()
if 'ModelingClothPinGuide' in bpy.data.materials:
mat = bpy.data.materials['ModelingClothPinGuide']
else:
mat = bpy.data.materials.new(name='ModelingClothPinGuide')
mat.use_transparency = True
mat.alpha = 0.35
mat.emit = 2
mat.game_settings.alpha_blend = 'ALPHA_ANTIALIASING'
mat.diffuse_color = (1, 1, 0)
mesh_ob.material_slots[0].material = mat
return mesh_ob
def delete_guide():
"""Deletes the arrow"""
if 'ModelingClothPinGuide' in bpy.data.objects:
bpy.data.objects.remove(bpy.data.objects['ModelingClothPinGuide'])
if 'ModelingClothPinGuide' in bpy.data.meshes:
guide_mesh = bpy.data.meshes['ModelingClothPinGuide']
guide_mesh.user_clear()
bpy.data.meshes.remove(guide_mesh)
def scale_source(multiplier):
"""grow or shrink the source shape"""
ob = get_last_object()[1]
if ob is not None:
if ob.mclo.enable:
count = len(ob.data.vertices)
co = np.zeros(count*3, dtype=np.float32)
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_get('co', co)
co.shape = (count, 3)
mean = np.mean(co, axis=0)
co -= mean
co *= multiplier
co += mean
ob.data.shape_keys.key_blocks['modeling cloth source key'].data.foreach_set('co', co.ravel())
cloth = get_cloth_data(ob)
if hasattr(cloth, 'cy_dists'):
cloth.cy_dists *= multiplier
def reset_shapes(ob=None):
"""Sets the modeling cloth key to match the source key.
Will regenerate shape keys if they are missing"""
if ob is None:
if bpy.context.object.mclo.enable:
ob = bpy.context.object
else:
ob = bpy.context.scene.mclo.last_object
if ob.data.shape_keys == None:
ob.shape_key_add('Basis')
if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth source key')
if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth key')
ob.data.shape_keys.key_blocks['modeling cloth key'].value=1
keys = ob.data.shape_keys.key_blocks
count = len(ob.data.vertices)
co = np.zeros(count * 3, dtype=np.float32)
keys['Basis'].data.foreach_get('co', co)
#co = applied_key_co(ob, None, 'modeling cloth source key')
#keys['modeling cloth source key'].data.foreach_set('co', co)
keys['modeling cloth key'].data.foreach_set('co', co)
# reset the data stored in the class
cloth = get_cloth_data(ob)
cloth.vel[:] = 0
co.shape = (co.shape[0]//3, 3)
cloth.co = co
keys['modeling cloth key'].mute = True
keys['modeling cloth key'].mute = False
def get_spring_mix(ob, eidx):
rs = []
ls = []
minrl = []
for i in eidx:
r = eidx[eidx == i[1]].shape[0]
l = eidx[eidx == i[0]].shape[0]
rs.append (min(r,l))
ls.append (min(r,l))
mix = 1 / np.array(rs + ls, dtype=np.float32) ** 1.2
return mix
def collision_data_update(self, context):
ob = self.id_data
if ob.mclo.self_collision:
create_cloth_data(ob)
def refresh_noise(self, context):
ob = self.id_data
cloth = get_cloth_data(ob)
if cloth:
zeros = np.zeros(cloth.count, dtype=np.float32)
random = np.random.random(cloth.count)
zeros[:] = random
cloth.noise = ((zeros + -0.5) * ob.mclo.noise * 0.1)[:, nax]
def generate_wind(wind_vec, ob, cloth):
"""Maintains a wind array and adds it to the cloth vel"""
tri_nor = cloth.normals # non-unit calculated by tri_normals_in_place() per each triangle
w_vec = revert_rotation(ob, wind_vec)
turb = ob.mclo.turbulence
if turb != 0:
w_vec += np.random.random(3).astype(np.float32) * turb * np.mean(w_vec) * 4
# only blow on verts facing the wind
perp = np.abs(tri_nor @ w_vec)
cloth.wind += w_vec
cloth.wind *= perp[:, nax][:, nax]
# reshape for add.at
shape = cloth.wind.shape
cloth.wind.shape = (shape[0] * 3, 3)
cloth.wind *= cloth.tri_mix
np.add.at(cloth.vel, cloth.tridex.ravel(), cloth.wind)
cloth.wind.shape = shape
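# Illustrative sketch (not original code): np.add.at is used above (and in
# generate_inflate and bend_springs) as an unbuffered scatter-add, so a vertex
# shared by several triangles accumulates every per-corner contribution
# instead of keeping only the last one.
def _demo_scatter_add():
    vel = np.zeros((4, 3), dtype=np.float32)
    tridex = np.array([[0, 1, 2], [0, 2, 3]])  # two triangles sharing verts 0 and 2
    per_corner_force = np.ones((6, 3), dtype=np.float32)
    np.add.at(vel, tridex.ravel(), per_corner_force)
    return vel  # rows 0 and 2 end up at [2, 2, 2], rows 1 and 3 at [1, 1, 1]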
def generate_inflate(ob, cloth):
"""Blow it up baby!"""
tri_nor = cloth.normals #* ob.mclo.inflate # non-unit calculated by tri_normals_in_place() per each triangle
#tri_nor /= np.einsum("ij, ij->i", tri_nor, tri_nor)[:, nax]
# reshape for add.at
shape = cloth.inflate.shape
cloth.inflate += tri_nor[:, nax] * ob.mclo.inflate# * cloth.tri_mix
cloth.inflate.shape = (shape[0] * 3, 3)
cloth.inflate *= cloth.tri_mix
np.add.at(cloth.vel, cloth.tridex.ravel(), cloth.inflate)
cloth.inflate.shape = shape
cloth.inflate *= 0
def get_quat(rad, axis):
theta = (rad * 0.5)
w = np.cos(theta)
q_axis = axis * np.sin(theta)[:, nax]
return w, q_axis
def q_rotate(co, w, axis):
"""Takes an N x 3 numpy array and returns that array rotated around
the axis by the angle in radians w. (standard quaternion)"""
move1 = np.cross(axis, co)
move2 = np.cross(axis, move1)
move1 *= w[:, nax]
return co + (move1 + move2) * 2
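# Illustrative sketch (not original code): rotating +X by 90 degrees around
# the +Z axis with the two helpers above returns (approximately) +Y.
def _demo_quat_rotate():
    rad = np.array([np.pi * 0.5])
    axis = np.array([[0.0, 0.0, 1.0]])
    w, q_axis = get_quat(rad, axis)
    co = np.array([[1.0, 0.0, 0.0]])
    return q_rotate(co, w, q_axis)  # -> approximately [[0., 1., 0.]]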
def bend_springs(cloth, co, measure=None):
bend_eidx, tips = cloth.bend_eidx, cloth.bend_tips
tips_co = co[tips]
bls, brs = bend_eidx[:,0], bend_eidx[:, 1]
b_oris = co[bls]
be_vecs = co[brs] - b_oris
te_vecs = tips_co - b_oris[:, nax]
bcp_dots = np.einsum('ij,ikj->ik', be_vecs, te_vecs)
be_dots = np.einsum('ij,ij->i', be_vecs, be_vecs)
b_div = np.nan_to_num(bcp_dots / be_dots[:, nax])
tcp = be_vecs[:, nax] * b_div[:, :, nax]
# tip vecs from cp
tcp_vecs = te_vecs - tcp
tcp_dots = np.einsum('ijk,ijk->ij',tcp_vecs, tcp_vecs)
u_tcp_vecs = tcp_vecs / np.sqrt(tcp_dots)[:, :, nax]
u_tcp_ls = u_tcp_vecs[:, 0]
u_tcp_rs = u_tcp_vecs[:, 1]
# dot of unit tri tips around axis
angle_dot = np.einsum('ij,ij->i', u_tcp_ls, u_tcp_rs)
#paralell = angle_dot < -.9999999
angle = np.arccos(np.clip(angle_dot, -1, 1)) # values outside and arccos gives nan
#angle = np.arccos(angle_dot) # values outside and arccos gives nan
# get the angle sign
tcp_cross = np.cross(u_tcp_vecs[:, 0], u_tcp_vecs[:, 1])
sign = np.sign(np.einsum('ij,ij->i', be_vecs, tcp_cross))
if measure is None:
s = np.arccos(angle_dot)
s *= sign
s[angle_dot < -.9999999] = np.pi
return s
angle *= sign
# rotate edges with quaternypoos
u_be_vecs = be_vecs / np.sqrt(be_dots)[:, nax]
b_dif = angle - measure
l_ws, l_axes = get_quat(b_dif, u_be_vecs)
r_ws, r_axes = l_ws, -l_axes
# move tcp vecs so their origin is in the middle:
#u_tcp_vecs *= 0.5
# should I rotate the unit vecs or the source?
# rotating the unit vecs here.
#stiff = cloth.ob.modeling_cloth_bend_stiff * 0.0057
stiff = cloth.ob.mclo.bend_stiff * 0.0057
rot_ls = q_rotate(u_tcp_ls, l_ws, l_axes)
l_force = (rot_ls - u_tcp_ls) * stiff
rot_rs = q_rotate(u_tcp_rs, r_ws, r_axes)
r_force = (rot_rs - u_tcp_rs) * stiff
np.add.at(cloth.co, tips[:, 0], l_force)
np.add.at(cloth.co, tips[:, 1], r_force)
np.subtract.at(cloth.co, bend_eidx.ravel(), np.tile(r_force * .5, 2).reshape(r_force.shape[0] * 2, 3))
np.subtract.at(cloth.co, bend_eidx.ravel(), np.tile(l_force * .5, 2).reshape(l_force.shape[0] * 2, 3))
return
cloth.co[tips[:, 0]] += l_force
cloth.co[tips[:, 1]] += r_force
#cloth.co[bend_eidx] -= l_force
cloth.co[bend_eidx] -= r_force[:, nax]
cloth.co[bend_eidx] -= l_force[:, nax]
#cloth.co[brs] -= r_force
#print("bend here")
# will need to read bend springs continuously when using
# a dynamic source shape. Guess I should do that now...
# need the angle at each edge
# need to get the tips of each tri around each edge
# should be a pair everywhere there is a link face in
# the tri bmesh
"""
With no sign I just get the dot in radians.
Rotation should move towards the shortest distance
to the same dot in radians.
Without getting the sign at all, it will always rotate
in the same direction to go back to the target.
By multiplying the dif by the sign, I can make it spin
the other way to go back to the target dot in rads
"""
# sewing functions ---------------->>>
def create_sew_edges():
bpy.ops.mesh.bridge_edge_loops()
bpy.ops.mesh.delete(type='ONLY_FACE')
return
#highlight a sew edge
#compare vertex counts
#subdivide to match counts
#distribute and smooth back into mesh
#create sew lines
# sewing functions ---------------->>>
def check_and_get_pins_and_hooks(ob):
scene = bpy.context.scene
pins = []
hooks = []
cull_ids = []
for i, pin in enumerate(ob.mclo.pins):
# Check if hook object still exists
if not pin.hook or (pin.hook and not scene.objects.get(pin.hook.name)):
cull_ids.append(i)
else:
#vert = ob.data.vertices[pin.vertex_id]
pins.append(pin.vertex_id)
hooks.append(pin.hook)
# Delete missing hooks pointers
for i in reversed(cull_ids):
pin = ob.mclo.pins[i]
if pin.hook:
bpy.data.objects.remove(pin.hook)
ob.mclo.pins.remove(i)
return pins, hooks
class ClothData:
pass
def create_cloth_data(ob):
"""Creates instance of cloth object with attributes needed for engine"""
scene = bpy.context.scene
data = scene.modeling_cloth_data_set
# Try to get the cloth data first
try:
cloth = data[ob.name]
except:
# Search for possible name changes
cloth = None
for ob_name, c in data.items():
if c.ob == ob:
# Rename the key
data[ob.name] = data.pop(ob_name)
cloth = data[ob.name]
break
# If cloth still not found
if not cloth:
cloth = ClothData()
data[ob.name] = cloth
cloth.ob = ob
# get proxy object
#proxy = ob.to_mesh(bpy.context.scene, False, 'PREVIEW')
# ----------------
scene.objects.active = ob
cloth.idxer = np.arange(len(ob.data.vertices), dtype=np.int32)
# data only accesible through object mode
mode = ob.mode
if mode == 'EDIT':
bpy.ops.object.mode_set(mode='OBJECT')
# data is read from a source shape and written to the display shape so we can change the target springs by changing the source shape
#cloth.name = ob.name
if ob.data.shape_keys == None:
ob.shape_key_add('Basis')
if 'modeling cloth source key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth source key')
if 'modeling cloth key' not in ob.data.shape_keys.key_blocks:
ob.shape_key_add('modeling cloth key')
ob.data.shape_keys.key_blocks['modeling cloth key'].value=1
cloth.count = len(ob.data.vertices)
# we can set a large group's pin state using the vertex group. No hooks are used here
if 'modeling_cloth_pin' not in ob.vertex_groups:
cloth.pin_group = create_vertex_groups(groups=['modeling_cloth_pin'], weights=[0.0], ob=None)
for i in range(cloth.count):
try:
ob.vertex_groups['modeling_cloth_pin'].weight(i)
except RuntimeError:
# assign a weight of zero
ob.vertex_groups['modeling_cloth_pin'].add(range(0,len(ob.data.vertices)), 0.0, 'REPLACE')
cloth.pin_bool = ~np.array([ob.vertex_groups['modeling_cloth_pin'].weight(i) for i in range(cloth.count)], dtype=np.bool)
# unique edges------------>>>
uni_edges = get_minimal_edges(ob)
if len(uni_edges[1]) > 0:
cloth.eidx = np.append(uni_edges[0], uni_edges[1], axis=0)
else:
cloth.eidx = uni_edges[0]
#cloth.eidx = uni_edges[0][0]
if len(ob.mclo.virtual_springs) > 0:
virtual_springs = np.array([[vs.vertex_id_1, vs.vertex_id_2] for vs in ob.mclo.virtual_springs])
        cloth.eidx = np.append(cloth.eidx, virtual_springs, axis=0)
from .mmic import ICs
import numpy as np
from pytipsy import wtipsy
class PPDisk(ICs):
'''
Class for holding protoplanetary disk parameters and generating initial conditions.
'''
def __init__(self,units='CGS'):
self._set_units(units)
self._set_volume()
# Disk Parameters (with default values)
self.rhoPower = -1.0 # Radial density profile power
self.tempPower = -0.5 # Temperature profile power
self.rout = None #1.0*self.AU.value # Outer radius in AU
self.rin = None #0.3*self.AU.value # Inner radius of the disk in AU
self.Mstar = None #1./3.*self.Msol.value # Mass of the star
self.T0 = None #150. # Central temperature
self.sigma0 = None #(48000 * self.sDens).value # Central surface density
def makeIC(self,filename='snapshot.std',output=True):
'''Generate tipsy snapshot'''
try:
self.points
except:
print('Generating initial mesh points...')
self._generate_mesh_points()
try:
self.pos
except:
print('Building disk...')
self.makeDisk()
narr = np.array([])
zarr = np.zeros(1)
zsarr = np.zeros(self.ngas)
n = self.ngas + 1
self.header={'time':0.,'n':n,'ndim':3,'ngas':self.ngas,'ndark':1,'nstar':0}
# Gas quantities
rho = np.ones(self.ngas)
        h = self.rin * 0.1 * np.ones(self.ngas)
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (needed for projection='3d' on older matplotlib)
from math import sqrt
from random import randint
from scipy.optimize import minimize
from pyswarm import pso
class hidr(object):
    # Hydro plant registration data (from HIDR.DAT)
    Codigo = None           # Plant (UHE) code
    Nome = None             # Plant name
    Posto = None            # Streamflow gauge (posto) number
    Bdh = None              # Diversion - not sure what this field is ??????
    Sist = None             # Submarket (subsystem)
    Empr = None             # Company code
    Jusante = None          # Code of the plant immediately downstream
    Desvio = None           # Diversion - not sure what this field is ??????
    VolMin = None           # Minimum volume
    VolMax = None           # Maximum volume
    VolVert = None          # Spill volume
    VolMinDesv = None       # Minimum volume for diversion
    CotaMin = None          # Minimum elevation (cota)
    CotaMax = None          # Maximum elevation (cota)
    PolCotaVol = None       # Elevation-volume (cota-volume) polynomial
    PolCotaArea = None      # Elevation-area (cota-area) polynomial
    CoefEvap = None         # Evaporation coefficients
    NumConjMaq = None       # Number of machine groups
    MaqporConj = None       # Number of machines per group
    PEfporConj = None       # Effective power per machine in the group
    CF_HBQT = None          # Not sure what this field is ??????
    CF_HBQG = None          # Not sure what this field is ??????
    CF_HBPT = None          # Not sure what this field is ??????
    AltEfetConj = None      # Effective head of the group
    VazEfetConj = None      # Effective flow of the group
    ProdEsp = None          # Specific productivity
    PerdaHid = None         # Hydraulic loss
    NumPolVNJ = None        # Number of tailwater (flow vs. downstream level) polynomials
    PolVazNivJus = None     # Tailwater (flow vs. downstream level) polynomials
    CotaRefNivelJus = None  # Reference elevation for the downstream level
    CFMed = None            # Mean tailrace elevation
    InfCanalFuga = None     # Tailrace information - not sure what this field is ??????
    FatorCargaMax = None    # Maximum load factor - not sure what this field is ?????????
    FatorCargaMin = None    # Minimum load factor - not sure what this field is ?????????
    VazMin = None           # Minimum mandatory outflow
    UnidBase = None         # Number of base units
    TipoTurb = None         # Hydraulic turbine type
    Repres_Conj = None      # Machine-group representation - not sure what this field is ?????
    TEIFH = None            # Hydraulic equivalent forced outage rate
    IP = None               # Scheduled unavailability
    TipoPerda = None        # Hydraulic loss type
    Data = None             # Not sure what this field is ??????
    Observ = None           # Remarks
    VolRef = None           # Reference volume
    TipoReg = None          # Regulation type
    # Additional data specified in the hydraulic configuration file (CONFHD)
    Ree = None
    Status = None
    VolIni = None
    Modif = None
    AnoI = None
    AnoF = None
    # Additional data computed for the plants in the hydraulic configuration (CONFHD)
    VolUtil = None
    VazEfet = None
    PotEfet = None
    Ro65 = None             # PDTMED (NEWAVE) - productivity at the head corresponding to 65% of useful storage
    Ro50 = None
    RoMax = None            # PDTMAX (NEWAVE) - productivity at the maximum head
    RoMin = None            # PDTMIN (NEWAVE) - productivity at the minimum head
    RoEquiv = None          # PRODT (NEWAVE) - equivalent productivity (from minimum to maximum volume)
    RoEquiv65 = None        # PRODTM (NEWAVE) - equivalent productivity (from minimum volume to 65% of useful storage)
    Engolimento = None
    RoAcum = None           # PDTARM (NEWAVE) - accumulated productivity for the stored-energy calculation
    RoAcum65 = None         # PDAMED (NEWAVE) - accumulated productivity for stored energy at 65% of useful storage
    RoAcumMax = None        # PDCMAX and PDVMAX (NEWAVE) - accumulated productivity
    RoAcumMed = None        # PDTCON, PDCMED and PDVMED (NEWAVE) - accumulated productivity
    RoAcumMin = None        # PDCMIN and PDVMIN (NEWAVE) - accumulated productivity
    RoAcum_A_Ree = None
    RoAcum_B_Ree = None
    RoAcum_C_Ree = None
    RoAcum_A_Sist = None
    RoAcum_B_Sist = None
    RoAcum_C_Sist = None
    RoAcumEntreResRee = None
    RoAcumEntreResSist = None
    # Natural and incremental inflows and PAR(p) data
    Vazoes = None           # Natural inflow history (imes, ilag)
    FAC = None              # Autocorrelation function (imes, ilag)
    FACP = None             # Partial autocorrelation function (imes, ilag)
    CoefParp = None         # PAR(p) model coefficients (imes, ilag)
    CoefIndParp = None      # PAR(p) independent coefficients (imes) - additive = 0 - multiplicative > 0
    Ordem = None            # PAR(p) model order for every month (mes)
    # Time-dependent plant parameters - specified in MODIF.DAT
    VolMinT = None          # Minimum operating volume (may vary month to month)
    VolMaxT = None          # Maximum operating volume (may vary month to month)
    VolMinP = None          # Minimum volume with penalty (may vary month to month)
    VazMinT = None          # Minimum outflow (may vary month to month)
    CFugaT = None           # Tailrace elevation (may vary month to month)
    # Time-varying hydro expansion parameters for 'EE' and 'NE' plants (EXPH)
    StatusVolMorto = None   # Dead-storage status - 0: filling not started - 1: filling - 2: full
    VolMortoTempo = None    # Evolution of the plant's minimum volume
    StatusMotoriz = None    # Motorization status - 0: not started - 1: motorizing - 3: motorized
    UnidadesTempo = None    # Number of units in each month
    EngolTempo = None       # Evolution of the plant's maximum turbine discharge (engolimento)
    PotenciaTempo = None    # Evolution of the plant's installed capacity
##########################################################################################################
    # Miscellaneous plots
##########################################################################################################
    # Plot the elevation-volume (cota-volume) polynomial
def PlotaPCV(self):
if self.VolMin == 0:
return
if (self.VolMin == self.VolMax):
volumes = np.linspace(self.VolMin - 1,self.VolMax + 1, 100)
else:
volumes = np.linspace(self.VolMin,self.VolMax,100)
a = self.PolCotaVol[0]
b = self.PolCotaVol[1]
c = self.PolCotaVol[2]
d = self.PolCotaVol[3]
e = self.PolCotaVol[4]
cota = a + b*volumes + c*volumes**2 + d*volumes**3 + e*volumes**4
cota.shape = volumes.shape
plt.plot(volumes, cota, 'b-', lw=3)
plt.xlabel('Volume do Reservatorio (hm^3)', fontsize=16)
titulo = 'Polinomio Cota-Volume da Usina ' + self.Nome
plt.title(titulo, fontsize=16)
plt.ylabel('Cota em Metros', fontsize=16)
plt.xlim(volumes[0], volumes[99])
if ( cota[0] == cota[99]):
plt.ylim(cota[0]-1, cota[99]+1)
else:
plt.ylim(cota[0], cota[99])
plt.show()
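    # Illustrative sketch (not part of the original class, assumes PolCotaVol
    # already holds the five cota-volume coefficients): the explicit quartic
    # used in the plotting methods is equivalent to np.polyval, which expects
    # the coefficients in descending order.
    def _demo_polyval_equivalence(self, volumes):
        explicit = (self.PolCotaVol[0]
                    + self.PolCotaVol[1] * volumes
                    + self.PolCotaVol[2] * volumes ** 2
                    + self.PolCotaVol[3] * volumes ** 3
                    + self.PolCotaVol[4] * volumes ** 4)
        via_numpy = np.polyval(np.asarray(self.PolCotaVol[:5])[::-1], volumes)
        return np.allclose(explicit, via_numpy)  # -> True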
    # Plot the elevation-area (cota-area) polynomial
def PlotaPCA(self):
if self.VolMin == 0:
return
if (self.CotaMax == self.CotaMin):
cotas = np.linspace(self.CotaMin - 1,self.CotaMax + 1, 100)
else:
cotas = np.linspace(self.CotaMin,self.CotaMax,100)
a = self.PolCotaArea[0]
b = self.PolCotaArea[1]
c = self.PolCotaArea[2]
d = self.PolCotaArea[3]
e = self.PolCotaArea[4]
areas = a + b*cotas + c*cotas**2 + d*cotas**3 + e*cotas**4
areas.shape = cotas.shape
plt.plot(cotas, areas, 'b-', lw=3)
plt.xlabel('Cota do Reservatorio (em metros)', fontsize=16)
titulo = 'Polinomio Cota-Area da Usina ' + self.Nome
plt.title(titulo, fontsize=16)
plt.ylabel('Area Superficia em km^2', fontsize=16)
plt.xlim(cotas[0], cotas[99])
if ( areas[0] == areas[99]):
plt.ylim(areas[0]-1, areas[99]+1)
else:
plt.ylim(areas[0], areas[99])
plt.show()
# <NAME>
def PlotaColina(self):
if self.VolMin == 0:
return
if (self.VolMin == self.VolMax):
volumes = np.linspace(self.VolMin - 1,self.VolMax + 1, 100)
else:
volumes = np.linspace(self.VolMin,self.VolMax,100)
a = self.PolCotaVol[0]
b = self.PolCotaVol[1]
c = self.PolCotaVol[2]
d = self.PolCotaVol[3]
e = self.PolCotaVol[4]
cotamont = a + b*volumes + c*volumes**2 + d*volumes**3 + e*volumes**4
cotamont.shape = volumes.shape
qdef = np.linspace(self.VazMin, 5*self.Engolimento, 100)
a = self.PolVazNivJus[0][0]
b = self.PolVazNivJus[0][1]
c = self.PolVazNivJus[0][2]
d = self.PolVazNivJus[0][3]
e = self.PolVazNivJus[0][4]
cotajus = a + b*qdef + c*qdef**2 + d*qdef**3 + e*qdef**4
cotajus.shape = qdef.shape
xGrid, yGrid = np.meshgrid(cotamont, cotajus)
z = self.ProdEsp * ( xGrid - yGrid )
fig = plt.figure()
ax = fig.gca(projection='3d')
        # mesh the outflow and storage axes so they match z's (qdef, volumes) layout
        qGrid, vGrid = np.meshgrid(qdef, volumes, indexing='ij')
        surf = ax.plot_surface(qGrid, vGrid, z, rcount=100, ccount=100, cmap=plt.cm.coolwarm,
                               linewidth=0, antialiased=False)
plt.xlabel('Vazão Defluente em m^3/s', fontsize=12)
titulo = 'Produtibilidade da Usina ' + self.Nome
plt.title(titulo, fontsize=16)
plt.ylabel('Volume Armazenado em hm^3', fontsize=12)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
def PlotaProdutibs(self, iano, imes):
x_axis = np.arange(1,6)
y_axis = [ self.RoEquiv[iano][imes], self.RoMin[iano][imes], self.Ro50[iano][imes], self.Ro65[iano][imes], self.RoMax[iano][imes] ]
fig, ax = plt.subplots()
a, b, c, d, e = plt.bar(x_axis, y_axis)
a.set_facecolor('r')
b.set_facecolor('g')
c.set_facecolor('b')
d.set_facecolor('y')
e.set_facecolor('m')
ax.set_xticks(x_axis)
ax.set_xticklabels(['Equiv', 'Min', '50%', '65%', 'Max'])
titulo = 'Produtibilidades da Usina ' + self.Nome
plt.title(titulo, fontsize=16)
plt.xlabel('Tipo de Produtibilidade', fontsize=16)
plt.ylabel('Produtibilidade', fontsize=16)
plt.show()
def PlotaVazoes(self):
        x_axis = np.arange(1,13)
"""
The "Archetype Classifier" is a classifier for fluorosequences using
K-Nearest-Neighbors with a Gaussian Mixture Model on top.
Terminology (applies to test_nn and test_nn)
n_features:
(n_cycles * n_channels)
dye_count_space:
The (n_cycles * n_channels) dimensional feature space of the data.
Note that dye_count_space differs from "radiometry space" in that
dye_count_space has unit brightness for each dye whereas radiometry_space
has an independent brightness for each channel.
dyemat:
An instance of a matrix in dye_count_space
dyerow:
One row of a dyemat
radiometry_space:
The data space in which the data from the scope (or simulated) dwells.
Like dye_count space this is an (n_cycles * n_channels) dimensional space,
but wherein each channel has its own brightness and variance.
radmat:
An instance of a matrix in radiometry_space.
Shape of n_rows * n_features
radrow:
One row of the radmat
dye_gain:
        The brightness of each dye. Steps up linearly with each additional dye-count.
Note that this approximation may not hold for large numbers of dyes
or different scopes/labels in that there is some evidence that single-molecule
data should follow a log-normal distribution.
variance_per_dye (VPD):
The amount the variance goes up for each unit of dye. As noted above,
there is some evidence that single molecule data should follow a log-normal
distribution and therefore this model may be inadequate at higher dye counts.
For now, this approximation seems sufficient.
Gaussian Mixture Model (GMM):
As the dye counts grows larger, the variance increases accordingly and thus
individual fluoroseq classes tend to overlap at high dye_counts.
The GMM is used to estimate the probability that a data-point came from
a given fluoroseq.
X:
A matrix of n_rows by n_features holding sim or true signal data.
uX:
A matrix of X unit-normalized so that all channels are on a scale of 1 unit
per dye.
true_y:
pred_y:
        A true or prediction vector of classification calls.
The NN classifier works by the following method:
Calibration (off line):
* Fits the ErrorModel (see error_model.py)
Dye_tracks generated by monte-carlo simulations of peptides
|
| Raw-data in radiometry-space from signal-processing or sim.
| |
| v
+-> Find nearest-neighbor dye_track patterns within some neighbor-radius
|
v
Weighted Gaussian Mixture Model to predict most likely dye_track in neighborhood.
|
v
Maximum-Liklihood-Estimator to assign dye-track to peptide.
"""
from munch import Munch
import numpy as np
import pandas as pd
import pyflann
import random
from plaster.tools.utils import utils
from plaster.tools.schema import check
from plaster.tools.zap import zap
from plaster.tools.log.log import debug
from scipy.spatial import distance
def _create_flann(dt_mat):
pyflann.set_distance_type("euclidean")
flann = pyflann.FLANN()
flann.build_index(utils.mat_flatter(dt_mat), algorithm="kdtree_simple")
return flann
def _get_neighbor_iz(flann, radrow, n_neighbors, radius, default=0):
"""
Return n_neighbors worth of neighbors using radius search.
If unable to find n_neighbors worth, pad the return array with default
so that it will always return np.array((n_neighbors,))
"""
check.array_t(radrow, ndim=1)
# I don't think there's any guarantee that if you ask
# for only one neighbor that you get the closest one.
# Therefore, best not to use this under that assumption.
# assert n_neighbors > 1
# Removing this assert for now because my test results
# show that getting 1 neighbor gives me nearly identical
# results so I'm considering switching to a nearest-neighbor
# model alone.
nn_iz, dists = flann.nn_radius(radrow.astype(float), radius, max_nn=n_neighbors)
n_found = nn_iz.shape[0]
neighbor_iz = np.full((n_neighbors,), default)
neighbor_iz[0:n_found] = nn_iz[np.argsort(dists)]
return neighbor_iz
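# Illustrative usage sketch (not original code; assumes pyflann is available):
# build an index over two toy dye tracks plus the index-0 slot, then look up
# the neighbors of a noisy radrow. Slots that cannot be filled within the
# radius keep the default value of 0, which the helpers above treat as
# "no neighbor found".
def _demo_neighbor_lookup():
    dt_mat = np.array(
        [
            [[0.0, 0.0, 0.0]],  # index 0 is the reserved "no match" slot
            [[1.0, 1.0, 0.0]],
            [[2.0, 1.0, 0.0]],
        ]
    )
    flann = _create_flann(dt_mat)
    radrow = np.array([1.1, 0.9, 0.0])
    # Expected to return the closest track first, e.g. array([1, 2])
    return _get_neighbor_iz(flann, radrow, n_neighbors=2, radius=1.0, default=0)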
def _do_nn_and_gmm(
unit_radrow,
dyerow,
dt_mat,
dt_inv_var_mat,
dt_weights,
flann,
n_neighbors,
dt_score_mode,
dt_filter_threshold,
dt_score_metric,
dt_score_bias,
penalty_coefs,
rare_penalty,
radius,
):
"""
The workhorse of the Gaussian Mixture Model called from a parallel map below.
Arguments:
unit_radrow: One row of the unit_X matrix.
dyerow: The true dyerow used for debugging
dt_mat: The Dyetrack object used for neighbor lookup.
dt_inv_var_mat: The inverse variance of each pattern (based on the VPD for each count)
dt_weights: The weight of the pattern
flann: The neighbor lookup
The remainder are temporary parameters until I find the best solution
Outline:
unit_radmat_row is one row of uX. We ask the PYFLANN index to tell us the
n nearest neighbors of all the dt_ann to this row and then for each
of those neighbors we compute a variance-corrected distance.
Notes:
After some testing to compare to cdist "mahalanobis" I realized that actually
the cdist function was not operating as expected. Specifically, it was taking the
first element of the VI as THE ONLY covariance matrix, not one-per-dt! This means
that all previous run results were wrong so I'm concerned how that was getting
results that were as good as RF?
"""
check.array_t(dt_mat, ndim=3)
n_dts, n_channels, n_cycles = dt_mat.shape
check.array_t(dt_inv_var_mat, shape=dt_mat.shape)
check.array_t(unit_radrow, shape=(n_channels, n_cycles))
check.array_t(dt_weights, shape=(n_dts,))
n_cols = n_channels * n_cycles
unit_radmat_row_flat = unit_radrow.reshape((n_cols,))
# true_dt_i is found by use the "dyerow" which is really a cheat
# since dyerow is oly something we know in simulated data.
# There is no guarantee that the training set has every row
# that is represented by the test set. Thus the "true_dt_i"
# might not be found. We find it by using the _get_neighbor_iz
# with a tiny radius because if the test dyetrack is found
# among the targets it should be right on top of its target.
true_dt_i = _get_neighbor_iz(
flann,
dyerow.reshape((n_cols,)),
n_neighbors=n_neighbors,
radius=0.01,
default=0,
)[0]
nn_iz = _get_neighbor_iz(
flann, unit_radmat_row_flat, n_neighbors=n_neighbors, radius=radius, default=0
)
nn_iz = nn_iz[nn_iz > 0]
# FILTER any low-weight dyetracks
sufficient_weight_mask = dt_weights[nn_iz] >= dt_filter_threshold
nn_iz = nn_iz[sufficient_weight_mask]
n_neigh_found = np.sum(nn_iz > 0)
if n_neigh_found > 0:
assert 1 <= n_neigh_found <= n_neighbors
neigh_dt_mat = dt_mat[nn_iz].reshape((n_neigh_found, n_cols))
neigh_dt_inv_var = dt_inv_var_mat[nn_iz].reshape((n_neigh_found, n_cols))
neigh_weights = dt_weights[nn_iz]
vdist = np.zeros_like(neigh_dt_inv_var)
def cd():
return distance.cdist(
unit_radmat_row_flat.reshape((1, n_cols)),
neigh_dt_mat,
metric=dt_score_metric,
)[0]
def penalty():
p = 1.0
if rare_penalty is not None:
p *= 1.0 - np.exp(-rare_penalty * neigh_weights)
if penalty_coefs is not None:
# Experimental: reduce score by total est. dye count
total_brightness = unit_radrow.sum()
# From fitting on RF I see that p_correct is correlated to
# total brightness. A linear function m=0.054, b=0.216
# So I'm going to try reducing score by this factor
correction = max(
0.0,
min(1.0, (total_brightness * penalty_coefs[0] + penalty_coefs[1])),
)
assert 0 <= correction <= 1.0
p *= correction
return p
if dt_score_mode == "gmm_normalized_wpdf":
delta = unit_radmat_row_flat - neigh_dt_mat
vdist = np.sum(delta * delta * neigh_dt_inv_var, axis=1)
pdf = np.exp(-vdist)
weighted_pdf = neigh_weights * pdf
scores = utils.np_safe_divide(weighted_pdf, weighted_pdf.sum())
elif dt_score_mode == "gmm_normalized_wpdf_dist_sigma":
"""
https://en.wikipedia.org/wiki/Multivariate_normal_distribution
Given:
Sigma: covariance matrix
mu: mean
k: Dimensionality of the Gaussian
The multivariate normal has the form:
(
(2.0 * np.pi)**(-k / 2.0)
) * (
np.linalg.det(Sigma)**(-1.0 / 2.0)
) * np.exp(
-1.0 / 2.0 * ((x-mu).T @ np.linalg.inv(Sigma) @ (x-mu))
)
We can make some simplifications here:
1. We have n_rows (number of neighbors) and n_cols (number of feature dimensions)
2. We have pre-computed (x-mu) and call it "delta"
This is a (n_rows, n_cols) matrix
3. We don't actually have Sigma, rather we have
the "inverse variance" which we call: "neigh_dt_inv_var"
and which we store in vector form!
Therefore:
Sigma = 1.0 / neigh_dt_inv_var
This is a (n_rows, n_cols) matrix
4. Our covariance matrix (Sigma) is diagonal and therefore
its determinant is the product of the diagonal elements
which, again, is stored as the rows.
np.linalg.det(Sigma) == np.prod(Sigma, axis=1)
5. Following from Sigma being diagonal, its inverse is:
(1.0 / its elements). Therefore:
np.linalg.inv(Sigma) == 1.0 / Sigma
6. Furthermore, the term:
(x-mu).T @ np.linalg.inv(Sigma) @ (x-mu)
is typically a full matrix expression.
But Sigma is diagonal and stored as a vector, therefore:
delta.T @ np.sum((1.0 / Sigma) * delta, axis=1) ==
np.sum(delta * ((1.0 / Sigma) * delta), axis=1)
7. Because the whole equation converts to vector form
all of the row operations on each neighbor can
be done simultaneously.
8. The (2.0 * np.pi)**(-k / 2.0) is omitted as it will get
factored out when scores are computed.
Therefore:
n_rows = n_neigh_found = # Number of rows of neigh_dt_mat
n_cols = # Number of columns of neigh_dt_mat
delta = unit_radmat_row_flat - neigh_dt_mat # np.array(size=(n_rows, n_cols,))
Sigma = 1.0 / neigh_dt_inv_var # np.array(size=(n_rows, n_cols,))
pdf_at_x = (
(2.0 * np.pi)**(-n_cols / 2.0) # A constant (we can factor this out)
) * (
np.prod(Sigma, axis=1)**(-1.0 / 2.0) # np.array(size=(n_rows,))
) * np.exp(
-1.0 / 2.0 * np.sum(delta * (neigh_dt_inv_var * delta), axis=1)
) # np.array(size=(n_rows,))
"""
delta = unit_radmat_row_flat - neigh_dt_mat
vdist = np.sum(delta * neigh_dt_inv_var * delta, axis=1)
sigma = 1.0 / neigh_dt_inv_var
determinant_of_sigma = np.prod(sigma, axis=1)
pdf = determinant_of_sigma ** (-1 / 2) * np.exp(-vdist / 2)
weighted_pdf = neigh_weights * pdf
scores = utils.np_safe_divide(weighted_pdf, weighted_pdf.sum())
elif dt_score_mode == "gmm_normalized_wpdf_no_inv_var":
delta = unit_radmat_row_flat - neigh_dt_mat
vdist = np.sum(delta * delta, axis=1)
pdf = np.exp(-vdist)
weighted_pdf = neigh_weights * pdf
scores = utils.np_safe_divide(weighted_pdf, weighted_pdf.sum())
elif dt_score_mode == "cdist_normalized":
d = cd()
scores = 1.0 / (dt_score_bias + d)
scores = utils.np_safe_divide(scores, scores.sum())
elif dt_score_mode == "cdist_weighted_sqrt":
d = cd()
scores = np.sqrt(neigh_weights) / (dt_score_bias + d)
elif dt_score_mode == "cdist_weighted_log":
d = cd()
scores = np.log(neigh_weights) / (dt_score_bias + d)
elif dt_score_mode == "cdist_weighted_normalized":
d = cd()
scores = neigh_weights / (dt_score_bias + d)
scores = utils.np_safe_divide(scores, scores.sum())
elif dt_score_mode == "cdist_weighted_normalized_sqrt":
d = cd()
scores = np.sqrt(neigh_weights) / (dt_score_bias + d)
scores = utils.np_safe_divide(scores, scores.sum())
elif dt_score_mode == "cdist_weighted_normalized_log":
d = cd()
scores = np.log(neigh_weights) / (dt_score_bias + d)
scores = utils.np_safe_divide(scores, scores.sum())
else:
raise NotImplementedError()
# PICK highest score
scores *= penalty()
arg_sort = np.argsort(scores)[::-1]
best_arg = arg_sort[0].astype(int)
pred_dt_i = int(nn_iz[best_arg])
pred_dt_score = scores[best_arg]
vdist = vdist[best_arg]
else:
pred_dt_i = 0
pred_dt_score = 0.0
vdist = 0.0
return (
np.array([true_dt_i]),
np.array([pred_dt_i]),
np.array([pred_dt_score]),
np.array([vdist]),
)
def _fit_gain_one_channel(one_channel_radmat, expected_dark_cycle):
"""
Fit the gain of one_channel_radmat
Assumes (demands) that the dye count is not more than one.
This will fail in all other cases.
Arguments:
one_channel_radmat: A 2D matrix of (n_samples, n_cycles)
expected_dark_cycle: cycle (0-based) where the dark is expected
Returns:
Gain estimate
"""
from sklearn.cluster import KMeans # Defer slow import
check.array_t(one_channel_radmat, ndim=2)
n_rows, n_cycles = one_channel_radmat.shape
assert np.any(one_channel_radmat > 100.0) # Check that this is a non-unity X
n_rows = one_channel_radmat.shape[0]
# Step one, divide the measurements into two groups (dark and bright) by using k-means
    # sklearn's KMeans only accepts 2+ dimensional data, so convert it
samples = one_channel_radmat.flatten()
samples_2d = np.zeros((samples.shape[0], 2))
samples_2d[:, 0] = samples
kmeans = KMeans(n_clusters=2, random_state=0).fit(samples_2d)
gain = np.median(samples[kmeans.labels_ == 1])
dark = np.sort(samples[kmeans.labels_ == 1])[0]
# gain is now an initial guess at the one-dye gain and
# dark is a lower bound
# Step 2: Filter outliers and refine estimate
# Filter out any rows that don't conform to the expected pattern
# by measuring the distance of each row in pattern space to the expected
expected_pattern = np.ones((n_cycles,))
expected_pattern[expected_dark_cycle:] = 0
keep_mat = None
for i in range(5): # 5 is empirical
# Repeat solving for the gain keeping anything that is < 2 stdev from the gain
# This knocks out high corruptions
delta = one_channel_radmat / gain - expected_pattern
dist = np.sqrt(np.sum(delta * delta, axis=1))
keep_mask = dist < 1.0
keep_mat = one_channel_radmat[keep_mask]
# Take samples from rows that match the pattern and that aren't dark
samples = keep_mat.flatten()
samples = samples[samples > dark]
# Keep samples that are 2 std from current gain
std = np.std(samples - gain)
samples = samples[(samples - gain) ** 2 < (2 * std) ** 2]
gain = np.mean(samples)
return gain, keep_mat
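# A minimal usage sketch for _fit_gain_one_channel on synthetic data: 500 rows
# of a one-dye channel that go dark after cycle 3, with a nominal gain near
# 1000 counts. All sizes and values below are illustrative assumptions, not
# taken from any real dataset.
def _example_fit_gain_one_channel():
    rng = np.random.default_rng(0)
    n_rows, n_cycles, expected_dark_cycle = 500, 6, 3
    radmat = rng.normal(0.0, 50.0, size=(n_rows, n_cycles))
    radmat[:, :expected_dark_cycle] += rng.normal(
        1000.0, 100.0, size=(n_rows, expected_dark_cycle)
    )
    gain, keep_mat = _fit_gain_one_channel(radmat, expected_dark_cycle)
    # gain is the refined one-dye gain estimate; keep_mat holds the rows
    # that survived the pattern-distance filter
    return gain, keep_mat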
def _fit_vpd_one_channel(one_channel_radmat, gain, expected_dyerow, accept_dist=1.0):
n_rows, n_cycles = one_channel_radmat.shape
assert np.any(one_channel_radmat > 100.0) # Check that this is a non-unity X
assert expected_dyerow.shape[0] == n_cycles
n_dyes = int(np.max(expected_dyerow))
radmat = one_channel_radmat / gain
# Filter out any rows that don't conform to the expected pattern
# by measuring the distance of each row in pattern space to the expected
delta = radmat - expected_dyerow
# debug(delta[:, 0:])
# debug(expected_dyerow)
delta = delta * np.exp(-0.2 * expected_dyerow)
# delta[:, expected_dyerow == 0.0] = 0.0
# debug(delta[:, 0:])
dist = np.sqrt(np.sum(delta * delta, axis=1))
# Take samples from rows that match the pattern
keep_rows = dist < accept_dist
keep_radmat = radmat[keep_rows]
x = [0]
y = [0]
for i in range(1, n_dyes + 1):
cycles_with_this_many_dyes = np.argwhere(expected_dyerow == i).flatten()
samples = keep_radmat[:, cycles_with_this_many_dyes]
x += [i]
        y += [np.var(samples)]
import colorsys
import os
import traceback
import cv2
import matplotlib.pyplot as plt
import numpy as np
import umap # to import after sklearn!
from bullet_envs import bullet_envs_path
from mpl_toolkits.mplot3d import Axes3D
from PIL import Image
from sklearn import decomposition
def plotter(
loss_log,
save_path,
name="loss",
title=None,
xlabel="gradient steps",
ylabel="Mini-Batch Loss",
backprop_per_eval=None,
text_file=None,
):
if text_file is not None:
with open(text_file) as f:
loss_log = np.array(f.read().split("\n"), dtype=np.float32)
if backprop_per_eval is None:
x = np.arange(0, len(loss_log))
elif type(backprop_per_eval) is list:
        x = np.cumsum(backprop_per_eval)
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# Modified by <NAME>
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import cv2
import random
def get_gradient_magnitude(img_in, filter='scharr', threshold=0.3):
def _get_pixel_safe_cv2(image, x, y):
try:
return image[y, x]
except IndexError:
return 0
if filter == 'sobel':
gx = [
[-1, 0, 1],
[-2, 0, 2],
[-1, 0, 1]
]
gy = [
[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]
]
elif filter == 'scharr':
gx = [
[47, 0, -47],
[162, 0, -162],
[47, 0, -47]
]
gy = [
[47, 162, 47],
[0, 0, 0],
[-47, -162, -47]
]
assert len(img_in.shape) == 2
new_height, new_width = img_in.shape
out_mag = np.zeros((new_height, new_width))
# out_direct = np.zeros((new_height, new_width))
for y in range(0, new_height):
for x in range(0, new_width):
gradient_y = (
gy[0][0] * _get_pixel_safe_cv2(img_in, x - 1, y - 1) +
gy[0][1] * _get_pixel_safe_cv2(img_in, x, y - 1) +
gy[0][2] * _get_pixel_safe_cv2(img_in, x + 1, y - 1) +
gy[2][0] * _get_pixel_safe_cv2(img_in, x - 1, y + 1) +
gy[2][1] * _get_pixel_safe_cv2(img_in, x, y + 1) +
gy[2][2] * _get_pixel_safe_cv2(img_in, x + 1, y + 1)
)
gradient_x = (
gx[0][0] * _get_pixel_safe_cv2(img_in, x - 1, y - 1) +
gx[0][2] * _get_pixel_safe_cv2(img_in, x + 1, y - 1) +
gx[1][0] * _get_pixel_safe_cv2(img_in, x - 1, y) +
gx[1][2] * _get_pixel_safe_cv2(img_in, x + 1, y) +
                gx[2][0] * _get_pixel_safe_cv2(img_in, x - 1, y + 1) +
gx[2][2] * _get_pixel_safe_cv2(img_in, x + 1, y + 1)
)
gradient_magnitude = math.sqrt(pow(gradient_x, 2) + pow(gradient_y, 2))
# gradient_direction = math.atan2(gradient_y, gradient_x)
out_mag[y, x] = gradient_magnitude
# out_direct[y, x] = gradient_direction
out_mag = (out_mag - np.min(out_mag)) / (np.max(out_mag) - np.min(out_mag))
out_mag[out_mag > threshold] = 1
# out_direct = (out_direct - np.min(out_direct)) / (np.max(out_direct) - np.min(out_direct))
# out_direct[out_mag == 0] = 0
return out_mag
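# A small usage sketch for get_gradient_magnitude: a synthetic 32x32 grayscale
# frame with one bright square, so there are clear edges to detect. The filter
# and threshold choices below are illustrative only.
def _example_gradient_magnitude():
    img = np.zeros((32, 32), dtype=np.float32)
    img[8:24, 8:24] = 255.0
    edges = get_gradient_magnitude(img, filter='scharr', threshold=0.3)
    return edges  # normalized to [0, 1]; values above threshold are set to 1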
def flip(img):
return img[:, :, ::-1].copy()
def transform_preds(coords, center, scale, output_size):
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
def get_affine_transform(center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return trans
# Author: <NAME>
# E-mail: <EMAIL>
# Author: <NAME>
# E-mail: <EMAIL>
# Author: <NAME>
# E-mail: <EMAIL>
from sklearn.preprocessing import OneHotEncoder
from sklearn.metrics import accuracy_score
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from tqdm import tqdm
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import OneHotEncoder
class MLPModel(torch.nn.Module):
@staticmethod
def actFunct(af_type: str):
"""
Returns the specified activation type from torch.nn
______________________________________________________________
Parameters:
af_type: str
The Activation function to return
______________________________________________________________
Returns:
af: torch.nn.function
The specified activation function
"""
if af_type == "relu":
return torch.nn.ReLU()
if af_type == "sigmoid":
return torch.nn.Sigmoid()
elif af_type == "tanh":
return torch.nn.Tanh()
elif af_type == "softmax":
return torch.nn.Softmax(dim=1)
@staticmethod
def lossFunct(lf_type: str):
"""
Returns the specified loss function from torch.nn
______________________________________________________________
Parameters:
lf_type: str
The loss function to return
______________________________________________________________
Returns:
lf: torch.nn.function
The specified loss function
"""
if lf_type == "cross-entropy":
# needs squeezed target
return torch.nn.CrossEntropyLoss() # I:(N,C) O:(N)
elif lf_type == "hinge-embedding":
# needs plain target (no squeeze)
return torch.nn.HingeEmbeddingLoss()
elif lf_type == "bce" or \
lf_type == 'binary-cross-entropy':
# needs squeezed target
return torch.nn.BCELoss() # I:(N,C) O:(N, C)
elif lf_type == "bce-logit":
# needs squeezed target
return torch.nn.BCEWithLogitsLoss() # I:(N,C) O:(N, C)
elif lf_type == "soft-margin": # target .1 and 1
# check target structure
return torch.nn.SoftMarginLoss()
@staticmethod
def transform_target_for_loss(target, loss_funct_str):
"""fix target to match what loss needs."""
need_encode = ['cross-entropy']
if loss_funct_str in need_encode:
target = target.squeeze(1)
return target
def __init__(self, emb, n_hl: int = 1, num_features: int = 10,
n_classes: int = 2, dropout: float = 0.2,
epochs: int = 5, units: int = 25, bias: float = 0.1,
lr: float = 0.01, momentum: float = 0.9,
device: torch.device = torch.device("cpu"),
weights_init: str = "xavier_normal",
hl_actfunct: str = "tanh",
out_actfunct: str = "relu",
loss_funct: str = "cross-entropy",
random_state: int = None,
verbose: bool = False,
embedding_type: str = "mean",
freeze: bool = False) -> None:
"""
Creates a multilayer perceptron object with the specified
parameters using the pytorch framework.
______________________________________________________________
Parameters:
n_hl: int = 1
Number of hidden layers, defaults to 1
num_features: int = 10
Number of features, defaults to 10
            n_classes: int = 2
                Number of classes, defaults to 2
            dropout: float = 0.2
                Dropout value, defaults to 0.2
            epochs: int = 5
                Number of epochs to run, defaults to 5
units: int = 25
Number of units per hidden layer, defaults to 25
bias: float = 0.1
Bias value, defaults to 0.1
lr: float = 0.01
Learning rate, defaults to 0.01
momentum: float = 0.9
Momentum value, defaults to 0.9
device: torch.device = torch.device("cpu")
Specifies how the model is run, defaults to "cpu"
weights_init: str = "xavier_normal"
Specifies how the weights are initialized, defaults to
"xavier_normal"
hl_actfunct: str = "tanh"
                Hidden layer activation function, defaults to "tanh"
out_actfunct: str = "relu"
Output activation function, defaults to "relu"
loss_funct: str = "cross-entropy"
Loss function, defaults to "cross-entropy"
random_state: int = None
The seed for the random state
verbose: bool = False
If True: prints out progressive output, defaults to False
______________________________________________________________
Returns:
None
"""
super().__init__()
# seeding
if random_state is not None:
print(f'Setting torch random_state to {random_state}...') \
if verbose else None
torch.manual_seed(random_state)
np.random.seed(random_state)
# parameters
self.device = device
self.epochs = epochs
self.verbose = verbose
self.hl_actfunct = self.actFunct(af_type=hl_actfunct)
self.out_actfunct = self.actFunct(af_type=out_actfunct)
self.out_actfunct_str = out_actfunct
self.loss_funct_str = loss_funct
self.loss_funct = self.lossFunct(lf_type=loss_funct)
self.freeze = freeze
if n_hl == 0:
self.model = torch.nn.Sequential(
torch.nn.Dropout(dropout),
torch.nn.Linear(num_features, n_classes),
self.out_actfunct,
)
elif n_hl == 1:
self.model = torch.nn.Sequential(
torch.nn.Dropout(dropout),
torch.nn.Linear(num_features, units),
self.hl_actfunct,
torch.nn.Dropout(dropout),
torch.nn.Linear(units, n_classes),
self.out_actfunct,
)
elif n_hl >= 2:
self.model = torch.nn.Sequential(
torch.nn.Dropout(dropout),
torch.nn.Linear(num_features, units),
self.hl_actfunct
)
for i in range(1, n_hl):
self.model.add_module(
name=f"HL{i + 1}-Dropout",
module=torch.nn.Dropout(dropout)
)
self.model.add_module(
name=f"HL{i + 1}-Linear",
module=torch.nn.Linear(units, units),
)
self.model.add_module(
name=f"HL{i + 1}-ActFunction",
module=self.hl_actfunct,
)
self.model.add_module(
name="Output-Linear",
module=torch.nn.Linear(units, n_classes),
)
self.model.add_module(
name="Output-ActFunction",
module=self.out_actfunct,
)
for m in self.model:
if isinstance(m, torch.nn.Linear):
# initializing bias
torch.nn.init.constant_(m.bias, val=bias)
# initializing weights
if weights_init == "xavier_normal":
torch.nn.init.xavier_normal_(m.weight, gain=1.0)
self.model.to(device)
self.opt = torch.optim.SGD(
params=self.model.parameters(),
lr=lr,
momentum=momentum
)
self.losses = None
vectors = torch.FloatTensor(emb.vectors)
if device == torch.device("cuda"):
vectors = vectors.to(device)
self.word_embeddings = torch.nn.Embedding.from_pretrained(
vectors, freeze=self.freeze)
self.embedding_type = embedding_type
def forward(self, batch):
"""
Performs a forward step on the model.
______________________________________________________________
Parameters:
batch: torch.nn.tensor
The mini-batch input tensor to update
______________________________________________________________
Returns:
self.model: MLPModel
The updated model
"""
x = None
if self.embedding_type == "mean":
x = torch.mean(self.word_embeddings(batch), dim=1)
elif self.embedding_type == "sum":
x = torch.sum(self.word_embeddings(batch), dim=1)
elif self.embedding_type == "max":
            # take the values only; torch.max with dim returns (values, indices)
            x = torch.max(self.word_embeddings(batch), dim=1)[0]
# TODO: Combine to tensor 3x as long for input # NOTE: I think this can really help us
return self.model(x)
def predict_classes(self, input_tensor):
"""
Makes predictions from a test tensor using the model.
______________________________________________________________
Parameters:
input_tensor: torch.nn.tensor
The tensor to make predictions on.
______________________________________________________________
Returns:
y_pred: np.array
An array containing the predicted classes of the input
tensors.
"""
x = None
        # TODO: Why not just send this through forward and call detach?
if self.embedding_type == "mean": # NOTE: If this is the same as above, this should be a method
x = torch.mean(self.word_embeddings(input_tensor), dim=1)
elif self.embedding_type == "sum":
x = torch.sum(self.word_embeddings(input_tensor), dim=1)
elif self.embedding_type == "max":
            # take the values only; torch.max with dim returns (values, indices)
            x = torch.max(self.word_embeddings(input_tensor), dim=1)[0]
y_pred = self.model(x)
return y_pred.max(dim=1)[1]
def backward(self, output, target):
"""
        Performs a backpropagation step computing the loss.
______________________________________________________________
Parameters:
output:
The output after forward with shape (batch_size, num_classes).
target:
The target it is optimizing towards
______________________________________________________________
Returns:
loss: float
How close the estimate was to the gold standard.
"""
target = self.transform_target_for_loss(target, self.loss_funct_str)
if self.loss_funct_str == "bce" or \
self.loss_funct_str == 'binary-cross-entropy':
            encoder = OneHotEncoder(sparse=False)  # NOTE: Shouldn't this encoding be outside of a function that's called for every data point?
target = encoder.fit_transform(target)
target = torch.FloatTensor(target)
# normalizing between 0 and 1
min_ = output.min(dim=1, keepdim=True)[0]
max_ = output.max(dim=1, keepdim=True)[0]
output = (output - min_) / (max_ - min_)
# output = sigmoid(output) # -> the same as bce-logit
elif self.loss_funct_str == "bce-logit":
encoder = OneHotEncoder(sparse=False)
target = encoder.fit_transform(target)
target = torch.FloatTensor(target)
# BUG output is never generated when bce-logit is called
# calculating the loss
loss = self.loss_funct(output, target)
# resetting the gradients from the optimizer
# more info: https://pytorch.org/docs/stable/optim.html
self.opt.zero_grad()
# calculating gradients
loss.backward()
# updating weights from the model by calling optimizer.step()
self.opt.step()
return loss
def fit(self, loader=None, verbose=False) -> None:
"""
Fits the model to the training data using the models
initialized values. Runs for the models number of epochs.
______________________________________________________________
Parameters:
            loader: torch.utils.data.DataLoader = None
                DataLoader object to load the batches, defaults to None
verbose: bool=False
If True: prints out progressive output, defaults to False
______________________________________________________________
Returns:
None
"""
self.losses = np.empty(shape=self.epochs, dtype=float)
iterator = tqdm(range(self.epochs)) if verbose else range(self.epochs)
for i in iterator:
_loss = []
for n, batch in enumerate(loader):
text, source = batch
text = text.to(self.device)
source = source.to(self.device)
output = self.forward(text)
loss = self.backward(output, source)
_loss.append(loss.item())
self.losses[i] = np.mean(_loss)
print(f'Epoch: {i} loss:', np.mean(_loss))
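# A minimal end-to-end sketch for MLPModel, assuming a tiny random embedding
# table standing in for pretrained vectors and a toy dataset of word-index
# sequences. Every size and value below (vocabulary of 100, 10-dim vectors,
# 64 samples, 2 epochs) is an illustrative assumption.
def _example_mlp_training():
    import types
    from torch.utils.data import DataLoader, TensorDataset

    emb = types.SimpleNamespace(vectors=np.random.rand(100, 10).astype(np.float32))
    X = torch.randint(0, 100, (64, 12))   # 64 samples of 12 word indices each
    y = torch.randint(0, 2, (64, 1))      # binary labels with shape (N, 1)
    loader = DataLoader(TensorDataset(X, y), batch_size=16)
    model = MLPModel(emb, n_hl=1, num_features=10, n_classes=2, epochs=2)
    model.fit(loader=loader)
    return model.predict_classes(X)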
class RNNModel(torch.nn.Module):
@staticmethod
def lossFunct(lf_type: str):
"""
Returns the specified loss function from torch.nn
______________________________________________________________
Parameters:
lf_type: str
The loss function to return
______________________________________________________________
Returns:
lf: torch.nn.function
The specified loss function
"""
if lf_type == "cross-entropy":
return torch.nn.CrossEntropyLoss() # I:(N,C) O:(N)
            # NOTE we should definitely still check everything, right? (will studies have to be changed?)
elif lf_type == "hinge-embedding":
# needs plain target (no squeeze)
return torch.nn.HingeEmbeddingLoss()
elif lf_type == "bce" or \
lf_type == 'binary-cross-entropy':
# needs squeezed target
return torch.nn.BCELoss() # I:(N,C) O:(N, C)
elif lf_type == "bce-logit":
# needs squeezed target
return torch.nn.BCEWithLogitsLoss() # I:(N,C) O:(N, C)
elif lf_type == "soft-margin": # target .1 and 1
# check target structure
return torch.nn.SoftMarginLoss()
@staticmethod
def model_constructor(n_hl, units, dropout, num_features, rnn_type,
nonlinearity, bidirectional):
model = None
if rnn_type == "rnn":
model = torch.nn.RNN(
input_size=num_features,
hidden_size=units,
num_layers=n_hl,
nonlinearity=nonlinearity, # -> 'tanh' or 'relu'
batch_first=True, # -> (batch, seq, feature)
dropout=dropout,
bidirectional=bidirectional
)
elif rnn_type == "lstm":
model = torch.nn.LSTM(
input_size=num_features,
hidden_size=units,
num_layers=n_hl,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional
)
elif rnn_type == "gru":
model = torch.nn.GRU(
input_size=num_features,
hidden_size=units,
num_layers=n_hl,
batch_first=True,
dropout=dropout,
bidirectional=bidirectional
)
return model
def __init__(self, emb, n_hl: int = 1, num_features: int = 10,
n_classes: int = 2, dropout: float = 0.2,
epochs: int = 5, units: int = 25,
lr: float = 0.01, momentum: float = 0.9,
device: str = "cpu",
loss_funct: str = "cross-entropy",
random_state: int = None,
verbose: bool = False,
rnn_type: str = "rnn",
nonlinearity: str = 'tanh',
bidirectional: bool = True,
freeze: bool = False,
lr_scheduler: bool = False,
factor: float = 0.1,
patience: int = 2,
pool_type: str = 'cat'
) -> None:
"""
Creates a multilayer perceptron object with the specified
parameters using the pytorch framework.
______________________________________________________________
Parameters:
n_hl: int = 1
Number of hidden layers, defaults to 1
num_features: int = 10
Number of features, defaults to 10
            n_classes: int = 2
                Number of classes, defaults to 2
            dropout: float = 0.2
                Dropout value, defaults to 0.2
            epochs: int = 5
                Number of epochs to run, defaults to 5
units: int = 25
Number of units per hidden layer, defaults to 25
lr: float = 0.01
Learning rate, defaults to 0.01
momentum: float = 0.9
Momentum value, defaults to 0.9
device: str = "cpu"
Specifies how the model is run, defaults to "cpu"
loss_funct: str = "cross-entropy"
Loss function, defaults to "cross-entropy"
random_state: int = None
The seed for the random state
verbose: bool = False
If True: prints out progressive output, defaults to False
______________________________________________________________
Returns:
None
"""
super().__init__()
# seeding
self.verbose = verbose
self.random_state = random_state
if self.random_state is not None:
print(f'Setting torch random_state to {self.random_state}...') \
if self.verbose else None
torch.manual_seed(self.random_state)
np.random.seed(self.random_state)
# parameters:
self.n_hl = n_hl # <- num_layers
self.num_features = num_features # <- input_size
self.n_classes = n_classes
self.dropout = dropout
self.epochs = int(epochs)
self.units = units # <- hidden_size
self.device = device # -> 'cpu' or 'cuda'
self.rnn_type = rnn_type # -> 'rnn', 'lstm', 'gru'
self.loss_funct_str = loss_funct
self.loss_funct = self.lossFunct(lf_type=loss_funct)
self.nonlinearity = nonlinearity
self.bidirectional = bidirectional
self.freeze = freeze
self.lr_scheduler = lr_scheduler
self.factor = factor
self.patience = patience
self.lr = lr
self.momentum = momentum
self.pool_type = pool_type
self.losses = None
self.val_losses = None
self.model = self.model_constructor(
n_hl=self.n_hl,
units=self.units,
dropout=self.dropout,
num_features=self.num_features,
rnn_type=self.rnn_type,
nonlinearity=self.nonlinearity,
bidirectional=self.bidirectional
).to(self.device)
# TODO: a similar constr method that could check different optimizers
self.opt = torch.optim.SGD(
params=self.model.parameters(),
lr=self.lr,
momentum=self.momentum
)
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=self.opt,
mode='min',
factor=self.factor,
patience=self.patience
)
# loading weights from pre-trained vocab
self._word_embeddings = torch.nn.Embedding.from_pretrained(
embeddings=torch.FloatTensor(emb.vectors),
freeze=self.freeze # true: not update in the learning process
).to(self.device)
# output layer
if self.bidirectional is True:
if self.pool_type == 'cat':
self._linear = torch.nn.Linear(
in_features=self.units*2*2,
out_features=self.n_classes,
bias=True
).to(self.device)
else:
self._linear = torch.nn.Linear(
in_features=self.units*2,
out_features=self.n_classes,
bias=True
).to(self.device)
else:
if pool_type == 'cat':
self._linear = torch.nn.Linear(
in_features=self.units * 2,
out_features=self.n_classes,
bias=True
).to(self.device)
else:
self._linear = torch.nn.Linear(
in_features=self.units,
out_features=self.n_classes,
bias=True
).to(self.device)
def forward(self, x):
"""
Performs a forward step on the model.
______________________________________________________________
Parameters:
x: torch.nn.tensor
The input tensor to update
______________________________________________________________
Returns:
self.model: MLPModel
The updated model
"""
_X, lengths = x
# getting embeddings for indices in _X
# <- [batch:num_samples, longest_sentence] := words indices
# -> [batch:num_samples, longest_sentence, emb_vectors:weights])
emb = self._word_embeddings(_X)
# -> [Tensor, Tensor, batch_size:bool, enforce_sorted:bool<-True]
packed = torch.nn.utils.rnn.pack_padded_sequence(
input=emb,
lengths=lengths,
batch_first=True,
enforce_sorted=False
)
# randomly initializing weights
if (self.bidirectional is True) and (self.rnn_type in ["rnn", "gru"]):
# -> [num_layers*num_directions, batch, hidden_size]
h0 = torch.zeros(
self.n_hl*2,
_X.size(0),
self.units
).to(self.device)
# fitting the model
# <- [batch:n_samples, longest_sentence:seq_len, features:in_size]
# -> [batch:n_samples, seq_len, units:hidden_size*num_directions]
out, states = self.model(packed, h0)
elif (self.bidirectional is False) and \
(self.rnn_type in ["rnn", "gru"]):
h0 = torch.zeros(
self.n_hl,
_X.size(0),
self.units
).to(self.device)
out, hn = self.model(packed, h0)
elif (self.bidirectional is True) and (self.rnn_type == "lstm"):
# [num_layers * num_directions, batch, hidden_size]
h0 = torch.zeros(
self.n_hl*2,
_X.size(0),
self.units
).to(self.device)
# [num_layers * num_directions, batch, hidden_size]
c0 = torch.zeros(
self.n_hl*2,
_X.size(0),
self.units
).to(self.device)
out, hn = self.model(packed, (h0, c0))
elif (self.bidirectional is False) and (self.rnn_type == "lstm"):
# [num_layers * num_directions, batch, hidden_size]
h0 = torch.zeros(
self.n_hl,
_X.size(0),
self.units
).to(self.device)
# [num_layers * num_directions, batch, hidden_size]
c0 = torch.zeros(
self.n_hl,
_X.size(0),
self.units
).to(self.device)
out, hn = self.model(packed, (h0, c0))
# print('out', out.shape)
seq_unpacked, lens_unpacked = torch.nn.utils.rnn.pad_packed_sequence(
sequence=out,
batch_first=True
)
# permuting to fix/reduce the tensor's dimension from 3 to 2 later
# <- [batch:n_samples, seq_len, units:hidden_size*num_directions]
# -> [batch:n_samples, units:hidden_size*num_directions, seq_len]
seq_unpacked = seq_unpacked.permute(0, 2, 1)
# concatenating seq_len in units
# <- [batch:n_samples, units:hidden_size*num_directions, seq_len]
# -> [batch:n_samples, units:hidden_size*num_directions]
if self.pool_type == 'cat':
out_ = torch.cat(
(seq_unpacked[:, :, 0], seq_unpacked[:, :, -1]),
dim=-1
)
# <- [batch:n_samples, units:hidden_size*num_directions, seq_len]
# -> [batch:n_samples, units:hidden_size*num_directions]
elif self.pool_type == 'first':
out_ = seq_unpacked[:, :, 0].squeeze(1)
# <- [batch:n_samples, units:hidden_size*num_directions, seq_len]
# -> [batch:n_samples, units:hidden_size*num_directions]
elif self.pool_type == 'last':
out_ = seq_unpacked[:, :, -1].squeeze(1)
# <- [batch:n_samples, units:hidden_size*num_directions]
return self._linear(out_)
def predict_classes(self, test_data):
"""
Makes predictions from a test tensor using the model.
______________________________________________________________
Parameters:
test_loader:
DataLoader to make predictions on.
______________________________________________________________
Returns:
y_pred: np.array
An array containing the predicted classes of the input
tensors.
"""
validation_data_copy = test_data
X_test, y_test = next(iter(validation_data_copy)) # assuming entire input is one batch
y_pred_dist = self.forward(X_test) # TODO should this be detached?
y_pred = y_pred_dist.max(dim=1)[1] # subscript 0 return prob_dist
return y_test[:, 0], y_pred
def backward(self, output, target, verbose=False):
"""
        Performs a backpropagation step computing the loss.
______________________________________________________________
Parameters:
output:
The output after forward with shape (batch_size, num_classes).
target:
The target it is optimizing towards
______________________________________________________________
Returns:
loss: float
How close the estimate was to the gold standard.
"""
loss = self.loss_funct(output, target[:, 0])
# resetting the gradients from the optimizer
# more info: https://pytorch.org/docs/stable/optim.html
self.opt.zero_grad()
# calculating gradients
loss.backward()
# updating weights from the model by calling optimizer.step()
self.opt.step()
# resetting the gradients from the optimizer
# more info: https://pytorch.org/docs/stable/optim.html
self.opt.zero_grad()
return loss
def fit(self, loader=None, verbose=False, test=None) -> None:
"""
Fits the model to the training data using the models
initialized values. Runs for the models number of epochs.
______________________________________________________________
Parameters:
            loader: torch.utils.data.DataLoader = None
                DataLoader object to load the batches, defaults to None
verbose: bool=False
If True: prints out progressive output, defaults to False
______________________________________________________________
Returns:
None
"""
self.losses = np.empty(shape=self.epochs, dtype=float)
iterator = tqdm(range(self.epochs)) if verbose else range(self.epochs)
if test is not None:
self.val_losses = np.empty(shape=self.epochs, dtype=float)
X_test, y_test = next(iter(test))
for epoch in iterator:
_loss = []
_val_loss = []
for n, batch in enumerate(loader):
if n%5 == -1:
verbose = True
else:
verbose = False
text, source = batch
# print(source.shape)
# print(source[0])
output = self.forward(text)
loss = self.backward(output, source, verbose)
_loss.append(loss.item())
if test is not None:
y_valid = self.forward(X_test)
y_valid = y_valid.max(dim=1)[1]
_val_loss.append(accuracy_score(y_valid, y_test[:, 0]))
self.losses[epoch] = np.mean(_loss)
if self.lr_scheduler is True:
self.scheduler.step(np.mean(_loss))
if test is not None:
self.val_losses[epoch] = np.mean(_val_loss)
print(f'Epoch: {epoch} loss: {np.mean(_loss)} - '
f'Valid acc: {np.mean(_val_loss)}')
else:
print(f'Epoch: {epoch} loss: {np.mean(_loss)}')
def best_epoch(self):
if self.val_losses is None:
raise ValueError("Error: Test argument was not provided while "
"training/fitting the model.")
        return np.argmax(self.val_losses)
# -*- coding: utf-8 -*-
r"""
general helper functions
"""
# Import standard library
import os
import logging
import itertools
from pathlib import Path
from glob import glob
from operator import concat
from functools import reduce
from os.path import join, exists
from pprint import pprint
# Import from module
# from matplotlib.figure import Figure
# from matplotlib.image import AxesImage
# from loguru import logger
from uncertainties import unumpy
from tqdm import tqdm
import numpy as np
import pandas as pd
from scipy.stats import norm
from scipy.ndimage import zoom
import matplotlib.pyplot as pl
import lightkurve as lk
from astropy.visualization import hist
from astropy import units as u
from astropy import constants as c
from astropy.timeseries import LombScargle
from astropy.modeling import models, fitting
from astropy.io import ascii
from astropy.coordinates import (
SkyCoord,
Distance,
sky_coordinate,
Galactocentric,
match_coordinates_3d,
)
from skimage import measure
from astroquery.vizier import Vizier
from astroquery.mast import Catalogs, tesscut
from astroquery.gaia import Gaia
import deepdish as dd
# Import from package
from chronos import target
from chronos import cluster
from chronos import gls
from chronos.config import DATA_PATH
log = logging.getLogger(__name__)
__all__ = [
"get_nexsci_archive",
"get_tess_ccd_info",
"get_all_campaigns",
"get_all_sectors",
"get_sector_cam_ccd",
"get_tois",
"get_toi",
"get_ctois",
"get_ctoi",
"get_target_coord",
"get_epicid_from_k2name",
"get_target_coord_3d",
"get_transformed_coord",
"query_gaia_params_of_all_tois",
"get_mamajek_table",
"get_distance",
"get_excess_from_extiction",
"get_absolute_color_index",
"get_absolute_gmag",
"parse_aperture_mask",
"make_round_mask",
"make_square_mask",
"remove_bad_data",
"is_point_inside_mask",
"get_fluxes_within_mask",
"get_harps_bank",
"get_specs_table_from_tfop",
"get_rotation_period",
"get_transit_mask",
"get_mag_err_from_flux",
"get_err_quadrature",
"get_phase",
"bin_data",
"map_float",
"map_int",
"flatten_list",
"detrend",
"query_tpf",
"query_tpf_tesscut",
"is_gaiaid_in_cluster",
"get_pix_area_threshold",
"get_above_lower_limit",
"get_below_upper_limit",
"get_between_limits",
"get_RV_K",
"get_RM_K",
"get_tois_mass_RV_K",
"get_vizier_tables",
"get_mist_eep_table",
"get_tepcat",
]
# Ax/Av
extinction_ratios = {
"U": 1.531,
"B": 1.324,
"V": 1.0,
"R": 0.748,
"I": 0.482,
"J": 0.282,
"H": 0.175,
"K": 0.112,
"G": 0.85926,
"Bp": 1.06794,
"Rp": 0.65199,
}
def query_WDSC():
"""
Washington Double Star Catalog
"""
url = "http://www.astro.gsu.edu/wds/Webtextfiles/wdsnewframe.html"
df = pd.read_csv(url)
return df
def get_tepcat(catalog="all"):
"""
TEPCat
https://www.astro.keele.ac.uk/jkt/tepcat/
Choices:
all, homogenerous, planning, obliquity
"""
base_url = "https://www.astro.keele.ac.uk/jkt/tepcat/"
if catalog == "all":
full_url = base_url + "allplanets-csv.csv"
elif catalog == "homogeneous":
full_url = base_url + "homogeneous-par-csv.csv"
elif catalog == "planning":
full_url = base_url + "observables.csv"
elif catalog == "obliquity":
full_url = base_url + "obliquity.csv"
else:
raise ValueError("catalog=[all,homogeneous,planning,obliquity]")
df = pd.read_csv(full_url)
return df
def get_mist_eep_table():
"""
For eep phases, see
http://waps.cfa.harvard.edu/MIST/README_tables.pdf
"""
fp = Path(DATA_PATH, "mist_eep_table.csv")
return pd.read_csv(fp, comment="#")
def get_nexsci_archive(table="all"):
base_url = "https://exoplanetarchive.ipac.caltech.edu/"
settings = "cgi-bin/nstedAPI/nph-nstedAPI?table="
if table == "all":
url = base_url + settings + "exomultpars"
elif table == "confirmed":
url = base_url + settings + "exoplanets"
elif table == "composite":
url = base_url + settings + "compositepars"
else:
raise ValueError("table=[all, confirmed, composite]")
df = pd.read_csv(url)
return df
def get_vizier_tables(key, tab_index=None, row_limit=50, verbose=True):
"""
Parameters
----------
key : str
vizier catalog key
tab_index : int
table index to download and parse
Returns
-------
tables if tab_index is None else parsed df
"""
if row_limit == -1:
msg = f"Downloading all tables in "
else:
msg = f"Downloading the first {row_limit} rows of each table in "
msg += f"{key} from vizier."
if verbose:
print(msg)
# set row limit
Vizier.ROW_LIMIT = row_limit
tables = Vizier.get_catalogs(key)
errmsg = f"No data returned from Vizier."
assert tables is not None, errmsg
if tab_index is None:
if verbose:
print({k: tables[k]._meta["description"] for k in tables.keys()})
return tables
else:
df = tables[tab_index].to_pandas()
df = df.applymap(
lambda x: x.decode("ascii") if isinstance(x, bytes) else x
)
return df
def get_tois_mass_RV_K(clobber=False):
fp = Path(DATA_PATH, "TOIs2.csv")
if clobber:
try:
from mrexo import predict_from_measurement, generate_lookup_table
except Exception:
raise ModuleNotFoundError("pip install mrexo")
tois = get_tois()
masses = {}
for key, row in tqdm(tois.iterrows()):
toi = row["TOI"]
Rp = row["Planet Radius (R_Earth)"]
Rp_err = row["Planet Radius (R_Earth) err"]
Mp, (Mp_lo, Mp_hi), iron_planet = predict_from_measurement(
measurement=Rp,
measurement_sigma=Rp_err,
qtl=[0.16, 0.84],
dataset="kepler",
)
masses[toi] = (Mp, Mp_lo, Mp_hi)
df = pd.DataFrame(masses).T
df.columns = [
"Planet mass (Mp_Earth)",
"Planet mass (Mp_Earth) lo",
"Planet mass (Mp_Earth) hi",
]
df.index.name = "TOI"
df = df.reset_index()
df["RV_K_lo"] = get_RV_K(
tois["Period (days)"],
tois["Stellar Radius (R_Sun)"], # should be Mstar
df["Planet mass (Mp_Earth) lo"],
with_unit=True,
)
df["RV_K_hi"] = get_RV_K(
tois["Period (days)"],
tois["Stellar Radius (R_Sun)"], # should be Mstar
df["Planet mass (Mp_Earth) hi"],
with_unit=True,
)
joint = pd.merge(tois, df, on="TOI")
joint.to_csv(fp, index=False)
print(f"Saved: {fp}")
else:
joint = pd.read_csv(fp)
print(f"Loaded: {fp}")
return joint
def get_phase(time, period, epoch, offset=0.5):
"""phase offset -0.5,0.5
"""
phase = (((((time - epoch) / period) + offset) % 1) / offset) - 1
return phase
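# Quick sketch of get_phase: with a 10-day period and epoch at t=2.0, times at
# the epoch map to phase 0, a quarter period later maps to 0.5, and half a
# period later wraps to -1 (illustrative numbers only).
def _example_get_phase():
    t = np.array([2.0, 4.5, 7.0, 12.0])
    return get_phase(t, period=10.0, epoch=2.0)  # ~[0.0, 0.5, -1.0, 0.0]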
def bin_data(array, binsize, func=np.mean):
"""
"""
a_b = []
for i in range(0, array.shape[0], binsize):
a_b.append(func(array[i : i + binsize], axis=0))
return a_b
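# bin_data sketch: average a 1-D series in chunks of 3 points (values are
# illustrative).
def _example_bin_data():
    flux = np.arange(9, dtype=float)
    return bin_data(flux, binsize=3)  # [1.0, 4.0, 7.0]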
def get_tess_ccd_info(target_coord):
"""use search_targetpixelfile like get_all_sectors?"""
ccd_info = tesscut.Tesscut.get_sectors(target_coord)
errmsg = f"Target not found in any TESS sectors"
assert len(ccd_info) > 0, errmsg
return ccd_info.to_pandas()
def get_all_sectors(target_coord):
""" """
ccd_info = get_tess_ccd_info(target_coord)
all_sectors = [int(i) for i in ccd_info["sector"].values]
return np.array(all_sectors)
def get_all_campaigns(epicid):
""" """
res = lk.search_targetpixelfile(
f"K2 {epicid}", campaign=None, mission="K2"
)
errmsg = "No data found"
assert len(res) > 0, errmsg
df = res.table.to_pandas()
campaigns = df["observation"].apply(lambda x: x.split()[-1]).values
return np.array([int(c) for c in campaigns])
def get_sector_cam_ccd(target_coord, sector=None):
"""get TESS sector, camera, and ccd numbers using Tesscut
"""
df = get_tess_ccd_info(target_coord)
all_sectors = [int(i) for i in df["sector"].values]
if sector is not None:
sector_idx = df["sector"][df["sector"].isin([sector])].index.tolist()
if len(sector_idx) == 0:
raise ValueError(f"Available sector(s): {all_sectors}")
cam = str(df.iloc[sector_idx]["camera"].values[0])
ccd = str(df.iloc[sector_idx]["ccd"].values[0])
else:
sector_idx = 0
sector = str(df.iloc[sector_idx]["sector"])
cam = str(df.iloc[sector_idx]["camera"])
ccd = str(df.iloc[sector_idx]["ccd"])
return sector, cam, ccd
def is_gaiaid_in_cluster(
gaiaid, cluster_name=None, catalog_name="Bouma2019", verbose=True
):
"""
See scripts/check_target_in_cluster
"""
# reduce the redundant names above
gaiaid = int(gaiaid)
if cluster_name is None:
cc = cluster.ClusterCatalog(catalog_name=catalog_name, verbose=False)
df_mem = cc.query_catalog(return_members=True)
else:
c = cluster.Cluster(
catalog_name=catalog_name, cluster_name=cluster_name, verbose=False
)
df_mem = c.query_cluster_members()
idx = df_mem.source_id.isin([gaiaid])
if idx.sum() > 0:
if verbose:
if cluster_name is None:
cluster_match = df_mem[idx].Cluster.values[0]
else:
# TODO: what if cluster_match != cluster_name?
cluster_match = cluster_name
print(
f"Gaia DR2 {gaiaid} is IN {cluster_match} cluster based on {catalog_name} catalog!"
)
return True
else:
if verbose:
print(f"Gaia DR2 {gaiaid} is NOT in {catalog_name} catalog!")
return False
def query_tpf(
query_str,
sector=None,
campaign=None,
quality_bitmask="default",
apply_data_quality_mask=False,
mission="TESS",
verbose=True,
):
"""
"""
if verbose:
print(f"Searching targetpixelfile for {query_str} using lightkurve")
tpf = lk.search_targetpixelfile(
query_str, mission=mission, sector=sector, campaign=campaign
).download()
if apply_data_quality_mask:
tpf = remove_bad_data(tpf, sector=sector, verbose=verbose)
return tpf
def query_tpf_tesscut(
query_str,
sector=None,
quality_bitmask="default",
cutout_size=(15, 15),
apply_data_quality_mask=False,
verbose=True,
):
"""
"""
if verbose:
if isinstance(query_str, sky_coordinate.SkyCoord):
query = f"ra,dec=({query_str.to_string()})"
else:
query = query_str
print(f"Searching targetpixelfile for {query} using Tesscut")
tpf = lk.search_tesscut(query_str, sector=sector).download(
quality_bitmask=quality_bitmask, cutout_size=cutout_size
)
assert tpf is not None, "No results from Tesscut search."
# remove zeros
zero_mask = (tpf.flux_err == 0).all(axis=(1, 2))
if zero_mask.sum() > 0:
tpf = tpf[~zero_mask]
if apply_data_quality_mask:
tpf = remove_bad_data(tpf, sector=sector, verbose=verbose)
return tpf
def detrend(self, polyorder=1, break_tolerance=10):
"""mainly to be added as method to lk.LightCurve
"""
lc = self.copy()
half = lc.time.shape[0] // 2
if half % 2 == 0:
# add 1 if even
half += 1
return lc.flatten(
window_length=half,
polyorder=polyorder,
break_tolerance=break_tolerance,
)
def get_rotation_period(
time,
flux,
flux_err=None,
min_per=0.5,
max_per=None,
method="ls",
npoints=20,
plot=True,
verbose=True,
):
"""
time, flux : array
time and flux
min_period, max_period : float
minimum & maxmimum period (default=half baseline e.g. ~13 days)
method : str
ls = lomb-scargle; gls = generalized ls
npoints : int
datapoints around which to fit a Gaussian
Note:
1. Transits are assumed to be masked already
2. The period and uncertainty were determined from the mean and the
half-width at half-maximum of a Gaussian fit to the periodogram peak, respectively
See also:
https://arxiv.org/abs/1702.03885
"""
baseline = int(time[-1] - time[0])
max_per = max_per if max_per is not None else baseline / 2
if method == "ls":
if verbose:
print("Using Lomb-Scargle method")
ls = LombScargle(time, flux, dy=flux_err)
frequencies, powers = ls.autopower(
minimum_frequency=1.0 / max_per, maximum_frequency=1.0 / min_per
)
idx = np.argmax(powers)
while npoints > idx:
npoints -= 1
best_freq = frequencies[idx]
best_period = 1.0 / best_freq
# specify which points to fit a gaussian
x = (1 / frequencies)[idx - npoints : idx + npoints]
y = powers[idx - npoints : idx + npoints]
# Fit the data using a 1-D Gaussian
g_init = models.Gaussian1D(amplitude=0.5, mean=best_period, stddev=1)
fit_g = fitting.LevMarLSQFitter()
g = fit_g(g_init, x, y)
label = f"P={g.mean.value:.2f}+/-{g.stddev.value:.2f} d"
if plot:
# Plot the data with the best-fit model
pl.plot(x, y, "ko", label="_nolegend_")
pl.plot(x, g(x), label="_nolegend_")
pl.ylabel("Lomb-Scargle Power")
pl.xlabel("Period [days]")
pl.axvline(g.mean, 0, 1, ls="--", c="r", label=label)
pl.legend()
if verbose:
print(label)
return (g.mean.value, g.stddev.value)
elif method == "gls":
if verbose:
print("Using Generalized Lomb-Scargle method")
data = (time, flux, flux_err)
ls = gls.Gls(data, Pbeg=min_per, Pend=max_per, verbose=verbose)
prot, prot_err = ls.hpstat["P"], ls.hpstat["e_P"]
if plot:
_ = ls.plot(block=False, figsize=(10, 8))
return (prot, prot_err)
else:
raise ValueError("Use method=[ls | gls]")
def get_transit_mask(lc, period, epoch, duration_hours):
"""
lc : lk.LightCurve
lightcurve that contains time and flux properties
mask = []
t0 += np.ceil((time[0] - dur - t0) / period) * period
for t in np.arange(t0, time[-1] + dur, period):
mask.extend(np.where(np.abs(time - t) < dur / 2.)[0])
return np.array(mask)
"""
assert isinstance(lc, lk.LightCurve)
assert (
(period is not None)
& (epoch is not None)
& (duration_hours is not None)
)
temp_fold = lc.fold(period, t0=epoch)
fractional_duration = (duration_hours / 24.0) / period
phase_mask = np.abs(temp_fold.phase) < (fractional_duration * 1.5)
transit_mask = np.in1d(lc.time, temp_fold.time_original[phase_mask])
return transit_mask
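# get_transit_mask sketch: flag in-transit cadences of a flat synthetic light
# curve with a 3-day period, epoch at t=1.0 and a 2.4-hour duration. The
# ephemeris values are illustrative; any lk.LightCurve with time/flux works.
def _example_transit_mask():
    time = np.arange(0.0, 27.0, 0.02)
    lc = lk.LightCurve(time=time, flux=np.ones_like(time))
    mask = get_transit_mask(lc, period=3.0, epoch=1.0, duration_hours=2.4)
    return lc[~mask]  # light curve with in-transit points removed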
def get_harps_bank(
target_coord, separation=30, outdir=DATA_PATH, verbose=True
):
"""
Check if target has archival HARPS data from:
http://www.mpia.de/homes/trifonov/HARPS_RVBank.html
See also https://github.com/3fon3fonov/HARPS_RVBank
For column meanings:
https://www2.mpia-hd.mpg.de/homes/trifonov/HARPS_RVBank_header.txt
"""
homeurl = "http://www.mpia.de/homes/trifonov/HARPS_RVBank.html"
fp = os.path.join(outdir, "HARPS_RVBank_table.csv")
if os.path.exists(fp):
df = pd.read_csv(fp)
msg = f"Loaded: {fp}\n"
else:
if verbose:
print(
f"Downloading HARPS bank from {homeurl}. This may take a while."
)
# csvurl = "http://www.mpia.de/homes/trifonov/HARPS_RVBank_v1.csv"
# df = pd.read_csv(csvurl)
df = pd.read_html(homeurl, header=0)[0] # choose first table
df.to_csv(fp, index=False)
msg = f"Saved: {fp}\n"
if verbose:
print(msg)
# coordinates
coords = SkyCoord(
ra=df["RA"],
dec=df["DEC"],
distance=df["Dist [pc]"],
unit=(u.hourangle, u.deg, u.pc),
)
# check which falls within `separation`
idxs = target_coord.separation(coords) < separation * u.arcsec
if idxs.sum() > 0:
# result may be multiple objects
res = df[idxs]
if verbose:
targets = res["Target"].values
print(f"There are {len(res)} matches: {targets}")
print(f"{df.loc[idxs, df.columns[7:14]].T}\n\n")
return res
else:
# find the nearest HARPS object in the database to target
# idx, sep2d, dist3d = match_coordinates_3d(
# target_coord, coords, nthneighbor=1)
idx = target_coord.separation(coords).argmin()
sep2d = target_coord.separation(coords[idx])
nearest_obj = df.iloc[idx]["Target"]
ra, dec = df.iloc[idx][["RA", "DEC"]]
print(
f"Nearest HARPS object is\n{nearest_obj}: ra,dec=({ra},{dec}) @ d={sep2d.arcsec/60:.2f} arcmin\n"
)
return None
# def get_harps_bank(url, verbose=True):
# """
# Download archival HARPS data from url
# http://www.mpia.de/homes/trifonov/HARPS_RVBank.html
# """
# homeurl = ""
# fp = os.path.join(outdir, "HARPS_RVBank_table.csv")
# return
def get_mamajek_table(clobber=False, verbose=True, data_loc=DATA_PATH):
fp = join(data_loc, f"mamajek_table.csv")
if not exists(fp) or clobber:
url = "http://www.pas.rochester.edu/~emamajek/EEM_dwarf_UBVIJHK_colors_Teff.txt"
# cols="SpT Teff logT BCv Mv logL B-V Bt-Vt G-V U-B V-Rc V-Ic V-Ks J-H H-Ks Ks-W1 W1-W2 W1-W3 W1-W4 Msun logAge b-y M_J M_Ks Mbol i-z z-Y R_Rsun".split(' ')
df = pd.read_csv(
url,
skiprows=21,
skipfooter=524,
delim_whitespace=True,
engine="python",
)
# tab = ascii.read(url, guess=None, data_start=0, data_end=124)
# df = tab.to_pandas()
# replace ... with NaN
df = df.replace(["...", "....", "....."], np.nan)
# replace header
# df.columns = cols
# drop last duplicate column
df = df.drop(df.columns[-1], axis=1)
# df['#SpT_num'] = range(df.shape[0])
# df['#SpT'] = df['#SpT'].astype('category')
# remove the : type in M_J column
df["M_J"] = df["M_J"].apply(lambda x: str(x).split(":")[0])
# convert columns to float
for col in df.columns:
if col == "#SpT":
df[col] = df[col].astype("category")
else:
df[col] = df[col].astype(float)
# if col=='SpT':
# df[col] = df[col].astype('categorical')
# else:
# df[col] = df[col].astype(float)
df.to_csv(fp, index=False)
print(f"Saved: {fp}")
else:
df = pd.read_csv(fp)
if verbose:
print(f"Loaded: {fp}")
return df
def get_mag_err_from_flux(flux, flux_err):
"""
equal to 1.086/(S/N)
"""
return 2.5 * np.log10(1 + flux_err / flux)
def get_err_quadrature(err1, err2):
    return np.sqrt(err1 ** 2 + err2 ** 2)
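# Quick numeric check of the two helpers above: a 10-sigma flux measurement
# has a magnitude error of 2.5*log10(1.1) ~ 0.10 mag (the 1.086/(S/N)
# approximation), and independent errors add in quadrature.
def _example_error_helpers():
    mag_err = get_mag_err_from_flux(flux=1000.0, flux_err=100.0)  # ~0.1035
    tot_err = get_err_quadrature(0.03, 0.04)                      # 0.05
    return mag_err, tot_err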
# Author: <NAME>(ICSRL)
# Created: 4/14/2020, 7:15 AM
# Email: <EMAIL>
import tensorflow as tf
import numpy as np
from network.loss_functions import huber_loss, mse_loss
from network.network import *
from numpy import linalg as LA
class initialize_network_DeepQLearning():
def __init__(self, cfg, name, vehicle_name):
self.g = tf.Graph()
self.vehicle_name = vehicle_name
self.first_frame = True
self.last_frame = []
with self.g.as_default():
stat_writer_path = cfg.network_path + self.vehicle_name + '/return_plot/'
loss_writer_path = cfg.network_path + self.vehicle_name + '/loss' + name + '/'
self.stat_writer = tf.summary.FileWriter(stat_writer_path)
# name_array = 'D:/train/loss'+'/'+name
self.loss_writer = tf.summary.FileWriter(loss_writer_path)
self.env_type = cfg.env_type
self.input_size = cfg.input_size
self.num_actions = cfg.num_actions
# Placeholders
self.batch_size = tf.placeholder(tf.int32, shape=())
self.learning_rate = tf.placeholder(tf.float32, shape=())
self.X1 = tf.placeholder(tf.float32, [None, cfg.input_size, cfg.input_size, 3], name='States')
# self.X = tf.image.resize_images(self.X1, (227, 227))
self.X = tf.map_fn(lambda frame: tf.image.per_image_standardization(frame), self.X1)
self.target = tf.placeholder(tf.float32, shape=[None], name='Qvals')
self.actions = tf.placeholder(tf.int32, shape=[None], name='Actions')
# self.model = AlexNetDuel(self.X, cfg.num_actions, cfg.train_fc)
self.model = C3F2(self.X, cfg.num_actions, cfg.train_fc)
self.predict = self.model.output
ind = tf.one_hot(self.actions, cfg.num_actions)
pred_Q = tf.reduce_sum(tf.multiply(self.model.output, ind), axis=1)
self.loss = huber_loss(pred_Q, self.target)
self.train = tf.train.AdamOptimizer(learning_rate=self.learning_rate, beta1=0.9, beta2=0.99).minimize(
self.loss, name="train")
self.sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
self.saver = tf.train.Saver()
self.all_vars = tf.trainable_variables()
self.sess.graph.finalize()
# Load custom weights from custom_load_path if required
if cfg.custom_load:
print('Loading weights from: ', cfg.custom_load_path)
self.load_network(cfg.custom_load_path)
def get_vars(self):
return self.sess.run(self.all_vars)
def initialize_graphs_with_average(self, agent, agent_on_same_network):
values = {}
var = {}
all_assign = {}
for name_agent in agent_on_same_network:
values[name_agent] = agent[name_agent].network_model.get_vars()
var[name_agent] = agent[name_agent].network_model.all_vars
all_assign[name_agent] = []
for i in range(len(values[name_agent])):
val = []
for name_agent in agent_on_same_network:
val.append(values[name_agent][i])
# Take mean here
mean_val = np.average(val, axis=0)
for name_agent in agent_on_same_network:
# all_assign[name_agent].append(tf.assign(var[name_agent][i], mean_val))
var[name_agent][i].load(mean_val, agent[name_agent].network_model.sess)
def Q_val(self, xs):
target = np.zeros(shape=[xs.shape[0]], dtype=np.float32)
actions = np.zeros(dtype=int, shape=[xs.shape[0]])
return self.sess.run(self.predict,
feed_dict={self.batch_size: xs.shape[0], self.learning_rate: 0, self.X1: xs,
self.target: target, self.actions: actions})
def train_n(self, xs, ys, actions, batch_size, dropout_rate, lr, epsilon, iter):
_, loss, Q = self.sess.run([self.train, self.loss, self.predict],
feed_dict={self.batch_size: batch_size, self.learning_rate: lr, self.X1: xs,
self.target: ys, self.actions: actions})
meanQ = np.mean(Q)
maxQ = np.max(Q)
# Log to tensorboard
self.log_to_tensorboard(tag='Loss', group=self.vehicle_name, value=LA.norm(loss) / batch_size, index=iter)
self.log_to_tensorboard(tag='Epsilon', group=self.vehicle_name, value=epsilon, index=iter)
self.log_to_tensorboard(tag='Learning Rate', group=self.vehicle_name, value=lr, index=iter)
self.log_to_tensorboard(tag='MeanQ', group=self.vehicle_name, value=meanQ, index=iter)
self.log_to_tensorboard(tag='MaxQ', group=self.vehicle_name, value=maxQ, index=iter)
def action_selection(self, state):
target = np.zeros(shape=[state.shape[0]], dtype=np.float32)
        actions = np.zeros(dtype=int, shape=[state.shape[0]])
        # Greedy action selection: evaluate the network with the same feed
        # pattern as Q_val above and return the argmax-Q action per state
        # (this completion is an assumption based on that pattern).
        qvals = self.sess.run(self.predict,
                              feed_dict={self.batch_size: state.shape[0], self.learning_rate: 0,
                                         self.X1: state, self.target: target, self.actions: actions})
        return np.argmax(qvals, axis=1)
import numpy as np
def generate_map(lines, diags=False):
    # grid sized to the largest endpoint coordinates (the +1 for inclusive
    # bounds is an assumption)
    vents = np.zeros((np.max(lines[:, :, 1]) + 1, np.max(lines[:, :, 0]) + 1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Copyright 2019 University of Liege
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
## @package GeoGen (CFD basic grid creator)
#
# Create an unstructured tetrahedral grid around a wing
# to be meshed with gmsh for Flow or SU2 CFD solvers
# <NAME>
import numpy as np
## Generic domain class
#
# <NAME>
class Domain:
def __init__(self, _wing, _tip):
self.wing = _wing
self.tip = _tip
## Handle sphere data
#
# <NAME>
class Sphere(Domain):
def __init__(self, R, _wing, _tip):
Domain.__init__(self, _wing, _tip)
self.initData(R)
def initData(self, R):
"""Initialize data, define numbering
"""
self.pts = [np.array([[self.wing.chord[0], 0., 0.]]),
np.array([[R+self.wing.chord[0], 0., 0.],
[0., 0., R],
[-R+self.wing.chord[0], 0., 0.],
[0., 0., -R]]),
np.array([[self.wing.chord[0], R, 0.]])]
        self.ptsN = [np.array([5001]), np.arange(5002, 5006), np.array([5007])]
import os
import numpy as np
import theano
import lasagne
import time
from scipy.stats import bayes_mvs
from loading import augment
from network import Network, Autoencoder
L = lasagne.layers
T = theano.tensor
class Trainer(object):
"""
Base for subclassing optimizers
Includes:
- a function for iterating minibatches
- a training function that trains a given network on provided training
and validation data as X, y tuples
- a test function that tests a given network on provided test data as
an X, y tuple
"""
def __init__(self, batchsize=128, stopthresh=100, print_interval=50,
updates=lasagne.updates.adam, update_args={}, seed=None):
"""
ToDos:
- More options?
Arguments:
- batchsize: number of examples in each minibatch
- stopthresh: early stopping threshold. training stops when mean
gradient of validation error becomes positive over last <stopthresh>
epochs
- print_interval: print a small report every <print_interval> epochs
- updates: reference to updates algorithm, either from lasagne.updates
or implemented similarly
- update_args: dictionary of arguments for update algorithm (eg learning
rate, momentum, etc)
- seed: random seed for repeating experiment
"""
self.updates = updates
self.bs = batchsize
self.epoch = 0
self.max_epoch = 5000 # default: really high
self.stopthresh = stopthresh
self.print_interval = print_interval
self.update_args = update_args
self.seed = seed
self.val_trace = []
self.train_trace = []
if self.seed is not None:
np.random.seed(self.seed)
def train(self, network, training_data, validation_data):
"""
Training and validation
It might be better to abstract the training and validation loops into
their own functions, but not a priority for now
"""
network.updates = self.updates(network.loss, network.params, **self.update_args)
X, y = training_data
Xv, yv = validation_data
self.train_start = time.time()
for epoch in range(self.max_epoch):
train_err = 0
train_bats = 0
val_err = 0
val_acc = 0
val_bats = 0
epoch_start = time.time()
for batch in self.iterate_minibatches(X, y, shuffle=True):
inputs, targets = batch
train_err += network.train_fn(inputs, targets)
train_bats += 1
epoch_dur = time.time() - epoch_start
for batch in self.iterate_minibatches(Xv, yv, shuffle=False):
inputs, targets = batch
error, accuracy = network.test_fn(inputs, targets)
val_err += error
val_acc += accuracy
val_bats += 1
mean_train_err = train_err / train_bats
mean_val_err = val_err / val_bats
self.val_trace.append(mean_val_err)
self.epoch = epoch
del_val_err = np.diff(self.val_trace)
if epoch > self.stopthresh:
if del_val_err[epoch-self.stopthresh:].mean() > 0:
print("Abandon ship!")
break
if epoch % self.print_interval == 0:
print("Epoch {} took {:.3f}s".format(epoch, epoch_dur))
print("\ttraining loss:\t\t\t{:.4f}".format(mean_train_err))
print("\tvalidation loss:\t\t{:.4f}".format(mean_val_err))
print("\tvalidation accuracy:\t\t{:.2f}%".format(100*val_acc/val_bats))
print("\ttotal time elapsed:\t\t{:.3f}s".format(time.time() - self.train_start))
return train_err, val_err
def test(self, network, testing_data):
X, y = testing_data
test_err = 0
test_acc = 0
test_bats = 0
for batch in self.iterate_minibatches(X, y, shuffle=False):
inputs, targets = batch
error, accuracy = network.test_fn(inputs, targets)
test_err += error
test_acc += accuracy
test_bats += 1
network.test_err = test_err/test_bats
print("\nTEST PERFORMANCE")
print("\tStopped in epoch:\t\t{}".format(self.epoch))
print("\tTest loss:\t\t\t{:.4f}".format(test_err/test_bats))
print("\tTest accuracy:\t\t\t{:.2f}%\n".format(100*test_acc/test_bats))
return test_err, test_acc, test_bats
def iterate_minibatches(self, inputs, targets, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
idxs = np.arange(len(inputs))
np.random.shuffle(idxs)
for idx in range(0, len(inputs)-self.bs+1, self.bs):
if shuffle:
excerpt = idxs[idx:idx+self.bs]
else:
excerpt = slice(idx, idx+self.bs)
yield inputs[excerpt], targets[excerpt]
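# Minibatch iteration sketch: Trainer.iterate_minibatches yields (inputs,
# targets) chunks of `batchsize` and silently drops the final partial batch.
# The array shapes below are illustrative assumptions.
def _example_iterate_minibatches():
    trainer = Trainer(batchsize=32)
    X = np.random.rand(100, 2, 9, 4).astype(np.float32)
    y = np.random.randint(0, 4, size=100)
    sizes = [xb.shape[0] for xb, yb in trainer.iterate_minibatches(X, y, shuffle=True)]
    return sizes  # [32, 32, 32] -- the trailing 4 examples are dropped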
class DefaultTrainer(Trainer):
"""
Implements an additional function that does training for all 5 default
cross-validation splits
This is meant as a standalone, not for subclassing. But I should consider
implementing a basic train_all function that does random cv splits rather
than premade...
self.train_all may be further decomposable
(eg separate "unpack data" function...)
"""
def get_split_idxs(self, num_splits, split):
"""
Generates an array for of split indices for training, validation,
and test sets, then returns training, validation, and test set indices
for input split.
"""
split_array = np.tile( | np.arange(num_splits) | numpy.arange |
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 5 14:53:10 2018
@author: gregz
"""
import os.path as op
import sys
from astropy.io import fits
from astropy.table import Table
from utils import biweight_location
import numpy as np
from scipy.interpolate import LSQBivariateSpline, interp1d
from astropy.convolution import Gaussian1DKernel, interpolate_replace_nans
from astropy.convolution import convolve
from scipy.signal import medfilt, savgol_filter
from skimage.feature import register_translation
import argparse as ap
from input_utils import setup_logging
import warnings
from astropy.modeling.models import Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
get_newwave = True
def get_script_path():
return op.dirname(op.realpath(sys.argv[0]))
DIRNAME = get_script_path()
blueinfo = [['BL', 'uv', 'multi_503_056_7001', [3640., 4640.], ['LL', 'LU'],
[4350., 4375.]], ['BR', 'orange', 'multi_503_056_7001',
[4660., 6950.], ['RU', 'RL'], [6270., 6470.]]]
redinfo = [['RL', 'red', 'multi_502_066_7002', [6450., 8400.], ['LL', 'LU'],
[7225., 7425.]], ['RR', 'farred', 'multi_502_066_7002',
[8275., 10500.], ['RU', 'RL'], [9280., 9530.]]]
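# Each blueinfo/redinfo entry above (inferred from its use later in the
# pipeline): [channel code, channel name, 'multi_*' calibration file prefix,
# full wavelength range, amplifier keys, narrower reference wavelength window].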
parser = ap.ArgumentParser(add_help=True)
parser.add_argument("-b", "--basedir",
help='''base directory for reductions''',
type=str, default=None)
parser.add_argument("-s", "--side",
help='''blue for LRS2-B and red for LRS2-R''',
type=str, default='blue')
parser.add_argument("-scd", "--scidateobsexp",
help='''Example: "20180112,lrs20000027,exp01"''',
type=str, default=None)
parser.add_argument("-skd", "--skydateobsexp",
help='''Example: "20180112,lrs20000027,exp01"''',
type=str, default=None)
targs = ["-b", "/Users/gregz/cure/reductions",
"-s", "red", "-scd", "20181108,lrs20000025,exp01", "-skd",
"20181108,lrs20000024,exp01"]
args = parser.parse_args(args=targs)
args.log = setup_logging('test_skysub')
if args.scidateobsexp is None:
args.log.error('--scidateobsexp/-scd was not set.')
sys.exit(1)
if args.skydateobsexp is None:
args.log.error('--skydateobsexp/-skd was not set.')
sys.exit(1)
if args.side == 'blue':
list_of_blue = [args.scidateobsexp.split(',') +
args.skydateobsexp.split(',')]
if args.side == 'red':
list_of_red = [args.scidateobsexp.split(',') +
args.skydateobsexp.split(',')]
basedir = op.join(args.basedir, '%s/lrs2/%s/%s/lrs2/%s')
skyline_file = op.join(DIRNAME, 'lrs2_config/%s_skylines.dat')
def make_frame(xloc, yloc, data, wave, dw, Dx, Dy, wstart=5700.,
wend=5800., scale=0.4, seeing_fac=1.3):
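    # Resamples the per-fiber spectra onto a regular x/y grid using a Gaussian
    # seeing kernel (with per-wavelength offsets Dx, Dy), returning the full
    # cube, an image collapsed over [wstart, wend], and the grid coordinates.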
seeing = seeing_fac * scale
a, b = data.shape
x = np.arange(xloc.min()-scale,
xloc.max()+1*scale, scale)
y = np.arange(yloc.min()-scale,
yloc.max()+1*scale, scale)
xgrid, ygrid = np.meshgrid(x, y)
zgrid = np.zeros((b,)+xgrid.shape)
area = 3. / 4. * np.sqrt(3.) * 0.59**2
for k in np.arange(b):
sel = np.isfinite(data[:, k])
D = np.sqrt((xloc[:, np.newaxis, np.newaxis] - Dx[k] - xgrid)**2 +
(yloc[:, np.newaxis, np.newaxis] - Dy[k] - ygrid)**2)
W = np.exp(-0.5 / (seeing/2.35)**2 * D**2)
N = W.sum(axis=0)
zgrid[k, :, :] = ((data[sel, k][:, np.newaxis, np.newaxis] *
W[sel]).sum(axis=0) / N / scale**2 / area)
wi = np.searchsorted(wave, wstart, side='left')
we = np.searchsorted(wave, wend, side='right')
zimage = biweight_location(zgrid[wi:we+1], axis=(0,))
return zgrid, zimage, xgrid, ygrid
def rectify(wave, spec, lims, mask=None, fac=1.0):
N, D = wave.shape
rect_wave = np.linspace(lims[0], lims[1], int(D*fac))
rect_spec = np.zeros((N, len(rect_wave)))
G = Gaussian1DKernel(1.5 * fac)
for i in np.arange(N):
dw = np.diff(wave[i])
dw = np.hstack([dw[0], dw])
if mask is None:
x = wave[i]
y = spec[i] / dw
else:
x = wave[i]
y = (spec[i] / dw)
y[mask[i]] = np.nan
y = interpolate_replace_nans(y, G)
I = interp1d(x, y, kind='quadratic',
bounds_error=False, fill_value=-999.)
rect_spec[i, :] = I(rect_wave)
return rect_wave, rect_spec
def fit_continuum(wv, sky, skip=3, fil_len=95, func=np.array):
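    # Iteratively smooths the sky spectrum with a Savitzky-Golay filter,
    # flags points whose residual (passed through `func`) exceeds 1.5x the
    # median absolute residual, grows the mask by `skip` neighbours, and
    # interpolates over the flagged points to estimate the continuum.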
skym_s = 1. * sky
sky_sm = savgol_filter(skym_s, fil_len, 1)
allind = np.arange(len(wv), dtype=int)
for i in np.arange(5):
mad = np.median(np.abs(sky - sky_sm))
outlier = func(sky - sky_sm) > 1.5 * mad
sel = np.where(outlier)[0]
for j in np.arange(1, skip+1):
sel = np.union1d(sel, sel + 1)
sel = np.union1d(sel, sel - 1)
sel = np.sort(np.unique(sel))
sel = sel[skip:-skip]
good = np.setdiff1d(allind, sel)
skym_s = 1.*sky
skym_s[sel] = np.interp(wv[sel], wv[good], sky_sm[good])
sky_sm = savgol_filter(skym_s, fil_len, 1)
return sky_sm
def make_skyline_model(wave, skylines, norm, dw_pix=None, kernel_size=2.1):
kernel_size = kernel_size * dw_pix
skymodel = np.zeros(wave.shape)
for line in skylines:
G = (norm * line[1] / np.sqrt((np.pi * 2. * kernel_size**2)) *
np.exp(-1. * (wave-line[0])**2 / (2. * kernel_size**2)))
skymodel += G
return skymodel
def convert_vac_to_air(skyline):
s2 = (1e4 / skyline[:, 0])**2
n = (1 + 0.0000834254 + 0.02406147 / (130 - s2) + 0.00015998 /
(38.9 - s2))
skyline[:, 0] = skyline[:, 0] / n
return skyline
def get_skyline_file(skyline_file):
V = np.loadtxt(skyline_file)
V[:, 0] = V[:, 0] * 1e4
skyline = convert_vac_to_air(V)
return skyline
def make_avg_spec(wave, spec, binsize=35, per=50):
ind = np.argsort(wave.ravel())
T = 1
for p in wave.shape:
T *= p
wchunks = np.array_split(wave.ravel()[ind],
T / binsize)
schunks = np.array_split(spec.ravel()[ind],
T / binsize)
nwave = np.array([np.mean(chunk) for chunk in wchunks])
nspec = np.array([np.percentile(chunk, per) for chunk in schunks])
nwave, nind = | np.unique(nwave, return_index=True) | numpy.unique |
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
import rvo2
class NavRVO2Env_all(gym.Env):
"""
    What's new in this environment:
    Added 8 pedestrians initialized at the 4 corners ([-0.7,-0.7], [0.7,-0.7], [0.7,0.7], [-0.7,0.7])
    of a rectangle centered at the origin, 2 pedestrians at each corner. They walk almost
    diagonally towards the opposite side (the exact direction is randomized). After they exit the
    rectangle, they are re-initialized at the corners.
robot state:
'px', 'py', 'vx', 'vy', 'gx', 'gy'
0 1 2 3 4 5
pedestrian state:
'px1', 'py1', 'vx1', 'vy1'
6 7 8 9
"""
def __init__(self, task={}):
super(NavRVO2Env_all, self).__init__()
self._num_ped = 8
self._self_dim = 6
self._ped_dim = 4
self._num_agent = self._num_ped + 1 # ped_num + robot_num
self._state_dim = self._self_dim + self._num_ped * self._ped_dim # robot_state_dim + ped_num * ped_state_dim
self.observation_space = spaces.Box(low=-np.inf, high=np.inf,
shape=(self._state_dim,), dtype=np.float32)
self.action_space = spaces.Box(low=-0.1, high=0.1,
shape=(2,), dtype=np.float32)
self._done = False
self._task = task
self._goal = task.get('goal', np.array([0., 0.], dtype=np.float32))
self._default_robot_state = np.array([0., 0., 0., 0., self._goal[0], self._goal[1]], dtype=np.float32)
self._state = self._default_robot_state.copy()
self.seed()
self._ped_radius = 0.15
self._ped_speed = task.get('ped_speed', np.zeros(self._num_ped, dtype=np.float32))
self._ped_direc = task.get('ped_direc', np.zeros(self._num_ped, dtype=np.float32))
self._entering_corner = np.float32(0.7)
self._default_ped_states = self._entering_corner * np.array([[-1,-1], [1,-1], [1,1], [-1,1]])
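        # Stack the 4 corner positions twice so that all 8 pedestrians start
        # at a corner (2 per corner).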
self._default_ped_states = | np.vstack((self._default_ped_states, self._default_ped_states)) | numpy.vstack |
# Created by cc215 at 02/05/19
# Modified by cc215 at 11/12/19
# This code is for testing basic segmentation networks
# Steps:
# 1. get the segmentation network and the path of checkpoint
# 2. fetch images tuples from the disk to test the segmentation
# 3. get the prediction result
# 4. update the metric
# 5. save the results.
from __future__ import print_function
from os.path import join
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas
from torch.utils.data import DataLoader
from tqdm import tqdm
import time
import torch.nn.functional as F
from medseg.models.model_util import makeVariable
from medseg.models.base_segmentation_model import SegmentationModel
from medseg.dataset_loader.base_segmentation_dataset import BaseSegDataset
from medseg.common_utils.metrics import runningMySegmentationScore
from medseg.common_utils.save import save_nrrd_to_disk
class TestSegmentationNetwork():
def __init__(self, test_dataset: BaseSegDataset, crop_size, segmentation_model, use_gpu=True, save_path='',
summary_report_file_name='result.csv', detailed_report_file_name='details.csv',
save_prediction=False, patient_wise=False, metrics_list=['Dice', 'HD'],
foreground_only=False, save_soft_prediction=False):
'''
perform segmentation model evaluation
:param test_dataset: test_dataset
:param segmentation_model: trained_segmentation_model
'''
self.test_dataset = test_dataset
self.testdataloader = DataLoader(dataset=self.test_dataset, num_workers=0, batch_size=1, shuffle=False,
drop_last=False)
self.segmentation_model = segmentation_model
self.use_gpu = use_gpu
self.num_classes = segmentation_model.num_classes
self.segmentation_metric = runningMySegmentationScore(n_classes=segmentation_model.num_classes,
idx2cls_dict=self.test_dataset.formalized_label_dict,
metrics_list=metrics_list, foreground_only=foreground_only)
self.save_path = save_path
self.summary_report_file_name = summary_report_file_name
self.detailed_report_file_name = detailed_report_file_name
self.crop_size = crop_size
self.save_prediction = save_prediction
        self.save_format_name = '{}_pred.npy'  # id plus '_pred.npy' suffix
self.patient_wise = patient_wise
self.save_soft_prediction = save_soft_prediction
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
self.df = None
self.result_dict = {}
def run(self):
print('start evaluating')
self.progress_bar = tqdm(total=100)
if self.patient_wise:
for i in range(self.test_dataset.patient_number):
data_tensor_pack = self.test_dataset.get_patient_data_for_testing(i, crop_size=self.crop_size)
pid, patient_triplet_result = self.evaluate(i, data_tensor_pack, self.test_dataset.patient_number)
self.result_dict[pid] = patient_triplet_result
else:
loader = self.testdataloader
for i, data_tensor_pack in enumerate(loader):
pid, patient_triplet_result = self.evaluate(i, data_tensor_pack, len(loader))
self.result_dict[pid] = patient_triplet_result
###self.segmentation_model.save_testing_images_results(self.save_path, '', max_slices=10,file_name='{}.png'.format(pid))
self.segmentation_metric.get_scores(save_path=join(self.save_path, self.summary_report_file_name))
self.df = self.segmentation_metric.save_patient_wise_result_to_csv(
save_path=join(self.save_path, self.detailed_report_file_name))
# save top k and worst k cases
print('<-finish->')
def evaluate(self, i: int, data_tensor_pack: dict, total_number: int, maximum_batch_size=10):
'''
:param i: id
:param data_tensor_pack:
:return:
'''
assert maximum_batch_size > 0
image = data_tensor_pack['image']
label_npy = data_tensor_pack['label'].numpy()
pid = self.test_dataset.get_id()
total_size = image.size(0)
if total_size > maximum_batch_size:
# image size is too large, break it down to chunks.[[start_id,end_id],[start_id,end_id]]
a_split_ids = [[x, min(total_size, x + maximum_batch_size)]
for x in range(0, total_size, maximum_batch_size)]
else:
a_split_ids = [[0, total_size]]
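        # Buffers for the hard label map and the per-class soft scores (a
        # class axis is added by repeating along a new dimension).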
pred_npy = np.zeros_like(label_npy, dtype=np.uint8)
soft_pred_npy = np.zeros_like(label_npy, dtype=np.float32)
soft_pred_npy = soft_pred_npy[:, np.newaxis, :]
soft_pred_npy = np.repeat(soft_pred_npy, repeats=self.num_classes, axis=1)
for chunk_id in a_split_ids:
image_a = image[chunk_id[0]:chunk_id[1], :, :, :]
image_V_a = makeVariable(image_a, type='float', use_gpu=self.use_gpu, requires_grad=True)
predict_a = self.segmentation_model.predict(input=image_V_a, softmax=False)
pred_npy[chunk_id[0]:chunk_id[1]] = predict_a.max(1)[1].cpu().numpy()
soft_pred_npy[chunk_id[0]:chunk_id[1]] = predict_a.data.cpu().numpy()
# update metrics patient by patient
self.segmentation_metric.update(pid=pid, preds=pred_npy, gts=label_npy,
voxel_spacing=self.test_dataset.get_voxel_spacing())
image_width = pred_npy.shape[-2]
image_height = pred_npy.shape[-1]
rgb_channel = image.size(1)
assert rgb_channel == 1, 'currently only support gray images, found: {}'.format(rgb_channel)
if label_npy.shape[0] == 1:
# 2D images
image_gt_pred = {
'image': image.numpy().reshape(image_height, image_width),
'label': label_npy.reshape(image_height, image_width),
'pred': pred_npy.reshape(image_height, image_width),
'soft_pred': soft_pred_npy.reshape(self.num_classes, image_height, image_width)
}
else:
# 3D images
image_gt_pred = {
'image': image.numpy().reshape(-1, image_height, image_width),
'label': label_npy.reshape(-1, image_height, image_width),
'pred': pred_npy.reshape(-1, image_height, image_width),
'soft_pred': soft_pred_npy.reshape(total_size, self.num_classes, image_height, image_width),
}
self.progress_bar.update(100 * (i / total_number))
##print('completed {cur_id}/{total_number}'.format(cur_id=str(i + 1), total_number=str(len(self.test_dataset))))
if self.save_prediction:
nrrd_save_path = os.path.join(self.save_path, 'pred_nrrd')
if not os.path.exists(nrrd_save_path):
os.makedirs(nrrd_save_path)
image = image_gt_pred['image']
pred = image_gt_pred['pred']
gt = image_gt_pred['label']
if '/' in pid:
pid = pid.replace('/', '_')
save_nrrd_to_disk(save_folder=nrrd_save_path, file_name=pid, image=image, pred=pred, gt=gt)
print('save to:{}'.format(nrrd_save_path))
if self.save_soft_prediction:
npy_save_path = os.path.join(self.save_path, 'pred_npy')
if not os.path.exists(npy_save_path):
os.makedirs(npy_save_path)
# save image and label and softprediction to numpy array
if '/' in pid:
pid = pid.replace('/', '_')
save_path = join(npy_save_path, '{}_soft_pred.npy'.format(str(pid)))
with open(save_path, 'wb') as f:
np.save(file=save_path, arr=image_gt_pred['soft_pred'])
# save_path = join(npy_save_path,'{}_hidden_feature.npy'.format(str(pid)))
# with open(save_path, 'wb') as f:
# np.save(file = save_path, arr = image_gt_pred['hidden_feature'])
save_path = join(npy_save_path, '{}_gt.npy'.format(str(pid)))
with open(save_path, 'wb') as f:
np.save(file=save_path, arr=image_gt_pred['label'])
save_path = join(npy_save_path, '{}_image.npy'.format(str(pid)))
with open(save_path, 'wb') as f:
| np.save(file=save_path, arr=image_gt_pred['image']) | numpy.save |
# Copyright (c) 2021, Intel Corporation
#
# SPDX-License-Identifier: BSD-3-Clause
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import (AutoMinorLocator, FuncFormatter, MaxNLocator,
FormatStrFormatter, LogLocator)
class BiHistogram():
def __init__(self, values_a, values_b):
self.values_a = values_a
self.values_b = values_b
self.xlabel = 'Latency (us)'
def _plot_hist(self, axis, label, data, colour):
hist, edges = np.histogram(data, bins='sqrt')
axis.fill_between(edges[:-1], hist, color=colour, antialiased=False,
rasterized=True)
axis.set_yscale('log')
axis.set_xscale('log')
axis.tick_params(axis='y', which='both', labelsize='xx-small')
axis.set_ylabel(label)
axis.grid(True, which='both', axis='x', linewidth=0.3)
def plot(self, title, filename):
fig, axes = plt.subplots(2, 1, gridspec_kw={'hspace': 0.01},
sharex=True)
self._plot_hist(axes[0], self.values_a[0], self.values_a[1], 'orange')
self._plot_hist(axes[1], self.values_b[0], self.values_b[1],
'cornflowerblue')
axes[1].set_xlabel(self.xlabel, fontsize='x-small')
axes[1].tick_params(axis='x', which='both', labelsize='xx-small',
labelrotation=45)
axes[1].invert_yaxis()
ax = axes[1].get_xaxis()
ax.set_minor_formatter(FormatStrFormatter("%.2f"))
ax.set_major_formatter(FormatStrFormatter("%.2f"))
for ax in fig.get_axes():
ax.label_outer()
fig.suptitle(title)
plt.savefig(filename, dpi=300)
plt.close()
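# Illustrative usage sketch (labels and data arrays are placeholders):
#   BiHistogram(('baseline', latencies_a), ('patched', latencies_b)).plot(
#       'Latency comparison', 'bihist.png')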
class StackedBarChart():
def __init__(self):
self.colours = ['gold', 'lightgreen', 'lightsalmon', 'violet',
'cornflowerblue', 'lightcoral']
self.bars = []
self.bar_distance = 2
self.ylabel = 'Latency (us)'
def add_bar(self, legend, values):
self.bars.append((legend, values))
def __attribute_colours(self):
values_names = sorted({value[0]
for bar in self.bars
for value in bar[1]})
if len(values_names) > len(self.colours):
raise Exception('Add more self.colours for stacked bar chart!')
return dict(zip(values_names, self.colours))
def plot(self, title, filename):
values_colours = self.__attribute_colours()
indices = []
index = 0
for bar in self.bars:
i = 0
cumu_col = 0
for value in bar[1]:
height = value[1]
plt.bar(index, height, label=value[0],
color=values_colours[value[0]], bottom=cumu_col)
plt.text(index, cumu_col + height / 2, "%.3f" % height,
ha='center', va='center', fontsize=7)
cumu_col = height + cumu_col
i = i + 1
indices.append(index)
# Bigger increase to better space the bars
index = index + self.bar_distance
ax = plt.gca()
handles, labels = ax.get_legend_handles_labels()
# Avoid legend repetition by using the label as a key to dict
labels, handles = zip(*dict(zip(labels, handles)).items())
plt.subplots_adjust(right=0.8)
plt.legend(reversed(handles), reversed(labels), loc='upper left',
fontsize='x-small', ncol=1, bbox_to_anchor=(1.01, 1.))
ax.set_xbound(-1, 4)
ax.set_xticks(indices)
ax.set_xticklabels([bar[0] for bar in self.bars])
plt.title(title)
plt.xticks(fontsize='x-small')
plt.ylabel(self.ylabel)
plt.savefig(filename, dpi=300)
plt.close()
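# Illustrative usage sketch (labels and values are placeholders):
#   chart = StackedBarChart()
#   chart.add_bar('run A', [('wakeup', 3.2), ('dispatch', 1.1)])
#   chart.add_bar('run B', [('wakeup', 2.8), ('dispatch', 1.4)])
#   chart.plot('Latency breakdown', 'stacked.png')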
class HistogramGroupPlot():
def __init__(self, group_size, min_latency, max_latency, iterations):
self.min_latency = min_latency
self.max_latency = max_latency
self.iterations = iterations
self.fig, self.plots = plt.subplots(group_size, 1, sharex=True)
if not isinstance(self.plots, np.ndarray):
self.plots = np.array([self.plots])
self.plot_idx = 0
self.xlabel = 'Latency (us)'
def add_histogram(self, data, edges, mean, stdev, ylabel):
if self.plot_idx >= len(self.plots):
raise Exception("Can't add more histograms: group_size too small")
plot = self.plots[self.plot_idx]
self.plot_idx += 1
plot.fill_between(edges[1:], data, antialiased=False, rasterized=True)
plot.set_xscale('log', subsx=[2, 4, 6, 8])
plot.set_yscale('log')
# Set the labels
plot.text(0.8, 0.8, f'Mean {mean:.2f} us', fontsize=5,
transform=plot.transAxes)
plot.text(0.8, 0.7, f'STDEV {stdev:.2f} us', fontsize=5,
transform=plot.transAxes)
plot.set_ylabel(ylabel)
# Set limits and locations of ticks
# Set ylim a bit bigger than strictly needed so there's some headspace
# on plots
plot.set_ylim(0.5, self.iterations * 2)
ax = plot.get_xaxis()
ax.limit_range_for_scale(self.min_latency, self.max_latency)
# There isn't any one-size-fits-all for placing Ticks. So, choose Tick
# Locator depending on the range of data.
if self.max_latency - self.min_latency < 100:
ax.set_major_locator(MaxNLocator(nbins=5, steps=[1, 2, 3, 4, 5],
min_n_ticks=4))
plot.minorticks_off()
else:
ax.set_major_locator(LogLocator())
# Format the ticks and enable grid
ax.set_minor_formatter(FormatStrFormatter("%.2f"))
ax.set_major_formatter(FormatStrFormatter("%.2f"))
plot.tick_params(axis='x', which='both', labelsize='xx-small',
labelrotation=45)
plot.grid(b=True, which='both', axis='x', linewidth=0.3)
def plot(self, title, filename):
for ax in self.fig.get_axes():
ax.label_outer()
self.plots[-1].set_xlabel(self.xlabel)
plt.tight_layout(pad=1.5)
self.fig.suptitle(title, y=0.99)
self.fig.savefig(filename, dpi=300)
plt.close()
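# Illustrative usage sketch (values are placeholders): one histogram per data
# series, all sharing the same latency axis.
#   grp = HistogramGroupPlot(group_size=2, min_latency=1.0, max_latency=500.0,
#                            iterations=100000)
#   hist, edges = np.histogram(lat_a, bins='sqrt')
#   grp.add_histogram(hist, edges, lat_a.mean(), lat_a.std(), 'core 0')
#   grp.plot('Wakeup latency', 'group.png')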
class RunSequencePlot:
def __init__(self, data):
self.data = data
self.colour_masks = [
{'mask': np.full_like(self.data[1], True, dtype=bool),
'colour': 'C0'}]
def _format_xtick(self, x, pos):
return x / 1000000
def _plot_x_label(self, axis):
xaxis = axis.get_xaxis()
if isinstance(self.data[0], np.ndarray):
xaxis.set_major_formatter(FuncFormatter(self._format_xtick))
axis.set_xlabel('Iterations (millions)', fontsize='x-small')
elif np.issubdtype(self.data[0], np.datetime64):
xaxis_fmt = mdates.DateFormatter("%H:%M:%S")
xaxis.set_major_formatter(xaxis_fmt)
axis.set_xlabel('Time (hh:mm:ss)', fontsize='x-small')
axis.tick_params('x', labelrotation=90)
plt.subplots_adjust(bottom=0.2)
else:
raise Exception('Unknown indices type')
xaxis.set_minor_locator(AutoMinorLocator())
xaxis.set_major_locator(MaxNLocator(nbins='auto', prune='upper'))
def _plot_y_label(self, axis):
axis.set_ylabel('Latency (us)', fontsize='x-small')
axis.tick_params(labelsize='xx-small')
axis.margins(x=0)
def _plot_scatter(self, axis, indices, values):
for mask in self.colour_masks:
axis.plot(indices[mask['mask']], values[mask['mask']], marker='.',
markersize=1, linestyle='', c=mask['colour'],
rasterized=True)
def _plot_histogram(self, axis, values):
hist, edges = np.histogram(values, bins='sqrt')
axis.fill_betweenx(edges[:-1], hist, color='#9ec0ff',
antialiased=False, rasterized=True)
axis.set_yticks([])
axis.set_xscale('log')
axis.set_xlim(left=0.9, right=len(values))
axis.minorticks_on()
axis.tick_params(labelsize='xx-small')
axis.grid(True, which='both', axis='x', linewidth=0.3)
axis.set_xlabel('Frequency', fontsize='x-small')
def plot(self, title, filename):
fig, axes = plt.subplots(1, 2, gridspec_kw={'width_ratios': [2, 1],
'wspace': 0.01})
indices = self.data[0]
values = self.data[1]
self._plot_x_label(axes[0])
self._plot_y_label(axes[0])
self._plot_scatter(axes[0], indices, values)
self._plot_histogram(axes[1], values)
fig.suptitle(title, fontsize=8)
plt.savefig(filename, dpi=300)
plt.close()
class RunSequenceGroupPlot(RunSequencePlot):
def __init__(self, data_list):
self.data_list = data_list
self.colour_masks = dict()
for data in self.data_list:
self.colour_masks[data[0]] = [
{'mask': np.full_like(data[2], True, dtype=bool),
'colour': 'C0'}]
def plot(self, title, filename):
fig, axes = plt.subplots(len(self.data_list), 2,
gridspec_kw={'width_ratios': [2, 1],
'wspace': 0.01})
if not isinstance(axes[0], np.ndarray):
axes = | np.array([axes]) | numpy.array |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import matplotlib, warnings
import numpy as np
import CoolProp
from CoolProp.CoolProp import PropsSI
from CoolProp.Plots.Common import BasePlot, PropertyDict, SIunits
def SimpleCycle(Ref, Te, Tc, DTsh, DTsc, eta_a, Ts_Ph='Ph', **kwargs):
"""
This function plots a simple four-component cycle, on the current axis, or that given by the optional parameter *axis*
Required parameters:
* Ref : A string for the refrigerant
* Te : Evap Temperature in K
* Tc : Condensing Temperature in K
* DTsh : Evaporator outlet superheat in K
* DTsc : Condenser outlet subcooling in K
* eta_a : Adiabatic efficiency of compressor (no units) in range [0,1]
Optional parameters:
* Ts_Ph : 'Ts' for a Temperature-Entropy plot, 'Ph' for a Pressure-Enthalpy
* axis : An axis to use instead of the active axis
* skipPlot : If True, won't actually plot anything, just print COP
"""
warnings.warn("This function has been deprecated. Please consider converting it to an object inheriting from \"BaseCycle\".", DeprecationWarning)
for i in kwargs:
warnings.warn("This function has been deprecated, your input \"{0}: {1}\" will be ignored".format(i, kwargs[i]), DeprecationWarning)
from CoolProp.Plots import SimpleCompressionCycle
cycle = SimpleCompressionCycle(fluid_ref=Ref, graph_type=Ts_Ph)
cycle.simple_solve_dt(Te, Tc, DTsh, DTsc, eta_a, SI=True)
print(cycle.COP_cooling(), cycle.COP_heating())
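# Illustrative call (fluid and state values are placeholders):
#   SimpleCycle('R134a', Te=265.0, Tc=310.0, DTsh=5.0, DTsc=5.0, eta_a=0.7)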
def TwoStage(Ref, Q, Te, Tc, DTsh, DTsc, eta_oi, f_p, Tsat_ic, DTsh_ic, Ts_Ph='Ph', prints=False, skipPlot=False, axis=None, **kwargs):
"""
This function plots a two-stage cycle, on the current axis, or that given by the optional parameter *axis*
Required parameters:
* Ref : Refrigerant [string]
* Q : Cooling capacity [W]
* Te : Evap Temperature [K]
* Tc : Condensing Temperature [K]
* DTsh : Evaporator outlet superheat [K]
* DTsc : Condenser outlet subcooling [K]
* eta_oi : Adiabatic efficiency of compressor (no units) in range [0,1]
* f_p : fraction of compressor power lost as ambient heat transfer in range [0,1]
* Tsat_ic : Saturation temperature corresponding to intermediate pressure [K]
* DTsh_ic : Superheating at outlet of intermediate stage [K]
Optional parameters:
* Ts_Ph : 'Ts' for a Temperature-Entropy plot, 'Ph' for a Pressure-Enthalpy
* prints : True to print out some values
* axis : An axis to use instead of the active axis
* skipPlot : If True, won't actually plot anything, just print COP
"""
    warnings.warn("This function has been deprecated. Please consider converting it to an object inheriting from \"BaseCycle\".", DeprecationWarning)
T = np.zeros((8))
h = np.zeros_like(T)
p = np.zeros_like(T)
s = np.zeros_like(T)
rho = np.zeros_like(T)
T[0] = np.NAN
s[0] = np.NAN
T[1] = Te + DTsh
pe = PropsSI('P', 'T', Te, 'Q', 1.0, Ref)
pc = PropsSI('P', 'T', Tc, 'Q', 1.0, Ref)
pic = PropsSI('P', 'T', Tsat_ic, 'Q', 1.0, Ref)
Tbubble_c = PropsSI('T', 'P', pc, 'Q', 0, Ref)
Tbubble_e = PropsSI('T', 'P', pe, 'Q', 0, Ref)
h[1] = PropsSI('H', 'T', T[1], 'P', pe, Ref)
s[1] = PropsSI('S', 'T', T[1], 'P', pe, Ref)
rho[1] = PropsSI('D', 'T', T[1], 'P', pe, Ref)
T[5] = Tbubble_c - DTsc
h[5] = PropsSI('H', 'T', T[5], 'P', pc, Ref)
s[5] = PropsSI('S', 'T', T[5], 'P', pc, Ref)
rho[5] = PropsSI('D', 'T', T[5], 'P', pc, Ref)
mdot = Q / (h[1] - h[5])
rho1 = PropsSI('D', 'T', T[1], 'P', pe, Ref)
h2s = PropsSI('H', 'S', s[1], 'P', pic, Ref)
Wdot1 = mdot * (h2s - h[1]) / eta_oi
h[2] = h[1] + (1 - f_p) * Wdot1 / mdot
T[2] = PropsSI('T', 'H', h[2], 'P', pic, Ref)
s[2] = PropsSI('S', 'T', T[2], 'P', pic, Ref)
rho[2] = PropsSI('D', 'T', T[2], 'P', pic, Ref)
T[3] = 288
p[3] = pic
h[3] = PropsSI('H', 'T', T[3], 'P', pic, Ref)
s[3] = PropsSI('S', 'T', T[3], 'P', pic, Ref)
rho[3] = PropsSI('D', 'T', T[3], 'P', pic, Ref)
rho3 = PropsSI('D', 'T', T[3], 'P', pic, Ref)
    h4s = PropsSI('H', 'S', s[3], 'P', pc, Ref)
Wdot2 = mdot * (h4s - h[3]) / eta_oi
h[4] = h[3] + (1 - f_p) * Wdot2 / mdot
T[4] = PropsSI('T', 'H', h[4], 'P', pc, Ref)
s[4] = PropsSI('S', 'T', T[4], 'P', pc, Ref)
rho[4] = PropsSI('D', 'T', T[4], 'P', pc, Ref)
sbubble_e = PropsSI('S', 'T', Tbubble_e, 'Q', 0, Ref)
sbubble_c = PropsSI('S', 'T', Tbubble_c, 'Q', 0, Ref)
sdew_e = PropsSI('S', 'T', Te, 'Q', 1, Ref)
sdew_c = PropsSI('S', 'T', Tc, 'Q', 1, Ref)
hsatL = PropsSI('H', 'T', Tbubble_e, 'Q', 0, Ref)
hsatV = PropsSI('H', 'T', Te, 'Q', 1, Ref)
ssatL = PropsSI('S', 'T', Tbubble_e, 'Q', 0, Ref)
ssatV = PropsSI('S', 'T', Te, 'Q', 1, Ref)
vsatL = 1 / PropsSI('D', 'T', Tbubble_e, 'Q', 0, Ref)
vsatV = 1 / PropsSI('D', 'T', Te, 'Q', 1, Ref)
x = (h[5] - hsatL) / (hsatV - hsatL)
s[6] = x * ssatV + (1 - x) * ssatL
T[6] = x * Te + (1 - x) * Tbubble_e
rho[6] = 1.0 / (x * vsatV + (1 - x) * vsatL)
h[6] = h[5]
h[7] = h[1]
s[7] = s[1]
T[7] = T[1]
p = [np.nan, pe, pic, pic, pc, pc, pe, pe]
COP = Q / (Wdot1 + Wdot2)
RE = h[1] - h[6]
if prints == True:
print('x5:', x)
print('COP:', COP)
print('COPH', (Q + Wdot1 + Wdot2) / (Wdot1 + Wdot2))
print(T[2] - 273.15, T[4] - 273.15, p[2] / p[1], p[4] / p[3])
print(mdot, mdot * (h[4] - h[5]), pic)
print('Vdot1', mdot / rho1, 'Vdisp', mdot / rho1 / (3500 / 60.) * 1e6 / 0.7)
print('Vdot2', mdot / rho3, 'Vdisp', mdot / rho3 / (3500 / 60.) * 1e6 / 0.7)
print(mdot * (h[4] - h[5]), Tc - 273.15)
for i in range(1, len(T) - 1):
print('%d & %g & %g & %g & %g & %g \\\\' % (i, T[i] - 273.15, p[i], h[i], s[i], rho[i]))
else:
print(Tsat_ic, COP)
if skipPlot == False:
        if axis is None:
ax = matplotlib.pyplot.gca()
else:
ax = axis
if Ts_Ph in ['ph', 'Ph']:
ax.plot(h, p)
elif Ts_Ph in ['Ts', 'ts']:
s_copy = s.copy()
T_copy = T.copy()
for i in range(1, len(s) - 1):
ax.plot(s[i], T[i], 'bo', mfc='b', mec='b')
dT = [0, -5, 5, -20, 5, 5, 5]
ds = [0, 0.05, 0, 0, 0, 0, 0]
ax.text(s[i] + ds[i], T[i] + dT[i], str(i))
s = list(s)
T = list(T)
s.insert(7, sdew_e)
T.insert(7, Te)
s.insert(5, sbubble_c)
T.insert(5, Tbubble_c)
s.insert(5, sdew_c)
T.insert(5, Tc)
ax.plot(s, T)
s = s_copy
T = T_copy
else:
raise TypeError('Type of Ts_Ph invalid')
return COP
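# Illustrative call (refrigerant and state values are placeholders):
#   COP = TwoStage('R404A', Q=10e3, Te=270.0, Tc=315.0, DTsh=5.0, DTsc=5.0,
#                  eta_oi=0.7, f_p=0.1, Tsat_ic=290.0, DTsh_ic=5.0,
#                  prints=True, skipPlot=True)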
def EconomizedCycle(Ref, Qin, Te, Tc, DTsh, DTsc, eta_oi, f_p, Ti, Ts_Ph='Ts', skipPlot=False, axis=None, **kwargs):
"""
This function plots an economized cycle, on the current axis, or that given by the optional parameter *axis*
Required parameters:
* Ref : Refrigerant [string]
* Qin : Cooling capacity [W]
* Te : Evap Temperature [K]
* Tc : Condensing Temperature [K]
* DTsh : Evaporator outlet superheat [K]
* DTsc : Condenser outlet subcooling [K]
* eta_oi : Adiabatic efficiency of compressor (no units) in range [0,1]
* f_p : fraction of compressor power lost as ambient heat transfer in range [0,1]
* Ti : Saturation temperature corresponding to intermediate pressure [K]
Optional parameters:
* Ts_Ph : 'Ts' for a Temperature-Entropy plot, 'Ph' for a Pressure-Enthalpy
* axis : An axis to use instead of the active axis
* skipPlot : If True, won't actually plot anything, just print COP
"""
warnings.warn("This function has been deprecated. Please consider converting it to an object inheriting from \"BaseCycle\".", DeprecationWarning)
from scipy.optimize import newton
m = 1
T = np.zeros((11))
h = np.zeros_like(T)
p = np.zeros_like(T)
s = np.zeros_like(T)
rho = np.zeros_like(T)
T[0] = np.NAN
s[0] = np.NAN
T[1] = Te + DTsh
pe = PropsSI('P', 'T', Te, 'Q', 1.0, Ref)
pc = PropsSI('P', 'T', Tc, 'Q', 1.0, Ref)
pi = PropsSI('P', 'T', Ti, 'Q', 1.0, Ref)
p[1] = pe
h[1] = PropsSI('H', 'T', T[1], 'P', pe, Ref)
s[1] = PropsSI('S', 'T', T[1], 'P', pe, Ref)
rho[1] = PropsSI('D', 'T', T[1], 'P', pe, Ref)
h2s = PropsSI('H', 'S', s[1], 'P', pi, Ref)
wdot1 = (h2s - h[1]) / eta_oi
h[2] = h[1] + (1 - f_p[0]) * wdot1
p[2] = pi
# T[2]=T_hp(Ref,h[2],pi,T2s)
T[2] = PropsSI('T', 'H', h[2], 'P', pi, Ref)
s[2] = PropsSI('S', 'T', T[2], 'P', pi, Ref)
rho[2] = PropsSI('D', 'T', T[2], 'P', pi, Ref)
T[5] = Tc - DTsc
h[5] = PropsSI('H', 'T', T[5], 'P', pc, Ref)
s[5] = PropsSI('S', 'T', T[5], 'P', pc, Ref)
rho[5] = PropsSI('D', 'T', T[5], 'P', pc, Ref)
p[5] = pc
p[6] = pi
h[6] = h[5]
p[7] = pi
p[8] = pi
p[6] = pi
T[7] = Ti
h[7] = PropsSI('H', 'T', Ti, 'Q', 1, Ref)
s[7] = PropsSI('S', 'T', Ti, 'Q', 1, Ref)
rho[7] = PropsSI('D', 'T', Ti, 'Q', 1, Ref)
T[8] = Ti
h[8] = PropsSI('H', 'T', Ti, 'Q', 0, Ref)
s[8] = PropsSI('S', 'T', Ti, 'Q', 0, Ref)
rho[8] = PropsSI('D', 'T', Ti, 'Q', 0, Ref)
x6 = (h[6] - h[8]) / (h[7] - h[8]) # Vapor Quality
s[6] = s[7] * x6 + s[8] * (1 - x6)
rho[6] = 1.0 / (x6 / rho[7] + (1 - x6) / rho[8])
T[6] = Ti
# Injection mass flow rate
x = m * (h[6] - h[8]) / (h[7] - h[6])
p[3] = pi
h[3] = (m * h[2] + x * h[7]) / (m + x)
# T[3]=T_hp(Ref,h[3],pi,T[2])
T[3] = PropsSI('T', 'H', h[3], 'P', pi, Ref)
s[3] = PropsSI('S', 'T', T[3], 'P', pi, Ref)
rho[3] = PropsSI('D', 'T', T[3], 'P', pi, Ref)
T4s = newton(lambda T: PropsSI('S', 'T', T, 'P', pc, Ref) - s[3], T[2] + 30)
h4s = PropsSI('H', 'T', T4s, 'P', pc, Ref)
p[4] = pc
wdot2 = (h4s - h[3]) / eta_oi
h[4] = h[3] + (1 - f_p[1]) * wdot2
# T[4]=T_hp(Ref,h[4],pc,T4s)
T[4] = PropsSI('T', 'H', h[4], 'P', pc, Ref)
s[4] = PropsSI('S', 'T', T[4], 'P', pc, Ref)
rho[4] = PropsSI('D', 'T', T[4], 'P', pc, Ref)
p[9] = pe
h[9] = h[8]
T[9] = Te
hsatL_e = PropsSI('H', 'T', Te, 'Q', 0, Ref)
hsatV_e = PropsSI('H', 'T', Te, 'Q', 1, Ref)
ssatL_e = PropsSI('S', 'T', Te, 'Q', 0, Ref)
ssatV_e = PropsSI('S', 'T', Te, 'Q', 1, Ref)
vsatL_e = 1 / PropsSI('D', 'T', Te, 'Q', 0, Ref)
vsatV_e = 1 / PropsSI('D', 'T', Te, 'Q', 1, Ref)
x9 = (h[9] - hsatL_e) / (hsatV_e - hsatL_e) # Vapor Quality
s[9] = ssatV_e * x9 + ssatL_e * (1 - x9)
rho[9] = 1.0 / (x9 * vsatV_e + (1 - x9) * vsatL_e)
s[10] = s[1]
T[10] = T[1]
h[10] = h[1]
p[10] = p[1]
Tbubble_e = Te
Tbubble_c = Tc
sbubble_e = PropsSI('S', 'T', Tbubble_e, 'Q', 0, Ref)
sbubble_c = PropsSI('S', 'T', Tbubble_c, 'Q', 0, Ref)
sdew_e = PropsSI('S', 'T', Te, 'Q', 1, Ref)
sdew_c = PropsSI('S', 'T', Tc, 'Q', 1, Ref)
Wdot1 = m * wdot1
Wdot2 = (m + x) * wdot2
if skipPlot == False:
        if axis is None:
ax = matplotlib.pyplot.gca()
else:
ax = axis
if Ts_Ph in ['ph', 'Ph']:
ax.plot(h, p)
ax.set_yscale('log')
elif Ts_Ph in ['Ts', 'ts']:
ax.plot(np.r_[s[7], s[3]], np.r_[T[7], T[3]], 'b')
s_copy = s.copy()
T_copy = T.copy()
dT = [0, -5, 5, -12, 5, 12, -12, 0, 0, 0]
ds = [0, 0.05, 0.05, 0, 0.05, 0, 0.0, 0.05, -0.05, -0.05]
for i in range(1, len(s) - 1):
ax.plot(s[i], T[i], 'bo', mfc='b', mec='b')
ax.text(s[i] + ds[i], T[i] + dT[i], str(i), ha='center', va='center')
s = list(s)
T = list(T)
s.insert(10, sdew_e)
T.insert(10, Te)
s.insert(5, sbubble_c)
T.insert(5, Tbubble_c)
s.insert(5, sdew_c)
T.insert(5, Tc)
ax.plot(s, T, 'b')
s = s_copy
T = T_copy
else:
raise TypeError('Type of Ts_Ph invalid')
COP = m * (h[1] - h[9]) / (m * (h[2] - h[1]) + (m + x) * (h[4] - h[3]))
for i in range(1, len(T) - 1):
print('%d & %g & %g & %g & %g & %g \\\\' % (i, T[i] - 273.15, p[i], h[i], s[i], rho[i]))
print(x, m * (h[1] - h[9]), (m * (h[2] - h[1]) + (m + x) * (h[4] - h[3])), COP)
mdot = Qin / (h[1] - h[9])
mdot_inj = x * mdot
print('x9', x9,)
print('Qcond', (mdot + mdot_inj) * (h[4] - h[5]), 'T4', T[4] - 273.15)
print(mdot, mdot + mdot_inj)
f = 3500 / 60.
eta_v = 0.7
print('Vdisp1: ', mdot / (rho[1] * f * eta_v) * 1e6, 'cm^3')
print('Vdisp2: ', (mdot + mdot_inj) / (rho[1] * f * eta_v) * 1e6, 'cm^3')
return COP
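# Illustrative call (refrigerant and state values are placeholders; note that
# f_p is indexed as a two-element sequence inside this function):
#   COP = EconomizedCycle('R290', Qin=10e3, Te=270.0, Tc=315.0, DTsh=5.0,
#                         DTsc=5.0, eta_oi=0.7, f_p=[0.1, 0.1], Ti=290.0,
#                         skipPlot=True)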
# class SimpleCycle(object):
# """A class that calculates a simple thermodynamic cycle"""
# def __init__(self, *args, **kwargs):
# object.__init__(self, *args, **kwargs)
# (states, steps, fluid):
# Parameters
# ----------
# x_type : int, str
# Either a letter or an integer that specifies the property type for the x-axis
# y_type : int, str
# Either a letter or an integer that specifies the property type for the y-axis
# states : list
# A collection of state points that follows a fixed scheme defined
# in the implementing subclass.
# fluid_ref : str, CoolProp.AbstractState
# The fluid property provider, either a subclass of CoolProp.AbstractState
# or a string that can be used to generate a CoolProp.AbstractState instance
# via :func:`Common.process_fluid_state`.
# steps : int
# The number of steps used for going from one state to another
#
# for more properties, see :class:`CoolProp.Plots.Common.Base2DObject`.
# # See http://stackoverflow.com/questions/1061283/lt-instead-of-cmp
# class ComparableMixin:
#     """A mixin class that implements all comparing methods except for __lt__"""
# def __eq__(self, other):
# return not self<other and not other<self
# def __ne__(self, other):
# return self<other or other<self
# def __gt__(self, other):
# return other<self
# def __ge__(self, other):
# return not self<other
# def __le__(self, other):
# return not other<self
class StatePoint(PropertyDict):
"""A simple fixed dimension dict represented by an object with attributes"""
# Significant digits in SI units
ROUND_DECIMALS = {
CoolProp.iDmass: 5,
CoolProp.iHmass: 5,
CoolProp.iP: 2,
CoolProp.iSmass: 5,
CoolProp.iT: 5,
CoolProp.iUmass: 5,
CoolProp.iQ: 5
}
def __iter__(self):
"""Make sure we always iterate in the same order"""
keys = [CoolProp.iDmass, CoolProp.iHmass, CoolProp.iP, CoolProp.iSmass, CoolProp.iT]
for key in sorted(keys):
yield key
def __str__(self):
return str(self.__dict__)
def __prop_compare(self, other, typ):
# TODO
if self[typ] is None and other[typ] is None: return 0
elif self[typ] is None and other[typ] is not None: return -1
elif self[typ] is not None and other[typ] is None: return 1
else:
A = np.round(self[typ], self.ROUND_DECIMALS[typ])
B = np.round(other[typ], self.ROUND_DECIMALS[typ])
if A > B: return 1
elif A < B: return -1
elif A == B: return 0
else: raise ValueError("Comparison failed.")
def __eq__(self, other):
for i in self:
if not self.__prop_compare(other, i) == 0:
return False
return True
def __hash__(self):
return hash(repr(self))
class StateContainer(object):
"""A collection of values for the main properties, built to mixin with :class:`CoolProp.Plots.Common.PropertyDict`
Examples
--------
This container has overloaded accessor methods. Just pick your own flavour
or mix the styles as you like:
>>> from __future__ import print_function
>>> import CoolProp
>>> from CoolProp.Plots.SimpleCycles import StateContainer
>>> T0 = 300.000; p0 = 200000.000; h0 = 112745.749; s0 = 393.035
>>> cycle_states = StateContainer()
>>> cycle_states[0,'H'] = h0
>>> cycle_states[0]['S'] = s0
>>> cycle_states[0][CoolProp.iP] = p0
>>> cycle_states[0,CoolProp.iT] = T0
>>> cycle_states[1,"T"] = 300.064
>>> print(cycle_states)
Stored State Points:
state T (K) p (Pa) d (kg/m3) h (J/kg) s (J/kg/K)
0 300.000 200000.000 - 112745.749 393.035
1 300.064 - - - -
"""
def __init__(self, unit_system=SIunits()):
self._points = {}
self._units = unit_system
@property
def points(self): return self._points
@points.setter
def points(self, value): self._points = value
@property
def units(self): return self._units
@units.setter
def units(self, value): self._units = value
def get_point(self, index, SI=True):
if SI:
state = self[index]
else:
state = self[index]
for i in state:
state[i] = self.units[i].from_SI(state[i])
return state
def set_point(self, index, value, SI=True):
if SI:
self._points[index] = value
else:
for i in value:
self._points[index][i] = self.units[i].to_SI(value[i])
def _list_like(self, value):
"""Try to detect a list-like structure excluding strings"""
return (not hasattr(value, "strip") and
(hasattr(value, "__getitem__") or
hasattr(value, "__iter__")))
# return is_sequence(value) # use from pandas.core.common import is_sequence
def __len__(self):
"""Some cheating to get the correct behaviour"""
return len(self._points)
def __iter__(self):
"""Make sure we iterate in the righ order"""
for key in sorted(self._points):
yield key
def __getitem__(self, index):
"""Another tweak that changes the default access path"""
if self._list_like(index):
len_var = len(index)
if len_var == 0:
raise IndexError("Received empty index.")
elif len_var == 1:
return self._points[index[0]]
elif len_var == 2:
return self._points[index[0]][index[1]]
else:
raise IndexError("Received too long index.")
return self._points[index]
def __setitem__(self, index, value):
"""Another tweak that changes the default access path"""
if self._list_like(index):
len_var = len(index)
if len_var == 0:
raise IndexError("Received empty index.")
elif len_var == 1:
self._points[index[0]] = value
elif len_var == 2:
# safeguard against empty entries
if index[0] not in self._points:
self._points[index[0]] = StatePoint()
self._points[index[0]][index[1]] = value
else:
raise IndexError("Received too long index.")
else:
self._points[index] = value
def __str__(self):
out = "Stored State Points:\n"
keys = True
for i in self._points:
if keys:
row = [u"{0:>5s}".format("state")]
for j in self._points[i]:
label = u"{0:s} ({1:s})".format(self.units[j].symbol, self.units[j].unit)
row.append(u"{0:>11s}".format(label))
out = out + u" ".join(row) + "\n"
keys = False
row = [u"{0:>5s}".format(str(i))]
for j in self._points[i]:
try:
row.append(u"{0:11.3f}".format(self.units[j].from_SI(self._points[i][j])))
except:
row.append(u"{0:>11s}".format("-"))
out = out + u" ".join(row) + "\n"
return out
def append(self, new):
i = 0 + self.__len__()
for j in new:
self[i, j] = new[j]
return self
def extend(self, new):
i = 0 + self.__len__()
for j in new:
for k in new[j]:
self[i, k] = new[j][k]
i = i + 1
return self
@property
def D(self): return np.array([self._points[k].D for k in self])
@property
def H(self): return np.array([self._points[k].H for k in self])
@property
def P(self): return np.array([self._points[k].P for k in self])
@property
def S(self): return np.array([self._points[k].S for k in self])
@property
def T(self): return np.array([self._points[k].T for k in self])
@property
def U(self): return | np.array([self._points[k].U for k in self]) | numpy.array |
import numpy as np
import pytest
from scipy import sparse
from xugrid import connectivity
@pytest.fixture(scope="function")
def triangle_mesh():
fill_value = -1
# Two triangles
faces = np.array(
[
[0, 1, 2],
[1, 3, 2],
]
)
return faces, fill_value
@pytest.fixture(scope="function")
def mixed_mesh():
fill_value = -1
# Triangle, quadrangle
faces = np.array(
[
[0, 1, 2, fill_value],
[1, 3, 4, 2],
]
)
return faces, fill_value
def test_neighbors():
i = [0, 0, 0, 1, 1, 1]
j = [0, 1, 2, 1, 3, 2]
coo_content = (j, (i, j))
A = sparse.coo_matrix(coo_content).tocsr()
A = connectivity.AdjacencyMatrix(A.indices, A.indptr, A.nnz)
assert np.array_equal(connectivity.neighbors(A, 0), [0, 1, 2])
assert np.array_equal(connectivity.neighbors(A, 1), [1, 2, 3])
def test_to_ij(triangle_mesh, mixed_mesh):
faces, fill_value = triangle_mesh
actual_i, actual_j = connectivity._to_ij(faces, fill_value, invert=False)
expected_i = [0, 0, 0, 1, 1, 1]
expected_j = [0, 1, 2, 1, 3, 2]
assert np.array_equal(actual_i, expected_i)
assert np.array_equal(actual_j, expected_j)
# Inverted
actual_i, actual_j = connectivity._to_ij(faces, fill_value, invert=True)
assert np.array_equal(actual_i, expected_j)
assert np.array_equal(actual_j, expected_i)
faces, fill_value = mixed_mesh
actual_i, actual_j = connectivity._to_ij(faces, fill_value, invert=False)
expected_i = [0, 0, 0, 1, 1, 1, 1]
expected_j = [0, 1, 2, 1, 3, 4, 2]
assert np.array_equal(actual_i, expected_i)
assert np.array_equal(actual_j, expected_j)
# Inverted
actual_i, actual_j = connectivity._to_ij(faces, fill_value, invert=True)
assert np.array_equal(actual_i, expected_j)
assert np.array_equal(actual_j, expected_i)
def test_to_sparse(mixed_mesh):
faces, fill_value = mixed_mesh
csr = connectivity._to_sparse(faces, fill_value, invert=False, sort_indices=True)
expected_j = np.array([0, 1, 2, 1, 2, 3, 4])
assert np.array_equal(csr.indices, expected_j)
assert csr.has_sorted_indices
csr = connectivity._to_sparse(faces, fill_value, invert=False, sort_indices=False)
expected_j = np.array([0, 1, 2, 1, 3, 4, 2])
assert np.array_equal(csr.indices, expected_j)
assert not csr.has_sorted_indices
def test_ragged_index():
n = 3
m = 4
m_per_row = np.array([1, 2, 3])
actual = connectivity.ragged_index(n, m, m_per_row)
expected = np.array(
[
[True, False, False, False],
[True, True, False, False],
[True, True, True, False],
]
)
assert np.array_equal(actual, expected)
def test_sparse_dense_conversion_roundtrip(triangle_mesh, mixed_mesh):
faces, fill_value = triangle_mesh
sparse = connectivity.to_sparse(faces, fill_value)
back = connectivity.to_dense(sparse, fill_value)
# Note: roundtrip does not preserve CW/CCW orientation, since orientation
# does not apply to node_face_connectivity, but the sorted rows should
# contain the same elements.
    assert np.array_equal(np.sort(faces, axis=1), np.sort(back, axis=1))
faces, fill_value = mixed_mesh
sparse = connectivity.to_sparse(faces, fill_value)
back = connectivity.to_dense(sparse, fill_value)
    assert np.array_equal(np.sort(faces, axis=1), np.sort(back, axis=1))
def test_invert_dense(triangle_mesh, mixed_mesh):
faces, fill_value = triangle_mesh
actual = connectivity.invert_dense(faces, fill_value)
expected = np.array(
[
[0, -1], # 0
[0, 1], # 1
[0, 1], # 2
[1, -1], # 3
]
)
assert np.array_equal(actual, expected)
faces, fill_value = mixed_mesh
actual = connectivity.invert_dense(faces, fill_value)
expected = np.array(
[
[0, -1], # 0
[0, 1], # 1
[0, 1], # 2
[1, -1], # 3
[1, -1], # 4
]
)
assert np.array_equal(actual, expected)
def test_invert_sparse(triangle_mesh, mixed_mesh):
faces, fill_value = triangle_mesh
sparse = connectivity.to_sparse(faces, fill_value)
inverted = connectivity.invert_sparse(sparse)
actual = connectivity.to_dense(inverted, fill_value)
expected = np.array(
[
[0, -1], # 0
[0, 1], # 1
[0, 1], # 2
[1, -1], # 3
]
)
assert np.array_equal(actual, expected)
faces, fill_value = mixed_mesh
sparse = connectivity.to_sparse(faces, fill_value)
inverted = connectivity.invert_sparse(sparse)
actual = connectivity.to_dense(inverted, fill_value)
expected = np.array(
[
[0, -1], # 0
[0, 1], # 1
[0, 1], # 2
[1, -1], # 3
[1, -1], # 4
]
)
assert np.array_equal(actual, expected)
def test_renumber():
a = np.array(
[
[0, 1, 2],
[10, 11, 12],
[30, 31, 32],
]
)
actual = connectivity.renumber(a)
expected = np.array(
[
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
]
)
assert np.array_equal(actual, expected)
a = np.array(
[
[0, 1, 2],
[10, 11, 2],
[30, 31, 2],
]
)
actual = connectivity.renumber(a)
expected = np.array(
[
[0, 1, 2],
[3, 4, 2],
[5, 6, 2],
]
)
assert np.array_equal(actual, expected)
def test_close_polygons(mixed_mesh):
faces, fill_value = mixed_mesh
closed, isfill = connectivity.close_polygons(faces, fill_value)
expected = np.array(
[
[0, 1, 2, 0, 0],
[1, 3, 4, 2, 1],
]
)
expected_isfill = np.full((2, 5), False)
expected_isfill[0, -2:] = True
expected_isfill[1, -1] = True
assert np.array_equal(closed, expected)
assert np.array_equal(isfill, expected_isfill)
def test_reverse_orientation(mixed_mesh):
faces, fill_value = mixed_mesh
reverse = connectivity.reverse_orientation(faces, fill_value)
expected = np.array(
[
[2, 1, 0, fill_value],
[2, 4, 3, 1],
]
)
assert np.array_equal(reverse, expected)
def test_counterclockwise():
nodes = np.array(
[
[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[0.0, 2.0],
]
)
fill_value = -1
# Already ccw, nothing should be changed.
faces = np.array([[0, 2, 3, -1]])
actual = connectivity.counterclockwise(faces, fill_value, nodes)
assert np.array_equal(actual, faces)
# Clockwise with a fill value, reverse.
faces_cw = np.array([[3, 2, 0, -1]])
actual = connectivity.counterclockwise(faces_cw, fill_value, nodes)
assert np.array_equal(actual, faces)
# Including a hanging node, ccw, nothing changed.
hanging_ccw = np.array([[0, 1, 2, 3, -1]])
actual = connectivity.counterclockwise(hanging_ccw, fill_value, nodes)
assert np.array_equal(actual, hanging_ccw)
# Including a hanging node, reverse.
hanging_cw = np.array([[3, 2, 1, 0, -1]])
actual = connectivity.counterclockwise(hanging_cw, fill_value, nodes)
assert np.array_equal(actual, hanging_ccw)
def test_edge_connectivity(mixed_mesh):
faces, fill_value = mixed_mesh
edge_nodes, face_edges = connectivity.edge_connectivity(faces, fill_value)
expected_edge_nodes = np.array(
[
[0, 1],
[0, 2],
[1, 2],
[1, 3],
[2, 4],
[3, 4],
]
)
expected_face_edges = np.array(
[
[0, 2, 1, -1],
[3, 5, 4, 2],
]
)
assert np.array_equal(edge_nodes, expected_edge_nodes)
assert np.array_equal(face_edges, expected_face_edges)
def test_face_face_connectivity():
edge_faces = np.array(
[
[0, -1],
[0, -1],
[0, 1],
[1, -1],
[1, -1],
[1, -1],
]
)
face_face = connectivity.face_face_connectivity(edge_faces, fill_value=-1)
assert isinstance(face_face, sparse.csr_matrix)
assert np.array_equal(face_face.indices, [1, 0])
assert np.array_equal(face_face.indptr, [0, 1, 2])
def test_centroids(mixed_mesh):
faces, fill_value = mixed_mesh
nodes = np.array(
[
[0.0, 0.0],
[1.0, 0.0],
[1.0, 1.0],
[2.0, 0.0],
[2.0, 1.0],
]
)
actual = connectivity.centroids(faces, fill_value, nodes[:, 0], nodes[:, 1])
expected = np.array(
[
[2.0 / 3.0, 1.0 / 3.0],
[1.5, 0.5],
]
)
assert | np.allclose(actual, expected) | numpy.allclose |
###############################################################################
# Author: <NAME>
# E-mail: <EMAIL>
# Date created: 07.18.2020
# Date last modified: 07.18.2020
# Python Version: 3.7
###############################################################################
import numpy as np
import copy
class QRNN:
def __init__(self,
layer_list,
time_steps = 0,
weight_scaler = 1,
firing_rate_scaler = 0,
learning_rate = 0.1,
loss_function = 'mse',
optimizer = 'sgd'):
#Assign basic parameters
self.dh_list = []
self.learning_rate = learning_rate
self.shape = layer_list
self.time_steps = time_steps
self.loss_func = loss_function
self.optimizer = optimizer
n = len(layer_list)
self.gradient_list=[]
#Initialize layers
self.layers = []
for i in range(n):
self.layers.append(np.zeros(self.shape[i]))
#Initialize rates for horizontal and vertical
        self.rate = [] #Vertical rates : sum of vertical weights
for i in range(n):
self.rate.append(np.zeros((self.shape[i],1)))
self.rate[-1] = firing_rate_scaler * np.ones(self.rate[-1].shape)
        self.rate_h = np.zeros((self.shape[1],1)) #Horizontal rates : sum of horizontal weights
# self.rate.shape = (H,)
        #Q and D are initialized with zeros
self.Q_intime = []
self.D_intime = []
if self.time_steps != 0:
for t in range(self.time_steps+1):
Q = []
for i in range(n):
Q.append(np.zeros((self.shape[i],1)))
self.Q_intime.append(copy.deepcopy(Q))
self.D_intime = copy.deepcopy(self.Q_intime)
        # Initialize vertical weights: wplus, wminus
self.wplus = []
self.wminus = []
for i in range(n-1):
self.wplus.append(np.zeros((self.layers[i].size,
self.layers[i+1].size)))
self.gradient_list.append(np.zeros((self.layers[i].size,
self.layers[i+1].size)))
self.wminus.append(np.zeros((self.layers[i].size,
self.layers[i+1].size)))
self.gradient_list.append(np.zeros((self.layers[i].size,
self.layers[i+1].size)))
        #Initialize horizontal weights: wplus_h, wminus_h
self.wplus_h = np.zeros((self.layers[1].size,
self.layers[1].size))
self.gradient_list.append(np.zeros((self.layers[i].size,
self.layers[i].size)))
self.wminus_h = np.zeros((self.layers[1].size,
self.layers[1].size))
self.gradient_list.append(np.zeros((self.layers[i].size,
self.layers[i].size)))
#Initialize lambdas : lambda_plus, lambda_minus
self.lambda_plus = []
self.lambda_minus = []
self.global_var2 = []
self.global_var = []
self.init_weights(weight_scaler)
self.gradient_list_2=copy.deepcopy(self.gradient_list)
self.gradient_list_3=copy.deepcopy(self.gradient_list)
self.bp_counter=0
def recreate_Q_intime(self,step):
        #Q and D are initialized with zeros
self.Q_intime = []
self.D_intime = []
for t in range(step+1):
Q = []
for i in range(len(self.shape)):
Q.append(np.zeros((self.shape[i],1)))
self.Q_intime.append(copy.deepcopy(Q))
self.D_intime = copy.deepcopy(self.Q_intime)
return 1
def init_weights(self,weight_scaler):
for i in range(len(self.wplus)):
self.wplus[i] = weight_scaler*np.random.rand(self.wplus[i].shape[0],self.wplus[i].shape[1])
self.wminus[i] = weight_scaler*np.random.rand(self.wminus[i].shape[0],self.wminus[i].shape[1])
self.wplus_h = weight_scaler*np.random.rand(self.wplus_h.shape[0],self.wplus_h.shape[1])
self.wminus_h = weight_scaler*np.random.rand(self.wminus_h.shape[0],self.wminus_h.shape[1])
return 1
def calculate_rate(self):
#for t in self.timesteps:
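        # A neuron's total rate is the sum of its outgoing excitatory (wplus)
        # and inhibitory (wminus) weights; vertical and horizontal connections
        # are accumulated separately (self.rate vs self.rate_h).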
for layer_num in range(len(self.layers)-1): #there is no rate for output layer
for neuron in range(len(self.layers[layer_num])):
self.rate[layer_num][neuron] = (self.wplus[layer_num][neuron] + self.wminus[layer_num][neuron]).sum()
for neuron in range(len(self.layers[1])):
self.rate_h[neuron] = (self.wplus_h[neuron] + self.wminus_h[neuron]).sum()
return 1
def feedforward(self,input_list):
#self.recreate_Q_intime()
t=0
if self.time_steps==0:
self.recreate_Q_intime(input_list.shape[0])
for t,input_t in enumerate(input_list,start = 1):
#self.D.clear() #clear list of D matrixes for the next iteration
self.lambda_plus = np.where(input_t > 0, input_t, 0).reshape(-1,1)
self.lambda_minus = np.where(input_t < 0, -input_t, 0).reshape(-1,1)
# Input Layer
self.D_intime[t][0] = self.rate[0]+self.lambda_minus
self.Q_intime[t][0] = self.lambda_plus/self.D_intime[t][0]
np.clip(self.Q_intime[t][0],0,1,out=self.Q_intime[t][0])
# Hidden Layer
T_plus_i = self.wplus[0].transpose() @ self.Q_intime[t][0]
T_minus_i = self.wminus[0].transpose() @ self.Q_intime[t][0]
T_plus_h = self.wplus_h.transpose() @ self.Q_intime[t-1][1]
T_minus_h = self.wminus_h.transpose() @ self.Q_intime[t-1][1]
if t == input_list.shape[0]:
self.D_intime[t][1] = self.rate[1] + T_minus_i + T_minus_h
else:
self.D_intime[t][1] = self.rate_h + T_minus_i + T_minus_h
self.Q_intime[t][1] = (T_plus_i + T_plus_h)/(self.D_intime[t][1])
np.clip(self.Q_intime[t][1],0,1,out=self.Q_intime[t][1])
# Output Layer
T_plus = self.wplus[1].transpose() @ self.Q_intime[t][1]
T_minus = self.wminus[1].transpose() @ self.Q_intime[t][1]
self.D_intime[t][2] = self.rate[2] + T_minus
self.Q_intime[t][2] = T_plus / self.D_intime[t][2]
np.clip(self.Q_intime[t][2] ,0,1,out=self.Q_intime[t][2])
return copy.deepcopy(self.Q_intime[t][2])
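    # Illustrative shape note (not from the original code): for a network built
    # as QRNN([n_in, n_hidden, n_out]), `input_list` is expected to have shape
    # (timesteps, n_in); calculate_rate() should normally be called first so
    # the D denominators above are non-zero. feedforward returns a copy of the
    # output-layer probabilities after the final step.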
    def vertical_gradient(self,t,l): # t: timestep, l: layer
vergrad2 = self.wplus[l].transpose()-self.Q_intime[t][l+1].reshape(-1,1)*self.wminus[l].transpose()
vergrad3 = vergrad2/self.D_intime[t][l+1].reshape(-1,1)
return vergrad3.transpose()
def horizontal_gradient(self,t):
hor_grad2 = self.wplus_h.transpose()-self.Q_intime[t][1].reshape(-1,1)*self.wminus_h.transpose()
hor_grad3 = hor_grad2/self.D_intime[t][1].reshape(-1,1)
return hor_grad3.transpose()
def backpropagation(self,real_output,tmp1=np.zeros((2,1))):
self.bp_counter=self.bp_counter+1
if self.optimizer=='nag':
weights=np.asarray([self.wplus[0],self.wminus[0],self.wplus[1],self.wminus[1],self.wplus_h,self.wminus_h])
x_ahead = weights-np.asarray(self.gradient_list)*0.9
self.wplus[0] = copy.deepcopy(np.clip(x_ahead[0],a_min=0.001,a_max=None))
self.wminus[0] = copy.deepcopy(np.clip(x_ahead[1],a_min=0.001,a_max=None))
self.wplus[1] = copy.deepcopy(np.clip(x_ahead[2],a_min=0.001,a_max=None))
self.wminus[1] = copy.deepcopy(np.clip(x_ahead[3],a_min=0.001,a_max=None))
self.wplus_h = copy.deepcopy(np.clip(x_ahead[4],a_min=0.001,a_max=None))
self.wminus_h = copy.deepcopy(np.clip(x_ahead[5],a_min=0.001,a_max=None))
d_Who_p = []
d_Who_m = []
d_Whh_p = []
d_Whh_m = []
d_Wih_p = []
d_Wih_m = []
d_Who_p.append(((self.Q_intime[-1][1] @ (1/self.D_intime[-1][2]).transpose()).transpose()*tmp1).transpose())
d_Who_m.append((-(self.Q_intime[-1][1] @ (self.Q_intime[-1][2]/self.D_intime[-1][2]).transpose()).transpose()*tmp1).transpose())
steps = len(self.Q_intime)-1
o3_h3 = self.vertical_gradient(steps,1)
d_hidden_layer = o3_h3
for t in reversed(range(1,steps+1)):#2 1 0
if t==steps:
d_whop = d_hidden_layer*(self.Q_intime[t][1]/self.D_intime[t][1])
d_whom = copy.deepcopy(d_whop)
d_Who_p.append((d_whop.transpose()*tmp1).transpose())
d_Who_m.append((d_whom.transpose()*tmp1).transpose())
d_hidden_layer = d_hidden_layer@tmp1
####################Gradients from hidden layers###################
#dqh/dNi
d_hidden_layer_N = d_hidden_layer/self.D_intime[t][1]
#dqh/dDi
d_hidden_layer_D = d_hidden_layer*(-self.Q_intime[t][1]/self.D_intime[t][1])
wihm = self.Q_intime[t][0] @ d_hidden_layer_D.transpose()
wihp = self.Q_intime[t][0] @ d_hidden_layer_N.transpose()
if t==steps:
whhm = self.Q_intime[t-1][1] @ d_hidden_layer_D.transpose()
whhp = self.Q_intime[t-1][1] @ d_hidden_layer_N.transpose()
else :
whhm = (1+self.Q_intime[t-1][1]) @ d_hidden_layer_D.transpose()
whhp_n = self.Q_intime[t-1][1] @ d_hidden_layer_N.transpose()
whhp_d = d_hidden_layer_D
whhp = whhp_n + whhp_d
d_Whh_p.append(whhp)
d_Whh_m.append(whhm)
d_Wih_p.append(wihp)
d_Wih_m.append(wihm)
################### Gradients from input layers ###################
#dqh/dqi
d_input_layer = self.vertical_gradient(t,0)
#dqi/dDi
d_input_layer_D = d_input_layer*(-(self.Q_intime[t][0]/self.D_intime[t][0]))
d_Wih_p.append((d_input_layer_D.transpose()*d_hidden_layer_D).transpose())
d_Wih_m.append((d_input_layer_D.transpose()*d_hidden_layer_D).transpose())
######################## New Hidden Layer #########################
#self.dh_list.append(d_hidden_layer)
d_hidden_layer = self.horizontal_gradient(t) @ d_hidden_layer
        # you can change this
#dh_list.clear()
#weights=np.asarray([self.wplus[0],self.wminus[0],self.wplus[1],self.wminus[1],self.wplus_h,self.wminus_h])
if self.optimizer == 'sgd':
grads=np.asarray([sum(d_Wih_p),sum(d_Wih_m),sum(d_Who_p),sum(d_Who_m),sum(d_Whh_p),sum(d_Whh_m)])*self.learning_rate
self.update_weigths(grads)
elif self.optimizer == 'momentum':
moment = np.asarray(self.gradient_list)*0.9
grads = moment + np.asarray([sum(d_Wih_p),sum(d_Wih_m),sum(d_Who_p),sum(d_Who_m),sum(d_Whh_p),sum(d_Whh_m)])*self.learning_rate
self.update_weigths(grads)
self.gradient_list=copy.deepcopy(grads)
elif self.optimizer == 'nag':
grads = np.asarray([sum(d_Wih_p),sum(d_Wih_m),sum(d_Who_p),sum(d_Who_m),sum(d_Whh_p),sum(d_Whh_m)])
self.gradient_list = np.asarray(self.gradient_list)*0.9 + grads*self.learning_rate
self.update_weigths(self.gradient_list)
elif self.optimizer == 'adagrad':
grads = np.asarray([sum(d_Wih_p),sum(d_Wih_m),sum(d_Who_p),sum(d_Who_m),sum(d_Whh_p),sum(d_Whh_m)])
grads_2=np.square(grads)
self.gradient_list+=copy.deepcopy(grads_2)
for i in range(len(grads)):
grads[i] = (self.learning_rate/(np.sqrt(self.gradient_list[i]+0.000001)))*grads[i]
self.update_weigths(grads)
#self.gradient_list+=copy.deepcopy(grads_2)
elif self.optimizer == 'adadelta': #gradient_list_2->gt//gradient_list->teta_t
eps=0.000001;beta=0.90;
grads=np.asarray([sum(d_Wih_p),sum(d_Wih_m),sum(d_Who_p),sum(d_Who_m),sum(d_Whh_p),sum(d_Whh_m)])
grads_2 = np.square(grads)
self.gradient_list_2 = beta*np.asarray(self.gradient_list_2) + (1-beta)*grads_2
delta_teta = copy.deepcopy(self.gradient_list)
for i in range(len(grads)):
#delta_teta[i] = (self.learning_rate/(np.sqrt(self.gradient_list_2[i]+0.000001)))*grads[i]
delta_teta[i] = (np.sqrt(self.gradient_list[i]+0.000001)/(np.sqrt(self.gradient_list_2[i]+0.000001)))*grads[i]
self.gradient_list = beta* | np.asarray(self.gradient_list) | numpy.asarray |
import numpy as np
import os
import matplotlib.pyplot as plt
import argparse
import random
from sklearn.manifold import TSNE
from matplotlib import cm
from sklearn import utils
import pdb
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='tSNE visualization of features')
parser.add_argument('--tsne_classes', default=20, type=int, help = 'Number of classes to be used for tSNE')
parser.add_argument('--n_components', type=int, default=2, help = 'Number of components for tSNE')
parser.add_argument('--perplexity', default = 10.0, type = float, help = 'tSNE perplexity parameter')
parser.add_argument('--n_iter', default = 50000, type = int, help = 'Number of iterations for tSNE')
parser.add_argument('--input_folder', default='features', type=str, help='Input folder having the features arrays')
parser.add_argument('--input_filename', type=str, default='', help='features and labels file name')
args = parser.parse_args()
# Load arrays
print('Loading arrays...')
iboff_features_file = 'iboff_' + args.input_filename + '_features.npy'
iboff_labels_file = 'iboff_' + args.input_filename + '_labels.npy'
ibatt_features_file = 'ibatt_' + args.input_filename + '_features.npy'
ibatt_labels_file = 'ibatt_' + args.input_filename + '_labels.npy'
X_iboff = np.load(os.path.join(args.input_folder, iboff_features_file))
T_iboff = np.load(os.path.join(args.input_folder, iboff_labels_file))
print('Loaded: {}'.format(iboff_features_file))
print('Loaded: {}'.format(iboff_labels_file))
X_ibatt = np.load(os.path.join(args.input_folder, ibatt_features_file))
T_ibatt = np.load(os.path.join(args.input_folder, ibatt_labels_file))
print('Loaded: {}'.format(ibatt_features_file))
print('Loaded: {}'.format(ibatt_labels_file))
# Choose some of the classes (e.g. 10, 15, 20, etc.)
assert np.unique(T_iboff).shape[0] == np.unique(T_ibatt).shape[0]
num_classes = np.unique(T_iboff).shape[0]
random_classes = random.sample(range(int(T_iboff[0]), int(T_iboff[-1])), args.tsne_classes)
random_classes = np.array(random_classes).astype(np.float32)
X_iboff_few = []
T_iboff_few = []
for class_idx in random_classes:
X_temp = X_iboff[T_iboff==class_idx]
T_temp = T_iboff[T_iboff==class_idx]
X_iboff_few.append(X_temp)
T_iboff_few.append(T_temp)
    X_iboff_few = np.concatenate(X_iboff_few)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
# Define a labeling convention
metric_label = {'True Positive Rate': 'Power', 'False Positive Rate': 'False Positive Rate',
'True Negative Rate': 'True Negative Rate', 'False Negative Rate': 'False Negative Rate',
'Positive Predictive Value': 'Positive Predictive Value',
'Negative Predictive Value': 'Negative Predictive Value',
'False Discovery Rate': 'False Discovery Rate', 'False Omission Rate': 'False Omission Rate',
'Accuracy': 'Accuracy', 'F1': 'F1'}
def plot_metric_curve(analysis_output, metric='True Positive Rate',
plot_multiple_testing=False, fixed_effect_size=None,
fixed_sample_size=100, sd=2):
"""
:param analysis_output:
:param metric:
:param plot_multiple_testing:
:param fixed_effect_size:
:param fixed_sample_size:
:param sd:
:return:
"""
if plot_multiple_testing is True:
results_idx = 1
else:
results_idx = 0
if fixed_effect_size is not None:
eff_index = np.where(analysis_output[0][0]['Effect Size'] == fixed_effect_size)[0]
means_metric = np.array([np.nanmean(analysis_output[x][results_idx][metric], axis=2)[eff_index, :] for x in range(len(analysis_output))])
stdev_metric = np.array([np.nanstd(analysis_output[x][results_idx][metric], axis=2)[eff_index, :] for x in range(len(analysis_output))])
x = analysis_output[0][0]['Sample Size']
y = np.nanstd(means_metric, axis=1)
y_err = np.nanmean(stdev_metric, axis=1) * sd
x_lab = 'Sample Size'
elif fixed_sample_size is not None:
samp_index = np.where(analysis_output[0][0]['Sample Size'] == fixed_sample_size)[0]
means_metric = np.array([np.nanmean(analysis_output[x][results_idx][metric], axis=2)[:, samp_index] for x in range(len(analysis_output))])
stdev_metric = np.array([np.nanstd(analysis_output[x][results_idx][metric], axis=2)[:, samp_index] for x in range(len(analysis_output))])
x = analysis_output[0][0]['Effect Size']
y = np.nanmean(means_metric, axis=0)
        y_err = np.nanmean(stdev_metric, 0) * sd
import numpy as np
import scipy
import matplotlib.pyplot as plt
import statsmodels.api as sm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import matplotlib.gridspec as gridspec
from hydroDL import utils
import string
import os
# manually add package
# os.environ[
# 'PROJ_LIB'] = r'C:\pythonenvir\pkgs\proj4-5.2.0-ha925a31_1\Library\share'
from mpl_toolkits import basemap
def plotBoxFig(data,
label1=None,
label2=None,
colorLst='rbkgcmy',
title=None,
figsize=(8, 6),
sharey=True,
legOnly=False):
nc = len(data)
fig, axes = plt.subplots(ncols=nc, sharey=sharey, figsize=figsize)
for k in range(0, nc):
ax = axes[k] if nc > 1 else axes
temp = data[k]
if type(temp) is list:
for kk in range(len(temp)):
tt = temp[kk]
                if tt is not None and len(tt) > 0:
tt = tt[~np.isnan(tt)]
temp[kk] = tt
else:
temp[kk] = []
else:
temp = temp[~np.isnan(temp)]
bp = ax.boxplot(temp, patch_artist=True, notch=True, showfliers=False)
for kk in range(0, len(bp['boxes'])):
plt.setp(bp['boxes'][kk], facecolor=colorLst[kk])
if label1 is not None:
ax.set_xlabel(label1[k])
else:
ax.set_xlabel(str(k))
ax.set_xticks([])
# ax.ticklabel_format(axis='y', style='sci')
if label2 is not None:
ax.legend(bp['boxes'], label2, loc='best')
if legOnly is True:
ax.legend(bp['boxes'], label2, bbox_to_anchor=(1, 0.5))
if title is not None:
fig.suptitle(title)
return fig
def plotTS(t,
y,
*,
ax=None,
tBar=None,
figsize=(12, 4),
cLst='rbkgcmy',
markerLst=None,
legLst=None,
title=None,
linewidth=2):
newFig = False
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.subplots()
newFig = True
if type(y) is np.ndarray:
y = [y]
for k in range(len(y)):
tt = t[k] if type(t) is list else t
yy = y[k]
legStr = None
if legLst is not None:
legStr = legLst[k]
if markerLst is None:
if True in np.isnan(yy):
ax.plot(tt, yy, '*', color=cLst[k], label=legStr)
else:
ax.plot(
tt, yy, color=cLst[k], label=legStr, linewidth=linewidth)
else:
            if markerLst[k] == '-':
ax.plot(
tt, yy, color=cLst[k], label=legStr, linewidth=linewidth)
else:
ax.plot(
tt, yy, color=cLst[k], label=legStr, marker=markerLst[k])
# ax.set_xlim([np.min(tt), np.max(tt)])
if tBar is not None:
ylim = ax.get_ylim()
tBar = [tBar] if type(tBar) is not list else tBar
for tt in tBar:
ax.plot([tt, tt], ylim, '-k')
if legLst is not None:
ax.legend(loc='best')
if title is not None:
ax.set_title(title)
if newFig is True:
return fig, ax
else:
return ax
def plotVS(x,
y,
*,
ax=None,
title=None,
xlabel=None,
ylabel=None,
titleCorr=True,
plot121=True,
doRank=False,
figsize=(8, 6)):
if doRank is True:
x = scipy.stats.rankdata(x)
y = scipy.stats.rankdata(y)
corr = scipy.stats.pearsonr(x, y)[0]
pLr = np.polyfit(x, y, 1)
xLr = np.array([np.min(x), np.max(x)])
yLr = np.poly1d(pLr)(xLr)
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.subplots()
else:
fig = None
if title is not None:
if titleCorr is True:
title = title + ' ' + r'$\rho$={:.2f}'.format(corr)
ax.set_title(title)
else:
if titleCorr is True:
ax.set_title(r'$\rho$=' + '{:.2f}'.format(corr))
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
# corr = np.corrcoef(x, y)[0, 1]
ax.plot(x, y, 'b.')
ax.plot(xLr, yLr, 'r-')
if plot121 is True:
plot121Line(ax)
return fig, ax
def plot121Line(ax, spec='k-'):
xlim = ax.get_xlim()
ylim = ax.get_ylim()
vmin = np.min([xlim[0], ylim[0]])
vmax = np.max([xlim[1], ylim[1]])
ax.plot([vmin, vmax], [vmin, vmax], spec)
def plotMap(data,
*,
ax=None,
lat=None,
lon=None,
title=None,
cRange=None,
shape=None,
pts=None,
figsize=(8, 4),
plotColorBar=True):
if cRange is not None:
vmin = cRange[0]
vmax = cRange[1]
else:
temp = flatData(data)
vmin = np.percentile(temp, 5)
        vmax = np.percentile(temp, 95)
from ctapipe.visualization import CameraDisplay
from matplotlib.collections import PatchCollection
from ctapipe.instrument import PixelShape
from matplotlib import pyplot as plt
from astropy import units as u
from matplotlib.patches import RegularPolygon, Rectangle, Circle
from ctapipe.io import EventSource, EventSeeker
from ctapipe.image import tailcuts_clean, LocalPeakWindowSum
from ctapipe.utils import get_dataset_path
from ctapipe.calib import CameraCalibrator
import copy
import numpy as np
class MYCameraDisplay(CameraDisplay):
"""
Based on the ctapipe camera display from
https://github.com/cta-observatory/ctapipe/blob/8851e1214409eac4564996cc0f4b76dfe05cf9cf/ctapipe/visualization/mpl_camera.py
No title, axis labels and no autoscale
"""
def __init__(
self,
geometry,
image=None,
ax=None,
norm="lin",
cmap=None,
autoscale=True,
):
self.axes = ax if ax is not None else plt.gca()
self.pixels = None
self.colorbar = None
self.autoscale = autoscale
self.geom = geometry
patches = []
if hasattr(self.geom, "mask"):
self.mask = self.geom.mask
else:
self.mask = np.ones_like(self.geom.pix_x.value, dtype=bool)
pix_x = self.geom.pix_x.value[self.mask]
pix_y = self.geom.pix_y.value[self.mask]
pix_width = self.geom.pixel_width.value[self.mask]
for x, y, w in zip(pix_x, pix_y, pix_width):
if self.geom.pix_type == PixelShape.HEXAGON:
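                # flat-to-flat pixel width w -> circumradius of the regular hexagon: r = w / sqrt(3)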
r = w / np.sqrt(3)
patch = RegularPolygon(
(x, y),
6,
radius=r,
orientation=self.geom.pix_rotation.to_value(u.rad),
fill=True,
)
elif self.geom.pix_type == PixelShape.CIRCLE:
patch = Circle((x, y), radius=w / 2, fill=True)
elif self.geom.pix_type == PixelShape.SQUARE:
patch = Rectangle(
(x - w / 2, y - w / 2),
width=w,
height=w,
angle=self.geom.pix_rotation.to_value(u.deg),
fill=True,
)
else:
raise ValueError(f"Unsupported pixel_shape {self.geom.pix_type}")
patches.append(patch)
self.pixels = PatchCollection(patches, cmap=cmap, linewidth=0)
self.axes.add_collection(self.pixels)
self.pixel_highlighting = copy.copy(self.pixels)
self.pixel_highlighting.set_facecolor("none")
self.pixel_highlighting.set_linewidth(0)
self.axes.add_collection(self.pixel_highlighting)
self.axes.set_aspect("equal")
if image is not None:
self.image = image
else:
self.image = np.zeros_like(self.geom.pix_id, dtype=np.float64)
self.norm = norm
self.axes.xticks = None
def main():
# using toymodel would generate a nicer shower, but I want accurate waveforms as well
source = EventSource(get_dataset_path("gamma_test_large.simtel.gz"), max_events=100)
seeker = EventSeeker(source)
# Define calibrator and extractor
# Calib should use the same extractor to be most accurate in the waveform plot
ex = LocalPeakWindowSum(subarray=source.subarray)
calib = CameraCalibrator(
subarray=source.subarray, image_extractor_type="LocalPeakWindowSum"
)
# A reasonable bright event, there might be better ones still
event = seeker.get_event_index(91)
calib(event)
tel_id = 1 # LST-1
geom = source.subarray.tel[tel_id].camera.geometry
image = event.dl1.tel[tel_id].image
fig, ax = plt.subplots(figsize=(12, 6))
# d is our natural unit here, the width of one pixel
# Since all pixels are the same, we can use this to fine tune the view of the first zoom
d = geom.pixel_width.value[0]
norm = "lin"
color_shower_zoom = "red"
color_waveform_zoom = "blue"
color_extractor = "green"
color_waveform_peak = "magenta"
# add space on the right for the inset axes
ax.set_xlim(-1.5, 4.5)
ax.set_ylim(-1.5, 1.5)
main_cam_display = MYCameraDisplay(geom, ax=ax, norm=norm)
main_cam_display.image = image
# This is manually chosen to match the figure size!
main_cam_display.add_colorbar(location="left", fraction=0.04, pad=0.001, aspect=10)
ax.spines.right.set_visible(False)
ax.spines.top.set_visible(False)
ax.spines.left.set_visible(False)
ax.spines.bottom.set_visible(False)
ax.set_xticks([])
ax.set_yticks([])
ax_shower_zoom = ax.inset_axes([0.45, 0.41, 0.2, 0.5])
zoomed_cam_display = MYCameraDisplay(geom, ax=ax_shower_zoom, norm=norm)
mask = tailcuts_clean(geom, image, 14, 7, 2)
zoomed_cam_display.image = image
zoomed_cam_display.highlight_pixels(mask, alpha=0.4, color=color_shower_zoom)
ax_shower_zoom.set_xlim(
geom.pix_x[mask].value.min() - d, geom.pix_x[mask].value.max() + d
)
ax_shower_zoom.set_ylim(
geom.pix_y[mask].value.min() - d, geom.pix_y[mask].value.max() + d
)
ax_shower_zoom.set_xticks([])
ax_shower_zoom.set_yticks([])
ax.indicate_inset_zoom(ax_shower_zoom, edgecolor=color_shower_zoom)
# Select the pixel
# This will select a square instead of a hexagon
# It would require some manual work to properly select the
# pixel, but would also avoid our scaling issues below
# Maybe at some point in the future
xl = geom.pix_x.value[np.argmax(image)] - d / 2
xu = geom.pix_x.value[np.argmax(image)] + d / 2
    yl = geom.pix_y.value[np.argmax(image)] - d / 2
import numpy as np
from tilitools.ssvm import SSVM
from tilitools.so_multiclass import SOMultiClass
from tilitools import utils_data
from tilitools import profiler
if __name__ == '__main__':
# generate raw training data
Dtrain1 = utils_data.get_gaussian(1000, dims=2, means=[4.0, 2.0], vars=[1.0, 0.3])
Dtrain2 = utils_data.get_gaussian(100, dims=2, means=[-2.0, 1.0], vars=[0.3, 1.3])
Dtrain3 = utils_data.get_gaussian(100, dims=2, means=[3.0, -1.0], vars=[0.3, 0.3])
Dtrain4 = utils_data.get_gaussian(50, dims=2, means=[6.0, -3.0], vars=[0.2, 0.1])
    Dtrain = np.concatenate((Dtrain1, Dtrain2, Dtrain3, Dtrain4))
import numpy as np
from numpy.polynomial import Chebyshev as Ch
from scipy.linalg import cho_factor, cho_solve
from scipy.optimize import minimize
import psoap
from psoap import constants as C
from psoap import matrix_functions
from psoap.data import lredshift
try:
import celerite
from celerite import terms
except ImportError:
print("If you want to use the fast 1D (SB1 or ST1 models), please install celerite")
try:
import george
from george import kernels
except ImportError:
print("If you want to use the fast GP solver (SB2, ST2, or ST3 models) please install george")
def predict_f(lwl_known, fl_known, sigma_known, lwl_predict, amp_f, l_f, mu_GP=1.0):
    '''lwl_known are the known (log-)wavelengths.
    lwl_predict are the prediction (log-)wavelengths.
    Assumes all inputs are 1D arrays.'''
# determine V11, V12, V21, and V22
M = len(lwl_known)
V11 = np.empty((M, M), dtype=np.float64)
matrix_functions.fill_V11_f(V11, lwl_known, amp_f, l_f)
# V11[np.diag_indices_from(V11)] += sigma_known**2
V11 = V11 + sigma_known**2 * np.eye(M)
    N = len(lwl_predict)
V12 = np.empty((M, N), dtype=np.float64)
matrix_functions.fill_V12_f(V12, lwl_known, lwl_predict, amp_f, l_f)
V22 = np.empty((N, N), dtype=np.float64)
# V22 is the covariance between the prediction wavelengths
# The routine to fill V11 is the same as V22
matrix_functions.fill_V11_f(V22, lwl_predict, amp_f, l_f)
# Find V11^{-1}
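    # Standard GP conditional (posterior) formulas:
    #   mu    = mu_GP + V12^T V11^{-1} (f - mu_GP)
    #   Sigma = V22 - V12^T V11^{-1} V12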
factor, flag = cho_factor(V11)
mu = mu_GP + np.dot(V12.T, cho_solve((factor, flag), (fl_known - mu_GP)))
Sigma = V22 - np.dot(V12.T, cho_solve((factor, flag), V12))
return (mu, Sigma)
def predict_python(wl_known, fl_known, sigma_known, wl_predict, amp_f, l_f, mu_GP=1.0):
'''wl_known are known wavelengths.
wl_predict are the prediction wavelengths.'''
# determine V11, V12, V21, and V22
V11 = get_V11(wl_known, sigma_known, amp_f, l_f)
# V12 is covariance between data wavelengths and prediction wavelengths
V12 = get_V12(wl_known, wl_predict, amp_f, l_f)
# V22 is the covariance between the prediction wavelengths
V22 = get_V22(wl_predict, amp_f, l_f)
# Find V11^{-1}
factor, flag = cho_factor(V11)
mu = mu_GP + np.dot(V12.T, cho_solve((factor, flag), (fl_known - mu_GP)))
Sigma = V22 - np.dot(V12.T, cho_solve((factor, flag), V12))
return (mu, Sigma)
def predict_f_g(lwl_f, lwl_g, fl_fg, sigma_fg, lwl_f_predict, lwl_g_predict, mu_f, amp_f, l_f, mu_g, amp_g, l_g, get_Sigma=True):
'''
Given that f + g is the flux that we're modeling, jointly predict the components.
'''
# Assert that wl_f and wl_g are the same length
assert len(lwl_f) == len(lwl_g), "Input wavelengths must be the same length."
n_pix = len(lwl_f)
assert len(lwl_f_predict) == len(lwl_g_predict), "Prediction wavelengths must be the same length."
n_pix_predict = len(lwl_f_predict)
# Convert mu constants into vectors
mu_f = mu_f * np.ones(n_pix_predict)
mu_g = mu_g * np.ones(n_pix_predict)
# Cat these into a single vector
mu_cat = np.hstack((mu_f, mu_g))
# Create the matrices for the input data
# print("allocating V11_f, V11_g", n_pix, n_pix)
    V11_f = np.empty((n_pix, n_pix), dtype=np.float64)
    V11_g = np.empty((n_pix, n_pix), dtype=np.float64)
# print("filling V11_f, V11_g", n_pix, n_pix)
matrix_functions.fill_V11_f(V11_f, lwl_f, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g, lwl_g, amp_g, l_g)
B = V11_f + V11_g
B[np.diag_indices_from(B)] += sigma_fg**2
# print("factoring sum")
factor, flag = cho_factor(B)
# print("Allocating prediction matrices")
# Now create separate matrices for the prediction
    V11_f_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
    V11_g_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
# print("Filling prediction matrices")
matrix_functions.fill_V11_f(V11_f_predict, lwl_f_predict, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g_predict, lwl_g_predict, amp_g, l_g)
zeros = np.zeros((n_pix_predict, n_pix_predict))
A = np.vstack((np.hstack([V11_f_predict, zeros]), np.hstack([zeros, V11_g_predict])))
# A[np.diag_indices_from(A)] += 1e-4 # Add a small nugget term
# C is now the cross-matrices between the predicted wavelengths and the data wavelengths
    V12_f = np.empty((n_pix_predict, n_pix), dtype=np.float64)
    V12_g = np.empty((n_pix_predict, n_pix), dtype=np.float64)
# print("Filling cross-matrices")
matrix_functions.fill_V12_f(V12_f, lwl_f_predict, lwl_f, amp_f, l_f)
matrix_functions.fill_V12_f(V12_g, lwl_g_predict, lwl_g, amp_g, l_g)
C = np.vstack((V12_f, V12_g))
# print("Sloving for mu, sigma")
# the 1.0 signifies that mu_f + mu_g = mu_fg = 1
mu = mu_cat + np.dot(C, cho_solve((factor, flag), fl_fg - 1.0))
if get_Sigma:
Sigma = A - np.dot(C, cho_solve((factor, flag), C.T))
return mu, Sigma
else:
return mu
def predict_f_g_sum(lwl_f, lwl_g, fl_fg, sigma_fg, lwl_f_predict, lwl_g_predict, mu_fg, amp_f, l_f, amp_g, l_g):
# Assert that wl_f and wl_g are the same length
assert len(lwl_f) == len(lwl_g), "Input wavelengths must be the same length."
M = len(lwl_f_predict)
N = len(lwl_f)
    V11_f = np.empty((M, M), dtype=np.float64)
    V11_g = np.empty((M, M), dtype=np.float64)
matrix_functions.fill_V11_f(V11_f, lwl_f_predict, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g, lwl_g_predict, amp_g, l_g)
V11 = V11_f + V11_g
V11[np.diag_indices_from(V11)] += 1e-8
V12_f = np.empty((M, N), dtype=np.float64)
V12_g = np.empty((M, N), dtype=np.float64)
matrix_functions.fill_V12_f(V12_f, lwl_f_predict, lwl_f, amp_f, l_f)
matrix_functions.fill_V12_f(V12_g, lwl_g_predict, lwl_g, amp_g, l_g)
V12 = V12_f + V12_g
    V22_f = np.empty((N,N), dtype=np.float64)
    V22_g = np.empty((N,N), dtype=np.float64)
# It's a square matrix, so we can just reuse fill_V11_f
matrix_functions.fill_V11_f(V22_f, lwl_f, amp_f, l_f)
matrix_functions.fill_V11_f(V22_g, lwl_g, amp_g, l_g)
V22 = V22_f + V22_g
V22[np.diag_indices_from(V22)] += sigma_fg**2
factor, flag = cho_factor(V22)
mu = mu_fg + np.dot(V12, cho_solve((factor, flag), (fl_fg - 1.0)))
Sigma = V11 - np.dot(V12, cho_solve((factor, flag), V12.T))
return mu, Sigma
def predict_f_g_h(lwl_f, lwl_g, lwl_h, fl_fgh, sigma_fgh, lwl_f_predict, lwl_g_predict, lwl_h_predict, mu_f, mu_g, mu_h, amp_f, l_f, amp_g, l_g, amp_h, l_h):
'''
Given that f + g + h is the flux that we're modeling, jointly predict the components.
'''
# Assert that wl_f and wl_g are the same length
assert len(lwl_f) == len(lwl_g), "Input wavelengths must be the same length."
assert len(lwl_f) == len(lwl_h), "Input wavelengths must be the same length."
n_pix = len(lwl_f)
assert len(lwl_f_predict) == len(lwl_g_predict), "Prediction wavelengths must be the same length."
assert len(lwl_f_predict) == len(lwl_h_predict), "Prediction wavelengths must be the same length."
n_pix_predict = len(lwl_f_predict)
# Convert mu constants into vectors
mu_f = mu_f * np.ones(n_pix_predict)
mu_g = mu_g * np.ones(n_pix_predict)
mu_h = mu_h * np.ones(n_pix_predict)
# Cat these into a single vector
mu_cat = np.hstack((mu_f, mu_g, mu_h))
    V11_f = np.empty((n_pix, n_pix), dtype=np.float64)
    V11_g = np.empty((n_pix, n_pix), dtype=np.float64)
    V11_h = np.empty((n_pix, n_pix), dtype=np.float64)
matrix_functions.fill_V11_f(V11_f, lwl_f, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g, lwl_g, amp_g, l_g)
matrix_functions.fill_V11_f(V11_h, lwl_h, amp_h, l_h)
B = V11_f + V11_g + V11_h
B[np.diag_indices_from(B)] += sigma_fgh**2
factor, flag = cho_factor(B)
# Now create separate matrices for the prediction
    V11_f_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
    V11_g_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
    V11_h_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
# Fill the prediction matrices
matrix_functions.fill_V11_f(V11_f_predict, lwl_f_predict, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g_predict, lwl_g_predict, amp_g, l_g)
matrix_functions.fill_V11_f(V11_h_predict, lwl_h_predict, amp_h, l_h)
zeros = np.zeros((n_pix_predict, n_pix_predict))
A = np.vstack((np.hstack([V11_f_predict, zeros, zeros]), np.hstack([zeros, V11_g_predict, zeros]), np.hstack([zeros, zeros, V11_h_predict])))
    V12_f = np.empty((n_pix_predict, n_pix), dtype=np.float64)
    V12_g = np.empty((n_pix_predict, n_pix), dtype=np.float64)
    V12_h = np.empty((n_pix_predict, n_pix), dtype=np.float64)
matrix_functions.fill_V12_f(V12_f, lwl_f_predict, lwl_f, amp_f, l_f)
matrix_functions.fill_V12_f(V12_g, lwl_g_predict, lwl_g, amp_g, l_g)
matrix_functions.fill_V12_f(V12_h, lwl_h_predict, lwl_h, amp_h, l_h)
C = np.vstack((V12_f, V12_g, V12_h))
mu = mu_cat + np.dot(C, cho_solve((factor, flag), fl_fgh - 1.0))
Sigma = A - np.dot(C, cho_solve((factor, flag), C.T))
return mu, Sigma
def predict_f_g_h_sum(lwl_f, lwl_g, lwl_h, fl_fgh, sigma_fgh, lwl_f_predict, lwl_g_predict, lwl_h_predict, mu_fgh, amp_f, l_f, amp_g, l_g, amp_h, l_h):
'''
Given that f + g + h is the flux that we're modeling, predict the joint sum.
'''
# Assert that wl_f and wl_g are the same length
assert len(lwl_f) == len(lwl_g), "Input wavelengths must be the same length."
M = len(lwl_f_predict)
N = len(lwl_f)
    V11_f = np.empty((M, M), dtype=np.float64)
    V11_g = np.empty((M, M), dtype=np.float64)
    V11_h = np.empty((M, M), dtype=np.float64)
matrix_functions.fill_V11_f(V11_f, lwl_f_predict, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g, lwl_g_predict, amp_g, l_g)
matrix_functions.fill_V11_f(V11_h, lwl_h_predict, amp_h, l_h)
V11 = V11_f + V11_g + V11_h
# V11[np.diag_indices_from(V11)] += 1e-5 # small nugget term
V12_f = np.empty((M, N), dtype=np.float64)
V12_g = np.empty((M, N), dtype=np.float64)
V12_h = np.empty((M, N), dtype=np.float64)
matrix_functions.fill_V12_f(V12_f, lwl_f_predict, lwl_f, amp_f, l_f)
matrix_functions.fill_V12_f(V12_g, lwl_g_predict, lwl_g, amp_g, l_g)
matrix_functions.fill_V12_f(V12_h, lwl_h_predict, lwl_h, amp_h, l_h)
V12 = V12_f + V12_g + V12_h
    V22_f = np.empty((N,N), dtype=np.float64)
    V22_g = np.empty((N,N), dtype=np.float64)
    V22_h = np.empty((N,N), dtype=np.float64)
    # It's a square matrix, so we can just reuse fill_V11_f
matrix_functions.fill_V11_f(V22_f, lwl_f, amp_f, l_f)
matrix_functions.fill_V11_f(V22_g, lwl_g, amp_g, l_g)
matrix_functions.fill_V11_f(V22_h, lwl_h, amp_h, l_h)
V22 = V22_f + V22_g + V22_h
V22[np.diag_indices_from(V22)] += sigma_fgh**2
factor, flag = cho_factor(V22)
mu = mu_fgh + np.dot(V12.T, cho_solve((factor, flag), (fl_fgh - mu_fgh)))
Sigma = V11 - np.dot(V12, cho_solve((factor, flag), V12.T))
return mu, Sigma
def lnlike_f(V11, wl_f, fl, sigma, amp_f, l_f, mu_GP=1.):
"""Calculate the log-likelihood for a single-lined spectrum.
This function takes a pre-allocated array and fills out the covariance matrices and evaluates the likelihood function for a single-lined spectrum, assuming a squared-exponential kernel (does not ``celerite``).
Args:
V11 (numpy 2D array): Description of arg1
wl_f (numpy 1D array): Description of arg2
fl (numpy 1D array): ae
amp_f (float) : amplitude of GP
l_f (float) : length scale of GP
mu_GP (float) : mean of GP
Returns:
float: The log-likelihood value
"""
if amp_f < 0.0 or l_f < 0.0:
return -np.inf
# Fill the matrix using fast cython routine.
matrix_functions.fill_V11_f(V11, wl_f, amp_f, l_f)
V11[np.diag_indices_from(V11)] += sigma**2
try:
factor, flag = cho_factor(V11)
except np.linalg.linalg.LinAlgError:
return -np.inf
logdet = np.sum(2 * np.log((np.diag(factor))))
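    # Note: the constant -0.5 * N * log(2 pi) term is omitted; it does not affect
    # optimization or sampling, only the absolute value of the log-likelihood.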
return -0.5 * (np.dot((fl - mu_GP).T, cho_solve((factor, flag), (fl - mu_GP))) + logdet)
def lnlike_f_g(V11, wl_f, wl_g, fl, sigma, amp_f, l_f, amp_g, l_g, mu_GP=1.):
'''
V11 is a matrix to be allocated.
wl_known, fl_known, and sigma_known are flattened 1D arrays.
'''
if amp_f < 0.0 or l_f < 0.0 or amp_g < 0.0 or l_g < 0.0:
return -np.inf
# Fill the matrix using fast cython routine.
matrix_functions.fill_V11_f_g(V11, wl_f, wl_g, amp_f, l_f, amp_g, l_g)
V11[np.diag_indices_from(V11)] += sigma**2
try:
# factor, flag = cho_factor(V11)
factor, flag = cho_factor(V11, overwrite_a=True, lower=False, check_finite=False)
except np.linalg.linalg.LinAlgError:
return -np.inf
logdet = np.sum(2 * np.log((np.diag(factor))))
return -0.5 * (np.dot((fl - mu_GP).T, cho_solve((factor, flag), (fl - mu_GP))) + logdet)
def lnlike_f_g_h(V11, wl_f, wl_g, wl_h, fl, sigma, amp_f, l_f, amp_g, l_g, amp_h, l_h, mu_GP=1.):
'''
V11 is a matrix to be allocated.
wl_known, fl_known, and sigma_known are flattened 1D arrays.
'''
if amp_f < 0.0 or l_f < 0.0 or amp_g < 0.0 or l_g < 0.0 or amp_h < 0.0 or l_h < 0.0:
return -np.inf
# Fill the matrix using fast cython routine.
matrix_functions.fill_V11_f_g_h(V11, wl_f, wl_g, wl_h, amp_f, l_f, amp_g, l_g, amp_h, l_h)
V11[np.diag_indices_from(V11)] += sigma**2
try:
factor, flag = cho_factor(V11)
except np.linalg.linalg.LinAlgError:
return -np.inf
logdet = np.sum(2 * np.log((np.diag(factor))))
return -0.5 * (np.dot((fl - mu_GP).T, cho_solve((factor, flag), (fl - mu_GP))) + logdet)
# Assemble lnlikelihood functions for the different models
lnlike = {"SB1": lnlike_f, "SB2": lnlike_f_g, "ST1": lnlike_f, "ST2": lnlike_f_g, "ST3": lnlike_f_g_h}
# Alternatively, use george to do the likelihood calculations
def lnlike_f_g_george(lwl_f, lwl_g, fl, sigma, amp_f, l_f, amp_g, l_g, mu_GP=1.):
'''
Evaluate the joint likelihood for *f* and *g* using George.
'''
# assumes that the log wavelengths, fluxes, and errors are already flattened
# lwl_f = chunk.lwl.flatten()
# lwl_g = chunk.lwl.flatten()
# does it help to sort?
# ind = np.argsort(lwl_f)
x = np.vstack((lwl_f, lwl_g)).T
# might also want to "block" the kernel to limit it to some velocity range
kernel = amp_f * kernels.ExpSquaredKernel(l_f, ndim=2, axes=0) # primary
kernel += amp_g * kernels.ExpSquaredKernel(l_g, ndim=2, axes=1) # secondary
# instantiate the GP and evaluate the kernel for the prior
gp = george.GP(kernel)
gp.compute(x, sigma)
# evaluate the likelihood for the data
return gp.log_likelihood(fl)
def optimize_GP_f(wl_known, fl_known, sigma_known, amp_f, l_f, mu_GP=1.0):
'''
Optimize the GP hyperparameters for the given slice of data. Amp and lv are starting guesses.
'''
N = len(wl_known)
V11 = np.empty((N,N), dtype=np.float64)
def func(x):
try:
a, l = x
return -lnlike_f(V11, wl_known, fl_known, sigma_known, a, l, mu_GP)
except np.linalg.linalg.LinAlgError:
return np.inf
ans = minimize(func, np.array([amp_f, l_f]), method="Nelder-Mead")
return ans["x"]
def optimize_epoch_velocity_f(lwl_epoch, fl_epoch, sigma_epoch, lwl_fixed, fl_fixed, sigma_fixed, gp):
'''
Optimize the wavelengths of the chosen epoch relative to the fixed wavelengths. Identify the velocity required to redshift the chosen epoch.
'''
fl = np.concatenate((fl_epoch, fl_fixed)).flatten()
sigma = np.concatenate((sigma_epoch, sigma_fixed)).flatten()
def func(p):
try:
v, log_sigma, log_rho = p
if v < -200 or v > 200 or log_sigma < -3 or log_sigma > -2 or log_rho < -9 or log_rho > -8:
                return np.inf  # out-of-bounds penalty: func is a negative log-likelihood being minimized
# Doppler shift the input wl_epoch
lwl_shift = lredshift(lwl_epoch, v)
# Reconcatenate spectra into 1D array and sort
lwl = np.concatenate((lwl_shift, lwl_fixed)).flatten()
indsort = np.argsort(lwl)
# Set the par vectors
gp.set_parameter_vector(p[1:])
# compute GP on new wl grid
gp.compute(lwl[indsort], yerr=sigma[indsort])
return -gp.log_likelihood(fl[indsort])
except np.linalg.linalg.LinAlgError:
return np.inf
# bound as -200 to 200 km/s
p0 = np.concatenate((np.array([0.0]), gp.get_parameter_vector()))
# bounds = [(-200, 200.)] + gp.get_parameter_bounds()
# print(bounds)
# ans = minimize(func, p0, method="L-BFGS-B", bounds=bounds)
ans = minimize(func, p0, method="Nelder-Mead")
# The velocity returned is the amount that was required to redshift wl_epoch to line up with wl_fixed.
if ans["success"]:
print("Success found with", ans["x"])
return ans["x"][0]
else:
print(ans)
raise C.ChunkError("Unable to optimize velocity for epoch.")
def determine_all_velocities(chunk, log_sigma, log_rho, mu_GP=1.0):
kernel = terms.Matern32Term(log_sigma=log_sigma, log_rho=log_rho)
gp = celerite.GP(kernel, mean=1.0, fit_mean=False)
lwl_fixed = chunk.lwl[0]
fl_fixed = chunk.fl[0]
sigma_fixed = chunk.sigma[0]
velocities = np.empty(chunk.n_epochs, dtype=np.float64)
velocities[0] = 0.0
for i in range(1, chunk.n_epochs):
try:
velocities[i] = optimize_epoch_velocity_f(chunk.lwl[i], chunk.fl[i], chunk.sigma[i], lwl_fixed, fl_fixed, sigma_fixed, gp)
except C.ChunkError as e:
print("Unable to optimize velocity for epoch {:}".format(chunk.date1D[i]))
velocities[i] = 0.0
return velocities
# uses smart inverse from Celerite
def optimize_calibration_ST1(lwl0, lwl1, lwl_cal, fl_cal, fl_fixed, gp, A, C, mu_GP=1.0, order=1):
'''
Determine the calibration parameters for this epoch of observations.
lwl0, lwl1: set the points for the Chebyshev.
This is a more general method than optimize_calibration_static, since it allows arbitrary covariance matrices, which should be used when there is orbital motion.
lwl_cal: the wavelengths corresponding to the epoch we want to calibrate
fl_cal: the fluxes corresponding to the epoch we want to calibrate
fl_fixed: the remaining epochs of data to calibrate in reference to.
gp: the celerite GP
order: the degree polynomial to use. order = 1 is a line, order = 2 is a line + parabola
Assumes that covariance matrices are appropriately filled out.
'''
# Get a clean set of the Chebyshev polynomials evaluated on the input wavelengths
T = []
for i in range(0, order + 1):
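        # coeff = [0, ..., 0, 1] picks out the i-th Chebyshev basis polynomial T_i,
        # rescaled to the domain [lwl0, lwl1]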
coeff = [0 for j in range(i)] + [1]
Chtemp = Ch(coeff, domain=[lwl0, lwl1])
Ttemp = Chtemp(lwl_cal)
T += [Ttemp]
T = np.array(T)
D = fl_cal[:,np.newaxis] * T.T
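    # Each column of D is fl_cal weighted by one Chebyshev basis polynomial, so the
    # corrected flux is D @ X = fl_cal * (c0*T_0 + c1*T_1 + ...)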
# Solve for the calibration coefficients c0, c1, ...
# Find B^{-1}, fl_prime, and C_prime
# B^{-1} corresponds to the gp.apply_inverse
fl_prime = mu_GP + np.dot(C, gp.apply_inverse(fl_fixed.flatten() - mu_GP))
C_prime = A - np.dot(C, gp.apply_inverse(C.T))
# Find {C^\prime}^{-1}
CP_cho = cho_factor(C_prime)
# Invert the least squares problem
left = np.dot(D.T, cho_solve(CP_cho, D))
right = np.dot(D.T, cho_solve(CP_cho, fl_prime))
left_cho = cho_factor(left)
# the coefficents, X = [c0, c1]
X = cho_solve(left_cho, right)
# Apply the correction
fl_cor = np.dot(D, X)
# Return both the corrected flux and the coefficients, in case we want to log them,
# or apply the correction later.
return fl_cor, X
def optimize_calibration(lwl0, lwl1, lwl_cal, fl_cal, fl_fixed, A, B, C, order=1, mu_GP=1.0):
'''
Determine the calibration parameters for this epoch of observations. This is a more general method than :py:meth:`psoap.covariance.optimize_calibration_static`, since it allows arbitrary covariance matrices, which should be used when there is orbital motion. Assumes that covariance matrices are appropriately filled out.
Args:
lwl0 (float) : left side evaluation point for Chebyshev
lwl1 (float) : right side evaluation point for Chebyshev
lwl_cal (np.array): the wavelengths corresponding to the epoch we want to calibrate
fl_cal (np.array): the fluxes corresponding to the epoch we want to calibrate
fl_fixed (np.array): the remaining epochs of data to calibrate in reference to.
A (2D np.array) : matrix_functions.fill_V11_f(A, lwl_cal, amp, l_f) with sigma_cal already added to the diagonal
B (2D np.array) : matrix_functions.fill_V11_f(B, lwl_fixed, amp, l_f) with sigma_fixed already added to the diagonal
C (2D np.array): matrix_functions.fill_V12_f(C, lwl_cal, lwl_fixed, amp, l_f) cross matrix (with no sigma added, since these are independent measurements).
order (int): the degree polynomial to use. order = 1 is a line, order = 2 is a line + parabola
Returns:
(np.array, np.array): a tuple of two data products. The first is the ``fl_cal`` vector, now calibrated. The second is the array of the Chebyshev coefficients, in case one wants to re-evaluate the calibration polynomials.
'''
# basically, assume that A, B, and C are already filled out.
# the only thing this routine needs to do is fill out the Q matrix
# Get a clean set of the Chebyshev polynomials evaluated on the input wavelengths
T = []
for i in range(0, order + 1):
coeff = [0 for j in range(i)] + [1]
Chtemp = Ch(coeff, domain=[lwl0, lwl1])
Ttemp = Chtemp(lwl_cal)
T += [Ttemp]
T = np.array(T)
D = fl_cal[:,np.newaxis] * T.T
# Solve for the calibration coefficients c0, c1, ...
# Find B^{-1}, fl_prime, and C_prime
try:
B_cho = cho_factor(B)
except np.linalg.linalg.LinAlgError:
print("Failed to solve matrix inverse. Calibration not valid.")
raise
fl_prime = mu_GP + np.dot(C, cho_solve(B_cho, (fl_fixed.flatten() - mu_GP)))
C_prime = A - np.dot(C, cho_solve(B_cho, C.T))
# Find {C^\prime}^{-1}
CP_cho = cho_factor(C_prime)
# Invert the least squares problem
left = np.dot(D.T, cho_solve(CP_cho, D))
right = np.dot(D.T, cho_solve(CP_cho, fl_prime))
left_cho = cho_factor(left)
# the coefficents, X = [c0, c1]
X = cho_solve(left_cho, right)
# Apply the correction
fl_cor = np.dot(D, X)
# Return both the corrected flux and the coefficients, in case we want to log them,
# or apply the correction later.
return fl_cor, X
def optimize_calibration_static(wl0, wl1, wl_cal, fl_cal, sigma_cal, wl_fixed, fl_fixed, sigma_fixed, amp, l_f, order=1, mu_GP=1.0):
'''
Determine the calibration parameters for this epoch of observations. Assumes all wl, fl arrays are 1D, and that the relative velocities between all epochs are zero.
Args:
wl0 (float) : left wl point to evaluate the Chebyshev
wl1 (float) : right wl point to evaluate the Chebyshev
wl_cal (np.array) : the wavelengths of the epoch to calibrate
fl_cal (np.array) : the fluxes of the epoch to calibrate
sigma_cal (np.array): the sigmas of the epoch to calibrate
wl_fixed (np.array) : the 1D (flattened) array of the reference wavelengths
fl_fixed (np.array) : the 1D (flattened) array of the reference fluxes
sigma_fixed (np.array) : the 1D (flattened) array of the reference sigmas
amp (float): the GP amplitude
l_f (float): the GP length
order (int): the Chebyshev order to use
mu_GP (optional): the mean of the GP to assume.
Returns:
(np.array, np.array): a tuple of two data products. The first is the ``fl_cal`` vector, now calibrated. The second is the array of the Chebyshev coefficients, in case one wants to re-evaluate the calibration polynomials.
'''
N_A = len(wl_cal)
A = np.empty((N_A, N_A), dtype=np.float64)
N_B = len(wl_fixed)
B = np.empty((N_B, N_B), dtype=np.float64)
C = np.empty((N_A, N_B), dtype=np.float64)
matrix_functions.fill_V11_f(A, wl_cal, amp, l_f)
matrix_functions.fill_V11_f(B, wl_fixed, amp, l_f)
matrix_functions.fill_V12_f(C, wl_cal, wl_fixed, amp, l_f)
# Add in sigmas
A[np.diag_indices_from(A)] += sigma_cal**2
B[np.diag_indices_from(B)] += sigma_fixed**2
# Get a clean set of the Chebyshev polynomials evaluated on the input wavelengths
T = []
for i in range(0, order + 1):
coeff = [0 for j in range(i)] + [1]
Chtemp = Ch(coeff, domain=[wl0, wl1])
Ttemp = Chtemp(wl_cal)
T += [Ttemp]
T = np.array(T)
D = fl_cal[:,np.newaxis] * T.T
# Solve for the calibration coefficients c0, c1
# Find B^{-1}, fl_prime, and C_prime
try:
B_cho = cho_factor(B)
except np.linalg.linalg.LinAlgError:
print("Failed to solve matrix inverse. Calibration not valid.")
raise
fl_prime = mu_GP + np.dot(C, cho_solve(B_cho, (fl_fixed.flatten() - mu_GP)))
C_prime = A - np.dot(C, cho_solve(B_cho, C.T))
# Find {C^\prime}^{-1}
CP_cho = cho_factor(C_prime)
# Invert the least squares problem
left = np.dot(D.T, cho_solve(CP_cho, D))
right = np.dot(D.T, cho_solve(CP_cho, fl_prime))
left_cho = cho_factor(left)
# the coefficents, X = [c0, c1]
X = cho_solve(left_cho, right)
# Apply the correction
fl_cor = np.dot(D, X)
return fl_cor, X
def cycle_calibration(wl, fl, sigma, amp_f, l_f, ncycles, order=1, limit_array=3, mu_GP=1.0, soften=1.0):
'''
Given a chunk of spectra, cycle n_cycles amongst all spectra and return the spectra with inferred calibration adjustments.
order : what order of Chebyshev polynomials to use. 1st order = line.
Only use `limit_array` number of spectra to save memory.
'''
wl0 = np.min(wl)
wl1 = np.max(wl)
fl_out = np.copy(fl)
# Soften the sigmas a little bit
sigma = soften * sigma
n_epochs = len(wl)
for cycle in range(ncycles):
for i in range(n_epochs):
wl_tweak = wl[i]
fl_tweak = fl_out[i]
sigma_tweak = sigma[i]
# Temporary arrays without the epoch we just chose
wl_remain = np.delete(wl, i, axis=0)[0:limit_array]
fl_remain = np.delete(fl_out, i, axis=0)[0:limit_array]
            sigma_remain = np.delete(sigma, i, axis=0)[0:limit_array]
# -*- coding: utf-8 -*-
import numpy as np
class BaseValidationCase:
def __init__(self, times_per_hour=60, total_days=60):
self.times_per_hour = times_per_hour
hours_per_day = 24
self.total_days = total_days
self.total_timesteps = hours_per_day * times_per_hour * total_days
self.total_hours = total_days * hours_per_day # used for result evaluation
self.timesteps_day = int(hours_per_day * times_per_hour)
def get_building_parameters(self):
return {}
def get_ventilation_rate(self):
        return np.zeros(self.total_timesteps)
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import ctypes
import numpy as np
from onnx.backend.test.case.node import _extract_value_info
import onnx
from onnx import TensorProto, helper, mapping, numpy_helper
import pycuda.driver as cuda
import tensorrt as trt
import tensorflow as tf
sys.path.append("..")
from python import *
os.chdir("../python/")
I_GPU = 0
os.environ["CUDA_VISIBLE_DEVICES"] = str(I_GPU)
tf.set_random_seed(1234)
np.random.seed(0)
ITERATIONS = 10
CONFIG = tf.ConfigProto()
CONFIG.gpu_options.allow_growth = True
INPUT_MODEL_FILE = "model/test_op_plugin.onnx"
OUTPUT_MODEL_FILE = "model/test_op_trt.onnx"
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
# TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
BATCH_SIZE = 1
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
# size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
size = trt.volume(engine.get_binding_shape(binding))
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(
batch_size=batch_size, bindings=bindings, stream_handle=stream.handle
)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
def run_tf_graph(sess, input_data, input_node, output_node):
"""Generic function to execute tensorflow"""
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
output_node = convert_to_list(output_node)
tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node]
input_dict = {e: input_data[i] for i, e in enumerate(input_node)}
# if len(input_node) == 1 and input_node[0] == "":
# output_data = sess.run(tensor)
# else:
output_data = sess.run(tensor, input_dict)
return output_data
def verify_tf_with_trt_result(in_data, in_name, out_name, op_name):
def name_without_num(name):
return name.split(":")[0] if ":" in name else name
out_name = convert_to_list(out_name)
out_node = [name_without_num(name) for name in out_name]
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
tf_result = run_tf_graph(sess, in_data, in_name, out_name)
frozen_graph = tf.graph_util.convert_variables_to_constants(
sess, sess.graph_def, out_node
)
with open("model/test_op_{}.pb".format(op_name), "wb") as ofile:
ofile.write(frozen_graph.SerializeToString())
os.system(
"python3 -m tf2onnx.convert --input model/test_op_{}.pb --inputs {} --outputs {} --output {} --opset 11".format(
op_name, str(",").join(in_name), str(",").join(out_name), INPUT_MODEL_FILE
)
)
ops_name = [op_name]
trt_plugin_name = onnx2plugin(
INPUT_MODEL_FILE, OUTPUT_MODEL_FILE, node_names=ops_name
)
for plugin_name in trt_plugin_name:
ctypes.cdll.LoadLibrary("./trt_plugin/lib/{}.so".format(plugin_name))
cuda.Device(0).make_context()
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(
1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_batch_size = BATCH_SIZE
builder_config = builder.create_builder_config()
builder_config.max_workspace_size = 1 << 30
with open(OUTPUT_MODEL_FILE, "rb") as model:
# parse onnx model
parser.parse(model.read())
for i in range(parser.num_errors):
print(parser.get_error(i))
engine = builder.build_engine(network, builder_config)
if engine is None:
print("[ERROR] engine is None")
exit(-1)
inputs, outputs, bindings, stream = allocate_buffers(engine)
with engine.create_execution_context() as context:
for i in range(len(inputs)):
input_data = in_data[i].ravel()
np.copyto(inputs[i].host, input_data)
trt_result = do_inference(
context,
bindings=bindings,
inputs=inputs,
outputs=outputs,
stream=stream,
                batch_size=BATCH_SIZE,
)
cuda.Context.pop()
ret = True
if len(trt_result) == 1:
ret = compare_tf_trt_result(tf_result, trt_result)
else:
for i in range(len(trt_result)):
ret &= compare_tf_trt_result(tf_result[i], trt_result[i])
assert ret, "result check False"
return ret
def compare_tf_trt_result(tf_result, trt_result):
print(tf_result)
print("================")
print(trt_result)
tf_reshape = np.array(tf_result).reshape(-1)
trt_reshape = np.array(trt_result).reshape(-1)
if (
isinstance(tf_result, list)
and isinstance(trt_result, list)
and len(tf_result) > 0
and len(trt_result) > 0
and np.isnan(tf_result[0]).any()
and np.isnan(trt_result[0]).any()
):
return True
elif (
isinstance(tf_result, list)
and isinstance(trt_result, list)
and len(tf_result) > 0
and len(trt_result) > 0
and np.isinf(tf_result[0]).any()
and np.isinf(trt_result[0]).any()
):
return True
print(
"trt cross_check output ",
str(np.allclose(tf_reshape.flatten(), trt_reshape.flatten(), atol=1e-5)),
flush=True,
)
return bool(np.allclose(tf_reshape.flatten(), trt_reshape.flatten(), atol=1e-5))
def get_onnxruntime_output(model, inputs):
import onnxruntime.backend
rep = onnxruntime.backend.prepare(model, "CPU")
if isinstance(inputs, list) and len(inputs) == 1:
inp = inputs[0]
else:
inp = inputs
output = rep.run(inp)
# Unpack output if there's only a single value.
if len(output) == 1:
output = output[0]
return output
def verify_with_ort_with_trt(
model,
inputs,
op_name,
opset=None,
dtype="float32",
opt_level=1,
np_result=None,
use_vm=False,
):
if opset is not None:
model.opset_import[0].version = opset
onnx.save(model, INPUT_MODEL_FILE)
if np_result is None:
ort_result = get_onnxruntime_output(model, inputs)
else:
ort_result = np_result
in_data = convert_to_list(inputs)
ops_name = [op_name]
trt_plugin_name = onnx2plugin(
INPUT_MODEL_FILE, OUTPUT_MODEL_FILE, node_names=ops_name
)
for plugin_name in trt_plugin_name:
ctypes.cdll.LoadLibrary("./trt_plugin/lib/{}.so".format(plugin_name))
cuda.Device(0).make_context()
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(
1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
builder.max_batch_size = BATCH_SIZE
builder_config = builder.create_builder_config()
builder_config.max_workspace_size = 1 << 30
with open(OUTPUT_MODEL_FILE, "rb") as model:
# parse onnx model
parser.parse(model.read())
for i in range(parser.num_errors):
print(parser.get_error(i))
engine = builder.build_engine(network, builder_config)
if engine is None:
print("[ERROR] engine is None")
exit(-1)
inputs, outputs, bindings, stream = allocate_buffers(engine)
with engine.create_execution_context() as context:
for i in range(len(inputs)):
input_data = in_data[i].ravel()
np.copyto(inputs[i].host, input_data)
trt_result = do_inference(
context,
bindings=bindings,
inputs=inputs,
outputs=outputs,
stream=stream,
batch_size=BATCH_SIZE,
)
cuda.Context.pop()
ret = True
if len(trt_result) == 1:
ret = compare_tf_trt_result(ort_result, trt_result)
else:
for i in range(len(trt_result)):
ret &= compare_tf_trt_result(ort_result[i], trt_result[i])
assert ret, "result check False"
return ret
def make_constant_node(name, data_type, dims, vals):
return helper.make_node(
"Constant",
inputs=[],
outputs=[name],
value=helper.make_tensor(name=name, data_type=data_type, dims=dims, vals=vals),
)
def make_onnx_model(node, inputs, outputs, name, **kwargs):
present_inputs = [x for x in node.input if (x != "")]
present_outputs = [x for x in node.output if (x != "")]
input_type_protos = [None] * len(inputs)
if "input_type_protos" in kwargs:
input_type_protos = kwargs[str("input_type_protos")]
del kwargs[str("input_type_protos")]
output_type_protos = [None] * len(outputs)
if "output_type_protos" in kwargs:
output_type_protos = kwargs[str("output_type_protos")]
del kwargs[str("output_type_protos")]
inputs_vi = [
_extract_value_info(arr, arr_name, input_type)
for arr, arr_name, input_type in zip(inputs, present_inputs, input_type_protos)
]
outputs_vi = [
_extract_value_info(arr, arr_name, output_type)
for arr, arr_name, output_type in zip(
outputs, present_outputs, output_type_protos
)
]
graph = helper.make_graph(
nodes=[node], name=name, inputs=inputs_vi, outputs=outputs_vi
)
kwargs[str("producer_name")] = "TRTPluginAutoGen-test"
model = onnx.helper.make_model(graph, **kwargs)
return model
def op_expect(node, inputs, outputs, op_type, op_name, np_result=None):
model = make_onnx_model(
node, inputs=inputs, outputs=outputs, name="test_{}".format(op_type)
)
verify_with_ort_with_trt(model, inputs, op_name, np_result=np_result)
# ====================================================================================
# ---UnitTest
# ====================================================================================
def test_abs():
op_name = "abs_0"
op_type = "Abs"
x = np.random.randn(3, 4, 5).astype(np.float32)
y = abs(x)
node = helper.make_node(op_type, inputs=["x"], outputs=["y"], name=op_name)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_acos():
op_name = "acos_0"
op_type = "Acos"
node = onnx.helper.make_node("Acos", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arccos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "acos_1"
op_type = "Acos"
node = onnx.helper.make_node("Acos", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.rand(3, 4, 5).astype(np.float32)
y = np.arccos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_and():
op_name = "and_0"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
# 2d
x = (np.random.randn(3, 4) > 0).astype(bool)
y = (np.random.randn(3, 4) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "and_1"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
x = (np.random.randn(3, 4, 5) > 0).astype(bool)
y = (np.random.randn(3, 4, 5) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "and_2"
op_type = "And"
node = onnx.helper.make_node(
"And", inputs=["x", "y"], outputs=["and"], name=op_name
)
x = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
y = (np.random.randn(3, 4, 5, 6) > 0).astype(bool)
z = np.logical_and(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_add():
op_name = "add_0"
op_type = "Add"
node = onnx.helper.make_node(
"Add", inputs=["x", "y"], outputs=["sum"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
op_expect(node, inputs=[x, y], outputs=[x + y], op_type=op_type, op_name=op_name)
op_name = "add_1"
op_type = "Add"
node = onnx.helper.make_node(
"Add", inputs=["x", "y"], outputs=["sum"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(5).astype(np.float32)
op_expect(node, inputs=[x, y], outputs=[x + y], op_type=op_type, op_name=op_name)
def test_argmax():
op_type = "ArgMax"
op_name = "argmax_0"
data = np.array([[2, 1, 3, 10], [3, 4, 5, 6]], dtype=np.float32)
keepdims = 1
axis = -1
node = onnx.helper.make_node(
"ArgMax",
inputs=["data"],
outputs=["result"],
keepdims=keepdims,
axis=axis,
name=op_name,
)
# result: [[1], [1]]
from onnx.backend.test.case.node.argmax import argmax_use_numpy
result = argmax_use_numpy(data, keepdims=keepdims, axis=axis)
op_expect(node, inputs=[data], outputs=[result], op_type=op_type, op_name=op_name)
op_name = "argmax_1"
node = onnx.helper.make_node(
"ArgMax",
inputs=["data"],
outputs=["result"],
keepdims=keepdims,
axis=axis,
name=op_name,
)
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [1, 3, 4]
result = argmax_use_numpy(data, keepdims=keepdims, axis=axis)
op_expect(node, inputs=[data], outputs=[result], op_type=op_type, op_name=op_name)
def test_argmin():
op_type = "ArgMin"
op_name = "argmin_0"
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
keepdims = 1
axis = 1
node = onnx.helper.make_node(
"ArgMin",
inputs=["data"],
outputs=["result"],
keepdims=keepdims,
axis=axis,
name=op_name,
)
# result: [[1], [1]]
from onnx.backend.test.case.node.argmin import argmin_use_numpy
result = argmin_use_numpy(data, keepdims=keepdims, axis=axis)
op_expect(node, inputs=[data], outputs=[result], op_type=op_type, op_name=op_name)
def test_asin():
op_name = "asin_0"
op_type = "Asin"
node = onnx.helper.make_node("Asin", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arcsin(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "asin_1"
op_type = "Asin"
node = onnx.helper.make_node("Asin", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.rand(3, 4, 5).astype(np.float32)
y = np.arcsin(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_asinh():
op_name = "asinh_0"
op_type = "Asinh"
node = onnx.helper.make_node("Asinh", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.arcsinh(x) # expected output [-0.88137358, 0., 0.88137358]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "asinh_1"
op_type = "Asinh"
node = onnx.helper.make_node("Asinh", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.arcsinh(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_atan():
op_type = "Atan"
op_name = "atan_0"
node = onnx.helper.make_node("Atan", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.arctan(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_type = "Atan"
op_name = "atan_1"
node = onnx.helper.make_node("Atan", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.arctan(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_atanh():
op_name = "atanh_0"
op_type = "Atanh"
node = onnx.helper.make_node("Atanh", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arctanh(x) # expected output [-0.54930615, 0., 0.54930615]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "atanh_1"
op_type = "Atanh"
node = onnx.helper.make_node("Atanh", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.uniform(0.0, 1.0, (3, 4, 5)).astype(np.float32)
y = np.arctanh(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_averagepool():
op_name = "averagepool_1d_default"
op_type = "AveragePool"
"""
input_shape: [1, 3, 32]
output_shape: [1, 3, 31]
"""
node = onnx.helper.make_node(
"AveragePool", inputs=["x"], outputs=["y"], kernel_shape=[2], name=op_name
)
x = np.random.randn(1, 3, 32).astype(np.float32)
x_shape = np.shape(x)
kernel_shape = [2]
strides = [1]
from onnx.backend.test.case.node.pool_op_common import get_output_shape, pool
out_shape = get_output_shape("VALID", x_shape[2:], kernel_shape, strides)
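    # "VALID" pooling output length: floor((L - k) / s) + 1 = floor((32 - 2) / 1) + 1 = 31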
padded = x
y = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], "AVG")
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "averagepool_2d_ceil"
op_type = "AveragePool"
node = onnx.helper.make_node(
"AveragePool",
inputs=["x"],
outputs=["y"],
kernel_shape=[3, 3],
strides=[2, 2],
ceil_mode=True,
name=op_name,
)
x = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
]
).astype(np.float32)
y = np.array([[[[6, 7.5], [12, 13.5]]]]).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_batchnormalization():
op_name = "batchnormalization_0"
op_type = "BatchNormalization"
# input size: (2, 3, 4, 5)
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
s = np.random.randn(3).astype(np.float32)
bias = np.random.randn(3).astype(np.float32)
mean = np.random.randn(3).astype(np.float32)
var = np.random.rand(3).astype(np.float32)
from onnx.backend.test.case.node.batchnorm import _batchnorm_test_mode
y = _batchnorm_test_mode(x, s, bias, mean, var).astype(np.float32)
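    # test-mode batch norm: y = s * (x - mean) / sqrt(var + epsilon) + bias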
node = onnx.helper.make_node(
"BatchNormalization",
inputs=["x", "s", "bias", "mean", "var"],
outputs=["y"],
name=op_name,
)
# output size: (2, 3, 4, 5)
op_expect(
node,
inputs=[x, s, bias, mean, var],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
def test_ceil():
op_name = "ceil_0"
op_type = "Ceil"
node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1.5, 1.2]).astype(np.float32)
y = np.ceil(x) # expected output [-1., 2.]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "ceil_1"
op_type = "Ceil"
node = onnx.helper.make_node("Ceil", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.ceil(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_celu():
op_name = "celu_0"
op_type = "Celu"
alpha = 2.0
node = onnx.helper.make_node(
"Celu", inputs=["X"], outputs=["Y"], alpha=alpha, name=op_name
)
input_data = np.array(
[
[
[[0.8439683], [0.5665144], [0.05836735]],
[[0.02916367], [0.12964272], [0.5060197]],
[[0.79538304], [0.9411346], [0.9546573]],
],
[
[[0.17730942], [0.46192095], [0.26480448]],
[[0.6746842], [0.01665257], [0.62473077]],
[[0.9240844], [0.9722341], [0.11965699]],
],
[
[[0.41356155], [0.9129373], [0.59330076]],
[[0.81929934], [0.7862604], [0.11799799]],
[[0.69248444], [0.54119414], [0.07513223]],
],
],
dtype=np.float32,
)
# Calculate expected output data
positive_input = np.maximum(0, input_data)
negative_input = np.minimum(0, alpha * (np.exp(input_data / alpha) - 1))
expected_output = positive_input + negative_input
op_expect(
node,
inputs=[input_data],
outputs=[expected_output],
op_type=op_type,
op_name=op_name,
)
def test_clip():
op_name = "Clip_0"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.array([-2, 0, 2]).astype(np.float32)
min_val = np.array([-1.0]).astype(np.float32) # .float32(-1.0)
max_val = np.array([1.0]).astype(np.float32) # .float32(1.0)
y = np.clip(x, min_val, max_val) # expected output [-1., 0., 1.]
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "Clip_1"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, min_val, max_val)
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "Clip_2"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
min_val = np.array([-5.0]).astype(np.float32) # .float32(-1.0)
max_val = np.array([5.0]).astype(np.float32) # .float32(1.0)
op_name = "Clip_3"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.array([-1, 0, 1]).astype(np.float32)
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "Clip_4"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.array([-6, 0, 6]).astype(np.float32)
y = np.array([-5, 0, 5]).astype(np.float32)
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "Clip_5"
op_type = "Clip"
node = onnx.helper.make_node(
"Clip", inputs=["x", "min", "max"], outputs=["y"], name=op_name
)
x = np.array([-1, 0, 6]).astype(np.float32)
y = np.array([-1, 0, 5]).astype(np.float32)
op_expect(
node,
inputs=[x, min_val, max_val],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
def test_concat():
test_cases = {
"1d": ([1, 2], [3, 4]),
"2d": ([[1, 2], [3, 4]], [[5, 6], [7, 8]]),
"3d": (
[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[9, 10], [11, 12]], [[13, 14], [15, 16]]],
),
} # type: Dict[Text, Sequence[Any]]
for test_case, values_ in test_cases.items():
values = [np.asarray(v, dtype=np.float32) for v in values_]
for i in range(len(values[0].shape)):
op_name = "concat_{}_{}".format(test_case, i)
op_type = "Concat"
in_args = ["value" + str(k) for k in range(len(values))]
node = onnx.helper.make_node(
"Concat",
inputs=[s for s in in_args],
outputs=["output"],
axis=i,
name=op_name,
)
output = np.concatenate(values, i)
op_expect(
node,
inputs=[v for v in values],
outputs=[output],
op_type=op_type,
op_name=op_name,
)
for i in range(-len(values[0].shape), 0):
op_name = "concat_{}_1_{}".format(test_case, abs(i))
op_type = "Concat"
in_args = ["value" + str(k) for k in range(len(values))]
node = onnx.helper.make_node(
"Concat",
inputs=[s for s in in_args],
outputs=["output"],
axis=i,
name=op_name,
)
output = np.concatenate(values, i)
op_expect(
node,
inputs=[v for v in values],
outputs=[output],
op_type=op_type,
op_name=op_name,
)
def test_conv():
# ------Conv
op_name, op_type = "test_basic_conv_with_padding", "Conv"
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]
]
]
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[
[1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
# Convolution with padding
node_with_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
# Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
pads=[1, 1, 1, 1],
name=op_name,
)
y_with_padding = np.array(
[
[
[
[12.0, 21.0, 27.0, 33.0, 24.0], # (1, 1, 5, 5) output tensor
[33.0, 54.0, 63.0, 72.0, 51.0],
[63.0, 99.0, 108.0, 117.0, 81.0],
[93.0, 144.0, 153.0, 162.0, 111.0],
[72.0, 111.0, 117.0, 123.0, 84.0],
]
]
]
).astype(np.float32)
op_expect(
node_with_padding,
inputs=[x, W],
outputs=[y_with_padding],
op_type=op_type,
op_name=op_name,
)
op_name, op_type = "test_basic_conv_without_padding", "Conv"
# Convolution without padding
node_without_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
# Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
pads=[0, 0, 0, 0],
name=op_name,
)
y_without_padding = np.array(
[
[
[
[54.0, 63.0, 72.0], # (1, 1, 3, 3) output tensor
[99.0, 108.0, 117.0],
[144.0, 153.0, 162.0],
]
]
]
).astype(np.float32)
op_expect(
node_without_padding,
inputs=[x, W],
outputs=[y_without_padding],
op_type=op_type,
op_name=op_name,
)
# conv_with_autopad_same
op_name, op_type = "test_conv_with_autopad_same", "Conv"
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 5, 5) input tensor
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
]
]
]
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[
[1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
# Convolution with auto_pad='SAME_LOWER' and strides=2
node = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
auto_pad="SAME_LOWER",
kernel_shape=[3, 3],
strides=[2, 2],
name=op_name,
)
y = np.array(
[[[[12.0, 27.0, 24.0], [63.0, 108.0, 81.0], [72.0, 117.0, 84.0]]]]
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
# conv_with_strides
op_name, op_type = "test_conv_with_strides_padding", "Conv"
x = np.array(
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 7, 5) input tensor
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
[20.0, 21.0, 22.0, 23.0, 24.0],
[25.0, 26.0, 27.0, 28.0, 29.0],
[30.0, 31.0, 32.0, 33.0, 34.0],
]
]
]
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[
[1.0, 1.0, 1.0], # (1, 1, 3, 3) tensor for convolution weights
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
]
]
]
).astype(np.float32)
# Convolution with strides=2 and padding
node_with_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
pads=[1, 1, 1, 1],
strides=[
2,
2,
], # Default values for other attributes: dilations=[1, 1], groups=1
name=op_name,
)
y_with_padding = np.array(
[
[
[
[12.0, 27.0, 24.0], # (1, 1, 4, 3) output tensor
[63.0, 108.0, 81.0],
[123.0, 198.0, 141.0],
[112.0, 177.0, 124.0],
]
]
]
).astype(np.float32)
op_expect(
node_with_padding,
inputs=[x, W],
outputs=[y_with_padding],
op_type=op_type,
op_name=op_name,
)
op_name = "test_conv_with_strides_no_padding"
# Convolution with strides=2 and no padding
node_without_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
pads=[0, 0, 0, 0],
strides=[
2,
2,
], # Default values for other attributes: dilations=[1, 1], groups=1
name=op_name,
)
y_without_padding = np.array(
[[[[54.0, 72.0], [144.0, 162.0], [234.0, 252.0]]]] # (1, 1, 3, 2) output tensor
).astype(np.float32)
op_expect(
node_without_padding,
inputs=[x, W],
outputs=[y_without_padding],
op_type=op_type,
op_name=op_name,
)
op_name = "test_conv_with_strides_and_asymmetric_padding"
# Convolution with strides=2 and padding only along one dimension (the H dimension in NxCxHxW tensor)
node_with_asymmetric_padding = onnx.helper.make_node(
"Conv",
inputs=["x", "W"],
outputs=["y"],
kernel_shape=[3, 3],
pads=[1, 0, 1, 0],
strides=[
2,
2,
], # Default values for other attributes: dilations=[1, 1], groups=1
name=op_name,
)
y_with_asymmetric_padding = np.array(
[
[
[
[21.0, 33.0], # (1, 1, 4, 2) output tensor
[99.0, 117.0],
[189.0, 207.0],
[171.0, 183.0],
]
]
]
).astype(np.float32)
op_expect(
node_with_asymmetric_padding,
inputs=[x, W],
outputs=[y_with_asymmetric_padding],
op_type=op_type,
op_name=op_name,
)
def test_convtranspose():
op_name, op_type = "test_convtranspose", "ConvTranspose"
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], name=op_name)
y = np.array(
[
[
[
[0.0, 1.0, 3.0, 3.0, 2.0], # (1, 2, 5, 5)
[3.0, 8.0, 15.0, 12.0, 7.0],
[9.0, 21.0, 36.0, 27.0, 15.0],
[9.0, 20.0, 33.0, 24.0, 13.0],
[6.0, 13.0, 21.0, 15.0, 8.0],
],
[
[0.0, 1.0, 3.0, 3.0, 2.0],
[3.0, 8.0, 15.0, 12.0, 7.0],
[9.0, 21.0, 36.0, 27.0, 15.0],
[9.0, 20.0, 33.0, 24.0, 13.0],
[6.0, 13.0, 21.0, 15.0, 8.0],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_convtranspose_1d", "ConvTranspose"
x = np.array([[[0.0, 1.0, 2.0]]]).astype(np.float32) # (1, 1, 3)
    #NOCC:invalid-name(other: onnx example)
W = np.array([[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]]).astype(np.float32) # (1, 2, 3)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], name=op_name)
y = np.array(
[[[0.0, 1.0, 3.0, 3.0, 2.0], [0.0, 1.0, 3.0, 3.0, 2.0]]] # (1, 2, 5)
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_convtranspose_3d", "ConvTranspose"
x = np.array(
[
[
[
[
[0.0, 1.0, 2.0, 3.0, 4.0], # (1, 1, 3, 4, 5)
[5.0, 6.0, 7.0, 8.0, 9.0],
[10.0, 11.0, 12.0, 13.0, 14.0],
[15.0, 16.0, 17.0, 18.0, 19.0],
],
[
[20.0, 21.0, 22.0, 23.0, 24.0],
[25.0, 26.0, 27.0, 28.0, 29.0],
[30.0, 31.0, 32.0, 33.0, 34.0],
[35.0, 36.0, 37.0, 38.0, 39.0],
],
[
[40.0, 41.0, 42.0, 43.0, 44.0],
[45.0, 46.0, 47.0, 48.0, 49.0],
[50.0, 51.0, 52.0, 53.0, 54.0],
[55.0, 56.0, 57.0, 58.0, 59.0],
],
]
]
]
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[
[
[1.0, 1.0, 1.0], # (1, 2, 3, 3, 3)
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
],
]
]
).astype(np.float32)
node = onnx.helper.make_node("ConvTranspose", ["X", "W"], ["Y"], name=op_name)
y = np.array(
[
[
[
[
[0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0], # (1, 2, 5, 6, 7)
[5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0],
[15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0],
[30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0],
[25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0],
[15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0],
],
[
[20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0],
[50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0],
[90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0],
[120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0],
[90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0],
[50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0],
],
[
[60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 72.0],
[135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0],
[225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0],
[270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0],
[195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0],
[105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0],
],
[
[60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0],
[130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0],
[210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0],
[240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0],
[170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0],
[90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0],
],
[
[40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0],
[85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0],
[135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0],
[150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0],
[105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0],
[55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0],
],
],
[
[
[0.0, 1.0, 3.0, 6.0, 9.0, 7.0, 4.0],
[5.0, 12.0, 21.0, 27.0, 33.0, 24.0, 13.0],
[15.0, 33.0, 54.0, 63.0, 72.0, 51.0, 27.0],
[30.0, 63.0, 99.0, 108.0, 117.0, 81.0, 42.0],
[25.0, 52.0, 81.0, 87.0, 93.0, 64.0, 33.0],
[15.0, 31.0, 48.0, 51.0, 54.0, 37.0, 19.0],
],
[
[20.0, 42.0, 66.0, 72.0, 78.0, 54.0, 28.0],
[50.0, 104.0, 162.0, 174.0, 186.0, 128.0, 66.0],
[90.0, 186.0, 288.0, 306.0, 324.0, 222.0, 114.0],
[120.0, 246.0, 378.0, 396.0, 414.0, 282.0, 144.0],
[90.0, 184.0, 282.0, 294.0, 306.0, 208.0, 106.0],
[50.0, 102.0, 156.0, 162.0, 168.0, 114.0, 58.0],
],
[
[60.0, 123.0, 189.0, 198.0, 207.0, 141.0, 72.0],
[135.0, 276.0, 423.0, 441.0, 459.0, 312.0, 159.0],
[225.0, 459.0, 702.0, 729.0, 756.0, 513.0, 261.0],
[270.0, 549.0, 837.0, 864.0, 891.0, 603.0, 306.0],
[195.0, 396.0, 603.0, 621.0, 639.0, 432.0, 219.0],
[105.0, 213.0, 324.0, 333.0, 342.0, 231.0, 117.0],
],
[
[60.0, 122.0, 186.0, 192.0, 198.0, 134.0, 68.0],
[130.0, 264.0, 402.0, 414.0, 426.0, 288.0, 146.0],
[210.0, 426.0, 648.0, 666.0, 684.0, 462.0, 234.0],
[240.0, 486.0, 738.0, 756.0, 774.0, 522.0, 264.0],
[170.0, 344.0, 522.0, 534.0, 546.0, 368.0, 186.0],
[90.0, 182.0, 276.0, 282.0, 288.0, 194.0, 98.0],
],
[
[40.0, 81.0, 123.0, 126.0, 129.0, 87.0, 44.0],
[85.0, 172.0, 261.0, 267.0, 273.0, 184.0, 93.0],
[135.0, 273.0, 414.0, 423.0, 432.0, 291.0, 147.0],
[150.0, 303.0, 459.0, 468.0, 477.0, 321.0, 162.0],
[105.0, 212.0, 321.0, 327.0, 333.0, 224.0, 113.0],
[55.0, 111.0, 168.0, 171.0, 174.0, 117.0, 59.0],
],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_convtranspose_pads", "ConvTranspose"
x = np.array(
[[[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0]]]] # (1, 1, 3, 3)
).astype(np.float32)
    #NOCC:invalid-name(other: onnx example)
W = np.array(
[
[
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], # (1, 2, 3, 3)
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
]
]
).astype(np.float32)
node = onnx.helper.make_node(
"ConvTranspose",
["X", "W"],
["Y"],
strides=[3, 2],
pads=[1, 2, 1, 2],
name=op_name,
)
y = np.array(
[
[
[
[1.0, 1.0, 3.0], # (1, 2, 7, 3)
[1.0, 1.0, 3.0],
[7.0, 4.0, 9.0],
[7.0, 4.0, 9.0],
[7.0, 4.0, 9.0],
[13.0, 7.0, 15.0],
[13.0, 7.0, 15.0],
],
[
[1.0, 1.0, 3.0],
[1.0, 1.0, 3.0],
[7.0, 4.0, 9.0],
[7.0, 4.0, 9.0],
[7.0, 4.0, 9.0],
[13.0, 7.0, 15.0],
[13.0, 7.0, 15.0],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x, W], outputs=[y], op_type=op_type, op_name=op_name)
def test_cos():
op_name, op_type = "test_cos_example", "Cos"
node = onnx.helper.make_node("Cos", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.cos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_cos", "Cos"
node = onnx.helper.make_node("Cos", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.cos(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_cosh():
op_name, op_type = "test_cosh_example", "Cosh"
node = onnx.helper.make_node("Cosh", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.cosh(x) # expected output [1.54308069, 1., 1.54308069]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_cosh", "Cosh"
node = onnx.helper.make_node("Cosh", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.cosh(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_depthtospace():
op_name, op_type = "test_depthtospace_crd_mode_example", "DepthToSpace"
node = onnx.helper.make_node(
"DepthToSpace",
inputs=["x"],
outputs=["y"],
blocksize=2,
mode="CRD",
name=op_name,
)
# (1, 8, 2, 3) input tensor
x = np.array(
[
[
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],
[[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],
[[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],
[[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],
[[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],
[[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],
[[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],
]
]
).astype(np.float32)
# (1, 2, 4, 6) output tensor
y = np.array(
[
[
[
[0.0, 9.0, 1.0, 10.0, 2.0, 11.0],
[18.0, 27.0, 19.0, 28.0, 20.0, 29.0],
[3.0, 12.0, 4.0, 13.0, 5.0, 14.0],
[21.0, 30.0, 22.0, 31.0, 23.0, 32.0],
],
[
[36.0, 45.0, 37.0, 46.0, 38.0, 47.0],
[54.0, 63.0, 55.0, 64.0, 56.0, 65.0],
[39.0, 48.0, 40.0, 49.0, 41.0, 50.0],
[57.0, 66.0, 58.0, 67.0, 59.0, 68.0],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_depthtospace_example"
node = onnx.helper.make_node(
"DepthToSpace",
inputs=["x"],
outputs=["y"],
blocksize=2,
mode="DCR",
name=op_name,
)
# (1, 8, 2, 3) input tensor
x = np.array(
[
[
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]],
[[9.0, 10.0, 11.0], [12.0, 13.0, 14.0]],
[[18.0, 19.0, 20.0], [21.0, 22.0, 23.0]],
[[27.0, 28.0, 29.0], [30.0, 31.0, 32.0]],
[[36.0, 37.0, 38.0], [39.0, 40.0, 41.0]],
[[45.0, 46.0, 47.0], [48.0, 49.0, 50.0]],
[[54.0, 55.0, 56.0], [57.0, 58.0, 59.0]],
[[63.0, 64.0, 65.0], [66.0, 67.0, 68.0]],
]
]
).astype(np.float32)
# (1, 2, 4, 6) output tensor
y = np.array(
[
[
[
[0.0, 18.0, 1.0, 19.0, 2.0, 20.0],
[36.0, 54.0, 37.0, 55.0, 38.0, 56.0],
[3.0, 21.0, 4.0, 22.0, 5.0, 23.0],
[39.0, 57.0, 40.0, 58.0, 41.0, 59.0],
],
[
[9.0, 27.0, 10.0, 28.0, 11.0, 29.0],
[45.0, 63.0, 46.0, 64.0, 47.0, 65.0],
[12.0, 30.0, 13.0, 31.0, 14.0, 32.0],
[48.0, 66.0, 49.0, 67.0, 50.0, 68.0],
],
]
]
).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_div():
op_name, op_type = "test_div_example", "Div"
node = onnx.helper.make_node("Div", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([3, 4]).astype(np.float32)
y = np.array([1, 2]).astype(np.float32)
z = x / y # expected output [3., 2.]
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name, op_type = "test_div", "Div"
node = onnx.helper.make_node("Div", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.rand(3, 4, 5).astype(np.float32) + 1.0
z = x / y
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name, op_type = "test_div_bcast", "Div"
node = onnx.helper.make_node("Div", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.rand(5).astype(np.float32) + 1.0
z = x / y
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_einsum():
op_name, op_type = "test_einsum_batch_diagonal", "Einsum"
eqn = "...ii ->...i"
node = onnx.helper.make_node(
"Einsum", inputs=["x"], outputs=["y"], equation=eqn, name=op_name
)
    #NOCC:invalid-name(other: onnx example)
X = np.random.randn(3, 5, 5).astype(np.float32)
from onnx.backend.test.case.node.einsum import einsum_reference_implementation
    #NOCC:invalid-name(other: onnx example)
Z = einsum_reference_implementation(eqn, (X,))
op_expect(node, inputs=[X], outputs=[Z], op_type=op_type, op_name=op_name)
def test_elu():
op_name, op_type = "test_elu_example", "Elu"
node = onnx.helper.make_node(
"Elu", inputs=["x"], outputs=["y"], alpha=2.0, name=op_name
)
x = np.array([-1, 0, 1]).astype(np.float32)
# expected output [-1.2642411, 0., 1.]
y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_elu", "Elu"
node = onnx.helper.make_node(
"Elu", inputs=["x"], outputs=["y"], alpha=2.0, name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_elu_default", "Elu"
default_alpha = 1.0
node = onnx.helper.make_node("Elu", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + (np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_erf():
op_name, op_type = "test_erf", "Erf"
node = onnx.helper.make_node("Erf", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(1, 3, 32, 32).astype(np.float32)
import math
y = np.vectorize(math.erf)(x).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_exp():
op_name, op_type = "test_exp_example", "Exp"
node = onnx.helper.make_node("Exp", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.exp(x) # expected output [0.36787945, 1., 2.71828175]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_exp", "Exp"
node = onnx.helper.make_node("Exp", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.exp(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_eyelike():
op_name, op_type = "test_eyelike_populate_off_main_diagonal", "EyeLike"
shape = (4, 5)
off_diagonal_offset = 1
node = onnx.helper.make_node(
"EyeLike",
inputs=["x"],
outputs=["y"],
k=off_diagonal_offset,
dtype=onnx.TensorProto.FLOAT,
name=op_name,
)
x = np.random.randint(0, 100, size=shape, dtype=np.int32)
y = np.eye(shape[0], shape[1], k=off_diagonal_offset, dtype=np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_eyelike_with_dtype"
shape = (3, 4)
node = onnx.helper.make_node(
"EyeLike",
inputs=["x"],
outputs=["y"],
dtype=onnx.TensorProto.FLOAT,
name=op_name,
)
x = np.random.randint(0, 100, size=shape, dtype=np.int32)
y = np.eye(shape[0], shape[1], dtype=np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_eyelike_without_dtype"
shape = (4, 4)
node = onnx.helper.make_node("EyeLike", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randint(0, 100, size=shape, dtype=np.int32)
y = np.eye(shape[0], shape[1], dtype=np.int32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_floor():
op_name, op_type = "test_floor_example", "Floor"
node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-1.5, 1.2, 2]).astype(np.float32)
y = np.floor(x) # expected output [-2., 1., 2.]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name, op_type = "test_floor", "Floor"
node = onnx.helper.make_node("Floor", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.floor(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def verify_rnn(
seq_length,
batch_size,
input_size,
hidden_size,
rnn_type="LSTM",
use_bias=False,
activations=None,
alphas=None,
betas=None,
use_initial_state=False,
use_peep=False,
linear_before_reset=False,
op_name=None,
):
if rnn_type == "LSTM":
multiplier = 4
elif rnn_type == "GRU":
multiplier = 3
else:
raise NotImplementedError("%s RNNs not yet supported." % rnn_type)
x_np = np.random.uniform(size=(seq_length, batch_size, input_size)).astype(
"float32"
)
w_np = np.random.uniform(size=(1, multiplier * hidden_size, input_size)).astype(
"float32"
)
r_np = np.random.uniform(size=(1, multiplier * hidden_size, hidden_size)).astype(
"float32"
)
input_names = ["X", "W", "R"]
input_tensors = [
helper.make_tensor_value_info("X", TensorProto.FLOAT, list(x_np.shape)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, list(w_np.shape)),
helper.make_tensor_value_info("R", TensorProto.FLOAT, list(r_np.shape)),
]
input_values = [x_np, w_np, r_np]
if use_bias:
b_np = np.random.uniform(size=(1, multiplier * 2 * hidden_size)).astype(
"float32"
)
input_names.append("B")
input_tensors.append(
helper.make_tensor_value_info(
"B", TensorProto.FLOAT, [1, multiplier * 2 * hidden_size]
)
)
input_values.append(b_np)
if use_initial_state:
assert use_bias is True, "Initial states must have bias specified."
sequence_np = np.repeat(seq_length, batch_size).astype("int32")
input_names.append("sequence_lens")
input_tensors.append(
helper.make_tensor_value_info(
"sequence_lens", TensorProto.INT32, [batch_size]
)
)
input_values.append(sequence_np)
initial_h_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype(
"float32"
)
input_names.append("initial_h")
input_tensors.append(
helper.make_tensor_value_info(
"initial_h", TensorProto.FLOAT, [1, batch_size, hidden_size]
)
)
input_values.append(initial_h_np)
if rnn_type == "LSTM":
initial_c_np = np.random.uniform(size=(1, batch_size, hidden_size)).astype(
"float32"
)
input_names.append("initial_c")
input_tensors.append(
helper.make_tensor_value_info(
"initial_c", TensorProto.FLOAT, [1, batch_size, hidden_size]
)
)
input_values.append(initial_c_np)
if use_peep and rnn_type == "LSTM":
assert (
use_initial_state is True
), "Peepholes require initial state to be specified."
p_np = np.random.uniform(size=(1, 3 * hidden_size)).astype("float32")
input_names.append("P")
input_tensors.append(
helper.make_tensor_value_info("P", TensorProto.FLOAT, [1, 3 * hidden_size])
)
input_values.append(p_np)
    #NOCC:invalid-name(other: onnx example)
Y_shape = [seq_length, 1, batch_size, hidden_size]
    #NOCC:invalid-name(other: onnx example)
Y_h_shape = [1, batch_size, hidden_size]
outputs = ["Y", "Y_h"]
graph_outputs = [
helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(Y_shape)),
helper.make_tensor_value_info("Y_h", TensorProto.FLOAT, list(Y_h_shape)),
]
output_shapes = [Y_shape, Y_h_shape]
if rnn_type == "LSTM":
        #NOCC:invalid-name(other: onnx example)
Y_c_shape = [1, batch_size, hidden_size]
outputs.append("Y_c")
graph_outputs.append(
helper.make_tensor_value_info("Y_c", TensorProto.FLOAT, list(Y_c_shape))
)
output_shapes.append(Y_c_shape)
rnn_node = helper.make_node(
rnn_type,
inputs=input_names,
outputs=outputs,
hidden_size=hidden_size,
name=op_name,
)
if activations is not None:
activations_attr = helper.make_attribute("activations", activations)
rnn_node.attribute.append(activations_attr)
if alphas is not None:
alphas_attr = helper.make_attribute("activation_alpha", alphas)
rnn_node.attribute.append(alphas_attr)
if betas is not None:
betas_attr = helper.make_attribute("activation_beta", betas)
rnn_node.attribute.append(betas_attr)
if linear_before_reset and rnn_type == "GRU":
lbr_attr = helper.make_attribute("linear_before_reset", 1)
rnn_node.attribute.append(lbr_attr)
graph = helper.make_graph(
[rnn_node], "rnn_test", inputs=input_tensors, outputs=graph_outputs
)
model = helper.make_model(graph, producer_name="rnn_test")
verify_with_ort_with_trt(model, input_values, op_name)
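# Example invocation of verify_rnn (hypothetical shapes and names; kept as
# comments so that importing this module does not trigger an ONNX Runtime/TensorRT run):
# verify_rnn(seq_length=2, batch_size=1, input_size=8, hidden_size=16,
#            rnn_type="GRU", use_bias=True, op_name="gru_with_bias")
# verify_rnn(seq_length=2, batch_size=1, input_size=8, hidden_size=16,
#            rnn_type="LSTM", use_bias=True, use_initial_state=True,
#            op_name="lstm_with_initial_state")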
def test_gather():
op_name, op_type = "test_gather_0", "Gather"
node = onnx.helper.make_node(
"Gather", inputs=["data", "indices"], outputs=["y"], axis=0, name=op_name
)
data = np.random.randn(5, 4, 3, 2).astype(np.float32)
indices = np.array([0, 1, 3])
y = np.take(data, indices, axis=0)
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_1"
node = onnx.helper.make_node(
"Gather", inputs=["data", "indices"], outputs=["y"], axis=1, name=op_name
)
data = np.random.randn(5, 4, 3, 2).astype(np.float32)
indices = np.array([0, 1, 3])
y = np.take(data, indices, axis=1)
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_2d_indices"
node = onnx.helper.make_node(
"Gather", inputs=["data", "indices"], outputs=["y"], axis=1, name=op_name
)
data = np.random.randn(3, 3).astype(np.float32)
indices = np.array([[0, 2]])
y = np.take(data, indices, axis=1)
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_negative_indices"
node = onnx.helper.make_node(
"Gather", inputs=["data", "indices"], outputs=["y"], axis=0, name=op_name
)
data = np.arange(10).astype(np.float32)
indices = np.array([0, -9, -10])
y = np.take(data, indices, axis=0)
# print(y)
# [0. 1. 0.]
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
def test_gatherelement():
op_name, op_type = "test_gather_elements_0", "GatherElements"
axis = 1
node = onnx.helper.make_node(
"GatherElements",
inputs=["data", "indices"],
outputs=["y"],
axis=axis,
name=op_name,
)
data = np.array([[1, 2], [3, 4]], dtype=np.float32)
indices = np.array([[0, 0], [1, 0]], dtype=np.int32)
from onnx.backend.test.case.node.gatherelements import gather_elements
y = gather_elements(data, indices, axis)
# print(y) produces
# [[1, 1],
# [4, 3]]
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_elements_1"
axis = 0
node = onnx.helper.make_node(
"GatherElements",
inputs=["data", "indices"],
outputs=["y"],
axis=axis,
name=op_name,
)
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
indices = np.array([[1, 2, 0], [2, 0, 0]], dtype=np.int32)
y = gather_elements(data, indices, axis)
# print(y) produces
# [[4, 8, 3],
# [7, 2, 3]]
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
op_name = "test_gather_elements_negative_indices"
axis = 0
node = onnx.helper.make_node(
"GatherElements",
inputs=["data", "indices"],
outputs=["y"],
axis=axis,
name=op_name,
)
data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
indices = np.array([[-1, -2, 0], [-2, 0, 0]], dtype=np.int32)
y = gather_elements(data, indices, axis)
# print(y) produces
# [[7, 5, 3],
# [4, 2, 3]]
op_expect(
node,
inputs=[data, indices.astype(np.int64)],
outputs=[y],
op_type=op_type,
op_name=op_name,
)
def test_gathernd():
op_name, op_type = "test_gathernd_example_float32", "GatherND"
node = onnx.helper.make_node(
"GatherND", inputs=["data", "indices"], outputs=["output"], name=op_name
)
data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.float32)
indices = np.array([[[0, 1]], [[1, 0]]], dtype=np.int64)
from onnx.backend.test.case.node.gathernd import gather_nd_impl
output = gather_nd_impl(data, indices, 0)
expected_output = np.array([[[2, 3]], [[4, 5]]], dtype=np.float32)
assert np.array_equal(output, expected_output)
op_expect(
node, inputs=[data, indices], outputs=[output], op_type=op_type, op_name=op_name
)
op_name = "test_gathernd_example_int32"
node = onnx.helper.make_node(
"GatherND", inputs=["data", "indices"], outputs=["output"], name=op_name
)
data = np.array([[0, 1], [2, 3]], dtype=np.int32)
indices = np.array([[0, 0], [1, 1]], dtype=np.int64)
output = gather_nd_impl(data, indices, 0)
expected_output = np.array([0, 3], dtype=np.int32)
assert np.array_equal(output, expected_output)
op_expect(
node, inputs=[data, indices], outputs=[output], op_type=op_type, op_name=op_name
)
op_name = "test_gathernd_example_int32_batch_dim1"
node = onnx.helper.make_node(
"GatherND",
inputs=["data", "indices"],
outputs=["output"],
batch_dims=1,
name=op_name,
)
data = np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]], dtype=np.int32)
indices = np.array([[1], [0]], dtype=np.int64)
output = gather_nd_impl(data, indices, 1)
expected_output = np.array([[2, 3], [4, 5]], dtype=np.int32)
assert np.array_equal(output, expected_output)
op_expect(
node, inputs=[data, indices], outputs=[output], op_type=op_type, op_name=op_name
)
def test_gemm():
op_name, op_type = "test_gemm_all_attributes", "Gemm"
node = onnx.helper.make_node(
"Gemm",
inputs=["a", "b", "c"],
outputs=["y"],
alpha=0.25,
beta=0.35,
transA=1,
transB=1,
name=op_name,
)
a = np.random.ranf([4, 3]).astype(np.float32)
b = np.random.ranf([5, 4]).astype(np.float32)
c = np.random.ranf([1, 5]).astype(np.float32)
from onnx.backend.test.case.node.gemm import gemm_reference_implementation
y = gemm_reference_implementation(
a, b, c, transA=1, transB=1, alpha=0.25, beta=0.35
)
op_expect(node, inputs=[a, b, c], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_gemm_alpha"
node = onnx.helper.make_node(
"Gemm", inputs=["a", "b", "c"], outputs=["y"], alpha=0.5, name=op_name
)
a = np.random.ranf([3, 5]).astype(np.float32)
b = np.random.ranf([5, 4]).astype(np.float32)
c = np.zeros([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c, alpha=0.5)
op_expect(node, inputs=[a, b, c], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_gemm_beta"
node = onnx.helper.make_node(
"Gemm", inputs=["a", "b", "c"], outputs=["y"], beta=0.5, name=op_name
)
a = np.random.ranf([2, 7]).astype(np.float32)
b = np.random.ranf([7, 4]).astype(np.float32)
c = np.random.ranf([1, 4]).astype(np.float32)
y = gemm_reference_implementation(a, b, c, beta=0.5)
op_expect(node, inputs=[a, b, c], outputs=[y], op_type=op_type, op_name=op_name)
def test_globalaveragepool():
op_name, op_type = "test_globalaveragepool", "GlobalAveragePool"
node = onnx.helper.make_node(
"GlobalAveragePool", inputs=["x"], outputs=["y"], name=op_name
)
x = np.random.randn(1, 3, 5, 5).astype(np.float32)
y = np.mean(x, axis=tuple(range(2, np.ndim(x))), keepdims=True)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_globalaveragepool_precomputed"
node = onnx.helper.make_node(
"GlobalAveragePool", inputs=["x"], outputs=["y"], name=op_name
)
x = np.array(
[
[
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]
]
]
).astype(np.float32)
y = np.array([[[[5]]]]).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_globalmaxpool():
op_name = "test_globalmaxpool"
op_type = "GlobalMaxPool"
node = onnx.helper.make_node(
"GlobalMaxPool", inputs=["x"], outputs=["y"], name=op_name
)
x = np.random.randn(1, 3, 5, 5).astype(np.float32)
y = np.max(x, axis=tuple(range(2, np.ndim(x))), keepdims=True)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_globalmaxpool_precomputed"
node = onnx.helper.make_node(
"GlobalMaxPool", inputs=["x"], outputs=["y"], name=op_name
)
x = np.array(
[
[
[
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
]
]
]
).astype(np.float32)
y = np.array([[[[9]]]]).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_hardsigmoid():
op_name, op_type = "test_hardsigmoid_example", "HardSigmoid"
node = onnx.helper.make_node(
"HardSigmoid", inputs=["x"], outputs=["y"], alpha=0.5, beta=0.6, name=op_name
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.clip(x * 0.5 + 0.6, 0, 1) # expected output [0.1, 0.6, 1.]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_hardsigmoid"
node = onnx.helper.make_node(
"HardSigmoid", inputs=["x"], outputs=["y"], alpha=0.5, beta=0.6, name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x * 0.5 + 0.6, 0, 1)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_hardsigmoid_default"
default_alpha = 0.2
default_beta = 0.5
node = onnx.helper.make_node(
"HardSigmoid", inputs=["x"], outputs=["y"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x * default_alpha + default_beta, 0, 1)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_hardswish():
op_name, op_type = "test_hardswish", "HardSwish"
node = onnx.helper.make_node("HardSwish", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
from onnx.backend.test.case.node.hardswish import hardswish
y = hardswish(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_hardmax():
op_name, op_type = "test_hardmax_example", "Hardmax"
node = onnx.helper.make_node("Hardmax", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([[3, 0, 1, 2], [2, 5, 1, 0], [0, 1, 3, 2], [0, 1, 2, 3]]).astype(
np.float32
)
# expect result:
# [[1. 0. 0. 0.]
# [0. 1. 0. 0.]
# [0. 0. 1. 0.]
# [0. 0. 0. 1.]]
from onnx.backend.test.case.node.hardmax import hardmax
y = hardmax(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_identity():
op_name, op_type = "test_identity", "Identity"
node = onnx.helper.make_node("Identity", inputs=["x"], outputs=["y"], name=op_name)
data = np.array(
[
[
[
[1, 2],
[3, 4],
]
]
],
dtype=np.float32,
)
op_expect(node, inputs=[data], outputs=[data], op_type=op_type, op_name=op_name)
def test_instancenormalization():
op_name, op_type = "test_instancenorm_example", "InstanceNormalization"
def _instancenorm_test_mode(x, s, bias, epsilon=1e-5): # type: ignore
dims_x = len(x.shape)
axis = tuple(range(2, dims_x))
mean = np.mean(x, axis=axis, keepdims=True)
var = np.var(x, axis=axis, keepdims=True)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
return s * (x - mean) / np.sqrt(var + epsilon) + bias
# input size: (1, 2, 1, 3)
x = np.array([[[[-1, 0, 1]], [[2, 3, 4]]]]).astype(np.float32)
s = np.array([1.0, 1.5]).astype(np.float32)
bias = np.array([0, 1]).astype(np.float32)
y = _instancenorm_test_mode(x, s, bias).astype(np.float32)
node = onnx.helper.make_node(
"InstanceNormalization", inputs=["x", "s", "bias"], outputs=["y"], name=op_name
)
# output size: (1, 2, 1, 3)
op_expect(node, inputs=[x, s, bias], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_instancenorm_epsilon"
# input size: (2, 3, 4, 5)
x = np.random.randn(2, 3, 4, 5).astype(np.float32)
s = np.random.randn(3).astype(np.float32)
bias = np.random.randn(3).astype(np.float32)
epsilon = 1e-2
y = _instancenorm_test_mode(x, s, bias, epsilon).astype(np.float32)
node = onnx.helper.make_node(
"InstanceNormalization",
inputs=["x", "s", "bias"],
outputs=["y"],
epsilon=epsilon,
name=op_name,
)
# output size: (2, 3, 4, 5)
op_expect(node, inputs=[x, s, bias], outputs=[y], op_type=op_type, op_name=op_name)
def test_leakyrelu():
op_name, op_type = "test_leakyrelu_example", "LeakyRelu"
node = onnx.helper.make_node(
"LeakyRelu", inputs=["x"], outputs=["y"], alpha=0.1, name=op_name
)
x = np.array([-1, 0, 1]).astype(np.float32)
# expected output [-0.1, 0., 1.]
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_leakyrelu"
node = onnx.helper.make_node(
"LeakyRelu", inputs=["x"], outputs=["y"], alpha=0.1, name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * 0.1
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_leakyrelu_default"
default_alpha = 0.01
node = onnx.helper.make_node("LeakyRelu", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * default_alpha
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_log():
op_name = "test_log_example"
op_type = "Log"
node = onnx.helper.make_node("Log", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([1, 10]).astype(np.float32)
y = np.log(x) # expected output [0., 2.30258512]
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_log"
node = onnx.helper.make_node("Log", inputs=["x"], outputs=["y"], name=op_name)
x = np.exp(np.random.randn(3, 4, 5).astype(np.float32))
y = np.log(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_logsoftmax():
op_name, op_type = "test_logsoftmax_example_1", "LogSoftmax"
node = onnx.helper.make_node(
"LogSoftmax", inputs=["x"], outputs=["y"], name=op_name
)
x = np.array([[-1, 0, 1]]).astype(np.float32)
# expected output
# [[-2.4076061 -1.407606 -0.407606 ]]
from onnx.backend.test.case.node.logsoftmax import logsoftmax
y = logsoftmax(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)
axis_order = [0, 1, -1]
for axis in axis_order:
op_name = "test_logsoftmax_axis_{}".format(str(axis + 1))
node = onnx.helper.make_node(
"LogSoftmax", inputs=["x"], outputs=["y"], axis=axis, name=op_name
)
y = logsoftmax(x, axis=axis)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_matmul():
op_name, op_type = "test_matmul_2d", "MatMul"
node = onnx.helper.make_node(
"MatMul", inputs=["a", "b"], outputs=["c"], name=op_name
)
# 2d
a = np.random.randn(3, 4).astype(np.float32)
b = np.random.randn(4, 3).astype(np.float32)
c = np.matmul(a, b)
op_expect(node, inputs=[a, b], outputs=[c], op_type=op_type, op_name=op_name)
def test_max():
op_name = "test_max_example"
op_type = "Max"
data_0 = np.array([3, 2, 1]).astype(np.float32)
data_1 = np.array([1, 4, 4]).astype(np.float32)
data_2 = np.array([2, 5, 3]).astype(np.float32)
result = np.array([3, 5, 4]).astype(np.float32)
node = onnx.helper.make_node(
"Max", inputs=["data_0", "data_1", "data_2"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1, data_2],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
op_name = "test_max_two_inputs"
result = np.maximum(data_0, data_1)
node = onnx.helper.make_node(
"Max", inputs=["data_0", "data_1"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
def _test_maxpool_2d_ceil():
op_name, op_type = "test_maxpool_2d_ceil", "MaxPool"
node = onnx.helper.make_node(
"MaxPool",
inputs=["x"],
outputs=["y"],
kernel_shape=[3, 3],
strides=[2, 2],
ceil_mode=True,
name=op_name,
)
x = np.array(
[
[
[
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
]
]
]
).astype(np.float32)
y = np.array([[[[11, 12], [15, 16]]]]).astype(np.float32)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def _test_maxpool_1d_default():
op_name, op_type = "test_maxpool_1d_default", "MaxPool"
node = onnx.helper.make_node(
"MaxPool", inputs=["x"], outputs=["y"], kernel_shape=[2], name=op_name
)
x = np.random.randn(1, 3, 32).astype(np.float32)
x_shape = np.shape(x)
kernel_shape = [2]
strides = [1]
from onnx.backend.test.case.node.pool_op_common import get_output_shape, pool
out_shape = get_output_shape("VALID", x_shape[2:], kernel_shape, strides)
padded = x
y = pool(padded, x_shape, kernel_shape, strides, out_shape, [0], "MAX")
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_maxpool():
_test_maxpool_2d_ceil()
_test_maxpool_1d_default()
def test_mean():
op_name, op_type = "test_mean_example", "Mean"
data_0 = np.array([3, 0, 2]).astype(np.float32)
data_1 = np.array([1, 3, 4]).astype(np.float32)
data_2 = np.array([2, 6, 6]).astype(np.float32)
result = np.array([2, 3, 4]).astype(np.float32)
node = onnx.helper.make_node(
"Mean", inputs=["data_0", "data_1", "data_2"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1, data_2],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
op_name = "test_mean_two_inputs"
result = np.divide(np.add(data_0, data_1), 2.0)
node = onnx.helper.make_node(
"Mean", inputs=["data_0", "data_1"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
def test_min():
op_name, op_type = "test_min_example", "Min"
data_0 = np.array([3, 2, 1]).astype(np.float32)
data_1 = np.array([1, 4, 4]).astype(np.float32)
data_2 = np.array([2, 5, 0]).astype(np.float32)
result = np.array([1, 2, 0]).astype(np.float32)
node = onnx.helper.make_node(
"Min", inputs=["data_0", "data_1", "data_2"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1, data_2],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
op_name = "test_min_two_inputs"
result = np.minimum(data_0, data_1)
node = onnx.helper.make_node(
"Min", inputs=["data_0", "data_1"], outputs=["result"], name=op_name
)
op_expect(
node,
inputs=[data_0, data_1],
outputs=[result],
op_type=op_type,
op_name=op_name,
)
def test_mul():
op_name, op_type = "test_mul_example", "Mul"
node = onnx.helper.make_node("Mul", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.float32)
z = x * y # expected output [4., 10., 18.]
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_mul"
node = onnx.helper.make_node("Mul", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = x * y
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_mul_bcast"
node = onnx.helper.make_node("Mul", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.random.randn(5).astype(np.float32)
z = x * y
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_neg():
op_name, op_type = "test_neg_example", "Neg"
node = onnx.helper.make_node("Neg", inputs=["x"], outputs=["y"], name=op_name)
x = np.array([-4, 2]).astype(np.float32)
y = np.negative(x) # expected output [4., -2.],
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_neg"
node = onnx.helper.make_node("Neg", inputs=["x"], outputs=["y"], name=op_name)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.negative(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_negativeloglikelihoodloss():
op_name, op_type = "test_nllloss_NC", "NegativeLogLikelihoodLoss"
reduction = "none"
node = onnx.helper.make_node(
"NegativeLogLikelihoodLoss",
inputs=["input", "target"],
outputs=["loss"],
reduction=reduction,
name=op_name,
)
    #NOCC:invalid-name(other: onnx example)
N, C = 3, 5
np.random.seed(0)
input = np.random.rand(N, C).astype(np.float32)
target = np.random.randint(0, high=C, size=(N,)).astype(np.int64)
from onnx.backend.test.case.node.negativeloglikelihoodloss import (
compute_negative_log_likelihood_loss,
)
negative_log_likelihood_loss = compute_negative_log_likelihood_loss(
input, target, weight=None, reduction=reduction
)
op_expect(
node,
inputs=[input, target],
outputs=[negative_log_likelihood_loss],
op_type=op_type,
op_name=op_name,
)
def test_prelu():
op_name, op_type = "test_prelu_example", "PRelu"
node = onnx.helper.make_node(
"PRelu", inputs=["x", "slope"], outputs=["y"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
slope = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope
op_expect(node, inputs=[x, slope], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_prelu_broadcast"
node = onnx.helper.make_node(
"PRelu", inputs=["x", "slope"], outputs=["y"], name=op_name
)
x = np.random.randn(3, 4, 5).astype(np.float32)
slope = np.random.randn(5).astype(np.float32)
y = np.clip(x, 0, np.inf) + np.clip(x, -np.inf, 0) * slope
op_expect(node, inputs=[x, slope], outputs=[y], op_type=op_type, op_name=op_name)
def test_pow():
op_name, op_type = "test_pow_example", "Pow"
node = onnx.helper.make_node("Pow", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([4, 5, 6]).astype(np.float32)
z = pow(x, y) # expected output [1., 32., 729.]
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_pow"
node = onnx.helper.make_node("Pow", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.arange(60).reshape(3, 4, 5).astype(np.float32)
y = np.random.randn(3, 4, 5).astype(np.float32)
z = pow(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_pow_bcast_scalar"
node = onnx.helper.make_node("Pow", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([2]).astype(np.float32)
z = pow(x, y) # expected output [1., 4., 9.]
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
op_name = "test_pow_bcast_array"
node = onnx.helper.make_node("Pow", inputs=["x", "y"], outputs=["z"], name=op_name)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
y = np.array([[1, 2, 3]]).astype(np.float32)
# expected output [[1, 4, 27], [4, 25, 216]]
z = pow(x, y)
op_expect(node, inputs=[x, y], outputs=[z], op_type=op_type, op_name=op_name)
def test_reciprocal():
op_name, op_type = "test_reciprocal_example", "Reciprocal"
node = onnx.helper.make_node(
"Reciprocal", inputs=["x"], outputs=["y"], name=op_name
)
x = np.array([-4, 2]).astype(np.float32)
y = np.reciprocal(x) # expected output [-0.25, 0.5],
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
op_name = "test_reciprocal"
node = onnx.helper.make_node(
"Reciprocal", inputs=["x"], outputs=["y"], name=op_name
)
x = np.random.rand(3, 4, 5).astype(np.float32) + 0.5
y = np.reciprocal(x)
op_expect(node, inputs=[x], outputs=[y], op_type=op_type, op_name=op_name)
def test_reducel1():
op_name, op_type = "test_reduce_l1_default_axes_keepdims_example", "ReduceL1"
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceL1",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
# print(data)
# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
reduced = np.sum(a=np.abs(data), axis=axes, keepdims=keepdims == 1)
# print(reduced)
# [[[78.]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sum(a=np.abs(data), axis=axes, keepdims=keepdims == 1)
op_name = "test_reduce_l1_default_axes_keepdims_random"
node = onnx.helper.make_node(
"ReduceL1",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducel2():
op_name, op_type = "test_reduce_l2_default_axes_keepdims_example", "ReduceL2"
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceL2",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
# print(data)
# [[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]], [[9., 10.], [11., 12.]]]
reduced = np.sqrt(np.sum(a=np.square(data), axis=axes, keepdims=keepdims == 1))
# print(reduced)
# [[[25.49509757]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_l2_default_axes_keepdims_random"
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.sqrt(np.sum(a=np.square(data), axis=axes, keepdims=keepdims == 1))
node = onnx.helper.make_node(
"ReduceL2",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducelogsum():
op_name, op_type = "test_reduce_log_sum_default", "ReduceLogSum"
node = onnx.helper.make_node(
"ReduceLogSum", inputs=["data"], outputs=["reduced"], name=op_name
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, keepdims=True))
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_log_sum_negative_axes"
node = onnx.helper.make_node(
"ReduceLogSum", inputs=["data"], outputs=["reduced"], axes=[-2], name=op_name
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(-2), keepdims=True))
# print(reduced)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_log_sum_desc_axes"
node = onnx.helper.make_node(
"ReduceLogSum",
inputs=["data"],
outputs=["reduced"],
axes=[2, 1],
keepdims=0,
name=op_name,
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(2, 1), keepdims=False))
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_log_sum_asc_axes"
node = onnx.helper.make_node(
"ReduceLogSum",
inputs=["data"],
outputs=["reduced"],
axes=[0, 1],
keepdims=0,
name=op_name,
)
data = np.random.ranf([3, 4, 5]).astype(np.float32)
reduced = np.log(np.sum(data, axis=(0, 1), keepdims=False))
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducelogsumexp():
op_name, op_type = (
"test_reduce_log_sum_exp_default_axes_keepdims_example",
"ReduceLogSumExp",
)
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceLogSumExp",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.array(
[[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32
)
reduced = np.log(np.sum(np.exp(data), axis=axes, keepdims=keepdims == 1))
# print(reduced)
# [[[60.00671387]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_log_sum_exp_default_axes_keepdims_random"
node = onnx.helper.make_node(
"ReduceLogSumExp",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.log(np.sum(np.exp(data), axis=axes, keepdims=keepdims == 1))
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducemax():
op_name, op_type = "test_reduce_max_default_axes_keepdim_example", "ReduceMax"
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceMax",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.array(
[[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32
)
reduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)
# print(reduced)
# [[[60.]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_max_default_axes_keepdims_random"
node = onnx.helper.make_node(
"ReduceMax",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
np.random.seed(0)
data = np.random.uniform(-10, 10, shape).astype(np.float32)
reduced = np.maximum.reduce(data, axis=axes, keepdims=keepdims == 1)
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
def test_reducemean():
op_name, op_type = "test_reduce_mean_default_axes_keepdims_example", "ReduceMean"
shape = [3, 2, 2]
axes = None
keepdims = 1
node = onnx.helper.make_node(
"ReduceMean",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
data = np.array(
[[[5, 1], [20, 2]], [[30, 1], [40, 2]], [[55, 1], [60, 2]]], dtype=np.float32
)
reduced = np.mean(data, axis=axes, keepdims=keepdims == 1)
# print(reduced)
# [[[18.25]]]
op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)
op_name = "test_reduce_mean_default_axes_keepdims_random"
node = onnx.helper.make_node(
"ReduceMean",
inputs=["data"],
outputs=["reduced"],
keepdims=keepdims,
name=op_name,
)
    np.random.seed(0)
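    # Random-data case, completed by analogy with the other reduce tests above
    # (assumption: the original continues with the same pattern).
    data = np.random.uniform(-10, 10, shape).astype(np.float32)
    reduced = np.mean(data, axis=axes, keepdims=keepdims == 1)
    op_expect(node, inputs=[data], outputs=[reduced], op_type=op_type, op_name=op_name)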
import argparse
import cv2
import torch
import numpy as np
from model import SCNN
from model_ENET_SAD import ENet_SAD
from model_SEENET_SAD import SEENet_SAD
from utils.prob2lines import getLane
from utils.prob2lines.lane_detection_funtions import roneld_lane_detection
from utils.transforms import *
import time
from multiprocessing import Process, JoinableQueue, SimpleQueue
from threading import Lock
img_size = (800, 288)
#net = SCNN(input_size=(800, 288), pretrained=False)
net = ENet_SAD((800,288), sad=False)
seenet = SEENet_SAD((800, 288), sad=False)
# CULane mean, std
mean=(0.3598, 0.3653, 0.3662)
std=(0.2573, 0.2663, 0.2756)
# Imagenet mean, std
# mean=(0.485, 0.456, 0.406)
# std=(0.229, 0.224, 0.225)
transform_img = Resize(img_size)
transform_to_net = Compose(ToTensor(), Normalize(mean=mean, std=std))
pipeline = False
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--video_path", '-i', type=str, default="experiments/Zhejiang_cloud_crude_20201103175630_cruve.mp4", help="Path to demo video")
parser.add_argument("--weight_path", '-w', type=str, default="experiments/exp1/exp1_best.pth", help="Path to model weights")
parser.add_argument("--weight_path1", '-x', type=str, default="experiments/exp2/exp1_best.pth", help="Path to model weights")
parser.add_argument("--visualize", '-v', action="store_true", default=False, help="Visualize the result")
args = parser.parse_args()
return args
def do_canny(frame):
# Converts frame to grayscale because we only need the luminance channel for detecting edges - less computationally expensive
gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# Applies a 5x5 gaussian blur with deviation of 0 to frame - not mandatory since Canny will do this for us
blur = cv2.GaussianBlur(gray, (5, 5), 0)
# Applies Canny edge detector with minVal of 50 and maxVal of 150
canny = cv2.Canny(blur, 50, 150)
return canny
def do_segment(frame):
    # Since an image is a multi-dimensional array containing the relative intensities of each pixel in the image, we can use frame.shape to return a tuple: [number of rows, number of columns, number of channels] of the dimensions of the frame
# frame.shape[0] give us the number of rows of pixels the frame has. Since height begins from 0 at the top, the y-coordinate of the bottom of the frame is its height
height = frame.shape[0]
# Creates a triangular polygon for the mask defined by three (x, y) coordinates
polygons = np.array([
[(150, 200), (580, 200), (350, 100)]
])
# Creates an image filled with zero intensities with the same dimensions as the frame
mask = np.zeros_like(frame)
# Allows the mask to be filled with values of 1 and the other areas to be filled with values of 0
cv2.fillPoly(mask, polygons, 255)
# A bitwise and operation between the mask and frame keeps only the triangular area of the frame
segment = cv2.bitwise_and(frame, mask)
return segment
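# A minimal pipeline sketch (the Hough-transform call is an assumption and is not part of this snippet):
#   edges = do_canny(frame)
#   roi = do_segment(edges)
#   hough = cv2.HoughLinesP(roi, 2, np.pi / 180, 100, np.array([]), minLineLength=100, maxLineGap=50)
#   lanes = calculate_lines(frame, hough)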
def calculate_lines(frame, lines):
# Empty arrays to store the coordinates of the left and right lines
left = []
right = []
print("lallalalalal",lines)
# Loops through every detected line
for line in lines:
# Reshapes line from 2D array to 1D array
x1, y1, x2, y2 = line.reshape(4)
print(x1, y1, x2, y2 )
# Fits a linear polynomial to the x and y coordinates and returns a vector of coefficients which describe the slope and y-intercept
parameters = np.polyfit((x1, x2), (y1, y2), 1)
slope = parameters[0]
y_intercept = parameters[1]
# If slope is negative, the line is to the left of the lane, and otherwise, the line is to the right of the lane
if slope < 0:
left.append((slope, y_intercept))
else:
right.append((slope, y_intercept))
# Averages out all the values for left and right into a single slope and y-intercept value for each line
left_avg = np.average(left, axis = 0)
right_avg = np.average(right, axis = 0)
# Calculates the x1, y1, x2, y2 coordinates for the left and right lines
left_line = calculate_coordinates(frame, left_avg)
right_line = calculate_coordinates(frame, right_avg)
return | np.array([left_line, right_line]) | numpy.array |
"""This script is the differentiable renderer for Deep3DFaceRecon_pytorch
Attention, antialiasing step is missing in current version.
"""
import torch
import torch.nn.functional as F
import kornia
from kornia.geometry.camera import pixel2cam
import numpy as np
from typing import List
import nvdiffrast.torch as dr
from scipy.io import loadmat
from torch import nn
def ndc_projection(x=0.1, n=1.0, f=50.0):
return np.array([[n/x, 0, 0, 0],
[ 0, n/-x, 0, 0],
[ 0, 0, -(f+n)/(f-n), -(2*f*n)/(f-n)],
[ 0, 0, -1, 0]]).astype(np.float32)
class MeshRenderer(nn.Module):
def __init__(self,
rasterize_fov,
znear=0.1,
zfar=10,
rasterize_size=224):
super(MeshRenderer, self).__init__()
x = np.tan( | np.deg2rad(rasterize_fov * 0.5) | numpy.deg2rad |
#!/usr/bin/env python
"""
This module uses the CARLA simulator to collect data and train an HMM by computing its transition (state) and observation matrices.
"""
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
import carla
import time
from time import sleep
import math
import numpy as np
import itertools
import random
import atexit
from hmmlearn import hmm
import sys
import os.path
sys.path.append('../Perception')
from Perception import local_map as lmp
# ==============================================================================
# -- Implementation ------------------------------------------------------------
# ==============================================================================
STATE_VECTOR = ["Stop", "Decelerate", "Steady", "Accelerate"]
CRITERIA = ["DS", "U", "DV"] # DS: Distance from stop, u:speed, DV: distance from vehicle
CRITERIA_STATES = ["S", "B"]
OBSERVATION_VECTOR = [subset for subset in itertools.product(CRITERIA_STATES, repeat=len(CRITERIA))]
for case_i in OBSERVATION_VECTOR:
temp = ""
for s_i in case_i:
temp += s_i
OBSERVATION_VECTOR[OBSERVATION_VECTOR.index(case_i)] = temp
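# After this loop OBSERVATION_VECTOR holds the 2**3 = 8 observation symbols, in itertools.product order:
# ["SSS", "SSB", "SBS", "SBB", "BSS", "BSB", "BBS", "BBB"]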
class HMM_MODEL:
def __init__(self, local_map=None):
if local_map is not None: # <-- For training and fitting process
self.local_map = local_map
self.vehicle_id = None
self.state_matrix = np.zeros((len(STATE_VECTOR), len(STATE_VECTOR)))
self.observation_matrix = np.zeros((len(OBSERVATION_VECTOR), len(STATE_VECTOR)))
self.previous_state = STATE_VECTOR[0]
self.previous_speed = self.local_map.get_ego_vehicle().speed
self.observations_list = []
self.states_list = []
self.HMM_model = None
else: # <-- For prediction process
state_file_parameters = os.path.dirname(os.path.realpath(__file__))+'/fit_state_matrix.txt'
observation_file_parameters = os.path.dirname(os.path.realpath(__file__))+'/fit_observation_matrix.txt'
            assert os.path.isfile(state_file_parameters) and os.path.isfile(observation_file_parameters)  # both matrices are loaded below
state_matrix = np.loadtxt(state_file_parameters, dtype=float)
observation_matrix = | np.loadtxt(observation_file_parameters, dtype=float) | numpy.loadtxt |
import os
import sys
import pylab
import numpy
import torch
import time
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.gridspec as gridspec
import matplotlib.cm as cm
import copy
from scipy.spatial.transform import Rotation as R
def calculate_contours(pdf_vals, bin_volumes, probs=[0.68, 0.95]):
totsum = 0.0
flattend_pdf = pdf_vals.flatten()
#flattend_volumes = bin_volumes.flatten()
sorta = numpy.argsort(flattend_pdf)[::-1]
contour_values = []
cur_prob_index = 0
for ind, pdf_eval in enumerate(flattend_pdf[sorta]):
totsum += pdf_eval*bin_volumes#flattend_volumes[sorta][ind]
if (totsum > probs[cur_prob_index]):
contour_values.append(pdf_eval)
cur_prob_index += 1
if (cur_prob_index >= len(probs)):
break
return contour_values
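# The returned values are the pdf levels whose super-level sets enclose roughly the requested
# probability mass (0.68 and 0.95 by default), i.e. highest-density-region thresholds; here
# bin_volumes is used as the (scalar) volume of a single grid cell.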
def get_bounds_from_contour(cres, boundary=0.1):
cont_min_x = 9999999.9
cont_max_x = -9999999.9
cont_min_y = 999999999.9
cont_max_y = -9999999999.9
for i in cres.allsegs[0]:
for j in i:
if (j[0] < cont_min_x):
cont_min_x = j[0]
if (j[0] > cont_max_x):
cont_max_x = j[0]
if (j[1] < cont_min_y):
cont_min_y = j[1]
if (j[1] > cont_max_y):
cont_max_y = j[1]
return cont_min_x, cont_max_x, cont_min_y, cont_max_y
def get_minmax_values(samples):
mins_maxs = []
for ind in range(samples.shape[1]):
min_val = min(samples[:, ind])
max_val = max(samples[:, ind])
mins_maxs.append((min_val, max_val))
return mins_maxs
def get_pdf_on_grid(mins_maxs, npts, model, conditional_input=None, s2_norm="standard", s2_rotate_to_true_value=False, true_values=None):
side_vals = []
bin_volumes = 1.0#numpy.ones([npts]*len(mins_maxs))
glob_ind = 0
#has_high_dim_spheres = False
cinput = None
sin_zen_mask=[]
for pdf in model.pdf_defs_list:
this_sub_dim = int(pdf[1])
if (pdf == "s2" and s2_norm=="lambert"):
#has_high_dim_spheres = True
side_vals.append(numpy.linspace(-2, 2, npts))
bin_volumes *= (side_vals[-1][1] - side_vals[-1][0])
side_vals.append(numpy.linspace(-2, 2, npts))
bin_volumes *= (side_vals[-1][1] - side_vals[-1][0])
sin_zen_mask.extend([0,0])
elif(pdf=="s2" and s2_norm=="standard"):
sin_zen_mask.extend([1,0])
zen_vals=numpy.linspace(mins_maxs[glob_ind][0]+1e-4, mins_maxs[glob_ind][1]-1e-4, npts)
side_vals.append(zen_vals)
bin_volumes*=(side_vals[-1][1] - side_vals[-1][0])
side_vals.append(numpy.linspace(1e-4, 2*numpy.pi-1e-4, npts))
bin_volumes *= (side_vals[-1][1] - side_vals[-1][0])
elif(pdf=="s2"):
raise Exception("s2_norm ", s2_norm, " unknown .")
else:
for ind, mm in enumerate(mins_maxs[glob_ind:glob_ind +
this_sub_dim]):
side_vals.append(numpy.linspace(mm[0], mm[1], npts))
bin_volumes *= (side_vals[-1][1] - side_vals[-1][0])
sin_zen_mask.append(0)
glob_ind += this_sub_dim
eval_positions = numpy.meshgrid(*side_vals)
torch_positions = torch.from_numpy(
numpy.resize(
numpy.array(eval_positions).T,
(npts**len(mins_maxs), len(mins_maxs))))
eval_positions = torch_positions.clone()
mask_inner = torch.ones(len(torch_positions)) == 1
for ind, pdf_def in enumerate(model.pdf_defs_list):
if (pdf_def == "s2" and s2_norm=="lambert"):
fix_point=None
if(s2_rotate_to_true_value and true_values is not None):
fix_point=true_values[model.target_dim_indices[ind][0]:model.target_dim_indices[ind][1]]
mask_inner = mask_inner & (torch.sqrt(
(eval_positions[:, model.target_dim_indices[ind][0]:model.
target_dim_indices[ind][1]]**2).sum(axis=1)) <
2)
            ## transform s2 subdimensions from the equal-area Lambert projection back to the spherical coordinates the model can use
eval_positions[:, model.target_dim_indices[ind][0]:model.
target_dim_indices[ind]
[1]] = cartesian_lambert_to_spherical(
eval_positions[:, model.
target_dim_indices[ind][0]:model.
target_dim_indices[ind][1]], fix_point=fix_point)
if (conditional_input is not None):
cinput = conditional_input.repeat(npts**len(mins_maxs), 1)[mask_inner]
log_res, _, _ = model(eval_positions[mask_inner], conditional_input=cinput)
## no conditional input and only s2 pdf .. mask bad regions
flagged_coords=numpy.array([])
if(conditional_input is None and model.pdf_defs_list[0]=="s2"):
problematic_pars=model.layer_list[0][0].return_problematic_pars_between_hh_and_intrinsic(eval_positions[mask_inner], flag_pole_distance=0.02)
if(problematic_pars.shape[0]>0):
if(s2_norm=="lambert"):
fix_point=None
if(s2_rotate_to_true_value and true_values is not None):
fix_point=true_values[model.target_dim_indices[ind][0]:model.target_dim_indices[ind][1]]
problematic_pars=spherical_to_cartesian_lambert(problematic_pars, fix_point=fix_point)
flagged_coords=problematic_pars.detach().numpy()
"""
lr_mask=numpy.exp(log_res)>1e4
print("############################# TEST")
bad_res,_,_=model(eval_positions[mask_inner][lr_mask][:1], conditional_input=None)
print(bad_res)
"""
res = (-600.0)*torch.ones(len(torch_positions)).type_as(torch_positions)
res[mask_inner] = log_res #.exp()
res = res.detach().numpy()
numpy_positions=eval_positions.detach().numpy()
if((numpy.isfinite(res)==False).sum()>0):
print("Non-finite evaluation during PDF eval for plotting..")
print((numpy.isfinite(res)==False).sum())
print(numpy_positions[(numpy.isfinite(res)==False)])
r,_,_=model(eval_positions[mask_inner][torch.isfinite(log_res)==False][:1], conditional_input=cinput)
print(r)
raise Exception()
#######################
res.resize([npts] * len(mins_maxs))
resized_torch_positions = torch_positions.detach().numpy()
resized_torch_positions.resize([npts] * len(mins_maxs) + [len(mins_maxs)])
## add in sin(theta) factors into density
for ind, sz in enumerate(sin_zen_mask):
if(sz==1):
slice_mask=(None,)*ind+(slice(None,None),)+(None,)*(len(sin_zen_mask)-1-ind)
zen_vals=numpy.sin(numpy.linspace(mins_maxs[ind][0]+1e-4, mins_maxs[ind][1]-1e-4, npts))
## log val, adding zenith factors where needed
res+=numpy.log(zen_vals[slice_mask])
return resized_torch_positions, res, bin_volumes, sin_zen_mask, flagged_coords
def rotate_coords_to(theta, phi, target, reverse=False):
target_theta=target[0]
target_phi=target[1]
x=numpy.cos(target_phi)*numpy.sin(target_theta)
y= | numpy.sin(target_phi) | numpy.sin |
import numpy as np
import scipy.io
import scipy.signal
import soundfile as sf
import librosa
import matplotlib.pyplot as plt
class AudioProcessor(object):
def __init__(self,
sample_rate=22050,
num_mels=80,
num_freq=1025,
frame_length_ms=50,
frame_shift_ms=12.5,
preemphasis=0.98,
min_level_db=-100,
ref_level_db=20,
power=1.5,
mel_fmin=0.0,
mel_fmax=8000.0,
griffin_lim_iters=60):
self.sample_rate = sample_rate
self.num_mels = num_mels
self.num_freq = num_freq
self.frame_length_ms = frame_length_ms
self.frame_shift_ms = frame_shift_ms
self.preemphasis = preemphasis
self.min_level_db = min_level_db
self.ref_level_db = ref_level_db
self.power = power
self.mel_fmin = mel_fmin
self.mel_fmax = mel_fmax
self.griffin_lim_iters = griffin_lim_iters
self.n_fft, self.hop_length, self.win_length = self._stft_parameters()
print('AudioProcessor')
for key, value in vars(self).items():
print(' {}:{}'.format(key, value))
def load_wav(self, filename, sr=None):
if sr is None:
y, sr = sf.read(filename)
else:
y, sr = librosa.load(filename, sr=sr)
assert self.sample_rate == sr, \
'WARNING: sample_rate mismatch: %d <=> %d' % (self.sample_rate, sr)
return y
def save_wav(self, y, path):
wav_norm = y * (32767 / max(0.01, np.max( | np.abs(y) | numpy.abs |
"""
VAE on the swirl task.
Basically, VAEs don't work. It's probably because the prior isn't very good
and/or because the learning signal is pretty weak when both the encoder and
decoder change quickly. However, I tried also alternating between the two,
and that didn't seem to help.
"""
from torch.distributions import Normal
from torch.optim import Adam
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch import nn as nn
import railrl.torch.pytorch_util as ptu
SWIRL_RATE = 1
T = 10
BS = 128
N_BATCHES = 2000
N_VIS = 1000
HIDDEN_SIZE = 32
VERBOSE = False
def swirl_data(batch_size):
t = np.random.uniform(size=batch_size, low=0, high=T)
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
data = np.array([x, y]).T
noise = np.random.randn(batch_size, 2) / (T * 2)
return data + noise, t.reshape(-1, 1)
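# swirl_data(batch_size) returns (data, t): data is a (batch_size, 2) array of noisy (x, y) points
# on the swirl and t is the (batch_size, 1) latent parameter that generated them.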
def swirl_t_to_data(t):
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
return np.array([x, y]).T
def kl_to_prior(means, log_stds, stds):
"""
KL between a Gaussian and a standard Gaussian.
https://stats.stackexchange.com/questions/60680/kl-divergence-between-two-multivariate-gaussians
"""
return 0.5 * (
- 2 * log_stds # log std_prior = 0
- 1 # d = 1
+ stds ** 2
+ means ** 2
)
class Encoder(nn.Sequential):
def encode(self, x):
return self.get_encoding_and_suff_stats(x)[0]
def get_encoding_and_suff_stats(self, x):
output = self(x)
means, log_stds = (
output[:, 0:1], output[:, 1:2]
)
stds = log_stds.exp()
epsilon = ptu.Variable(torch.randn(*means.size()))
latents = epsilon * stds + means
latents = latents
return latents, means, log_stds, stds
class Decoder(nn.Sequential):
def decode(self, latents):
output = self(latents)
means, log_stds = output[:, 0:2], output[:, 2:4]
distribution = Normal(means, log_stds.exp())
return distribution.sample()
def t_to_xy(t):
if len(t.shape) == 2:
t = t[:, 0]
x = t * np.cos(t * SWIRL_RATE) / T
y = t * np.sin(t * SWIRL_RATE) / T
return | np.array([x, y]) | numpy.array |
#Ref: <NAME>
"""
Training and testing for semantic segmentation (Unet) of mitochondria
Uses standard Unet framework with Jacard for loss.
Dataset info: Electron microscopy (EM) dataset from
https://www.epfl.ch/labs/cvlab/data/data-em/
Patches of 256x256 from images and labels
have been extracted (via separate program) and saved to disk.
This code uses 256x256 images/masks.
"""
from simple_unet_model_with_jacard import simple_unet_model_with_jacard #Use normal unet model
from simple_unet_model import simple_unet_model #Use normal unet model
from keras.utils import normalize
import os
import cv2
from PIL import Image
import numpy as np
from matplotlib import pyplot as plt
image_directory = 'data/generated_patches/images/'
mask_directory = 'data/generated_patches/masks/'
SIZE = 256
image_dataset = [] #Many ways to handle data, you can use pandas. Here, we are using a list format.
mask_dataset = []  #Placeholder list for the corresponding segmentation masks.
images = os.listdir(image_directory)
for i, image_name in enumerate(images): #Remember enumerate method adds a counter and returns the enumerate object
if (image_name.split('.')[1] == 'tif'):
#print(image_directory+image_name)
image = cv2.imread(image_directory+image_name, 0)
image = Image.fromarray(image)
image = image.resize((SIZE, SIZE))
image_dataset.append(np.array(image))
#Iterate through all masks in the mask folder, resize to 256 x 256
#Then append them to the mask_dataset list
masks = os.listdir(mask_directory)
for i, image_name in enumerate(masks):
if (image_name.split('.')[1] == 'tif'):
image = cv2.imread(mask_directory+image_name, 0)
image = Image.fromarray(image)
image = image.resize((SIZE, SIZE))
mask_dataset.append(np.array(image))
#Normalize images
image_dataset = np.expand_dims(normalize(np.array(image_dataset), axis=1),3)
#Do not normalize masks, just rescale to 0 to 1.
mask_dataset = np.expand_dims((np.array(mask_dataset)),3) /255.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(image_dataset, mask_dataset, test_size = 0.10, random_state = 0)
X_train_quick_test, X_test_quick_test, y_train_quick_test, y_test_quick_test = train_test_split(X_train, y_train, test_size = 0.9, random_state = 0)
#Sanity check, view a few images
import random
import numpy as np
image_number = random.randint(0, len(X_train_quick_test))
plt.figure(figsize=(12, 6))
plt.subplot(121)
plt.imshow(np.reshape(X_train_quick_test[image_number], (256, 256)), cmap='gray')
plt.subplot(122)
plt.imshow(np.reshape(y_train_quick_test[image_number], (256, 256)), cmap='gray')
plt.show()
###############################################################
IMG_HEIGHT = image_dataset.shape[1]
IMG_WIDTH = image_dataset.shape[2]
IMG_CHANNELS = image_dataset.shape[3]
def get_jacard_model():
return simple_unet_model_with_jacard(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
model_jacard = get_jacard_model()
def get_standard_model():
return simple_unet_model(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
model_standard = get_standard_model()
#If starting with pre-trained weights.
#model.load_weights('mitochondria_with_jacard_50_epochs.hdf5')
history_jacard = model_jacard.fit(X_train_quick_test, y_train_quick_test,
batch_size = 16,
verbose=1,
epochs=10,
validation_data=(X_test, y_test),
shuffle=False)
history_standard = model_standard.fit(X_train_quick_test, y_train_quick_test,
batch_size = 16,
verbose=1,
epochs=10,
validation_data=(X_test, y_test),
shuffle=False)
model_jacard.save('mitochondria_with_jacard.hdf5')
model_standard.save('mitochondria_standard.hdf5')
#To be compared with trained model mitochondria_gpu_tf1.4.hdf5
# that just used cross entropy for loss and accuracy for metric.
############################################################
#Evaluate the model
# evaluate model
_, acc = model_jacard.evaluate(X_test, y_test)
print("Accuracy of Jacard Model is = ", (acc * 100.0), "%")
# evaluate model
_, acc = model_standard.evaluate(X_test, y_test)
print("Accuracy of Standard Model is = ", (acc * 100.0), "%")
#plot the training and validation accuracy and loss at each epoch
loss = history_jacard.history['loss']
val_loss = history_jacard.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
jc = history_jacard.history['jacard_coef']
#acc = history.history['accuracy']
val_jc = history_jacard.history['val_jacard_coef']
#val_acc = history.history['val_accuracy']
plt.plot(epochs, jc, 'y', label='Training Jacard Coeff.')
plt.plot(epochs, val_jc, 'r', label='Validation Jacard Coeff.')
plt.title('Training and validation Jacard')
plt.xlabel('Epochs')
plt.ylabel('Jacard Coefficient')
plt.legend()
plt.show()
###
#plot the training and validation accuracy and loss at each epoch
loss = history_standard.history['loss']
val_loss = history_standard.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'y', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
acc = history_standard.history['acc']
#acc = history.history['accuracy']
val_acc = history_standard.history['val_acc']
#val_acc = history.history['val_accuracy']
plt.plot(epochs, acc, 'y', label='Training Accuracy')
plt.plot(epochs, val_acc, 'r', label='Validation Accuracy')
plt.title('Training and validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
##################################
model = model_jacard #Assign model to one of the models instead of changing the entire code for testing.
#model = model_standard #Assign model to one of the models instead of changing the entire code for testing.
#IoU = |intersection| / |union| over all pixels of the thresholded predictions
y_pred=model.predict(X_test)
y_pred_thresholded = y_pred > 0.5
intersection = np.logical_and(y_test, y_pred_thresholded)
union = np.logical_or(y_test, y_pred_thresholded)
iou_score = np.sum(intersection) / | np.sum(union) | numpy.sum |
# Parser for MDPs - iostr
import numpy as np
from dataset import Dataset
def parse_iostr(folder, filename):
assert('mdps' in folder)
assert('.iostr' in filename)
data = False
X = []
Y = []
Xnames = np.array([])
Svarno = -1
Avarno = -1
Xdomains = []
Ynames = {'no': 0, 'yes': 1}
def decimal_to_binary(decimal, length):
res = []
for i in range(length):
res.append(decimal % 2)
decimal = decimal // 2 # int division
return list(reversed(res))
def binary_to_decimal(binary):
res, add = 0, 1
for i in reversed(binary):
if (i == 1):
res += add
add *= 2
return res
assert(47 == binary_to_decimal(decimal_to_binary(47, 9)))
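    # e.g. decimal_to_binary(5, 4) == [0, 1, 0, 1] and binary_to_decimal([0, 1, 0, 1]) == 5
    # (most significant bit first, zero-padded to the requested length)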
lineno = 0
for line in open('%s/%s' % (folder, filename), 'r'):
lineno += 1
if (lineno == 1):
assert(filename.replace('.iostr','') in line)
continue
if (lineno == 2):
assert('State Boolean variable names' in line)
Svarno = int(line.split()[-1].replace('(','').replace(')',''))
continue
if (lineno == 3):
Xnames = np.array(line.split())
assert(Xnames.size == Svarno)
continue
if (lineno == 4):
assert('Action Boolean variable names' in line)
Avarno = int(line.split()[-1].replace('(','').replace(')',''))
continue
if (lineno == 5):
Xnames = np.append(Xnames, np.array(line.split()))
assert(Xnames.size == Svarno + Avarno)
for _ in range(Xnames.size):
Xdomains.append(set({0, 1}))
continue
if (lineno == 6):
assert('State-action pairs' in line)
continue
if (lineno == 7):
assert(line == '# statenumber:(s,t,a,t,e,v,a,l,u,e,s):successornumber:(p,l,a,y,t,h,i,s,a,c,t,i,o,n)\n')
continue
# State-action pairs
spl = line.split(':')
assert(len(spl) == 4)
stval = [float(e) for e in spl[1].replace('(','').replace(')','').split(',')]
assert(len(stval) == Svarno)
acval = [int(e) for e in spl[3].replace('(','').replace(')','').split(',')]
assert(len(acval) == Avarno)
X.append(stval + [float(e) for e in acval]) # copies stval
Y.append(1)
acdec = binary_to_decimal(acval)
for i in range(2 ** Avarno):
if (i != acdec):
X.append(stval + [float(e) for e in decimal_to_binary(i, Avarno)])
Y.append(0)
return Dataset(np.array(X).astype(float), | np.array(Y) | numpy.array |
from typing import Optional, Iterable
import numpy as np
import matplotlib
matplotlib.use("Agg") # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.colors import to_rgb
from mpl_toolkits.mplot3d import proj3d, Axes3D
from abc import ABC, abstractmethod
from gym_softrobot.config import RendererType
from gym_softrobot.utils.render.base_renderer import (
BaseRenderer,
BaseElasticaRendererSession,
)
import pkg_resources
def render_figure(fig:plt.figure):
w, h = fig.get_size_inches()
dpi_res = fig.get_dpi()
w, h = int(np.ceil(w * dpi_res)), int(np.ceil(h*dpi_res))
canvas = FigureCanvasAgg(fig)
canvas.draw()
data = np.asarray(canvas.buffer_rgba())[:,:,:3]
return data
def convert_marker_size(radius, ax):
"""
Convert marker size from radius to s (in scatter plot).
Parameters
----------
radius : np.array or float
Array (or a number) of radius
ax : matplotlib.Axes
"""
xlim = ax.get_xlim()
ylim = ax.get_ylim()
max_axis_length = max(abs(xlim[1]-xlim[0]), abs(ylim[1]-ylim[0]))
scaling_factor = 3.0e3 * (2*0.1) / max_axis_length
return np.pi * (scaling_factor * radius) ** 2
#ppi = 72 # standard point size in matplotlib is 72 points per inch (ppi), no matter the dpi
#point_whole_ax = 5 * 0.8 * ppi
#point_radius= 2 * radius / 1.0 * point_whole_ax
#return point_radius**2
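# A typical call site (sketch; x, y and radius are assumed arrays): ax.scatter(x, y, s=convert_marker_size(radius, ax))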
def set_axes_equal(ax):
'''Make axes of 3D plot have equal scale so that spheres appear as spheres,
cubes as cubes, etc.. This is one possible solution to Matplotlib's
ax.set_aspect('equal') and ax.axis('equal') not working for 3D.
Input
ax: a matplotlib axis, e.g., as output from plt.gca().
'''
x_limits = ax.get_xlim3d()
y_limits = ax.get_ylim3d()
z_limits = ax.get_zlim3d()
x_range = abs(x_limits[1] - x_limits[0])
x_middle = np.mean(x_limits)
y_range = abs(y_limits[1] - y_limits[0])
y_middle = np.mean(y_limits)
z_range = abs(z_limits[1] - z_limits[0])
z_middle = np.mean(z_limits)
# The plot bounding box is a sphere in the sense of the infinity
# norm, hence I call half the max range the plot radius.
plot_radius = 0.5*max([x_range, y_range, z_range])
ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
class Geom(ABC):
@abstractmethod
def __call__(self):
pass
class ElasticaRod(Geom):
# RGB color must be 2d array
rgb_color = | np.array([[0.35, 0.29, 1.0]]) | numpy.array |
import argparse
import json
import os
import numpy as np
import torch
from torch.autograd import Variable
from tqdm import tqdm
from misc.utils import AverageMeter
from models.adacrowd.blocks import assign_adaptive_params
from models.cc_adacrowd import CrowdCounterAdaCrowd
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed = 3035
torch.backends.cudnn.benchmark = True
if seed is not None:
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
SINGLE_FOLDER_DATASETS = ["WE", "City"]
def load_dataloader(data_mode):
if data_mode == 'WE':
from datasets.adacrowd.WE.loading_data import loading_data
from datasets.adacrowd.WE.setting import cfg_data
elif data_mode == 'Mall':
from datasets.adacrowd.Mall.loading_data import loading_data
from datasets.adacrowd.Mall.setting import cfg_data
elif data_mode == 'PETS':
from datasets.adacrowd.PETS.loading_data import loading_data
from datasets.adacrowd.PETS.setting import cfg_data
elif data_mode == 'FDST':
from datasets.adacrowd.FDST.loading_data import loading_data
from datasets.adacrowd.FDST.setting import cfg_data
elif data_mode == 'City':
from datasets.adacrowd.City.loading_data import loading_data
from datasets.adacrowd.City.setting import cfg_data
return loading_data, cfg_data
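# e.g. load_data, cfg_data = load_dataloader('Mall'); train_loader, val_loader, _ = load_data(k_shot=1)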
def test(dataset, args, scene_folder=None):
maes = AverageMeter()
mses = AverageMeter()
model_path = args.model_path
model = CrowdCounterAdaCrowd([args.gpu_id],
args.model_name,
num_gbnnorm=args.num_norm).to(device)
checkpoint = torch.load(model_path, map_location='cuda:0')
model.load_state_dict(checkpoint)
model.to(device)
model.eval()
# loading the training and testing dataloader
load_data, data_args = load_dataloader(dataset)
RESULT = {}
for trail in range(1, args.trails + 1):
        if dataset in ['PETS', 'FDST']:  # membership test; comparing a string to a list with == was always False
train_loader, val_loader, _ = load_data(
k_shot=args.k_shot, scene_folder=scene_folder)
elif dataset in ['WE', 'City']:
train_loader, val_loader, _ = load_data(
k_shot=args.k_shot)
else:
train_loader, val_loader, _ = load_data(k_shot=args.k_shot)
if dataset in ['PETS', 'FDST']:
file_name = "scene_{}_stats.json".format(scene_folder)
else:
file_name = "stats.json"
model_name = args.model_path.split(os.sep)[-1].split('.pth')[0]
output_folder = os.path.join(
args.result_folder,
args.model_name,
dataset,
model_name)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
output_path = os.path.join(output_folder, file_name)
with torch.no_grad():
# Compute the accuracy of the model with the updated model
for didx, data in enumerate(
tqdm(val_loader, total=len(val_loader))):
# clearing the cache to have memory to test all the images
torch.cuda.empty_cache()
# loading the data based on the type of folder of the dataset
if dataset in SINGLE_FOLDER_DATASETS:
crow_imgs, gt, gui_imgs = data
else:
crow_imgs, gt = data
# converting the data to torch variable
crow_imgs = Variable(crow_imgs).to(device)
gt = Variable(gt).to(device)
# computing the mean latent representation for the unlabeled
# images
if dataset in SINGLE_FOLDER_DATASETS:
mean_latent = model.compute_k_mean(
gui_imgs, dataset=dataset)
else:
# Iterate through train images to compute the mean and std
# for the decoder
mean_latent = model.compute_k_mean(train_loader)
# incorporating the mean latent values of the target dataset
# (mean and std) to the decoder of the source model
assign_adaptive_params(mean_latent, model.CCN.crowd_decoder)
# forward pass to generate the crowd images latent
# representation
crow_img = model.CCN.crowd_encoder(crow_imgs)
# generate the density map for the extracted crowd image
# output
pred_map = model.CCN.crowd_decoder(crow_img)
# calculate the predicted crowd count to determine the
# performance of the model
pred_img = pred_map.data.cpu().numpy()
gt_img = gt.data.cpu().numpy()
pred_count = np.sum(pred_img) / 100.
gt_count = | np.sum(gt_img) | numpy.sum |
#
# BSD 3-Clause License
#
# Copyright (c) 2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import bisect
from scipy.special import gamma
from functools import lru_cache
from .._commonfuncs import (
lens,
indComb,
indnComb,
efficient_indnComb,
check_random_generator,
)
from sklearn.utils.validation import check_array
from .._commonfuncs import LocalEstimator
class ESS(LocalEstimator):
# SPDX-License-Identifier: MIT, 2017 <NAME> [IDJohnsson]_
"""Intrinsic dimension estimation using the Expected Simplex Skewness algorithm. [Johnsson2015]_ [IDJohnsson]_
The ESS method assumes that the data is local, i.e. that it is a neighborhood taken from a larger data set, such that the curvature and the noise within the neighborhood is relatively small. In the ideal case (no noise, no curvature) this is equivalent to the data being uniformly distributed over a hyper ball.
Parameters
----------
ver: str, 'a' or 'b'
See Johnsson et al. (2015).
d: int, default=1
For ver ='a', any value of d is possible, for ver ='b', only d = 1 is supported.
Attributes
----------
    essval_: float
The Expected Simplex Skewness value.
"""
def __init__(self, ver="a", d=1, random_state=None):
self.ver = ver
self.d = d
self.random_state = random_state
def _fit(self, X, dists, knnidx):
self.random_state_ = check_random_generator(self.random_state)
self.dimension_pw_, self.essval_ = np.zeros(len(X)), np.zeros(len(X))
for i in range(len(X)):
self.dimension_pw_[i], self.essval_[i] = self._essLocalDimEst(
X[knnidx[i, :]]
)
def fit_once(self, X, y=None):
""" Fit ESS on a single neighborhood. /!\ Not meant to be used on a complete dataset - X should be a local patch of a dataset, otherwise call .fit()
Parameters
----------
X : {array-like}, shape (n_samples, n_features)
The training input samples. /!\ Should be a local patch of a dataset
y : dummy parameter to respect the sklearn API
Returns
-------
self : object
Returns self.
"""
self.random_state_ = check_random_generator(self.random_state)
X = check_array(X, ensure_min_samples=2, ensure_min_features=2)
self.dimension_, self.essval_ = self._essLocalDimEst(X)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def _essLocalDimEst(self, X):
essval = self._computeEss(X, verbose=False)
if np.isnan(essval):
de = np.nan
return (de, essval)
mindim = 1
maxdim = 20
dimvals = self._essReference(maxdim, mindim)
while (self.ver == "a" and essval > dimvals[maxdim - 1]) or (
self.ver == "b" and essval < dimvals[maxdim - 1]
):
mindim = maxdim + 1
maxdim = 2 * (maxdim - 1)
dimvals = np.append(dimvals, self._essReference(maxdim, mindim))
if self.ver == "a":
i = bisect.bisect(dimvals[mindim - 1 : maxdim], essval)
else:
i = len(range(mindim, maxdim + 1)) - bisect.bisect(
dimvals[mindim - 1 : maxdim][::-1], essval
)
de_integer = mindim + i - 1
de_fractional = (essval - dimvals[de_integer - 1]) / (
dimvals[de_integer] - dimvals[de_integer - 1]
)
de = de_integer + de_fractional
return (de, essval)
################################################################################
def _computeEss(self, X, verbose=False):
p = self.d + 1
n = X.shape[1]
if p > n:
if self.ver == "a":
return 0
if self.ver == "b":
return 1
else:
raise ValueError("Not a valid version")
vectors = self._vecToCs_onedir(X, 1)
if verbose:
print("Number of vectors:", len(vectors), "\n")
# groups = indnComb(len(vectors), p)
# if (len(groups) > 5000):
# groups = groups[np.random.choice(range(len(groups)),size=5000, replace=False),:]
# if len(vectors)>100: #sample 5000 combinations
groups = efficient_indnComb(len(vectors), p, self.random_state_)
# else: #generate all combs with the original function
# groups = indnComb(len(vectors), p)
if verbose:
print("Number of simple elements:", len(groups), "\n")
Allist = [vectors[group] for group in groups]
Alist = Allist
# Compute weights for each simple element
weight = np.prod([lens(l) for l in Alist], axis=1)
if self.ver == "a":
# Compute the volumes of the simple elements
vol = np.array([np.linalg.det(vecgr.dot(vecgr.T)) for vecgr in Alist])
if np.any(vol < 0):
if not hasattr(self, "_warned"):
self._warned = True
print(
"Warning: your data might contain duplicate rows, which can affect results"
)
vol = np.sqrt(np.abs(vol))
return np.sum(vol) / np.sum(weight)
elif self.ver == "b":
if self.d == 1:
# Compute the projection of one vector onto one other
proj = [np.abs(np.sum(vecgr[0, :] * vecgr[1, :])) for vecgr in Alist]
return np.sum(proj) / np.sum(weight)
else:
raise ValueError('For ver == "b", d > 1 is not supported.')
else:
raise ValueError("Not a valid version")
################################################################################
@staticmethod
def _vecToC_onedir(
points, add_mids=False, weight_mids=1, mids_maxdist=float("inf")
):
# Mean center data
center = np.mean(points, axis=0)
vecOneDir = points - center
if add_mids: # Add midpoints
pt1, pt2, ic = indComb(len(vecOneDir))
mids = (vecOneDir[ic[pt1],] + vecOneDir[ic[pt2],]) / 2
dist = lens(vecOneDir[ic[pt1],] - vecOneDir[ic[pt2],])
            # Remove midpoints for very distant points
            mids = mids[
                dist <= mids_maxdist,
            ]
vecOneDir = np.vstack((vecOneDir, weight_mids * mids))
return vecOneDir
def _vecToCs_onedir(self, points, n_group):
if n_group == 1:
return self._vecToC_onedir(points)
NN = len(points)
ind_groups = indnComb(NN, n_group)
reshape_ind_groups = ind_groups.reshape((n_group, -1))
point_groups = points[reshape_ind_groups, :].reshape((-1, n_group))
group_centers = np.array(
[points[ind_group, :].mean(axis=0) for ind_group in ind_groups]
)
centers = group_centers[np.repeat(np.arange(len(group_centers)), n_group), :]
return point_groups - centers
@lru_cache()
def _essReference(self, maxdim, mindim=1):
if maxdim <= self.d + 2:
raise ValueError(
"maxdim (", maxdim, ") must be larger than d + 2 (", self.d + 2, ")",
)
if self.ver == "a":
# ID(n) = factor1(n)**d * factor2(n)
# factor1(n) = gamma(n/2)/gamma((n+1)/2)
# factor2(n) = gamma(n/2)/gamma((n-d)/2)
# compute factor1
# factor1(n) = gamma(n/2)/gamma((n+1)/2)
# [using the rule gamma(n+1) = n * gamma(n)] repeatedly
# = gamma(1/2)/gamma(2/2) * prod{j \in J1} j/(j+1) if n is odd
# = gamma(2/2)/gamma(3/2) * prod(j \in J2) j/(j+1) if n is even
# where J1 = np.arange(1, n-2, 2), J2 = np.arange(2, n-2, 2)
J1 = np.array([1 + i for i in range(0, maxdim + 2, 2) if 1 + i <= maxdim])
J2 = np.array([2 + i for i in range(0, maxdim + 2, 2) if 2 + i <= maxdim])
factor1_J1 = (
gamma(1 / 2)
/ gamma(2 / 2)
* np.concatenate((np.array([1]), np.cumprod(J1 / (J1 + 1))[:-1]))
)
factor1_J2 = (
gamma(2 / 2)
/ gamma(3 / 2)
* np.concatenate((np.array([1]), np.cumprod(J2 / (J2 + 1))[:-1]))
)
factor1 = np.repeat(np.nan, maxdim)
factor1[J1 - 1] = factor1_J1
factor1[J2 - 1] = factor1_J2
# compute factor2
# factor2(n) = gamma(n/2)/gamma((n-d)/2)
# = gamma((d+1)/2)/gamma(1/2) * prod{k \in K1} k/(k-d) if n-d is odd
# = gamma((d+2)/2)/gamma(2/2) * prod(k \in K2) k/(k-d) if n-d is even
# where K1 = np.arange(d+1, n-2, 2), K2 = np.arange(d+2, n-2, 2)
# if n > d+2, otherwise 0.
K1 = np.array(
[
self.d + 1 + i
for i in range(0, maxdim + 2, 2)
if self.d + 1 + i <= maxdim
]
)
K2 = np.array(
[
self.d + 2 + i
for i in range(0, maxdim + 2, 2)
if self.d + 2 + i <= maxdim
]
)
factor2_K1 = (
gamma((self.d + 1) / 2)
/ gamma(1 / 2)
* np.concatenate((np.array([1]), np.cumprod(K1 / (K1 - self.d))[:-1]))
)
factor2_K2 = (
gamma((self.d + 2) / 2)
/ gamma(2 / 2)
* np.concatenate((np.array([1]), np.cumprod(K2 / (K2 - self.d))[:-1]))
)
factor2 = np.zeros(maxdim)
factor2[K1 - 1] = factor2_K1
factor2[K2 - 1] = factor2_K2
# compute ID
ID = factor1 ** self.d * factor2
ID = ID[mindim - 1 : maxdim]
return ID
if self.ver == "b":
if self.d == 1:
# ID(n) = 2*pi**(-1/2)/n *gamma((n+1)/2)/gamma((n+2)/2)
# = gamma(2/2)/gamma(3/2) * prod{j \in J1} (j+1)/(j+2) * 2/sqrt(pi)/n if n is odd
# = gamma(3/2)/gamma(4/2) * prod(j \in J2) (j+1)/(j+2) * 2/sqrt(pi)/n if n is even
# where J1 = np.arange(1, n-2, 2), J2 = np.arange(2, n-2, 2)
J1 = np.array(
[1 + i for i in range(0, maxdim + 2, 2) if 1 + i <= maxdim]
)
J2 = np.array(
[2 + i for i in range(0, maxdim + 2, 2) if 2 + i <= maxdim]
)
ID_J1 = (
gamma(3 / 2)
/ gamma(2 / 2)
* np.concatenate(
( | np.array([1]) | numpy.array |
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sympy.solvers import solve
from sympy import Symbol
from matplotlib import patches
import matplotlib.patches as mpatches
import scipy.io as sio
# plotting configuration
ratio = 1.5
figure_len, figure_width = 15*ratio, 12*ratio
font_size_1, font_size_2 = 36*ratio, 36*ratio
legend_size = 18*ratio
line_width, tick_len = 3*ratio, 10*ratio
marker_size = 15*ratio
plot_line_width = 5*ratio
hfont = {'fontname': 'Arial'}
# simulation setup
dt = 0.0001
T = int(9/dt)
# neuronal parameters
tau_e, tau_i = 0.020, 0.010
alpha_e, alpha_i = 2, 2
# adaptation
U, U_max = 1, 6
tau_x = 0.20
# network connectivity
Jee = 1.8
Jie = 1.0
Jei = 1.0
Jii = 0.6
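# The loop below integrates a supralinear (power-law) rate model with facilitation of the E->I weight:
#   tau_e * dr_e/dt = -r_e + [Jee*r_e - Jei*r_i + g_e]_+ ** alpha_e
#   tau_i * dr_i/dt = -r_i + [Jie*x*r_e - Jii*r_i + g_i]_+ ** alpha_i
#   tau_x * dx/dt   = (U - x) + tau_x * U * (U_max - x) * r_e,  with x clipped to [0, U_max]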
l_b_before_stimulation = [True, False]
for b_before_stimulation in l_b_before_stimulation:
x = 1
r_e, r_i = 0, 0
z_e, z_i = 0, 0
l_r_e, l_r_i = [], []
for i in range(T):
if 50000 <= i < 70000:
g_e, g_i = 3.0, 2
else:
g_e, g_i = 1.55, 2
if b_before_stimulation:
if 42000 < i <= 49000:
g_i = 2.1
else:
pass
else:
if 62000 < i <= 69000:
g_i = 2.1
else:
pass
g_e = g_e * (g_e > 0)
g_i = g_i * (g_i > 0)
# SSN part
z_e = Jee * r_e - Jei * r_i + g_e
z_i = Jie * x * r_e - Jii * r_i + g_i
z_e = z_e * (z_e > 0)
z_i = z_i * (z_i > 0)
r_e = r_e + (-r_e + np.power(z_e, alpha_e)) / tau_e * dt
r_i = r_i + (-r_i + np.power(z_i, alpha_i)) / tau_i * dt
r_e = r_e * (r_e > 0)
r_i = r_i * (r_i > 0)
# adaptation of excitatory neurons
x = x + ((U - x) / tau_x + U * (U_max - x) * r_e) * dt
x = np.clip(x, 0, U_max)
l_r_e.append(r_e)
l_r_i.append(r_i)
l_r_e = np.asarray(l_r_e)
l_r_i = | np.asarray(l_r_i) | numpy.asarray |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 15:10:36 2020
@author: chitra
"""
import time
_start_time = time.time()
def tick():
global _start_time
_start_time = time.time()
def tock():
t_sec = round(time.time() - _start_time)
(t_min, t_sec) = divmod(t_sec,60)
(t_hour,t_min) = divmod(t_min,60)
print('Time passed: {}hour:{}min:{}sec'.format(t_hour,t_min,t_sec))
import numpy as np
import pyblp
import pandas as pd
# the standard deviation of log income is constant across years, but it has year-varying means
# 0.375 is calibrated to match OG diversion of 2nd choice data
def solve_nl_nevo(df,rho=0.375):
groups = df.groupby(['market_ids', 'nesting_ids'])
df['demand_instruments20'] = groups['shares'].transform(np.size)
nl_formulation = pyblp.Formulation('0 + prices')
problem = pyblp.Problem(nl_formulation, df)
res=problem.solve(rho=rho,optimization=pyblp.Optimization('return'))
og=res.extract_diagonals(res.compute_diversion_ratios()).mean()
print(og)
return problem,res
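# A minimal usage sketch (df is assumed to be a Nevo-style product DataFrame with market_ids,
# nesting_ids, shares and prices columns, as required by solve_nl_nevo above):
#   problem, res = solve_nl_nevo(df, rho=0.375)
#   agent_df = draw_blp_agents(ndraws=10000)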
def draw_blp_agents(ndraws=10000):
log_income_sd = 1.72
log_income_means = {
1971: 2.01156,
1972: 2.06526,
1973: 2.07843,
1974: 2.05775,
1975: 2.02915,
1976: 2.05346,
1977: 2.06745,
1978: 2.09805,
1979: 2.10404,
1980: 2.07208,
1981: 2.06019,
1982: 2.06561,
1983: 2.07672,
1984: 2.10437,
1985: 2.12608,
1986: 2.16426,
1987: 2.18071,
1988: 2.18856,
1989: 2.21250,
1990: 2.18377,
}
# construct agent data year-by-year
market_ids = []
weights = []
nodes = []
income = []
for index, (year, log_income_mean) in enumerate(log_income_means.items()):
integration = pyblp.Integration('halton', ndraws, {'discard': 1000 + index * ndraws,'seed': index})
untransformed_agents = pyblp.build_integration(integration, 6)
market_ids.append(np.repeat(year, untransformed_agents.weights.size))
weights.append(untransformed_agents.weights)
nodes.append(untransformed_agents.nodes[:, :-1])
income.append(np.exp(log_income_mean + log_income_sd * untransformed_agents.nodes[:, -1]))
# concatenate the constructed agent data
agent_data = {
'market_ids': np.concatenate(market_ids),
'weights': np.concatenate(weights),
'nodes': np.vstack(nodes),
'income': np.concatenate(income),
}
# Make this a dataframe
agents=agent_data.copy()
del agents['nodes']
del agents['weights']
agent_df=pd.DataFrame.from_dict(agents)
for index, vi in enumerate(np.vstack(nodes).T):
agent_df[f'nodes{index}'] = vi
agent_df['weights']=np.concatenate(weights).flatten()
return agent_df
def save_pyblp_results(results, problem,filename):
## add in all the other things we could potentially be interested in
res_dict = results.to_dict()
res_dict['diversion_ratios'] = results.compute_diversion_ratios()
res_dict['quality_diversion_ratios'] = results.compute_diversion_ratios(name=None)
res_dict['own_diversion'] = results.extract_diagonals(res_dict['diversion_ratios'])
res_dict['long_run_diversion_ratios'] = results.compute_long_run_diversion_ratios()
res_dict['objective'] = results.objective.item()
res_dict['objective_scaled'] = results.objective.item()/problem.N
res_dict['elasticities'] = results.compute_elasticities()
res_dict['aggregate_elasticities'] = results.compute_aggregate_elasticities()
res_dict['diag_elasticities'] = results.extract_diagonals(res_dict['elasticities'])
res_dict['consumer_surplus'] = results.compute_consumer_surpluses()
res_dict['markups'] =results.compute_markups()
res_dict['probabilities'] = results.compute_probabilities()
np.save(filename, res_dict, allow_pickle =True)
def load_pyblp_dict(filename):
dict = np.load(filename, allow_pickle=True)
return dict
# this ONLY works for the base!
def load_blp_base(problem, filename):
base_res = np.load(filename, allow_pickle=True)
dict_W = base_res.item().get('W')
dict_delta = base_res.item().get('delta')
dict_gamma = base_res.item().get('gamma')
dict_beta = base_res.item().get('beta')
dict_sigma = base_res.item().get('sigma')
dict_pi = base_res.item().get('pi')
## Use these to quickly get the exact results as estimation
fast_options = dict(
method='1s',
check_optimality='gradient',
costs_bounds=(0.001, None),
W_type='clustered',
se_type='clustered',
initial_update=False,
iteration=pyblp.Iteration('squarem', {'atol': 1e-14}),
optimization=pyblp.Optimization('return'),
scale_objective=False,
W=dict_W,
delta=dict_delta,
beta=dict_beta,
gamma=dict_gamma,
sigma = dict_sigma,
pi = dict_pi
)
results_fast = problem.solve(**fast_options)
return results_fast
def get_params_nevo(results_dict, w=None):
elasticities = results_dict.item().get('diag_elasticities')
agg_elas = results_dict.item().get('aggregate_elasticities')
diversion0 = results_dict.item().get('own_diversion')
div = results_dict.item().get('diversion_ratios')
div[np.isnan(div)]=0
div[div==diversion0]=0
div.sort(axis=1)
top5=div[:,-5:].sum(axis=1)
price_param = results_dict.item().get('beta').item()
price_param_se = results_dict.item().get('beta_se').item()
cs = results_dict.item().get('consumer_surplus')*100
markups=results_dict.item().get('markups')
# CRM: Adding the interactions as pi
if results_dict.item().get('sigma').shape[0] == 0:
sigmas = np.zeros(5)
sigma_ses = np.zeros((5,5))
else:
sigma_ses = results_dict.item().get('sigma_se')
sigmas=np.abs(np.diag(results_dict.item().get('sigma')))
if results_dict.item().get('pi').shape[0] == 0 :
pis = np.zeros((5,5))
pi_ses = np.zeros((5,5))
else:
pis = results_dict.item().get('pi')
pi_ses = results_dict.item().get('pi_se')
objective = results_dict.item().get('objective')
objective_scaled = results_dict.item().get('objective_scaled')
return {'sigma_cons': sigmas[0],
'sigma_price': sigmas[1],
'sigma_sugar': sigmas[2],
'sigma_mushy': sigmas[3],
'sigma_cons_se': sigma_ses[0,0],
'sigma_price_se': sigma_ses[1,1],
'sigma_sugar_se': sigma_ses[2,2],
'sigma_mushy_se': sigma_ses[3,3],
'pi_cons_inc': pis[0,0],
'pi_cons_inc2': pis[0,1],
'pi_cons_age': pis[0,2],
'pi_price_inc': pis[1,0],
'pi_price_inc2': pis[1,1],
'pi_price_child': pis[1,3],
'pi_sugar_inc': pis[2,0],
'pi_sugar_age': pis[2,2],
'pi_mushy_inc': pis[3,0],
'pi_mushy_age': pis[3,2],
'pi_cons_inc_se': pi_ses[0,0],
'pi_cons_inc2_se': pi_ses[0,1],
'pi_cons_age_se': pi_ses[0,2],
'pi_price_inc_se': pi_ses[1,0],
'pi_price_inc2_se': pi_ses[1,1],
'pi_price_child_se': pi_ses[1,3],
'pi_sugar_inc_se': pi_ses[2,0],
'pi_sugar_age_se': pi_ses[2,2],
'pi_mushy_inc_se': pi_ses[3,0],
'pi_mushy_age_se': pi_ses[3,2],
'price_coeff': price_param,
'price_se': price_param_se,
'median_own_elas':np.median(elasticities),
'median_agg_elas': | np.median(agg_elas) | numpy.median |
from typing import Dict, Optional, List
import numpy as np
import Box2D
from gym import spaces
from gym.utils import EzPickle
from gym.envs.box2d import bipedal_walker
from gym.envs.box2d import bipedal_walker as bpw
from Box2D.b2 import (edgeShape, fixtureDef, polygonShape)
from carl.envs.carl_env import CARLEnv
from carl.utils.trial_logger import TrialLogger
DEFAULT_CONTEXT = {
"FPS": 50,
"SCALE": 30.0, # affects how fast-paced the game is, forces should be adjusted as well
"GRAVITY_X": 0,
"GRAVITY_Y": -10,
# surroundings
"FRICTION": 2.5,
"TERRAIN_STEP": 14/30.0,
"TERRAIN_LENGTH": 200, # in steps
"TERRAIN_HEIGHT": 600/30/4, # VIEWPORT_H/SCALE/4
"TERRAIN_GRASS": 10, # low long are grass spots, in steps
"TERRAIN_STARTPAD": 20, # in steps
# walker
"MOTORS_TORQUE": 80,
"SPEED_HIP": 4,
"SPEED_KNEE": 6,
"LIDAR_RANGE": 160/30.0,
"LEG_DOWN": -8/30.0,
"LEG_W": 8/30.0,
"LEG_H": 34/30.0,
# absolute value of random force applied to walker at start of episode
"INITIAL_RANDOM": 5,
# Size of world
"VIEWPORT_W": 600,
"VIEWPORT_H": 400,
}
# TODO make bounds more generous for all Box2D envs?
CONTEXT_BOUNDS = {
"FPS": (1, 500, float),
"SCALE": (1, 100, float), # affects how fast-paced the game is, forces should be adjusted as well
# surroundings
"FRICTION": (0, 10, float),
"TERRAIN_STEP": (0.25, 1, float),
"TERRAIN_LENGTH": (100, 500, int), # in steps
"TERRAIN_HEIGHT": (3, 10, float), # VIEWPORT_H/SCALE/4
"TERRAIN_GRASS": (5, 15, int), # low long are grass spots, in steps
"TERRAIN_STARTPAD": (10, 30, int), # in steps
# walker
"MOTORS_TORQUE": (0, 200, float),
"SPEED_HIP": (1e-6, 15, float),
"SPEED_KNEE": (1e-6, 15, float),
"LIDAR_RANGE": (0.5, 20, float),
"LEG_DOWN": (-2, -0.25, float),
"LEG_W": (0.25, 0.5, float),
"LEG_H": (0.25, 2, float),
# absolute value of random force applied to walker at start of episode
"INITIAL_RANDOM": (0, 50, float),
# Size of world
"VIEWPORT_W": (400, 1000, int),
"VIEWPORT_H": (200, 800, int),
"GRAVITY_X": (-20, 20, float), # unit: m/s²
"GRAVITY_Y": (-20, -0.01, float), # the y-component of gravity must be smaller than 0 because otherwise the
# body leaves the frame by going up
}
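# A minimal usage sketch (hypothetical context values; keys must be the ones listed in DEFAULT_CONTEXT):
#   contexts = {0: dict(DEFAULT_CONTEXT, GRAVITY_Y=-5.0),
#               1: dict(DEFAULT_CONTEXT, FRICTION=1.0)}
#   env = CARLBipedalWalkerEnv(contexts=contexts)
#   state = env.reset()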
class CustomBipedalWalkerEnv(bipedal_walker.BipedalWalker):
def __init__(self, gravity: (float, float) = (0, -10)): # TODO actually we dont need a custom env because the gravity can be adjusted afterwards
EzPickle.__init__(self)
self.seed()
self.viewer = None
self.world = Box2D.b2World(gravity=gravity)
self.terrain = None
self.hull = None
self.prev_shaping = None
self.fd_polygon = fixtureDef(
shape=polygonShape(vertices=
[(0, 0),
(1, 0),
(1, -1),
(0, -1)]),
friction=bpw.FRICTION)
self.fd_edge = fixtureDef(
shape=edgeShape(vertices=
[(0, 0),
(1, 1)]),
friction=bpw.FRICTION,
categoryBits=0x0001,
)
self.reset()
high = np.array([np.inf] * 24)
self.action_space = spaces.Box(np.array([-1, -1, -1, -1]), np.array([1, 1, 1, 1]), dtype=np.float32)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
class CARLBipedalWalkerEnv(CARLEnv):
def __init__(
self,
env: Optional[CustomBipedalWalkerEnv] = None,
contexts: Dict[str, Dict] = {},
instance_mode: str = "rr",
hide_context: bool = False,
add_gaussian_noise_to_context: bool = False,
gaussian_noise_std_percentage: float = 0.05,
logger: Optional[TrialLogger] = None,
scale_context_features: str = "no",
default_context: Optional[Dict] = DEFAULT_CONTEXT,
state_context_features: Optional[List[str]] = None,
dict_observation_space: bool = False,
):
"""
Parameters
----------
env: gym.Env, optional
            Defaults to the custom bipedal walker environment (CustomBipedalWalkerEnv).
contexts: List[Dict], optional
Different contexts / different environment parameter settings.
instance_mode: str, optional
"""
if env is None:
env = CustomBipedalWalkerEnv()
if not contexts:
contexts = {0: DEFAULT_CONTEXT}
super().__init__(
env=env,
contexts=contexts,
instance_mode=instance_mode,
hide_context=hide_context,
add_gaussian_noise_to_context=add_gaussian_noise_to_context,
gaussian_noise_std_percentage=gaussian_noise_std_percentage,
logger=logger,
scale_context_features=scale_context_features,
default_context=default_context,
state_context_features=state_context_features,
dict_observation_space=dict_observation_space
)
self.whitelist_gaussian_noise = list(DEFAULT_CONTEXT.keys()) # allow to augment all values
def _update_context(self):
bpw.FPS = self.context["FPS"]
bpw.SCALE = self.context["SCALE"]
bpw.FRICTION = self.context["FRICTION"]
bpw.TERRAIN_STEP = self.context["TERRAIN_STEP"]
bpw.TERRAIN_LENGTH = int(self.context["TERRAIN_LENGTH"]) # TODO do this automatically
bpw.TERRAIN_HEIGHT = self.context["TERRAIN_HEIGHT"]
bpw.TERRAIN_GRASS = self.context["TERRAIN_GRASS"]
bpw.TERRAIN_STARTPAD = self.context["TERRAIN_STARTPAD"]
bpw.MOTORS_TORQUE = self.context["MOTORS_TORQUE"]
bpw.SPEED_HIP = self.context["SPEED_HIP"]
bpw.SPEED_KNEE = self.context["SPEED_KNEE"]
bpw.LIDAR_RANGE = self.context["LIDAR_RANGE"]
bpw.LEG_DOWN = self.context["LEG_DOWN"]
bpw.LEG_W = self.context["LEG_W"]
bpw.LEG_H = self.context["LEG_H"]
bpw.INITIAL_RANDOM = self.context["INITIAL_RANDOM"]
bpw.VIEWPORT_W = self.context["VIEWPORT_W"]
bpw.VIEWPORT_H = self.context["VIEWPORT_H"]
gravity_x = self.context["GRAVITY_X"]
gravity_y = self.context["GRAVITY_Y"]
gravity = (gravity_x, gravity_y)
self.env.world.gravity = gravity
# Important for building terrain
self.env.fd_polygon = fixtureDef(
shape=polygonShape(vertices=
[(0, 0),
(1, 0),
(1, -1),
(0, -1)]),
friction=bipedal_walker.FRICTION)
self.env.fd_edge = fixtureDef(
shape=edgeShape(vertices=
[(0, 0),
(1, 1)]),
friction=bipedal_walker.FRICTION,
categoryBits=0x0001,
)
bpw.HULL_FD = fixtureDef(
shape=polygonShape(vertices=[(x / bpw.SCALE, y / bpw.SCALE) for x, y in bpw.HULL_POLY]),
density=5.0,
friction=0.1,
categoryBits=0x0020,
maskBits=0x001, # collide only with ground
restitution=0.0) # 0.99 bouncy
bpw.LEG_FD = fixtureDef(
shape=polygonShape(box=(bpw.LEG_W / 2, bpw.LEG_H / 2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
bpw.LOWER_FD = fixtureDef(
shape=polygonShape(box=(0.8 * bpw.LEG_W / 2, bpw.LEG_H / 2)),
density=1.0,
restitution=0.0,
categoryBits=0x0020,
maskBits=0x001)
self.env.world.gravity = gravity
def demo_heuristic(env):
env.reset()
steps = 0
total_reward = 0
a = | np.array([0.0, 0.0, 0.0, 0.0]) | numpy.array |
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
"""
Small collection of robust statistical estimators based on functions from
<NAME> (Hughes STX) statistics library (called ROBLIB) that have
been incorporated into the AstroIDL User's Library. Function included are:
* biweightMean - biweighted mean estimator
* mean - robust estimator of the mean of a data set
* mode - robust estimate of the mode of a data set using the half-sample
method
* std - robust estimator of the standard deviation of a data set
* checkfit - return the standard deviation and biweights for a fit in order
to determine its quality
* linefit - outlier resistant fit of a line to data
* polyfit - outlier resistant fit of a polynomial to data
For the fitting routines, the coefficients are returned in the same order as
numpy.polyfit, i.e., with the coefficient of the highest power listed first.
For additional information about the original IDL routines, see:
http://idlastro.gsfc.nasa.gov/contents.html#C17
"""
import math
import numpy
__version__ = '0.4'
__revision__ = '$Rev$'
__all__ = ['biweightMean', 'mean', 'mode', 'std', 'checkfit', 'linefit', 'polyfit', '__version__', '__revision__', '__all__']
__iterMax = 25
__delta = 5.0e-7
__epsilon = 1.0e-20
#print("Note that for the outlier rejection, BisquareLimit=3.0 is used")
def biweightMean(inputData, axis=None, dtype=None):
"""
Calculate the mean of a data set using bisquare weighting.
Based on the biweight_mean routine from the AstroIDL User's
Library.
.. versionchanged:: 1.0.3
Added the 'axis' and 'dtype' keywords to make this function more
compatible with numpy.mean()
"""
if axis is not None:
fnc = lambda x: biweightMean(x, dtype=dtype)
y0 = numpy.apply_along_axis(fnc, axis, inputData)
else:
y = inputData.ravel()
if type(y).__name__ == "MaskedArray":
y = y.compressed()
if dtype is not None:
y = y.astype(dtype)
n = len(y)
closeEnough = 0.03*numpy.sqrt(0.5/(n-1))
diff = 1.0e30
nIter = 0
y0 = numpy.median(y)
deviation = y - y0
sigma = std(deviation)
if sigma < __epsilon:
diff = 0
while diff > closeEnough:
nIter = nIter + 1
if nIter > __iterMax:
break
uu = ((y-y0)/(6.0*sigma))**2.0
uu = numpy.where(uu > 1.0, 1.0, uu)
weights = (1.0-uu)**2.0
weights /= weights.sum()
y0 = (weights*y).sum()
deviation = y - y0
prevSigma = sigma
sigma = std(deviation, Zero=True)
if sigma > __epsilon:
diff = numpy.abs(prevSigma - sigma) / prevSigma
else:
diff = 0.0
return y0
def mean(inputData, Cut=3.0, axis=None, dtype=None):
"""
Robust estimator of the mean of a data set. Based on the
resistant_mean function from the AstroIDL User's Library.
.. versionchanged:: 1.0.3
Added the 'axis' and 'dtype' keywords to make this function more
compatible with numpy.mean()
"""
if axis is not None:
fnc = lambda x: mean(x, dtype=dtype)
dataMean = numpy.apply_along_axis(fnc, axis, inputData)
else:
data = inputData.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if dtype is not None:
data = data.astype(dtype)
data0 = numpy.median(data)
maxAbsDev = numpy.median(numpy.abs(data-data0)) / 0.6745
if maxAbsDev < __epsilon:
maxAbsDev = (numpy.abs(data-data0)).mean() / 0.8000
cutOff = Cut*maxAbsDev
good = numpy.where( numpy.abs(data-data0) <= cutOff )
good = good[0]
dataMean = data[good].mean()
dataSigma = math.sqrt( ((data[good]-dataMean)**2.0).sum() / len(good) )
if Cut > 1.0:
sigmaCut = Cut
else:
sigmaCut = 1.0
if sigmaCut <= 4.5:
dataSigma = dataSigma / (-0.15405 + 0.90723*sigmaCut - 0.23584*sigmaCut**2.0 + 0.020142*sigmaCut**3.0)
cutOff = Cut*dataSigma
good = numpy.where( numpy.abs(data-data0) <= cutOff )
good = good[0]
dataMean = data[good].mean()
if len(good) > 3:
dataSigma = math.sqrt( ((data[good]-dataMean)**2.0).sum() / len(good) )
if Cut > 1.0:
sigmaCut = Cut
else:
sigmaCut = 1.0
if sigmaCut <= 4.5:
dataSigma = dataSigma / (-0.15405 + 0.90723*sigmaCut - 0.23584*sigmaCut**2.0 + 0.020142*sigmaCut**3.0)
dataSigma = dataSigma / math.sqrt(len(good)-1)
return dataMean
def mode(inputData, axis=None, dtype=None):
"""
Robust estimator of the mode of a data set using the half-sample mode.
.. versionadded: 1.0.3
"""
if axis is not None:
fnc = lambda x: mode(x, dtype=dtype)
dataMode = numpy.apply_along_axis(fnc, axis, inputData)
else:
# Create the function that we can use for the half-sample mode
def _hsm(data):
if data.size == 1:
return data[0]
elif data.size == 2:
return data.mean()
elif data.size == 3:
i1 = data[1] - data[0]
i2 = data[2] - data[1]
if i1 < i2:
return data[:2].mean()
                elif i2 < i1:
return data[1:].mean()
else:
return data[1]
else:
wMin = data[-1] - data[0]
                N = data.size//2 + data.size%2
                j = 0
                for i in range(0, N):
w = data[i+N-1] - data[i]
if w < wMin:
wMin = w
j = i
return _hsm(data[j:j+N])
data = inputData.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if dtype is not None:
data = data.astype(dtype)
# The data need to be sorted for this to work
data = numpy.sort(data)
# Find the mode
dataMode = _hsm(data)
return dataMode
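# Illustrative usage sketch (not part of the original ROBLIB port): the
# half-sample mode follows the densest region of a skewed sample, where the
# mean and median drift towards the tail. All numbers are synthetic.
def _example_mode():
    data = numpy.concatenate([numpy.random.normal(5.0, 0.5, 800),
                              numpy.random.uniform(5.0, 30.0, 200)])
    print("mean  :", data.mean())
    print("median:", numpy.median(data))
    print("mode  :", mode(data))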
def std(inputData, Zero=False, axis=None, dtype=None):
"""
Robust estimator of the standard deviation of a data set.
Based on the robust_sigma function from the AstroIDL User's Library.
.. versionchanged:: 1.0.3
Added the 'axis' and 'dtype' keywords to make this function more
compatible with numpy.std()
"""
if axis is not None:
fnc = lambda x: std(x, dtype=dtype)
sigma = numpy.apply_along_axis(fnc, axis, inputData)
else:
data = inputData.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if dtype is not None:
data = data.astype(dtype)
if Zero:
data0 = 0.0
else:
data0 = numpy.median(data)
maxAbsDev = numpy.median(numpy.abs(data-data0)) / 0.6745
if maxAbsDev < __epsilon:
maxAbsDev = (numpy.abs(data-data0)).mean() / 0.8000
if maxAbsDev < __epsilon:
sigma = 0.0
return sigma
u = (data-data0) / 6.0 / maxAbsDev
u2 = u**2.0
good = numpy.where( u2 <= 1.0 )
good = good[0]
if len(good) < 3:
print("WARNING: Distribution is too strange to compute standard deviation")
sigma = -1.0
return sigma
numerator = ((data[good]-data0)**2.0 * (1.0-u2[good])**2.0).sum()
nElements = (data.ravel()).shape[0]
denominator = ((1.0-u2[good])*(1.0-5.0*u2[good])).sum()
sigma = nElements*numerator / (denominator*(denominator-1.0))
if sigma > 0:
sigma = math.sqrt(sigma)
else:
sigma = 0.0
return sigma
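# Illustrative usage sketch (not part of the original ROBLIB port): the robust
# sigma should stay close to the population value even when a handful of wild
# points inflate numpy.std. All numbers are synthetic.
def _example_std():
    data = numpy.concatenate([numpy.random.normal(0.0, 2.0, 1000),
                              numpy.array([50.0, -60.0, 80.0])])
    print("numpy std :", data.std())
    print("robust std:", std(data))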
def checkfit(inputData, inputFit, epsilon, delta, BisquareLimit=3.0):
"""
Determine the quality of a fit and biweights. Returns a tuple
with elements:
0. Robust standard deviation analog
1. Fractional median absolute deviation of the residuals
2. Number of input points given non-zero weight in the calculation
3. Bisquare weights of the input points
4. Residual values scaled by sigma
This function is based on the rob_checkfit routine from the AstroIDL
User's Library.
"""
data = inputData.ravel()
fit = inputFit.ravel()
if type(data).__name__ == "MaskedArray":
data = data.compressed()
if type(fit).__name__ == "MaskedArray":
fit = fit.compressed()
deviation = data - fit
sigma = std(deviation, Zero=True)
if sigma < epsilon:
return (sigma, 0.0, 0, 0.0, 0.0)
toUse = (numpy.where( numpy.abs(fit) > epsilon ))[0]
if len(toUse) < 3:
fracDev = 0.0
else:
fracDev = numpy.median(numpy.abs(deviation[toUse]/fit[toUse]))
if fracDev < delta:
return (sigma, fracDev, 0, 0.0, 0.0)
biweights = numpy.abs(deviation)/(BisquareLimit*sigma)
toUse = (numpy.where(biweights > 1))[0]
if len(toUse) > 0:
biweights[toUse] = 1.0
nGood = len(data) - len(toUse)
scaledResids = (1.0 - biweights**2.0)
scaledResids = scaledResids / scaledResids.sum()
return (sigma, fracDev, nGood, biweights, scaledResids)
def linefit(inputX, inputY, iterMax=25, Bisector=False, BisquareLimit=6.0, CloseFactor=0.03):
"""
    Outlier-resistant two-variable linear regression function.
Based on the robust_linefit routine in the AstroIDL User's Library.
"""
xIn = inputX.ravel()
yIn = inputY.ravel()
if type(yIn).__name__ == "MaskedArray":
xIn = xIn.compress(numpy.logical_not(yIn.mask))
yIn = yIn.compressed()
n = len(xIn)
x0 = xIn.sum() / n
y0 = yIn.sum() / n
x = xIn - x0
y = yIn - y0
cc = numpy.zeros(2)
ss = numpy.zeros(2)
sigma = 0.0
yFit = yIn
badFit = 0
nGood = n
lsq = 0.0
yp = y
if n > 5:
s = numpy.argsort(x)
u = x[s]
v = y[s]
        nHalf = n//2 - 1
x1 = numpy.median(u[0:nHalf])
x2 = numpy.median(u[nHalf:])
y1 = numpy.median(v[0:nHalf])
y2 = numpy.median(v[nHalf:])
if numpy.abs(x2-x1) < __epsilon:
x1 = u[0]
x2 = u[-1]
y1 = v[0]
y2 = v[-1]
cc[1] = (y2-y1)/(x2-x1)
cc[0] = y1 - cc[1]*x1
yFit = cc[0] + cc[1]*x
sigma, fracDev, nGood, biweights, scaledResids = checkfit(yp, yFit, __epsilon, __delta)
if nGood < 2:
lsq = 1.0
if lsq == 1 or n < 6:
sx = x.sum()
sy = y.sum()
sxy = (x*y).sum()
sxx = (x*x).sum()
d = sxx - sx*sx
if numpy.abs(d) < __epsilon:
return (0.0, 0.0)
ySlope = (sxy - sx*sy) / d
yYInt = (sxx*sy - sx*sxy) / d
if Bisector:
syy = (y*y).sum()
d = syy - sy*sy
if numpy.abs(d) < __epsilon:
return (0.0, 0.0)
tSlope = (sxy - sy*sx) / d
tYInt = (syy*sx - sy*sxy) / d
if numpy.abs(tSlope) < __epsilon:
return (0.0, 0.0)
xSlope = 1.0/tSlope
xYInt = -tYInt / tSlope
if ySlope > xSlope:
a1 = yYInt
b1 = ySlope
r1 = numpy.sqrt(1.0+ySlope**2.0)
a2 = xYInt
b2 = xSlope
r2 = numpy.sqrt(1.0+xSlope**2.0)
else:
a2 = yYInt
b2 = ySlope
r2 = numpy.sqrt(1.0+ySlope**2.0)
a1 = xYInt
b1 = xSlope
r1 = numpy.sqrt(1.0+xSlope**2.0)
yInt = (r1*a2 + r2*a1) / (r1 + r2)
slope = (r1*b2 + r2*b1) / (r1 + r2)
r = numpy.sqrt(1.0+slope**2.0)
if yInt > 0:
r = -r
u1 = slope / r
u2 = -1.0/r
u3 = yInt / r
yp = u1*x + u2*y + u3
yFit = y*0.0
ss = yp
else:
slope = ySlope
yInt = yYInt
yFit = yInt + slope*x
cc[0] = yInt
cc[1] = slope
sigma, fracDev, nGood, biweights, scaledResids = checkfit(yp, yFit, __epsilon, __delta)
if nGood < 2:
cc[0] = cc[0] + y0 - cc[1]*x0
return cc[::-1]
sigma1 = (100.0*sigma)
closeEnough = CloseFactor * numpy.sqrt(0.5/(n-1))
if closeEnough < __delta:
closeEnough = __delta
diff = 1.0e20
nIter = 0
while diff > closeEnough:
nIter = nIter + 1
if nIter > iterMax:
break
sigma2 = sigma1
sigma1 = sigma
sx = (biweights*x).sum()
sy = (biweights*y).sum()
sxy = (biweights*x*y).sum()
sxx = (biweights*x*x).sum()
d = sxx - sx*sx
if numpy.abs(d) < __epsilon:
return (0.0, 0.0)
ySlope = (sxy - sx*sy) / d
yYInt = (sxx*sy - sx*sxy) / d
slope = ySlope
yInt = yYInt
if Bisector:
syy = (biweights*y*y).sum()
d = syy - sy*sy
if numpy.abs(d) < __epsilon:
return (0.0, 0.0)
tSlope = (sxy - sy*sx) / d
tYInt = (syy*sx - sy*sxy) / d
if numpy.abs(tSlope) < __epsilon:
return (0.0, 0.0)
xSlope = 1.0/tSlope
xYInt = -tYInt / tSlope
if ySlope > xSlope:
a1 = yYInt
b1 = ySlope
r1 = numpy.sqrt(1.0+ySlope**2.0)
a2 = xYInt
b2 = xSlope
r2 = numpy.sqrt(1.0+xSlope**2.0)
else:
a2 = yYInt
b2 = ySlope
r2 = numpy.sqrt(1.0+ySlope**2.0)
a1 = xYInt
b1 = xSlope
r1 = numpy.sqrt(1.0+xSlope**2.0)
yInt = (r1*a2 + r2*a1) / (r1 + r2)
slope = (r1*b2 + r2*b1) / (r1 + r2)
r = numpy.sqrt(1.0+slope**2.0)
if yInt > 0:
r = -r
u1 = slope / r
u2 = -1.0/r
u3 = yInt / r
yp = u1*x + u2*y + u3
yFit = y*0.0
ss = yp
else:
yFit = yInt + slope*x
cc[0] = yInt
cc[1] = slope
sigma, fracDev, nGood, biweights, scaledResids = checkfit(yp, yFit, __epsilon, __delta)
if nGood < 2:
badFit = 1
break
diff1 = numpy.abs(sigma1 - sigma)/sigma
diff2 = numpy.abs(sigma2 - sigma)/sigma
if diff1 < diff2:
diff = diff1
else:
diff = diff2
cc[0] = cc[0] + y0 - cc[1]*x0
return cc[::-1]
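# Illustrative usage sketch (not part of the original ROBLIB port): fit a line
# to data where every 25th point is badly corrupted. Coefficients come back
# highest power first, as with numpy.polyfit. All numbers are synthetic.
def _example_linefit():
    x = numpy.linspace(0.0, 10.0, 200)
    y = 2.5*x + 1.0 + numpy.random.normal(0.0, 0.2, x.size)
    y[::25] += 40.0
    slope, intercept = linefit(x, y)
    print("robust fit   :", slope, intercept)
    print("numpy.polyfit:", numpy.polyfit(x, y, 1))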
def polyfit(inputX, inputY, order, iterMax=25):
"""
    Outlier-resistant two-variable polynomial function fitter.
Based on the robust_poly_fit routine in the AstroIDL User's
Library.
Unlike robust_poly_fit, two different polynomial fitters are used
because numpy.polyfit does not support non-uniform weighting of the
data. For the weighted fitting, the SciPy Orthogonal Distance
Regression module (scipy.odr) is used.
"""
from scipy import odr
def polyFunc(B, x, order=order):
out = x*0.0
for i in range(order+1):
            out = out + B[i]*x**i
        return out
model = odr.Model(polyFunc)
x = inputX.ravel()
y = inputY.ravel()
if type(y).__name__ == "MaskedArray":
x = x.compress(numpy.logical_not(y.mask))
y = y.compressed()
n = len(x)
x0 = x.sum() / n
y0 = y.sum() / n
u = x
v = y
nSeg = order + 2
if (nSeg//2)*2 == nSeg:
nSeg = nSeg + 1
minPts = nSeg*3
if n < 1000:
lsqFit = 1
cc = numpy.polyfit(u, v, order)
yFit = numpy.polyval(cc, u)
else:
        lsqFit = 0
q = numpy.argsort(u)
u = u[q]
v = v[q]
nPerSeg = numpy.zeros(nSeg, dtype=int) + n//nSeg
nLeft = n - nPerSeg[0]*nSeg
nPerSeg[nSeg//2] = nPerSeg[nSeg//2] + nLeft
r = numpy.zeros(nSeg)
s = numpy.zeros(nSeg)
r[0] = numpy.median(u[0:nPerSeg[0]])
s[0] = numpy.median(v[0:nPerSeg[0]])
i2 = nPerSeg[0]-1
for i in range(1,nSeg):
i1 = i2
i2 = i1 + nPerSeg[i]
r[i] = numpy.median(u[i1:i2])
s[i] = numpy.median(v[i1:i2])
cc = numpy.polyfit(r, s, order)
yFit = numpy.polyval(cc, u)
sigma, fracDev, nGood, biweights, scaledResids = checkfit(v, yFit, __epsilon, __delta)
if nGood == 0:
        return cc, numpy.nan
if nGood < minPts:
if lsqFit == 0:
cc = numpy.polyfit(u, v, order)
yFit = numpy.polyval(cc, u)
            sigma, fracDev, nGood, biweights, scaledResids = checkfit(v, yFit, __epsilon, __delta)
if nGood == 0:
                return cc, numpy.nan  # __processPoly is not defined in this port; fall back to the raw fit
nGood = n - nGood
if nGood < minPts:
            return 0, numpy.nan
closeEnough = 0.03*numpy.sqrt(0.5/(n-1))
if closeEnough < __delta:
closeEnough = __delta
diff = 1.0e10
sigma1 = 100.0*sigma
nIter = 0
while diff > closeEnough:
nIter = nIter + 1
if nIter > iterMax:
break
sigma2 = sigma1
sigma1 = sigma
g = (numpy.where(biweights < 1))[0]
if len(g) < len(biweights):
u = u[g]
v = v[g]
biweights = biweights[g]
try:
## Try the fancy method...
data = odr.RealData(u, v, sy=1.0/biweights)
fit = odr.ODR(data, model, beta0=cc[::-1])
out = fit.run()
cc = out.beta[::-1]
except:
## And then give up when it doesn't work
cc = numpy.polyfit(u, v, order)
yFit = numpy.polyval(cc, u)
sigma, fracDev, nGood, biweights, scaledResids = checkfit(v, yFit, __epsilon, __delta)
if nGood < minPts:
            return cc, numpy.nan
diff1 = numpy.abs(sigma1 - sigma)/sigma
diff2 = numpy.abs(sigma2 - sigma)/sigma
if diff1 < diff2:
diff = diff1
else:
diff = diff2
return cc, sigma
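# Illustrative usage sketch (not part of the original ROBLIB port): outlier
# resistant quadratic fit; returns the coefficients (highest power first) and
# the robust sigma of the residuals. All numbers are synthetic.
def _example_polyfit():
    x = numpy.linspace(-5.0, 5.0, 300)
    y = 0.5*x**2 - 1.0*x + 3.0 + numpy.random.normal(0.0, 0.3, x.size)
    y[::40] -= 25.0
    cc, sigma = polyfit(x, y, 2)
    print("coefficients:", cc)
    print("robust sigma:", sigma)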
import numpy as np
from scipy import optimize
def gaussian(x,A,x0,err,B):
return A * np.exp(-(x-x0)**2/(2.*err**2)) + B
def fit_gaussian(x,y,p0=None,yerr=None, **kwargs):
assert np.all(np.isfinite(x)) & np.all(np.isfinite(y))
if p0 is None:
        p0 = [np.max(y), np.median(x), (np.max(x)-np.min(x))/2., np.min(y)]
popt, pcov = optimize.curve_fit(gaussian, x, y, p0=p0,
bounds=([0, np.min(x), 0, 0],
[2*np.max(y), np.max(x), 3*(np.max(x)-np.min(x)), np.max(y)]),
sigma=yerr,
**kwargs)
return popt, pcov
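# Illustrative usage sketch (the _example_* helper is new, not part of the
# original code): recover (A, x0, err, B) from a noisy synthetic profile.
# The explicit p0 keeps the start point inside the bounds used above.
def _example_fit_gaussian():
    x = np.linspace(-5.0, 5.0, 200)
    y = gaussian(x, 2.0, 0.5, 1.2, 0.3) + np.random.normal(0.0, 0.05, x.size)
    popt, pcov = fit_gaussian(x, y, p0=[2.0, 0.5, 1.2, 0.3])
    print("A, x0, err, B =", popt)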
def gfunc3(x, *theta):
z = (x-theta[1])/theta[2]
return theta[0] * np.exp(-z**2/2.)
def gfunc4(x, *theta):
z = (x-theta[1])/theta[2]
return theta[0] * np.exp(-z**2/2.) + theta[3]
def gfunc5(x, *theta):
z = (x-theta[1])/theta[2]
    return theta[0] * np.exp(-z**2/2.)
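# Illustrative note (the _example_* helper is new, not part of the original
# code): because gfunc3/gfunc4/gfunc5 take their parameters via *theta,
# scipy.optimize.curve_fit cannot infer the parameter count, so an explicit
# p0 must be supplied.
def _example_gfunc4_fit():
    x = np.linspace(-5.0, 5.0, 200)
    y = gfunc4(x, 2.0, 0.5, 1.2, 0.3) + np.random.normal(0.0, 0.05, x.size)
    popt, _ = optimize.curve_fit(gfunc4, x, y, p0=[1.0, 0.0, 1.0, 0.0])
    print(popt)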
# -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=C0111
# ignore snakecase warning, missing docstring
"""Copyright 2015 <NAME>.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from math import cos, sin
import matplotlib.pyplot as plt
import numpy.random as random
from numpy.random import randn
from numpy import asarray
import numpy as np
from pytest import approx
from scipy.spatial.distance import mahalanobis as scipy_mahalanobis
from filterpy.kalman import UnscentedKalmanFilter
from filterpy.kalman import (unscented_transform, MerweScaledSigmaPoints,
JulierSigmaPoints, SimplexSigmaPoints,
KalmanFilter)
from filterpy.common import Q_discrete_white_noise, Saver
import filterpy.stats as stats
DO_PLOT = False
def test_sigma_plot():
""" Test to make sure sigma's correctly mirror the shape and orientation
of the covariance array."""
x = np.array([[1, 2]])
P = np.array([[2, 1.2],
[1.2, 2]])
kappa = .1
    # if kappa is larger, then points should be closer together
sp0 = JulierSigmaPoints(n=2, kappa=kappa)
sp1 = JulierSigmaPoints(n=2, kappa=kappa*1000)
sp2 = MerweScaledSigmaPoints(n=2, kappa=0, beta=2, alpha=1e-3)
sp3 = SimplexSigmaPoints(n=2)
# test __repr__ doesn't crash
str(sp0)
str(sp1)
str(sp2)
str(sp3)
w0 = sp0.Wm
w1 = sp1.Wm
w2 = sp2.Wm
w3 = sp3.Wm
Xi0 = sp0.sigma_points(x, P)
Xi1 = sp1.sigma_points(x, P)
Xi2 = sp2.sigma_points(x, P)
Xi3 = sp3.sigma_points(x, P)
assert max(Xi1[:, 0]) > max(Xi0[:, 0])
assert max(Xi1[:, 1]) > max(Xi0[:, 1])
if DO_PLOT:
plt.figure()
for i in range(Xi0.shape[0]):
plt.scatter((Xi0[i, 0]-x[0, 0])*w0[i] + x[0, 0],
(Xi0[i, 1]-x[0, 1])*w0[i] + x[0, 1],
color='blue', label='Julier low $\kappa$')
for i in range(Xi1.shape[0]):
plt.scatter((Xi1[i, 0]-x[0, 0]) * w1[i] + x[0, 0],
(Xi1[i, 1]-x[0, 1]) * w1[i] + x[0, 1],
color='green', label='Julier high $\kappa$')
for i in range(Xi2.shape[0]):
plt.scatter((Xi2[i, 0] - x[0, 0]) * w2[i] + x[0, 0],
(Xi2[i, 1] - x[0, 1]) * w2[i] + x[0, 1],
color='red')
for i in range(Xi3.shape[0]):
plt.scatter((Xi3[i, 0] - x[0, 0]) * w3[i] + x[0, 0],
(Xi3[i, 1] - x[0, 1]) * w3[i] + x[0, 1],
color='black', label='Simplex')
stats.plot_covariance_ellipse([1, 2], P)
def test_scaled_weights():
for n in range(1, 5):
for alpha in np.linspace(0.99, 1.01, 100):
for beta in range(2):
for kappa in range(2):
sp = MerweScaledSigmaPoints(n, alpha, 0, 3-n)
assert abs(sum(sp.Wm) - 1) < 1.e-1
assert abs(sum(sp.Wc) - 1) < 1.e-1
def test_julier_sigma_points_1D():
""" tests passing 1D data into sigma_points"""
kappa = 0.
sp = JulierSigmaPoints(1, kappa)
Wm, Wc = sp.Wm, sp.Wc
assert np.allclose(Wm, Wc, 1e-12)
assert len(Wm) == 3
mean = 5
cov = 9
Xi = sp.sigma_points(mean, cov)
xm, ucov = unscented_transform(Xi, Wm, Wc, 0)
# sum of weights*sigma points should be the original mean
m = 0.0
for x, w in zip(Xi, Wm):
m += x*w
assert abs(m-mean) < 1.e-12
assert abs(xm[0] - mean) < 1.e-12
assert abs(ucov[0, 0] - cov) < 1.e-12
assert Xi.shape == (3, 1)
def test_simplex_sigma_points_1D():
""" tests passing 1D data into sigma_points"""
sp = SimplexSigmaPoints(1)
Wm, Wc = sp.Wm, sp.Wc
assert np.allclose(Wm, Wc, 1e-12)
assert len(Wm) == 2
mean = 5
cov = 9
Xi = sp.sigma_points(mean, cov)
xm, ucov = unscented_transform(Xi, Wm, Wc, 0)
# sum of weights*sigma points should be the original mean
m = 0.0
for x, w in zip(Xi, Wm):
m += x*w
assert abs(m-mean) < 1.e-12
assert abs(xm[0] - mean) < 1.e-12
assert abs(ucov[0, 0]-cov) < 1.e-12
assert Xi.shape == (2, 1)
class RadarSim(object):
def __init__(self, dt):
self.x = 0
self.dt = dt
def get_range(self):
vel = 100 + 5*randn()
alt = 1000 + 10*randn()
self.x += vel*self.dt
v = self.x * 0.05*randn()
rng = (self.x**2 + alt**2)**.5 + v
return rng
def test_radar():
def fx(x, dt):
A = np.eye(3) + dt * np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
return A.dot(x)
def hx(x):
return [np.sqrt(x[0]**2 + x[2]**2)]
dt = 0.05
sp = JulierSigmaPoints(n=3, kappa=0.)
kf = UnscentedKalmanFilter(3, 1, dt, fx=fx, hx=hx, points=sp)
assert np.allclose(kf.x, kf.x_prior)
assert np.allclose(kf.P, kf.P_prior)
# test __repr__ doesn't crash
str(kf)
kf.Q *= 0.01
kf.R = 10
kf.x = np.array([0., 90., 1100.])
kf.P *= 100.
radar = RadarSim(dt)
t = np.arange(0, 20+dt, dt)
n = len(t)
xs = np.zeros((n, 3))
random.seed(200)
rs = []
for i in range(len(t)):
r = radar.get_range()
kf.predict()
kf.update(z=[r])
xs[i, :] = kf.x
rs.append(r)
# test mahalanobis
a = np.zeros(kf.y.shape)
maha = scipy_mahalanobis(a, kf.y, kf.SI)
assert kf.mahalanobis == approx(maha)
if DO_PLOT:
print(xs[:, 0].shape)
plt.figure()
plt.subplot(311)
plt.plot(t, xs[:, 0])
plt.subplot(312)
plt.plot(t, xs[:, 1])
plt.subplot(313)
plt.plot(t, xs[:, 2])
def test_linear_2d_merwe():
""" should work like a linear KF if problem is linear """
def fx(x, dt):
F = np.array([[1, dt, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, dt],
[0, 0, 0, 1]], dtype=float)
return np.dot(F, x)
def hx(x):
return np.array([x[0], x[2]])
dt = 0.1
points = MerweScaledSigmaPoints(4, .1, 2., -1)
kf = UnscentedKalmanFilter(dim_x=4, dim_z=2, dt=dt,
fx=fx, hx=hx, points=points)
kf.x = np.array([-1., 1., -1., 1])
kf.P *= 1.1
# test __repr__ doesn't crash
str(kf)
zs = [[i+randn()*0.1, i+randn()*0.1] for i in range(20)]
Ms, Ps = kf.batch_filter(zs)
smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dts=dt)
if DO_PLOT:
plt.figure()
zs = np.asarray(zs)
plt.plot(zs[:, 0], marker='+')
plt.plot(Ms[:, 0], c='b')
plt.plot(smooth_x[:, 0], smooth_x[:, 2], c='r')
print(smooth_x)
def test_linear_2d_simplex():
""" should work like a linear KF if problem is linear """
def fx(x, dt):
F = np.array([[1, dt, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, dt],
[0, 0, 0, 1]], dtype=float)
return np.dot(F, x)
def hx(x):
return np.array([x[0], x[2]])
dt = 0.1
points = SimplexSigmaPoints(n=4)
kf = UnscentedKalmanFilter(dim_x=4, dim_z=2, dt=dt,
fx=fx, hx=hx, points=points)
kf.x = np.array([-1., 1., -1., 1])
kf.P *= 0.0001
zs = []
for i in range(20):
z = np.array([i+randn()*0.1, i+randn()*0.1])
zs.append(z)
Ms, Ps = kf.batch_filter(zs)
smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dts=dt)
if DO_PLOT:
zs = np.asarray(zs)
plt.plot(Ms[:, 0])
plt.plot(smooth_x[:, 0], smooth_x[:, 2])
print(smooth_x)
def test_linear_1d():
""" should work like a linear KF if problem is linear """
def fx(x, dt):
F = np.array([[1., dt],
[0, 1]])
return np.dot(F, x)
def hx(x):
return np.array([x[0]])
dt = 0.1
points = MerweScaledSigmaPoints(2, .1, 2., -1)
kf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=dt,
fx=fx, hx=hx, points=points)
kf.x = np.array([1, 2])
kf.P = np.array([[1, 1.1],
[1.1, 3]])
kf.R *= 0.05
kf.Q = np.array([[0., 0], [0., .001]])
z = np.array([2.])
kf.predict()
kf.update(z)
zs = []
for i in range(50):
z = np.array([i + randn()*0.1])
zs.append(z)
kf.predict()
kf.update(z)
print('K', kf.K.T)
print('x', kf.x)
def test_batch_missing_data():
""" batch filter should accept missing data with None in the measurements """
def fx(x, dt):
F = np.array([[1, dt, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, dt],
[0, 0, 0, 1]], dtype=float)
return np.dot(F, x)
def hx(x):
return np.array([x[0], x[2]])
dt = 0.1
points = MerweScaledSigmaPoints(4, .1, 2., -1)
kf = UnscentedKalmanFilter(dim_x=4, dim_z=2, dt=dt,
fx=fx, hx=hx, points=points)
kf.x = np.array([-1., 1., -1., 1])
kf.P *= 0.0001
zs = []
for i in range(20):
z = np.array([i + randn()*0.1, i + randn()*0.1])
zs.append(z)
zs[2] = None
Rs = [1]*len(zs)
Rs[2] = None
Ms, Ps = kf.batch_filter(zs)
def test_rts():
def fx(x, dt):
A = np.eye(3) + dt * np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
f = np.dot(A, x)
return f
def hx(x):
return [np.sqrt(x[0]**2 + x[2]**2)]
dt = 0.05
sp = JulierSigmaPoints(n=3, kappa=1.)
kf = UnscentedKalmanFilter(3, 1, dt, fx=fx, hx=hx, points=sp)
kf.Q *= 0.01
kf.R = 10
kf.x = np.array([0., 90., 1100.])
kf.P *= 100.
radar = RadarSim(dt)
t = np.arange(0, 20 + dt, dt)
n = len(t)
xs = np.zeros((n, 3))
random.seed(200)
rs = []
for i in range(len(t)):
r = radar.get_range()
kf.predict()
kf.update(z=[r])
xs[i, :] = kf.x
rs.append(r)
kf.x = np.array([0., 90., 1100.])
kf.P = np.eye(3) * 100
M, P = kf.batch_filter(rs)
assert np.array_equal(M, xs), "Batch filter generated different output"
Qs = [kf.Q] * len(t)
M2, P2, K = kf.rts_smoother(Xs=M, Ps=P, Qs=Qs)
if DO_PLOT:
print(xs[:, 0].shape)
plt.figure()
plt.subplot(311)
plt.plot(t, xs[:, 0])
plt.plot(t, M2[:, 0], c='g')
plt.subplot(312)
plt.plot(t, xs[:, 1])
plt.plot(t, M2[:, 1], c='g')
plt.subplot(313)
plt.plot(t, xs[:, 2])
plt.plot(t, M2[:, 2], c='g')
def test_fixed_lag():
def fx(x, dt):
A = np.eye(3) + dt * np.array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])
f = np.dot(A, x)
return f
def hx(x):
return [np.sqrt(x[0]**2 + x[2]**2)]
dt = 0.05
sp = JulierSigmaPoints(n=3, kappa=0)
kf = UnscentedKalmanFilter(3, 1, dt, fx=fx, hx=hx, points=sp)
kf.Q *= 0.01
kf.R = 10
kf.x = np.array([0., 90., 1100.])
kf.P *= 1.
radar = RadarSim(dt)
t = np.arange(0, 20 + dt, dt)
n = len(t)
xs = np.zeros((n, 3))
random.seed(200)
rs = []
M = []
P = []
N = 10
flxs = []
for i in range(len(t)):
r = radar.get_range()
kf.predict()
kf.update(z=[r])
xs[i, :] = kf.x
flxs.append(kf.x)
rs.append(r)
M.append(kf.x)
P.append(kf.P)
print(i)
if i == 20 and len(M) >= N:
try:
M2, P2, K = kf.rts_smoother(Xs=np.asarray(M)[-N:],
Ps=np.asarray(P)[-N:])
flxs[-N:] = M2
except:
print('except', i)
kf.x = np.array([0., 90., 1100.])
kf.P = np.eye(3) * 100
M, P = kf.batch_filter(rs)
Qs = [kf.Q]*len(t)
M2, P2, K = kf.rts_smoother(Xs=M, Ps=P, Qs=Qs)
flxs = np.asarray(flxs)
print(xs[:, 0].shape)
plt.figure()
plt.subplot(311)
plt.plot(t, xs[:, 0])
plt.plot(t, flxs[:, 0], c='r')
plt.plot(t, M2[:, 0], c='g')
plt.subplot(312)
plt.plot(t, xs[:, 1])
plt.plot(t, flxs[:, 1], c='r')
plt.plot(t, M2[:, 1], c='g')
plt.subplot(313)
plt.plot(t, xs[:, 2])
plt.plot(t, flxs[:, 2], c='r')
plt.plot(t, M2[:, 2], c='g')
def test_circle():
from filterpy.kalman import KalmanFilter
from math import radians
def hx(x):
radius = x[0]
angle = x[1]
x = cos(radians(angle)) * radius
y = sin(radians(angle)) * radius
return np.array([x, y])
def fx(x, dt):
return np.array([x[0], x[1] + x[2], x[2]])
std_noise = .1
sp = JulierSigmaPoints(n=3, kappa=0.)
f = UnscentedKalmanFilter(dim_x=3, dim_z=2, dt=.01,
hx=hx, fx=fx, points=sp)
f.x = np.array([50., 90., 0])
f.P *= 100
f.R = np.eye(2)*(std_noise**2)
f.Q = np.eye(3)*.001
f.Q[0, 0] = 0
f.Q[2, 2] = 0
kf = KalmanFilter(dim_x=6, dim_z=2)
kf.x = np.array([50., 0., 0, 0, .0, 0.])
F = np.array([[1., 1., .5, 0., 0., 0.],
[0., 1., 1., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.],
[0., 0., 0., 1., 1., .5],
[0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 1.]])
kf.F = F
kf.P *= 100
kf.H = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0]])
kf.R = f.R
kf.Q[0:3, 0:3] = Q_discrete_white_noise(3, 1., .00001)
kf.Q[3:6, 3:6] = Q_discrete_white_noise(3, 1., .00001)
results = []
zs = []
kfxs = []
for t in range(12000):
a = t / 30 + 90
x = cos(radians(a)) * 50. + randn() * std_noise
y = sin(radians(a)) * 50. + randn() * std_noise
# create measurement = t plus white noise
z = np.array([x, y])
zs.append(z)
f.predict()
f.update(z)
kf.predict()
kf.update(z)
# save data
results.append(hx(f.x))
kfxs.append(kf.x)
results = np.asarray(results)
zs = np.asarray(zs)
kfxs = np.asarray(kfxs)
print(results)
if DO_PLOT:
plt.plot(zs[:, 0], zs[:, 1], c='r', label='z')
plt.plot(results[:, 0], results[:, 1], c='k', label='UKF')
plt.plot(kfxs[:, 0], kfxs[:, 3], c='g', label='KF')
plt.legend(loc='best')
plt.axis('equal')
def kf_circle():
from filterpy.kalman import KalmanFilter
from math import radians
import math
def hx(x):
radius = x[0]
angle = x[1]
x = cos(radians(angle)) * radius
y = sin(radians(angle)) * radius
return np.array([x, y])
def fx(x, dt):
return np.array([x[0], x[1] + x[2], x[2]])
def hx_inv(x, y):
angle = math.atan2(y, x)
radius = math.sqrt(x*x + y*y)
return np.array([radius, angle])
std_noise = .1
kf = KalmanFilter(dim_x=3, dim_z=2)
kf.x = np.array([50., 0., 0.])
F = np.array([[1., 0, 0.],
[0., 1., 1.],
[0., 0., 1.]])
kf.F = F
kf.P *= 100
kf.H = np.array([[1, 0, 0],
[0, 1, 0]])
kf.R = np.eye(2)*(std_noise**2)
#kf.Q[0:3, 0:3] = Q_discrete_white_noise(3, 1., .00001)
zs = []
kfxs = []
for t in range(2000):
a = t / 30 + 90
x = cos(radians(a)) * 50. + randn() * std_noise
y = sin(radians(a)) * 50. + randn() * std_noise
z = hx_inv(x, y)
zs.append(z)
kf.predict()
kf.update(z)
# save data
kfxs.append(kf.x)
zs = np.asarray(zs)
kfxs = np.asarray(kfxs)
if DO_PLOT:
plt.plot(zs[:, 0], zs[:, 1], c='r', label='z')
plt.plot(kfxs[:, 0], kfxs[:, 1], c='g', label='KF')
plt.legend(loc='best')
plt.axis('equal')
def two_radar():
# code is not complete - I was using to test RTS smoother. very similar
# to two_radary.py in book.
import numpy as np
import matplotlib.pyplot as plt
from numpy import array
from numpy.linalg import norm
from numpy.random import randn
from math import atan2
from filterpy.common import Q_discrete_white_noise
class RadarStation(object):
def __init__(self, pos, range_std, bearing_std):
self.pos = asarray(pos)
self.range_std = range_std
self.bearing_std = bearing_std
def reading_of(self, ac_pos):
""" Returns range and bearing to aircraft as tuple. bearing is in
radians.
"""
diff = np.subtract(self.pos, ac_pos)
rng = norm(diff)
brg = atan2(diff[1], diff[0])
return rng, brg
def noisy_reading(self, ac_pos):
rng, brg = self.reading_of(ac_pos)
rng += randn() * self.range_std
brg += randn() * self.bearing_std
return rng, brg
class ACSim(object):
def __init__(self, pos, vel, vel_std):
self.pos = asarray(pos, dtype=float)
self.vel = asarray(vel, dtype=float)
self.vel_std = vel_std
def update(self):
vel = self.vel + (randn() * self.vel_std)
self.pos += vel
return self.pos
dt = 1.
def hx(x):
r1, b1 = hx.R1.reading_of((x[0], x[2]))
r2, b2 = hx.R2.reading_of((x[0], x[2]))
return array([r1, b1, r2, b2])
def fx(x, dt):
x_est = x.copy()
x_est[0] += x[1]*dt
x_est[2] += x[3]*dt
return x_est
vx, vy = 0.1, 0.1
f = UnscentedKalmanFilter(dim_x=4, dim_z=4, dt=dt, hx=hx, fx=fx, kappa=0)
aircraft = ACSim((100, 100), (vx*dt, vy*dt), 0.00000002)
range_std = 0.001 # 1 meter
bearing_std = 1./1000 # 1mrad
R1 = RadarStation((0, 0), range_std, bearing_std)
R2 = RadarStation((200, 0), range_std, bearing_std)
hx.R1 = R1
hx.R2 = R2
f.x = array([100, vx, 100, vy])
f.R = np.diag([range_std**2, bearing_std**2, range_std**2, bearing_std**2])
q = Q_discrete_white_noise(2, var=0.0002, dt=dt)
f.Q[0:2, 0:2] = q
f.Q[2:4, 2:4] = q
f.P = np.diag([.1, 0.01, .1, 0.01])
track = []
zs = []
for i in range(int(300/dt)):
pos = aircraft.update()
r1, b1 = R1.noisy_reading(pos)
r2, b2 = R2.noisy_reading(pos)
z = np.array([r1, b1, r2, b2])
zs.append(z)
track.append(pos.copy())
zs = asarray(zs)
xs, Ps, Pxz, pM, pP = f.batch_filter(zs)
ms, _, _ = f.rts_smoother(xs, Ps)
track = asarray(track)
time = np.arange(0, len(xs) * dt, dt)
plt.figure()
plt.subplot(411)
plt.plot(time, track[:, 0])
plt.plot(time, xs[:, 0])
plt.legend(loc=4)
plt.xlabel('time (sec)')
plt.ylabel('x position (m)')
plt.tight_layout()
plt.subplot(412)
plt.plot(time, track[:, 1])
plt.plot(time, xs[:, 2])
plt.legend(loc=4)
plt.xlabel('time (sec)')
plt.ylabel('y position (m)')
plt.tight_layout()
plt.subplot(413)
plt.plot(time, xs[:, 1])
plt.plot(time, ms[:, 1])
plt.legend(loc=4)
plt.ylim([0, 0.2])
plt.xlabel('time (sec)')
plt.ylabel('x velocity (m/s)')
plt.tight_layout()
plt.subplot(414)
plt.plot(time, xs[:, 3])
plt.plot(time, ms[:, 3])
plt.ylabel('y velocity (m/s)')
plt.legend(loc=4)
plt.xlabel('time (sec)')
plt.tight_layout()
plt.show()
def test_linear_rts():
""" for a linear model the Kalman filter and UKF should produce nearly
identical results.
Test code mostly due to user gboehl as reported in GitHub issue #97, though
I converted it from an AR(1) process to constant velocity kinematic
model.
"""
dt = 1.0
F = np.array([[1., dt], [.0, 1]])
H = np.array([[1., .0]])
def t_func(x, dt):
F = np.array([[1., dt], [.0, 1]])
return np.dot(F, x)
def o_func(x):
return np.dot(H, x)
    sig_t = .1 # process
sig_o = .00000001 # measurement
N = 50
X_true, X_obs = [], []
for i in range(N):
X_true.append([i + 1, 1.])
X_obs.append(i + 1 + np.random.normal(scale=sig_o))
X_true = np.array(X_true)
X_obs = np.array(X_obs)
oc = np.ones((1, 1)) * sig_o**2
tc = np.zeros((2, 2))
tc[1, 1] = sig_t**2
tc = Q_discrete_white_noise(dim=2, dt=dt, var=sig_t**2)
points = MerweScaledSigmaPoints(n=2, alpha=.1, beta=2., kappa=1)
ukf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=dt, hx=o_func, fx=t_func, points=points)
ukf.x = np.array([0., 1.])
ukf.R = np.copy(oc)
ukf.Q = np.copy(tc)
s = Saver(ukf)
s.save()
s.to_array()
kf = KalmanFilter(dim_x=2, dim_z=1)
kf.x = np.array([[0., 1]]).T
kf.R = np.copy(oc)
kf.Q = np.copy(tc)
kf.H = np.copy(H)
kf.F = np.copy(F)
mu_ukf, cov_ukf = ukf.batch_filter(X_obs)
x_ukf, _, _ = ukf.rts_smoother(mu_ukf, cov_ukf)
mu_kf, cov_kf, _, _ = kf.batch_filter(X_obs)
x_kf, _, _, _ = kf.rts_smoother(mu_kf, cov_kf)
# check results of filtering are correct
kfx = mu_kf[:, 0, 0]
ukfx = mu_ukf[:, 0]
kfxx = mu_kf[:, 1, 0]
ukfxx = mu_ukf[:, 1]
dx = kfx - ukfx
dxx = kfxx - ukfxx
    # error in position should be smaller than error in velocity, hence
# atol is different for the two tests.
assert np.allclose(dx, 0, atol=1e-7)
assert np.allclose(dxx, 0, atol=1e-6)
# now ensure the RTS smoothers gave nearly identical results
kfx = x_kf[:, 0, 0]
ukfx = x_ukf[:, 0]
kfxx = x_kf[:, 1, 0]
ukfxx = x_ukf[:, 1]
dx = kfx - ukfx
dxx = kfxx - ukfxx
assert np.allclose(dx, 0, atol=1e-7)
assert np.allclose(dxx, 0, atol=1e-6)
return ukf
def _test_log_likelihood():
from filterpy.common import Saver
def fx(x, dt):
F = np.array([[1, dt, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, dt],
[0, 0, 0, 1]], dtype=float)
return np.dot(F, x)
def hx(x):
return np.array([x[0], x[2]])
dt = 0.1
points = MerweScaledSigmaPoints(4, .1, 2., -1)
kf = UnscentedKalmanFilter(dim_x=4, dim_z=2, dt=dt, fx=fx, hx=hx, points=points)
z_std = 0.1
    kf.R = np.diag([z_std**2, z_std**2])
import cv2
import numpy as np
from cube import Cube
from cube import Face
def xrotation(th):
c = np.cos(th)
s = np.sin(th)
    return np.array([[1, 0, 0], [0, c, s], [0, -s, c]])
import os
import os.path as path
import sys
import math
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
import scipy.cluster as cluster
import scipy.stats as stats
from sklearn.neighbors import KernelDensity, BallTree
import option
import data
import model
import utils
def powLawOnLabels(labels, numLabels, params):
cnt = [0] * numLabels
for i in labels:
cnt[i] += 1
label_weight = np.array(cnt)
label_weight = np.power(label_weight, params.alpha)
label_weight /= np.sum(label_weight)
label_weight *= params.beta
weights = np.array([label_weight[i] for i in labels])
print(np.max(weights), np.min(weights))
return weights
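# Illustrative usage sketch: powLawOnLabels maps per-class counts to
# per-sample weights via count**alpha scaled by beta. The _FakeParams class
# below is a stand-in for the real option/params object, used only for this
# demo; a negative alpha down-weights over-represented classes.
def _example_powLawOnLabels():
    class _FakeParams:
        alpha = -0.5
        beta = 1.0
    labels = [0, 0, 0, 0, 1, 1, 2]
    weights = powLawOnLabels(labels, 3, _FakeParams())
    print(weights)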
def trainBaseline(params):
dataloader = data.DataLoader(params)
trainSet, testSet, valSet = dataloader.readData()
def avgAcc(numTrial, dataloader, trainFunc, params):
trainSet, testSet, valSet = dataloader.readData()
trainAcc = []
valAcc = []
cateAcc = []
timer = utils.Timer()
for i in range(1, numTrial+1):
print(f"Trial {i} starts at {timer()}")
ta, va, ca = trainFunc(dataloader, trainSet, testSet, valSet, params)
print(f"Trail {i} result:")
print(f"Train accuracy {ta}")
print(f"Val accuracy {va}")
print(f"Cate accuracy {ca}")
trainAcc.append(ta)
valAcc.append(va)
cateAcc.append(ca)
trainAcc = tf.reduce_mean(tf.stack(trainAcc))
valAcc = tf.reduce_mean(tf.stack(valAcc))
cateAcc = tf.reduce_mean(tf.stack(cateAcc), axis=0)
print(f"Finished {timer()} {sys.argv}")
print("Overall train accuracy %.4f" % (float(trainAcc)))
print("Overall val accuracy %.4f" % (float(valAcc)))
print("Overall cate accuracy ", end='')
for i in cateAcc[:-1]:
print("%.2lf," % i, end=' ')
print("%.3f" % cateAcc[-1])
def trainBaseline(dataloader, trainSet, testSet, valSet, params):
baseline = model.makeBaselineModel(params)
optimizer = keras.optimizers.Adam(lr=params.learningRate)
loss_fn = keras.losses.CategoricalCrossentropy()
metricAcc = keras.metrics.CategoricalAccuracy()
cateAcc = model.CateAcc()
baseline.compile(optimizer, loss=loss_fn, metrics=[metricAcc, cateAcc])
baseline.fit(trainSet, epochs=params.numEpochs, validation_data=testSet)
baseline.save_weights(path.join(params.modelPath, "baseline.keras"))
accs = baseline.evaluate(trainSet)
trainAcc = accs[1]
accs = baseline.evaluate(valSet)
valAcc = accs[1]
cateAcc = accs[2]
return trainAcc, valAcc, cateAcc
def trainWeightedBaseline(dataloader, trainSet, testSet, valSet, params):
labels = dataloader.trainData[b"labels"]
weights = powLawOnLabels(labels, 10, params)
baseline = model.makeBaselineModel(params)
optimizer = keras.optimizers.Adam(lr=params.learningRate)
loss_fn = keras.losses.CategoricalCrossentropy()
metricAcc = keras.metrics.CategoricalAccuracy()
cateAcc = model.CateAcc()
baseline.compile(optimizer, loss=loss_fn, metrics=[
metricAcc, cateAcc], loss_weights=weights)
baseline.fit(trainSet, epochs=params.numEpochs, validation_data=testSet)
baseline.save_weights(
path.join(params.modelPath, "weightedBaseline.keras"))
accs = baseline.evaluate(valSet)
for i in accs[2]:
print("%.2lf," % i, end=' ')
print()
print("%.3lf" % accs[2][9])
accs = baseline.evaluate(trainSet)
trainAcc = accs[1]
accs = baseline.evaluate(valSet)
valAcc = accs[1]
cateAcc = accs[2]
return trainAcc, valAcc, cateAcc
def KMeanupdate(encode_record, params):
centroids, label = cluster.vq.kmeans2(encode_record, 10, minit="points")
weights = powLawOnLabels(label, 10, params)
return tf.constant(weights, dtype=tf.float32)
def KMeanDistupdate(encode_record, params):
centroids, label = cluster.vq.kmeans2(encode_record, 10, minit="points")
weights = powLawOnLabels(label, 10, params)
dist = np.array([np.linalg.norm(centroids[l]-encode_record[i])
for i, l in enumerate(label)])
dist = np.power(dist * 0.1, -params.alpha)
weights *= dist
print(np.max(weights), np.min(weights))
return tf.constant(weights, dtype=tf.float32)
def radiusUpdate(encode_record, params):
encode_record -= np.mean(encode_record, 0)
print(np.max(np.max(encode_record)), np.min(np.min(encode_record)))
tree = BallTree(encode_record)
neighbor = tree.query_radius(encode_record, 3, count_only=True) + 1
print(np.max(neighbor), np.min(neighbor))
weights = np.power(neighbor, params.alpha) * params.beta
return tf.constant(weights, dtype=tf.float32)
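# Illustrative usage sketch: radiusUpdate weights each embedding by the number
# of neighbours found within a fixed radius, raised to params.alpha. The
# random embeddings and the _FakeParams stand-in exist only for this demo.
def _example_radiusUpdate():
    class _FakeParams:
        alpha = -0.5
        beta = 1.0
    embeddings = np.random.normal(size=(256, 8))
    weights = radiusUpdate(embeddings, _FakeParams())
    print(weights.shape, float(tf.reduce_min(weights)), float(tf.reduce_max(weights)))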
def normalize(weight):
return weight / np.mean(weight)
def Onlylossupdate(old_loss, loss, params):
loss_weight = np.array(loss)
# loss_weight = normalize(loss_weight)
loss_weight = np.power(loss_weight, params.alpha)
loss_weight = normalize(loss_weight)
return tf.constant(loss_weight, dtype=tf.float32)
def Trendlossupdate(old_loss, loss, params):
    loss_weight = np.array(loss)
"""
Helper functions which obtain forces and energies
corresponding to atoms in structures. These functions automatically
cast atoms into their respective atomic environments.
"""
import numpy as np
from flare.gp import GaussianProcess
from flare.struc import Structure
from copy import deepcopy
from flare.predict import (
predict_on_structure_par,
predict_on_atom,
predict_on_atom_en,
predict_on_structure_par_en,
)
from .fake_gp import generate_hm, get_tstp, get_random_structure
from flare.predict import (
predict_on_structure,
predict_on_structure_par,
predict_on_structure_efs,
predict_on_structure_efs_par,
)
import pytest
import time
def fake_predict(_, __):
    return np.random.uniform(-1, 1), np.random.uniform(-1, 1)
from scipy import linalg
import numpy as np
from ..utils.simple_functions import choose
CLOSED_THRESHOLD = 0.001
def bezier(points):
n = len(points) - 1
return lambda t: sum([
((1 - t)**(n - k)) * (t**k) * choose(n, k) * point
for k, point in enumerate(points)
])
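# Illustrative usage sketch (helper not present in the original module): a
# quadratic bezier through three control points, evaluated at its midpoint.
def _example_bezier():
    points = [np.array([0.0, 0.0]), np.array([1.0, 2.0]), np.array([2.0, 0.0])]
    curve = bezier(points)
    print(curve(0.5))  # expected [1.0, 1.0] for these control points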
def partial_bezier_points(points, a, b):
"""
Given an array of points which define
a bezier curve, and two numbers 0<=a<b<=1,
return an array of the same size, which
describes the portion of the original bezier
curve on the interval [a, b].
This algorithm is pretty nifty, and pretty dense.
"""
if a == 1:
return [points[-1]] * len(points)
a_to_1 = np.array([
bezier(points[i:])(a)
for i in range(len(points))
])
end_prop = (b - a) / (1. - a)
return np.array([
bezier(a_to_1[:i + 1])(end_prop)
for i in range(len(points))
])
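# Illustrative usage sketch (helper not present in the original module):
# extract the control points describing the middle half (t in [0.25, 0.75])
# of a cubic bezier.
def _example_partial_bezier_points():
    points = np.array([[0.0, 0.0], [1.0, 2.0], [2.0, 2.0], [3.0, 0.0]])
    print(partial_bezier_points(points, 0.25, 0.75))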
# Linear interpolation variants
def interpolate(start, end, alpha):
return (1 - alpha) * start + alpha * end
def integer_interpolate(start, end, alpha):
"""
alpha is a float between 0 and 1. This returns
an integer between start and end (inclusive) representing
appropriate interpolation between them, along with a
"residue" representing a new proportion between the
returned integer and the next one of the
list.
For example, if start=0, end=10, alpha=0.46, This
would return (4, 0.6).
"""
if alpha >= 1:
return (end - 1, 1.0)
if alpha <= 0:
return (start, 0)
value = int(interpolate(start, end, alpha))
residue = ((end - start) * alpha) % 1
return (value, residue)
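# Illustrative sketch of the docstring example above (helper not present in
# the original module): alpha=0.46 between 0 and 10 lands on integer 4 with a
# residue of roughly 0.6 towards 5.
def _example_integer_interpolate():
    value, residue = integer_interpolate(0, 10, 0.46)
    assert value == 4
    assert abs(residue - 0.6) < 1e-9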
def mid(start, end):
return (start + end) / 2.0
def inverse_interpolate(start, end, value):
return np.true_divide(value - start, end - start)
def match_interpolate(new_start, new_end, old_start, old_end, old_value):
return interpolate(
new_start, new_end,
inverse_interpolate(old_start, old_end, old_value)
)
# Figuring out which bezier curves most smoothly connect a sequence of points
def get_smooth_handle_points(points):
points = np.array(points)
num_handles = len(points) - 1
dim = points.shape[1]
if num_handles < 1:
return np.zeros((0, dim)), np.zeros((0, dim))
# Must solve 2*num_handles equations to get the handles.
# l and u are the number of lower an upper diagonal rows
# in the matrix to solve.
l, u = 2, 1
# diag is a representation of the matrix in diagonal form
# See https://www.particleincell.com/2012/bezier-splines/
# for how to arive at these equations
diag = np.zeros((l + u + 1, 2 * num_handles))
diag[0, 1::2] = -1
diag[0, 2::2] = 1
diag[1, 0::2] = 2
diag[1, 1::2] = 1
diag[2, 1:-2:2] = -2
diag[3, 0:-3:2] = 1
# last
diag[2, -2] = -1
diag[1, -1] = 2
# This is the b as in Ax = b, where we are solving for x,
# and A is represented using diag. However, think of entries
# to x and b as being points in space, not numbers
b = np.zeros((2 * num_handles, dim))
b[1::2] = 2 * points[1:]
b[0] = points[0]
b[-1] = points[-1]
def solve_func(b):
return linalg.solve_banded((l, u), diag, b)
use_closed_solve_function = is_closed(points)
if use_closed_solve_function:
# Get equations to relate first and last points
matrix = diag_to_matrix((l, u), diag)
# last row handles second derivative
matrix[-1, [0, 1, -2, -1]] = [2, -1, 1, -2]
# first row handles first derivative
matrix[0, :] = np.zeros(matrix.shape[1])
matrix[0, [0, -1]] = [1, 1]
b[0] = 2 * points[0]
    b[-1] = np.zeros(dim)
import os
import numpy as np
import time
import matplotlib.pyplot as plt
import tensorflow as tf
import copy
from tqdm import tqdm
from helper_funcs import frames_to_mp4
from policy_random import Policy_Random
from gym.spaces import Box
from cpprb import ReplayBuffer
from tf2rl.experiments.utils import save_path
from tf2rl.envs.utils import is_discrete, get_act_dim
from tf2rl.misc.get_replay_buffer import get_replay_buffer, get_default_rb_dict
from tf2rl.misc.prepare_output_dir import prepare_output_dir
from tf2rl.misc.initialize_logger import initialize_logger
from tf2rl.envs.normalizer import EmpiricalNormalizer
from tf2rl.misc.discount_cumsum import discount_cumsum
import logging
import wandb
from tf2rl.algos.ppo import PPO
from tf2rl.algos.sac import SAC
from tf2rl.algos.td3 import TD3
class CollectSamples(object):
def __init__(self, env, policy, visualize_rollouts, which_agent, dt_steps, dt_from_xml, follow_trajectories):
self.env = env
self.policy = policy
self.visualize_at_all = visualize_rollouts
self.which_agent = which_agent
self.low = self.env.observation_space.low
self.high = self.env.observation_space.high
self.shape = self.env.observation_space.shape
        self.use_low = self.low + (self.high-self.low)/3.0 ## invalid value encountered in subtract
self.use_high = self.high - (self.high-self.low)/3.0
self.dt_steps = dt_steps
self.dt_from_xml = dt_from_xml
self.follow_trajectories = follow_trajectories
def collect_samples(self, num_rollouts, steps_per_rollout):
observations_list = []
actions_list = []
starting_states_list=[]
rewards_list = []
visualization_frequency = 10
pbar = tqdm(total=num_rollouts, ascii=True)
for rollout_number in range(num_rollouts):
org_observation = self.env.reset()
min_len=0
observation = org_observation
min_len = 2
if (steps_per_rollout==23333):
steps_per_rollout = 100
min_len = 50
starting_state = np.copy(observation)
observations = np.zeros((1,2))
trails_counter = 0
while not (np.shape(observations)[0] >= min_len):
observations, actions, reward_for_rollout = self.perform_rollout(observation, steps_per_rollout,
rollout_number, visualization_frequency)
trails_counter += 1
if (trails_counter > 100):
if (min_len == 5):
min_len -= 1
else:
min_len -= 5
trails_counter = 0
rewards_list.append(reward_for_rollout)
observations= np.array(observations)
actions= np.array(actions)
observations_list.append(observations)
actions_list.append(actions)
starting_states_list.append(starting_state)
pbar.update(1)
pbar.close()
#return list of length = num rollouts
#each entry of that list contains one rollout
#each entry is [steps_per_rollout x statespace_dim] or [steps_per_rollout x actionspace_dim]
print('observations_list, actions_list, starting_states_list, rewards_list') #(60, xxx, 29) (60, xxx, 8) (60, xxx) (60,)
# print(np.shape(observations_list), np.shape(actions_list), np.shape(starting_states_list),np.shape(rewards_list))
# print(observations_list[0][0])
# print(starting_states_list[0])
return observations_list, actions_list, starting_states_list, rewards_list
def perform_rollout(self, observation, steps_per_rollout, rollout_number, visualization_frequency):
observations = []
actions = []
visualize = False
reward_for_rollout = 0
self.env.reset()
if((rollout_number%visualization_frequency)==0):
#print("currently performing rollout #", rollout_number)
if(self.visualize_at_all):
all_states=[]
print ("---- visualizing a rollout ----")
visualize=True
for step_num in range(steps_per_rollout):
action, _ = self.policy.get_action(observation)
#print("[MESSAGE FROM COLLECT_SAMPLES] The action just took: \n", action)
observations.append(observation)
actions.append(action)
org_next_observation, reward, terminal, env_info = self.env.step(action)
try:
cost = env_info['cost']
except (TypeError, KeyError, IndexError):
cost = 0
next_observation = org_next_observation
reward_for_rollout += reward
observation = np.copy(next_observation)
if terminal:
#print(f"Had to stop rollout after {step_num} steps because terminal state was reached.")
break
if(visualize):
if(self.which_agent==0):
curr_state = self.env.render()
all_states.append(np.expand_dims(curr_state, axis=0))
else:
self.env.render()
time.sleep(self.dt_steps*self.dt_from_xml)
if(visualize and (self.which_agent==0)):
all_states= np.concatenate(all_states, axis=0)
plt.plot(all_states[:,0], all_states[:,1], 'r')
plt.show()
return observations, actions, reward_for_rollout
class CollectSamples_random(object):
def __init__(self, env, max_steps, _output_dir=None, _episode_max_steps=100, gpu=0, wandb_on=False):
self.env = env
self._policy = Policy_Random(env)
self.visualize_at_all = False
self.max_steps = max_steps
self.wandb_on = wandb_on
self.low = self.env.observation_space.low
self.high = self.env.observation_space.high
self.shape = self.env.observation_space.shape
        self.use_low = self.low + (self.high-self.low)/3.0 ## invalid value encountered in subtract
self.use_high = self.high - (self.high-self.low)/3.0
self.global_total_steps = 0
self.global_num_episodes = 0
self.global_cost = 0
def collect_samples(self):
observations_list = []
actions_list = []
starting_states_list=[]
rewards_list = []
visualization_frequency = 10
pbar = tqdm(total=self.max_steps, ascii=True)
step_counter = 0
while(step_counter < self.max_steps):
org_observation = self.env.reset()
min_len=0
observation = org_observation
starting_state = np.copy(observation)
observations, actions, reward_for_rollout, episode_cost = self.perform_rollout(observation)
self.global_num_episodes += 1
self.global_cost += episode_cost
step_counter += len(actions)
cost_rate = self.global_cost/self.global_total_steps
if self.wandb_on:
wandb.log({'Training_Return': reward_for_rollout, 'Training_Cost': episode_cost, 'Cost_Rate': cost_rate}, step=self.global_num_episodes)
# print("DEBUG! LOGGING ...")
tf.summary.scalar(name="Common/Training_Return", data=reward_for_rollout, step=self.global_total_steps)
tf.summary.scalar(name="Common/Training_Cost", data=episode_cost, step=self.global_total_steps)
tf.summary.scalar(name="Common/Cost_Rate", data=cost_rate, step=self.global_total_steps)
if not (step_counter>self.max_steps): # otherwise ignore this rollout
rewards_list.append(reward_for_rollout)
observations= np.array(observations)
actions= np.array(actions)
observations_list.append(observations)
actions_list.append(actions)
starting_states_list.append(starting_state)
pbar.update(len(actions))
pbar.close()
#return list of length = num rollouts
#each entry of that list contains one rollout
#each entry is [steps_per_rollout x statespace_dim] or [steps_per_rollout x actionspace_dim]
#print('observations_list, actions_list, starting_states_list, rewards_list') #(60, xxx, 29) (60, xxx, 8) (60, xxx) (60,)
# print(np.shape(observations_list), np.shape(actions_list), np.shape(starting_states_list),np.shape(rewards_list))
# print(observations_list[0][0])
# print(starting_states_list[0])
return observations_list, actions_list, starting_states_list, rewards_list
def perform_rollout(self, observation, steps_per_rollout=1000, rollout_number=1, visualization_frequency=1):
observations = []
actions = []
visualize = False
reward_for_rollout = 0
episode_cost = 0
self.env.reset()
if((rollout_number%visualization_frequency)==0):
#print("currently performing rollout #", rollout_number)
if(self.visualize_at_all):
all_states=[]
print ("---- visualizing a rollout ----")
visualize=True
for step_num in range(steps_per_rollout):
action, _ = self._policy.get_action(observation)
#print("[MESSAGE FROM COLLECT_SAMPLES] The action just took: \n", action)
observations.append(observation)
actions.append(action)
org_next_observation, reward, terminal, env_info = self.env.step(action)
try:
cost = env_info['cost']
except (TypeError, KeyError, IndexError):
cost = 0
self.global_total_steps += 1
next_observation = org_next_observation
reward_for_rollout += reward
episode_cost += cost
observation = np.copy(next_observation)
if terminal:
#print(f"Had to stop rollout after {step_num} steps because terminal state was reached.")
break
if(visualize):
if(self.which_agent==0):
curr_state = self.env.render()
all_states.append(np.expand_dims(curr_state, axis=0))
else:
self.env.render()
time.sleep(self.dt_steps*self.dt_from_xml)
if(visualize and (self.which_agent==0)):
all_states= np.concatenate(all_states, axis=0)
plt.plot(all_states[:,0], all_states[:,1], 'r')
plt.show()
return observations, actions, reward_for_rollout, episode_cost
class CollectSamples_SAC(CollectSamples_random):
def __init__(self, env, max_steps, _output_dir, _episode_max_steps=100, gpu=0, wandb_on=False, *args, **kwargs):
super().__init__(env, max_steps, _output_dir, _episode_max_steps, gpu, wandb_on, *args, **kwargs)
#self._set_from_args(args)
# experiment settings
self._max_steps = max_steps
self._episode_max_steps = _episode_max_steps
self._n_experiments = 1
self._show_progress = False
self._save_model_interval = int(400)
self._save_summary_interval = int(1e3)
self._normalize_obs = False
self._logdir = _output_dir
self._model_dir = _output_dir + '/../checkpoints/policy'
# replay buffer
self._use_prioritized_rb = False
self._use_nstep_rb = False
self._n_step = 4
# test settings
self._evaluate = False
self._test_interval = int(1e4) #TODO
self._show_test_progress = False
self._test_episodes = 3
self._save_test_path = False
self._save_test_movie = False
self._show_test_images = False
self.wandb_on = wandb_on
self.evaluation_interval = 2#20 #TODO
self._policy = SAC(
state_shape=env.observation_space.shape,
action_dim=env.action_space.high.size,
gpu=gpu,
max_action=env.action_space.high[0],
batch_size=100,
n_warmup=1000)
self._env = env
self._test_env = copy.copy(self._env)
if self._normalize_obs:
assert isinstance(env.observation_space, Box)
self._obs_normalizer = EmpiricalNormalizer(
shape=env.observation_space.shape)
# prepare log directory
self._output_dir = self._logdir
self.logger = initialize_logger(
logging_level=logging.getLevelName('INFO'),
output_dir=self._output_dir)
if self._evaluate:
assert self._model_dir is not None
self._set_check_point(self._model_dir)
self.replay_buffer = get_replay_buffer(
self._policy, self._env, self._use_prioritized_rb,
self._use_nstep_rb, self._n_step)
# prepare TensorBoard output
# self.writer = tf.summary.create_file_writer(self._output_dir)
# self.writer.set_as_default()
def collect_samples(self):#observations_list, actions_list, starting_states_list, rewards_list (60, xxx, 29) (60, xxx, 8) (60, xxx) (60,)
observations_list = []
actions_list = []
starting_states_list=[]
rewards_list = []
tf.summary.experimental.set_step(self.global_total_steps)
episode_steps = 0
episode_return = 0
episode_cost = 0
episode_start_time = time.perf_counter()
n_episode = 0
# self.replay_buffer = get_replay_buffer(
# self._policy, self._env, self._use_prioritized_rb,
# self._use_nstep_rb, self._n_step)
obs = self._env.reset()
observations = []
actions = []
starting_states_list.append(obs)
while self.global_total_steps < self._max_steps:
if self.global_total_steps < self._policy.n_warmup:
action = self._env.action_space.sample()
else:
action = self._policy.get_action(obs)
next_obs, reward, done, env_info = self._env.step(action)
try:
cost = env_info['cost']
except (TypeError, KeyError, IndexError):
cost = 0
observations.append(obs)
actions.append(action)
rewards_list.append(reward)
if self._show_progress:
self._env.render()
episode_steps += 1
episode_return += reward
episode_cost += cost
self.global_total_steps += 1
tf.summary.experimental.set_step(self.global_total_steps)
done_flag = done
if hasattr(self._env, "_max_episode_steps") and \
episode_steps == self._env._max_episode_steps:
done_flag = False
self.replay_buffer.add(obs=obs, act=action,
next_obs=next_obs, rew=reward, done=done_flag)
obs = next_obs
if done or episode_steps == self._episode_max_steps:
obs = self._env.reset()
n_episode += 1
self.global_num_episodes += 1
self.global_cost += episode_cost
cost_rate = self.global_cost/self.global_total_steps
fps = episode_steps / (time.perf_counter() - episode_start_time)
self.logger.info("Total Epi: {0: 5} Steps: {1: 7} Episode Steps: {2: 5} Return: {3: 7.4f} Cost: {4: 5.4f} FPS: {5:5.2f}".format(
n_episode, self.global_total_steps, episode_steps, episode_return, episode_cost, fps))
tf.summary.scalar(name="Common/Training_Return", data=episode_return, step=self.global_total_steps)
tf.summary.scalar(name="Common/Training_Cost", data=episode_cost, step=self.global_total_steps)
tf.summary.scalar(name="Common/Cost_Rate", data=cost_rate, step=self.global_total_steps)
tf.summary.scalar(name="MFAgent/FPS", data=fps, step=self.global_total_steps)
if self.wandb_on:
wandb.log({'Training_Return': episode_return, 'Training_Cost': episode_cost, 'Cost_Rate': cost_rate}, step=self.global_num_episodes)
rewards_list.append(episode_return)
observations_list.append(np.array(observations))
actions_list.append(np.array(actions))
starting_states_list.append(obs)
#print('=============DEBUG:actions_list',np.shape(actions_list),'=============')
#print(actions_list)
observations = []
actions = []
episode_steps = 0
episode_return = 0
episode_cost = 0
episode_start_time = time.perf_counter()
if self.global_total_steps < self._policy.n_warmup:
continue
if self.global_total_steps % self._policy.update_interval == 0:
samples = self.replay_buffer.sample(self._policy.batch_size)
with tf.summary.record_if(self.global_total_steps % self._save_summary_interval == 0):
self._policy.train(
samples["obs"], samples["act"], samples["next_obs"],
samples["rew"], np.array(samples["done"], dtype=np.float32),
None if not self._use_prioritized_rb else samples["weights"])
if self._use_prioritized_rb:
td_error = self._policy.compute_td_error(
samples["obs"], samples["act"], samples["next_obs"],
samples["rew"], np.array(samples["done"], dtype=np.float32))
self.replay_buffer.update_priorities(
samples["indexes"], np.abs(td_error) + 1e-6)
if self.global_total_steps % self._test_interval == 0:
# print('Evaluating the MFAgent ...')
time1 = time.time()
self.evaluate_policy()
if False:
avg_test_return = self.evaluate_policy(self.global_total_steps)
self.logger.info("Evaluation Total Steps: {0: 7} Average Reward {1: 5.4f} over {2: 2} episodes".format(
total_steps, avg_test_return, self._test_episodes))
tf.summary.scalar(
name="Common/average_test_return", data=avg_test_return)
tf.summary.scalar(name="Common/fps", data=fps)
self.writer.flush()
if self.global_total_steps % self._save_model_interval == 0:
self.checkpoint_manager.save()
tf.summary.flush()
#return list of length = num rollouts
#each entry of that list contains one rollout
#each entry is [steps_per_rollout x statespace_dim] or [steps_per_rollout x actionspace_dim]
return observations_list, actions_list, starting_states_list, rewards_list
def train(self):
samples = self.replay_buffer.sample(self._policy.batch_size)
with tf.summary.record_if(self.global_total_steps % self._save_summary_interval == 0):
self._policy.train(
samples["obs"], samples["act"], samples["next_obs"],
samples["rew"], np.array(samples["done"], dtype=np.float32),
None if not self._use_prioritized_rb else samples["weights"])
if self._use_prioritized_rb:
td_error = self._policy.compute_td_error(
samples["obs"], samples["act"], samples["next_obs"],
samples["rew"], np.array(samples["done"], dtype=np.float32))
self.replay_buffer.update_priorities(
samples["indexes"], np.abs(td_error) + 1e-6)
def _set_check_point(self, model_dir):
# Save and restore model
self._checkpoint = tf.train.Checkpoint(policy=self._policy)
self.checkpoint_manager = tf.train.CheckpointManager(
self._checkpoint, directory=model_dir, max_to_keep=5)
# if model_dir is not None:
# assert os.path.isdir(model_dir)
# self._latest_path_ckpt = tf.train.latest_checkpoint(model_dir)
# self._checkpoint.restore(self._latest_path_ckpt)
# self.logger.info("Restored {}".format(self._latest_path_ckpt))
def evaluate_policy(self, total_steps=None):
tf.summary.experimental.set_step(self.global_total_steps)
if self._normalize_obs:
self._test_env.normalizer.set_params(
*self._env.normalizer.get_params())
avg_test_return = 0.
avg_test_cost = 0.
if self._save_test_path:
replay_buffer = get_replay_buffer(
self._policy, self._test_env, size=self._episode_max_steps)
for i in range(self._test_episodes):
timer0 = time.time()
episode_return = 0.
episode_cost = 0.
frames = []
obs = self._test_env.reset()
# print(f'LOOP{i}-PERIOD1: ', time.time() - timer0)
timer0 = time.time()
for _ in range(self._episode_max_steps):
action = self._policy.get_action(obs, test=True)
next_obs, reward, done, env_info = self._test_env.step(action)
try:
cost = env_info['cost']
except (TypeError, KeyError, IndexError):
cost = 0
if self._save_test_path:
replay_buffer.add(obs=obs, act=action,
next_obs=next_obs, rew=reward, done=done)
if self._save_test_movie:
frames.append(self._test_env.render(mode='rgb_array'))
elif self._show_test_progress:
self._test_env.render()
episode_return += reward
episode_cost += cost
obs = next_obs
if done:
break
# print(f'LOOP{i}-PERIOD2: ', time.time() - timer0)
prefix = "step_{0:08d}_epi_{1:02d}_return_{2:010.4f}_cost_{2:010.4f}".format(
self.global_total_steps, i, episode_return, episode_cost)
if self._save_test_path:
save_path(replay_buffer._encode_sample(np.arange(self._episode_max_steps)),
os.path.join(self._output_dir, prefix + ".pkl"))
replay_buffer.clear()
if self._save_test_movie:
# frames_to_gif(frames, prefix, self._output_dir)
frames_to_mp4(frames, prefix, self._output_dir)
avg_test_return += episode_return
avg_test_cost += episode_cost
if self._show_test_images:
images = tf.cast(
tf.expand_dims(np.array(obs).transpose(2, 0, 1), axis=3),
tf.uint8)
tf.summary.image('train/input_img', images,)
avg_test_return = avg_test_return / self._test_episodes
if self.wandb_on:
wandb.log({'Evaluation_Return': avg_test_return, 'Evaluation_Cost': avg_test_cost}, step=self.global_num_episodes)
tf.summary.scalar(name="MFAgent/Evaluation_Return", data=avg_test_return, step=self.global_total_steps)
tf.summary.scalar(name="MFAgent/Evaluation_Cost", data=avg_test_cost, step=self.global_total_steps)
return avg_test_return, avg_test_cost
def load_checkpoints(self, global_total_steps, global_num_episodes, global_cost):
self.global_total_steps = global_total_steps
self.global_num_episodes = global_num_episodes
self.global_cost = global_cost
self._latest_path_ckpt = tf.train.latest_checkpoint(self._model_dir)
self._checkpoint.restore(self._latest_path_ckpt)
self.logger.info("Restored {}".format(self._latest_path_ckpt))
tf.summary.experimental.set_step(self.global_total_steps)
class CollectSamples_PPO(CollectSamples_SAC):
def __init__(self, env, max_steps, _output_dir, _episode_max_steps=100, gpu=0, wandb_on=False, *args, **kwargs):
super(CollectSamples_PPO, self).__init__(env, max_steps, _output_dir, _episode_max_steps, gpu, wandb_on, *args, **kwargs)
# experiment settings
self._max_steps = max_steps
self._episode_max_steps = _episode_max_steps
self._n_experiments = 1
self._show_progress = False
self._save_model_interval = int(1e4)
self._save_summary_interval = int(1e3)
self._normalize_obs = False
self._logdir = _output_dir
# self._model_dir = None
# replay buffer
self._use_prioritized_rb = False
self._use_nstep_rb = False
self._n_step = 4
# test settings
self._evaluate = False
# self._test_interval = int(1e4)
self._show_test_progress = False
# self._test_episodes = 5
self._save_test_path = False
self._save_test_movie = False
self._show_test_images = False
# PPO settings
self._enable_gae = False
self._normalize_adv = False
self._horizon = 2048
self._output_dir = _output_dir
self._policy = PPO(
state_shape=env.observation_space.shape,
action_dim=get_act_dim(env.action_space),
is_discrete=is_discrete(env.action_space),
max_action=None if is_discrete(
env.action_space) else env.action_space.high[0],
batch_size=64,
actor_units=(64, 64),
critic_units=(64, 64),
n_epoch=10,
lr_actor=3e-4,
lr_critic=3e-4,
hidden_activation_actor="tanh",
hidden_activation_critic="tanh",
discount=0.99,
lam=0.95,
entropy_coef=0.,
horizon=self._horizon,
normalize_adv=self._normalize_adv,
enable_gae=self._enable_gae,
gpu=gpu)
# Prepare buffer
self.replay_buffer = get_replay_buffer(
self._policy, self._env)
kwargs_local_buf = get_default_rb_dict(
size=self._policy.horizon, env=self._env)
kwargs_local_buf["env_dict"]["logp"] = {}
kwargs_local_buf["env_dict"]["val"] = {}
if is_discrete(self._env.action_space):
kwargs_local_buf["env_dict"]["act"]["dtype"] = np.int32
self.local_buffer = ReplayBuffer(**kwargs_local_buf)
def collect_samples(self):
observations_list = []
actions_list = []
starting_states_list=[]
rewards_list = []
# # Prepare buffer
# self.replay_buffer = get_replay_buffer(
# self._policy, self._env)
# kwargs_local_buf = get_default_rb_dict(
# size=self._policy.horizon, env=self._env)
# kwargs_local_buf["env_dict"]["logp"] = {}
# kwargs_local_buf["env_dict"]["val"] = {}
# if is_discrete(self._env.action_space):
# kwargs_local_buf["env_dict"]["act"]["dtype"] = np.int32
# self.local_buffer = ReplayBuffer(**kwargs_local_buf)
episode_steps = 0
episode_return = 0
episode_cost = 0
episode_start_time = time.time()
total_steps = np.array(0, dtype=np.int32)
# self.global_total_steps = total_steps
n_episode = 0
obs = self._env.reset()
observations = []
actions = []
starting_states_list.append(obs)
tf.summary.experimental.set_step(self.global_total_steps)
while self.global_total_steps < self._max_steps:
# Collect samples
for _ in range(self._policy.horizon):
if self._normalize_obs:
obs = self._obs_normalizer(obs, update=False)
act, logp, val = self._policy.get_action_and_val(obs)
next_obs, reward, done, env_info = self._env.step(act)
try:
cost = env_info['cost']
except (TypeError, KeyError, IndexError):
cost = 0
observations.append(obs)
actions.append(act)
if self._show_progress:
self._env.render()
episode_steps += 1
self.global_total_steps += 1
# self.global_total_steps = total_steps
episode_return += reward
episode_cost += cost
done_flag = done
if hasattr(self._env, "_max_episode_steps") and \
episode_steps == self._env._max_episode_steps:
done_flag = False
self.local_buffer.add(
obs=obs, act=act, next_obs=next_obs,
rew=reward, done=done_flag, logp=logp, val=val)
obs = next_obs
if done or episode_steps == self._episode_max_steps:
tf.summary.experimental.set_step(self.global_total_steps)
self.finish_horizon()
obs = self._env.reset()
n_episode += 1
self.global_num_episodes += 1
self.global_cost += episode_cost
cost_rate = self.global_cost/self.global_total_steps
fps = episode_steps / (time.time() - episode_start_time)
self.logger.info(
"Total Epi: {0: 5} Steps: {1: 7} Episode Steps: {2: 5} Return: {3: 7.4f} Cost: {4: 5.4f} FPS: {5:5.2f}".format(
n_episode, int(self.global_total_steps), episode_steps, episode_return, episode_cost, fps))
# tf.summary.scalar(name="Common/training_return", data=episode_return)
tf.summary.scalar(name="Common/Training_Return", data=episode_return, step=self.global_total_steps)
tf.summary.scalar(name="Common/Training_Cost", data=episode_cost, step=self.global_total_steps)
tf.summary.scalar(name="Common/Cost_Rate", data=cost_rate, step=self.global_total_steps)
tf.summary.scalar(name="MFAgent/FPS", data=fps, step=self.global_total_steps)
if self.wandb_on:
wandb.log({'Training_Return': episode_return, 'Training_Cost': episode_cost, 'Cost_Rate': cost_rate}, step=self.global_num_episodes)
starting_states_list.append(obs)
rewards_list.append(episode_return)
observations_list.append(np.array(observations))
actions_list.append(np.array(actions))
#print('=============DEBUG:actions_list',np.shape(actions_list),'=============')
#print(actions_list)
observations = []
actions = []
episode_steps = 0
episode_return = 0
episode_cost = 0
episode_start_time = time.time()
if self.global_total_steps % self._test_interval == 0:
# print('Evaluating the MFAgent ...')
self.evaluate_policy()
#if total_steps % self._test_interval == 0:
if False:
avg_test_return = self.evaluate_policy(self.global_total_steps)
self.logger.info("Evaluation Total Steps: {0: 7} Average Reward {1: 5.4f} over {2: 2} episodes".format(
self.global_total_steps, avg_test_return, self._test_episodes))
tf.summary.scalar(
name="Common/average_test_return", data=avg_test_return)
self.writer.flush()
if self.global_total_steps % self._save_model_interval == 0:
self.checkpoint_manager.save()
self.finish_horizon(last_val=val)
tf.summary.experimental.set_step(self.global_total_steps)
# Train actor critic
if self._policy.normalize_adv:
samples = self.replay_buffer._encode_sample(np.arange(self._policy.horizon))
mean_adv = np.mean(samples["adv"])
std_adv = np.std(samples["adv"])
# Update normalizer
if self._normalize_obs:
self._obs_normalizer.experience(samples["obs"])
with tf.summary.record_if(self.global_total_steps % self._save_summary_interval == 0):
for _ in range(self._policy.n_epoch):
samples = self.replay_buffer._encode_sample(
np.random.permutation(self._policy.horizon))
if self._normalize_obs:
samples["obs"] = self._obs_normalizer(samples["obs"], update=False)
if self._policy.normalize_adv:
adv = (samples["adv"] - mean_adv) / (std_adv + 1e-8)
else:
adv = samples["adv"]
for idx in range(int(self._policy.horizon / self._policy.batch_size)):
target = slice(idx * self._policy.batch_size,
(idx + 1) * self._policy.batch_size)
self._policy.train(
states=samples["obs"][target],
actions=samples["act"][target],
advantages=adv[target],
logp_olds=samples["logp"][target],
returns=samples["ret"][target])
tf.summary.flush()
#print('====================DEGUG:observations_list',np.shape(observations_list),'====================')
#print(observations_list)
#print('====================DEGUG:actions_list',np.shape(actions_list),'====================')
return observations_list, actions_list, starting_states_list, rewards_list
def finish_horizon(self, last_val=0):
samples = self.local_buffer._encode_sample(
np.arange(self.local_buffer.get_stored_size()))
rews = np.append(samples["rew"], last_val)
vals = np.append(samples["val"], last_val)
# GAE-Lambda advantage calculation
deltas = rews[:-1] + self._policy.discount * vals[1:] - vals[:-1]
if self._policy.enable_gae:
advs = discount_cumsum(
deltas, self._policy.discount * self._policy.lam)
else:
advs = deltas
# Rewards-to-go, to be targets for the value function
rets = discount_cumsum(rews, self._policy.discount)[:-1]
self.replay_buffer.add(
obs=samples["obs"], act=samples["act"], done=samples["done"],
ret=rets, adv=advs, logp=np.squeeze(samples["logp"]))
self.local_buffer.clear()
def evaluate_policy(self, total_steps=None):
avg_test_return = 0.
avg_test_cost = 0.
if self._save_test_path:
replay_buffer = get_replay_buffer(
self._policy, self._test_env, size=self._episode_max_steps)
for i in range(self._test_episodes):
episode_return = 0.
episode_cost = 0.
frames = []
obs = self._test_env.reset()
for _ in range(self._episode_max_steps):
if self._normalize_obs:
obs = self._obs_normalizer(obs, update=False)
act, _ = self._policy.get_action(obs, test=True)
act = act if not hasattr(self._env.action_space, "high") else \
np.clip(act, self._env.action_space.low, self._env.action_space.high)
next_obs, reward, done, env_info = self._test_env.step(act)
try:
cost = env_info['cost']
except (TypeError, KeyError, IndexError):
cost = 0
if self._save_test_path:
replay_buffer.add(
obs=obs, act=act, next_obs=next_obs,
rew=reward, done=done)
if self._save_test_movie:
frames.append(self._test_env.render(mode='rgb_array'))
elif self._show_test_progress:
self._test_env.render()
episode_return += reward
episode_cost += cost
obs = next_obs
if done:
break
prefix = "step_{0:08d}_epi_{1:02d}_return_{2:010.4f}_cost_{2:010.4f}".format(
self.global_total_steps, i, episode_return, episode_cost)
if self._save_test_path:
save_path(replay_buffer.sample(self._episode_max_steps),
os.path.join(self._output_dir, prefix + ".pkl"))
replay_buffer.clear()
if self._save_test_movie:
# frames_to_gif(frames, prefix, self._output_dir)
frames_to_mp4(frames, prefix, self._output_dir)
avg_test_return += episode_return
avg_test_cost += episode_cost
if self._show_test_images:
images = tf.cast(
tf.expand_dims(np.array(obs).transpose(2, 0, 1), axis=3),
tf.uint8)
tf.summary.image('train/input_img', images, )
avg_test_return = avg_test_return / self._test_episodes
avg_test_cost = avg_test_cost / self._test_episodes
if self.wandb_on:
wandb.log({'Evaluation_Return': avg_test_return, 'Evaluation_Cost': avg_test_cost}, step=self.global_num_episodes)
tf.summary.scalar(name="MFAgent/Evaluation_Return", data=avg_test_return, step=self.global_total_steps)
tf.summary.scalar(name="MFAgent/Evaluation_Cost", data=avg_test_cost, step=self.global_total_steps)
return avg_test_return, avg_test_cost
def train(self):
if self._policy.normalize_adv:
samples = self.replay_buffer._encode_sample(np.arange(self._policy.horizon))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
''' Description
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "<NAME>"
__copyright__ = "Copyright (C) 2019, HANDBOOK"
__credits__ = ["CONG-MINH NGUYEN"]
__license__ = "GPL"
__version__ = "1.0.1"
__date__ = "5/10/2019"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development" # ["Prototype", "Development", or "Production"]
# Project Style: https://dev.to/codemouse92/dead-simple-python-project-structure-and-imports-38c6
# Code Style: http://web.archive.org/web/20111010053227/http://jaynes.colorado.edu/PythonGuidelines.html#module_formatting
#==============================================================================
# Imported Modules
#==============================================================================
import argparse
from pathlib import Path
import os.path
import sys
import time
import copy
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "" # The GPU id to use, usually either "0" or "1"
import json
import numpy as np
import cv2
import requests
from Camera.OrbbecAstraS.camera import Camera, rgbd_to_pointcloud
from GeneralUtils import List, Tuple, Dict, Union, Generic, TypeVar
from GeneralUtils import sample_arrays, stack_list_horizontal
from PointCloudUtils import visualize_pc, points_to_pc, coords_labels_to_pc, load_ply_as_pc, load_ply_as_points
from PointCloudUtils import adjust_pc_coords, global_icp
from PointCloudUtils import radian2degree, degree2radian, m2mm, mm2m, create_rotx_matrix, create_roty_matrix, create_rotz_matrix, create_tranl_matrix
from Segmentation.PointNet.learner import PointNetLearner
#==============================================================================
# Constant Definitions
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def mpose2mmpose(pose: np.ndarray):
tarr = np.ones(len(pose))
tarr[:3] *= 1000
return pose * tarr
def mmpose2mpose(pose: np.ndarray):
tarr = np.ones(len(pose))
tarr[:3] *= 0.001
return pose * tarr
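# Illustrative usage sketch (not part of the original script): converting a
# 6-DoF pose between metre- and millimetre-based translations. The pose values
# below are made up purely for demonstration; the rotation entries are untouched.
def _example_pose_unit_conversion():
    pose_m = np.array([0.25, -0.10, 0.40, 0.0, 90.0, 180.0])
    pose_mm = mpose2mmpose(pose_m)     # -> [250., -100., 400., 0., 90., 180.]
    pose_back = mmpose2mpose(pose_mm)  # round-trips to the original metre pose
    return pose_mm, pose_back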
def load_object_models(model_path='./obj_models/modelMay10/'):
"""
Description:
:param model_path: str, path to the reference models of known objects
:return: pc_models, List[2L ndarrays], list of points of target surface
:return: centroid_models, List[Vector(3 floats)], the list of centroids of model
:return: pose_models, List[List[Vector(6 floats)]], the list of pose list of each model(each model has a list of poses)
"""
pc_models = []
centroid_models = []
pose_models = []
files = os.listdir(path=os.path.join(model_path, 'pc/'))
for _, file in enumerate(files):
filename, _ = os.path.splitext(file)
pc_model = load_ply_as_points(file_path=os.path.join(model_path, 'pc/', file))
centroid, grasping_pose = np.load(os.path.join(model_path, 'info/', filename + '.npy'), allow_pickle=True)
grasping_pose = np.array(grasping_pose).astype(float)
grasping_pose[:, :3] = mm2m(grasping_pose[:, :3])
pc_models.append(pc_model)
centroid_models.append(centroid)
pose_models.append(grasping_pose)
return pc_models, centroid_models, pose_models
def measure_xtran_params(neutral_point, transformation):
"""
Description: Assume that the transformation from robot coord to camera coord is: RotX -> RotY -> RotZ -> Tranl
In this case: RotX = 180, RotY = 0; RotZ = -90; Tranl: unknown
But we know coords of a determined neutral point in 2 coord systems,
hence we can measure Transl from robot centroid to camera centroid.(Step 2)
:param neutral_point : Dict, list of 2 coords of neutral_point in 2 coord systems
:param transformation : Dict, list of 3 rotating transformations
:return: r2c_xtran : Matrix 4x4 floats, transformation from robot coord to camera coord
:return: c2r_xtran : Matrix 4x4 floats, transformation from camera coord to robot coord
# :return: tranl : Matrix 4x4 floats, translation from robot coord to camera coord
"""
# 1: Load coords of the neutral point
neutral_robot = mm2m(coords=np.array(neutral_point['robot_coord'])) # neutral point coord in robot coord system
neutral_camera = mm2m(coords=np.array(neutral_point['camera_coord'])) # neutral point coord in camera coord system
rotx = create_rotx_matrix(theta=-transformation['rotx']) # load transformation matrix of rotation around x
roty = create_roty_matrix(theta=-transformation['roty']) # load transformation matrix of rotation around y
rotz = create_rotz_matrix(theta=-transformation['rotz']) # load transformation matrix of rotation around z
# 2: Find transformation between robot coord centroid and camera coord centroid
rotxyz = np.dot(np.dot(rotz, roty), rotx) # determine transformation matrix after rotate sequently around x, y, z
neutral_robot3 = np.dot(rotxyz, np.append(neutral_robot, 1))[:3] # find coord of neutral point after RotXYZ
Oc_in_3 = neutral_robot3 - neutral_camera # find coord of robot centroid in camera coord system
tranl = create_tranl_matrix(vector=-Oc_in_3)
# 3: Find transformation matrix from robot to camera
# r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx)
# c2r_xtran = np.linalg.inv(r2c_xtran)
return rotx, roty, rotz, tranl
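# Illustrative sketch only (not part of the original calibration code): how the
# four matrices returned above can be composed into the full robot-to-camera
# transform, mirroring the commented-out lines in measure_xtran_params. The
# neutral-point coordinates and angles are placeholders, not real calibration
# values, and the angle units must match whatever create_rot*_matrix expects.
def _example_compose_xtran():
    neutral_point = {'robot_coord': [400.0, 0.0, 200.0],
                     'camera_coord': [0.0, 0.0, 600.0]}
    transformation = {'rotx': 180, 'roty': 0, 'rotz': -90}
    rotx, roty, rotz, tranl = measure_xtran_params(neutral_point, transformation)
    r2c_xtran = np.dot(np.dot(np.dot(tranl, rotz), roty), rotx)
    c2r_xtran = np.linalg.inv(r2c_xtran)
    return r2c_xtran, c2r_xtran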
def input_cli():
user_input = input("Enter CLI commands such as (--NAME VALUE ...): ")
custom_parser = argparse.ArgumentParser()
custom_parser.add_argument('-vb', '--verbose', type=bool, help='show detail results')
custom_parser.add_argument('-vs', '--voxel_size', type=float, help='adjust voxel size')
custom_parser.add_argument('-ft', '--fitness_threshold', type=float, help='adjust voxel size')
custom_parser.add_argument('-pi', '--selected_pose_id', type=int, help='select pose id that will execute grasp')
custom_args = custom_parser.parse_args(user_input.split())
return custom_args
def normalize_pc(points: np.ndarray):
new_points = copy.deepcopy(points)
new_points[:, 2] -= 0.677
new_points[:, 3:6] /= 255.
return new_points
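# Minimal sketch (illustrative only): normalize_pc() expects rows laid out as
# [x, y, z, r, g, b, ...]; it subtracts the fixed camera height (0.677 m) from z
# and scales the colour channels to [0, 1]. The single point below is made up.
def _example_normalize_pc():
    pts = np.array([[0.10, -0.05, 0.70, 255.0, 128.0, 0.0]])
    return normalize_pc(pts)  # z becomes ~0.023, colours become [1.0, ~0.5, 0.0]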
def segment_obj_in_scene(scene_points, n_points: int=16384, n_channels: int=6, url='http://127.0.0.1:5000/api/'):
"""
Description: segment the point clouds of wrench and pipe out of scene
:param learner : Object, a PointNet Learner that's able to do predict point-wise classification
:param scene_points : 2L ndarray(shape=(n_points, n_channels)), list of points
:param n_points : int > 0, number input points of PointNet Learner
:param n_channels : int > 0, number channels of input points of PointNet Learner
:return: wrench_points : 2L ndarray, points of wrench
:return: pipe_points : 2L ndarray, points of pipe
"""
# Shuffle points to distribute the points equally in arrays(useful for next step, cut scene into parts to segment)
n_scene_points = len(scene_points)
scene_points = sample_arrays(arrs=scene_points, n_samples=n_scene_points)
# Do segment(cut scene into 2 parts, segment each part then unify results of 2 parts to get overall picture)
wrench_points = []
pipe_points = []
for i in range(2):
# sample the points to fit the network
cur_scene_points = scene_points[i * n_scene_points // 2:(i + 1) * n_scene_points // 2]
cur_scene_points = sample_arrays(arrs=cur_scene_points, n_samples=n_points)
# predict segment labels(send data to remote server through RESTful API)
# pred_labels = learner.predict(x=normalize_pc(points=cur_scene_points[:, :n_channels]))
data = {'points': normalize_pc(points=cur_scene_points[:, :n_channels]).tolist()}
j_data = json.dumps(data)
headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}
res = requests.post(url=url, data=j_data, headers=headers)
pred_labels = np.asarray(json.loads(res.text))
# extract the points in the scene of each object by labels
wrench_points.append(cur_scene_points[pred_labels == 2])
pipe_points.append(cur_scene_points[pred_labels == 3])
wrench_points = np.vstack(wrench_points) # get entire points of wrench
pipe_points = np.vstack(pipe_points) # get entire points of pipe
# visualize_pc(coords_labels_to_pc(coords=cur_scene_points[:, :3], labels=pred_labels))
return wrench_points, pipe_points
def match_object_surface(surface: np.ndarray, model: np.ndarray, model_centroid: Tuple[float, float, float],
voxel_size: float, n_channel: int=6, verbose: bool=False):
"""
Description:
:param surface : 2L ndarray(shape=(n_points, n_channels)), list of points of target surface
:param model : 2L ndarray(shape=(n_points, n_channels)), list of points of target surface
:param model_centroid : Vector(3 floats), the centroid of `model`
:param voxel_size : float, default=0.6, downsampling size of point cloud in `global_icp` algorithm
:param n_channel : int > 0, number channels of input points of PointNet Learner
:param verbose : bool, show detail results and notification or not
:return: TYPE, MEAN
"""
point_cloud_model = adjust_pc_coords(point_cloud=points_to_pc(model[:, :n_channel]), coord=model_centroid)
point_cloud_target = adjust_pc_coords(point_cloud=points_to_pc(surface[:, :n_channel]), coord=model_centroid)
xtran = global_icp(source=points_to_pc(point_cloud_model), target=points_to_pc(point_cloud_target),
voxel_size=voxel_size, verbose=verbose)
print(xtran)
return xtran
def interpolate_pose(ref_pose, surf_xtran, rotx, roty, rotz, tranl, pc_centroid):
"""
Description: match reference_pose of (x, y, z) (rx, ry, rz) and (mode, aperture) from reference source to target point cloud
:param ref_pose : Vector(8 floats), the pose of the reference model
:param surf_xtran : Matrix(4x4 floats), the transformation matrix from source model to target point cloud
:param rotx : Matrix(4x4 floats), the transformation matrix of rotation around x axis of robot coord
:param roty : Matrix(4x4 floats), the transformation matrix of rotation around y axis of robot coord
:param rotz : Matrix(4x4 floats), the transformation matrix of rotation around z axis of robot coord
:param tranl : Matrix(4x4 floats), the transformation matrix of translation from robot origin to the camera origin
:param pc_centroid : Matrix(4x4 floats), the centroid of considered point cloud
:return: Vector(6 floats), the pose in robot system
"""
# transformation matrix of robot origin to point cloud center, xyz elements
tranl2 = create_tranl_matrix(vector=-np.array(pc_centroid))
r2pc_xyz_xtran = np.dot(np.dot(np.dot(np.dot(tranl2, tranl), rotz), roty), rotx)
pc2r_xyz_xtran = np.linalg.inv(r2pc_xyz_xtran)
# measure xyz
new_xyz = np.append(arr=ref_pose[:3], values=1, axis=None)
new_xyz = np.dot(r2pc_xyz_xtran, new_xyz)
import numpy as np
import matplotlib.pyplot as plt
#Entry point into the program
def main():
epochs = 30
learning = 0.1
batch = 300
#load data into matrices
pre_data = np.loadtxt(open("mnist_train.csv", "rb"), delimiter=",", skiprows=1)
pret_data = np.loadtxt(open("mnist_test.csv", "rb"), delimiter=",", skiprows=1)
t = pre_data[:,0:1] #cut first column (targets)
tt = pret_data[:,0:1] #cut first column (targets) from test data
#get main (training data)
data = getData(pre_data)
targets = getTargets(t, data.shape[0])
weights = getWeights(data.shape[1])
#returns multiple things
tup = train(epochs, data, weights, targets, learning, batch)
weights = tup[1]
trainCorrect = tup[0]
trainEpochs = tup[2]
trainWeights = tup[3]
#get test data
testData = getData(pret_data)
test_rows = testData.shape[0]
testTargets = getTargets(tt, test_rows)
#gets correct % prediction for each epoch (list)
testCorrect = test(epochs, testData, trainWeights, testTargets, batch)
#prints accuracy graph for training and test data
accuracyGraph(trainCorrect, batch, trainEpochs, epochs, testCorrect)
#print confusion matrix for the test data
confMat(testData, testTargets, weights, epochs, batch, learning)
#helper function to create initial weights matrix
def getWeights(cols):
weights = np.random.uniform(low=-0.5, high = 0.5, size=(cols, 10))
return weights
#helper function that creates a onehot matrix of targets from
#stripped first column (targets)
def getTargets(t, rows):
targets = np.zeros(shape=(rows, 10), dtype = int)
rN = 0
for target in t:
targets[rN, int(target)] = 1;
rN+=1
return targets
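#illustrative example, not called anywhere: getTargets turns the stripped label
#column into a one-hot matrix. With labels [2, 0, 9] the three returned rows
#have a single 1 at columns 2, 0 and 9 respectively.
def exampleGetTargets():
    t = np.array([[2.0], [0.0], [9.0]])
    return getTargets(t, 3)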
#helper function that strips data matrix from the first column (targets)
#and appends bias vector to the end, as well as optimizes data
def getData(pre_data):
#remove first column from the input data (targets)
data = np.delete(pre_data, 0, 1)
#optimize data
data = data / 255
#create bias vector and concat bias into data
rows = np.size(data, 0) #alt: data.shape[0]
cols = np.size(data, 1) #alt: data.shape[1]
bias = np.full((1, rows), 1);
data = np.concatenate((data, bias.T), axis = 1)
return data
#helper function that tests input data on the latest weights
#and returns list of correct predictions per epoch
def test(epochs, data, weightsList, targets, batch):
correctList = []
correct = 0;
for epoch in range(0, epochs):
correct = 0;
output = np.dot(data, weightsList[epoch])
output = np.where(output > 0, 1, 0)
#!python
"""This module provides functions to handle Bruker data.
It primarily implements the TimsTOF class, that acts as an in-memory container
for Bruker data accession and storage.
"""
# builtin
import os
import sys
import contextlib
import logging
# external
import numpy as np
import pandas as pd
import h5py
# local
import alphatims
import alphatims.utils
if sys.platform[:5] == "win32":
BRUKER_DLL_FILE_NAME = os.path.join(
alphatims.utils.EXT_PATH,
"timsdata.dll"
)
elif sys.platform[:5] == "linux":
BRUKER_DLL_FILE_NAME = os.path.join(
alphatims.utils.EXT_PATH,
"timsdata.so"
)
else:
logging.warning(
"WARNING: "
"No Bruker libraries are available for this operating system. "
"Mobility and m/z values need to be estimated. "
"While this estimation often returns acceptable results with errors "
"< 0.02 Th, huge errors (e.g. offsets of 6 Th) have already been "
"observed for some samples!"
)
logging.info("")
BRUKER_DLL_FILE_NAME = ""
def init_bruker_dll(bruker_dll_file_name: str = BRUKER_DLL_FILE_NAME):
"""Open a bruker.dll in Python.
Five functions are defined for this dll:
- tims_open: [c_char_p, c_uint32] -> c_uint64
- tims_close: [c_char_p, c_uint32] -> c_uint64
- tims_read_scans_v2: [c_uint64, c_int64, c_uint32, c_uint32, c_void_p, c_uint32] -> c_uint32
- tims_index_to_mz: [c_uint64, c_int64, POINTER(c_double), POINTER(c_double), c_uint32] -> None
- tims_scannum_to_oneoverk0: Same as "tims_index_to_mz"
Parameters
----------
bruker_dll_file_name : str
The absolute path to the timsdata.dll.
Default is alphatims.utils.BRUKER_DLL_FILE_NAME.
Returns
-------
: ctypes.cdll
The Bruker dll library.
"""
import ctypes
bruker_dll = ctypes.cdll.LoadLibrary(
os.path.realpath(bruker_dll_file_name)
)
bruker_dll.tims_open.argtypes = [ctypes.c_char_p, ctypes.c_uint32]
bruker_dll.tims_open.restype = ctypes.c_uint64
bruker_dll.tims_close.argtypes = [ctypes.c_uint64]
bruker_dll.tims_close.restype = None
bruker_dll.tims_read_scans_v2.argtypes = [
ctypes.c_uint64,
ctypes.c_int64,
ctypes.c_uint32,
ctypes.c_uint32,
ctypes.c_void_p,
ctypes.c_uint32
]
bruker_dll.tims_read_scans_v2.restype = ctypes.c_uint32
bruker_dll.tims_index_to_mz.argtypes = [
ctypes.c_uint64,
ctypes.c_int64,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double),
ctypes.c_uint32
]
bruker_dll.tims_index_to_mz.restype = ctypes.c_uint32
bruker_dll.tims_scannum_to_oneoverk0.argtypes = [
ctypes.c_uint64,
ctypes.c_int64,
ctypes.POINTER(ctypes.c_double),
ctypes.POINTER(ctypes.c_double),
ctypes.c_uint32
]
bruker_dll.tims_scannum_to_oneoverk0.restype = ctypes.c_uint32
bruker_dll.tims_set_num_threads.argtypes = [ctypes.c_uint64]
bruker_dll.tims_set_num_threads.restype = None
bruker_dll.tims_set_num_threads(alphatims.utils.MAX_THREADS)
# multiple threads is equally fast as just 1 for io?
# bruker_dll.tims_set_num_threads(1)
return bruker_dll
@contextlib.contextmanager
def open_bruker_d_folder(
bruker_d_folder_name: str,
bruker_dll_file_name=BRUKER_DLL_FILE_NAME,
) -> tuple:
"""A context manager for a bruker dll connection to a .d folder.
Parameters
----------
bruker_d_folder_name : str
The name of a Bruker .d folder.
bruker_dll_file_name : str, ctypes.cdll
The path to Bruker' timsdata.dll library.
Alternatively, the library itself can be passed as argument.
Default is alphatims.utils.BRUKER_DLL_FILE_NAME,
which in itself is dependent on the OS.
Returns
-------
: tuple (ctypes.cdll, int).
The opened bruker dll and identifier of the .d folder.
"""
try:
if isinstance(bruker_dll_file_name, str):
bruker_dll = init_bruker_dll(bruker_dll_file_name)
logging.info(f"Opening handle for {bruker_d_folder_name}")
bruker_d_folder_handle = bruker_dll.tims_open(
bruker_d_folder_name.encode('utf-8'),
0
)
yield bruker_dll, bruker_d_folder_handle
finally:
logging.info(f"Closing handle for {bruker_d_folder_name}")
bruker_dll.tims_close(bruker_d_folder_handle)
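# Illustrative usage sketch (not part of the original module). The .d folder
# path is a placeholder; the context manager opens a handle through the Bruker
# dll and is guaranteed to close it again on exit.
def _example_open_bruker_d_folder():
    with open_bruker_d_folder("/path/to/sample.d") as (dll, handle):
        # `dll` is the loaded timsdata library, `handle` the opened .d folder
        return handle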
def read_bruker_sql(
bruker_d_folder_name: str,
add_zeroth_frame: bool = True,
drop_polarity: bool = True,
convert_polarity_to_int: bool = True,
) -> tuple:
"""Read metadata, (fragment) frames and precursors from a Bruker .d folder.
Parameters
----------
bruker_d_folder_name : str
The name of a Bruker .d folder.
add_zeroth_frame : bool
Bruker uses 1-indexing for frames.
If True, a zeroth frame is added without any TOF detections to
make Python simulate this 1-indexing.
If False, frames are 0-indexed.
Default is True.
drop_polarity : bool
The polarity column of the frames table contains "+" or "-" and
is not numerical.
If True, the polarity column is dropped from the frames table.
This ensures a fully numerical pd.DataFrame.
If False, this column is kept, resulting in a pd.DataFrame with
dtype=object.
Default is True.
convert_polarity_to_int : bool
Convert the polarity to int (-1 or +1).
This allows to keep it in numerical form.
This is ignored if the polarity is dropped.
Default is True.
Returns
-------
: tuple
(str, dict, pd.DataFrame, pd.DataFrame, pd.DataFrame).
The acquisition_mode, global_meta_data, frames, fragment_frames
and precursors.
For diaPASEF, precursors is None.
"""
import sqlite3
logging.info(f"Reading frame metadata for {bruker_d_folder_name}")
with sqlite3.connect(
os.path.join(bruker_d_folder_name, "analysis.tdf")
) as sql_database_connection:
global_meta_data = pd.read_sql_query(
"SELECT * from GlobalMetaData",
sql_database_connection
)
frames = pd.read_sql_query(
"SELECT * FROM Frames",
sql_database_connection
)
if 9 in frames.MsMsType.values:
acquisition_mode = "diaPASEF"
fragment_frames = pd.read_sql_query(
"SELECT * FROM DiaFrameMsMsInfo",
sql_database_connection
)
fragment_frame_groups = pd.read_sql_query(
"SELECT * from DiaFrameMsMsWindows",
sql_database_connection
)
fragment_frames = fragment_frames.merge(
fragment_frame_groups,
how="left"
)
fragment_frames.rename(
columns={"WindowGroup": "Precursor"},
inplace=True
)
precursors = None
elif 8 in frames.MsMsType.values:
acquisition_mode = "ddaPASEF"
fragment_frames = pd.read_sql_query(
"SELECT * from PasefFrameMsMsInfo",
sql_database_connection
)
precursors = pd.read_sql_query(
"SELECT * from Precursors",
sql_database_connection
)
else:
acquisition_mode = "noPASEF"
fragment_frames = pd.DataFrame(
{
"Frame": np.array([0]),
"ScanNumBegin": np.array([0]),
"ScanNumEnd": np.array([0]),
"IsolationWidth": np.array([0]),
"IsolationMz": np.array([0]),
"Precursor": np.array([0]),
}
)
precursors = None
# raise ValueError("Scan mode is not ddaPASEF or diaPASEF")
if add_zeroth_frame:
frames = pd.concat(
[
pd.DataFrame(frames.iloc[0]).T,
frames,
],
ignore_index=True
)
frames.Id[0] = 0
frames.Time[0] = 0
frames.MaxIntensity[0] = 0
frames.SummedIntensities[0] = 0
frames.NumPeaks[0] = 0
polarity_col = frames["Polarity"].copy()
frames = pd.DataFrame(
{
col: pd.to_numeric(
frames[col]
) for col in frames if col != "Polarity"
}
)
if not drop_polarity:
if convert_polarity_to_int:
frames['Polarity'] = polarity_col.apply(
lambda x: 1 if x == "+" else -1
).astype(np.int8)
else:
frames['Polarity'] = polarity_col
return (
acquisition_mode,
global_meta_data,
frames,
fragment_frames,
precursors
)
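# Illustrative usage sketch (the path is a placeholder, not part of the
# module): read_bruker_sql returns the acquisition mode plus four pandas
# objects; for diaPASEF data the precursors table is None.
def _example_read_bruker_sql():
    (acquisition_mode, global_meta_data, frames,
     fragment_frames, precursors) = read_bruker_sql("/path/to/sample.d")
    return acquisition_mode, frames.shape, fragment_frames.shape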
@alphatims.utils.njit(nogil=True)
def parse_decompressed_bruker_binary_type2(decompressed_bytes: bytes) -> tuple:
"""Parse a Bruker binary frame buffer into scans, tofs and intensities.
Parameters
----------
decompressed_bytes : bytes
A Bruker frame binary buffer that is already decompressed with pyzstd.
Returns
-------
: tuple (np.uint32[:], np.uint32[:], np.uint32[:]).
The scan_indices, tof_indices and intensities present in this binary
array
"""
temp = np.frombuffer(decompressed_bytes, dtype=np.uint8)
buffer = np.frombuffer(temp.reshape(4, -1).T.flatten(), dtype=np.uint32)
scan_count = buffer[0]
scan_indices = buffer[:scan_count].copy() // 2
scan_indices[0] = 0
tof_indices = buffer[scan_count::2].copy()
index = 0
for size in scan_indices:
current_sum = 0
for i in range(size):
current_sum += tof_indices[index]
tof_indices[index] = current_sum
index += 1
intensities = buffer[scan_count + 1::2]
last_scan = len(intensities) - np.sum(scan_indices[1:])
scan_indices[:-1] = scan_indices[1:]
scan_indices[-1] = last_scan
return scan_indices, tof_indices, intensities
@alphatims.utils.njit(nogil=True)
def parse_decompressed_bruker_binary_type1(
decompressed_bytes: bytes,
scan_indices_: np.ndarray,
tof_indices_: np.ndarray,
intensities_: np.ndarray,
scan_start: int,
scan_index: int,
) -> int:
"""Parse a Bruker binary scan buffer into tofs and intensities.
Parameters
----------
decompressed_bytes : bytes
A Bruker scan binary buffer that is already decompressed with lzf.
scan_indices_ : np.ndarray
The scan_indices_ buffer array.
tof_indices_ : np.ndarray
The tof_indices_ buffer array.
intensities_ : np.ndarray
The intensities_ buffer array.
scan_start : int
The offset where to start new tof_indices and intensity_values.
scan_index : int
The scan index.
Returns
-------
: int
The number of peaks in this scan.
"""
buffer = np.frombuffer(decompressed_bytes, dtype=np.int32)
import numpy as np
from numba import jit,prange
from copy import deepcopy
__all__ = ['proporcional_sampler', 'roulette_sampler', 'stochastic_universal_sampler',\
'deterministic_sampler', 'tournament_sampler', 'merge_selector', 'replacement_selector',\
'proporcional_sampling', 'roulette_sampling', 'stochastic_universal_sampling',\
'deterministic_sampling', 'tournament_sampling']
@jit(nopython=True, parallel=True)
def get_expected_values(aptitude: np.array) -> np.array:
averageAptitude = np.sum(aptitude)
N = len(aptitude)
return aptitude/averageAptitude * N
"""
---------------------------------------------------------------------------------
| Genetic Algorithms. |
---------------------------------------------------------------------------------
"""
"""
---------------------------------------------------------------------------------
Parent selection.
---------------------------------------------------------------------------------
"""
@jit(nopython=True,parallel=False)
def proporcional_sampling(expectedVals: np.ndarray) -> np.ndarray:
return np.arange(len(expectedVals))
@jit(nopython=True,parallel=True)
def roulette_sampling(expectedVals : np.ndarray) -> np.ndarray:
"""
------------------------------------------------------
Description:
selection algorithm O(nlogn) for genetic algorithm.
Arguments:
probabilities: 1-D numpy array which is the probability of every
individual based on the aptitude.
About:
If you want more information, check:
------------------------------------------------------
"""
expectedVals = get_expected_values(expectedVals)
N = len(expectedVals)
# expectedVals_ = np.copy(expectedVals)
expectedCumulative = np.cumsum(expectedVals)
r = np.random.uniform(0.0,expectedCumulative[-1], N)
return np.searchsorted(expectedCumulative, r) #sample
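# Illustrative sketch, not part of the original module: drawing a roulette
# sample from a small, arbitrary aptitude vector. Fitter individuals are more
# likely to appear (possibly several times) in the returned index array.
def _example_roulette_sampling():
    aptitude = np.array([1.0, 2.0, 3.0, 4.0])
    return roulette_sampling(aptitude)  # e.g. array([3, 1, 3, 2])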
@jit(nopython=False, parallel=False)
def stochastic_universal_sampling(expectedVals : np.ndarray) -> np.ndarray:
"""
------------------------------------------------------
Arguments:
probabilities: 1-D numpy array which is the probability of every
individual based on the aptitude.
About:
If you want more information, check:
------------------------------------------------------
"""
expectedVals = get_expected_values(expectedVals)
N = len(expectedVals)
# expectedVals_ =np.copy(expectedVals)
r = np.random.uniform(0,1)
currentSum = 0
sample = []
for i in range(N):
currentSum += expectedVals[i]
while r < currentSum:
sample.append(i)
r += 1
return np.array(sample)
def deterministic_sampling(expectedVals : np.ndarray) -> np.ndarray:
expectedVals = get_expected_values(expectedVals)
N = len(expectedVals)
integer_part = np.array(expectedVals, dtype=int)
indices = np.arange(N)
sample = np.repeat(indices,integer_part)
if(len(sample) < N):
float_part = expectedVals - integer_part
ind = np.argpartition(float_part,len(sample) - N)[len(sample) - N:]
sample = np.concatenate((sample, indices[ind]))
return sample[:N]
def tournament_sampling( expectedVals : np.ndarray, chunks : int=2 , prob: float=1.0) -> np.ndarray:
"""
------------------------------------------------------
Description:
selection algorithm O(NP) for genetic algorithm where N is the population
size and P is the number of chunks.
Arguments:
probabilities: 1-D numpy array which is the probability of every
individual based on the aptitude.
chunks: Integer value which is the number of chunks (default 2).
prob: Float value, the probability of choosing the maximum (rather than
the minimum) value in each chunk (default 1.0).
About:
If you want more information, check:
------------------------------------------------------
"""
expectedVals = get_expected_values(expectedVals)
N=len(expectedVals)
indices = np.arange(N)
chunks_ = np.ceil(N/chunks)
sample = []
ind = -1
for tournament in range(chunks):
groups = np.array_split(np.random.permutation(indices), chunks_)
for i in prange(len(groups)):
if prob >= np.random.rand():
ind = np.argmax(expectedVals[groups[i]])
else:
ind = np.argmin(expectedVals[groups[i]])
sample.append(groups[i][ind])
return np.array(sample)
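# Illustrative sketch (not part of the original module): the same arbitrary
# aptitude vector fed through the deterministic and tournament samplers, just
# to show the call signatures; both return arrays of selected indices.
def _example_parent_samplers():
    aptitude = np.array([0.5, 1.5, 2.5, 3.5, 4.0])
    det = deterministic_sampling(aptitude)
    tour = tournament_sampling(aptitude, chunks=2, prob=0.9)
    return det, tour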
"""
---------------------------------------------------------------------------------
Survivor selection.
---------------------------------------------------------------------------------
"""
def get_lowest_indices(value: np.ndarray, m: int) -> np.ndarray:
"""
------------------------------------------------------
Description:
Return m indices with the lowest fitness.
Arguments:
value: float array.
m: int element.
------------------------------------------------------
"""
pseudo_sorted_array = np.argpartition(value, m)
return pseudo_sorted_array[:m]
"""
---------------------------------------------------------------------------------
Classes
---------------------------------------------------------------------------------
"""
"""
----------------------------
Parent Selection.
----------------------------
"""
class proporcional_sampler:
def __init__(self, transform=None):
self.__doc__ = "Proporcional sampling"
def __call__(self,population_f: np.ndarray) -> np.ndarray:
return proporcional_sampling(population_f)
class roulette_sampler:
def __init__(self, transform=None):
self.__doc__ = "Roulette sampling"
self.transform = transform
def __call__(self,population_f: np.ndarray) -> np.ndarray:
vals = np.copy(population_f)
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import theano
from ..light_curves import LimbDarkLightCurve
from .simple import SimpleTransitOrbit
def test_simple():
period = 3.456
t0 = 1.45
b = 0.5
duration = 0.12
r_star = 1.345
t = t0 + np.linspace(-2 * period, 2 * period, 5000)
#This file plots the positivity rate (fraction of true positives) and the frequency of each class in a boxplot and a histogram after Platt scaling.
import sparsechem as sc
import numpy as np
import argparse
import scipy
import pandas as pd
import matplotlib.pyplot as plt
import math
import scipy.stats as sci
import matplotlib.gridspec as gridspec
from datetime import datetime
date=datetime.now().strftime('%Y_%m_%d-%I:%M:%S_%p')
parser = argparse.ArgumentParser(description="Obtaining Histograms for Probability Calibration for singular Taget")
parser.add_argument("--y_class", "--y", "--y_classification", help="Sparse pattern file for classification, optional. If provided returns predictions for given locations only (matrix market, .npy or .npz)", type=str, default=None)
parser.add_argument("--y_hat_platt", help="predicted Values after platt scaling", type=str, default=None)
parser.add_argument("--folding", help="Folds for rows of y, optional. Needed if only one fold should be predicted.", type=str, required=False)
parser.add_argument("--predict_fold", help="One or more folds, integer(s). Needed if --folding is provided.", nargs="+", type=int, required=False)
parser.add_argument("--targetID", help="TargetID", type=int, required=True)
args = parser.parse_args()
#load data
TargetID=args.targetID
y_class = sc.load_sparse(args.y_class)
y_hat_platt=sc.load_sparse(args.y_hat_platt)
#select correct fold for y_class
folding = np.load(args.folding)
from pathlib import Path
from datetime import datetime
from unittest import mock
import numpy as np
import astropy.units as u
from astropy.time import Time
from sunpy.net import attrs as a
from radiospectra.spectrogram2 import Spectrogram
from radiospectra.spectrogram2.sources import WAVESSpectrogram
@mock.patch('radiospectra.spectrogram2.spectrogram.parse_path')
def test_waves_rad1(parse_path_moc):
meta = {
'instrument': 'WAVES',
'observatory': 'wind',
'start_time': Time('2020-11-28 00:00:00'),
'end_time': Time('2020-11-28 23:59:00'),
'wavelength': a.Wavelength(20*u.kHz, 1040*u.kHz),
'detector': 'rad1',
'freqs': np.linspace(20, 1040, 256),
from __future__ import print_function
import argparse
import time
import numpy as np
# import h5py
from skimage.measure import compare_psnr as psnr
from skimage.measure import compare_ssim as ssim
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision.transforms import ToTensor
from model import NetARCNN
from model import NetARCNN_deform
from PIL import Image
# from torchvision.transforms import ToTensor
from math import log10
# Training settings
parser = argparse.ArgumentParser(description='PyTorch Deblocking Example')
parser.add_argument('--input_image', type=str, required=True, help='input image to use')
parser.add_argument('--model', type=str, required=True, help='model file to use')
parser.add_argument('--target_image', type=str, required=True, help='target image to use')
parser.add_argument('--output_filename', type=str, help='where to save the output image')
parser.add_argument('--cuda', action='store_true', help='use cuda')
opt = parser.parse_args()
print(opt)
img = Image.open(opt.input_image)
target = Image.open(opt.target_image)
model = NetARCNN()
# model = NetARCNN_deform()
model.load_state_dict(torch.load(opt.model))
print("model built")
# img = np.asarray(img)
# img = img.astype(np.float32)/255
# target = target.astype(np.float32)
# target = torch.from_numpy(target)
# target = Variable(target.view(1, -1, target.size[1], target.size[0]))
# print(img.size)
# input = torch.from_numpy(img)
# input = Variable(input.view(1, -1, input.size[1], input.size[0])) # ? first 1 then 0?
# Actually, ToTensor() will operate the normalization automatically
input = Variable(ToTensor()(img)).view(1, -1, img.size[1], img.size[0])
target0 = Variable(ToTensor()(target)).view(1, -1, target.size[1], target.size[0])
# print(input,target0)
# check dataprocess for detail
if opt.cuda:
model = model.cuda()
input = input.cuda()
target0 = target0.cuda()
a = time.time()
for index in range(300):
output = model(input)
b = time.time() - a
print("time spent for deblocking 300 images: ", b, "s\nMeaning that the deblocking rate is ", 300/b, "fps")
criterion = nn.MSELoss()
mse1 = criterion(input, target0)
mse2 = criterion(output, target0)
psnr01 = 10 * log10(1/mse1.data[0])
psnr02 = 10 * log10(1/mse2.data[0])
print("Before: PSNR {} dB (provided by PyTorch)\nAfter: PSNR {} dB(provided by PyTorch)".format(psnr01, psnr02))
out = output.cpu()
out = out.data[0].numpy() * 255.0
# is clip necessary?
out = np.uint8(out.clip(0,255))
img = np.asarray(img)
import collections
import copy
import logging
from multiprocessing.sharedctypes import RawArray
from typing import Dict, Any
import numpy as np
import scipy
from monty.serialization import dumpfn, loadfn
from amset import amset_defaults
from amset.misc.constants import k_B
from pymatgen import Spin
from pymatgen.util.string import latexify_spacegroup
__author__ = "<NAME>"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "June 21, 2019"
spin_name = {Spin.up: "spin-up", Spin.down: "spin-down"}
logger = logging.getLogger(__name__)
def create_shared_array(data, return_buffer=False):
data = np.asarray(data)
shared_data = RawArray("d", int(np.prod(data.shape)))
buffered_data = np.frombuffer(shared_data).reshape(data.shape)
buffered_data[:] = data[:]
if return_buffer:
return shared_data, buffered_data
else:
return shared_data
def validate_settings(user_settings):
settings = copy.deepcopy(amset_defaults)
def recursive_update(d, u):
""" Recursive dict update."""
for k, v in u.items():
if isinstance(v, collections.Mapping):
d[k] = recursive_update(d.get(k, {}), v)
else:
d[k] = v
return d
recursive_update(settings, user_settings)
# validate the type of some settings
if isinstance(settings["general"]["doping"], (int, float)):
settings["general"]["doping"] = [settings["general"]["doping"]]
elif isinstance(settings["general"]["doping"], str):
settings["general"]["doping"] = parse_doping(
settings["general"]["doping"])
if isinstance(settings["general"]["temperatures"], (int, float)):
settings["general"]["temperatures"] = [
settings["general"]["temperatures"]]
elif isinstance(settings["general"]["temperatures"], str):
settings["general"]["temperatures"] = parse_temperatures(
settings["general"]["temperatures"])
if isinstance(settings["material"]["deformation_potential"], str):
settings["general"]["deformation_potential"] = \
parse_deformation_potential(
settings["general"]["deformation_potential"])
settings["general"]["doping"] = np.asarray(settings["general"]["doping"])
settings["general"]["temperatures"] = np.asarray(
settings["general"]["temperatures"])
return settings
def tensor_average(tensor):
return np.average(scipy.linalg.eigvalsh(tensor))
def groupby(a, b):
# Get argsort indices, to be used to sort a and b in the next steps
sidx = b.argsort(kind='mergesort')
a_sorted = a[sidx]
b_sorted = b[sidx]
# Get the group limit indices (start, stop of groups)
cut_idx = np.flatnonzero(
np.r_[True, b_sorted[1:] != b_sorted[:-1], True])
# Split input array with those start, stop ones
out = np.array(
[a_sorted[i:j] for i, j in zip(cut_idx[:-1], cut_idx[1:])])
return out
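# Illustrative worked example (not part of the original module): groupby(a, b)
# gathers the entries of `a` that share the same value in `b`. With
# a = [10, 20, 30, 40] and b = [1, 0, 1, 0] it returns [[20, 40], [10, 30]],
# i.e. groups ordered by the sorted values of b.
def _example_groupby():
    a = np.array([10, 20, 30, 40])
    b = np.array([1, 0, 1, 0])
    return groupby(a, b)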
def unicodeify_spacegroup(spacegroup_symbol: str):
subscript_unicode_map = {
0: "₀",
1: "₁",
2: "₂",
3: "₃",
4: "₄",
5: "₅",
6: "₆",
7: "₇",
8: "₈",
9: "₉",
}
symbol = latexify_spacegroup(spacegroup_symbol)
for number, unicode_number in subscript_unicode_map.items():
symbol = symbol.replace("$_{" + str(number) + "}$", unicode_number)
overline = "\u0305" # u"\u0304" (macron) is also an option
symbol = symbol.replace("$\\overline{", overline)
symbol = symbol.replace("$", "")
symbol = symbol.replace("{", "")
symbol = symbol.replace("}", "")
return symbol
def write_settings_to_file(settings: Dict[str, Any], filename: str):
"""Write amset configuration settings to a formatted yaml file.
Args:
settings: The configuration settings.
filename: A filename.
"""
settings = cast_dict(settings)
dumpfn(settings, filename, indent=4, default_flow_style=False)
def load_settings_from_file(filename: str) -> Dict[str, Any]:
"""Load amset configuration settings from a yaml file.
If the settings file does not contain a required parameter, the default
value will be added to the configuration.
An example file is given in *amset/examples/example_settings.yaml*.
Args:
filename: Path to settings file.
Returns:
The settings, with any missing values set according to the amset
defaults.
"""
logger.info("Loading settings from: {}".format(filename))
settings = loadfn(filename)
return validate_settings(settings)
def cast_dict(d):
new_d = {}
for k, v in d.items():
# cast keys
if isinstance(k, Spin):
k = k.value
if isinstance(v, collections.Mapping):
new_d[k] = cast_dict(v)
else:
# cast values
if isinstance(v, np.ndarray):
v = v.tolist()
elif isinstance(v, tuple):
v = list(v)
new_d[k] = v
return new_d
def gen_even_slices(n, n_packs):
"""Generator to create n_packs slices going up to n.
Parameters
----------
n : int
n_packs : int
Number of slices to generate.
Yields
------
slice
Examples
--------
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
yield slice(start, end, None)
start = end
def kpoints_to_first_bz(kpoints: np.ndarray) -> np.ndarray:
"""Translate fractional k-points to the first Brillouin zone.
I.e. all k-points will have fractional coordinates:
-0.5 <= fractional coordinates <= 0.5
Args:
kpoints: The k-points in fractional coordinates.
Returns:
The translated k-points.
"""
return kpoints - np.round(kpoints)
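# Illustrative example (not part of the original module): a fractional k-point
# outside the first Brillouin zone is folded back by subtracting the nearest
# integer, e.g. [0.75, -0.6, 0.5] -> [-0.25, 0.4, 0.5].
def _example_kpoints_to_first_bz():
    return kpoints_to_first_bz(np.array([[0.75, -0.6, 0.5]]))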
def parse_doping(doping_str: str):
doping_str = doping_str.strip().replace(" ", "")
try:
if ":" in doping_str:
parts = list(map(float, doping_str.split(":")))
if len(parts) != 3:
raise ValueError
return np.geomspace(parts[0], parts[1], int(parts[2]))
else:
return np.array(list(map(float, doping_str.split(","))))
except ValueError:
raise ValueError("ERROR: Unrecognised doping format: {}".format(
doping_str))
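# Illustrative example (not part of the original module): the colon syntax is
# expanded geometrically, the comma syntax is taken literally.
def _example_parse_doping():
    geom = parse_doping("1e16:1e18:3")    # -> array([1e16, 1e17, 1e18])
    explicit = parse_doping("1e15,1e17")  # -> array([1e15, 1e17])
    return geom, explicit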
def parse_temperatures(temperatures_str: str):
temperatures_str = temperatures_str.strip().replace(" ", "")
try:
if ":" in temperatures_str:
parts = list(map(float, temperatures_str.split(":")))
if len(parts) != 3:
raise ValueError
return np.linspace(parts[0], parts[1], int(parts[2]))
else:
return np.array(list(map(float, temperatures_str.split(","))))
except ValueError:
raise ValueError("ERROR: Unrecognised temperature format: {}".format(
temperatures_str))
def parse_deformation_potential(deformation_pot_str: str):
deformation_pot_str = deformation_pot_str.strip().replace(" ", "")
try:
parts = list(map(float, deformation_pot_str.split(",")))
if len(parts) == 1:
return parts[0]
elif len(parts) == 2:
return tuple(parts)
else:
raise ValueError
except ValueError:
raise ValueError("ERROR: Unrecognised deformation potential format: "
"{}".format(deformation_pot_str))
def f0(energy, fermi, temperature):
"""
Returns the value of Fermi-Dirac distribution at equilibrium.
Args:
energy (float): energy in eV
fermi (float): the Fermi level with the same reference as E (in eV)
temperature (float): the absolute temperature in Kelvin.
Returns (0<float<1):
The occupation calculated by Fermi dirac
"""
return 1. / (1. + np.exp((energy - fermi) / (k_B * temperature)))
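# Illustrative check (not part of the original module), assuming k_B is given
# in eV/K as used elsewhere in amset: at the Fermi level the occupation is
# exactly 0.5, and a state 0.3 eV above it at 300 K is almost empty (~1e-5).
def _example_f0():
    at_fermi = f0(energy=0.5, fermi=0.5, temperature=300)  # -> 0.5
    above = f0(energy=0.8, fermi=0.5, temperature=300)     # ~ 1e-5
    return at_fermi, above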
def df0de(energy, fermi, temperature):
"""
Returns the energy derivative of the Fermi-Dirac equilibrium distribution
Args: see Args for f0(energy, fermi, temperature)
Returns (float<0): the energy derivative of the Fermi-Dirac distribution.
"""
exponent = (energy - fermi) / (k_B * temperature)
result = -1 / (k_B * temperature) * np.exp(exponent) / (1 + np.exp(exponent)) ** 2
return result
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 8 10:44:58 2017
@author: chris
"""
"""
Muse output server
==================
This script shows how to process and stream different Muse outputs, such as:
-
TODO:
- Make musetools.realtime.EEGServer more general so we don't have to
create a new class for this example (and instead only plug in
necessary pieces or inherit from it).
"""
import time
from threading import Thread
import numpy as np
from scipy import signal, interpolate
from pylsl import StreamInlet, resolve_byprop
USE_LIBLO = True # s.name != 'nt'
if USE_LIBLO:
from liblo import ServerThread, Address, Message, Bundle, send
print('using Liblo')
else:
from pythonosc import dispatcher, osc_server, udp_client
print('using pythonOSC')
import live_utils as ut
from optparse import OptionParser
usage = "python museProc_tenere.py --port 9999 --oscip 127.0.0.1 --oscport 7878 --sparseip 10.0.0.14 --sparseport 1234"
parser = OptionParser(usage=usage)
parser.add_option("-l", "--port",
dest="port", type='int', default=9810,
help="port to listen for muse data on")
parser.add_option("-o", "--oscip",
dest="oscip", type='string', default="127.0.0.1",
help="IP address of Tenere LXstudio to send OSC message to")
parser.add_option("-p", "--oscport",
dest="oscport", type='int', default=7878,
help="The oort that Tenere LXstudio is listening on ")
parser.add_option("-r", "--sparseip",
dest="sparseip", type='string', default="127.0.0.1",
help="IP address of the Pi to send OSC status message to")
parser.add_option("-s", "--sparseport",
dest="sparseport", type='int', default=1234,
help="Port for OSC status messages on the Pi")
(options, args) = parser.parse_args()
class FFTServer():
"""Server to receive EEG data and stream classifier outputs.
Attributes:
See args.
Args:
incoming (str or dict): incoming data stream. If provided as a
string, look for an LSL stream with the corresponding type. If
provided as dict with fields `address` and `port`, open an OSC
port at that address and port.
outgoing (str or dict): outgoing data stream. If provided as a
string, stream to an LSL stream with the corresponding type. If
provided as dict with fields `address` and `port`, stream to an
OSC port at that address and port.
Keyword Args:
config (dict): dictionary containing the configuration and
preprocessing parameters, e.g. (these are the default values):
config = {'fs': 256.,
'n_channels': 4,
'raw_buffer_len': 3 * fs,
'filt_buffer_len': 3 * fs,
'window_len': fs,
'step': int(fs / 10),
'filter': ([1], [1]),
'psd_window_len': 256.,
'psd_buffer_len': 10}
device_source (str): Device from which the data is coming from.
'muse' or 'vive'
streaming_source (str): Software source of the data stream:
'muselsl'
'musedirect'
'musemonitor'
debug_outputs (bool): if True, send debug outputs (not used by VR
experience)
verbose (bool): if True, print status whenever new data is
received or sent.
"""
def __init__(self, incoming, outgoing, sparseOutput=None, config={}, device_source='Muse',
software_source='muselsl', debug_outputs=True, verbose=False):
self.incoming = incoming
self.outgoing = outgoing
self.sparseOutput = sparseOutput
self.device_source = device_source
self.software_source = software_source
self.debug_outputs = debug_outputs
self.verbose = verbose
self.eeg_chunk_length = 12
# 1. Initialize inlet
if isinstance(self.incoming, str): # LSL inlet
print('Looking for the {} stream...'.format(incoming))
self._stream = resolve_byprop('type', incoming, timeout=2)
if len(self._stream) == 0:
raise(RuntimeError('Can\'t find {} stream.'.format(incoming)))
print('Acquiring data from the \'{}\' stream...'.format(incoming))
self._inlet = StreamInlet(self._stream[0],
max_chunklen=self.eeg_chunk_length)
self._info_in = self._inlet.info()
else: # OSC port
if USE_LIBLO:
self._osc_server = ServerThread(incoming['port'])
print('OSC server initialized at port {}.'.format(
incoming['port']))
else:
self._dispatcher = dispatcher.Dispatcher()
print('python-osc dispatcher initialized.')
# 2. Initialize outlets
if not isinstance(self.outgoing, tuple):
self.outgoing = [self.outgoing]
self._output_threads = []
for out in self.outgoing:
if isinstance(out, str): # LSL outlet
raise NotImplementedError
elif isinstance(out, dict): # OSC port
if USE_LIBLO:
self._output_threads.append(Address(out['address'],
out['port']))
else:
raise NotImplementedError
# self._client = udp_client.SimpleUDPClient(
# outgoing['address'], outgoing['port'])
print('OSC client initialized at {}:{}.'.format(
out['address'], out['port']))
if (self.sparseOutput !=None):
if not isinstance(self.sparseOutput, tuple):
self.sparseOutput = [self.sparseOutput]
self._sparseOutput_threads = []
for out in self.sparseOutput:
if isinstance(out, str): # LSL outlet
raise NotImplementedError
elif isinstance(out, dict): # OSC port
if USE_LIBLO:
self._sparseOutput_threads.append(Address(out['address'],
out['port']))
else:
raise NotImplementedError
print('OSC sparse output client initialized at {}:{}.'.format(
out['address'], out['port']))
# 3. Initialize internal buffers and variables
self._init_processing(config)
def _init_processing(self, config):
"""Initialize internal buffers and variables for EEG processing.
Args:
config (dict): dictionary containing various parameters. See
DEFAULT_CONFIG below for default values.
fs (float): sampling frequency
n_channels (int): number of channels
raw_buffer_len (int): raw data buffer length
filt_buffer_len (int): filtered data buffer length
window_len (int): processing window length
step (int): number of samples between two consecutive
windows to process
filter (tuple or dict): filtering parameters. If provided
as a tuple, the first and second elements should be
the `b` and `a` coefficients of a filter. If provided
as a dictionary, the fields `order`, `l_freq`, `h_freq`
and `method` are required; the function
pre.get_filter_coeff() will then be used to compute the
coefficients.
If None, don't use a filter (windows will be extracted
from the raw buffer).
psd_window_len (int): length of the window to use for PSD
psd_buffer_len (int): PSD buffer length
"""
DEFAULT_CONFIG = {'fs': 256.,
'n_channels': 5,
'raw_buffer_len': 3 * 256,
'filt_buffer_len': 3 * 256,
'window_len': 256,
'step': int(256 / 10),
'filter': ([1], [1]),
'filter_bank': {},
'psd_window_len': 256.,
'psd_buffer_len': 10}
config = {**DEFAULT_CONFIG, **config}
self.fs = config['fs']
self.n_channels = config['n_channels']
# Initialize EEG channel remapping parameters
self.eeg_ch_remap = None
if self.device_source.lower() == 'vive':
self.eeg_ch_remap = [3, 1, 2, 3, 4]
self.n_channels = 5
if self.software_source.lower() == 'musedirect':
self.eeg_ch_remap[-1] = 5
self.n_channels = 5
if self.device_source.lower() == 'leroy':
self.eeg_ch_remap = None
self.n_channels = 4
if self.device_source.lower() == 'muse':
self.eeg_ch_remap = None
self.n_channels = 4
if self.device_source.lower() == 'vivehr':
self.eeg_ch_remap = [3, 1, 2, 3, 0]
self.n_channels = 5
# Initialize the EEG buffers
raw_buffer_len = int(config['raw_buffer_len'])
filt_buffer_len = int(config['filt_buffer_len'])
self.eeg_buffer = ut.NanBuffer(raw_buffer_len, self.n_channels)
self.filt_eeg_buffer = ut.CircularBuffer(filt_buffer_len,
self.n_channels)
self.hpfilt_eeg_buffer = ut.CircularBuffer(filt_buffer_len,
self.n_channels)
self.smooth_eeg_buffer = ut.CircularBuffer(filt_buffer_len,
self.n_channels)
self.eyeH_buffer = ut.CircularBuffer(100,1)
# Initialize the EEG filter
if config['filter']:
if isinstance(config['filter'], tuple):
b = config['filter'][0]
a = config['filter'][1]
elif isinstance(config['filter'], dict):
b, a = ut.get_filter_coeff(self.fs, **config['filter'])
zi = np.tile(signal.lfilter_zi(b, a), (self.n_channels, 1)).T
self.bandpass_filt = {'b': b,
'a': a,
'zi': zi}
if config['hpfilter']:
b = config['hpfilter'][0]
a = config['hpfilter'][1]
zi = np.tile(signal.lfilter_zi(b, a), (self.n_channels, 1)).T
self.hp_filt = {'b': b,
'a': a,
'zi': zi}
if config['lpfilter']:
b = config['lpfilter'][0]
a = config['lpfilter'][1]
zi = np.tile(signal.lfilter_zi(b, a), (self.n_channels, 1)).T
self.lp_filt = {'b': b,
'a': a,
'zi': zi}
# Initialize the filter bank
if config['filter_bank']:
self.filter_bank = {}
for name, coeff in config['filter_bank'].items():
zi = np.tile(signal.lfilter_zi(coeff[0], coeff[1]),
(self.n_channels, 1)).T
self.filter_bank[name] = {'b': coeff[0],
'a': coeff[1],
'zi': zi}
# Initialize processing parameters
self.window_len = int(config['window_len'])
self.step = int(config['step'])
# Initialize processing buffers
psd_buffer_len = int(config['psd_buffer_len'])
self.psd_buffer = ut.CircularBuffer(psd_buffer_len, 129,
self.n_channels)
# Initialize scoring histograms
decayRate = 0.997
        self.hists = {'delta': ut.Histogram(1000, self.n_channels, bounds=(0, 50), min_count=80, decay=decayRate),
                      'theta': ut.Histogram(1000, self.n_channels, bounds=(0, 30), min_count=80, decay=decayRate),
                      'alpha': ut.Histogram(1000, self.n_channels, bounds=(0, 20), min_count=80, decay=decayRate),
                      'beta': ut.Histogram(1000, self.n_channels, bounds=(0, 10), min_count=80, decay=decayRate),
                      'gamma': ut.Histogram(1000, self.n_channels, bounds=(0, 10), min_count=80, decay=decayRate)}
        self.eyeH_hist = ut.Histogram(500, 1, bounds=(0, 10000), min_count=80, decay=decayRate)
        self.emg_hist = ut.Histogram(500, 1, bounds=(0, 10), min_count=80, decay=decayRate)
self.blinkwait = 0
self.blink = 0
self.firstWindowProc = True
        self.band_names = 0
        self.band_powers = 0
        self.ratio_powers = 0
        self.ratio_names = 0
# Used for calm score
self.slow_calm_score = 0
self.slow_alpha_score = 0
self.eye_mov_percent_buffer = ut.CircularBuffer(256, 1)
self.slow_calm_score_buffer = ut.CircularBuffer(512, 1)
self.increments_buffer = ut.CircularBuffer(512, 1)
self.low_freq_chs_buffer = ut.CircularBuffer(150, 2)
self.low_freq_chs_std = 1
######################################################################
# BODY Motion Processing, Accelerometer, Gyro
raw_buffer_len = 150
filt_buffer_len = 150
self.acc_window_len = 50
self.acc_buffer = ut.NanBuffer(raw_buffer_len, 3)
self.filt0_buffer = ut.CircularBuffer(filt_buffer_len,3)
self.heart_buffer = ut.CircularBuffer(150,1)
self.breath_buffer = ut.CircularBuffer(500,1)
# Initialize the Body Filters
if config['filter0']:
b = config['filter0'][0]
a = config['filter0'][1]
zi = np.tile(signal.lfilter_zi(b, a), (3, 1)).T
self.filter0 = {'b': b,'a': a,'zi': zi}
if config['filter1']:
b = config['filter1'][0]
a = config['filter1'][1]
zi = np.tile(signal.lfilter_zi(b, a), (3, 1)).T
self.filter1 = {'b': b,'a': a,'zi': zi}
if config['filter2']:
b = config['filter2'][0]
a = config['filter2'][1]
zi = signal.lfilter_zi(b, a)
self.filter2 = {'b': b,'a': a,'zi': zi}
if config['filter3']:
b = config['filter3'][0]
a = config['filter3'][1]
zi = np.tile(signal.lfilter_zi(b, a), (3, 1)).T
self.filter3 = {'b': b,'a': a,'zi': zi}
if config['filter4']:
b = config['filter4'][0]
a = config['filter4'][1]
zi = signal.lfilter_zi(b, a)
self.filter4 = {'b': b,'a': a,'zi': zi}
if config['filter5']:
b = config['filter5'][0]
a = config['filter5'][1]
zi = signal.lfilter_zi(b, a)
self.filter5 = {'b': b,'a': a,'zi': zi}
if config['filter6']:
b = config['filter6'][0]
a = config['filter6'][1]
zi = signal.lfilter_zi(b, a)
self.filter6 = {'b': b,'a': a,'zi': zi}
if config['filter7']:
b = config['filter7'][0]
a = config['filter7'][1]
zi = signal.lfilter_zi(b, a)
self.filter7 = {'b': b,'a': a,'zi': zi}
def _update_eeg_liblo_osc(self, path, args):
"""Collect new EEG data point(s) from pyliblo OSC and process.
Args:
path (str): OSC path listened to
args (list): received values
"""
if self.verbose:
print('Receiving OSC packet!')
sample = np.array(args).reshape(1, -1)
self._process_eeg(sample[:, :self.n_channels], 0)
def _update_eeg_python_osc(self, unused_addr, args, *chs):
"""Collect new EEG data point(s) from python-osc and process.
Args:
            unused_addr (str): OSC path listened to
            args (list): extra arguments registered with the dispatcher
            chs: received channel values
"""
if self.verbose:
print('Receiving OSC packet!')
sample = np.array(chs).reshape(1, -1)
self._process_eeg(sample[:, :self.n_channels], 0)
def _update_acc_liblo_osc(self, path, args):
if self.verbose:
print('Receiving ACC packet!')
sample = np.array(args).reshape(1, -1)
self._process_acc(sample[:, :3], 0)
def _update_gyro_liblo_osc(self, path, args):
if self.verbose:
print('Receiving GYRO packet!')
sample = np.array(args).reshape(1, -1)
self._process_gyro(sample[:, :3], 0)
def _process_eeg(self, samples, timestamp):
"""Process EEG.
Process EEG. Includes buffering, filtering, windowing and pipeline.
Args:
samples (numpy.ndarray): new EEG samples to process
timestamp (float): timestamp
Returns:
output (scalar): output of the pipeline
"""
# Re-map
if self.eeg_ch_remap:
samples = samples[:, self.eeg_ch_remap]
self.eeg_buffer.update(samples)
# self._send_outputs(samples, timestamp, 'raw_eeg')
        # Apply filters
        filt_samples = samples
        if self.config['filter']:
filt_samples, self.bandpass_filt['zi'] = signal.lfilter(
self.bandpass_filt['b'], self.bandpass_filt['a'],
samples, axis=0, zi=self.bandpass_filt['zi'])
# self._send_filtered_eeg(filt_samples, timestamp)
self.filt_eeg_buffer.update(filt_samples)
        if self.config['hpfilter']:
filt_samples, self.hp_filt['zi'] = signal.lfilter(
self.hp_filt['b'], self.hp_filt['a'],
filt_samples, axis=0, zi=self.hp_filt['zi'])
self.hpfilt_eeg_buffer.update(filt_samples)
        if self.config['lpfilter']:
smooth_eeg_samples, self.lp_filt['zi'] = signal.lfilter(
self.lp_filt['b'], self.lp_filt['a'],
filt_samples, axis=0, zi=self.lp_filt['zi'])
if self.debug_outputs:
self._send_output_vec(smooth_eeg_samples, timestamp, 'smooth_eeg')
else:
smooth_eeg_samples = filt_samples
self.smooth_eeg_buffer.update(smooth_eeg_samples)
        if self.config['filter_bank']:
filter_bank_samples = {}
for name, filt_dict in self.filter_bank.items():
filter_bank_samples[name], self.filter_bank[name]['zi'] = \
signal.lfilter(filt_dict['b'], filt_dict['a'],
filt_samples, axis=0,
zi=self.filter_bank[name]['zi'])
low_freq_chs = filter_bank_samples['delta'][0, [0, 2]] #+ filter_bank_samples['theta'][0, [0, 1]
window = self.smooth_eeg_buffer.extract(self.window_len)
eegEarWindow = window[:, 3] #data from right ear Channel
#eye movement computed from the difference between two frontal channels
eyewindow = self.smooth_eeg_buffer.extract(200)
eegFLWindow = eyewindow[:, 1]
eegFRWindow = eyewindow[:, 2]
# norm_diff_eyes = eegFLWindow[-1] - eegFRWindow[-1]*np.nanstd(eegFLWindow, axis=0)/np.nanstd(eegFRWindow, axis=0)
# eyeH = np.reshape([np.square(norm_diff_eyes)], (1, 1))
#find blinks in the left eegEarWindow
blinkVal = ut.blink_template_match(eegEarWindow)
if (blinkVal > 100000 and self.blink == 0):
self.blink = 50
self.blinkwait = 350
else:
if (self.blinkwait > 0):
self.blinkwait -= 1
if (self.blink > 0):
self.blink -= 1
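        # (added note) `blink` is a short ~50-sample "blink detected" countdown used for
        # the 'blink' output below, while `blinkwait` is a longer ~350-sample refractory
        # period; the goodData check further down only accepts windows once blinkwait is 0.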
# LONGER-TERM CALM SCORE based on Saccadic Eye Movement
eye_mov_percent = np.reshape(np.percentile(eegFLWindow - eegFRWindow, 90), (1, 1))
self.eye_mov_percent_buffer.update(eye_mov_percent)
remap_eye_mov_percent = ut.sigmoid(self.eye_mov_percent_buffer.extract().mean(), 0.5, -10, 0)
max_value = 1
incr_decr = remap_eye_mov_percent < 0.2
inc = self.increments_buffer.extract().mean()
dpoints_per_second = 0.0005
if incr_decr:
self.slow_calm_score += dpoints_per_second*inc # 1/max([max_value - self.slow_calm_score, 1])
else:
self.slow_calm_score -= dpoints_per_second*inc*4 #0.7 # (self.slow_calm_score)/1280
self.increments_buffer.update(np.reshape(incr_decr, (1, 1)))
if self.slow_calm_score > max_value:
self.slow_calm_score = max_value
elif self.slow_calm_score < 0:
self.slow_calm_score = 0
self.slow_calm_score_buffer.update(np.reshape(self.slow_calm_score, (1, 1)))
# Send outputs at a reduced sampling rate
        if self.smooth_eeg_buffer.pts % 3 == 0:
self._send_output_vec(smooth_eeg_samples, timestamp, 'muse/eeg')
if (self.blink > 0):
self._send_output(np.array([[1]]), timestamp, 'blink')
else:
self._send_output(np.array([[0]]), timestamp, 'blink')
self._send_output(blinkVal/300000,timestamp,'blinkVal')
self._send_output(remap_eye_mov_percent, timestamp, 'saccad')
self._send_output(np.reshape(self.slow_calm_score_buffer.extract().mean(), (1, 1)),timestamp, 'calm') # slow_calm_score
self._send_output(low_freq_chs / self.low_freq_chs_std + 0.5, timestamp, 'low_freq_chs')
# process and send output at every step. usually about every 1/10s
if self.eeg_buffer.pts > self.step:
self.eeg_buffer.pts = 0
# Get filtered EEG window
            if self.config['lpfilter']:
window = self.smooth_eeg_buffer.extract(self.window_len)
else:
window = self.eeg_buffer.extract(self.window_len)
psd_raw_buffer = self.eeg_buffer.extract(self.window_len)
# Get average PSD
psd, f = ut.fft_continuous(psd_raw_buffer, n=int(self.fs), psd=True,
log='psd', fs=self.fs, window='hamming')
self.psd_buffer.update(np.expand_dims(psd, axis=0))
mean_psd = np.nanmean(self.psd_buffer.extract(), axis=0)
            # find variance of the EEG window for bad-signal detection
eegVar = np.nanvar(window,axis=0)
self._send_output_vec(eegVar.reshape(1,self.n_channels), timestamp, 'hsi')
            if self.sparseOutput is not None:
                #send channel variance for signal quality indication at the source Raspberry Pi
#send(Address('10.0.0.14','1234'), "/hsi", eegVar[0],eegVar[1],eegVar[2],eegVar[3])
self._send_sparseOutput_vec(eegVar.reshape(1,self.n_channels), timestamp, 'hsi')
# Get band powers and ratios
bandPowers, bandNames = ut.compute_band_powers(mean_psd, f, relative=False)
ratioPowers, ratioNames = ut.compute_band_ratios(bandPowers)
if (self.firstWindowProc):
self.band_powers = bandPowers
self.band_names = bandNames
self.ratio_powers = ratioPowers
self.ratio_names = ratioNames
self.scores = np.zeros((len(self.band_names), self.n_channels))
self.firstWindowProc = False
            if eegVar.mean() < 300 and self.blinkwait == 0:  # threshold for good data
for i, (name, hist) in enumerate(self.hists.items()):
self.band_powers = bandPowers
self.ratio_powers = ratioPowers
#send good data indicator based on mean eegWindow variance and blinkwait
self._send_output(np.array([[1]]), timestamp, 'goodData') #good data
else:
                self._send_output(np.array([[0]]), timestamp, 'goodData')  # bad data
self._send_outputs(self.band_powers, timestamp, 'bands')
self._send_outputs(self.ratio_powers, timestamp, 'ratios')
            mask = (f >= 30) & (f < 50)
self.low_freq_chs_buffer.update(np.reshape(low_freq_chs, (1, -1)))
self.low_freq_chs_std = self.low_freq_chs_buffer.extract().std(axis=0)
emg_power = np.mean(mean_psd[mask, 0], axis=0) #HF power of right ear
self._send_output(np.array([np.sqrt(emg_power)/2]), timestamp, 'emg')
def _process_acc(self, samples, timestamp):
self._send_output_vec(samples,0,'muse/acc')
self.acc_buffer.update(samples)
window = self.acc_buffer.extract(self.acc_window_len)
timestamps = np.linspace(0,1/50*self.acc_window_len,self.acc_window_len)
new_fs= 250
timestamps_upsampled = np.arange(timestamps[0], timestamps[-1],1/new_fs)
f = interpolate.interp1d(timestamps, window, kind='cubic', axis=0,
fill_value=np.nan, assume_sorted=True)
window_upsampled = f(timestamps_upsampled)
for t in range(timestamps_upsampled.size-5,timestamps_upsampled.size):
if self.debug_outputs:
self._send_output(window_upsampled[t],0,'upsamp')
upsample = np.array(window_upsampled[t]).reshape(1,3)
filt_samples, self.filter0['zi'] = signal.lfilter(
self.filter0['b'], self.filter0['a'],
upsample, axis=0, zi=self.filter0['zi'])
self.filt0_buffer.update(filt_samples)
if self.debug_outputs:
self._send_outputs(filt_samples,0,'filter0')
filt_samples, self.filter1['zi'] = signal.lfilter(
self.filter1['b'], self.filter1['a'],
filt_samples, axis=0, zi=self.filter1['zi'])
if self.debug_outputs:
self._send_outputs(filt_samples,0,'filter1')
filt_samples = np.sqrt(np.sum(filt_samples ** 2, axis=1))
if self.debug_outputs:
self._send_output(filt_samples,0,'filter1L2')
heart_samples, self.filter2['zi'] = signal.lfilter(
self.filter2['b'], self.filter2['a'],
filt_samples, axis=0, zi=self.filter2['zi'])
if self.debug_outputs:
self._send_output(heart_samples,0,'filter2')
breathfilt_samples, self.filter3['zi'] = signal.lfilter(
self.filter3['b'], self.filter3['a'],
upsample, axis=0, zi=self.filter3['zi'])
if self.debug_outputs:
self._send_outputs(breathfilt_samples,0,'filter3')
self.heart_buffer.update(heart_samples.reshape(1,1))
heartbuf = self.heart_buffer.extract(150)
heartbufMin = heartbuf.min()
heartbufMax = heartbuf.max()
            heart = np.reshape((heartbuf[-1] - heartbufMin) / (heartbufMax - heartbufMin), (1, 1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 11:36:51 2020
@author: cjburke
"""
import numpy as np
import matplotlib.pyplot as plt
import h5py
from astropy.wcs import WCS
from astropy.io import fits
import glob
import os
import argparse
try:
import pyds9 as pd
except ImportError:
print('Warning: No pyds9 installed. No debugging with image display available')
from statsmodels import robust
def idx_filter(idx, *array_list):
new_array_list = []
for array in array_list:
new_array_list.append(array[idx])
return new_array_list
def binmedian(xdata, ydata, nBins=30, xmin=None, xmax=None, showDetail=False):
    if xmin is None:
        xmin = xdata.min()
    if xmax is None:
        xmax = xdata.max()
xedges = np.linspace(xmin, xmax, nBins+1)
midx = xedges[:-1] + np.diff(xedges)/2.0
iargs = np.digitize(xdata, xedges)
medata = np.zeros_like(midx)
mndata = np.zeros_like(midx)
stddata = np.zeros_like(midx)
ndata = np.zeros_like(midx)
for i in np.arange(0,nBins):
iuse = np.where(iargs == i)[0]
medata[i] = np.median(ydata[iuse])
mndata[i] = np.mean(ydata[iuse])
stddata[i] = np.std(ydata[iuse])
ndata[i] = len(ydata[iuse])
if showDetail:
for i in np.arange(0,nBins):
errmn = stddata[i]/np.sqrt(ndata[i])
sigmn = mndata[i] / errmn
print('i: {0:d} med: {1:f} mn: {2:f} n: {3:f} errmn: {4:f} sigdif: {5:f} midx: {6:f}'.format(\
i, medata[i], mndata[i], ndata[i], errmn, sigmn, midx[i]))
return medata, midx, mndata, stddata, ndata
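# Illustrative usage (added; not in the original script): bin the astrometric residuals
# by TESS magnitude to look for magnitude-dependent WCS errors, e.g.
#   medata, midx, mndata, stddata, ndata = binmedian(tmags[idxgd], deltaSeps, nBins=20)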
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--skipAll", action='store_false', \
help="Debug level; integer higher has more output")
args = parser.parse_args()
# wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/SpocS29/Cam1/Ccd1'
# refh5 = 'refout/refspocunidense_S29_11.h5'
# lookatwcs = 'tess2020240141914-s0029-1-1-0193-s_ffic.fits_wcs.fits'
# lookatimg = '/pdo/spoc-data/sector-029/ffis/tess2020240141914-s0029-1-1-0193-s_ffic.fits.gz'
# lookatwcs = 'tess2020240142914-s0029-1-1-0193-s_ffic.fits_wcs.fits'
# lookatimg = '/pdo/spoc-data/sector-029/ffis/tess2020240142914-s0029-1-1-0193-s_ffic.fits.gz'
# wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/SpocS29/Cam1/Ccd4'
# refh5 = 'refout/refspoc_S29_14.h5'
# lookatwcs = 'tess2020240001914-s0029-1-4-0193-s_ffic.fits_wcs.fits'
# lookatimg = '/pdo/spoc-data/sector-029/ffis/tess2020240001914-s0029-1-4-0193-s_ffic.fits.gz'
# lookatwcs = 'tess2020252001914-s0029-1-4-0193-s_ffic.fits_wcs.fits'
# lookatimg = '/pdo/spoc-data/sector-029/ffis/tess2020252001914-s0029-1-4-0193-s_ffic.fits.gz'
# wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/Orbit66/Cam2/Ccd1'
# refh5 = 'refout/ref_S29_Orbit66_21.h5'
# lookatwcs = 'tess2020268092527-00126867-2-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd1/FITS/tess2020268092527-00126867-2-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00126509-2-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd2/FITS/tess2020268092527-00126509-2-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00127706-2-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd1/FITS/tess2020268092527-00127706-2-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00126509-2-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd2/FITS/tess2020268092527-00126509-2-crm-ffi-ccd1.fits'
# wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/Orbit66/Cam2/Ccd2'
# refh5 = 'refout/ref_S29_Orbit66_22.h5'
# lookatwcs = 'tess2020268092527-00126867-2-crm-ffi-ccd2_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd2/FITS/tess2020268092527-00126867-2-crm-ffi-ccd2.fits'
# lookatwcs = 'tess2020268092527-00126509-2-crm-ffi-ccd2_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd2/FITS/tess2020268092527-00126509-2-crm-ffi-ccd2.fits'
# wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/Orbit66/Cam1/Ccd1'
# refh5 = 'refout/ref_S29_Orbit66_11.h5'
# lookatwcs = 'tess2020268092527-00126867-1-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam1/ccd1/FITS/tess2020268092527-00126867-1-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00126509-1-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam1/ccd1/FITS/tess2020268092527-00126509-1-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00127617-1-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam1/ccd1/FITS/tess2020268092527-00127617-1-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00127661-1-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam1/ccd1/FITS/tess2020268092527-00127661-1-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00127711-1-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam1/ccd1/FITS/tess2020268092527-00127711-1-crm-ffi-ccd1.fits'
# wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/Orbit66/Cam2/Ccd3'
# refh5 = 'refout/ref_S29_Orbit66_23.h5'
# lookatwcs = 'tess2020268092527-00126867-1-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam1/ccd1/FITS/tess2020268092527-00126867-1-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00126509-2-crm-ffi-ccd3_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd3/FITS/tess2020268092527-00126509-2-crm-ffi-ccd3.fits'
# lookatwcs = 'tess2020268092527-00126860-2-crm-ffi-ccd3_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd3/FITS/tess2020268092527-00126860-2-crm-ffi-ccd3.fits'
# lookatwcs = 'tess2020268092527-00126848-2-crm-ffi-ccd3_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd3/FITS/tess2020268092527-00126848-2-crm-ffi-ccd3.fits'
# lookatwcs = 'tess2020268092527-00126849-2-crm-ffi-ccd3_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam2/ccd3/FITS/tess2020268092527-00126849-2-crm-ffi-ccd3.fits'
# wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/Orbit66/Cam4/Ccd1'
# refh5 = 'refout/refstrict_S29_Orbit66_41.h5'
# lookatwcs = 'tess2020268092527-00126867-4-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam4/ccd1/FITS/tess2020268092527-00126867-4-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00126509-4-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam4/ccd1/FITS/tess2020268092527-00126509-4-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00127455-4-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam4/ccd1/FITS/tess2020268092527-00127455-4-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00127456-4-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam4/ccd1/FITS/tess2020268092527-00127456-4-crm-ffi-ccd1.fits'
wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/SpocS29/Cam4/Ccd1'
refh5 = 'refout/refspocunidense_S29_41.h5'
lookatwcs = 'tess2020259212913-s0029-4-1-0193-s_ffic.fits_wcs.fits'
lookatimg = '/pdo/spoc-data/sector-029/ffis/tess2020259212913-s0029-4-1-0193-s_ffic.fits.gz'
lookatwcs = 'tess2020259213913-s0029-4-1-0193-s_ffic.fits_wcs.fits'
lookatimg = '/pdo/spoc-data/sector-029/ffis/tess2020259213913-s0029-4-1-0193-s_ffic.fits.gz'
# wcsdir = '/pdo/users/cjburke/tica/wcs_build/fitsout/Orbit66/Cam4/Ccd3'
# refh5 = 'refout/ref_S29_Orbit66_43.h5'
# lookatwcs = 'tess2020268092527-00126867-1-crm-ffi-ccd1_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam1/ccd1/FITS/tess2020268092527-00126867-1-crm-ffi-ccd1.fits'
# lookatwcs = 'tess2020268092527-00126509-4-crm-ffi-ccd3_wcs.fits'
# lookatimg = '/pdo/qlp-data/orbit-66/ffi/cam4/ccd3/FITS/tess2020268092527-00126509-4-crm-ffi-ccd3.fits'
args.skipAll = True
if args.skipAll:
# Get the wcs fits list of images
fitsList = glob.glob(os.path.join(wcsdir,'*.fits'))
fitsList = np.sort(fitsList)
fitsNames = np.array([])
        fitsTs = np.array([], dtype=np.float64)
        fitsBrghtStds = np.array([], dtype=np.float64)
        fitsFaintStds = np.array([], dtype=np.float64)
        fitsFracUses = np.array([], dtype=np.float64)
        fitsExStd0s = np.array([], dtype=np.float64)
        fitsExStd1s = np.array([], dtype=np.float64)
        fitsExStd2s = np.array([], dtype=np.float64)
        fitsExStd3s = np.array([], dtype=np.float64)
for i, curName in enumerate(fitsList):
hdulist=fits.open(curName)
fitsNames = np.append(fitsNames, os.path.basename(curName))
fitsTs = np.append(fitsTs, hdulist[0].header['TIME'])
fitsBrghtStds = np.append(fitsBrghtStds, hdulist[0].header['FITSTDB'])
fitsFaintStds = np.append(fitsFaintStds, hdulist[0].header['FITSTDF'])
fitsFracUses = np.append(fitsFracUses, hdulist[0].header['WCSGDF'])
fitsExStd0s = np.append(fitsExStd0s, hdulist[0].header['FITSTDX0'])
fitsExStd1s = np.append(fitsExStd1s, hdulist[0].header['FITSTDX1'])
fitsExStd2s = np.append(fitsExStd2s, hdulist[0].header['FITSTDX2'])
fitsExStd3s = np.append(fitsExStd3s, hdulist[0].header['FITSTDX3'])
plt.plot(fitsTs, fitsFracUses, '.')
plt.show()
plt.plot(fitsTs, fitsBrghtStds, '.')
plt.show()
plt.plot(fitsTs, fitsFaintStds, '.')
plt.show()
plt.plot(fitsTs, fitsExStd0s, '.', label='0')
plt.plot(fitsTs, fitsExStd1s, '.', label='1')
plt.plot(fitsTs, fitsExStd2s, '.', label='2')
plt.plot(fitsTs, fitsExStd3s, '.', label='3')
plt.legend()
plt.show()
# Load the reference position information
fin = h5py.File(refh5, 'r')
tics = np.array(fin['tics'])
ras = np.array(fin['ras'])
decs = np.array(fin['decs'])
tmags = np.array(fin['tmags'])
blkidxs = np.array(fin['blkidxs'])
obscols = np.array(fin['obscols'])
obsrows = np.array(fin['obsrows'])
# sort by tic number
idx = np.argsort(tics)
tics, ras, decs, tmags, blkidxs, obscols, obsrows = idx_filter(idx, \
tics, ras, decs, tmags, blkidxs, obscols, obsrows)
# Load the wcs fits info for a particular image
hdulist = fits.open(os.path.join(wcsdir, lookatwcs))
hdr = hdulist[0].header
fitsTIC = hdulist[1].data['TIC']
fitsflxcols = hdulist[1].data['FLXCOL']
fitsflxrows = hdulist[1].data['FLXROW']
gdPRF = hdulist[1].data['FLXVALID']
# sort by tic number
idx = np.argsort(fitsTIC)
fitsTIC, fitsflxcols, fitsflxrows, gdPRF = idx_filter(idx, \
fitsTIC, fitsflxcols, fitsflxrows, gdPRF)
# Double check that the data rows are lined up by tic
checkPASS = True
if not len(fitsTIC) == len(tics):
checkPASS = False
if checkPASS:
sumd = np.sum(fitsTIC - tics)
if sumd != 0:
checkPASS = False
if not checkPASS:
print('Error: TIC arrays disagree between reference data and image')
exit()
idxgd = np.where(gdPRF == 1)[0]
allBad = False
if len(idxgd) == 0:
# revert to looking at stars used in bad regions
print('WARNING NO Good targets.')
idxgd = np.where(np.abs(gdPRF) == 1)[0]
allBad = True
#Read WCS and see how well it recovers the input ra and decs
# from their flux weighted centroid measurements
my_wcs = WCS(hdr)
pix_list = list(zip(fitsflxcols[idxgd], fitsflxrows[idxgd]))
pix_coords = np.array(pix_list, dtype=np.double)
pred_coords = my_wcs.all_pix2world(pix_coords, 1)
predRa = np.array([x[0] for x in pred_coords], dtype=np.double)
predDec = np.array([x[1] for x in pred_coords], dtype=np.double)
deg2Rad = np.pi/180.0
deltaRas = (predRa - ras[idxgd]) *3600.0 * np.cos(predDec*deg2Rad)
deltaDecs = (predDec - decs[idxgd]) *3600.0
deltaSeps = np.sqrt(deltaRas*deltaRas + deltaDecs*deltaDecs)
idxb = np.where(tmags[idxgd]<10.0)[0]
brightstd = np.std(deltaSeps[idxb])
idxf = np.where(tmags[idxgd]>10.0)[0]
faintstd = np.std(deltaSeps[idxf])
allstd = np.std(deltaSeps)
# print some stats of numbers of bright and faint targets valid
# over the sub regions
nCol = hdulist[0].header['CTRPCOL']
nRow = hdulist[0].header['CTRPROW']
nBlck = nCol*nRow
brightns = np.zeros((nBlck,), dtype=np.int32)
totns = np.zeros((nBlck,), dtype=np.int32)
    brightstds = np.zeros((nBlck,), dtype=np.float64)
    faintstds = np.zeros((nBlck,), dtype=np.float64)
tmptmags = tmags[idxgd]
for i in range(nBlck):
jRow = i//nCol
iCol = np.mod(i, nCol)
ia = np.where(blkidxs[idxgd] == i)[0]
if len(ia)>0:
totns[i] = len(tmptmags[ia])
idxt = np.where(tmptmags[ia]<10.0)[0]
if len(idxt)>0:
brightns[i] = len(idxt)
else:
brightns[i] = 0
else:
totns[i] = 0
print('iCol: {0:d} jRow: {1:d} nTot: {2:d} nBrght {3:d}'.format(\
iCol, jRow, totns[i], brightns[i]))
    print('totN:{0:d} brightN:{1:d}'.format(np.sum(totns), np.sum(brightns)))
import numpy as np
from colour.characterisation import polynomial_expansion_Finlayson2015, matrix_colour_correction_Finlayson2015
from .Node import Node
from typing import Optional, Any
from numpy.typing import NDArray
from camera_match.optimise import NodeOptimiser
class LinearMatrix(Node):
def __init__(self, matrix: Optional[NDArray[Any]] = None):
self.matrix = matrix
if self.matrix is None:
self.matrix = self.identity()
def solve(self, source: NDArray[Any], target: NDArray[Any]):
# Setting Matrix with Moore-Penrose solution for speed
self.matrix = matrix_colour_correction_Finlayson2015(source, target, degree=1)
optimiser = NodeOptimiser(self.apply_matrix, self.matrix)
self.matrix = optimiser.solve(source, target)
def apply(self, RGB: NDArray[Any]) -> NDArray[Any]:
return self.apply_matrix(RGB, self.matrix)
@staticmethod
def identity() -> NDArray[Any]:
return np.identity(3)
@staticmethod
def apply_matrix(RGB: NDArray[Any], matrix: NDArray[Any]) -> NDArray[Any]:
shape = RGB.shape
RGB = np.reshape(RGB, (-1, 3))
        return np.reshape(np.transpose(np.dot(matrix, np.transpose(RGB))), shape)
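# --- Illustrative usage sketch (added; not part of the original module) ----------
# Given paired RGB samples of the same patches from two cameras, a LinearMatrix
# node could be fitted and applied like this (synthetic data for illustration):
#
#   source = np.random.rand(24, 3)   # patches measured by camera A
#   target = np.random.rand(24, 3)   # the same patches from the reference camera
#   node = LinearMatrix()
#   node.solve(source, target)       # Finlayson (2015) seed, then optimiser refinement
#   corrected = node.apply(source)   # apply the fitted 3x3 matrix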
import numpy as np
from algo_CROWN import CROWN
from threat_models.threat_lighten import get_first_layers as lighten_layers
from threat_models.threat_saturate import get_first_layers as saturate_layers
from threat_models.threat_hue import get_first_layers as hue_layers
from threat_models.threat_bandc import get_first_layers as bandc_layers
from utils.get_epsilon import get_eps_2 as get_eps
class Semantic(CROWN):
def __init__(self, model):
super().__init__(model)
@staticmethod
def get_layer_bound_implicit(W, b, UB_prev, LB_prev, is_first, x0, eps):
UB_new = np.empty_like(b)
LB_new = np.empty_like(b)
if is_first: # first layer
Ax0 = np.matmul(W, x0)
for j in range(W.shape[0]):
dualnorm_Aj = np.sum(np.abs(np.multiply(W[j], eps)), axis=1)
UB_new[j] = np.max(Ax0[j] + dualnorm_Aj) + b[j]
                LB_new[j] = np.min(Ax0[j] - dualnorm_Aj) + b[j]
import matplotlib.pyplot as plt
import h5py, argparse
import numpy as np
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import matplotlib.colors as colors
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as mpatches
from scipy.optimize import minimize
'''
This script computes, for events that fall inside the laser-trigger coincidence window,
the corresponding pulse parameters as well as the TTS (transit time spread).
'''
def fitGaus(tts,limits):
tts_select = tts[(tts<limits[1])&(tts>limits[0])]
result = minimize(likelihood,[1, np.mean(tts_select),np.std(tts_select)],args=(tts_select, tts_select.shape[0]), bounds=[(0,None),limits,(0,(limits[1]-limits[0])/2)])
return result, tts_select.shape[0]
def likelihood(x,*args):
A,mu,sigma = x
tts,N = args
return A*N-tts.shape[0]*np.log(A)+np.sum((tts-mu)**2)/2/sigma**2+tts.shape[0]*np.log(sigma)
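# (added note) `likelihood` returns, up to additive constants, the negative log-likelihood
# of an unbinned extended Gaussian fit,
#   -ln L = A*N - n*ln(A) + sum_i (t_i - mu)^2 / (2*sigma^2) + n*ln(sigma),
# where n is the number of selected hits and A the normalisation; fitGaus minimises it
# inside `limits` to estimate the TTS mean and width.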
psr = argparse.ArgumentParser()
psr.add_argument('-i', dest='ipt', help='input h5 file')
psr.add_argument('-o', dest='opt', help='output png file')
psr.add_argument('-c', dest='channel', nargs='+', default=[0,1],help='channel used in DAQ')
psr.add_argument('-t', dest='trigger', help='trigger h5 file')
args = psr.parse_args()
#plt.style.use('fivethirtyeight')
info = []
results = np.zeros(len(args.channel), dtype=[('peakC','<f4'), ('vallyC','<f4'),('PV','<f4'),('chargeMu','<f4'),('chargeSigma','<f4')])
with h5py.File(args.ipt, 'r') as ipt:
for j in range(len(args.channel)):
info.append(ipt['ch{}'.format(args.channel[j])][:])
with h5py.File(args.trigger, 'r') as ipt:
rinterval = ipt['rinterval'][:]
rangemin = -100
rangemax = 500
bins = rangemax-rangemin
# set the figure appearance
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
jet = plt.cm.jet
newcolors = jet(np.linspace(0, 1, 32768))
white = np.array([1, 1, 1, 0.5])
newcolors[0, :] = white
cmap = ListedColormap(newcolors)
print('begin plot')
pdf = PdfPages(args.opt+'.pdf')
# 下面循环绘制每个channel的图像
nearMax = 10
for j in range(len(args.channel)):
# charge分布
fig, ax = plt.subplots()
ax.set_title('charge distribution')
rangemin = int(np.min(info[j]['minPeakCharge'])-1)
rangemax = int(np.max(info[j]['minPeakCharge'])+1)
bins = rangemax-rangemin
h = ax.hist(info[j]['minPeakCharge'], histtype='step', bins=bins, range=[rangemin, rangemax], label='charge')
ax.set_xlabel('charge/mVns')
ax.set_ylabel('entries')
ax.legend()
ax.set_yscale('log')
ax.xaxis.set_minor_locator(MultipleLocator(100))
# plt.savefig('{}/{}charge.png'.format(args.opt,args.channel[j]))
pdf.savefig(fig)
ax.set_xlim([-5, 1000])
pdf.savefig(fig)
ax.set_yscale('linear')
if h[0].shape[0]>200:
ax.set_ylim([0, 2*np.max(h[0][70:150])])
pi = h[1][70:150][np.argmax(h[0][70:150])]
vi = h[1][15:70][np.argmin(h[0][15:70])]
pv = np.max(h[0][70:150])
        vv = np.min(h[0][10:80])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from einsumt import einsumt
class TestEinsumt(object):
def test_summation_reduction(self):
a = np.random.rand(100, 10, 10)
b = np.random.rand(10, 10)
subs = 'aij,ji->ij'
assert np.allclose(np.einsum(subs, a, b), einsumt(subs, a, b, idx='a'))
def test_concatenation_reduction(self):
a = np.random.rand(100, 10, 10)
b = np.random.rand(10, 10)
subs = 'aij,ji->aij'
assert np.allclose(np.einsum(subs, a, b), einsumt(subs, a, b, idx='a'))
def test_custom_index(self):
a = np.random.rand(100, 10, 10)
b = np.random.rand(10, 10)
subs = 'aij,ji->ij'
res0 = np.einsum(subs, a, b)
res1 = einsumt(subs, a, b, idx='a')
res2 = einsumt(subs, a, b, idx='i')
res3 = einsumt(subs, a, b, idx='j')
assert np.allclose(res0, res1)
assert np.allclose(res0, res2)
assert np.allclose(res0, res3)
def test_automatic_index1(self):
a = np.random.rand(100, 10, 10)
b = np.random.rand(10, 10)
subs = 'aij,ji->ij'
res0 = np.einsum(subs, a, b)
res1 = einsumt(subs, a, b)
assert np.allclose(res0, res1)
def test_automatic_index2(self):
a = np.random.rand(10, 10, 100)
b = np.random.rand(10, 10)
subs = 'ija,ji->ij'
res0 = np.einsum(subs, a, b)
res1 = einsumt(subs, a, b)
assert np.allclose(res0, res1)
def test_ellipsis1(self):
a = np.random.rand(10, 10, 100)
b = np.random.rand(10, 10)
subs = 'ij...,ji->...ij'
res0 = np.einsum(subs, a, b)
res1 = einsumt(subs, a, b)
assert np.allclose(res0, res1)
def test_ellipsis2(self):
a = np.random.rand(10, 10, 100)
b = np.random.rand(10, 10)
subs = 'ij...,ji->...ij'
res0 = np.einsum(subs, a, b)
res1 = einsumt(subs, a, b, idx='j')
assert np.allclose(res0, res1)
def test_small_array(self):
from multiprocessing import cpu_count
n = cpu_count() - 1
if n > 0:
a = np.random.rand(n, 10, 10)
b = np.random.rand(10, 10)
subs = 'aij,ji->ij'
res0 = np.einsum(subs, a, b)
res1 = einsumt(subs, a, b, idx='a')
assert np.allclose(res0, res1)
else:
pytest.skip("not enough CPUs for this test")
def test_custom_pool(self):
from multiprocessing.pool import ThreadPool
a = np.random.rand(100, 10, 10)
b = np.random.rand(10, 10)
subs = 'aij,ji->ij'
res0 = np.einsum(subs, a, b)
res1 = einsumt(subs, a, b, pool=ThreadPool())
res2 = einsumt(subs, a, b, pool=5)
assert np.allclose(res0, res1)
assert np.allclose(res0, res2)
def test_muti_operand(self):
e = np.random.rand(50, 10)
f = np.random.rand(50, 10, 10)
        g = np.random.rand(50, 10, 20)
import numpy as np
import matplotlib.pyplot as plt
import time
import skcuda.linalg as linalg
import aspire.em_classavg.data_utils as data_utils
from aspire.em_classavg.image_denoising.image_denoising.ConverterModel.Converter import Converter
class EM:
def __init__(self, images, trunc_param=10, beta=0.5, ang_jump=1,
max_shift=5, shift_jump=1, n_scales=10, is_remove_outliers=True, outliers_precent_removal=5):
self.trunc_param = trunc_param
self.beta = beta
self.ang_jump = ang_jump
self.is_remove_outliers = is_remove_outliers
self.outliers_precent_removal = outliers_precent_removal
self.em_params = dict()
self.em_params['n_scales'] = n_scales
self.em_params['max_shift'] = max_shift
self.em_params['shift_jump'] = shift_jump
self.em_params['thetas'] = np.arange(1, 361, self.ang_jump)
self.em_params['shifts'] = np.arange(-1 * self.em_params['max_shift'],
self.em_params['max_shift'] + 1, self.em_params['shift_jump'])
self.im_size = np.shape(images)[-1]
if np.ndim(images) == 3:
self.n_images = len(images)
else:
self.n_images = 1
images, self.mean_bg_ims, self.sd_bg_ims = data_utils.normalize_background(images)
snr_est = EM.est_snr(images)
est_scale = np.sqrt(snr_est * np.mean(self.sd_bg_ims) ** 2)
self.em_params['scales'] = np.linspace(0.8 * est_scale, 1.2 * est_scale, self.em_params['n_scales'])
self.converter = Converter(self.im_size, self.trunc_param, self.beta)
self.converter.init_direct('full')
self.c_ims = self.converter.direct_forward(images)
self.const_terms = self.pre_compute_const_terms()
self.phases = np.exp(-1j * 2 * np.pi / 360 *
np.outer(self.em_params['thetas'], self.converter.get_angular_frequency()))
# the expansion coefficients of each image for each possible rotation
self.c_ims_rot = self.c_ims[:, np.newaxis, :] * self.phases[np.newaxis, :]
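        # (added note) assuming c_ims has shape (n_images, n_coeffs) and phases has shape
        # (n_rotations, n_coeffs), broadcasting gives c_ims_rot the shape
        # (n_images, n_rotations, n_coeffs)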
def e_step(self, c_avg):
print('e-step')
n_scales = len(self.em_params['scales'])
n_rots = len(self.em_params['thetas'])
n_shifts_2d = len(self.em_params['shifts'])**2
n_shifts_1d = len(self.em_params['shifts'])
posteriors = np.zeros((self.n_images, n_shifts_2d, n_scales, n_rots))
# posteriors = np.zeros((self.n_images, n_scales, n_rots, n_shifts_2d))
# compute the terms that do not depend on the shifts
ann_const = (np.linalg.norm(c_avg) * np.outer(1 / self.sd_bg_ims, self.em_params['scales']))**2
cross_cnn_ann = np.outer(self.mean_bg_ims / (self.sd_bg_ims**2), self.em_params['scales']) * \
2 * np.real(np.vdot(c_avg, self.const_terms['c_all_ones_im']))
ann_const_cross_cnn_anns = ann_const + cross_cnn_ann
const_elms = ann_const_cross_cnn_anns + (self.const_terms['anni'] + self.const_terms['cnn'])[:, np.newaxis]
for shift_x in self.em_params['shifts']:
for shift_y in self.em_params['shifts']:
if shift_y < shift_x:
continue
A_shift = self.calc_A_shift(shift_x, shift_y)
tmp1_shift = np.conj(self.const_terms['c_all_ones_im']).dot(A_shift)
                tmp2_shift = np.conj(c_avg).dot(A_shift)
"""
Unit tests for crystal class
"""
__author__ = '<NAME>'
import unittest
import numpy as np
import yaml
import onsager.crystal as crystal
class UnitCellTests(unittest.TestCase):
"""Tests to make sure incell and halfcell work as expected."""
def testincell(self):
"""In cell testing"""
a = np.array([4. / 3., -2. / 3., 19. / 9.])
b = np.array([1. / 3., 1. / 3., 1. / 9.])
self.assertTrue(np.allclose(crystal.incell(a), b))
def testhalfcell(self):
"""Half cell testing"""
a = np.array([4. / 3., -2. / 3., 17. / 9.])
b = np.array([1. / 3., 1. / 3., -1. / 9.])
self.assertTrue(np.allclose(crystal.inhalf(a), b))
class GroupOperationTests(unittest.TestCase):
"""Tests for our group operations."""
def setUp(self):
self.rot = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
self.trans = np.zeros(3)
self.cartrot = np.array([[0., 1., 0.],
[1., 0., 0.],
[0., 0., 1.]])
self.indexmap = ((0,),)
self.mirrorop = crystal.GroupOp(self.rot, self.trans, self.cartrot, self.indexmap)
self.ident = crystal.GroupOp(np.eye(3, dtype=int), np.zeros(3), np.eye(3), ((0,),))
def testEquality(self):
"""Can we check if two group operations are equal?"""
self.assertNotEqual(self.mirrorop, self.rot)
self.assertEqual(self.mirrorop.incell(), self.mirrorop)
# self.assertEqual(self.mirrorop.__hash__(), (self.mirrorop + np.array([1,0,0])).__hash__())
def testAddition(self):
"""Can we add a vector to our group operation and get a new one?"""
with self.assertRaises(TypeError):
self.mirrorop + 0
v1 = np.array([1, 0, 0])
newop = self.mirrorop + v1
mirroroptrans = crystal.GroupOp(self.rot, self.trans + v1, self.cartrot, self.indexmap)
self.assertEqual(newop, mirroroptrans)
self.assertTrue(np.allclose((self.ident - v1).trans, -v1))
def testMultiplication(self):
"""Does group operation multiplication work correctly?"""
self.assertEqual(self.mirrorop * self.mirrorop, self.ident)
v1 = np.array([1, 0, 0])
trans = self.ident + v1
self.assertEqual(trans * trans, self.ident + 2 * v1)
rot3 = crystal.GroupOp(np.eye(3, dtype=int), np.zeros(3), np.eye(3), ((1, 2, 0),))
ident3 = crystal.GroupOp(np.eye(3, dtype=int), np.zeros(3), np.eye(3), ((0, 1, 2),))
self.assertEqual(rot3 * rot3 * rot3, ident3)
def testInversion(self):
"""Is the product with the inverse equal to identity?"""
self.assertEqual(self.ident.inv, self.ident.inv)
self.assertEqual(self.mirrorop * (self.mirrorop.inv()), self.ident)
v1 = np.array([1, 0, 0])
trans = self.ident + v1
self.assertEqual(trans.inv(), self.ident - v1)
inversion = crystal.GroupOp(-np.eye(3, dtype=int), np.zeros(3), -np.eye(3), ((0,),))
self.assertEqual(inversion.inv(), inversion)
invtrans = inversion + v1
self.assertEqual(invtrans.inv(), invtrans)
def testHash(self):
"""Can we construct a frozenset? --requires __hash__"""
fr = frozenset([self.ident, self.mirrorop])
self.assertTrue(len(fr), 2)
def testGroupAnalysis(self):
"""If we determine the eigenvalues / vectors of a group operation, are they what we expect?"""
# This is entirely dictated by the cartrot part of a GroupOp, so we will only look at that
# identity
# rotation type: 1 = identity; 2..6 : 2- .. 6- fold rotation; negation includes a
# perpendicular mirror
# therefore: a single mirror is -1, and inversion is -2 (since 2-fold rotation + mirror = i)
rot = np.eye(3, dtype=int)
cartrot = np.eye(3)
rottype, eigenvect = (crystal.GroupOp(rot, self.trans, cartrot, self.indexmap)).eigen()
self.assertTrue(np.isclose(np.linalg.det(eigenvect), 1))
self.assertEqual(rottype, 1) # should be the identity
self.assertTrue(np.allclose(eigenvect, np.eye(3)))
basis = crystal.VectorBasis(rottype, eigenvect)
self.assertEqual(basis[0], 3) # should be a sphere
tensorbasis = crystal.SymmTensorBasis(rottype, eigenvect) # at some point in the future, generalize
self.assertEqual(len(tensorbasis), 6) # should be 6 unique symmetric tensors
for t in tensorbasis:
self.assertTrue(np.all(t == t.T), msg="{} is not symmetric".format(t))
for t2 in tensorbasis:
if np.any(t2 != t):
self.assertAlmostEqual(np.dot(t.flatten(), t2.flatten()), 0)
# inversion
rot = -np.eye(3, dtype=int)
cartrot = -np.eye(3)
rottype, eigenvect = (crystal.GroupOp(rot, self.trans, cartrot, self.indexmap)).eigen()
self.assertTrue(np.isclose(np.linalg.det(eigenvect), 1))
self.assertEqual(rottype, -2) # should be the identity
self.assertTrue(np.allclose(eigenvect, np.eye(3)))
basis = crystal.VectorBasis(rottype, eigenvect)
self.assertEqual(basis[0], 0) # should be a point
tensorbasis = crystal.SymmTensorBasis(rottype, eigenvect) # at some point in the future, generalize
self.assertEqual(len(tensorbasis), 6) # should be 6 unique symmetric tensors
for t in tensorbasis:
self.assertTrue(np.all(t == t.T), msg="{} is not symmetric".format(t))
self.assertAlmostEqual(np.dot(t.flatten(), t.flatten()), 1)
for t2 in tensorbasis:
if np.any(t2 != t):
self.assertAlmostEqual(np.dot(t.flatten(), t2.flatten()), 0)
# mirror through the y=x line: (x,y) -> (y,x)
rot = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
cartrot = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., 1.]])
rottype, eigenvect = (crystal.GroupOp(rot, self.trans, cartrot, self.indexmap)).eigen()
self.assertTrue(np.isclose(np.linalg.det(eigenvect), 1))
self.assertEqual(rottype, -1)
self.assertTrue(np.isclose(abs(np.dot(eigenvect[0],
np.array([1 / np.sqrt(2), -1 / np.sqrt(2), 0]))), 1))
self.assertTrue(np.allclose(-eigenvect[0], np.dot(rot, eigenvect[0]))) # inverts
self.assertTrue(np.allclose(eigenvect[1], np.dot(rot, eigenvect[1]))) # leaves unchanged
self.assertTrue(np.allclose(eigenvect[2], np.dot(rot, eigenvect[2]))) # leaves unchanged
basis = crystal.VectorBasis(rottype, eigenvect)
self.assertEqual(basis[0], 2) # should be a plane
self.assertTrue(np.allclose(basis[1], eigenvect[0]))
tensorbasis = crystal.SymmTensorBasis(rottype, eigenvect) # at some point in the future, generalize
self.assertEqual(len(tensorbasis), 4) # should be 4 unique symmetric tensors
for t in tensorbasis:
# check symmetry, and remaining unchanged with operations
self.assertTrue(np.all(t == t.T), msg="{} is not symmetric".format(t))
rott = np.dot(rot, np.dot(t, rot.T))
self.assertTrue(np.allclose(t, rott),
msg="\n{}\nis not unchanged with\n{}\n{}".format(t, rot, rott))
self.assertAlmostEqual(np.dot(t.flatten(), t.flatten()), 1)
for t2 in tensorbasis:
if np.any(t2 != t):
self.assertAlmostEqual(np.dot(t.flatten(), t2.flatten()), 0)
# three-fold rotation around the body-center
rot = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]])
cartrot = np.array([[0., 1., 0.], [0., 0., 1.], [1., 0., 0.]])
rottype, eigenvect = (crystal.GroupOp(rot, self.trans, cartrot, self.indexmap)).eigen()
self.assertEqual(rottype, 3)
self.assertTrue(np.isclose(np.linalg.det(eigenvect), 1))
self.assertTrue(np.isclose(abs(np.dot(eigenvect[0],
np.array([1 / np.sqrt(3), 1 / np.sqrt(3), 1 / np.sqrt(3)]))), 1))
self.assertTrue(np.allclose(eigenvect[0], np.dot(rot, eigenvect[0]))) # our rotation axis
basis = crystal.VectorBasis(rottype, eigenvect)
self.assertEqual(basis[0], 1) # should be a line
self.assertTrue(np.allclose(basis[1], eigenvect[0]))
tensorbasis = crystal.SymmTensorBasis(rottype, eigenvect) # at some point in the future, generalize
self.assertEqual(len(tensorbasis), 2) # should be 2 unique symmetric tensors
for t in tensorbasis:
# check symmetry, and remaining unchanged with operations
self.assertTrue(np.all(t == t.T), msg="{} is not symmetric".format(t))
rott = np.dot(rot, np.dot(t, rot.T))
self.assertTrue(np.allclose(t, rott),
msg="\n{}\nis not unchanged with\n{}\n{}".format(t, rot, rott))
self.assertAlmostEqual(np.dot(t.flatten(), t.flatten()), 1)
for t2 in tensorbasis:
if np.any(t2 != t):
self.assertAlmostEqual(np.dot(t.flatten(), t2.flatten()), 0)
def testCombineVectorBasis(self):
"""Test our ability to combine a few vector basis choices"""
# these are all (d, vect) tuples that we work with
sphere = (3, np.zeros(3))
point = (0, np.zeros(3))
plane1 = (2, np.array([0., 0., 1.]))
plane2 = (2, np.array([1., 1., 1.]) / np.sqrt(3))
line1 = (1, np.array([1., 0., 0.]))
line2 = (1, np.array([0., 1., 0.]))
line3 = (1, np.array([1., -1., 0.]) / np.sqrt(2))
for t in [sphere, point, plane1, plane2, line1, line2, line3]:
self.assertEqual(crystal.CombineVectorBasis(t, t)[0], t[0])
res = crystal.CombineVectorBasis(line1, plane1)
self.assertEqual(res[0], 1) # should be a line
self.assertTrue(np.isclose(abs(np.dot(res[1], line1[1])), 1))
res = crystal.CombineVectorBasis(plane1, plane2)
self.assertEqual(res[0], 1) # should be a line
self.assertTrue(np.isclose(abs(np.dot(res[1], line3[1])), 1))
res = crystal.CombineVectorBasis(plane1, line1)
self.assertEqual(res[0], 1) # should be a line
self.assertTrue(np.isclose(abs(np.dot(res[1], line1[1])), 1))
res = crystal.CombineVectorBasis(plane2, line1)
self.assertEqual(res[0], 0) # should be a point
res = crystal.CombineVectorBasis(line1, line2)
self.assertEqual(res[0], 0) # should be a point
def testCombineTensorBasis(self):
"""Test the intersection of tensor bases"""
fullbasis = crystal.SymmTensorBasis(1, np.eye(3)) # full basis (identity)
yzbasis = crystal.SymmTensorBasis(-1, np.eye(3)) # mirror through the x axis
xzbasis = crystal.SymmTensorBasis(-1, [np.array([0., 1., 0.]), np.array([0., 0., 1.]), np.array([1., 0., 0.])])
rotbasis = crystal.SymmTensorBasis(3, np.eye(3)) # 120 deg rot through the x axis
rotbasis2 = crystal.SymmTensorBasis(3, [np.array([0., 0., 1.]), np.array([1., 0., 0.]), np.array([0., 1., 0.])])
for b in [fullbasis, yzbasis, xzbasis, rotbasis, rotbasis2]:
combbasis = crystal.CombineTensorBasis(fullbasis, b)
self.assertEqual(len(b), len(combbasis))
combbasis = crystal.CombineTensorBasis(b, fullbasis)
self.assertEqual(len(b), len(combbasis))
combbasis = crystal.CombineTensorBasis(yzbasis, rotbasis)
self.assertEqual(len(combbasis), len(crystal.CombineTensorBasis(rotbasis, yzbasis)))
self.assertEqual(len(combbasis), len(rotbasis)) # should be two left here
combbasis = crystal.CombineTensorBasis(rotbasis, rotbasis2)
self.assertEqual(len(combbasis), 1) # if there's only one, it has to be 1/sqrt(3).
self.assertAlmostEqual(1, abs(np.dot(combbasis[0].flatten(), np.eye(3).flatten() / np.sqrt(3))))
combbasis = crystal.CombineTensorBasis(yzbasis, xzbasis)
self.assertEqual(len(combbasis), 3)
class CrystalClassTests(unittest.TestCase):
"""Tests for the crystal class and symmetry analysis."""
def setUp(self):
self.a0 = 2.5
self.c_a = np.sqrt(8. / 3.)
self.sclatt = self.a0 * np.eye(3)
self.fcclatt = self.a0 * np.array([[0, 0.5, 0.5],
[0.5, 0, 0.5],
[0.5, 0.5, 0]])
self.bcclatt = self.a0 * np.array([[-0.5, 0.5, 0.5],
[0.5, -0.5, 0.5],
[0.5, 0.5, -0.5]])
self.hexlatt = self.a0 * np.array([[0.5, 0.5, 0],
[-np.sqrt(0.75), np.sqrt(0.75), 0],
[0, 0, self.c_a]])
self.basis = [np.array([0., 0., 0.])]
self.squarelatt = self.a0 * np.eye(2) # two-dimensional crystal
self.basis2d = [np.zeros(2)]
def isscMetric(self, crys, a0=0):
if a0 == 0: a0 = self.a0
self.assertAlmostEqual(crys.volume, a0 ** 3)
for i, a2 in enumerate(crys.metric.flatten()):
if i % 4 == 0:
# diagonal element
self.assertAlmostEqual(a2, a0 ** 2)
else:
# off-diagonal element
self.assertAlmostEqual(a2, 0)
def isfccMetric(self, crys, a0=0):
if a0 == 0: a0 = self.a0
self.assertAlmostEqual(crys.volume, 0.25 * a0 ** 3)
for i, a2 in enumerate(crys.metric.flatten()):
if i % 4 == 0:
# diagonal element
self.assertAlmostEqual(a2, 0.5 * a0 ** 2)
else:
# off-diagonal element
self.assertAlmostEqual(a2, 0.25 * a0 ** 2)
def isbccMetric(self, crys, a0=0):
if a0 == 0: a0 = self.a0
self.assertAlmostEqual(crys.volume, 0.5 * a0 ** 3)
for i, a2 in enumerate(crys.metric.flatten()):
if i % 4 == 0:
# diagonal element
self.assertAlmostEqual(a2, 0.75 * a0 ** 2)
else:
# off-diagonal element
self.assertAlmostEqual(a2, -0.25 * a0 ** 2)
def ishexMetric(self, crys, a0=0, c_a=0):
if a0 == 0: a0 = self.a0
if c_a == 0: c_a = self.c_a
self.assertAlmostEqual(crys.volume, np.sqrt(0.75) * c_a * a0 ** 3)
self.assertAlmostEqual(crys.metric[0, 0], a0 ** 2)
self.assertAlmostEqual(crys.metric[1, 1], a0 ** 2)
self.assertAlmostEqual(crys.metric[0, 1], -0.5 * a0 ** 2)
self.assertAlmostEqual(crys.metric[2, 2], (c_a * a0) ** 2)
self.assertAlmostEqual(crys.metric[0, 2], 0)
self.assertAlmostEqual(crys.metric[1, 2], 0)
def issquareMetric(self, crys, a0=0):
if a0 == 0: a0 = self.a0
self.assertAlmostEqual(crys.volume, a0 ** 2)
for i, a2 in enumerate(crys.metric.flatten()):
if i % 3 == 0:
# diagonal element
self.assertAlmostEqual(a2, a0 ** 2)
else:
# off-diagonal element
self.assertAlmostEqual(a2, 0)
def isspacegroup(self, crys):
"""Check that the space group obeys all group definitions: not fast."""
# 1. Contains the identity: O(group size)
identity = None
dim = crys.dim
for g in crys.G:
if np.all(g.rot == np.eye(dim, dtype=int)):
identity = g
self.assertTrue(np.allclose(g.trans, 0),
msg="Identity has bad translation: {}".format(g.trans))
for atommap in g.indexmap:
for i, j in enumerate(atommap):
self.assertTrue(i == j,
msg="Identity has bad indexmap: {}".format(g.indexmap))
self.assertTrue(identity is not None, msg="Missing identity")
# 2. Check for inverses: O(group size^2)
for g in crys.G:
inverse = g.inv().inhalf()
self.assertIn(inverse, crys.G,
msg="Missing inverse op:\n{}\n{}|{}^-1 =\n{}\n{}|{}".format(
g.rot, g.cartrot, g.trans,
inverse.rot, inverse.cartrot, inverse.trans))
# 3. Closed under multiplication: g.g': O(group size^3)
for g in crys.G:
for gp in crys.G:
product = (g * gp).inhalf()
self.assertIn(product, crys.G,
msg="Missing product op:\n{}\n{}|{} *\n{}\n{}|{} = \n{}\n{}|{}".format(
g.rot, g.cartrot, g.trans,
gp.rot, gp.cartrot, gp.trans,
product.rot, product.cartrot, product.trans))
def testscMetric(self):
"""Does the simple cubic lattice have the right volume and metric?"""
crys = crystal.Crystal(self.sclatt, self.basis)
self.isscMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testfccMetric(self):
"""Does the face-centered cubic lattice have the right volume and metric?"""
crys = crystal.Crystal(self.fcclatt, self.basis)
self.isfccMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testbccMetric(self):
"""Does the body-centered cubic lattice have the right volume and metric?"""
crys = crystal.Crystal(self.bcclatt, self.basis)
self.isbccMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testsquareMetric(self):
"""Does the square lattice have the right volume and metric?"""
crys = crystal.Crystal(self.squarelatt, self.basis2d)
self.issquareMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testscReduce(self):
"""If we start with a supercell, does it get reduced back to our start?"""
nsuper = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]], dtype=int)
doublebasis = [self.basis[0], np.array([0.5, 0, 0]) + self.basis[0],
np.array([0, 0.5, 0]) + self.basis[0], np.array([0.5, 0.5, 0]) + self.basis[0]]
crys = crystal.Crystal(np.dot(self.sclatt, nsuper), doublebasis)
self.isscMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testscReduce2(self):
"""If we start with a supercell, does it get reduced back to our start?"""
nsuper = np.array([[5, -3, 0], [1, -1, 3], [-2, 1, 1]], dtype=int)
crys = crystal.Crystal(np.dot(self.sclatt, nsuper), self.basis)
self.isscMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testbccReduce2(self):
"""If we start with a supercell, does it get reduced back to our start?"""
basis = [[np.array([0., 0., 0.]), np.array([0.5, 0.5, 0.5])]]
crys = crystal.Crystal(self.sclatt, basis)
self.isbccMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testscShift(self):
"""If we start with a supercell, does it get reduced back to our start?"""
nsuper = np.array([[5, -3, 0], [1, -1, 3], [-2, 1, 1]], dtype=int)
basis = [np.array([0.33, -0.25, 0.45])]
crys = crystal.Crystal(np.dot(self.sclatt, nsuper), basis)
self.isscMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
self.assertTrue(np.allclose(crys.basis[0][0], np.array([0, 0, 0])))
def testsquareReduce(self):
"""If we start with a supercell, does it get reduced back to our start?"""
nsuper = np.array([[2, 0], [0, 2]], dtype=int)
doublebasis = [self.basis2d[0], np.array([0.5, 0]) + self.basis2d[0],
np.array([0, 0.5]) + self.basis2d[0], np.array([0.5, 0.5]) + self.basis2d[0]]
crys = crystal.Crystal(np.dot(self.squarelatt, nsuper), doublebasis)
self.issquareMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 1) # one atom in the unit cell
def testhcp(self):
"""If we start with a supercell, does it get reduced back to our start?"""
basis = [np.array([0, 0, 0]), np.array([1. / 3., 2. / 3., 1. / 2.])]
crys = crystal.Crystal(self.hexlatt, basis)
self.ishexMetric(crys)
self.assertEqual(len(crys.basis), 1) # one chemistry
self.assertEqual(len(crys.basis[0]), 2) # two atoms in the unit cell
# there needs to be [1/3,2/3,1/4] or [1/3,2/3,3/4], and then the opposite
# it's a little clunky; there's probably a better way to test this:
if np.any([np.allclose(u, np.array([1. / 3., 2. / 3., 0.25]))
for atomlist in crys.basis for u in atomlist]):
self.assertTrue(np.any([np.allclose(u, np.array([2. / 3., 1. / 3., 0.75]))
for atomlist in crys.basis for u in atomlist]))
elif np.any([np.allclose(u, np.array([1. / 3., 2. / 3., 0.75]))
for atomlist in crys.basis for u in atomlist]):
self.assertTrue(np.any([np.allclose(u, np.array([2. / 3., 1. / 3., 0.25]))
for atomlist in crys.basis for u in atomlist]))
else:
self.assertTrue(False, msg="HCP basis not correct")
self.assertEqual(len(crys.G), 24)
# for g in crys.G:
# print g.rot
# print g.cartrot, g.trans, g.indexmap
self.isspacegroup(crys)
self.assertEqual(len(crys.pointG[0][0]), 12)
self.assertEqual(len(crys.pointG[0][1]), 12)
def testLaGaO3(self):
"""Can we properly reduce down an LaGaO3 structure with errors in positions?"""
# this uses "real" DFT relaxation data to test the reduction capabilities
LaGa03latt = [np.array([ 7.88040734e+00, 5.87657472e-05, -1.95441808e-02]),
np.array([ -7.59206882e-05, 7.87786508e+00, 8.28811636e-05]),
np.array([ -1.95315626e-02, -5.74109318e-05, 7.88041614e+00])]
LaGaO3basis = [[np.array([ 2.02290790e-02, 2.32539034e-04, 9.91922251e-01]),
np.array([ 1.26313454e-02, 2.30601523e-04, 4.84327798e-01]),
np.array([ 0.97941805, 0.50023385, 0.01754055]),
np.array([ 0.98701667, 0.50023207, 0.52514002]),
np.array([ 5.12632654e-01, 2.30909936e-04, 9.84337122e-01]),
np.array([ 5.20224990e-01, 2.32577464e-04, 4.91932968e-01]),
np.array([ 0.48701525, 0.50023187, 0.02514135]),
np.array([ 0.47942077, 0.5002339 , 0.51754884])],
[np.array([ 0.24982273, 0.25023308, 0.25473045]),
np.array([ 0.24982282, 0.25023333, 0.75473148]),
np.array([ 0.249823 , 0.75023368, 0.25472946]),
np.array([ 0.24982247, 0.75023396, 0.75473027]),
np.array([ 0.74982257, 0.2502339 , 0.25473326]),
np.array([ 0.74982307, 0.25023197, 0.75473186]),
np.array([ 0.74982204, 0.75023295, 0.25473187]),
np.array([ 0.74982322, 0.75023469, 0.75473098])],
[np.array([ 0.28414742, 0.20916336, 0.00430709]),
np.array([ 0.0002463 , 0.20916015, 0.22041692]),
np.array([ 2.80317156e-01, 2.28151610e-04, 3.00655890e-01]),
np.array([ 0.21550181, 0.29129973, 0.50516544]),
np.array([ 0.99940227, 0.29128777, 0.78906602]),
np.array([ 2.03918412e-01, 2.36510236e-04, 7.24241274e-01]),
np.array([ 0.2841317 , 0.791303 , 0.00431445]),
np.array([ 2.54313708e-04, 7.91306290e-01, 2.20429168e-01]),
np.array([ 0.21933007, 0.50023581, 0.2088184 ]),
np.array([ 0.21551645, 0.70916116, 0.50515561]),
np.array([ 0.99939381, 0.7091728 , 0.78904879]),
np.array([ 0.29572872, 0.50022831, 0.78523308]),
np.array([ 0.71550064, 0.29129386, 0.00516782]),
np.array([ 0.4994013 , 0.29130198, 0.28906235]),
np.array([ 7.03903980e-01, 2.36323588e-04, 2.24257240e-01]),
np.array([ 0.78414767, 0.20916926, 0.50430849]),
np.array([ 0.50024549, 0.20917445, 0.72041481]),
np.array([ 7.80305988e-01, 2.27988377e-04, 8.00654063e-01]),
np.array([ 0.71551543, 0.7091663 , 0.0051578 ]),
np.array([ 0.49939281, 0.70915813, 0.28904503]),
np.array([ 0.79574297, 0.50022792, 0.28522595]),
np.array([ 0.78413198, 0.79129631, 0.50431609]),
np.array([ 0.50025359, 0.79129237, 0.72042732]),
np.array([ 0.71934128, 0.50023592, 0.70882833])]]
LaGaO3strict = crystal.Crystal(LaGa03latt, LaGaO3basis, ['La', 'Ga', 'O'],
threshold=1e-8)
LaGaO3toler = crystal.Crystal(LaGa03latt, LaGaO3basis, ['La', 'Ga', 'O'],
threshold=2e-5)
self.assertEqual(len(LaGaO3strict.G), 1)
self.assertEqual(len(LaGaO3toler.G), 2)
self.assertEqual([len(ulist) for ulist in LaGaO3strict.basis],
[len(ulist) for ulist in LaGaO3basis])
self.assertEqual([2*len(ulist) for ulist in LaGaO3toler.basis],
[len(ulist) for ulist in LaGaO3basis])
self.assertAlmostEqual(LaGaO3strict.volume, 2*LaGaO3toler.volume)
def testscgroupops(self):
"""Do we have 48 space group operations?"""
crys = crystal.Crystal(self.sclatt, self.basis)
self.assertEqual(len(crys.G), 48)
self.isspacegroup(crys)
# for g in crys.G:
# print g.rot, g.trans, g.indexmap
# print g.cartrot, g.carttrans
def testfccpointgroup(self):
"""Test out that we generate point groups correctly"""
crys = crystal.Crystal(self.fcclatt, self.basis)
for g in crys.G:
self.assertIn(g, crys.pointG[0][0])
def testsquaregroupops(self):
"""Do we have 8 space group operations?"""
crys = crystal.Crystal(self.squarelatt, self.basis2d)
self.assertEqual(len(crys.G), 8)
self.isspacegroup(crys)
# for g in crys.G:
# print g.rot, g.trans, g.indexmap
# print g.cartrot, g.carttrans
def testomegagroupops(self):
"""Build the omega lattice; make sure the space group is correct"""
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
self.assertEqual(crys.N, 3)
self.assertEqual(crys.atomindices, [(0, 0), (0, 1), (0, 2)])
self.assertEqual(len(crys.G), 24)
self.isspacegroup(crys)
def testcartesianposition(self):
"""Do we correctly map out our atom position (lattice vector + indices) in cartesian coord.?"""
crys = crystal.Crystal(self.fcclatt, self.basis)
lattvect = np.array([2, -1, 3])
for ind in crys.atomindices:
b = crys.basis[ind[0]][ind[1]]
pos = crys.lattice[:, 0] * (lattvect[0] + b[0]) + \
crys.lattice[:, 1] * (lattvect[1] + b[1]) + \
crys.lattice[:, 2] * (lattvect[2] + b[2])
self.assertTrue(np.allclose(pos, crys.pos2cart(lattvect, ind)))
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
for ind in crys.atomindices:
b = crys.basis[ind[0]][ind[1]]
pos = crys.lattice[:, 0] * (lattvect[0] + b[0]) + \
crys.lattice[:, 1] * (lattvect[1] + b[1]) + \
crys.lattice[:, 2] * (lattvect[2] + b[2])
self.assertTrue(np.allclose(pos, crys.pos2cart(lattvect, ind)))
def testmaptrans(self):
"""Does our map translation operate correctly?"""
basis = [[np.array([0, 0, 0])]]
trans, indexmap = crystal.maptranslation(basis, basis)
self.assertTrue(np.allclose(trans, np.array([0, 0, 0])))
self.assertEqual(indexmap, ((0,),))
oldbasis = [[np.array([0.2, 0, 0])]]
newbasis = [[np.array([-0.2, 0, 0])]]
trans, indexmap = crystal.maptranslation(oldbasis, newbasis)
self.assertTrue(np.allclose(trans, np.array([0.4, 0, 0])))
self.assertEqual(indexmap, ((0,),))
oldbasis = [[np.array([0., 0., 0.]), np.array([1. / 3., 2. / 3., 1. / 2.])]]
newbasis = [[np.array([0., 0., 0.]), np.array([-1. / 3., -2. / 3., -1. / 2.])]]
trans, indexmap = crystal.maptranslation(oldbasis, newbasis)
self.assertTrue(np.allclose(trans, np.array([1. / 3., -1. / 3., -1. / 2.])))
self.assertEqual(indexmap, ((1, 0),))
oldbasis = [[np.array([0., 0., 0.])],
[np.array([1. / 3., 2. / 3., 1. / 2.]), np.array([2. / 3., 1. / 3., 1. / 2.])]]
newbasis = [[np.array([0., 0., 0.])],
[np.array([2. / 3., 1. / 3., 1. / 2.]), np.array([1. / 3., 2. / 3., 1. / 2.])]]
trans, indexmap = crystal.maptranslation(oldbasis, newbasis)
self.assertTrue(np.allclose(trans, np.array([0., 0., 0.])))
self.assertEqual(indexmap, ((0,), (1, 0)))
oldbasis = [[np.array([0., 0., 0.]), np.array([1. / 3., 2. / 3., 1. / 2.])]]
newbasis = [[np.array([0., 0., 0.]), np.array([-1. / 4., -1. / 2., -1. / 2.])]]
trans, indexmap = crystal.maptranslation(oldbasis, newbasis)
self.assertEqual(indexmap, None)
def testfccgroupops_directions(self):
"""Test out that we can apply group operations to directions"""
crys = crystal.Crystal(self.fcclatt, self.basis)
# 1. direction
direc = np.array([2., 0., 0.])
direc2 = np.dot(direc, direc)
count = np.zeros(3, dtype=int)
for g in crys.G:
rotdirec = crys.g_direc(g, direc)
self.assertAlmostEqual(np.dot(rotdirec, rotdirec), direc2)
costheta = np.dot(rotdirec, direc) / direc2
self.assertTrue(np.isclose(costheta, 1) or np.isclose(costheta, 0) or np.isclose(costheta, -1))
count[int(round(costheta + 1))] += 1
self.assertEqual(count[0], 8) ## antiparallel
self.assertEqual(count[1], 32) ## perpendicular
self.assertEqual(count[2], 8) ## parallel
def testomegagroupops_positions(self):
"""Test out that we can apply group operations to positions"""
# 2. position = lattice vector + 2-tuple atom-index
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
lattvec = np.array([-2, 3, 1])
for ind in crys.atomindices:
pos = crys.pos2cart(lattvec, ind)
for g in crys.G:
# testing g_pos: (transform an atomic position)
rotpos = crys.g_direc(g, pos)
self.assertTrue(np.allclose(rotpos,
crys.pos2cart(*crys.g_pos(g, lattvec, ind))))
# testing g_vect: (transform a vector position in the crystal)
rotlatt, rotind = crys.g_pos(g, lattvec, ind)
rotlatt2, u = crys.g_vect(g, lattvec, crys.basis[ind[0]][ind[1]])
self.assertTrue(np.allclose(rotpos, crys.unit2cart(rotlatt2, u)))
self.assertTrue(np.all(rotlatt == rotlatt2))
self.assertTrue(np.allclose(u, crys.basis[rotind[0]][rotind[1]]))
# test point group operations:
for g in crys.pointG[ind[0]][ind[1]]:
origin = np.zeros(3, dtype=int)
rotlatt, rotind = crys.g_pos(g, origin, ind)
self.assertTrue(np.all(rotlatt == origin))
self.assertEqual(rotind, ind)
def testinverspos(self):
"""Test the inverses of pos2cart and unit2cart"""
basis = [[np.array([0., 0., 0.]),
np.array([1. / 3., 2. / 3., 0.5]),
np.array([2. / 3., 1. / 3., 0.5])]]
crys = crystal.Crystal(self.hexlatt, basis)
lattvec = np.array([-2, 3, 1])
for ind in crys.atomindices:
lattback, uback = crys.cart2unit(crys.pos2cart(lattvec, ind))
self.assertTrue(np.all(lattback == lattvec))
            self.assertTrue(np.allclose(uback, crys.basis[ind[0]][ind[1]]))
import numpy
from scipy.optimize import fsolve
def euler(simulation, cons, prim, aux):
dt = simulation.dt
rhs = simulation.rhs
return cons + dt * rhs(cons, prim, aux, simulation)
def rk2(simulation, cons, prim, aux):
dt = simulation.dt
rhs = simulation.rhs
cons1 = cons + dt * rhs(cons, prim, aux, simulation)
cons1 = simulation.bcs(cons1, simulation.grid.Npoints, simulation.grid.Ngz)
prim1, aux1 = simulation.model.cons2all(cons1, prim)
return 0.5 * (cons + cons1 + dt * rhs(cons1, prim1, aux1, simulation))
def rk3(simulation, cons, prim, aux):
dt = simulation.dt
rhs = simulation.rhs
cons1 = cons + dt * rhs(cons, prim, aux, simulation)
cons1 = simulation.bcs(cons1, simulation.grid.Npoints, simulation.grid.Ngz)
if simulation.fix_cons:
cons1 = simulation.model.fix_cons(cons1)
prim1, aux1 = simulation.model.cons2all(cons1, prim)
cons2 = (3 * cons + cons1 + dt * rhs(cons1, prim1, aux1, simulation)) / 4
cons2 = simulation.bcs(cons2, simulation.grid.Npoints, simulation.grid.Ngz)
if simulation.fix_cons:
cons2 = simulation.model.fix_cons(cons2)
prim2, aux2 = simulation.model.cons2all(cons2, prim1)
return (cons + 2 * cons2 + 2 * dt * rhs(cons2, prim2, aux2, simulation)) / 3
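# euler, rk2 and rk3 above are forward Euler, Heun's method (SSP2) and the
# Shu-Osher SSP3 scheme respectively, written in convex-combination form; for
# rk3, with L the spatial right-hand side, the update is
#   u1      = u^n + dt L(u^n)
#   u2      = (3 u^n + u1 + dt L(u1)) / 4
#   u^{n+1} = (u^n + 2 u2 + 2 dt L(u2)) / 3
# with boundary conditions and the conserved->primitive inversion redone after
# each stage.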
def rk_euler_split(rk_method, source):
def timestepper(simulation, cons, prim, aux):
consstar = rk_method(simulation, cons, prim, aux)
primstar, auxstar = simulation.model.cons2all(consstar, prim)
return consstar + simulation.dt * source(consstar, primstar, auxstar)
return timestepper
def rk_backward_euler_split(rk_method, source):
def timestepper(simulation, cons, prim, aux):
consstar = rk_method(simulation, cons, prim, aux)
primstar, auxstar = simulation.model.cons2all(consstar, prim)
def residual(consguess, cons_star, prim_old):
consguess = consguess.reshape(consguess.shape[0], 1)
prim_old = prim_old.reshape(prim_old.shape[0], 1)
cons_star = cons_star.reshape(cons_star.shape[0], 1)
primguess, auxguess = simulation.model.cons2all(consguess, prim_old)
return (consguess - cons_star - simulation.dt*source(consguess, primguess, auxguess)).ravel()
consnext = numpy.zeros_like(cons)
cons_initial_guess = consstar + \
0.5*simulation.dt*source(consstar,
primstar,
auxstar)
for i in range(cons.shape[1]):
consnext[:, i] = fsolve(residual, cons_initial_guess[:,i].ravel(),
args=(consstar[:, i].ravel(), prim[:, i].ravel()))
return numpy.reshape(consnext, cons.shape)
return timestepper
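# rk_euler_split and rk_backward_euler_split wrap an explicit RK step for the
# flux terms with a simple operator split for the source terms: the source is
# then applied with a forward Euler step, or (backward variant) an implicit
# Euler step solved cell by cell with scipy's fsolve.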
def imex222(source, source_fprime=None, source_guess=None):
gamma = 1 - 1/numpy.sqrt(2)
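    # gamma = 1 - 1/sqrt(2) is the standard choice that makes the two-stage
    # diagonally implicit (stiff) part of this IMEX(2,2,2) scheme L-stable.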
def residual1(consguess, dt, cons, prim, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = consguess - cons - dt * gamma * source(consguess,
primguess, auxguess)
if numpy.any(numpy.isnan(res)):
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
def residual2(consguess, dt, cons, prim, k1, source1, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
k1 = k1.reshape((cons.shape[0], 1))
source1 = source1.reshape((cons.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = (consguess - cons - dt * (k1 + (1 - 2*gamma)*source1 + \
gamma*source(consguess, primguess, auxguess))).ravel()
if numpy.any(numpy.isnan(res)):
res = 1e6 * numpy.ones_like(consguess).ravel()
return res
def residual2_noflux(consguess, dt, cons, prim, source1, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
source1 = source1.reshape((cons.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = (consguess - cons - dt * ((1 - 2*gamma)*source1 + \
gamma*source(consguess, primguess, auxguess))).ravel()
if numpy.any(numpy.isnan(res)):
res = 1e6 * numpy.ones_like(consguess).ravel()
return res
# def residual1_prime(consguess, dt, cons, prim, simulation):
# consguess = consguess.reshape((cons.shape[0], 1))
# jac = numpy.eye(cons.shape[0])
# primguess, auxguess = simulation.model.cons2all(consguess, prim)
# jac -= dt * gamma * source_fprime(consguess, primguess, auxguess)
# return jac
# def residual2_prime(consguess, dt, cons, prim, k1, source1, simulation):
# """
# Whilst the result is idential to residual1_prime, the argument list
# is of course different
# """
# consguess = consguess.reshape((cons.shape[0], 1))
# jac = numpy.eye(cons.shape[0])
# primguess, auxguess = simulation.model.cons2all(consguess, prim)
# jac -= dt * gamma * source_fprime(consguess, primguess, auxguess)
# return jac
residual1_prime = None
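    # With the analytic Jacobian left as None, fsolve falls back to its own
    # finite-difference approximation in the implicit solves below.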
def timestepper(simulation, cons, prim, aux):
Np = cons.shape[1]
dt = simulation.dt
rhs = simulation.rhs
consguess = cons.copy()
if source_guess:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
consguess = source_guess(consguess, primguess, auxguess)
cons1 = numpy.zeros_like(cons)
for i in range(Np):
cons1[:,i] = fsolve(residual1, consguess[:,i],
fprime=residual1_prime,
args=(dt, cons[:,i], prim[:,i], simulation),
xtol = 1e-12)
cons1 = simulation.bcs(cons1, simulation.grid.Npoints, simulation.grid.Ngz)
prim1, aux1 = simulation.model.cons2all(cons1, prim)
k1 = rhs(cons1, prim1, aux1, simulation)
source1 = source(cons1, prim1, aux1)
cons2 = numpy.zeros_like(cons)
for i in range(Np):
consguess_source = fsolve(residual2_noflux, cons1[:,i],
fprime=residual1_prime,
args=(dt, cons[:,i], prim1[:,i], source1[:,i], simulation),
xtol = 1e-12)
consguess_flux = cons1[:,i] + dt * k1[:, i]
consguess = 0.5 * (consguess_source + consguess_flux)
cons2[:,i] = fsolve(residual2, consguess,
fprime=residual1_prime,
args=(dt, cons[:,i], prim1[:,i], k1[:,i], source1[:,i], simulation),
xtol = 1e-12)
cons2 = simulation.bcs(cons2, simulation.grid.Npoints, simulation.grid.Ngz)
prim2, aux2 = simulation.model.cons2all(cons2, prim1)
k2 = rhs(cons2, prim2, aux2, simulation)
source2 = source(cons2, prim2, aux2)
return cons + simulation.dt * (k1 + k2 + source1 + source2) / 2
return timestepper
def imex433(source):
alpha = 0.24169426078821
beta = 0.06042356519705
eta = 0.12915286960590
def residual1(consguess, dt, cons, prim, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = consguess - cons - dt * alpha * source(consguess,
primguess, auxguess)
if numpy.any(numpy.isnan(res)):
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
def residual2(consguess, dt, cons, prim, source1, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
source1 = source1.reshape((source1.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = consguess - cons - dt * (-alpha*source1 + alpha*source(consguess,
primguess,
auxguess))
if numpy.any(numpy.isnan(res)):
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
def residual3(consguess, dt, cons, prim, source2, k2, simulation):
consguess = consguess.reshape((cons.shape[0], 1))
cons = cons.reshape((cons.shape[0], 1))
prim = prim.reshape((prim.shape[0], 1))
source2 = source2.reshape((source2.shape[0], 1))
k2 = k2.reshape((k2.shape[0], 1))
try:
primguess, auxguess = simulation.model.cons2all(consguess, prim)
except ValueError:
res = 1e6 * numpy.ones_like(consguess)
return res.ravel()
res = consguess - cons - dt * (k2 + (1-alpha)*source2 + alpha*source(consguess,
primguess,
auxguess))
        if numpy.any(numpy.isnan(res)):
            res = 1e6 * numpy.ones_like(consguess)
        return res.ravel()
import sys
from pathlib import Path
import pytest
import numpy as np
import penguins as pg
datadir = Path(__file__).parents[1].resolve() / "penguins-testdata"
# -- Tests on _Dataset() base class -----------------
def test_dataset_initialisation():
"""Tests that parameters are being initialised correctly by the various
__init__() methods.
"""
# Parameters that are supposed to be read in upon initialisation.
# The rest are lazily read in.
initial_pars = ["aq", "td", "sw", "sfo1", "dw",
"o1p", "o1", "bf1", "si", "nuc1"]
# Check 1D
proton = pg.read(datadir, 1)
assert all(par in proton.pars for par in initial_pars)
assert proton["aq"] == pytest.approx(2.9360127)
assert proton["td"] == 65536
assert proton["sw"] == pytest.approx(15.9440, rel=1e-4)
assert proton["sfo1"] == pytest.approx(699.9935)
assert proton["dw"] == pytest.approx(44.800)
assert proton["bf1"] == pytest.approx(699.99)
assert proton["o1p"] == pytest.approx(5.00)
assert proton["o1"] == pytest.approx(3499.95)
assert proton["si"] == 65536
assert proton["nuc1"] == "1H"
# Check 2D
cosy = pg.read(datadir, 2)
assert all(par in cosy.pars for par in initial_pars)
assert np.allclose(cosy["aq"], np.array([0.0182784, 0.0731136]))
assert np.array_equal(cosy["td"], np.array([256, 1024]))
assert np.allclose(cosy["sw"], np.array([10.0041, 10.0041]))
assert np.allclose(cosy["sfo1"], np.array([699.9928, 699.9928]))
assert cosy["inf1"] == pytest.approx(142.8001)
assert cosy["dw"] == pytest.approx(71.400)
assert np.allclose(cosy["bf1"], np.array([699.99, 699.99]))
assert np.allclose(cosy["o1p"], np.array([4.00, 4.00]))
assert np.allclose(cosy["o1"], np.array([2799.96, 2799.96]))
assert np.array_equal(cosy["si"], np.array([1024, 2048]))
assert cosy["nuc1"] == ("1H", "1H")
def test_parDict():
"""Tests the implementation of parDict, i.e. that it is reading values
lazily and accurately.
"""
# Check 1D
proton = pg.read(datadir, 1)
assert "ns" not in proton.pars
assert proton["ns"] == 16
assert "pulprog" not in proton.pars
assert proton["pulprog"] == "zg60"
assert "rg" not in proton.pars
assert proton["rg"] == pytest.approx(7.12)
assert "lb" not in proton.pars
assert proton["lb"] == pytest.approx(0.3)
assert "nc_proc" not in proton.pars
assert proton["nc_proc"] == pytest.approx(-6)
# Check 2D
cosy = pg.read(datadir, 2)
assert "ns" not in cosy.pars
assert cosy["ns"] == 2
assert "pulprog" not in cosy.pars
assert cosy["pulprog"] == "jy-clipcosy"
assert "rg" not in cosy.pars
assert cosy["rg"] == pytest.approx(36)
assert "lb" not in cosy.pars
assert np.allclose(cosy["lb"], np.array([0, 0]))
assert "phc0" not in cosy.pars
assert np.allclose(cosy["phc0"], np.array([25.363, 90.363]))
assert "nc_proc" not in cosy.pars
assert cosy["nc_proc"] == pytest.approx(-3)
assert "gpnam12" not in cosy.pars
assert cosy["gpnam12"] == "SMSQ10.100"
# Check parameter with space and caps
assert "GPZ 12" not in cosy.pars
assert cosy["GPZ 12"] == pytest.approx(43)
assert "gpz12" not in cosy.pars
assert cosy["gpz12"] == cosy["GPZ 12"]
# Check deletion
assert "gpz12" in cosy.pars
del cosy["gpz12"]
assert "gpz12" not in cosy.pars
# Check errors
    with pytest.raises(KeyError):
        cosy["dontexist"]
    with pytest.raises(KeyError):
        proton["dontexist"]
# -- Tests on raw data handling ---------------------
def test_1d_raw_fid():
"""Test 1D raw data readin."""
proton = pg.read(datadir, 1)
fid = proton.fid
# /2 because fid is complex points and TD is both real + imag
assert fid.shape == (proton["td"] / 2,)
# Check raw_data accessor
assert proton.raw_data() is fid
# Check group delay shifting
fid2 = proton.raw_data(shift_grpdly=True)
    assert np.allclose(fid[67:167], fid2[:100])
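    # raw_data(shift_grpdly=True) returns the FID shifted by the digital-filter
    # group delay (67 complex points for this dataset), so the two windows
    # compared above should contain the same samples.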
""" Concept Activation Vectors in Python """
import numpy as np
import tensorflow as tf
from keras import backend as k
from keras.models import Sequential
from keras.layers import InputLayer, Reshape, Flatten
from sklearn.linear_model import SGDClassifier
class TCAV(object):
""" Class for concept activation vectors for Keras models.
Attributes:
model: A Sequential Keras model
model_f: A Sequential Keras model that is the first half of model
model_h: A Sequential Keras model that is the second half of model
cav: A numpy array containing the concept activation vector
sensitivity: A numpy array containing sensitivities
tcav_score: A list of the TCAV scores for the classes
y_labels: A numpy array containing class labels
"""
def __init__(self, model=None):
""" Initialize the class with empty variables
"""
self.model = model
self.model_f = None
self.model_h = None
self.cav = None
self.sensitivity = None
self.tcav_score = []
self.y_labels = None
def set_model(self, model=None):
""" Function to set the model for the TCAV object
Args:
model: A Keras 'Sequential' model
Raises:
ValueError: If the model is not of the Keras Sequential type
"""
        if model is not None and not isinstance(model, Sequential):
            raise ValueError("The model must be a Keras 'Sequential' model!")
        self.model = model
def split_model(self, bottleneck, conv_layer=True):
""" Split the model on a given bottleneck layer
Args:
bottleneck: An integer specifying which layer to split the model
conv_layer: A Boolean value specifying if we are splitting on a convolutional layer
Returns:
model_f: The model containing up to layer 'bottleneck'
model_h: The model containing from layer 'bottleneck' to the end
Raises:
ValueError: If the bottleneck layer value is less than 0 or greater than the total number of layers
Warning: If the bottleneck layer is a convolutional layer
"""
if bottleneck < 0 or bottleneck >= len(self.model.layers):
raise ValueError(
"Bottleneck layer must be greater than or equal to 0 and less than the number of layers!"
)
self.model_f = Sequential()
self.model_h = Sequential()
for current_layer in range(0, bottleneck + 1):
self.model_f.add(self.model.layers[current_layer])
if conv_layer:
self.model_f.add(Flatten())
self.model_h.add(
InputLayer(
input_shape=self.model_f.layers[bottleneck + 1].output_shape[1:]
)
)
self.model_h.add(Reshape(self.model.layers[bottleneck + 1].input_shape[1:]))
for current_layer in range(bottleneck + 1, len(self.model.layers)):
self.model_h.add(self.model.layers[current_layer])
else:
self.model_h.add(
InputLayer(
input_shape=self.model.layers[bottleneck + 1].input_shape[1:]
)
)
for current_layer in range(bottleneck + 1, len(self.model.layers)):
self.model_h.add(self.model.layers[current_layer])
def _create_counterexamples(self, x_concept):
""" A function to create random counterexamples
Args:
x_concept: The training concept data
Return:
counterexamples: A numpy array of counterexamples
"""
n = x_concept.shape[0]
height = x_concept.shape[1]
width = x_concept.shape[2]
channels = x_concept.shape[3]
counterexamples = []
for i in range(0, n):
counterexamples.append(
np.rint(np.random.rand(height, width, channels) * 255)
)
return np.array(counterexamples)
def train_cav(self, x_concept):
""" Calculate the concept activation vector
Args:
x_concept: A numpy array of concept training data
Returns:
cav: A concept activation vector
"""
counterexamples = self._create_counterexamples(x_concept)
x_train_concept = np.append(x_concept, counterexamples, axis=0)
y_train_concept = np.repeat([1, 0], [x_concept.shape[0]], axis=0)
concept_activations = self.model_f.predict(x_train_concept)
lm = SGDClassifier(
loss="perceptron", eta0=1, learning_rate="constant", penalty=None
)
lm.fit(concept_activations, y_train_concept)
coefs = lm.coef_
self.cav = np.transpose(-1 * coefs)
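        # The CAV is the (negated, transposed) weight vector of the linear
        # classifier, i.e. the normal to the hyperplane separating concept
        # activations from random-counterexample activations at the bottleneck.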
def calculate_sensitivity(self, x_train, y_train):
""" Calculate and return the sensitivity
Args:
x_train: A numpy array of the training data
y_train: A numpy array of the training labels
"""
model_f_activations = self.model_f.predict(x_train)
reshaped_labels = np.array(y_train).reshape((x_train.shape[0], 1))
tf_y_labels = tf.convert_to_tensor(reshaped_labels, dtype=np.float32)
loss = k.binary_crossentropy(tf_y_labels, self.model_h.output)
grad = k.gradients(loss, self.model_h.input)
gradient_func = k.function([self.model_h.input], grad)
calc_grad = gradient_func([model_f_activations])[0]
sensitivity = np.dot(calc_grad, self.cav)
self.sensitivity = sensitivity
self.y_labels = y_train
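        # The stored sensitivity is the directional derivative of the binary
        # cross-entropy loss along the concept direction at the bottleneck:
        #   S(x) = grad_a L(model_h(a), y) . v_CAV,  with  a = model_f(x).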
def print_sensitivity(self):
""" Print the sensitivities in a readable way
"""
if type(self.y_labels) == list:
self.y_labels = np.array(self.y_labels)
print(
"The sensitivity of class 1 is ",
str(
np.sum(self.sensitivity[np.where(self.y_labels == 1)[0]] > 0)
/ np.where(self.y_labels == 1)[0].shape[0]
),
)
print(
"The sensitivity of class 0 is ",
str(
np.sum(self.sensitivity[np.where(self.y_labels == 0)[0]] > 0)
                / np.where(self.y_labels == 0)[0].shape[0]
            ),
        )
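# Minimal usage sketch for the TCAV class above (the model and the arrays below
# are hypothetical placeholders, not provided by this module):
#
#     tcav = TCAV()
#     tcav.set_model(keras_model)                   # a trained Sequential model
#     tcav.split_model(bottleneck=3, conv_layer=True)
#     tcav.train_cav(x_concept)                     # images showing the concept
#     tcav.calculate_sensitivity(x_train, y_train)
#     tcav.print_sensitivity()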
import tensorflow as tf
import numpy as np
np.set_printoptions(precision=2, linewidth=200)
import cv2
import os
import time
import sys
#import tf_nndistance
import argparse
import glob
import PIL
import scipy.ndimage as ndimage
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from utils import *
#from plane_utils import *
from modules import *
from train_planenet import build_graph
#from train_sample import build_graph as build_graph_sample
from planenet import PlaneNet
from RecordReaderAll import *
#from SegmentationRefinement import *
from crfasrnn.crfasrnn_layer import CrfRnnLayer
#ALL_TITLES = ['planenet', 'pixelwise', 'pixelwise+RANSAC', 'depth observation+RANSAC', 'pixelwise+semantics+RANSAC', 'gt']
#ALL_METHODS = [('bl2_ll1_bw0.5_pb_pp_sm0', ''), ('pb_pp', 'pixelwise_1'), ('pb_pp', 'pixelwise_2'), ('pb_pp', 'pixelwise_3'), ('pb_pp', 'semantics'), ('pb_pp', 'gt')]
ALL_TITLES = ['PlaneNet', 'Oracle NYU toolbox', 'NYU toolbox', 'Oracle Manhattan', 'Manhattan', 'Oracle Piecewise', 'Piecewise']
#ALL_TITLES = ['PlaneNet', '[25] + depth', '[25]', '[9] + depth', '[9]', '[26] + depth', '[26]']
#ALL_METHODS = [('bl0_dl0_bw0.5_pb_pp_ps_sm0', ''), ('ll1_pb_pp', ''), ('bl0_ll1_bw0.5_pb_pp_ps_sm0', ''), ('ll1_bw0.5_pb_pp_sm0', '')]
#ALL_METHODS = [('bl0_dl0_ll1_bw0.5_pb_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_ps', ''), ('bl0_dl0_ll1_ds0_pb_pp', '')]
#ALL_METHODS = [('bl0_dl0_ll1_pb_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_sm0', 'pixelwise_2'), ('bl0_dl0_ll1_pb_pp_sm0', 'pixelwise_3'), ('bl0_dl0_ll1_pb_pp_sm0', 'pixelwise_6'), ('bl0_dl0_ll1_pb_pp_sm0', 'pixelwise_5')]
#ALL_METHODS = [('bl0_dl0_ll1_pb_pp_sm0', ''), ('bl0_dl0_crfrnn10_sm0', ''), ('bl0_dl0_ll1_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_sm0', ''), ('bl0_dl0_ll1_pb_pp_sm0', '')]
#ALL_METHODS = [('bl0_dl0_ll1_pb_pp_sm0', '', 0), ('bl0_dl0_ll1_pb_pp_sm0', 'crfrnn', 0), ('bl0_dl0_crfrnn10_sm0', '')]
#ALL_METHODS = [['planenet_hybrid3_bl0_dl0_ll1_pb_pp_sm0', '', 0, 0], ['planenet_hybrid3_bl0_dl0_ll1_pb_pp_ps_sm0', 'pixelwise_2', 1, 0], ['', 'pixelwise_3', 1, 0], ['', 'pixelwise_4', 1, 0], ['', 'pixelwise_5', 1, 0], ['', 'pixelwise_6', 1, 0], ['', 'pixelwise_7', 1, 0]]
#ALL_METHODS = [['planenet_hybrid3_bl0_dl0_ll1_pb_pp_sm0', '', 0, 0], ['planenet_hybrid3_bl0_dl0_ll1_pb_pp_ps_sm0', 'pixelwise_2', 1, 0], ['', 'pixelwise_3', 1, 0], ['', 'pixelwise_4', 1, 0], ['', 'pixelwise_5', 1, 0], ['', 'pixelwise_6', 1, 0], ['', 'pixelwise_7', 1, 0]]
ALL_METHODS = [['sample_np10_hybrid3_bl0_dl0_ds0_crfrnn5_sm0', '', 0, 0], ['planenet_hybrid3_bl0_dl0_ll1_pb_pp_ps_sm0', 'pixelwise_2', 1, 0], ['', 'pixelwise_3', 1, 0], ['', 'pixelwise_4', 1, 0], ['', 'pixelwise_5', 1, 0], ['', 'pixelwise_6', 1, 0], ['', 'pixelwise_7', 1, 0]]
#ALL_METHODS = [('ll1_pb_pp', 'pixelwise_1'), ('crf1_pb_pp', 'pixelwise_2'), ('bl0_ll1_bw0.5_pb_pp_ps_sm0', 'pixelwise_3'), ('ll1_bw0.5_pb_pp_sm0', 'pixelwise_4')]
#ALL_TITLES = ['planenet', 'pixelwise']
#ALL_METHODS = [('bl0_ll1_bw0.5_pb_pp_ps_sm0', ''), ('bl0_ll1_bw0.5_pb_pp_ps_sm0', 'pixelwise_1')]
#ALL_TITLES = ['crf', 'different matching']
#ALL_METHODS = [('pb_pp_sm0', 'crf'), ('pb_pp_sm0', '')]
def writeHTML(options):
from html import HTML
titles = options.titles
h = HTML('html')
h.p('Results')
h.br()
path = '.'
#methods = ['planenet', 'pixelwise', 'pixelwise+RANSAC', 'GT+RANSAC', 'planenet+crf', 'pixelwise+semantics+RANSAC']
#methods = ['planenet', 'pixelwise', 'pixelwise+RANSAC', 'GT+RANSAC']
for index in xrange(options.numImages):
t = h.table(border='1')
r_inp = t.tr()
r_inp.td('input ' + str(index))
r_inp.td().img(src=path + '/' + str(index) + '_image.png')
r_inp.td().img(src=path + '/' + str(index) + '_depth_gt.png')
r_inp.td().img(src=path + '/' + str(index) + '_segmentation_gt.png')
r_inp.td().img(src=path + '/' + str(index) + '_semantics_gt.png')
r_inp.td().img(src=path + '/' + str(index) + '_depth_gt_plane.png')
r_inp.td().img(src=path + '/' + str(index) + '_depth_gt_diff.png')
# r = t.tr()
# r.td('PlaneNet prediction')
# r.td().img(src=firstFolder + '/' + str(index) + '_segmentation_pred.png')
# r.td().img(src=firstFolder + '/' + str(index) + '_depth_pred.png')
r = t.tr()
r.td('methods')
for method_index, method in enumerate(titles):
r.td(method)
continue
r = t.tr()
r.td('segmentation')
for method_index, method in enumerate(titles):
r.td().img(src=path + '/' + str(index) + '_segmentation_pred_' + str(method_index) + '.png')
continue
r = t.tr()
r.td('depth')
for method_index, method in enumerate(titles):
r.td().img(src=path + '/' + str(index) + '_depth_pred_' + str(method_index) + '.png')
continue
h.br()
continue
metric_titles = ['depth error 0.1', 'depth error 0.2', 'depth error 0.3', 'IOU 0.3', 'IOU 0.5', 'IOU 0.7']
h.p('Curves on plane accuracy')
for title in metric_titles:
h.img(src='curve_plane_' + title.replace(' ', '_') + '.png')
continue
h.p('Curves on pixel coverage')
for title in metric_titles:
h.img(src='curve_pixel_' + title.replace(' ', '_') + '.png')
continue
html_file = open(options.test_dir + '/index.html', 'w')
html_file.write(str(h))
html_file.close()
return
def evaluatePlanes(options):
#writeHTML(options)
#exit(1)
if not os.path.exists(options.test_dir):
os.system("mkdir -p %s"%options.test_dir)
pass
results = getResults(options)
gt_dict = results['gt']
predictions = results['pred']
saving = True
if gt_dict['image'].shape[0] != options.numImages or options.useCache == 1:
saving = False
pass
for key, value in gt_dict.iteritems():
if options.imageIndex >= 0:
gt_dict[key] = value[options.imageIndex:options.imageIndex + 1]
elif value.shape[0] > options.numImages:
gt_dict[key] = value[:options.numImages]
pass
continue
for pred_dict in predictions:
for key, value in pred_dict.iteritems():
if options.imageIndex >= 0:
pred_dict[key] = value[options.imageIndex:options.imageIndex + 1]
elif value.shape[0] > options.numImages:
pred_dict[key] = value[:options.numImages]
pass
continue
continue
#methods = ['planenet', 'pixelwise+RANSAC', 'GT+RANSAC']
#predictions[2] = predictions[3]
for image_index in xrange(options.visualizeImages):
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_image.png', gt_dict['image'][image_index])
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt.png', drawDepthImage(gt_dict['depth'][image_index]))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_normal_gt.png', drawNormalImage(gt_dict['normal'][image_index]))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_gt.png', drawSegmentationImage(np.concatenate([gt_dict['segmentation'][image_index], 1 - np.expand_dims(gt_dict['plane_mask'][image_index], -1)], axis=2), blackIndex=options.numOutputPlanes))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_semantics_gt.png', drawSegmentationImage(gt_dict['semantics'][image_index], blackIndex=0))
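        # Rebuild a piecewise-planar depth map from the ground-truth planes:
        # render one depth map per plane, blend them with the (one-hot)
        # segmentation, and fall back to the raw depth for non-planar pixels.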
plane_depths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
all_depths = np.concatenate([plane_depths, np.expand_dims(gt_dict['depth'][image_index], -1)], axis=2)
depth = np.sum(all_depths * np.concatenate([gt_dict['segmentation'][image_index], 1 - np.expand_dims(gt_dict['plane_mask'][image_index], -1)], axis=2), axis=2)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt_plane.png', drawDepthImage(depth))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt_diff.png', drawMaskImage((depth - gt_dict['depth'][image_index]) * 5 + 0.5))
for method_index, pred_dict in enumerate(predictions):
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_dict['depth'][image_index]))
#if 'pixelwise' in options.methods[method_index][1]:
#continue
segmentation = pred_dict['segmentation'][image_index]
#segmentation = np.concatenate([segmentation, pred_dict['np_mask'][image_index]], axis=2)
numPlanes = options.numOutputPlanes
if 'pixelwise' in options.methods[method_index][1]:
numPlanes = pred_dict['plane'][image_index].shape[0]
#print(numPlanes)
pass
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(segmentation, blackIndex=numPlanes))
continue
continue
#post processing
for method_index, method in enumerate(options.methods):
if method[1] == '':
continue
if len(method) < 4 or method[3] == 0:
continue
if len(method) >= 3 and method[2] >= 0:
pred_dict = predictions[method[2]]
else:
pred_dict = predictions[method_index]
pass
if method[1] == 'graphcut':
#pred_dict = gt_dict
predSegmentations = []
predDepths = []
for image_index in xrange(options.numImages):
#if image_index != 3:
#continue
print('graph cut ' + str(image_index))
segmentation = np.argmax(np.concatenate([pred_dict['segmentation'][image_index], 1 - np.expand_dims(pred_dict['plane_mask'][image_index], -1)], axis=2), axis=2)
#pred_s = getSegmentationsGraphCut(pred_dict['plane'][image_index], gt_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['normal'][image_index], segmentation, pred_dict['semantics'][image_index], pred_dict['info'][image_index], gt_dict['num_planes'][image_index])
pred_p, pred_s, numPlanes = removeSmallSegments(pred_dict['plane'][image_index], gt_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['normal'][image_index], segmentation, pred_dict['semantics'][image_index], pred_dict['info'][image_index], gt_dict['num_planes'][image_index])
#pred_p, pred_s, numPlanes = pred_dict['plane'][image_index], segmentation, gt_dict['num_planes'][image_index]
print((gt_dict['num_planes'][image_index], numPlanes))
planeDepths = calcPlaneDepths(pred_p, WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, np.expand_dims(pred_dict['depth'][image_index], -1)], axis=2)
pred_d = allDepths.reshape(-1, options.numOutputPlanes + 1)[np.arange(WIDTH * HEIGHT), pred_s.reshape(-1)].reshape(HEIGHT, WIDTH)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
continue
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
            new_pred_dict['segmentation'] = np.array(predSegmentations)
            new_pred_dict['depth'] = np.array(predDepths)
if method_index < len(predictions):
predictions[method_index] = new_pred_dict
else:
predictions.append(new_pred_dict)
pass
if method[1] == 'crf_tf':
predSegmentations = []
predDepths = []
image_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 3], name='image')
segmentation_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, options.numOutputPlanes + 1], name='segmentation')
plane_inp = tf.placeholder(tf.float32, shape=[1, options.numOutputPlanes, 3], name='plane')
non_plane_depth_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 1], name='non_plane_depth')
info_inp = tf.placeholder(tf.float32, shape=[20], name='info')
plane_parameters = tf.reshape(plane_inp, (-1, 3))
plane_depths = planeDepthsModule(plane_parameters, WIDTH, HEIGHT, info_inp)
plane_depths = tf.transpose(tf.reshape(plane_depths, [HEIGHT, WIDTH, -1, options.numOutputPlanes]), [2, 0, 1, 3])
all_depths = tf.concat([plane_depths, non_plane_depth_inp], axis=3)
planesY = plane_inp[:, :, 1]
planesD = tf.maximum(tf.norm(plane_inp, axis=-1), 1e-4)
planesY /= planesD
planesY = tf.concat([planesY, tf.ones((1, 1))], axis=1)
#refined_segmentation = crfModule(segmentation_inp, plane_inp, non_plane_depth_inp, info_inp, numOutputPlanes = options.numOutputPlanes, numIterations=5)
imageDiff = calcImageDiff(image_inp)
#refined_segmentation, debug_dict = segmentationRefinementModule(segmentation_inp, all_depths, planesY, imageDiff, numOutputPlanes = options.numOutputPlanes + 1, numIterations=5)
refined_segmentation, debug_dict = meanfieldModule(segmentation_inp, all_depths, planesY, imageDiff, numOutputPlanes = options.numOutputPlanes + 1, maxDepthDiff=0.2, varDepthDiff=pow(0.2, 2))
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session(config=config) as sess:
sess.run(init_op)
for image_index in xrange(options.numImages):
#if image_index != 1:
#continue
print('crf tf ' + str(image_index))
allSegmentations = np.concatenate([pred_dict['segmentation'][image_index], pred_dict['np_mask'][image_index]], axis=2)
allSegmentations = softmax(allSegmentations)
pred_s, debug = sess.run([refined_segmentation, debug_dict], feed_dict={segmentation_inp: np.expand_dims(allSegmentations, 0), plane_inp: np.expand_dims(pred_dict['plane'][image_index], 0), non_plane_depth_inp: np.expand_dims(pred_dict['np_depth'][image_index], 0), info_inp: gt_dict['info'][image_index], image_inp: gt_dict['image'][image_index:image_index + 1]})
pred_s = pred_s[0]
planeDepths = calcPlaneDepths(pred_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, pred_dict['np_depth'][image_index]], axis=2)
pred_d = np.sum(allDepths * pred_s, axis=-1)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
if 'diff' in debug:
segmentation = np.argmax(allSegmentations, axis=-1)
for planeIndex in xrange(options.numOutputPlanes + 1):
cv2.imwrite('test/mask_' + str(planeIndex) + '.png', drawMaskImage(allSegmentations[:, :, planeIndex]))
continue
for planeIndex in xrange(debug['diff'].shape[-1]):
cv2.imwrite('test/cost_mask_' + str(planeIndex) + '.png', drawMaskImage(debug['diff'][0, :, :, planeIndex] / 2))
continue
exit(1)
pass
continue
pass
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
segmentations = np.array(predSegmentations)
new_pred_dict['segmentation'] = segmentations[:, :, :, :options.numOutputPlanes]
new_pred_dict['non_plane_mask'] = segmentations[:, :, :, options.numOutputPlanes:options.numOutputPlanes + 1]
#new_pred_dict['non_plane_mask'] = segmentations[:, :, :, :options.numOutputPlanes]
new_pred_dict['depth'] = np.array(predDepths)
if method_index < len(predictions):
predictions[method_index] = new_pred_dict
else:
predictions.append(new_pred_dict)
pass
pass
if method[1] == 'crf':
predSegmentations = []
predDepths = []
for image_index in xrange(options.numImages):
print('crf ' + str(image_index))
boundaries = pred_dict['boundary'][image_index]
boundaries = sigmoid(boundaries)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_boundary.png', drawMaskImage(np.concatenate([boundaries, np.zeros((HEIGHT, WIDTH, 1))], axis=2)))
allSegmentations = np.concatenate([pred_dict['segmentation'][image_index], pred_dict['np_mask'][image_index]], axis=2)
allSegmentations = softmax(allSegmentations)
planeDepths = calcPlaneDepths(pred_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, pred_dict['np_depth'][image_index]], axis=2)
#boundaries = np.concatenate([np.ones((allSegmentations.shape[0], allSegmentations.shape[1], 1)), -np.ones((allSegmentations.shape[0], allSegmentations.shape[1], 1))], axis=2)
#if options.imageIndex >= 0:
#boundaries = cv2.imread(options.test_dir + '/' + str(options.imageIndex) + '_boundary.png')
#else:
#boundaries = cv2.imread(options.test_dir + '/' + str(image_index) + '_boundary.png')
#pass
#boundaries = (boundaries > 128).astype(np.float32)[:, :, :2]
allDepths[:, :, options.numOutputPlanes] = 0
pred_s = refineSegmentation(gt_dict['image'][image_index], allSegmentations, allDepths, boundaries, numOutputPlanes = 20, numIterations=20, numProposals=5)
pred_d = allDepths.reshape(-1, options.numOutputPlanes + 1)[np.arange(WIDTH * HEIGHT), pred_s.reshape(-1)].reshape(HEIGHT, WIDTH)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
#segmentation = np.argmax(allSegmentations, axis=-1)
# for planeIndex in xrange(options.numOutputPlanes + 1):
# cv2.imwrite('test/mask_' + str(planeIndex) + '.png', drawMaskImage(allSegmentations[:, :, planeIndex]))
# continue
#cv2.imwrite(options.test_dir + '/mask_' + str(21) + '.png', drawDepthImage(pred_dict['np_depth'][0]))
#for plane_index in xrange(options.numOutputPlanes + 1):
#cv2.imwrite(options.test_dir + '/mask_' + str(plane_index) + '.png', drawMaskImage(pred_s == plane_index))
#continue
#exit(1)
continue
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
new_pred_dict['segmentation'] = np.array(predSegmentations)
new_pred_dict['depth'] = np.array(predDepths)
if method_index < len(predictions):
predictions[method_index] = new_pred_dict
else:
predictions.append(new_pred_dict)
pass
pass
if 'pixelwise' in method[1]:
predPlanes = []
predSegmentations = []
predDepths = []
predNumPlanes = []
for image_index in xrange(options.numImages):
pred_d = pred_dict['np_depth'][image_index].squeeze()
pred_n = pred_dict['np_normal'][image_index].squeeze()
if '_1' in method[1]:
pred_s = np.zeros(pred_dict['segmentation'][image_index].shape)
pred_p = np.zeros(pred_dict['plane'][image_index].shape)
elif '_2' in method[1]:
parameters = {'distanceCostThreshold': 0.1, 'smoothnessWeight': 0.05, 'semantics': True}
                    pred_p, pred_s = fitPlanesNYU(gt_dict['image'][image_index], gt_dict['depth'][image_index].squeeze(), gt_dict['normal'][image_index], gt_dict['semantics'][image_index], gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
elif '_3' in method[1]:
parameters = {'distanceCostThreshold': 0.1, 'smoothnessWeight': 0.03, 'semantics': True, 'distanceThreshold': 0.05}
                    pred_p, pred_s = fitPlanesNYU(gt_dict['image'][image_index], pred_d, pred_n, pred_dict['semantics'][image_index], gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
elif '_4' in method[1]:
parameters = {'numProposals': 5, 'distanceCostThreshold': 0.1, 'smoothnessWeight': 30, 'dominantLineThreshold': 3, 'offsetGap': 0.1}
pred_p, pred_s = fitPlanesManhattan(gt_dict['image'][image_index], gt_dict['depth'][image_index].squeeze(), gt_dict['normal'][image_index], gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
pred_d = np.zeros((HEIGHT, WIDTH))
elif '_5' in method[1]:
parameters = {'numProposals': 5, 'distanceCostThreshold': 0.1, 'smoothnessWeight': 100, 'dominantLineThreshold': 3, 'offsetGap': 0.6}
pred_p, pred_s = fitPlanesManhattan(gt_dict['image'][image_index], pred_d, pred_n, gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
pred_d = np.zeros((HEIGHT, WIDTH))
elif '_6' in method[1]:
parameters = {'distanceCostThreshold': 0.1, 'smoothnessWeight': 300, 'numProposals': 5, 'normalWeight': 1, 'meanshift': 0.2}
pred_p, pred_s = fitPlanesPiecewise(gt_dict['image'][image_index], gt_dict['depth'][image_index].squeeze(), gt_dict['normal'][image_index], gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
pred_d = np.zeros((HEIGHT, WIDTH))
elif '_7' in method[1]:
parameters = {'numProposals': 5, 'distanceCostThreshold': 0.1, 'smoothnessWeight': 300, 'normalWeight': 1, 'meanshift': 0.2}
pred_p, pred_s = fitPlanesPiecewise(gt_dict['image'][image_index], pred_d, pred_n, gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
pred_d = np.zeros((HEIGHT, WIDTH))
pass
predPlanes.append(pred_p)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
predNumPlanes.append(pred_p.shape[0])
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=pred_p.shape[0]))
#exit(1)
continue
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
new_pred_dict['plane'] = np.array(predPlanes)
new_pred_dict['segmentation'] = np.array(predSegmentations)
new_pred_dict['depth'] = np.array(predDepths)
new_pred_dict['num_planes'] = np.array(predNumPlanes)
if method_index < len(predictions):
predictions[method_index] = new_pred_dict
else:
predictions.append(new_pred_dict)
pass
#titles.append('pixelwise+semantics+RANSAC')
pass
if method[1] == 'crfrnn':
predSegmentations = []
predDepths = []
image_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 3], name='image')
segmentation_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, options.numOutputPlanes + 1], name='segmentation')
refined_segmentation = CrfRnnLayer(image_dims=(HEIGHT, WIDTH), num_classes=21, theta_alpha=120., theta_beta=3., theta_gamma=3., num_iterations=10, name='crfrnn')([segmentation_inp, image_inp])
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session(config=config) as sess:
sess.run(init_op)
for image_index in xrange(options.numImages):
#if image_index != 1:
#continue
print('crf rnn ' + str(image_index))
allSegmentations = np.concatenate([pred_dict['segmentation'][image_index], pred_dict['np_mask'][image_index]], axis=2)
img = gt_dict['image'][image_index:image_index + 1].astype(np.float32) - 128
pred_s = sess.run(refined_segmentation, feed_dict={segmentation_inp: np.expand_dims(allSegmentations, 0), image_inp: img})
# print(pred_s.shape)
# print(pred_s[0].max())
# print(pred_s.sum(-1).max())
# exit(1)
pred_s = pred_s[0]
# print(allSegmentations.max())
# print(pred_s.max())
# print(img.max())
# print(img.min())
# print(np.abs(pred_s - allSegmentations).max())
# print(np.abs(np.argmax(pred_s, axis=-1) - np.argmax(allSegmentations, axis=-1)).max())
pred_s = one_hot(np.argmax(pred_s, axis=-1), options.numOutputPlanes + 1)
planeDepths = calcPlaneDepths(pred_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, pred_dict['np_depth'][image_index]], axis=2)
pred_d = np.sum(allDepths * pred_s, axis=-1)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
continue
pass
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
segmentations = np.array(predSegmentations)
new_pred_dict['segmentation'] = segmentations[:, :, :, :options.numOutputPlanes]
new_pred_dict['non_plane_mask'] = segmentations[:, :, :, options.numOutputPlanes:options.numOutputPlanes + 1]
#new_pred_dict['non_plane_mask'] = segmentations[:, :, :, :options.numOutputPlanes]
new_pred_dict['depth'] = np.array(predDepths)
if method_index < len(predictions):
predictions[method_index] = new_pred_dict
else:
predictions.append(new_pred_dict)
pass
pass
if saving:
np.save(options.result_filename, {'gt': gt_dict, 'pred': predictions})
pass
continue
#exit(1)
#print(results)
# depth = gt_dict['depth'][4]
# cv2.imwrite(options.test_dir + '/test_depth_gt.png', drawDepthImage(depth))
# pred_p, pred_s, pred_d = fitPlanes(depth, getSUNCGCamera(), numPlanes=20, planeAreaThreshold=3*4, numIterations=100, distanceThreshold=0.05, local=0.2)
# cv2.imwrite(options.test_dir + '/test_depth.png', drawDepthImage(pred_d))
# cv2.imwrite(options.test_dir + '/test_segmentation.png', drawSegmentationImage(pred_s))
# exit(1)
#plotResults(gt_dict, predictions, options)
if options.numImages > gt_dict['image'].shape[0]:
plotAll(options)
else:
plotResults(gt_dict, predictions, options)
pass
writeHTML(options)
return
def plotAll(options):
result_filenames = glob.glob(options.test_dir + '/results_*.npy')
assert(len(result_filenames) > 0)
results = np.load(result_filenames[0])
results = results[()]
gt_dict = results['gt']
predictions = results['pred']
for index in xrange(1, len(result_filenames)):
other_results = np.load(result_filenames[index])
other_results = other_results[()]
other_gt_dict = other_results['gt']
other_predictions = other_results['pred']
for k, v in other_gt_dict.iteritems():
gt_dict[k] = np.concatenate([gt_dict[k], v], axis=0)
continue
for methodIndex, other_pred_dict in enumerate(other_predictions):
if methodIndex == 1:
continue
for k, v in other_pred_dict.iteritems():
print(methodIndex, k)
print(predictions[methodIndex][k].shape)
print(v.shape)
predictions[methodIndex][k] = np.concatenate([predictions[methodIndex][k], v], axis=0)
continue
continue
continue
plotResults(gt_dict, predictions, options)
return
def plotResults(gt_dict, predictions, options):
titles = options.titles
pixel_metric_curves = []
plane_metric_curves = []
for method_index, pred_dict in enumerate(predictions):
if titles[method_index] == 'pixelwise':
continue
segmentations = pred_dict['segmentation']
#if method_index == 0:
#segmentations = softmax(segmentations)
#pass
#pixel_curves, plane_curves = evaluatePlaneSegmentation(pred_dict['plane'], segmentations, gt_dict['plane'], gt_dict['segmentation'], gt_dict['num_planes'], numOutputPlanes = options.numOutputPlanes)
pixel_curves = np.zeros((6, 13))
plane_curves = np.zeros((6, 13, 3))
numImages = segmentations.shape[0]
for image_index in xrange(numImages):
gtDepths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
predDepths = calcPlaneDepths(pred_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
if 'num_planes' in pred_dict:
predNumPlanes = pred_dict['num_planes'][image_index]
else:
predNumPlanes = options.numOutputPlanes
pass
#if image_index != 2:
#continue
pixelStatistics, planeStatistics = evaluatePlanePrediction(predDepths, segmentations[image_index], predNumPlanes, gtDepths, gt_dict['segmentation'][image_index], gt_dict['num_planes'][image_index])
# for planeIndex in xrange(options.numOutputPlanes):
# cv2.imwrite('test/mask_' + str(planeIndex) + '.png', drawMaskImage(gt_dict['segmentation'][image_index][:, :, planeIndex]))
# continue
# mask_1 = segmentations[image_index] == 8
# mask_2 = gt_dict['segmentation'][image_index][:, :, 3]
# print(mask_1.sum())
# print(mask_2.sum())
# cv2.imwrite('test/mask_pred.png', drawMaskImage(mask_1))
# cv2.imwrite('test/mask_gt.png', drawMaskImage(mask_2))
# cv2.imwrite('test/mask_intersection.png', drawMaskImage(mask_2 * mask_1 > 0.5))
# cv2.imwrite('test/mask_union.png', drawMaskImage((mask_2 + mask_1) > 0.5))
# print((mask_2 * mask_1 > 0.5).sum())
# print(((mask_2 + mask_1) > 0.5).sum())
#print(image_index, planeStatistics[4][5])
#exit(1)
# print(pred_dict['plane'][image_index])
# for planeIndex in xrange(options.numOutputPlanes):
# print((segmentations[image_index] == planeIndex).sum())
# continue
#exit(1)
pixel_curves += np.array(pixelStatistics)
plane_curves += np.array(planeStatistics)
continue
if len(pixel_metric_curves) == 0:
for metric_index, pixel_curve in enumerate(pixel_curves):
pixel_metric_curves.append([])
plane_metric_curves.append([])
continue
pass
for metric_index, pixel_curve in enumerate(pixel_curves):
pixel_metric_curves[metric_index].append(pixel_curve / numImages)
continue
for metric_index, plane_curve in enumerate(plane_curves):
#planeScore = plane_curve[:, 0] / plane_curve[:, 1]
plane_metric_curves[metric_index].append(plane_curve)
continue
continue
#np.save(options.test_dir + '/pixel_curves.npy', np.array(pixel_metric_curves))
#np.save(options.test_dir + '/plane_curves.npy', np.array(plane_metric_curves))
xs = []
xs.append((np.arange(13) * 0.1).tolist())
xs.append((np.arange(13) * 0.1).tolist())
xs.append((np.arange(13) * 0.1).tolist())
xs.append((np.arange(13) * 0.05).tolist())
xs.append((np.arange(13) * 0.05).tolist())
xs.append((np.arange(13) * 0.05).tolist())
xlabels = ['IOU threshold', 'IOU threshold', 'IOU threshold', 'depth threshold', 'depth threshold', 'depth threshold']
curve_titles = ['depth threshold 0.1', 'depth threshold 0.2', 'depth threshold 0.3', 'IOU 0.3', 'IOU 0.5', 'IOU 0.7']
curve_labels = [title for title in titles if title != 'pixelwise']
# metric_index = 4
# filename = options.test_dir + '/curves'
# pixel_curves = pixel_metric_curves[metric_index]
# plane_curves = plane_metric_curves[metric_index]
# plane_curves = [plane_curve[:, 0] / plane_curve[:, 1] for plane_curve in plane_curves]
# plotCurvesSubplot(xs[metric_index], [plane_curves, pixel_curves], filenames = [filename + '.png', filename + '_oracle.png'], xlabel=xlabels[metric_index], ylabels=['Per-plane recall', 'Per-pixel recall'], labels=curve_labels)
# return
for metric_index, curves in enumerate(pixel_metric_curves):
if metric_index not in [4]:
continue
#filename = options.test_dir + '/curve_pixel_' + curve_titles[metric_index].replace(' ', '_') + '.png'
#plotCurves(xs[metric_index], curves, filename = filename, xlabel=xlabels[metric_index], ylabel='pixel coverage', title=curve_titles[metric_index], labels=curve_labels)
filename = options.test_dir + '/curve_pixel_' + curve_titles[metric_index].replace(' ', '_').replace('.', '')
plotCurvesSplit(xs[metric_index], curves, filenames = [filename + '.png', filename + '_oracle.png'], xlabel=xlabels[metric_index], ylabel='Per-pixel recall', title=curve_titles[metric_index], labels=curve_labels)
#plotCurvesSubplot(xs[metric_index], curves, filename = filename + '.png', xlabel=xlabels[metric_index], ylabel='Per-pixel recall', title=curve_titles[metric_index], labels=curve_labels)
continue
for metric_index, curves in enumerate(plane_metric_curves):
if metric_index not in [4]:
continue
curves = [curve[:, 0] / curve[:, 1] for curve in curves]
#filename = options.test_dir + '/curve_plane_' + curve_titles[metric_index].replace(' ', '_') + '.png'
#plotCurves(xs[metric_index], curves, filename = filename, xlabel=xlabels[metric_index], ylabel='Per-plane recall', title=curve_titles[metric_index], labels=curve_labels)
filename = options.test_dir + '/curve_plane_' + curve_titles[metric_index].replace(' ', '_').replace('.', '')
plotCurvesSplit(xs[metric_index], curves, filenames = [filename + '.png', filename + '_oracle.png'], xlabel=xlabels[metric_index], ylabel='Per-plane recall', title=curve_titles[metric_index], labels=curve_labels)
continue
def gridSearch(options):
#writeHTML(options)
#exit(1)
if os.path.exists(options.result_filename):
results = np.load(options.result_filename)
results = results[()]
else:
assert(False)
pass
gt_dict = results['gt']
predictions = results['pred']
for key, value in gt_dict.iteritems():
if options.imageIndex >= 0:
gt_dict[key] = value[options.imageIndex:options.imageIndex + 1]
elif value.shape[0] > options.numImages:
gt_dict[key] = value[:options.numImages]
pass
continue
for pred_dict in predictions:
for key, value in pred_dict.iteritems():
if options.imageIndex >= 0:
pred_dict[key] = value[options.imageIndex:options.imageIndex + 1]
elif value.shape[0] > options.numImages:
pred_dict[key] = value[:options.numImages]
pass
continue
continue
#methods = ['planenet', 'pixelwise+RANSAC', 'GT+RANSAC']
titles = options.titles
for image_index in xrange(options.visualizeImages):
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_image.png', gt_dict['image'][image_index])
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt.png', drawDepthImage(gt_dict['depth'][image_index]))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_normal_gt.png', drawNormalImage(gt_dict['normal'][image_index]))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_gt.png', drawSegmentationImage(np.concatenate([gt_dict['segmentation'][image_index], 1 - np.expand_dims(gt_dict['plane_mask'][image_index], -1)], axis=2), blackIndex=options.numOutputPlanes))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_semantics_gt.png', drawSegmentationImage(gt_dict['semantics'][image_index], blackIndex=0))
plane_depths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
all_depths = np.concatenate([plane_depths, np.expand_dims(gt_dict['depth'][image_index], -1)], axis=2)
depth = np.sum(all_depths * np.concatenate([gt_dict['segmentation'][image_index], 1 - np.expand_dims(gt_dict['plane_mask'][image_index], -1)], axis=2), axis=2)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt_plane.png', drawDepthImage(depth))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt_diff.png', drawMaskImage((depth - gt_dict['depth'][image_index]) * 5 + 0.5))
for method_index, pred_dict in enumerate(predictions):
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_dict['depth'][image_index]))
if 'pixelwise' in options.methods[method_index][1]:
continue
segmentation = pred_dict['segmentation'][image_index]
segmentation = np.concatenate([segmentation, pred_dict['np_mask'][image_index]], axis=2)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(segmentation, blackIndex=options.numOutputPlanes))
continue
continue
#post processing
for method_index, method in enumerate(options.methods):
if len(method) == 3:
pred_dict = predictions[method[2]]
else:
pred_dict = predictions[method_index]
pass
if 'pixelwise_2' in method[1] or 'pixelwise_3' in method[1]:
bestScore = 0
configurationIndex = 0
for distanceCostThreshold in [0.3]:
for smoothnessWeight in [0.01, 0.03, 0.05]:
for distanceThreshold in [0.2]:
parameters = {'distanceCostThreshold': distanceCostThreshold, 'smoothnessWeight': smoothnessWeight, 'distanceThreshold': distanceThreshold, 'semantics': True}
score = 0
for image_index in xrange(options.numImages):
#cv2.imwrite('test/normal.png', drawNormalImage(gt_dict['normal'][image_index]))
if '_2' in method[1]:
pred_p, pred_s = fitPlanesNYU(gt_dict['image'][image_index], gt_dict['depth'][image_index].squeeze(), gt_dict['normal'][image_index], gt_dict['semantics'][image_index], gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
else:
pred_d = pred_dict['np_depth'][image_index].squeeze()
pred_n = pred_dict['np_normal'][image_index].squeeze()
pred_p, pred_s = fitPlanesNYU(gt_dict['image'][image_index], pred_d, pred_n, pred_dict['semantics'][image_index], gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
pass
predNumPlanes = pred_p.shape[0]
gtDepths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
planeDepths = calcPlaneDepths(pred_p, WIDTH, HEIGHT, gt_dict['info'][image_index])
pixelStatistics, planeStatistics = evaluatePlanePrediction(planeDepths, pred_s, predNumPlanes, gtDepths, gt_dict['segmentation'][image_index], gt_dict['num_planes'][image_index])
#print(pixelStatistics)
#exit(1)
#planeStatistics = np.array(planeStatistics)[1]
#accuracy = (planeStatistics[3:8, 0].astype(np.float32) / np.maximum(planeStatistics[3:8, 1], 1e-4)).mean()
pixelStatistics = np.array(pixelStatistics)[1]
accuracy = pixelStatistics[3:8].mean()
score += accuracy
#cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite('test/segmentation_pred_' + str(image_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
#exit(1)
continue
score /= options.numImages
print(score, parameters)
configurationIndex += 1
#exit(1)
if score > bestScore:
bestScore = score
bestParameters = parameters
pass
continue
continue
continue
print(bestScore)
print(bestParameters)
exit(1)
if 'pixelwise_4' in method[1] or 'pixelwise_5' in method[1]:
bestScore = 0
configurationIndex = 0
for distanceCostThreshold in [0.05]:
for smoothnessWeight in [30]:
for offsetGap in [-0.1]:
parameters = {'distanceCostThreshold': distanceCostThreshold, 'smoothnessWeight': smoothnessWeight, 'offsetGap': abs(offsetGap), 'meanshift': offsetGap}
score = 0
for image_index in xrange(options.numImages):
if '_4' in method[1]:
pred_p, pred_s = fitPlanesManhattan(gt_dict['image'][image_index], gt_dict['depth'][image_index].squeeze(), gt_dict['normal'][image_index], gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
else:
pred_d = pred_dict['np_depth'][image_index].squeeze()
pred_n = pred_dict['np_normal'][image_index].squeeze()
pred_p, pred_s = fitPlanesManhattan(gt_dict['image'][image_index], pred_d, pred_n, gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
pass
predNumPlanes = pred_p.shape[0]
gtDepths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
planeDepths = calcPlaneDepths(pred_p, WIDTH, HEIGHT, gt_dict['info'][image_index])
pixelStatistics, planeStatistics = evaluatePlanePrediction(planeDepths, pred_s, predNumPlanes, gtDepths, gt_dict['segmentation'][image_index], gt_dict['num_planes'][image_index])
#print(pixelStatistics)
#exit(1)
#planeStatistics = np.array(planeStatistics)[1]
#accuracy = (planeStatistics[3:8, 0].astype(np.float32) / np.maximum(planeStatistics[3:8, 1], 1e-4)).mean()
pixelStatistics = np.array(pixelStatistics)[1]
accuracy = pixelStatistics[3:8].mean()
score += accuracy
#cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
#cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
#exit(1)
continue
score /= options.numImages
print(score, parameters)
configurationIndex += 1
#exit(1)
if score > bestScore:
bestScore = score
bestParameters = parameters
pass
continue
continue
continue
print(bestScore)
print(bestParameters)
exit(1)
if 'pixelwise_6' in method[1] or 'pixelwise_7' in method[1]:
bestScore = 0
configurationIndex = 0
for distanceCostThreshold in [0.1]:
for smoothnessWeight in [300]:
for normalWeight in [1]:
for offset in [0.2]:
parameters = {'distanceCostThreshold': distanceCostThreshold, 'smoothnessWeight': smoothnessWeight, 'numProposals': 5, 'normalWeight': normalWeight, 'offsetGap': abs(offset), 'meanshift': offset}
score = 0
for image_index in xrange(options.numImages):
if '_6' in method[1]:
pred_p, pred_s = fitPlanesPiecewise(gt_dict['image'][image_index], gt_dict['depth'][image_index].squeeze(), gt_dict['normal'][image_index], gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
else:
pred_d = pred_dict['np_depth'][image_index].squeeze()
pred_n = pred_dict['np_normal'][image_index].squeeze()
pred_p, pred_s = fitPlanesPiecewise(gt_dict['image'][image_index], pred_d, pred_n, gt_dict['info'][image_index], numOutputPlanes=options.numOutputPlanes, parameters=parameters)
pass
predNumPlanes = pred_p.shape[0]
gtDepths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
planeDepths = calcPlaneDepths(pred_p, WIDTH, HEIGHT, gt_dict['info'][image_index])
pixelStatistics, planeStatistics = evaluatePlanePrediction(planeDepths, pred_s, predNumPlanes, gtDepths, gt_dict['segmentation'][image_index], gt_dict['num_planes'][image_index])
#print(pixelStatistics)
#exit(1)
#planeStatistics = np.array(planeStatistics)[1]
#accuracy = (planeStatistics[3:8, 0].astype(np.float32) / np.maximum(planeStatistics[3:8, 1], 1e-4)).mean()
pixelStatistics = np.array(pixelStatistics)[1]
accuracy = pixelStatistics[3:8].mean()
score += accuracy
#cv2.imwrite('test/depth_pred_' + str(configurationIndex) + '.png', drawDepthImage(pred_d))
cv2.imwrite('test/segmentation_pred_' + str(image_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
#exit(1)
continue
score /= options.numImages
print(score, parameters)
configurationIndex += 1
#exit(1)
if score > bestScore:
bestScore = score
bestParameters = parameters
pass
continue
continue
continue
continue
print(bestScore)
print(bestParameters)
exit(1)
if method[1] == 'crfrnn':
parameterConfigurations = []
for alpha in [15]:
for beta in [10]:
for gamma in [3]:
parameterConfigurations.append((alpha, beta, gamma))
continue
continue
continue
print(parameterConfigurations)
bestScore = 0
for parameters in parameterConfigurations:
tf.reset_default_graph()
image_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 3], name='image')
segmentation_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, options.numOutputPlanes + 1], name='segmentation')
refined_segmentation = CrfRnnLayer(image_dims=(HEIGHT, WIDTH), num_classes=21, theta_alpha=parameters[0], theta_beta=parameters[1], theta_gamma=parameters[2], num_iterations=10, name='crfrnn')([segmentation_inp, image_inp])
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session(config=config) as sess:
sess.run(init_op)
score = 0.
for image_index in xrange(options.numImages):
#if image_index != 1:
#continue
print('crf as rnn', image_index)
allSegmentations = np.concatenate([pred_dict['segmentation'][image_index], pred_dict['np_mask'][image_index]], axis=2)
img = gt_dict['image'][image_index:image_index + 1].astype(np.float32) - 128
pred_s = sess.run(refined_segmentation, feed_dict={segmentation_inp: np.expand_dims(allSegmentations, 0), image_inp: img})
pred_s = pred_s[0]
#pred_s = allSegmentations
pred_s = one_hot(np.argmax(pred_s, axis=-1), options.numOutputPlanes + 1)
planeDepths = calcPlaneDepths(pred_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, pred_dict['np_depth'][image_index]], axis=2)
pred_d = np.sum(allDepths * pred_s, axis=-1)
# for planeIndex in xrange(options.numOutputPlanes):
# cv2.imwrite('test/mask_' + str(planeIndex) + '.png', drawMaskImage(pred_s[:, :, planeIndex]))
# cv2.imwrite('test/gt_mask_' + str(planeIndex) + '.png', drawMaskImage(gt_dict['segmentation'][image_index][:, :, planeIndex]))
# continue
# cv2.imwrite('test/depth_pred.png', drawDepthImage(pred_d))
# cv2.imwrite('test/depth_gt.png', drawDepthImage(gt_dict['depth'][image_index]))
# cv2.imwrite('test/depth_diff.png', drawMaskImage(np.abs(pred_d - gt_dict['depth'][image_index]) * 5))
# exit(1)
predNumPlanes = options.numOutputPlanes
gtDepths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
pixelStatistics, planeStatistics = evaluatePlanePrediction(planeDepths, pred_s[:, :, :options.numOutputPlanes], predNumPlanes, gtDepths, gt_dict['segmentation'][image_index], gt_dict['num_planes'][image_index])
#print(pixelStatistics)
#exit(1)
#planeStatistics = np.array(planeStatistics)[1]
#accuracy = (planeStatistics[3:8, 0].astype(np.float32) / np.maximum(planeStatistics[3:8, 1], 1e-4)).mean()
pixelStatistics = np.array(pixelStatistics)[1]
# print(pixelStatistics)
# pixelStatistics[3:8].mean()
# exit(1)
accuracy = pixelStatistics[3:8].mean()
score += accuracy
#cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
#cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
#exit(1)
continue
score /= options.numImages
print(score, parameters)
#exit(1)
if score > bestScore:
bestScore = score
bestParameters = parameters
pass
pass
continue
print(bestScore, bestParameters)
pass
continue
return
def evaluateDepthPrediction(options):
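    """Evaluate depth-prediction accuracy for each configured method: optionally refine the
    raw predictions (graph cut, CRF variants, RANSAC-style plane fitting), write
    visualizations to options.test_dir, and report depth metrics via evaluateDepths."""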
if not os.path.exists(options.test_dir):
os.system("mkdir -p %s"%options.test_dir)
pass
if options.useCache == 1 and os.path.exists(options.result_filename):
results = np.load(options.result_filename)
results = results[()]
else:
results = getResults(options)
if options.useCache != -2:
np.save(options.result_filename, results)
pass
pass
gt_dict = results['gt']
predictions = results['pred']
for key, value in gt_dict.iteritems():
if options.imageIndex >= 0:
gt_dict[key] = value[options.imageIndex:options.imageIndex + 1]
elif value.shape[0] > options.numImages:
gt_dict[key] = value[:options.numImages]
pass
continue
for pred_dict in predictions:
for key, value in pred_dict.iteritems():
if options.imageIndex >= 0:
pred_dict[key] = value[options.imageIndex:options.imageIndex + 1]
elif value.shape[0] > options.numImages:
pred_dict[key] = value[:options.numImages]
pass
continue
continue
titles = options.titles
for image_index in xrange(options.visualizeImages):
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_image.png', gt_dict['image'][image_index])
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt.png', drawDepthImage(gt_dict['depth'][image_index]))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_gt.png', drawSegmentationImage(np.concatenate([gt_dict['segmentation'][image_index], 1 - np.expand_dims(gt_dict['plane_mask'][image_index], -1)], axis=2), blackIndex=options.numOutputPlanes))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_semantics_gt.png', drawSegmentationImage(gt_dict['semantics'][image_index], blackIndex=0))
# plane_depths = calcPlaneDepths(gt_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
# all_depths = np.concatenate([plane_depths, np.expand_dims(gt_dict['depth'][image_index], -1)], axis=2)
# depth = np.sum(all_depths * np.concatenate([gt_dict['segmentation'][image_index], 1 - np.expand_dims(gt_dict['plane_mask'][image_index], -1)], axis=2), axis=2)
# cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_gt_plane.png', drawDepthImage(depth))
for method_index, pred_dict in enumerate(predictions):
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_dict['depth'][image_index]))
if titles[method_index] == 'pixelwise':
continue
segmentation = pred_dict['segmentation'][image_index]
segmentation = np.concatenate([segmentation, pred_dict['np_mask'][image_index]], axis=2)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(segmentation, blackIndex=options.numOutputPlanes))
continue
continue
#post processing
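    # Refine the raw predictions per method (graph cut, TF mean-field CRF, discrete CRF,
    # or RANSAC-style plane fitting) before the depth evaluation below.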
for method_index, method in enumerate(options.methods):
if method[1] == 'graphcut':
pred_dict = gt_dict
predSegmentations = []
predDepths = []
for image_index in xrange(options.numImages):
#if image_index != 3:
#continue
print('graph cut ' + str(image_index))
segmentation = np.argmax(np.concatenate([pred_dict['segmentation'][image_index], 1 - np.expand_dims(pred_dict['plane_mask'][image_index], -1)], axis=2), axis=2)
#pred_s = getSegmentationsGraphCut(pred_dict['plane'][image_index], gt_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['normal'][image_index], segmentation, pred_dict['semantics'][image_index], pred_dict['info'][image_index], gt_dict['num_planes'][image_index])
pred_p, pred_s, numPlanes = removeSmallSegments(pred_dict['plane'][image_index], gt_dict['image'][image_index], pred_dict['depth'][image_index], pred_dict['normal'][image_index], segmentation, pred_dict['semantics'][image_index], pred_dict['info'][image_index], gt_dict['num_planes'][image_index])
#pred_p, pred_s, numPlanes = pred_dict['plane'][image_index], segmentation, gt_dict['num_planes'][image_index]
print((gt_dict['num_planes'][image_index], numPlanes))
planeDepths = calcPlaneDepths(pred_p, WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, np.expand_dims(pred_dict['depth'][image_index], -1)], axis=2)
pred_d = allDepths.reshape(-1, options.numOutputPlanes + 1)[np.arange(WIDTH * HEIGHT), pred_s.reshape(-1)].reshape(HEIGHT, WIDTH)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
continue
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
new_pred_dict['segmentation'] = np.array(predSegmentations)
predictions[method_index] = new_pred_dict
if method[1] == 'crf_tf':
pred_dict = predictions[method_index]
predSegmentations = []
predDepths = []
image_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 3], name='image')
segmentation_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, options.numOutputPlanes + 1], name='segmentation')
plane_inp = tf.placeholder(tf.float32, shape=[1, options.numOutputPlanes, 3], name='plane')
non_plane_depth_inp = tf.placeholder(tf.float32, shape=[1, HEIGHT, WIDTH, 1], name='non_plane_depth')
info_inp = tf.placeholder(tf.float32, shape=[20], name='info')
plane_parameters = tf.reshape(plane_inp, (-1, 3))
plane_depths = planeDepthsModule(plane_parameters, WIDTH, HEIGHT, info_inp)
plane_depths = tf.transpose(tf.reshape(plane_depths, [HEIGHT, WIDTH, -1, options.numOutputPlanes]), [2, 0, 1, 3])
all_depths = tf.concat([plane_depths, non_plane_depth_inp], axis=3)
planesY = plane_inp[:, :, 1]
planesD = tf.maximum(tf.norm(plane_inp, axis=-1), 1e-4)
planesY /= planesD
planesY = tf.concat([planesY, tf.ones((1, 1))], axis=1)
#refined_segmentation = crfModule(segmentation_inp, plane_inp, non_plane_depth_inp, info_inp, numOutputPlanes = options.numOutputPlanes, numIterations=5)
imageDiff = calcImageDiff(image_inp)
#refined_segmentation, debug_dict = segmentationRefinementModule(segmentation_inp, all_depths, planesY, imageDiff, numOutputPlanes = options.numOutputPlanes + 1, numIterations=5)
refined_segmentation, debug_dict = meanfieldModule(segmentation_inp, all_depths, planesY, imageDiff, numOutputPlanes = options.numOutputPlanes + 1, maxDepthDiff=0.2, varDepthDiff=pow(0.2, 2))
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
with tf.Session(config=config) as sess:
sess.run(init_op)
for image_index in xrange(options.numImages):
#if image_index != 1:
#continue
print('crf tf ' + str(image_index))
allSegmentations = np.concatenate([pred_dict['segmentation'][image_index], pred_dict['np_mask'][image_index]], axis=2)
allSegmentations = softmax(allSegmentations)
pred_s, debug = sess.run([refined_segmentation, debug_dict], feed_dict={segmentation_inp: np.expand_dims(allSegmentations, 0), plane_inp: np.expand_dims(pred_dict['plane'][image_index], 0), non_plane_depth_inp: np.expand_dims(pred_dict['np_depth'][image_index], 0), info_inp: gt_dict['info'][image_index], image_inp: gt_dict['image'][image_index:image_index + 1]})
pred_s = pred_s[0]
planeDepths = calcPlaneDepths(pred_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, pred_dict['np_depth'][image_index]], axis=2)
pred_d = np.sum(allDepths * pred_s, axis=-1)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
if 'diff' in debug:
segmentation = np.argmax(allSegmentations, axis=-1)
for planeIndex in xrange(options.numOutputPlanes + 1):
cv2.imwrite('test/mask_' + str(planeIndex) + '.png', drawMaskImage(allSegmentations[:, :, planeIndex]))
continue
for planeIndex in xrange(debug['diff'].shape[-1]):
cv2.imwrite('test/cost_mask_' + str(planeIndex) + '.png', drawMaskImage(debug['diff'][0, :, :, planeIndex] / 2))
continue
exit(1)
pass
continue
pass
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
segmentations = np.array(predSegmentations)
new_pred_dict['segmentation'] = segmentations[:, :, :, :options.numOutputPlanes]
new_pred_dict['non_plane_mask'] = segmentations[:, :, :, options.numOutputPlanes:options.numOutputPlanes + 1]
#new_pred_dict['non_plane_mask'] = segmentations[:, :, :, :options.numOutputPlanes]
new_pred_dict['depth'] = np.array(predDepths)
predictions[method_index] = new_pred_dict
pass
if method[1] == 'crf':
pred_dict = predictions[method_index]
predSegmentations = []
predDepths = []
for image_index in xrange(options.numImages):
print('crf ' + str(image_index))
boundaries = pred_dict['boundary'][image_index]
boundaries = sigmoid(boundaries)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_boundary.png', drawMaskImage(np.concatenate([boundaries, np.zeros((HEIGHT, WIDTH, 1))], axis=2)))
allSegmentations = np.concatenate([pred_dict['segmentation'][image_index], pred_dict['np_mask'][image_index]], axis=2)
allSegmentations = softmax(allSegmentations)
planeDepths = calcPlaneDepths(pred_dict['plane'][image_index], WIDTH, HEIGHT, gt_dict['info'][image_index])
allDepths = np.concatenate([planeDepths, pred_dict['np_depth'][image_index]], axis=2)
#boundaries = np.concatenate([np.ones((allSegmentations.shape[0], allSegmentations.shape[1], 1)), -np.ones((allSegmentations.shape[0], allSegmentations.shape[1], 1))], axis=2)
#if options.imageIndex >= 0:
#boundaries = cv2.imread(options.test_dir + '/' + str(options.imageIndex) + '_boundary.png')
#else:
#boundaries = cv2.imread(options.test_dir + '/' + str(image_index) + '_boundary.png')
#pass
#boundaries = (boundaries > 128).astype(np.float32)[:, :, :2]
allDepths[:, :, options.numOutputPlanes] = 0
pred_s = refineSegmentation(gt_dict['image'][image_index], allSegmentations, allDepths, boundaries, numOutputPlanes = options.numOutputPlanes, numIterations=20, numProposals=5)
pred_d = allDepths.reshape(-1, options.numOutputPlanes + 1)[np.arange(WIDTH * HEIGHT), pred_s.reshape(-1)].reshape(HEIGHT, WIDTH)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s, blackIndex=options.numOutputPlanes))
#segmentation = np.argmax(allSegmentations, axis=-1)
for planeIndex in xrange(options.numOutputPlanes + 1):
cv2.imwrite('test/mask_' + str(planeIndex) + '.png', drawMaskImage(allSegmentations[:, :, planeIndex]))
continue
#cv2.imwrite(options.test_dir + '/mask_' + str(21) + '.png', drawDepthImage(pred_dict['np_depth'][0]))
#for plane_index in xrange(options.numOutputPlanes + 1):
#cv2.imwrite(options.test_dir + '/mask_' + str(plane_index) + '.png', drawMaskImage(pred_s == plane_index))
#continue
#exit(1)
continue
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
new_pred_dict['segmentation'] = np.array(predSegmentations)
new_pred_dict['depth'] = np.array(predDepths)
predictions[method_index] = new_pred_dict
pass
if 'pixelwise' in method[1]:
pred_dict = predictions[method_index]
predPlanes = []
predSegmentations = []
predDepths = []
for image_index in xrange(options.numImages):
pred_d = pred_dict['np_depth'][image_index].squeeze()
if '_1' in method[1]:
pred_s = np.zeros(pred_dict['segmentation'][image_index].shape)
pred_p = np.zeros(pred_dict['plane'][image_index].shape)
                elif '_2' in method[1]:
pred_p, pred_s, pred_d = fitPlanes(pred_d, gt_dict['info'][image_index], numPlanes=options.numOutputPlanes, planeAreaThreshold=3*4, numIterations=100, distanceThreshold=0.05, local=0.2)
                elif '_3' in method[1]:
pred_p, pred_s, pred_d = fitPlanes(pred_d, gt_dict['info'][image_index], numPlanes=options.numOutputPlanes, planeAreaThreshold=3*4, numIterations=100, distanceThreshold=0.05, local=0.2)
                elif '_4' in method[1]:
pred_p, pred_s, pred_d = fitPlanesSegmentation(pred_d, pred_dict['semantics'][image_index], gt_dict['info'][image_index], numPlanes=options.numOutputPlanes, planeAreaThreshold=3*4, numIterations=100, distanceThreshold=0.05, local=0.2)
pass
predPlanes.append(pred_p)
predSegmentations.append(pred_s)
predDepths.append(pred_d)
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_d))
cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage(pred_s))
continue
new_pred_dict = {}
for key, value in pred_dict.iteritems():
new_pred_dict[key] = value
continue
new_pred_dict['plane'] = np.array(predPlanes)
new_pred_dict['segmentation'] = np.array(predSegmentations)
new_pred_dict['depth'] = np.array(predDepths)
predictions[method_index] = new_pred_dict
#titles.append('pixelwise+semantics+RANSAC')
pass
continue
for method_index, pred_dict in enumerate(predictions):
print(titles[method_index])
evaluateDepths(pred_dict['depth'], gt_dict['depth'], np.ones(gt_dict['depth'].shape))
continue
return
def getResults(options):
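    """Return {'gt': ..., 'pred': ...}: load cached results from options.result_filename when
    options.useCache allows it, otherwise compute the ground truth and run each configured
    method's checkpoint via getPrediction, caching the combined results afterwards."""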
checkpoint_prefix = 'checkpoint/'
methods = options.methods
predictions = []
if os.path.exists(options.result_filename):
if options.useCache == 1:
results = np.load(options.result_filename)
results = results[()]
return results
elif options.useCache == 2:
results = np.load(options.result_filename)
results = results[()]
gt_dict = results['gt']
predictions = results['pred']
else:
gt_dict = getGroundTruth(options)
pass
else:
gt_dict = getGroundTruth(options)
pass
for method_index, method in enumerate(methods):
if len(method) < 4 or method[3] < 2:
continue
if method[0] == '':
continue
method_options = copy.deepcopy(options)
if 'ds0' not in method[0]:
method_options.deepSupervisionLayers = ['res4b22_relu', ]
else:
method_options.deepSupervisionLayers = []
pass
method_options.predictConfidence = 0
method_options.predictLocal = 0
method_options.predictPixelwise = 1
method_options.predictBoundary = int('pb' in method[0])
method_options.anchorPlanes = 0
if 'ps' in method[0]:
method_options.predictSemantics = 1
else:
method_options.predictSemantics = 0
pass
if 'crfrnn' in method[0]:
method_options.crfrnn = int(method[0].split('crfrnn')[1].split('_')[0])
else:
method_options.crfrnn = 0
pass
if 'ap1' in method[0]:
method_options.anchorPlanes = 1
pass
method_options.numOutputPlanes = 20
if 'np10' in method[0]:
method_options.numOutputPlanes = 10
elif 'np15' in method[0]:
method_options.numOutputPlanes = 15
pass
method_options.checkpoint_dir = checkpoint_prefix + method[0]
print(method_options.checkpoint_dir)
method_options.suffix = method[1]
method_names = [previous_method[0] for previous_method in methods[:method_index]]
if method[0] in method_names:
pred_dict = predictions[method_names.index(method[0])]
elif method[0] == 'gt':
pred_dict = gt_dict
else:
pred_dict = getPrediction(method_options)
pass
# for image_index in xrange(options.visualizeImages):
# cv2.imwrite(options.test_dir + '/' + str(image_index) + '_depth_pred_' + str(method_index) + '.png', drawDepthImage(pred_dict['depth'][image_index]))
# cv2.imwrite(options.test_dir + '/' + str(image_index) + '_segmentation_pred_' + str(method_index) + '.png', drawSegmentationImage())
# continue
if len(method) >= 4 and method[3] == 3:
predictions.insert(0, pred_dict)
else:
if method_index < len(predictions):
predictions[method_index] = pred_dict
else:
predictions.append(pred_dict)
pass
pass
continue
#np.save(options.test_dir + '/curves.npy', curves)
results = {'gt': gt_dict, 'pred': predictions}
if options.useCache != -1:
np.save(options.result_filename, results)
pass
pass
return results
def getPrediction(options):
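    """Build the prediction graph for `options`, restore its checkpoint, run it over the
    chosen validation tfrecords, and return per-image predictions (planes, segmentation,
    depth, semantics and non-plane outputs) as a dict of numpy arrays."""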
tf.reset_default_graph()
options.batchSize = 1
min_after_dequeue = 1000
reader = RecordReaderAll()
if options.dataset == 'SUNCG':
filename_queue = tf.train.string_input_producer([options.dataFolder + '/planes_SUNCG_val.tfrecords'], num_epochs=10000)
elif options.dataset == 'NYU_RGBD':
filename_queue = tf.train.string_input_producer([options.dataFolder + '/planes_nyu_rgbd_val.tfrecords'], num_epochs=1)
options.deepSupervision = 0
options.predictLocal = 0
elif options.dataset == 'matterport':
filename_queue = tf.train.string_input_producer([options.dataFolder + '/planes_matterport_val.tfrecords'], num_epochs=1)
else:
filename_queue = tf.train.string_input_producer([options.dataFolder + '/planes_scannet_val.tfrecords'], num_epochs=1)
pass
img_inp, global_gt_dict, local_gt_dict = reader.getBatch(filename_queue, numOutputPlanes=options.numOutputPlanes, batchSize=options.batchSize, min_after_dequeue=min_after_dequeue, getLocal=True, random=False)
training_flag = tf.constant(False, tf.bool)
options.gpu_id = 0
# if 'sample' not in options.checkpoint_dir:
# global_pred_dict, _, _ = build_graph(img_inp, img_inp, training_flag, options)
# else:
# global_pred_dict, _, _ = build_graph_sample(img_inp, img_inp, training_flag, options)
global_pred_dict, _, _ = build_graph(img_inp, img_inp, training_flag, options)
var_to_restore = tf.global_variables()
config=tf.ConfigProto()
config.gpu_options.allow_growth=True
config.allow_soft_placement=True
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
pred_dict = {}
with tf.Session(config=config) as sess:
sess.run(init_op)
#var_to_restore = [v for v in var_to_restore if 'res4b22_relu_non_plane' not in v.name]
loader = tf.train.Saver(var_to_restore)
loader.restore(sess, "%s/checkpoint.ckpt"%(options.checkpoint_dir))
#loader.restore(sess, options.fineTuningCheckpoint)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
predDepths = []
predPlanes = []
predSegmentations = []
predSemantics = []
predNonPlaneDepths = []
predNonPlaneNormals = []
predNonPlaneMasks = []
predBoundaries = []
for index in xrange(options.startIndex + options.numImages):
if index % 10 == 0:
print(('image', index))
pass
t0=time.time()
img, global_gt, global_pred = sess.run([img_inp, global_gt_dict, global_pred_dict])
if index < options.startIndex:
continue
pred_p = global_pred['plane'][0]
pred_s = global_pred['segmentation'][0]
pred_np_m = global_pred['non_plane_mask'][0]
pred_np_d = global_pred['non_plane_depth'][0]
pred_np_n = global_pred['non_plane_normal'][0]
if global_gt['info'][0][19] > 1 and global_gt['info'][0][19] < 4 and False:
pred_np_n = calcNormal(pred_np_d.squeeze(), global_gt['info'][0])
pass
pred_b = global_pred['boundary'][0]
predNonPlaneMasks.append(pred_np_m)
predNonPlaneDepths.append(pred_np_d)
predNonPlaneNormals.append(pred_np_n)
predBoundaries.append(pred_b)
all_segmentations = np.concatenate([pred_s, pred_np_m], axis=2)
plane_depths = calcPlaneDepths(pred_p, WIDTH, HEIGHT, global_gt['info'][0])
all_depths = np.concatenate([plane_depths, pred_np_d], axis=2)
segmentation = np.argmax(all_segmentations, 2)
pred_d = all_depths.reshape(-1, options.numOutputPlanes + 1)[np.arange(WIDTH * HEIGHT), segmentation.reshape(-1)].reshape(HEIGHT, WIDTH)
if 'semantics' in global_pred:
#cv2.imwrite('test/semantics.png', drawSegmentationImage(np.argmax(global_pred['semantics'][0], axis=-1)))
#exit(1)
predSemantics.append(np.argmax(global_pred['semantics'][0], axis=-1))
else:
predSemantics.append(np.zeros((HEIGHT, WIDTH)))
pass
predDepths.append(pred_d)
predPlanes.append(pred_p)
#predSegmentations.append(pred_s)
predSegmentations.append(segmentation)
continue
            pred_dict['plane'] = np.array(predPlanes)
"""Functions related to computation of the log-likelihood."""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
import scipy.stats as _stats
import warnings as _warnings
import itertools as _itertools
import time as _time
import sys as _sys
from collections import OrderedDict as _OrderedDict
from . import basistools as _bt
from . import listtools as _lt
from . import jamiolkowski as _jam
from . import mpitools as _mpit
from . import slicetools as _slct
from ..objects.smartcache import smart_cached
TOL = 1e-20
# The log(Likelihood) within the standard (non-Poisson) picture is:
#
# L = prod_{i,sl} p_{i,sl}^N_{i,sl}
#
# Where i indexes the operation sequence, and sl indexes the spam label. N[i] is the total counts
# for the i-th circuit, and so sum_{sl} N_{i,sl} == N[i]. We can take the log:
#
# log L = sum_{i,sl} N_{i,sl} log(p_{i,sl})
#
# after patching (linear extrapolation below min_p and ignore f == 0 terms ( 0*log(0) == 0 ) ):
#
# logl = sum_{i,sl} N_{i,sl} log(p_{i,sl}) if p_{i,sl} >= min_p and N_{i,sl} > 0 # noqa
# N_{i,sl} log(min_p) + S * (p_{i,sl} - min_p) + S2 * (p_{i,sl} - min_p)**2 if p_{i,sl} < p_min and N_{i,sl} > 0 # noqa
# 0 if N_{i,sl} == 0 # noqa
#
# dlogL = sum_{i,sl} N_{i,sl} / p_{i,sl} * dp if p_{i,sl} >= min_p and N_{i,sl} > 0 # noqa
# (S + 2*S2*(p_{i,sl} - min_p)) * dp if p_{i,sl} < p_min and N_{i,sl} > 0 # noqa
# 0 if N_{i,sl} == 0 # noqa
#
# hlogL = sum_{i,sl} -N_{i,sl} / p_{i,sl}**2 * dp1 * dp2 + N_{i,sl} / p_{i,sl} *hp if p_{i,sl} >= min_p and N_{i,sl} > 0 # noqa
# 2*S2* dp1 * dp2 + (S + 2*S2*(p_{i,sl} - min_p)) * hp if p_{i,sl} < p_min and N_{i,sl} > 0 # noqa
# 0 if N_{i,sl} == 0 # noqa
#
# where S = N_{i,sl} / min_p is the slope of the line tangent to logl at min_p
# and S2 = 0.5*( -N_{i,sl} / min_p**2 ) is 1/2 the 2nd derivative of the logl term at min_p
# and hlogL == d/d1 ( d/d2 ( logl ) ) -- i.e. dp2 is the *first* derivative performed...
#Note: Poisson picture entered use when we allowed an EVec which was 1-{other EVecs} -- a
# (0,-1) spam index -- instead of assuming all probabilities of a given gate string summed
# to one -- a (-1,-1) spam index. The poisson picture gives a correct log-likelihood
# description when the probabilities (for a given operation sequence) may not sum to one, by
# interpreting them each as rates. In the standard picture, large circuit probabilities
# are not penalized (each standard logL term increases monotonically with each probability,
# and the reason this is ok when the probabilities sum to one is that for a probabilility
# that gets close to 1, there's another that is close to zero, and logL is very negative
# near zero.
# The log(Likelihood) within the Poisson picture is:
#
# L = prod_{i,sl} lambda_{i,sl}^N_{i,sl} e^{-lambda_{i,sl}} / N_{i,sl}!
#
# Where lambda_{i,sl} := p_{i,sl}*N[i] is a rate, i indexes the operation sequence,
# and sl indexes the spam label. N[i] is the total counts for the i-th circuit, and
# so sum_{sl} N_{i,sl} == N[i]. We can ignore the p-independent N_j! and take the log:
#
# log L = sum_{i,sl} N_{i,sl} log(N[i]*p_{i,sl}) - N[i]*p_{i,sl}
# = sum_{i,sl} N_{i,sl} log(p_{i,sl}) - N[i]*p_{i,sl} (where we ignore the p-independent log(N[i]) terms)
#
# after patching (linear extrapolation below min_p and "softening" f == 0 terms w/cubic below radius "a"):
#
# logl = sum_{i,sl} N_{i,sl} log(p_{i,sl}) - N[i]*p_{i,sl} if p_{i,sl} >= min_p and N_{i,sl} > 0 # noqa
# N_{i,sl} log(min_p) - N[i]*min_p + S * (p_{i,sl} - min_p) + S2 * (p_{i,sl} - min_p)**2 if p_{i,sl} < p_min and N_{i,sl} > 0 # noqa
# 0 - N[i]*p_{i,sl} if N_{i,sl} == 0 and p_{i,sl} >= a # noqa
# 0 - N[i]*( -(1/(3a**2))p_{i,sl}**3 + p_{i,sl}**2/a + (1/3)*a ) if N_{i,sl} == 0 and p_{i,sl} < a # noqa
# - N[i]*Y(1-sum(p_omitted)) added to "first" N_{i,sl} > 0 entry for omitted probabilities, where
# Y(p) = p if p >= a else ( -(1/(3a**2))p**3 + p**2/a + (1/3)*a )
#
# dlogL = sum_{i,sl} [ N_{i,sl} / p_{i,sl} - N[i] ] * dp if p_{i,sl} >= min_p and N_{i,sl} > 0 # noqa
# (S + 2*S2*(p_{i,sl} - min_p)) * dp if p_{i,sl} < p_min and N_{i,sl} > 0 # noqa
# -N[i] * dp if N_{i,sl} == 0 and p_{i,sl} >= a # noqa
# -N[i] * ( (-1/a**2)p_{i,sl}**2 + 2*p_{i,sl}/a ) * dp if N_{i,sl} == 0 and p_{i,sl} < a
# +N[i]*sum(dY/dp_omitted * dp_omitted) added to "first" N_{i,sl} > 0 entry for omitted probabilities
#
# hlogL = sum_{i,sl} -N_{i,sl} / p_{i,sl}**2 * dp1 * dp2 + [ N_{i,sl} / p_{i,sl} - N[i] ]*hp if p_{i,sl} >= min_p and N_{i,sl} > 0 # noqa
# 2*S2* dp1 * dp2 + (S + 2*S2*(p_{i,sl} - min_p)) * hp if p_{i,sl} < p_min and N_{i,sl} > 0 # noqa
# -N[i] * hp if N_{i,sl} == 0 and p_{i,sl} >= a # noqa
# -N[i]*( (-2/a**2)p_{i,sl} + 2/a ) * dp1 * dp2 # noqa
# - N[i]*( (-1/a**2)p_{i,sl}**2 + 2*p_{i,sl}/a ) * hp if N_{i,sl} == 0 and p_{i,sl} < a # noqa
# +N[i]*sum(d2Y/dp_omitted2 * dp_omitted1 * dp_omitted2 +
# dY/dp_omitted * hp_omitted) added to "first" N_{i,sl} > 0 entry for omitted probabilities # noqa
#
# where S = N_{i,sl} / min_p - N[i] is the slope of the line tangent to logl at min_p
# and S2 = 0.5*( -N_{i,sl} / min_p**2 ) is 1/2 the 2nd derivative of the logl term at min_p so
# logL_term = logL_term(min_p) + S * (p-min_p) + S2 * (p-min_p)**2
# and hlogL == d/d1 ( d/d2 ( logl ) ) -- i.e. dp2 is the *first* derivative performed...
#
# For cubic interpolation, use function F(p) (derived by Robin: match value, 1st-deriv, 2nd-deriv at p == r, and require
# min at p == 0):
# Given a radius r << 1 (but r>0):
# F(p) = piecewise{ if( p>r ) then p; else -(1/3)*p^3/r^2 + p^2/r + (1/3)*r }
# OLD: quadratic that doesn't match 2nd-deriv:
# F(p) = piecewise{ if( p>r ) then p; else (r-p)^2/(2*r) + p }
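#
# Illustrative sketch (added for clarity; not referenced elsewhere in this module): the
# patched per-outcome contribution described above, for a single count N_isl, circuit
# total N_i and probability p. Here `min_p` and `a` play the same roles as the
# `minProbClip` and `radius` arguments of the functions below.
def _example_patched_logl_term(N_isl, N_i, p, min_p=1e-6, a=1e-4, poissonPicture=True):
    if poissonPicture:
        if N_isl == 0:
            # zero-frequency term: -N_i * F(p), with the cubic softening F below radius a
            F = p if p >= a else (-1.0 / (3 * a**2)) * p**3 + p**2 / a + a / 3.0
            return -N_i * F
        if p >= min_p:
            return N_isl * _np.log(p) - N_i * p
        # quadratic extrapolation below min_p using the slope S and curvature S2 defined above
        S = N_isl / min_p - N_i
        S2 = -0.5 * N_isl / (min_p**2)
        return N_isl * _np.log(min_p) - N_i * min_p + S * (p - min_p) + S2 * (p - min_p)**2
    else:
        # standard (non-Poisson) picture: probabilities of each circuit's outcomes sum to one
        if N_isl == 0:
            return 0.0
        if p >= min_p:
            return N_isl * _np.log(p)
        S = N_isl / min_p
        S2 = -0.5 * N_isl / (min_p**2)
        return N_isl * _np.log(min_p) + S * (p - min_p) + S2 * (p - min_p)**2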
#@smart_cached
def logl_terms(model, dataset, circuit_list=None,
minProbClip=1e-6, probClipInterval=(-1e6, 1e6), radius=1e-4,
poissonPicture=True, check=False, opLabelAliases=None,
evaltree_cache=None, comm=None, smartc=None, wildcard=None):
"""
The vector of log-likelihood contributions for each operation sequence,
aggregated over outcomes.
Parameters
----------
This function takes the same arguments as :func:`logl` except it
doesn't perform the final sum over operation sequences and SPAM labels.
Returns
-------
numpy.ndarray
Array of length either `len(circuit_list)` or `len(dataset.keys())`.
Values are the log-likelihood contributions of the corresponding gate
string aggregated over outcomes.
"""
def smart(fn, *args, **kwargs):
if smartc:
return smartc.cached_compute(fn, args, kwargs)[1]
else:
if '_filledarrays' in kwargs: del kwargs['_filledarrays']
return fn(*args, **kwargs)
if circuit_list is None:
circuit_list = list(dataset.keys())
a = radius # parameterizes "roundness" of f == 0 terms
min_p = minProbClip
if evaltree_cache and 'evTree' in evaltree_cache:
evalTree = evaltree_cache['evTree']
lookup = evaltree_cache['lookup']
outcomes_lookup = evaltree_cache['outcomes_lookup']
#tree_circuit_list = evalTree.generate_circuit_list()
        # Note: this is != circuit_list, as the tree holds *simplified* circuits
else:
#OLD: evalTree,lookup,outcomes_lookup = smart(model.bulk_evaltree,circuit_list, dataset=dataset)
evalTree, _, _, lookup, outcomes_lookup = smart(model.bulk_evaltree_from_resources,
circuit_list, comm, dataset=dataset)
#Fill cache dict if one was given
if evaltree_cache is not None:
evaltree_cache['evTree'] = evalTree
evaltree_cache['lookup'] = lookup
evaltree_cache['outcomes_lookup'] = outcomes_lookup
nEls = evalTree.num_final_elements()
probs = _np.zeros(nEls, 'd') # _np.empty( nEls, 'd' ) - .zeros b/c of caching
ds_circuit_list = _lt.apply_aliases_to_circuit_list(circuit_list, opLabelAliases)
if evaltree_cache and 'cntVecMx' in evaltree_cache:
countVecMx = evaltree_cache['cntVecMx']
totalCntVec = evaltree_cache['totalCntVec']
else:
countVecMx = _np.empty(nEls, 'd')
totalCntVec = _np.empty(nEls, 'd')
for (i, opStr) in enumerate(ds_circuit_list):
cnts = dataset[opStr].counts
totalCntVec[lookup[i]] = sum(cnts.values()) # dataset[opStr].total
countVecMx[lookup[i]] = [cnts.get(x, 0) for x in outcomes_lookup[i]]
#could add to cache, but we don't have option of circuitWeights
# here yet, so let's be conservative and not do this:
#if evaltree_cache is not None:
# evaltree_cache['cntVecMx'] = countVecMx
# evaltree_cache['totalCntVec'] = totalCntVec
    #Detect omitted frequencies (assumed to be 0) so we can compute the likelihood correctly
firsts = []; indicesOfCircuitsWithOmittedData = []
for i, c in enumerate(circuit_list):
lklen = _slct.length(lookup[i])
if 0 < lklen < model.get_num_outcomes(c):
firsts.append(_slct.as_array(lookup[i])[0])
indicesOfCircuitsWithOmittedData.append(i)
if len(firsts) > 0:
firsts = _np.array(firsts, 'i')
indicesOfCircuitsWithOmittedData = _np.array(indicesOfCircuitsWithOmittedData, 'i')
else:
firsts = None
smart(model.bulk_fill_probs, probs, evalTree, probClipInterval, check, comm, _filledarrays=(0,))
if wildcard:
probs_in = probs.copy()
wildcard.update_probs(probs_in, probs, countVecMx / totalCntVec, circuit_list, lookup)
pos_probs = _np.where(probs < min_p, min_p, probs)
# XXX: aren't the next blocks duplicated elsewhere?
if poissonPicture:
S = countVecMx / min_p - totalCntVec # slope term that is derivative of logl at min_p
S2 = -0.5 * countVecMx / (min_p**2) # 2nd derivative of logl term at min_p
v = countVecMx * _np.log(pos_probs) - totalCntVec * pos_probs # dim KM (K = nSpamLabels, M = nCircuits)
# remove small positive elements due to roundoff error (above expression *cannot* really be positive)
v = _np.minimum(v, 0)
# quadratic extrapolation of logl at min_p for probabilities < min_p
v = _np.where(probs < min_p, v + S * (probs - min_p) + S2 * (probs - min_p)**2, v)
v = _np.where(countVecMx == 0,
-totalCntVec * _np.where(probs >= a, probs,
(-1.0 / (3 * a**2)) * probs**3 + probs**2 / a + a / 3.0),
v)
        #special handling for f == 0 poissonPicture terms (including the omitted outcomes below)
        #uses the cubic patch F(p) = p for p >= a, else -(1/(3a^2))p^3 + p^2/a + a/3
if firsts is not None:
omitted_probs = 1.0 - _np.array([_np.sum(pos_probs[lookup[i]])
for i in indicesOfCircuitsWithOmittedData])
v[firsts] -= totalCntVec[firsts] * \
_np.where(omitted_probs >= a, omitted_probs,
(-1.0 / (3 * a**2)) * omitted_probs**3 + omitted_probs**2 / a + a / 3.0)
else:
# (the non-poisson picture requires that the probabilities of the spam labels for a given string are constrained
# to sum to 1)
S = countVecMx / min_p # slope term that is derivative of logl at min_p
S2 = -0.5 * countVecMx / (min_p**2) # 2nd derivative of logl term at min_p
v = countVecMx * _np.log(pos_probs) # dim KM (K = nSpamLabels, M = nCircuits)
# remove small positive elements due to roundoff error (above expression *cannot* really be positive)
v = _np.minimum(v, 0)
# quadratic extrapolation of logl at min_p for probabilities < min_p
v = _np.where(probs < min_p, v + S * (probs - min_p) + S2 * (probs - min_p)**2, v)
v = _np.where(countVecMx == 0, 0.0, v)
#Note: no need to account for omitted probs at all (they contribute nothing)
#DEBUG
#print "num clipped = ",_np.sum(probs < min_p)," of ",probs.shape
#print "min/max probs = ",min(probs.flatten()),",",max(probs.flatten())
#for i in range(v.shape[1]):
# print "%d %.0f (%f) %.0f (%g)" % (i,v[0,i],probs[0,i],v[1,i],probs[1,i])
#Aggregate over outcomes:
# v[iElement] contains all logl contributions - now aggregate over outcomes
    # terms[iCircuit] will contain logl contributions for each original gate
# string (aggregated over outcomes)
nCircuits = len(circuit_list)
terms = _np.empty(nCircuits, 'd')
for i in range(nCircuits):
terms[i] = _np.sum(v[lookup[i]], axis=0)
return terms
#@smart_cached
def logl(model, dataset, circuit_list=None,
minProbClip=1e-6, probClipInterval=(-1e6, 1e6), radius=1e-4,
poissonPicture=True, check=False, opLabelAliases=None,
evaltree_cache=None, comm=None, smartc=None, wildcard=None):
"""
The log-likelihood function.
Parameters
----------
model : Model
Model of parameterized gates
dataset : DataSet
Probability data
circuit_list : list of (tuples or Circuits), optional
        Each element specifies an operation sequence to include in the log-likelihood
sum. Default value of None implies all the operation sequences in dataset
should be used.
minProbClip : float, optional
The minimum probability treated normally in the evaluation of the log-likelihood.
A penalty function replaces the true log-likelihood for probabilities that lie
below this threshold so that the log-likelihood never becomes undefined (which improves
optimizer performance).
probClipInterval : 2-tuple or None, optional
(min,max) values used to clip the probabilities predicted by models during MLEGST's
search for an optimal model (if not None). if None, no clipping is performed.
radius : float, optional
Specifies the severity of rounding used to "patch" the zero-frequency
terms of the log-likelihood.
evalTree : evaluation tree, optional
given by a prior call to bulk_evaltree for the same circuit_list.
Significantly speeds up evaluation of log-likelihood, even more so
when accompanied by countVecMx (see below).
poissonPicture : boolean, optional
Whether the log-likelihood-in-the-Poisson-picture terms should be included
in the returned logl value.
check : boolean, optional
If True, perform extra checks within code to verify correctness. Used
for testing, and runs much slower when True.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
evaltree_cache : dict, optional
A dictionary which server as a cache for the computed EvalTree used
in this computation. If an empty dictionary is supplied, it is filled
with cached values to speed up subsequent executions of this function
which use the *same* `model` and `circuit_list`.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
smartc : SmartCache, optional
A cache object to cache & use previously cached values inside this
function.
wildcard : WildcardBudget
A wildcard budget to apply to this log-likelihood computation.
This increases the returned log-likelihood value by adjusting
(by a maximal amount measured in TVD, given by the budget) the
        probabilities produced by `model` to optimally match the data
        (within the budgetary constraints) when evaluating the log-likelihood.
Returns
-------
float
The log likelihood
"""
v = logl_terms(model, dataset, circuit_list,
minProbClip, probClipInterval, radius,
poissonPicture, check, opLabelAliases,
evaltree_cache, comm, smartc, wildcard)
return _np.sum(v) # sum over *all* dimensions
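# Example usage (sketch only; assumes `model` is a pyGSTi Model and `dataset` a pyGSTi
# DataSet constructed elsewhere):
#
#   total_logl = logl(model, dataset)          # aggregate log-likelihood
#   per_circuit = logl_terms(model, dataset)   # one contribution per circuit
#   assert abs(total_logl - per_circuit.sum()) < 1e-8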
def logl_jacobian(model, dataset, circuit_list=None,
minProbClip=1e-6, probClipInterval=(-1e6, 1e6), radius=1e-4,
poissonPicture=True, check=False, comm=None,
memLimit=None, opLabelAliases=None, smartc=None,
verbosity=0):
"""
The jacobian of the log-likelihood function.
Parameters
----------
model : Model
Model of parameterized gates (including SPAM)
dataset : DataSet
Probability data
circuit_list : list of (tuples or Circuits), optional
        Each element specifies an operation sequence to include in the log-likelihood
sum. Default value of None implies all the operation sequences in dataset
should be used.
minProbClip : float, optional
The minimum probability treated normally in the evaluation of the log-likelihood.
A penalty function replaces the true log-likelihood for probabilities that lie
below this threshold so that the log-likelihood never becomes undefined (which improves
optimizer performance).
probClipInterval : 2-tuple or None, optional
(min,max) values used to clip the probabilities predicted by models during MLEGST's
search for an optimal model (if not None). if None, no clipping is performed.
radius : float, optional
Specifies the severity of rounding used to "patch" the zero-frequency
terms of the log-likelihood.
evalTree : evaluation tree, optional
given by a prior call to bulk_evaltree for the same circuit_list.
Significantly speeds up evaluation of log-likelihood derivatives, even
more so when accompanied by countVecMx (see below). Defaults to None.
poissonPicture : boolean, optional
        Whether the Poisson-picture log-likelihood should be differentiated.
check : boolean, optional
If True, perform extra checks within code to verify correctness. Used
for testing, and runs much slower when True.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
memLimit : int, optional
A rough memory limit in bytes which restricts the amount of intermediate
values that are computed and stored.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
smartc : SmartCache, optional
A cache object to cache & use previously cached values inside this
function.
verbosity : int, optional
How much detail to print to stdout.
Returns
-------
numpy array
array of shape (M,), where M is the length of the vectorized model.
"""
def smart(fn, *args, **kwargs):
if smartc:
return smartc.cached_compute(fn, args, kwargs)[1]
else:
if '_filledarrays' in kwargs: del kwargs['_filledarrays']
return fn(*args, **kwargs)
if circuit_list is None:
circuit_list = list(dataset.keys())
C = 1.0 / 1024.0**3; nP = model.num_params()
persistentMem = 8 * nP + 8 * len(circuit_list) * (nP + 1) # in bytes
if memLimit is not None and memLimit < persistentMem:
raise MemoryError("DLogL Memory limit (%g GB) is " % (memLimit * C)
+ "< memory required to hold final results (%g GB)"
% (persistentMem * C))
#OLD: evalTree,lookup,outcomes_lookup = model.bulk_evaltree(circuit_list)
mlim = None if (memLimit is None) else memLimit - persistentMem
# Note: simplify_circuits doesn't support aliased dataset (yet)
dstree = dataset if (opLabelAliases is None) else None
evalTree, blkSize, _, lookup, outcomes_lookup = \
smart(model.bulk_evaltree_from_resources,
circuit_list, comm, mlim, "deriv", ['bulk_fill_dprobs'],
dstree, verbosity)
a = radius # parameterizes "roundness" of f == 0 terms
min_p = minProbClip
# Allocate persistent memory
jac = _np.zeros([1, nP])
nEls = evalTree.num_final_elements()
probs = _np.empty(nEls, 'd')
dprobs = _np.empty((nEls, nP), 'd')
ds_circuit_list = _lt.apply_aliases_to_circuit_list(circuit_list, opLabelAliases)
countVecMx = _np.empty(nEls, 'd')
totalCntVec = _np.empty(nEls, 'd')
for (i, opStr) in enumerate(ds_circuit_list):
cnts = dataset[opStr].counts
totalCntVec[lookup[i]] = sum(cnts.values()) # dataset[opStr].total
countVecMx[lookup[i]] = [cnts.get(x, 0) for x in outcomes_lookup[i]]
    #Detect omitted frequencies (assumed to be 0) so we can compute the likelihood correctly
firsts = []; indicesOfCircuitsWithOmittedData = []
for i, c in enumerate(circuit_list):
lklen = _slct.length(lookup[i])
if 0 < lklen < model.get_num_outcomes(c):
firsts.append(_slct.as_array(lookup[i])[0])
indicesOfCircuitsWithOmittedData.append(i)
if len(firsts) > 0:
firsts = _np.array(firsts, 'i')
indicesOfCircuitsWithOmittedData = _np.array(indicesOfCircuitsWithOmittedData, 'i')
dprobs_omitted_rowsum = _np.empty((len(firsts), nP), 'd')
else:
firsts = None
smart(model.bulk_fill_dprobs, dprobs, evalTree, prMxToFill=probs,
clipTo=probClipInterval, check=check, comm=comm,
wrtBlockSize=blkSize, _filledarrays=(0, 'prMxToFill')) # FUTURE: set gatherMemLimit=?
pos_probs = _np.where(probs < min_p, min_p, probs)
if poissonPicture:
S = countVecMx / min_p - totalCntVec # slope term that is derivative of logl at min_p
S2 = -0.5 * countVecMx / (min_p**2) # 2nd derivative of logl term at min_p
        #TODO: is v actually needed/used here??
v = countVecMx * _np.log(pos_probs) - totalCntVec * pos_probs # dim KM (K = nSpamLabels, M = nCircuits)
# remove small positive elements due to roundoff error (above expression *cannot* really be positive)
v = _np.minimum(v, 0)
# quadratic extrapolation of logl at min_p for probabilities < min_p
v = _np.where(probs < min_p, v + S * (probs - min_p) + S2 * (probs - min_p)**2, v)
v = _np.where(countVecMx == 0,
-totalCntVec * _np.where(probs >= a, probs,
(-1.0 / (3 * a**2)) * probs**3 + probs**2 / a + a / 3.0),
v)
        #special handling for f == 0 poissonPicture terms (including the omitted outcomes below)
        #uses the cubic patch F(p) = p for p >= a, else -(1/(3a^2))p^3 + p^2/a + a/3
if firsts is not None:
omitted_probs = 1.0 - _np.array([_np.sum(pos_probs[lookup[i]])
for i in indicesOfCircuitsWithOmittedData])
v[firsts] -= totalCntVec[firsts] * \
_np.where(omitted_probs >= a, omitted_probs,
(-1.0 / (3 * a**2)) * omitted_probs**3 + omitted_probs**2 / a + a / 3.0)
dprobs_factor_pos = (countVecMx / pos_probs - totalCntVec)
dprobs_factor_neg = S + 2 * S2 * (probs - min_p)
dprobs_factor_zerofreq = -totalCntVec * _np.where(probs >= a, 1.0, (-1.0 / a**2) * probs**2 + 2 * probs / a)
dprobs_factor = _np.where(probs < min_p, dprobs_factor_neg, dprobs_factor_pos)
dprobs_factor = _np.where(countVecMx == 0, dprobs_factor_zerofreq, dprobs_factor)
if firsts is not None:
dprobs_factor_omitted = totalCntVec[firsts] * _np.where(
omitted_probs >= a, 1.0,
(-1.0 / a**2) * omitted_probs**2 + 2 * omitted_probs / a)
for ii, i in enumerate(indicesOfCircuitsWithOmittedData):
dprobs_omitted_rowsum[ii, :] = _np.sum(dprobs[lookup[i], :], axis=0)
jac = dprobs * dprobs_factor[:, None] # (KM,N) * (KM,1) (N = dim of vectorized model)
# need to multipy dprobs_factor_omitted[i] * dprobs[k] for k in lookup[i] and
# add to dprobs[firsts[i]] for i in indicesOfCircuitsWithOmittedData
if firsts is not None:
jac[firsts, :] += dprobs_factor_omitted[:, None] * dprobs_omitted_rowsum
# nCircuitsWithOmittedData x N
else:
# (the non-poisson picture requires that the probabilities of the spam labels for a given string are constrained
# to sum to 1)
S = countVecMx / min_p # slope term that is derivative of logl at min_p
S2 = -0.5 * countVecMx / (min_p**2) # 2nd derivative of logl term at min_p
v = countVecMx * _np.log(pos_probs) # dims K x M (K = nSpamLabels, M = nCircuits)
# remove small positive elements due to roundoff error (above expression *cannot* really be positive)
v = _np.minimum(v, 0)
# quadratic extrapolation of logl at min_p for probabilities < min_p
v = _np.where(probs < min_p, v + S * (probs - min_p) + S2 * (probs - min_p)**2, v)
v = _np.where(countVecMx == 0, 0.0, v)
dprobs_factor_pos = countVecMx / pos_probs
dprobs_factor_neg = S + 2 * S2 * (probs - min_p)
dprobs_factor = _np.where(probs < min_p, dprobs_factor_neg, dprobs_factor_pos)
dprobs_factor = _np.where(countVecMx == 0, 0.0, dprobs_factor)
jac = dprobs * dprobs_factor[:, None] # (KM,N) * (KM,1) (N = dim of vectorized model)
#Note: no correction from omitted probabilities needed in poissonPicture == False case.
# jac[iSpamLabel,iCircuit,iModelParam] contains all d(logl)/d(modelParam) contributions
return _np.sum(jac, axis=0) # sum over spam label and operation sequence dimensions
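# Sanity-check sketch (hypothetical helper, not part of this module): the analytic
# jacobian above can be compared against a one-sided finite difference of `logl`,
# assuming the usual Model.to_vector()/from_vector() parameter interface:
#
#   def _finite_difference_logl_jacobian(model, dataset, eps=1e-7):
#       p0 = model.to_vector().copy()
#       f0 = logl(model, dataset)
#       fd = _np.empty(len(p0), 'd')
#       for k in range(len(p0)):
#           p = p0.copy(); p[k] += eps
#           model.from_vector(p)
#           fd[k] = (logl(model, dataset) - f0) / eps
#       model.from_vector(p0)  # restore the original parameters
#       return fd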
def logl_hessian(model, dataset, circuit_list=None, minProbClip=1e-6,
probClipInterval=(-1e6, 1e6), radius=1e-4, poissonPicture=True,
check=False, comm=None, memLimit=None,
opLabelAliases=None, smartc=None, verbosity=0):
"""
The hessian of the log-likelihood function.
Parameters
----------
model : Model
Model of parameterized gates (including SPAM)
dataset : DataSet
Probability data
circuit_list : list of (tuples or Circuits), optional
        Each element specifies an operation sequence to include in the log-likelihood
sum. Default value of None implies all the operation sequences in dataset
should be used.
minProbClip : float, optional
The minimum probability treated normally in the evaluation of the log-likelihood.
A penalty function replaces the true log-likelihood for probabilities that lie
below this threshold so that the log-likelihood never becomes undefined (which improves
optimizer performance).
probClipInterval : 2-tuple or None, optional
(min,max) values used to clip the probabilities predicted by
models during MLEGST's search for an optimal model (if not None).
if None, no clipping is performed.
radius : float, optional
Specifies the severity of rounding used to "patch" the zero-frequency
terms of the log-likelihood.
poissonPicture : boolean, optional
        Whether the Poisson-picture log-likelihood should be differentiated.
check : boolean, optional
If True, perform extra checks within code to verify correctness. Used
for testing, and runs much slower when True.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
memLimit : int, optional
A rough memory limit in bytes which restricts the amount of intermediate
values that are computed and stored.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
smartc : SmartCache, optional
A cache object to cache & use previously cached values inside this
function.
verbosity : int, optional
How much detail to print to stdout.
Returns
-------
numpy array
array of shape (M,M), where M is the length of the vectorized model.
"""
def smart(fn, *args, **kwargs):
if smartc:
return smartc.cached_compute(fn, args, kwargs)[1]
else:
if '_filledarrays' in kwargs: del kwargs['_filledarrays']
return fn(*args, **kwargs)
nP = model.num_params()
if circuit_list is None:
circuit_list = list(dataset.keys())
# Estimate & check persistent memory (from allocs directly below)
C = 1.0 / 1024.0**3; nP = model.num_params()
persistentMem = 8 * nP**2 # in bytes
if memLimit is not None and memLimit < persistentMem:
raise MemoryError("HLogL Memory limit (%g GB) is " % (memLimit * C)
+ "< memory required to hold final results (%g GB)"
% (persistentMem * C))
# Allocate persistent memory
final_hessian = _np.zeros((nP, nP), 'd')
# Estimate & check intermediate memory
# - figure out how many row & column partitions are needed
# to fit computation within available memory (and use all cpus)
mlim = None if (memLimit is None) else memLimit - persistentMem
# Note: simplify_circuits doesn't support aliased dataset (yet)
dstree = dataset if (opLabelAliases is None) else None
evalTree, blkSize1, blkSize2, lookup, outcomes_lookup = \
smart(model.bulk_evaltree_from_resources,
circuit_list, comm, mlim, "deriv", ['bulk_hprobs_by_block'],
dstree, verbosity)
rowParts = int(round(nP / blkSize1)) if (blkSize1 is not None) else 1
colParts = int(round(nP / blkSize2)) if (blkSize2 is not None) else 1
a = radius # parameterizes "roundness" of f == 0 terms
min_p = minProbClip
    #Detect omitted frequencies (assumed to be 0) so we can compute the likelihood correctly
firsts = []; indicesOfCircuitsWithOmittedData = []
for i, c in enumerate(circuit_list):
lklen = _slct.length(lookup[i])
if 0 < lklen < model.get_num_outcomes(c):
firsts.append(_slct.as_array(lookup[i])[0])
indicesOfCircuitsWithOmittedData.append(i)
if len(firsts) > 0:
firsts = _np.array(firsts, 'i')
indicesOfCircuitsWithOmittedData = _np.array(indicesOfCircuitsWithOmittedData, 'i')
else:
firsts = None
if poissonPicture:
#NOTE: hessian_from_hprobs MAY modify hprobs and dprobs12 (to save mem)
def hessian_from_hprobs(hprobs, dprobs12, cntVecMx, totalCntVec, pos_probs):
""" Factored-out computation of hessian from raw components """
# Notation: (K=#spam, M=#strings, N=#wrtParams1, N'=#wrtParams2 )
totCnts = totalCntVec # shorthand
S = cntVecMx / min_p - totCnts # slope term that is derivative of logl at min_p
S2 = -0.5 * cntVecMx / (min_p**2) # 2nd derivative of logl term at min_p
#Allocate these above? Need to know block sizes of dprobs12 & hprobs...
if firsts is not None:
dprobs12_omitted_rowsum = _np.empty((len(firsts),) + dprobs12.shape[1:], 'd')
hprobs_omitted_rowsum = _np.empty((len(firsts),) + hprobs.shape[1:], 'd')
# # (K,M,1,1) * (K,M,N,N')
# hprobs_pos = (-cntVecMx / pos_probs**2)[:,:,None,None] * dprobs12
# # (K,M,1,1) * (K,M,N,N')
# hprobs_pos += (cntVecMx / pos_probs - totalCntVec[None,:])[:,:,None,None] * hprobs
# # (K,M,1,1) * (K,M,N,N')
# hprobs_neg = (2*S2)[:,:,None,None] * dprobs12 + (S + 2*S2*(probs - min_p))[:,:,None,None] * hprobs
# hprobs_zerofreq = _np.where( (probs >= a)[:,:,None,None],
# -totalCntVec[None,:,None,None] * hprobs,
# (-totalCntVec[None,:] * ( (-2.0/a**2)*probs + 2.0/a))[:,:,None,None] \
# * dprobs12
# - (totalCntVec[None,:] * ((-1.0/a**2)*probs**2 + 2*probs/a))[:,:,None,None] \
# * hprobs )
# hessian = _np.where( (probs < min_p)[:,:,None,None], hprobs_neg, hprobs_pos)
# hessian = _np.where( (cntVecMx == 0)[:,:,None,None], hprobs_zerofreq, hessian) # (K,M,N,N')
omitted_probs = 1.0 - _np.array([_np.sum(pos_probs[lookup[i]]) for i in indicesOfCircuitsWithOmittedData])
for ii, i in enumerate(indicesOfCircuitsWithOmittedData):
dprobs12_omitted_rowsum[ii, :, :] = _np.sum(dprobs12[lookup[i], :, :], axis=0)
hprobs_omitted_rowsum[ii, :, :] = _np.sum(hprobs[lookup[i], :, :], axis=0)
#Accomplish the same thing as the above commented-out lines,
# but with more memory efficiency:
dprobs12_coeffs = \
_np.where(probs < min_p, 2 * S2, -cntVecMx / pos_probs**2)
zfc = _np.where(probs >= a, 0.0, -totCnts * ((-2.0 / a**2) * probs + 2.0 / a))
dprobs12_coeffs = _np.where(cntVecMx == 0, zfc, dprobs12_coeffs)
hprobs_coeffs = \
_np.where(probs < min_p, S + 2 * S2 * (probs - min_p),
cntVecMx / pos_probs - totCnts)
zfc = _np.where(probs >= a, -totCnts,
-totCnts * ((-1.0 / a**2) * probs**2 + 2 * probs / a))
hprobs_coeffs = _np.where(cntVecMx == 0, zfc, hprobs_coeffs)
if firsts is not None:
dprobs12_omitted_coeffs = totCnts[firsts] * _np.where(
omitted_probs >= a, 0.0, (-2.0 / a**2) * omitted_probs + 2.0 / a)
hprobs_omitted_coeffs = totCnts[firsts] * _np.where(
omitted_probs >= a, 1.0,
(-1.0 / a**2) * omitted_probs**2 + 2 * omitted_probs / a)
# hessian = hprobs_coeffs * hprobs + dprobs12_coeff * dprobs12
# but re-using dprobs12 and hprobs memory (which is overwritten!)
hprobs *= hprobs_coeffs[:, None, None]
dprobs12 *= dprobs12_coeffs[:, None, None]
if firsts is not None:
hprobs[firsts, :, :] += hprobs_omitted_coeffs[:, None, None] * hprobs_omitted_rowsum
dprobs12[firsts, :, :] += dprobs12_omitted_coeffs[:, None, None] * dprobs12_omitted_rowsum
hessian = dprobs12; hessian += hprobs
# hessian[iSpamLabel,iCircuit,iModelParam1,iModelParams2] contains all
# d2(logl)/d(modelParam1)d(modelParam2) contributions
return _np.sum(hessian, axis=0)
# sum over spam label and operation sequence dimensions (operation sequences in evalSubTree)
# adds current subtree contribution for (N,N')-sized block of Hessian
else:
#(the non-poisson picture requires that the probabilities of the spam labels for a given string are constrained
#to sum to 1)
#NOTE: hessian_from_hprobs MAY modify hprobs and dprobs12 (to save mem)
def hessian_from_hprobs(hprobs, dprobs12, cntVecMx, totalCntVec, pos_probs):
""" Factored-out computation of hessian from raw components """
S = cntVecMx / min_p # slope term that is derivative of logl at min_p
S2 = -0.5 * cntVecMx / (min_p**2) # 2nd derivative of logl term at min_p
# # (K,M,1,1) * (K,M,N,N')
# hprobs_pos = (-cntVecMx / pos_probs**2)[:,:,None,None] * dprobs12
# # (K,M,1,1) * (K,M,N,N')
# hprobs_pos += (cntVecMx / pos_probs)[:,:,None,None] * hprobs
# # (K,M,1,1) * (K,M,N,N')
# hprobs_neg = (2*S2)[:,:,None,None] * dprobs12 + (S + 2*S2*(probs - min_p))[:,:,None,None] * hprobs
# hessian = _np.where( (probs < min_p)[:,:,None,None], hprobs_neg, hprobs_pos)
# # (K,M,N,N')
# hessian = _np.where( (cntVecMx == 0)[:,:,None,None], 0.0, hessian)
#Accomplish the same thing as the above commented-out lines,
# but with more memory efficiency:
dprobs12_coeffs = \
_np.where(probs < min_p, 2 * S2, -cntVecMx / pos_probs**2)
dprobs12_coeffs = _np.where(cntVecMx == 0, 0.0, dprobs12_coeffs)
hprobs_coeffs = \
_np.where(probs < min_p, S + 2 * S2 * (probs - min_p),
cntVecMx / pos_probs)
hprobs_coeffs = _np.where(cntVecMx == 0, 0.0, hprobs_coeffs)
# hessian = hprobs_coeffs * hprobs + dprobs12_coeff * dprobs12
# but re-using dprobs12 and hprobs memory (which is overwritten!)
hprobs *= hprobs_coeffs[:, None, None]
dprobs12 *= dprobs12_coeffs[:, None, None]
hessian = dprobs12; hessian += hprobs
#Note: no need to correct for omitted probs (zero contribution)
return _np.sum(hessian, axis=0) # see comments as above
#Note - we could in the future use comm to distribute over
# subtrees here. We currently don't because we parallelize
# over columns and it seems that in almost all cases of
# interest there will be more hessian columns than processors,
# so adding the additional ability to parallelize over
# subtrees would just add unnecessary complication.
#get distribution across subtrees (groups if needed)
subtrees = evalTree.get_sub_trees()
mySubTreeIndices, subTreeOwners, mySubComm = evalTree.distribute(comm)
# Allocate memory (alloc max required & take views)
max_nEls = max([subtrees[i].num_final_elements() for i in mySubTreeIndices])
probs_mem = _np.empty(max_nEls, 'd')
# Fill cntVecMx, totalCntVec for all elements (all subtrees)
nEls = evalTree.num_final_elements()
cntVecMx_all = _np.empty(nEls, 'd')
totalCntVec_all = _np.empty(nEls, 'd')
ds_subtree_circuit_list = _lt.apply_aliases_to_circuit_list(circuit_list, opLabelAliases)
for (i, opStr) in enumerate(ds_subtree_circuit_list):
cnts = dataset[opStr].counts
totalCntVec_all[lookup[i]] = sum(cnts.values()) # dataset[opStr].total
cntVecMx_all[lookup[i]] = [cnts.get(x, 0) for x in outcomes_lookup[i]]
tStart = _time.time()
#Loop over subtrees
for iSubTree in mySubTreeIndices:
evalSubTree = subtrees[iSubTree]
sub_nEls = evalSubTree.num_final_elements()
if evalSubTree.myFinalElsToParentFinalElsMap is not None:
#Then `evalSubTree` is a nontrivial sub-tree and its .spamtuple_indices
# will index the *parent's* final index array space, which we
# usually want but NOT here, where we fill arrays just big
# enough for each subtree separately - so re-init spamtuple_indices
evalSubTree = evalSubTree.copy()
evalSubTree.recompute_spamtuple_indices(bLocal=True)
# Create views into pre-allocated memory
probs = probs_mem[0:sub_nEls]
# Take portions of count arrays for this subtree
cntVecMx = cntVecMx_all[evalSubTree.final_element_indices(evalTree)]
totalCntVec = totalCntVec_all[evalSubTree.final_element_indices(evalTree)]
assert(len(cntVecMx) == len(probs))
#compute pos_probs separately
smart(model.bulk_fill_probs, probs, evalSubTree,
clipTo=probClipInterval, check=check,
comm=mySubComm, _filledarrays=(0,))
pos_probs = _np.where(probs < min_p, min_p, probs)
nCols = model.num_params()
blocks1 = _mpit.slice_up_range(nCols, rowParts)
blocks2 = _mpit.slice_up_range(nCols, colParts)
sliceTupList_all = list(_itertools.product(blocks1, blocks2))
#cull out lower triangle blocks, which have no overlap with
# the upper triangle of the hessian
sliceTupList = [(slc1, slc2) for slc1, slc2 in sliceTupList_all
if slc1.start <= slc2.stop]
loc_iBlks, blkOwners, blkComm = \
_mpit.distribute_indices(list(range(len(sliceTupList))), mySubComm)
mySliceTupList = [sliceTupList[i] for i in loc_iBlks]
subtree_hessian = _np.zeros((nP, nP), 'd')
k, kmax = 0, len(mySliceTupList)
for (slice1, slice2, hprobs, dprobs12) in model.bulk_hprobs_by_block(
evalSubTree, mySliceTupList, True, blkComm):
rank = comm.Get_rank() if (comm is not None) else 0
if verbosity > 3 or (verbosity == 3 and rank == 0):
iSub = mySubTreeIndices.index(iSubTree)
print("rank %d: %gs: block %d/%d, sub-tree %d/%d, sub-tree-len = %d"
% (rank, _time.time() - tStart, k, kmax, iSub,
len(mySubTreeIndices), len(evalSubTree)))
_sys.stdout.flush(); k += 1
subtree_hessian[slice1, slice2] = \
hessian_from_hprobs(hprobs, dprobs12, cntVecMx,
totalCntVec, pos_probs)
#NOTE: hessian_from_hprobs MAY modify hprobs and dprobs12
#Gather columns from different procs and add to running final hessian
#_mpit.gather_slices_by_owner(slicesIOwn, subtree_hessian,[], (0,1), mySubComm)
_mpit.gather_slices(sliceTupList, blkOwners, subtree_hessian, [], (0, 1), mySubComm)
final_hessian += subtree_hessian
#gather (add together) final_hessians from different processors
if comm is not None and len(set(subTreeOwners.values())) > 1:
if comm.Get_rank() not in subTreeOwners.values():
# this proc is not the "owner" of its subtrees and should not send a contribution to the sum
final_hessian[:, :] = 0.0 # zero out hessian so it won't contribute
final_hessian = comm.allreduce(final_hessian)
#copy upper triangle to lower triangle (we only compute upper)
for i in range(final_hessian.shape[0]):
for j in range(i + 1, final_hessian.shape[1]):
final_hessian[j, i] = final_hessian[i, j]
return final_hessian # (N,N)
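# --- Illustrative sketch (not part of pyGSTi; names below are hypothetical) ---
# The partitioning logic above splits the parameter axes into row/column slice
# blocks, computes only the blocks overlapping the upper triangle, and mirrors
# the result.  A minimal standalone demonstration of that pattern:
import itertools as _itertools  # same aliases as this module's own imports
import numpy as _np

def _demo_slice_up_range(n, num_slices):
    """Split range(n) into `num_slices` contiguous slices (simplified sketch)."""
    base, extra = divmod(n, num_slices)
    slices, start = [], 0
    for k in range(num_slices):
        length = base + (1 if k < extra else 0)
        slices.append(slice(start, start + length))
        start += length
    return slices

def _demo_blocked_upper_triangle(nP, rowParts, colParts, block_fn):
    """Fill only upper-triangle-overlapping blocks via block_fn(slc1, slc2), then mirror."""
    H = _np.zeros((nP, nP), 'd')
    for slc1, slc2 in _itertools.product(_demo_slice_up_range(nP, rowParts),
                                         _demo_slice_up_range(nP, colParts)):
        if slc1.start > slc2.stop:  # cull blocks with no upper-triangle overlap
            continue
        H[slc1, slc2] = block_fn(slc1, slc2)
    for i in range(nP):             # copy upper triangle to lower triangle
        for j in range(i + 1, nP):
            H[j, i] = H[i, j]
    return H
# e.g. for a symmetric rank-1 "Hessian":
#   v = _np.arange(7.0)
#   H = _demo_blocked_upper_triangle(7, 3, 2, lambda s1, s2: _np.outer(v[s1], v[s2]))
#   assert _np.allclose(H, _np.outer(v, v))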
def logl_approximate_hessian(model, dataset, circuit_list=None,
minProbClip=1e-6, probClipInterval=(-1e6, 1e6), radius=1e-4,
poissonPicture=True, check=False, comm=None,
memLimit=None, opLabelAliases=None, smartc=None,
verbosity=0):
"""
An approximate Hessian of the log-likelihood function.
An approximation to the true Hessian is computed using just the Jacobian
(and *not* the Hessian) of the probabilities w.r.t. the model
parameters. Let `J = d(probs)/d(params)` and denote the Hessian of the
log-likelihood w.r.t. the probabilities as `d2(logl)/dprobs2` (a *diagonal*
matrix indexed by the term, i.e. probability, of the log-likelihood). Then
this function computes:
`H = J * d2(logl)/dprobs2 * J.T`
Which simply neglects the `d2(probs)/d(params)2` terms of the true Hessian.
Since this curvature is expected to be small at the MLE point, this
approximation can be useful for computing approximate error bars.
Parameters
----------
model : Model
Model of parameterized gates (including SPAM)
dataset : DataSet
Probability data
circuit_list : list of (tuples or Circuits), optional
Each element specifies an operation sequence to include in the log-likelihood
sum. Default value of None implies all the operation sequences in dataset
should be used.
minProbClip : float, optional
The minimum probability treated normally in the evaluation of the log-likelihood.
A penalty function replaces the true log-likelihood for probabilities that lie
below this threshold so that the log-likelihood never becomes undefined (which improves
optimizer performance).
probClipInterval : 2-tuple or None, optional
(min,max) values used to clip the probabilities predicted by models during MLEGST's
search for an optimal model (if not None). If None, no clipping is performed.
radius : float, optional
Specifies the severity of rounding used to "patch" the zero-frequency
terms of the log-likelihood.
evalTree : evaluation tree, optional
given by a prior call to bulk_evaltree for the same circuit_list.
Significantly speeds up evaluation of log-likelihood derivatives, even
more so when accompanied by countVecMx (see below). Defaults to None.
poissonPicture : boolean, optional
Whether the Poisson-picture log-likelihood should be differentiated.
check : boolean, optional
If True, perform extra checks within code to verify correctness. Used
for testing, and runs much slower when True.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
memLimit : int, optional
A rough memory limit in bytes which restricts the amount of intermediate
values that are computed and stored.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
smartc : SmartCache, optional
A cache object to cache & use previously cached values inside this
function.
verbosity : int, optional
How much detail to print to stdout.
Returns
-------
numpy array
array of shape (M,M), where M is the length of the vectorized model.
"""
def smart(fn, *args, **kwargs):
if smartc:
return smartc.cached_compute(fn, args, kwargs)[1]
else:
if '_filledarrays' in kwargs: del kwargs['_filledarrays']
return fn(*args, **kwargs)
if circuit_list is None:
circuit_list = list(dataset.keys())
C = 1.0 / 1024.0**3; nP = model.num_params()
persistentMem = 8 * nP**2 + 8 * len(circuit_list) * (nP + 1) # in bytes
if memLimit is not None and memLimit < persistentMem:
raise MemoryError("DLogL Memory limit (%g GB) is " % (memLimit * C)
+ "< memory required to hold final results (%g GB)"
% (persistentMem * C))
#OLD: evalTree,lookup,outcomes_lookup = model.bulk_evaltree(circuit_list)
mlim = None if (memLimit is None) else memLimit - persistentMem
# Note: simplify_circuits doesn't support aliased dataset (yet)
dstree = dataset if (opLabelAliases is None) else None
evalTree, blkSize, _, lookup, outcomes_lookup = \
smart(model.bulk_evaltree_from_resources,
circuit_list, comm, mlim, "deriv", ['bulk_fill_dprobs'],
dstree, verbosity)
a = radius # parameterizes "roundness" of f == 0 terms
min_p = minProbClip
# Allocate persistent memory
#hessian = _np.zeros( (nP,nP), 'd') # allocated below by assignment
nEls = evalTree.num_final_elements()
probs = _np.empty(nEls, 'd')
dprobs = _np.empty((nEls, nP), 'd')
ds_circuit_list = _lt.apply_aliases_to_circuit_list(circuit_list, opLabelAliases)
cntVecMx = _np.empty(nEls, 'd')
totalCntVec = _np.empty(nEls, 'd')
for (i, opStr) in enumerate(ds_circuit_list):
cnts = dataset[opStr].counts
totalCntVec[lookup[i]] = sum(cnts.values()) # dataset[opStr].total
cntVecMx[lookup[i]] = [cnts.get(x, 0) for x in outcomes_lookup[i]]
smart(model.bulk_fill_dprobs, dprobs, evalTree, prMxToFill=probs,
clipTo=probClipInterval, check=check, comm=comm,
wrtBlockSize=blkSize, _filledarrays=(0, 'prMxToFill')) # FUTURE: set gatherMemLimit=?
pos_probs = _np.where(probs < min_p, min_p, probs)
#Note: these approximate-hessian formula are similar to (but simpler than) the
# computations done by the `hessian_from_hprobs` functions in `logl_hessian(...)`.
# They compute just the hessian of the log-likelihood w.r.t. the probabilities -
# which correspond to just the `dprobs12_coeffs` variable of the aforementioned
# functions. This is so b/c in this case the "dp1" and "dp2" terms are delta
# functions and "hp==0" (b/c the "params" here are just the probabilities
# themselves) - so only the X*dp1*dp2 terms survive the general expressions
# found above.
if poissonPicture:
totCnts = totalCntVec # shorthand
S2 = -0.5 * cntVecMx / (min_p**2) # 2nd derivative of logl term at min_p
dprobs12_coeffs = \
_np.where(probs < min_p, 2 * S2, -cntVecMx / pos_probs**2)
zfc = _np.where(probs >= a, 0.0, -totCnts * ((-2.0 / a**2) * probs + 2.0 / a))
dprobs12_coeffs = _np.where(cntVecMx == 0, zfc, dprobs12_coeffs)
# a 1D array of the diagonal of d2(logl)/dprobs2; shape = (nEls,)
else:
S2 = -0.5 * cntVecMx / (min_p**2) # 2nd derivative of logl term at min_p
dprobs12_coeffs = \
_np.where(probs < min_p, 2 * S2, -cntVecMx / pos_probs**2)
dprobs12_coeffs = _np.where(cntVecMx == 0, 0.0, dprobs12_coeffs)
# a 1D array of the diagonal of d2(logl)/dprobs2; shape = (nEls,)
# In notation in docstring:
# J = dprobs.T (shape nEls,nP)
# diagonal of d2(logl)/dprobs2 = dprobs12_coeffs (var name kept to preserve
# similarity w/functions in logl_hessian)
# So H = J * d2(logl)/dprobs2 * J.T becomes:
hessian = _np.dot(dprobs.T, dprobs12_coeffs[:, None] * dprobs)
return hessian
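# --- Illustrative sketch (not part of pyGSTi) --------------------------------
# The final line above is the contraction H = J^T * diag(d) * J with
# J = d(probs)/d(params) of shape (nEls, nP) and d = dprobs12_coeffs.  A minimal
# numpy check with made-up sizes (all names here are hypothetical):
def _demo_jtdj_contraction(nEls=12, nP=4, seed=0):
    import numpy as _np
    rng = _np.random.RandomState(seed)
    J = rng.randn(nEls, nP)                    # stand-in for `dprobs`
    d = rng.randn(nEls)                        # stand-in for `dprobs12_coeffs`
    H1 = _np.dot(J.T, d[:, None] * J)          # broadcasted form used above
    H2 = _np.einsum('i,ij,ik->jk', d, J, J)    # explicit J^T diag(d) J
    assert _np.allclose(H1, H2) and _np.allclose(H1, H1.T)
    return H1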
#@smart_cached
def logl_max(model, dataset, circuit_list=None, poissonPicture=True,
check=False, opLabelAliases=None, evaltree_cache=None,
smartc=None):
"""
The maximum log-likelihood possible for a DataSet. That is, the
log-likelihood obtained by a maximal model that can fit perfectly
the probability of each operation sequence.
Parameters
----------
model : Model
the model, used only for operation sequence compilation
dataset : DataSet
the data set to use.
circuit_list : list of (tuples or Circuits), optional
Each element specifies an operation sequence to include in the max-log-likelihood
sum. Default value of None implies all the operation sequences in dataset should
be used.
poissonPicture : boolean, optional
Whether the Poisson-picture maximum log-likelihood should be returned.
check : boolean, optional
Whether an additional check is performed which computes the max logl another
way and compares to the faster method.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
evaltree_cache : dict, optional
A dictionary which serves as a cache for the computed EvalTree used
in this computation. If an empty dictionary is supplied, it is filled
with cached values to speed up subsequent executions of this function
which use the *same* `model` and `circuit_list`.
smartc : SmartCache, optional
A cache object to cache & use previously cached values inside this
function.
Returns
-------
float
"""
maxLogLTerms = logl_max_terms(model, dataset, circuit_list,
poissonPicture, opLabelAliases,
evaltree_cache, smartc)
# maxLogLTerms[iSpamLabel,iCircuit] contains all logl-upper-bound contributions
maxLogL = _np.sum(maxLogLTerms) # sum over *all* dimensions
if check:
L = 0
for circuit in circuit_list:
dsRow = dataset[circuit]
N = dsRow.total # sum of counts for all outcomes (all spam labels)
for n in dsRow.counts.values():
f = n / N
if f < TOL and n == 0: continue # 0 * log(0) == 0
if poissonPicture:
L += n * _np.log(f) - N * f
else:
L += n * _np.log(f)
if not _np.isclose(maxLogL, L):
_warnings.warn("Log-likelihood upper bound mismatch: %g != %g (diff=%g)" %
(maxLogL, L, maxLogL - L))
return maxLogL
#@smart_cached
def logl_max_terms(model, dataset, circuit_list=None,
poissonPicture=True, opLabelAliases=None,
evaltree_cache=None, smartc=None):
"""
The vector of maximum log-likelihood contributions for each operation sequence,
aggregated over outcomes.
Parameters
----------
This function takes the same arguments as :func:`logl_max` except it
doesn't perform the final sum over operation sequences and SPAM labels.
Returns
-------
numpy.ndarray
Array of length either `len(circuit_list)` or `len(dataset.keys())`.
Values are the maximum log-likelihood contributions of the corresponding
operation sequence aggregated over outcomes.
"""
def smart(fn, *args, **kwargs):
if smartc:
return smartc.cached_compute(fn, args, kwargs)[1]
else:
if '_filledarrays' in kwargs: del kwargs['_filledarrays']
return fn(*args, **kwargs)
if evaltree_cache and 'evTree' in evaltree_cache:
evalTree = evaltree_cache['evTree']
lookup = evaltree_cache['lookup']
outcomes_lookup = evaltree_cache['outcomes_lookup']
nEls = evalTree.num_final_elements()
else:
if circuit_list is None:
circuit_list = list(dataset.keys())
_, lookup, outcomes_lookup, nEls = \
smart(model.simplify_circuits, circuit_list, dataset)
#Note: we don't actually need an evaltree, so we
# won't make one here and so won't fill an empty
# evaltree_cache.
circuit_list = _lt.apply_aliases_to_circuit_list(circuit_list, opLabelAliases)
if evaltree_cache and 'cntVecMx' in evaltree_cache:
countVecMx = evaltree_cache['cntVecMx']
totalCntVec = evaltree_cache['totalCntVec']
else:
countVecMx = _np.empty(nEls, 'd')
totalCntVec = _np.empty(nEls, 'd')
for (i, opStr) in enumerate(circuit_list):
cnts = dataset[opStr].counts
totalCntVec[lookup[i]] = sum(cnts.values()) # dataset[opStr].total
countVecMx[lookup[i]] = [cnts.get(x, 0) for x in outcomes_lookup[i]]
#could add to cache, but we don't have option of circuitWeights
# here yet, so let's be conservative and not do this:
#if evaltree_cache is not None:
# evaltree_cache['cntVecMx'] = countVecMx
# evaltree_cache['totalCntVec'] = totalCntVec
countVecMx = countVecMx.clip(min=0.0) # fix roundoff errors giving small negative counts ~ -1e-16, etc.
freqs = countVecMx / totalCntVec
freqs_nozeros = _np.where(countVecMx == 0, 1.0, freqs) # set zero freqs to 1.0 so np.log doesn't complain
if poissonPicture:
maxLogLTerms = countVecMx * (_np.log(freqs_nozeros) - 1.0)
else:
maxLogLTerms = countVecMx * _np.log(freqs_nozeros)
# set 0 * log(0) terms explicitly to zero since numpy doesn't know this limiting behavior
maxLogLTerms[countVecMx == 0] = 0.0
#Aggregate over outcomes:
# maxLogLTerms[iElement] contains all logl-upper-bound contributions
# terms[iCircuit] will contain contributions for each original gate
# string (aggregated over outcomes)
nCircuits = len(circuit_list)
terms = _np.empty(nCircuits, 'd')
for i in range(nCircuits):
terms[i] = _np.sum(maxLogLTerms[lookup[i]], axis=0)
return terms
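# --- Illustrative sketch (not part of pyGSTi) --------------------------------
# For one circuit with outcome counts {x: n_x} and N = sum_x n_x, the value
# accumulated above is sum_x n_x*(log(n_x/N) - 1) in the Poisson picture
# (sum_x n_x*log(n_x/N) otherwise), with 0*log(0) taken as 0.  A hypothetical
# standalone version for a single counts dictionary:
def _demo_max_logl_term(counts, poissonPicture=True):
    import numpy as _np
    n = _np.array(list(counts.values()), 'd')
    N = n.sum()
    nzf = _np.where(n == 0, 1.0, n / N)        # placeholder freq so log() is defined
    terms = n * (_np.log(nzf) - 1.0) if poissonPicture else n * _np.log(nzf)
    terms[n == 0] = 0.0                        # 0 * log(0) == 0 by convention
    return terms.sum()
# e.g. _demo_max_logl_term({'0': 90, '1': 10}) == 90*log(0.9) + 10*log(0.1) - 100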
def two_delta_logl_nsigma(model, dataset, circuit_list=None,
minProbClip=1e-6, probClipInterval=(-1e6, 1e6), radius=1e-4,
poissonPicture=True, opLabelAliases=None,
dof_calc_method='nongauge', wildcard=None):
"""See docstring for :function:`pygsti.tools.two_delta_logl` """
assert(dof_calc_method is not None)
return two_delta_logl(model, dataset, circuit_list,
minProbClip, probClipInterval, radius,
poissonPicture, False, opLabelAliases,
None, None, dof_calc_method, None, wildcard)[1]
def two_delta_logl(model, dataset, circuit_list=None,
minProbClip=1e-6, probClipInterval=(-1e6, 1e6), radius=1e-4,
poissonPicture=True, check=False, opLabelAliases=None,
evaltree_cache=None, comm=None, dof_calc_method=None,
smartc=None, wildcard=None):
"""
Twice the difference between the maximum and actual log-likelihood,
optionally along with Nsigma (# std deviations from mean) and p-value
relative to expected chi^2 distribution (when `dof_calc_method` is
not None).
This function's arguments are supersets of :func:`logl`, and
:func:`logl_max`. This is a convenience function, equivalent to
`2*(logl_max(...) - logl(...))`, whose value is what is often called
the *log-likelihood-ratio* between the "maximal model" (that which trivially
fits the data exactly) and the model given by `model`.
Parameters
----------
model : Model
Model of parameterized gates
dataset : DataSet
Probability data
circuit_list : list of (tuples or Circuits), optional
Each element specifies an operation sequence to include in the log-likelihood
sum. Default value of None implies all the operation sequences in dataset
should be used.
minProbClip : float, optional
The minimum probability treated normally in the evaluation of the log-likelihood.
A penalty function replaces the true log-likelihood for probabilities that lie
below this threshold so that the log-likelihood never becomes undefined (which improves
optimizer performance).
probClipInterval : 2-tuple or None, optional
(min,max) values used to clip the probabilities predicted by models during MLEGST's
search for an optimal model (if not None). If None, no clipping is performed.
radius : float, optional
Specifies the severity of rounding used to "patch" the zero-frequency
terms of the log-likelihood.
evalTree : evaluation tree, optional
given by a prior call to bulk_evaltree for the same circuit_list.
Significantly speeds up evaluation of log-likelihood, even more so
when accompanied by countVecMx (see below).
poissonPicture : boolean, optional
Whether the log-likelihood-in-the-Poisson-picture terms should be included
in the computed log-likelihood values.
check : boolean, optional
If True, perform extra checks within code to verify correctness. Used
for testing, and runs much slower when True.
opLabelAliases : dictionary, optional
Dictionary whose keys are operation label "aliases" and whose values are tuples
corresponding to what that operation label should be expanded into before querying
the dataset. Defaults to the empty dictionary (no aliases defined)
e.g. opLabelAliases['Gx^3'] = ('Gx','Gx','Gx')
evaltree_cache : dict, optional
A dictionary which serves as a cache for the computed EvalTree used
in this computation. If an empty dictionary is supplied, it is filled
with cached values to speed up subsequent executions of this function
which use the *same* `model` and `circuit_list`.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors.
dof_calc_method : {None, "all", "nongauge"}
How `model`'s number of degrees of freedom (parameters) are obtained
when computing the number of standard deviations and p-value relative to
a chi2_k distribution, where `k` is additional degrees of freedom
possessed by the maximal model. If None, then `Nsigma` and `pvalue` are
not returned (see below).
smartc : SmartCache, optional
A cache object to cache & use previously cached values inside this
function.
wildcard : WildcardBudget
A wildcard budget to apply to this log-likelihood computation.
This increases the returned log-likelihood value by adjusting
(by a maximal amount measured in TVD, given by the budget) the
probabilities produced by `model` to optimally match the data
(within the budgetary constraints) when evaluating the log-likelihood.
Returns
-------
twoDeltaLogL : float
2*(loglikelihood(maximal_model,data) - loglikelihood(model,data))
Nsigma, pvalue : float
Only returned when `dof_calc_method` is not None.
"""
twoDeltaLogL = 2 * (logl_max(model, dataset, circuit_list, poissonPicture,
check, opLabelAliases, evaltree_cache, smartc)
- logl(model, dataset, circuit_list,
minProbClip, probClipInterval, radius,
poissonPicture, check, opLabelAliases,
evaltree_cache, comm, smartc, wildcard))
if dof_calc_method is None:
return twoDeltaLogL
elif dof_calc_method == "nongauge":
if hasattr(model, 'num_nongauge_params'):
mdl_dof = model.num_nongauge_params()
else:
mdl_dof = model.num_params()
elif dof_calc_method == "all":
mdl_dof = model.num_params()
else: raise ValueError("Invalid `dof_calc_method` arg: %s" % dof_calc_method)
if circuit_list is not None:
ds_strs = _lt.apply_aliases_to_circuit_list(circuit_list, opLabelAliases)
else: ds_strs = None
Ns = dataset.get_degrees_of_freedom(ds_strs)
k = max(Ns - mdl_dof, 1)
if Ns <= mdl_dof: _warnings.warn("Max-model params (%d) <= model params (%d)! Using k == 1." % (Ns, mdl_dof))
Nsigma = (twoDeltaLogL - k) / _np.sqrt(2 * k)
pvalue = 1.0 - _stats.chi2.cdf(twoDeltaLogL, k)
return twoDeltaLogL, Nsigma, pvalue
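# --- Illustrative sketch (not part of pyGSTi) --------------------------------
# Under the usual asymptotic assumption, 2*Delta(logL) ~ chi^2_k with
# k = (max-model dof) - (model dof), which has mean k and variance 2k.  The
# Nsigma / p-value computed above can be reproduced standalone (numbers below
# are made up for illustration):
def _demo_nsigma_pvalue(two_delta_logl_value, k):
    import numpy as _np
    from scipy import stats as _stats
    nsigma = (two_delta_logl_value - k) / _np.sqrt(2.0 * k)
    pvalue = 1.0 - _stats.chi2.cdf(two_delta_logl_value, k)
    return nsigma, pvalue
# e.g. _demo_nsigma_pvalue(1050.0, 1000) -> (~1.12, ~0.13)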
def two_delta_logl_terms(model, dataset, circuit_list=None,
minProbClip=1e-6, probClipInterval=(-1e6, 1e6), radius=1e-4,
poissonPicture=True, check=False, opLabelAliases=None,
evaltree_cache=None, comm=None, dof_calc_method=None,
smartc=None, wildcard=None):
"""
The vector of twice the difference between the maximum and actual
log-likelihood for each operation sequence, aggregated over outcomes.
Optionally (when `dof_calc_method` is not None) returns parallel vectors
containing the Nsigma (# std deviations from mean) and the p-value relative
to expected chi^2 distribution for each sequence.
Parameters
----------
This function takes the same arguments as :func:`two_delta_logl` except it
doesn't perform the final sum over operation sequences and SPAM labels.
Returns
-------
twoDeltaLogL_terms : numpy.ndarray
Nsigma, pvalue : numpy.ndarray
Only returned when `dof_calc_method` is not None.
"""
twoDeltaLogL_terms = 2 * (logl_max_terms(model, dataset, circuit_list, poissonPicture,
opLabelAliases, evaltree_cache, smartc)
- logl_terms(model, dataset, circuit_list,
minProbClip, probClipInterval, radius,
poissonPicture, check, opLabelAliases,
evaltree_cache, comm, smartc, wildcard))
if dof_calc_method is None: return twoDeltaLogL_terms
elif dof_calc_method == "all": mdl_dof = model.num_params()
elif dof_calc_method == "nongauge": mdl_dof = model.num_nongauge_params()
else: raise ValueError("Invalid `dof_calc_method` arg: %s" % dof_calc_method)
if circuit_list is not None:
ds_strs = _lt.apply_aliases_to_circuit_list(circuit_list, opLabelAliases)
else: ds_strs = None
Ns = dataset.get_degrees_of_freedom(ds_strs)
k = max(Ns - mdl_dof, 1)
# HACK - just take a single average #dof per circuit to use as chi_k distribution!
k = int(_np.ceil(k / (1.0 * len(circuit_list))))
Nsigma = (twoDeltaLogL_terms - k) / _np.sqrt(2 * k)
pvalue = _np.array([1.0 - _stats.chi2.cdf(x, k) for x in twoDeltaLogL_terms], 'd')
return twoDeltaLogL_terms, Nsigma, pvalue
def forbidden_prob(model, dataset):
"""
Compute the sum of the out-of-range probabilities
generated by model, using only those operation sequences
contained in dataset. Non-zero value indicates
that model is not in XP for the supplied dataset.
Parameters
----------
model : Model
model to generate probabilities.
dataset : DataSet
data set to obtain operation sequences. Dataset counts are
used to check for zero or all counts being under a
single spam label, in which case out-of-bounds probabilities
are ignored because they contribute zero to the logl sum.
Returns
-------
float
sum of the out-of-range probabilities.
"""
forbidden_prob = 0
for mdl, dsRow in dataset.items():
probs = model.probs(mdl)
for (spamLabel, p) in probs.items():
if p < TOL:
if round(dsRow[spamLabel]) == 0: continue # contributes zero to the sum
else: forbidden_prob += abs(TOL - p) + TOL
elif p > 1 - TOL:
if round(dsRow[spamLabel]) == dsRow.total: continue # contributes zero to the sum
else: forbidden_prob += abs(p - (1 - TOL)) + TOL
return forbidden_prob
def prep_penalty(rhoVec, basis):
"""
Penalty assigned to a state preparation (rho) vector rhoVec. State
preparation density matrices must be positive semidefinite
and trace == 1. A positive return value indicates that
these criteria are not met and the rho-vector is invalid.
Parameters
----------
rhoVec : numpy array
rho vector array of shape (N,1) for some N.
basis : {"std", "gm", "pp", "qt"}
The abbreviation for the basis used to interpret rhoVec
("gm" = Gell-Mann, "pp" = Pauli-product, "std" = matrix unit,
"qt" = qutrit, or standard).
Returns
-------
float
"""
# rhoVec must be positive semidefinite and trace = 1
rhoMx = _bt.vec_to_stdmx(_np.asarray(rhoVec), basis)
evals = _np.linalg.eigvals(rhoMx) # could use eigvalsh, but wary of this since eigh can be wrong...
sumOfNeg = sum([-ev.real for ev in evals if ev.real < 0])
tracePenalty = abs(rhoVec[0, 0] - (1.0 / _np.sqrt(rhoMx.shape[0])))
# 0th el is coeff of I(dxd)/sqrt(d) which has trace sqrt(d)
#print "Sum of neg = ",sumOfNeg #DEBUG
#print "Trace Penalty = ",tracePenalty #DEBUG
return sumOfNeg + tracePenalty
def effect_penalty(EVec, basis):
"""
Penalty assigned to a POVM effect vector EVec. Effects
must have eigenvalues between 0 and 1. A positive return
value indicates this criterion is not met and the E-vector
is invalid.
Parameters
----------
EVec : numpy array
effect vector array of shape (N,1) for some N.
basis : {"std", "gm", "pp", "qt"}
The abbreviation for the basis used to interpret EVec
("gm" = Gell-Mann, "pp" = Pauli-product, "std" = matrix unit,
"qt" = qutrit, or standard).
Returns
-------
float
"""
# EVec must have eigenvalues between 0 and 1
EMx = _bt.vec_to_stdmx(_np.asarray(EVec), basis)
evals = _np.linalg.eigvals(EMx) # could use eigvalsh, but wary of this since eigh can be wrong...
sumOfPen = 0
for ev in evals:
if ev.real < 0: sumOfPen += -ev.real
if ev.real > 1: sumOfPen += ev.real - 1.0
return sumOfPen
def cptp_penalty(model, include_spam_penalty=True):
"""
The sum of all negative Choi matrix eigenvalues, and
if include_spam_penalty is True, the rho-vector and
E-vector penalties of model. A non-zero value
indicates that the model is not CPTP.
Parameters
----------
model : Model
the model to compute CPTP penalty for.
include_spam_penalty : bool, optional
if True, also test model for invalid SPAM
operation(s) and return sum of CPTP penalty
with prep_penalty(...) and effect_penalty(...)
for each rho and E vector.
Returns
-------
float
CPTP penalty (possibly with added spam penalty).
"""
ret = _jam.sum_of_negative_choi_evals(model)
if include_spam_penalty:
b = model.basis
ret += sum([prep_penalty(r, b) for r in model.preps.values()])
ret += sum([effect_penalty(e, b) for povm in model.povms.values()
for e in povm.values()])
return ret
#@smart_cached
def two_delta_loglfn(N, p, f, minProbClip=1e-6, poissonPicture=True):
"""
Term of the 2*[log(L)-upper-bound - log(L)] sum corresponding
to a single operation sequence and spam label.
Parameters
----------
N : float or numpy array
Number of samples.
p : float or numpy array
Probability of 1st outcome (typically computed).
f : float or numpy array
Frequency of 1st outcome (typically observed).
minProbClip : float, optional
Minimum probability clip point to avoid evaluating
log(number <= zero)
poissonPicture : boolean, optional
Whether the log-likelihood-in-the-Poisson-picture terms should be included
in the returned logl value.
Returns
-------
float or numpy array
"""
#TODO: change this function to handle nan's in the inputs without warnings, since
# fiducial pair reduction may pass inputs with nan's legitimately and the desired
# behavior is to just let the nan's pass through to nan's in the output.
cp = _np.clip(p, minProbClip, 1e10) # effectively no upper bound
nan_indices = _np.isnan(f) # get indices of invalid entries
if not _np.isscalar(f): f[nan_indices] = 0.0
#set nan's to zero to avoid RuntimeWarnings (invalid value)
zf = _np.where(f < 1e-10, 0.0, f) # set zero-freqs to zero
nzf = _np.where(f < 1e-10, 1.0, f) # set zero-freqs to one -- together
# w/above line makes 0 * log(0) == 0
if not _np.isscalar(f):
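# --- Illustrative sketch (not part of pyGSTi) --------------------------------
# The excerpt above is truncated at this point.  Based on the docstring and the
# logl/logl_max term definitions earlier in this module, the per-term value in
# the Poisson picture is 2*(N*f*log(f/p) - N*(f - p)) (and 2*N*f*log(f/p)
# without it), with p clipped at minProbClip and 0*log(0) treated as 0.  A
# minimal standalone sketch under those assumptions, not pyGSTi's exact code:
def _demo_two_delta_loglfn(N, p, f, minProbClip=1e-6, poissonPicture=True):
    import numpy as _np
    cp = _np.clip(p, minProbClip, None)        # avoid log(0) / negative probs
    zf = _np.where(f < 1e-10, 0.0, f)          # zero-frequency terms contribute 0
    nzf = _np.where(f < 1e-10, 1.0, f)         # placeholder so log() is well-defined
    if poissonPicture:
        return 2 * (N * zf * _np.log(nzf / cp) - N * (f - cp))
    return 2 * N * zf * _np.log(nzf / cp)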
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Three synthetic data generations.
"""
# Necessary functions and packages call
import numpy as np
def synthetic_data_loading(data_name='Syn1', data_no=1000, seed=0):
"""Generates synthetic datasets.
Args:
data_name: Syn1, Syn2, Syn3
data_no: number of training and testing sets
seed: random seed
Returns:
x_train: training features
y_train: training labels
x_test: testing features
y_test: testing labels
c_test: ground truth weights
test_idx: order of testing set index based on the distance from the boundary
"""
# X generation (X ~ N(0,I))
np.random.seed(seed)
data_x = np.random.normal(0, 1, [2 * data_no, 11])
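# --- Illustrative sketch (the function body above is truncated) ---------------
# A minimal sketch of the remaining feature/label split implied by the docstring.
# The Syn1/Syn2/Syn3 label rules are NOT reproduced here; `label_fn` is a
# hypothetical placeholder, so treat this only as an outline:
def _demo_feature_split(data_no=1000, dim=11, seed=0, label_fn=None):
    np.random.seed(seed)
    data_x = np.random.normal(0, 1, [2 * data_no, dim])       # X ~ N(0, I)
    data_y = label_fn(data_x) if label_fn else np.zeros(2 * data_no)
    x_train, x_test = data_x[:data_no], data_x[data_no:]
    y_train, y_test = data_y[:data_no], data_y[data_no:]
    return x_train, y_train, x_test, y_test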
# Copyright 2019-2020 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import networkx as nx
import numpy as np
import pandas as pd
import pytest
from IPython.display import Image
from mock import patch
from sklearn.exceptions import NotFittedError
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import KFold, cross_val_score
from causalnex.structure import DAGClassifier, DAGRegressor
from causalnex.structure import data_generators as dg
class TestDAGSklearn:
""" Tests aspects common to both DAGRegressor and DAGClassifier """
@pytest.mark.parametrize("model", [DAGRegressor, DAGClassifier])
@pytest.mark.parametrize(
"val, msg, error",
[
({"alpha": "0.0"}, "alpha should be numeric", TypeError),
({"beta": "0.0"}, "beta should be numeric", TypeError),
({"fit_intercept": 0}, "fit_intercept should be a bool", TypeError),
({"threshold": "0.0"}, "threshold should be numeric", TypeError),
],
)
def test_input_type_assertion(self, val, msg, error, model):
with pytest.raises(error, match=msg):
model(**val)
@pytest.mark.parametrize("model", [DAGRegressor, DAGClassifier])
def test_notfitted_error(self, model):
m = model()
X = np.random.normal(size=(100, 2))
with pytest.raises(NotFittedError):
m.predict(X)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_tabu_parent_nodes(self, model, y):
X = np.random.normal(size=(100, 2))
X, y = pd.DataFrame(X), pd.Series(y, name="test")
m = model(dependent_target=True, tabu_parent_nodes=["test"])
assert "test" in m.tabu_parent_nodes
m = model(dependent_target=True, tabu_parent_nodes=[])
m.fit(X, y)
assert "test" not in m.tabu_parent_nodes
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_numpy_fit(self, model, y):
m = model()
X = np.random.normal(size=(100, 2))
m.fit(X, y)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_pandas_fit(self, model, y):
m = model()
X = np.random.normal(size=(100, 2))
X, y = pd.DataFrame(X), pd.Series(y)
m.fit(X, y)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
@pytest.mark.parametrize(
"fit_intercept, equals_zero", [(True, False), (False, True)]
)
def test_intercept(self, fit_intercept, equals_zero, model, y):
m = model(fit_intercept=fit_intercept)
X = np.random.normal(size=(100, 2))
X, y = pd.DataFrame(X), pd.Series(y)
m.fit(X, y)
# intercept should return zero when fit_intercept == False
assert (m.intercept_ == 0) is equals_zero
assert isinstance(m.intercept_, float)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
@pytest.mark.parametrize("enforce_dag", [True, False])
def test_plot_dag(self, enforce_dag, model, y):
m = model()
X = np.random.normal(size=(100, 2))
m.fit(X, y)
image = m.plot_dag(enforce_dag=enforce_dag)
assert isinstance(image, Image)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_plot_dag_importerror(self, model, y):
with patch.dict("sys.modules", {"IPython.display": None}):
m = model()
X = np.random.normal(size=(100, 2))
m.fit(X, y)
with pytest.raises(
ImportError,
match=r"plot_dag method requires IPython installed.",
):
m.plot_dag()
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
@pytest.mark.parametrize(
"hidden_layer_units", [None, [], [0], [1], (0,), (1,), [1, 1], (1, 1)]
)
def test_hidden_layer_units(self, hidden_layer_units, model, y):
m = model(hidden_layer_units=hidden_layer_units)
X = np.random.normal(size=(100, 2))
m.fit(X, y)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_enforce_dag(self, model, y):
m = model(enforce_dag=True)
X = np.random.normal(size=(100, 2))
X, y = pd.DataFrame(X), pd.Series(y)
m.fit(X, y)
assert nx.algorithms.is_directed_acyclic_graph(m.graph_)
@pytest.mark.parametrize(
"model, y",
[
(DAGRegressor, np.random.normal(size=(100,))),
(DAGClassifier, np.random.randint(2, size=(100,))),
],
)
def test_container_predict_type(self, model, y):
m = model()
X = np.random.normal(size=(100, 2))
m.fit(X, y)
assert isinstance(m.predict(X), np.ndarray)
m = model()
X = np.random.normal(size=(100, 2))
X, y = pd.DataFrame(X), pd.Series(y)
m.fit(X, y)
assert isinstance(m.predict(X), np.ndarray)
class TestDAGRegressor:
@pytest.mark.parametrize("hidden_layer_units", [None, [2], [2, 2]])
def test_coef(self, hidden_layer_units):
reg = DAGRegressor(hidden_layer_units=hidden_layer_units)
X, y = (
pd.DataFrame(np.random.normal(size=(100, 1))),
pd.Series(np.random.normal(size=(100,))),
)
X["true_feat"] = y * -3
reg.fit(X, y)
assert isinstance(reg.coef_, np.ndarray)
coef_ = pd.Series(reg.coef_, index=X.columns)
# assert that the sign of the coefficient is correct for both nonlinear and linear cases
assert coef_["true_feat"] < 0
@pytest.mark.parametrize("hidden_layer_units", [None, [2], [2, 2]])
def test_feature_importances(self, hidden_layer_units):
reg = DAGRegressor(hidden_layer_units=hidden_layer_units)
X, y = (
pd.DataFrame(np.random.normal(size=(100, 1))),
pd.Series(np.random.normal(size=(100,))),
)
X["true_feat"] = y * -3
reg.fit(X, y)
assert isinstance(reg.feature_importances_, np.ndarray)
coef_ = pd.Series(reg.feature_importances_, index=X.columns)
# assert that the sign of the coefficient is positive for both nonlinear and linear cases
assert coef_["true_feat"] > 0
@pytest.mark.parametrize("standardize", [True, False])
def test_nonlinear_performance(self, standardize):
np.random.seed(42)
# Copyright 2019 <NAME> & <NAME>
#
# This file is part of OBStools.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
:mod:`~obstools.atacr.utils` contains several functions that are used in the
class methods of `~obstools.atacr.classes`.
"""
import os
import math
import numpy as np
import fnmatch
from matplotlib import pyplot as plt
from obspy.core import read, Stream, Trace, AttribDict, UTCDateTime
def traceshift(trace, tt):
"""
Function to shift traces in time given travel time
Parameters
----------
trace : :class:`~obspy.core.Trace` object
Trace object to update
tt : float
Time shift in seconds
Returns
-------
rtrace : :class:`~obspy.core.Trace` object
Updated trace object
"""
# Define frequencies
nt = trace.stats.npts
dt = trace.stats.delta
freq = np.fft.fftfreq(nt, d=dt)
# Fourier transform
ftrace = np.fft.fft(trace.data)
# Shift
for i in range(len(freq)):
ftrace[i] = ftrace[i]*np.exp(-2.*np.pi*1j*freq[i]*tt)
# Back Fourier transform and return as trace
rtrace = trace.copy()
rtrace.data = np.real(np.fft.ifft(ftrace))
# Update start time
rtrace.stats.starttime -= tt
return rtrace
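# --- Illustrative sketch (not part of OBStools) -------------------------------
# The loop above applies the Fourier shift theorem: multiplying the spectrum by
# exp(-2*pi*i*f*tt) delays the signal by tt seconds.  A plain-numpy check for an
# integer-sample shift (all numbers are made up for illustration):
def _demo_fourier_shift():
    nt, dt, tt = 64, 0.5, 2.0                  # 64 samples, 0.5 s spacing, 2 s delay
    x = np.zeros(nt); x[10] = 1.0              # unit spike at sample 10
    freq = np.fft.fftfreq(nt, d=dt)
    xs = np.real(np.fft.ifft(np.fft.fft(x) * np.exp(-2. * np.pi * 1j * freq * tt)))
    assert np.argmax(xs) == 10 + int(tt / dt)  # spike moved by tt/dt = 4 samples
    return xs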
def QC_streams(start, end, st):
"""
Function for quality control of traces, which compares the
start and end times that were requested, as well as the total
length of the traces.
Parameters
----------
start : :class:`~obspy.core.UTCDateTime` object
Start time of requested stream
end : :class:`~obspy.core.UTCDateTime` object
End time of requested stream
st : :class:`~obspy.core.Stream` object
Stream object with all trace data
Returns
-------
(pass): bool
Whether the QC test has passed
st : :class:`~obspy.core.Stream` object
Updated stream object
"""
# Check start times
if not np.all([tr.stats.starttime == start for tr in st]):
print("* Start times are not all close to true start: ")
[print("* "+tr.stats.channel+" " +
str(tr.stats.starttime)+" " +
str(tr.stats.endtime)) for tr in st]
print("* True start: "+str(start))
print("* -> Shifting traces to true start")
delay = [tr.stats.starttime - start for tr in st]
st_shifted = Stream(
traces=[traceshift(tr, dt) for tr, dt in zip(st, delay)])
st = st_shifted.copy()
# Try trimming
dt = st[0].stats.delta
try:
st.trim(start, end-dt, fill_value=0., pad=True)
except Exception:
print("* Unable to trim")
print("* -> Skipping")
print("**************************************************")
return False, None
# Check final lengths - they should all be equal if start times
# and sampling rates are all equal and traces have been trimmed
sr = st[0].stats.sampling_rate
if not np.allclose([tr.stats.npts for tr in st[1:]], st[0].stats.npts):
print("* Lengths are incompatible: ")
[print("* "+str(tr.stats.npts)) for tr in st]
print("* -> Skipping")
print("**************************************************")
return False, None
elif not np.allclose([st[0].stats.npts], int((end - start)*sr), atol=1):
print("* Length is too short: ")
print("* "+str(st[0].stats.npts) +
" ~= "+str(int((end - start)*sr)))
print("* -> Skipping")
print("**************************************************")
return False, None
else:
return True, st
def update_stats(tr, stla, stlo, stel, cha, evla=None, evlo=None):
"""
Function to include SAC metadata to :class:`~obspy.core.Trace` objects
Parameters
----------
tr : :class:`~obspy.core.Trace` object
Trace object to update
stla : float
Latitude of station
stlo : float
Longitude of station
stel : float
Station elevation (m)
cha : str
Channel for component
evla : float, optional
Latitude of event
evlo : float, optional
Longitude of event
Returns
-------
tr : :class:`~obspy.core.Trace` object
Updated trace object
"""
tr.stats.sac = AttribDict()
tr.stats.sac.stla = stla
tr.stats.sac.stlo = stlo
tr.stats.sac.stel = stel
tr.stats.sac.kcmpnm = cha
tr.stats.channel = cha
if evla is not None and evlo is not None:
tr.stats.sac.evla = evla
tr.stats.sac.evlo = evlo
return tr
def get_data(datapath, tstart, tend):
"""
Function to grab all available noise data given a path and data time range
Parameters
----------
datapath : str
Path to noise data folder
tstart : :class:`~obspy.class.UTCDateTime`
Start time for query
tend : :class:`~obspy.class.UTCDateTime`
End time for query
Returns
-------
tr1, tr2, trZ, trP : :class:`~obspy.core.Trace` object
Corresponding trace objects for components H1, H2, HZ and HP. Returns
empty traces for missing components.
"""
# Define empty streams
trN1 = Stream()
trN2 = Stream()
trNZ = Stream()
trNP = Stream()
# Time iterator
t1 = tstart
# Cycle through each day within time range
while t1 < tend:
# Time stamp used in file name
tstamp = str(t1.year).zfill(4)+'.'+str(t1.julday).zfill(3)+'.'
# Cycle through directory and load files
p = datapath.glob('*.*')
files = [x for x in p if x.is_file()]
for file in files:
if fnmatch.fnmatch(str(file), '*' + tstamp + '*1.SAC'):
tr = read(str(file))
trN1.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*2.SAC'):
tr = read(str(file))
trN2.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*Z.SAC'):
tr = read(str(file))
trNZ.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*H.SAC'):
tr = read(str(file))
trNP.append(tr[0])
# Increase increment
t1 += 3600.*24.
# Fill with empty traces if components are not found
ntr = len(trNZ)
if not trN1 and not trN2:
for i in range(ntr):
trN1.append(Trace())
trN2.append(Trace())
if not trNP:
for i in range(ntr):
trNP.append(Trace())
if ntr > 0:
# Check that all sampling rates are equal - otherwise resample
if trNZ[0].stats.sampling_rate != trNP[0].stats.sampling_rate:
# These checks assume that all seismic data have the same sampling
if trNZ[0].stats.sampling_rate < trNP[0].stats.sampling_rate:
trNP.resample(trNZ[0].stats.sampling_rate, no_filter=False)
else:
trNZ.resample(trNP[0].stats.sampling_rate, no_filter=False)
if trN1:
trN1.resample(trNP[0].stats.sampling_rate, no_filter=False)
if trN2:
trN2.resample(trNP[0].stats.sampling_rate, no_filter=False)
return trN1, trN2, trNZ, trNP
def get_event(eventpath, tstart, tend):
"""
Function to grab all available earthquake data given a path and data time
range
Parameters
----------
eventpath : str
Path to earthquake data folder
tstart : :class:`~obspy.class.UTCDateTime`
Start time for query
tend : :class:`~obspy.class.UTCDateTime`
End time for query
Returns
-------
tr1, tr2, trZ, trP : :class:`~obspy.core.Trace` object
Corresponding trace objects for components H1, H2, HZ and HP. Returns
empty traces for missing components.
"""
# Find out how many events from Z.SAC files
eventfiles = list(eventpath.glob('*Z.SAC'))
if not eventfiles:
raise(Exception("No event found in folder "+str(eventpath)))
# Extract events from time stamps
prefix = [file.name.split('.') for file in eventfiles]
evstamp = [p[0]+'.'+p[1]+'.'+p[2]+'.'+p[3]+'.' for p in prefix]
evDateTime = [UTCDateTime(p[0]+'-'+p[1]+'T'+p[2]+":"+p[3]) for p in prefix]
# Define empty streams
tr1 = Stream()
tr2 = Stream()
trZ = Stream()
trP = Stream()
# Cycle over all available files in time range
for event, tstamp in zip(evDateTime, evstamp):
if event >= tstart and event <= tend:
# Cycle through directory and load files
p = list(eventpath.glob('*.SAC'))
files = [x for x in p if x.is_file()]
for file in files:
if fnmatch.fnmatch(str(file), '*' + tstamp + '*1.SAC'):
tr = read(str(file))
tr1.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*2.SAC'):
tr = read(str(file))
tr2.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*Z.SAC'):
tr = read(str(file))
trZ.append(tr[0])
elif fnmatch.fnmatch(str(file), '*' + tstamp + '*H.SAC'):
tr = read(str(file))
trP.append(tr[0])
# Fill with empty traces if components are not found
ntr = len(trZ)
if not tr1 and not tr2:
for i in range(ntr):
tr1.append(Trace())
tr2.append(Trace())
if not trP:
for i in range(ntr):
trP.append(Trace())
if ntr > 0:
# Check that all sampling rates are equal - otherwise resample
if trZ[0].stats.sampling_rate != trP[0].stats.sampling_rate:
# These checks assume that all seismic data have the same sampling
if trZ[0].stats.sampling_rate < trP[0].stats.sampling_rate:
trP.resample(trZ[0].stats.sampling_rate, no_filter=False)
else:
trZ.resample(trP[0].stats.sampling_rate, no_filter=False)
if tr1:
tr1.resample(trP[0].stats.sampling_rate, no_filter=False)
if tr2:
tr2.resample(trP[0].stats.sampling_rate, no_filter=False)
return tr1, tr2, trZ, trP
def calculate_tilt(ft1, ft2, ftZ, ftP, f, goodwins, tiltfreq=[0.005, 0.035]):
"""
Determines tilt direction from maximum coherence between rotated H1 and Z.
Parameters
----------
ft1, ft2, ftZ, ftP : :class:`~numpy.ndarray`
Fourier transform of corresponding H1, H2, HZ and HP components
f : :class:`~numpy.ndarray`
Frequency axis in Hz
goodwins : list
List of booleans representing whether a window is good (True) or not
(False). This attribute is returned from the method
:func:`~obstools.atacr.classes.DayNoise.QC_daily_spectra`
tiltfreq : list, optional
Two floats representing the frequency band at which the tilt is
calculated
Returns
-------
cHH, cHZ, cHP : :class:`~numpy.ndarray`
Arrays of power and cross-spectral density functions of components HH
(rotated H1 in direction of maximum tilt), HZ, and HP
coh : :class:`~numpy.ndarray`
Coherence value between rotated H and Z components, as a function of
directions (azimuths)
ph : :class:`~numpy.ndarray`
Phase value between rotated H and Z components, as a function of
directions (azimuths)
direc : :class:`~numpy.ndarray`
Array of directions (azimuths) considered
tilt : float
Direction (azimuth) of maximum coherence between rotated H1 and Z
coh_value : float
Coherence value at tilt direction
phase_value : float
Phase value at tilt direction
"""
direc = np.arange(0., 360., 10.)
coh = np.zeros(len(direc))
ph = np.zeros(len(direc))
cZZ = np.abs(np.mean(ftZ[goodwins, :] *
np.conj(ftZ[goodwins, :]), axis=0))[0:len(f)]
for i, d in enumerate(direc):
# Rotate horizontals
ftH = rotate_dir(ft1, ft2, d)
# Get transfer functions
cHH = np.abs(np.mean(ftH[goodwins, :] *
np.conj(ftH[goodwins, :]), axis=0))[0:len(f)]
cHZ = np.mean(ftH[goodwins, :] *
np.conj(ftZ[goodwins, :]), axis=0)[0:len(f)]
Co = coherence(cHZ, cHH, cZZ)
Ph = phase(cHZ)
# Calculate coherence over frequency band
coh[i] = np.mean(Co[(f > tiltfreq[0]) & (f < tiltfreq[1])])
ph[i] = np.pi/2. - np.mean(Ph[(f > tiltfreq[0]) & (f < tiltfreq[1])])
# Index where coherence is max
ind = np.argwhere(coh == coh.max())
# Phase and direction at maximum coherence
phase_value = ph[ind[0]][0]
coh_value = coh[ind[0]][0]
tilt = direc[ind[0]][0]
# Refine search
rdirec = np.arange(direc[ind[0]][0]-10., direc[ind[0]][0]+10., 1.)
rcoh = np.zeros(len(rdirec))
rph = np.zeros(len(rdirec))
for i, d in enumerate(rdirec):
# Rotate horizontals
ftH = rotate_dir(ft1, ft2, d)
# Get transfer functions
cHH = np.abs(np.mean(ftH[goodwins, :] *
np.conj(ftH[goodwins, :]), axis=0))[0:len(f)]
cHZ = np.mean(ftH[goodwins, :] *
np.conj(ftZ[goodwins, :]), axis=0)[0:len(f)]
Co = coherence(cHZ, cHH, cZZ)
Ph = phase(cHZ)
# Calculate coherence over frequency band
rcoh[i] = np.mean(Co[(f > tiltfreq[0]) & (f < tiltfreq[1])])
rph[i] = np.pi/2. - np.mean(Ph[(f > tiltfreq[0]) & (f < tiltfreq[1])])
# Index where coherence is max
ind = np.argwhere(rcoh == rcoh.max())
# Phase and direction at maximum coherence
phase_value = rph[ind[0]][0]
coh_value = rcoh[ind[0]][0]
tilt = rdirec[ind[0]][0]
# Phase has to be close to zero - otherwise add pi
if phase_value > 0.5*np.pi:
tilt += 180.
if tilt > 360.:
tilt -= 360.
# print('Maximum coherence for tilt = ', tilt)
# Now calculate spectra at tilt direction
ftH = rotate_dir(ft1, ft2, tilt)
# Get transfer functions
cHH = np.abs(np.mean(ftH[goodwins, :] *
np.conj(ftH[goodwins, :]), axis=0))[0:len(f)]
cHZ = np.mean(ftH[goodwins, :]*np.conj(ftZ[goodwins, :]), axis=0)[0:len(f)]
if np.any(ftP):
cHP = np.mean(ftH[goodwins, :] *
np.conj(ftP[goodwins, :]), axis=0)[0:len(f)]
else:
cHP = None
return cHH, cHZ, cHP, coh, ph, direc, tilt, coh_value, phase_value
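# --- Illustrative sketch (not part of OBStools) -------------------------------
# `rotate_dir`, used above, is defined elsewhere in obstools; it projects the two
# horizontal spectra onto a trial azimuth.  A generic version of that projection
# is the linear combination below.  The exact azimuth/sign convention used by
# obstools is not reproduced here, so treat this as an assumption:
def _demo_rotate_horizontals(ft1, ft2, azimuth_deg):
    az = azimuth_deg * np.pi / 180.0
    return ft1 * np.cos(az) + ft2 * np.sin(az)  # component along the trial direction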
def smooth(data, nd, axis=0):
"""
Function to smooth power spectral density functions from the convolution
of a boxcar function with the PSD
Parameters
----------
data : :class:`~numpy.ndarray`
Real-valued array to smooth (PSD)
nd : int
Number of samples over which to smooth
axis : int, optional
axis over which to perform the smoothing
Returns
-------
filt : :class:`~numpy.ndarray`, optional
Filtered data
"""
if np.any(data):
if data.ndim > 1:
filt = np.zeros(data.shape)
for i in range(data.shape[::-1][axis]):
if axis == 0:
filt[:, i] = np.convolve(
data[:, i], np.ones((nd,))/nd, mode='same')
elif axis == 1:
filt[i, :] = np.convolve(
data[i, :], np.ones((nd,))/nd, mode='same')
else:
filt = np.convolve(data, np.ones((nd,))/nd, mode='same')
return filt
else:
return None
def admittance(Gxy, Gxx):
"""
Calculates admittance between two components
Parameters
---------
Gxy : :class:`~numpy.ndarray`
Cross spectral density function of `x` and `y`
Gxx : :class:`~numpy.ndarray`
Power spectral density function of `x`
Returns
-------
: :class:`~numpy.ndarray`, optional
Admittance between `x` and `y`
"""
if np.any(Gxy) and np.any(Gxx):
return np.abs(Gxy)/Gxx
else:
return None
def coherence(Gxy, Gxx, Gyy):
"""
Calculates coherence between two components
Parameters
---------
Gxy : :class:`~numpy.ndarray`
Cross spectral density function of `x` and `y`
Gxx : :class:`~numpy.ndarray`
Power spectral density function of `x`
Gyy : :class:`~numpy.ndarray`
Power spectral density function of `y`
Returns
-------
: :class:`~numpy.ndarray`, optional
Coherence between `x` and `y`
"""
    if np.any(Gxy) and np.any(Gxx) and np.any(Gyy):
return np.abs(Gxy)**2/(Gxx*Gyy)
else:
return None
def phase(Gxy):
"""
Calculates phase angle between two components
Parameters
---------
Gxy : :class:`~numpy.ndarray`
Cross spectral density function of `x` and `y`
Returns
-------
: :class:`~numpy.ndarray`, optional
Phase angle between `x` and `y`
"""
if np.any(Gxy):
        return np.angle(Gxy)
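# --- Usage sketch (not part of the original module) --------------------------
# Synthetic example showing how smooth(), admittance(), coherence() and phase()
# fit together; the random traces and window/frequency sizes below are
# illustrative assumptions only.
def _demo_spectral_metrics():
    rng = np.random.default_rng(0)
    x = rng.standard_normal((10, 256))                 # 10 windows of a reference trace
    y = 0.5*x + 0.1*rng.standard_normal((10, 256))     # correlated companion trace
    ftX = np.fft.fft(x, axis=1)
    ftY = np.fft.fft(y, axis=1)
    Gxx = smooth(np.abs(np.mean(ftX*np.conj(ftX), axis=0)), nd=5)
    Gyy = smooth(np.abs(np.mean(ftY*np.conj(ftY), axis=0)), nd=5)
    Gxy = np.mean(ftX*np.conj(ftY), axis=0)
    return admittance(Gxy, Gxx), coherence(Gxy, Gxx, Gyy), phase(Gxy)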
# dd_models.py - Distance distribtion parametric models
# ---------------------------------------------------------------------------
# This file is a part of DeerLab. License is MIT (see LICENSE.md).
# Copyright(c) 2019-2021: <NAME>, <NAME> and other contributors.
import math as m
import numpy as np
import scipy.special as spc
import inspect
from deerlab.utils import metadata
# =================================================================
def docstr_header(title,fcnstr):
"Definition of the header for all distribution models"
return f"""
{title}
The function takes a list or array of parameters and returns the calculated distance distribution::
P = {fcnstr}(r,param)
The built-in information on the model can be accessed via its attributes::
{fcnstr}.parameters # String list of parameter names
{fcnstr}.units # String list of metric units of parameters
{fcnstr}.start # List of values used as start values during optimization
{fcnstr}.lower # List of values used as lower bounds during optimization
{fcnstr}.upper # List of values used as upper bounds during optimization
Parameters
----------
r : array_like
Distance axis, in nanometers.
param : array_like
List of model parameter values.
Returns
-------
P : ndarray
Distance distribution.
"""
# =================================================================
# =================================================================
def docstr_example(fcnstr):
return f"""
Examples
--------
Example of the model evaluated at the start values of the parameters:
.. plot::
import deerlab as dl
import matplotlib.pyplot as plt
import numpy as np
model = dl.{fcnstr}
r = np.linspace(2,5,400)
info = model()
par0 = info['Start']
P = model(r,par0)
plt.figure(figsize=[6,3])
plt.plot(r,P)
plt.xlabel('r (nm)',fontsize=13)
plt.ylabel('P (nm⁻¹)',fontsize=13)
plt.grid(alpha=0.4)
plt.tick_params(labelsize=12)
plt.tick_params(labelsize=12)
plt.tight_layout()
"""
# =================================================================
# =================================================================
def docstring():
"""
Decorator: Insert docstring header to a pre-existing docstring
"""
sep="\n"
def _decorator(func):
docstr = func.__doc__
title = docstr.split("Notes",1)[0]
docstr = docstr.replace(title,"")
func.__doc__ = sep.join([docstr_header(title,func.__name__),docstr])
func.__doc__ = sep.join([func.__doc__,docstr_example(func.__name__)])
return func
return _decorator
# =================================================================
# =================================================================
def _parsparam(r,p,npar):
r,p = np.atleast_1d(r,p)
if len(p)!=npar:
raise ValueError(f'The model function requires {npar} parameters, but {len(p)} are provided.')
return r,p
# =================================================================
# =================================================================
def _normalize(r,P):
if not all(P==0):
P = P/np.trapz(P,r)
return P
# =================================================================
# =================================================================
def _multigaussfun(r,r0,sig,a):
"Compute a distribution with multiple Gaussians"
n = len(r0)
P = np.zeros_like(r)
for k in range(n):
P += a[k]*m.sqrt(1/(2*m.pi))*1/sig[k]*np.exp(-0.5*((r-r0[k])/sig[k])**2)
P = _normalize(r,P)
return P
# =================================================================
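# =================================================================
# Usage sketch (not part of DeerLab): evaluate a two-component Gaussian
# distance distribution on an assumed distance axis; all parameter values
# below are arbitrary illustrations.
def _demo_multigaussfun():
    r = np.linspace(2, 5, 400)                          # distance axis, nm
    P = _multigaussfun(r, [3.0, 4.0], [0.2, 0.3], [0.7, 0.3])
    return P                                            # normalized via np.trapz
# =================================================================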
def _multirice3dfun(r,nu,sig,a):
# =================================================================
    "Compute a distribution with multiple 3D-Rice distributions"
N = len(nu)
    nu = np.maximum(nu,0)
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 02 15:14:11 2017
@author: slauniai
******************************************************************************
TopModel (Beven & Kirkby) -implementation for SpatHy -integration
Topmodel() allows spatially varying soil depths and transmissivity
Topmodel_Homogenous() assumes constant properties and hydrologic similarity \n
determined from TWI = log (a / tan(b))
(C) <NAME>, 2016-
Last edit: 7.2.2018 / <NAME>
******************************************************************************
"""
import numpy as np
# import matplotlib.pyplot as plt
eps = np.finfo(float).eps # machine epsilon
class Topmodel_Homogenous():
def __init__(self, pp, cellarea, cmask, flowacc, slope, S_initial=None,
outputs=False):
"""
sets up Topmodel for the catchment assuming homogenous
effective soil depth 'm' and sat. hydr. conductivity 'ko'.
This is the 'classic' version of Topmodel where hydrologic similarity\
index is TWI = log(a / tan(b)).
Args:
pp - parameter dict with keys:
dt - timestep [s]
ko - soil transmissivity at saturation [m/s]
m - effective soil depth (m), i.e. decay factor of Ksat with depth
twi_cutoff - max allowed twi -index
so - initial catchment average saturation deficit (m)
cmask - catchment mask, 1 = catchment_cell
cellarea - gridcell area [m2]
flowacc - flow accumulation per unit contour length (m)
slope - local slope (deg)
S_initial - initial storage deficit, overrides that in 'pp'
outputs - True stores outputs after each timestep into dictionary
"""
if not S_initial:
S_initial = pp['so']
self.dt = float(pp['dt'])
self.cmask = cmask
self.CellArea = cellarea
dx = cellarea**0.5
self.CatchmentArea = np.size(cmask[cmask == 1])*self.CellArea
# topography
self.a = flowacc*cmask # flow accumulation grid
self.slope = slope*cmask # slope (deg) grid
# effective soil depth [m]
self.M = pp['m']
# lat. hydr. conductivity at surface [m2/timestep]
# self.To = pp['ko']*pp['m']*self.dt
self.To = pp['ko']*self.dt
"""
local and catchment average hydrologic similarity indices (xi, X).
Set xi > twi_cutoff equal to cutoff value to remove tail of twi-distribution.
        This concerns mainly the stream network cells. 'Outliers' in the twi-distribution are a
        problem for streamflow prediction.
"""
slope_rad = np.radians(self.slope) # deg to rad
xi = np.log(self.a / dx / (np.tan(slope_rad) + eps))
# apply cutoff
clim = np.percentile(xi[xi > 0], pp['twi_cutoff'])
xi[xi > clim] = clim
self.xi = xi
self.X = 1.0 / self.CatchmentArea*np.nansum(self.xi*self.CellArea)
# baseflow rate when catchment Smean=0.0
self.Qo = self.To*np.exp(-self.X)
# catchment average saturation deficit S [m] is the only state variable
s = self.local_s(S_initial)
s[s < 0] = 0.0
self.S = np.nanmean(s)
# create dictionary of empty lists for saving results
if outputs:
self.results = {'S': [], 'Qb': [], 'Qr': [], 'Qt': [], 'qr': [],
'fsat': [], 'Mbe': [], 'R': []
}
def local_s(self, Smean):
"""
computes local storage deficit s [m] from catchment average
"""
s = Smean + self.M*(self.X - self.xi)
return s
def subsurfaceflow(self):
"""subsurface flow to stream network (per unit catchment area)"""
Qb = self.Qo*np.exp(-self.S / (self.M + eps))
return Qb
def run_timestep(self, R):
"""
runs a timestep, updates saturation deficit and returns fluxes
Args:
R - recharge [m per unit catchment area] during timestep
OUT:
Qb - baseflow [m per unit area]
Qr - returnflow [m per unit area]
qr - distributed returnflow [m]
fsat - saturated area fraction [-]
Note:
R is the mean drainage [m] from bucketgrid.
"""
# initial conditions
So = self.S
s = self.local_s(So)
# subsurface flow, based on initial state
Qb = self.subsurfaceflow()
# update storage deficit and check where we have returnflow
S = So + Qb - R
s = self.local_s(S)
# returnflow grid
qr = -s
qr[qr < 0] = 0.0 # returnflow grid, m
# average returnflow per unit area
        Qr = np.nansum(qr)
import numpy as np
import symjax
import symjax.tensor as T
# we create a simple mapping with 2 matrix multiplications interleaved
# with nonlinearities
x = T.Placeholder((8,), "float32")
w_1 = T.Variable(T.random.randn((16, 8)))
w_2 = T.Variable(T.random.randn((2, 16)))
# the output can be computed easily as
output = w_2.dot(T.relu(w_1.dot(x)))
# now suppose we also wanted the same mapping but with a noise input
epsilon = T.random.randn((8,))
output_noisy = output.clone({x: x + epsilon})
f = symjax.function(x, outputs=[output, output_noisy])
for i in range(10):
    print(f(np.ones(8)))
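# Note (sketch, not in the original example): the same clone mechanism also
# works for deterministic perturbations, e.g.
#   output_shifted = output.clone({x: x + 1.0})
# assuming symjax tensors support addition with Python scalars as they do with
# tensors above.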
import os
import numpy as np
import sys
import cStringIO
import re
import scipy.io as sio
import copy
def cell2strtable(celltable, delim='\t'):
''' convert a cell table into a string table that can be printed nicely
Parameters:
celltable - array-like, ndarray with rows and columns in desired order
delim - str, delimter to combine columns of celltable
[default = '\\t' (strictly 4 spaces)]
Returns:
strtable - str, string version of celltable that prints as table
Example:
celltable = np.array([['Column 1 Title','Column 2 Title',''],
['Row 2 Column 1 is longer...','Row 2 Column 2','Extra Column!']])
delim='\t'
strtable = cell2strtable(celltable, delim)
print(strtable)
Column 1 Title Column 2 Title
Row 2 Column 1 is longer... Row 2 Column 2 Extra Column!
'''
# change \t to 4 spaces
if delim == '\t':
delim = ' '
# check that celltable is ndarray and object
if type(celltable) != np.ndarray:
celltable = np.array([celltable], dtype=np.object)
elif celltable.dtype != np.object: # copy as np.object
celltable = copy.deepcopy(celltable).astype(np.object)
else: # copy as is
celltable = copy.deepcopy(celltable)
# if len(shape) == 1, reshape
if len(celltable.shape)==1:
celltable = np.reshape(celltable, (1,celltable.shape[0]))
# convert all to string
for i,x in enumerate(celltable.ravel()):
celltable.ravel()[i] = np.str(x)
# get max length in each column
max_len = []
for x in celltable.transpose():
max_len.append(np.max([len(y) for y in x]))
# pad each column with zeros
for i,r in enumerate(celltable):
for ii,c in enumerate(r):
            if len(c) < max_len[ii]:
spaces = ''.join([' ' for n in range(max_len[ii]-len(c))])
celltable[i][ii] = c + spaces
# join strings with delim
strtable = []
if len(celltable.shape) > 1:
for r in range(celltable.shape[0]):
strtable.append(delim.join(celltable[r]))
strtable = '\n'.join(strtable)
else:
strtable = delim.join(celltable)
return strtable
def py2mat(A, filename, variable):
''' load from or save to matlab format
Parameters:
A - object, object to save (set to None if loading from filename)
filename - str, file to load from or save to
variable - str, variable name to load or save
Returns:
A - object, object converted from file or converted to matlab format
Example:
A = {0: {'spm': {'temporal': {'st': {'nslices': {0: 28},
'prefix': {0: u'a'},
'refslice': {0: 1},
'scans': {0: {0: u'<UNDEFINED>'}},
'so': {0: 1, 1: 3, 2: 5, 3: 7, 4: 9, 5: 11, 6: 13, 7: 15, 8: 17,
9: 19, 10: 21, 11: 23, 12: 25, 13: 27, 14: 2, 15: 4, 16: 6,
17: 8, 18: 10, 19: 12, 20: 14, 21: 16, 22: 18, 23: 20, 24: 22,
25: 24, 26: 26, 27: 28},
'ta': {0: 1.9285714285714286},
'tr': {0: 2}}}}}}
'''
# load from filename
if A==None:
# init out
out = np.array([], np.object)
# load filename as matlab dtype
A = sio.loadmat(filename, mat_dtype=True)
A = A[variable]
# get substructs of A
S0 = struct2sub(A)
# for each level, get dtype
S1 = np.empty(len(S0), dtype=np.object).tolist()
cell = np.zeros(len(S0), dtype=np.bool).tolist()
for i,S_ in enumerate(S0):
S1[i] = []
cell[i] = []
for n in range(1, len(S_)):
A_ = subsref(A, S_[:n])
# cell index
if A_.dtype == np.object:
# set single index
if A_.ndim == 1:
S1[i].append(S_[n])
cell[i].append(copy.deepcopy(S1[i]))
# set cell array
elif A_.shape[0] > 1:
S1[i].append(S_[n])
cell[i].append(copy.deepcopy(S1[i]))
# field name
elif A_.dtype.names != None:
# set fieldname
if A_.ndim == 0:
S1[i].append(A_.dtype.names[S_[n]])
# set noncell array
elif A_.shape[0] > 1:
S1[i].append(S_[n])
elif A_.ndim > 0 and A_.shape[0] > 1:
S1[i].append(S_[n])
# set values
for S0_, S1_ in zip(S0, S1):
item = subsref(A, S0_)
out = subsasgn(out, S1_, item, list)
# set cells as numpy arrays
for C_ in cell:
# first cell is implied
for c in C_[1:]:
out = subsasgn(out, c, np.array([subsref(out, c)], np.object))
else: # copy A
A = copy.deepcopy(A)
# get substructs for A at each level
S0 = struct2sub(A, dict_out=True)
# set additional dimension for matlab
for k in S0.keys():
for S_ in S0[k]:
A_ = subsref(A, S_)
# if list without following or preceding list, set extra dim
if type(A_)==list and type(subsref(A, S_[:-1]))!=list and \
type(A_[0])!=list:
A = subsasgn(A, S_, [A_])
S0 = struct2sub(A, dict_out=True)
# set dicts as arrays with dtype
l = S0.keys()
l.reverse()
for k in l:
for S_ in S0[k]:
A_ = subsref(A, S_)
# set dict to array with keys as dtype
if type(A_) == dict:
A = subsasgn(A, S_, np.array([tuple(A_.values())],
np.dtype([(k, np.object) for k in A_.keys()])))
S0 = struct2sub(A, dict_out=True)
# set out to dict using variable
out = {variable: A}
# save mat
sio.savemat(filename, out)
return out
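def _demo_py2mat_roundtrip():
    '''Usage sketch (hypothetical filename): save a nested batch dict to a
    .mat file and load it back; mirrors the docstring example above.'''
    A = {0: {'spm': {'util': {'disp': {'data': './mri/anatomical.nii'}}}}}
    py2mat(A, 'demo_batch.mat', 'matlabbatch')  # save A under variable 'matlabbatch'
    B = py2mat(None, 'demo_batch.mat', 'matlabbatch')  # load it back as python objects
    return B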
def subsref(A, S):
''' return value from A using references in S
Parameters:
A - object, object to return value from
S - list, indices/fields to reference to obtain value from A (see Example)
Returns:
value - any, value to index from A using S
Example:
A = {0: {'test': [9,8,7]}}
S = [0, 'test', 1]
value = subsref(A, S)
value =
8
'''
# copy S
S = list(S)
# copy A
value = copy.deepcopy(A)
# for each substruct, get value
for S_ in S:
if type(S_) == str and re.match('.*:.*', S_) != None:
value = eval('value[{S_}]'.format(S_=S_))
else:
value = value[S_]
return value
def subsasgn(A, S, C, append_type=None):
''' set value in A using reference in S
Parameters:
A - object, object to set value
S - list, indices/fields to reference when setting value
C - any, value to set in A at reference S
append_type - type, type of iterable to append if needed (e.g., list)
[default is None, sets to type(A)]
Returns:
A - object, updated object with value set at reference S
Example:
A = {0: {'spm': {'util': {'disp': {'data': '<UNDEFINED>'}}}}}
S = [0, 'spm', 'util', 'disp', 'data']
C = './mri/anatomical.nii'
subsasgn(A, S, C)
A =
{0: {'spm': {'util': {'disp': {'data': './mri/anatomical.nii'}}}}}
Note: Only tested for dict, list, and ndarray. If S == [], A is set to C
'''
# copy A
A = copy.deepcopy(A)
value = A
# set default for setting new index
if append_type == None:
def_val = type(A)([])
else:
def_val = append_type([])
# ensure def_val has ndim > 0
if type(def_val).__module__ == np.__name__ and def_val.ndim == 0:
def_val = np.array([None], dtype=A.dtype)
# for each level in S, index value
for i,S_ in enumerate(S):
# add new key to dict
if type(value) == dict and S_ not in value.keys():
value[S_] = copy.deepcopy(def_val)
# set value to dict and add key with new value type(A)
elif type(value) != dict and type(S_) == str:
value = {}
value[S_] = copy.deepcopy(def_val)
# append list
elif type(value) == list and S_ >= len(value):
for _ in range(S_ - len(value) + 1):
value.append(copy.deepcopy(def_val))
# append ndarray with None
elif type(value).__module__ == np.__name__:
if value.ndim == 0:
value = np.array([value])
if S_ >= len(value):
for _ in range(S_ - len(value) + 1):
value = np.append(value, None)
# if None, set as list
elif value == None:
value = []
for _ in range(S_ - len(value) + 1):
value.append([])
# set value to A at current substruct
if i > 0 and len(S[:i]) > 0:
exec('A' + sub2str(S[:i]) + '= value')
else:
A = value
# evaluate : string
if type(S_) == str and re.match('.*:.*', S_) != None:
value = eval('value[{S_}]'.format(S_=S_))
else: # index value using S_
value = value[S_]
# set complete reference to C
if len(S) > 0:
exec('A' + sub2str(S) + '= C')
else: # simple set
A = C
return A
def sub2str(S):
''' convert a "substruct" to a "string representation" or vice versa
Parameters:
S - list or str, substruct/string representation to convert
Returns:
S - list or str, converted substruct/string representation
Example 1:
S = [0, 'field1', 0, 'field2', 1]
str_rep = sub2str(S)
str_rep =
'[0]["field1"][0]["field2"][1]'
Example 2:
str_rep = '["field1"]["field2"][4]'
S = sub2str(str_rep)
S =
['field1', 'field2', 4]
'''
# copy S
if type(S) != str:
S = list(S)
# init output
out = []
# if str, output array
if type(S) == str:
S = re.split('[\[\]]', S)
S = [S for S in S if S != '']
for S_ in S:
if S_.isdigit():
out.append(int(S_))
else:
out.append(re.sub('"', '', S_))
else: # if array, output str
if not np.iterable(S):
S = [S,]
for S_ in S:
if type(S_) == str:
out.append('"' + S_ + '"')
else:
out.append(str(S_))
out = '[' + ']['.join(out) + ']'
return out
def struct2sub(A, r=np.inf, dict_out=False):
''' return all "substructs" from A through levels r
Parameters:
A - object, object to return substructs from
r - number, number of levels to search when obtaining substructs. Returns
substruct lists with maximum length of r + 1 (0 is first level)
[default is np.inf, i.e. all levels of A]
dict_out - bool, return each level list of substruct as dict with keys
corresponding to levels
[default is False]
Returns:
S - list, list of substructs for each value in A through levels r
Example:
A = {'test': {0: 12, 1: '2'}, 'test2': 3}
r = 1
S =
[['test', 0], ['test', 1], ['test2']]
'''
# copy A
A = copy.deepcopy(A)
# get substruct based on type
S = {0: []}
if type(A) == dict:
S[0] = [[S_] for S_ in A.keys()]
elif type(A) == list or type(A) == tuple:
S[0] = [[S_] for S_ in range(len(A))]
elif type(A).__module__ == np.__name__:
if A.ndim > 0 or type(A) == np.void:
A = list(A)
S[0] = [[S_] for S_ in range(len(A))]
# ensure list is not empty
if len(S[0]) == 0:
S[0] = [[],]
# # if r is zero, return
if r == 0:
return S[0]
# for each level, get struct2sub and append to previous
r_ = 0
while r_ < r:
S[r_+1] = []
for S0 in S[r_]:
for S1 in struct2sub(subsref(A, S0), 0):
S[r_+1].append(S0 + S1)
if len(struct2sub(subsref(A, S0), 0)) == 0:
S[r_+1].append(S[r_])
if S[r_] == S[r_+1]:
S.pop(r_+1, None)
break
else:
r_ += 1
if dict_out: # return dict
return S
else: # return S at level r_
return S[r_]
def pebl_getfield(A, S=None, R=None, expr=None, fun=None, r=np.inf):
''' get values from object, A, using substructs or string representations
Parameters:
A - object, object to return values from
Options:
S - list, substruct to get value from A
[defualt is None]
R - list or str, string representation to get value from A
[default is None]
expr - str, expression to search string representations to get value
from A
[default is None]
fun - dict, dict containing function to search for values within A. keys
within the dict should contain 'fun', and integers corresponding to
argument index (see Example 2). Each C will be input as the argument
not contained in the dict keys (i.e. at index 0 for Example 2).
[default is None]
r - int, number of level to search within A (each level is field or
index reference)
[default is np.inf]
Returns:
C - list, values returned from A
(i.e. C[0] == subsref(A, S[0]) or eval('A' + R[0]))
S - list, substructs used to return values from A
R - list, string representations used to return values from A
Example 1:
A = {0: {'spm': {'util': {'disp': {'data': '<UNDEFINED>'}}}}}
expr = '.*\["disp"]'
C, S, R = pebl_getfield(A, expr=expr)
C =
array([{'data': '<UNDEFINED>'}], dtype=object)
S =
[[0, 'spm', 'util', 'disp']]
R =
['[0]["spm"]["util"]["disp"]']
Example 2:
A = {'test1': {0: 3}, 'test2': [2,3,4,5], 'test3': []}
fun = {'fun': np.equal, 1: 3}
C, S, R = pebl_getfield(A, fun=fun)
C =
array([3, 3], dtype=object)
S =
[['test1', 0], ['test2', 1]]
R =
['["test1"][0]', '["test2"][1]']
'''
# if S exists, get copy
if S != None:
if type(S)!=list or type(S[0])!=list:
S = [S,]
else:
S = list(S)
else: # get substructs of A
S = []
if not np.iterable(r):
r = [r,]
for rr in r:
S = S + struct2sub(A, rr)
# if R exists, update S
if R != None:
if not np.iterable(R):
R = [R,]
else:
R = list(R)
S = []
for R_ in R:
S.append(sub2str(R_))
else: # if R doesnt exist, set from S
R = []
for S_ in S:
R.append(sub2str(S_))
# find R using regex
if expr != None:
tmp = list(R)
R = []
# copy expr
if type(expr) == str:
expr = [expr,]
else:
expr = list(expr)
for e in expr:
m = [re.findall(e, R_) for R_ in tmp]
m = np.unique([m[0] for m in m if len(m) > 0])
R = np.append(R, m)
R = np.unique(R).tolist()
# update S
S = []
for R_ in R:
S.append(sub2str(R_))
# use subsref to get values
C = []
for S_ in S:
C.append(subsref(A, S_))
# search using function
if fun != None:
# copy fun
if type(fun) != dict:
fun = {'fun': fun}
else:
fun = dict(fun)
# set fnd array of false
fnd = np.zeros(len(C), dtype=np.bool)
# get key positions for function call
key_ns = [k for k in fun.keys() if type(k) == int]
key_rng = range(np.max(key_ns)+1)
c_idx = [k for k in key_rng if k not in key_ns]
if len(c_idx) == 0:
c_idx = np.max(key_ns)+1
else:
c_idx = c_idx[0]
# for each C_ evalutate function
for i, C_ in enumerate(C):
# set c_idx to C_
fun[c_idx] = C_
# set args for input
args = [fun[k] for k in key_rng]
# evaluate function
tmp = fun['fun'](*args)
if tmp == NotImplemented:
fnd[i] = False
else:
fnd[i] = tmp
# set to true indices
        C = np.array(C, dtype=np.object)
"""Custom preprocessing functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import generator_stop
from __future__ import print_function
import multiprocessing as mp
import os
import re
from functools import partial
import numba
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from erinn.utils.io_utils import read_config_file
from erinn.utils.io_utils import read_pkl
from erinn.utils.io_utils import write_pkl
def log_transform(arr, inverse=False, inplace=True):
"""
Perform a logarithmic transformation or an inverse logarithmic transformation.
new_array[i] = log10(arr[i] + 1), arr[i] >= 0
new_array[i] = -log10(abs(arr[i] - 1)), arr[i] < 0
Parameters
----------
arr : numpy.ndarray
An array which you want to perform logarithmic transformation or inverse logarithmic transformation.
inverse : bool
Whether to perform an inverse transformation.
inplace : bool
Whether to use inplace mode.
Returns
-------
new_arr : numpy.ndarray, optional
If `inplace` is False, then a transformed array is returned.
References
----------
https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html
https://stackoverflow.com/questions/21610198/runtimewarning-divide-by-zero-encountered-in-log
"""
if inplace:
# method 1: use boolean mask
if inverse:
mask = (arr >= 0)
arr[mask] = np.power(10, arr[mask]) - 1
arr[~mask] = -np.power(10, -arr[~mask]) + 1
else:
mask = (arr >= 0)
            arr[mask] = np.log10(arr[mask] + 1)
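# --- Verification sketch (appended here because the function body above is
# truncated in this excerpt). The documented mapping can be reproduced with a
# single np.where; np.abs in the first branch only keeps the unused branch
# warning-free and does not change the result for non-negative inputs.
def _demo_log_transform_formula():
    arr = np.array([-9.0, 0.0, 9.0, 99.0])
    forward = np.where(arr >= 0,
                       np.log10(np.abs(arr) + 1),
                       -np.log10(np.abs(arr - 1)))
    return forward  # expected: [-1., 0., 1., 2.]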
"""
This script can be used to evaluate a trained model on 3D pose/shape and masks/part segmentation. You first need to download the datasets and preprocess them.
Adds SLP support; reads the model either from the experiment folder or from a given checkpoint. Results are saved to the experiment folder.
Reads either specific model weights or the latest checkpoint of the current experiment.
Experiment-specific evaluation.
This script only works for runs with depth evaluations.
Example usage:
```
python3 eval_ori.py --checkpoint=data/model_checkpoint.pt --dataset=h36m-p1 --log_freq=20
```
Running the above command will compute the MPJPE and Reconstruction Error on the Human3.6M dataset (Protocol I). The ```--dataset``` option can take different values based on the type of evaluation you want to perform:
1. Human3.6M Protocol 1 ```--dataset=h36m-p1```
2. Human3.6M Protocol 2 ```--dataset=h36m-p2```
3. 3DPW ```--dataset=3dpw```
4. LSP ```--dataset=lsp```
5. MPI-INF-3DHP ```--dataset=mpi-inf-3dhp```
"""
import torch
from torch.utils.data import DataLoader
import numpy as np
import cv2
import os
import argparse
import json
from collections import namedtuple
from tqdm import tqdm
import torchgeometry as tgm
import os.path as osp
from glob import glob
import config
import constants
from models import hmr, SMPL
from datasets import BaseDataset, SLP_RD, SPIN_ADP
from utils.imutils import uncrop
from utils.pose_utils import reconstruction_error
from utils.part_utils import PartRenderer
import utils.utils as ut_t
from utils.renderer import Renderer
from models import hmr, SMPL
from utils import TrainOptions
action_names = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Posing', 'Purchases', 'Sitting',
'SittingDown', 'Smoking', 'Photo', 'Waiting', 'Walking', 'WalkDog', 'WalkTogether']
def run_evaluation(model, dataset_name, dataset, out_fd='logs/tmp',
batch_size=32, img_res=224,
num_workers=32, shuffle=False, log_freq=50, svImg_freq=3, iter=-1, if_ldImg=False, if_cam_rct=False):
"""Run evaluation on the datasets and metrics we report in the paper.
if_ldImg: use the lead image to save the no image , 5 bits
"""
# context setting
result_file = osp.join(out_fd, 'eval_rst.npz')
metric_file = osp.join(out_fd, 'eval_metric.json')
metric = {} # dictionary
if svImg_freq>0:
vid_fd = osp.join(out_fd, 'vis')
if not osp.exists(vid_fd):
os.makedirs(vid_fd)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Transfer model to the GPU
model.to(device)
# Load SMPL model
if_ptc = True
if dataset_name in ['MIMM', 'SyRIP']:
smpl_pth = osp.join(config.SMPL_MODEL_DIR, 'SMIL.pkl')
if dataset_name == 'SyRIP': # only SyRIP on depth
if_ptc = False
t_smil = torch.tensor([0.0, 0, -0.46], requires_grad=False).cuda() # no grad
s_smil = 2.75
# if_cam_rct = True
else:
smpl_pth = config.SMPL_MODEL_DIR
# if_cam_rct = False
smpl_neutral = SMPL(smpl_pth,
create_transl=False).to(device) # if infan tuse the SMIL model
smpl_male = SMPL(config.SMPL_MODEL_DIR,
gender='male',
create_transl=False).to(device)
smpl_female = SMPL(config.SMPL_MODEL_DIR,
gender='female',
create_transl=False).to(device)
renderer = PartRenderer()
render_vis = Renderer(focal_length=constants.FOCAL_LENGTH, img_res=constants.IMG_RES, faces=smpl_neutral.faces)
# add the img render to gen images. every several parts.
# Regressor for H36m joints
J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
save_results = result_file is not None
# Disable shuffling if you want to save the results
if save_results:
shuffle=False
# Create dataloader for the dataset
data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
# Pose metrics
# MPJPE and Reconstruction error for the non-parametric and parametric shapes
mpjpe = np.zeros(len(dataset))
recon_err = np.zeros(len(dataset))
mpjpe_smpl = np.zeros(len(dataset))
recon_err_smpl = np.zeros(len(dataset))
# MPJPE and Reconstruction error for each action
mpjpe_dict = {}
recon_err_dict = {}
for action in action_names:
mpjpe_dict[action] = []
recon_err_dict[action] = []
# images name list
imgnames_list = []
# Shape metrics
# Mean per-vertex error
shape_err = np.zeros(len(dataset))
shape_err_smpl = np.zeros(len(dataset))
# aligned depth error
ptc_err = np.zeros(len(dataset)) # reconstrcuted
depth_err = np.zeros(len(dataset))
# Mask and part metrics
# Accuracy
accuracy = 0.
parts_accuracy = 0.
# True positive, false positive and false negative
tp = np.zeros((2,1))
fp = np.zeros((2,1))
fn = np.zeros((2,1))
parts_tp = np.zeros((7,1))
parts_fp = np.zeros((7,1))
parts_fn = np.zeros((7,1))
# Pixel count accumulators
pixel_count = 0
parts_pixel_count = 0
# Store SMPL parameters
smpl_pose = np.zeros((len(dataset), 72))
smpl_betas = np.zeros((len(dataset), 10))
smpl_camera = np.zeros((len(dataset), 3))
pred_joints = np.zeros((len(dataset), 17, 3))
eval_pose = False
eval_masks = False
eval_parts = False
eval_depth = False
# Choose appropriate evaluation for each dataset
if dataset_name == 'h36m-p1' or dataset_name == 'h36m-p2' or dataset_name == '3dpw' or dataset_name == 'mpi-inf-3dhp' or dataset_name == 'SLP' or dataset_name == 'MIMM':
eval_pose = True # eval pose these 3
eval_depth = True
elif dataset_name == 'lsp': # lsp for masks and parts
eval_masks = True
eval_parts = True
annot_path = config.DATASET_FOLDERS['upi-s1h']
joint_mapper_h36m = constants.H36M_TO_J17 if dataset_name == 'mpi-inf-3dhp' else constants.H36M_TO_J14 # from h36 17 -> lsp + pelvis, spine, neck(upper)
joint_mapper_gt = constants.J24_TO_J17 if dataset_name == 'mpi-inf-3dhp' else constants.J24_TO_J14 # from smpl 24 -> lsp neck , head(h36) pelvis, spine, 17 jaw
# Iterate over the entire dataset
for step, batch in enumerate(tqdm(data_loader, desc='Eval', total=len(data_loader))):
if iter>0 and step == iter:
break
# Get ground truth annotations from the batch
gt_pose = batch['pose'].to(device)
# print('get gt_pose', gt_pose)
gt_betas = batch['betas'].to(device)
gt_vertices = smpl_neutral(betas=gt_betas, body_pose=gt_pose[:, 3:], global_orient=gt_pose[:, :3]).vertices # with batch x jt, root centered no trans
images = batch['img'].to(device)
gender = batch['gender'].to(device)
curr_batch_size = images.shape[0]
imgs_RGB = batch['img_RGB'].to(device)
depths_dn = batch['depth_dn'].to(device)
gt_keypoints_3d = batch['pose_3d'].cuda()
masks2 = batch['mask2']
imgnames = batch['imgname']
gt_2d = batch['keypoints']
imgnames_list += imgnames
# read in depth , pred depth, mask out, estimate the bias, update depth, ptc calculation , crop(bb) or (mask)
with torch.no_grad(): # get pred
pred_rotmat, pred_betas, pred_camera = model(images) # pose, shape, camera(ortho projection), z, x, y ?
if if_cam_rct:
pred_camera += t_smil
pred_camera[:, 0] *= s_smil # 64 match 3 non singular dimension
pred_output = smpl_neutral(betas=pred_betas, body_pose=pred_rotmat[:,1:], global_orient=pred_rotmat[:,0].unsqueeze(1), pose2rot=False)
pred_vertices = pred_output.vertices
if save_results:
rot_pad = torch.tensor([0,0,1], dtype=torch.float32, device=device).view(1,3,1)
rotmat = torch.cat((pred_rotmat.view(-1, 3, 3), rot_pad.expand(curr_batch_size * 24, -1, -1)), dim=-1) # why 24 ? 24 joint each a 3x3
pred_pose = tgm.rotation_matrix_to_angle_axis(rotmat).contiguous().view(-1, 72) # flatten
smpl_pose[step * batch_size:step * batch_size + curr_batch_size, :] = pred_pose.cpu().numpy()
smpl_betas[step * batch_size:step * batch_size + curr_batch_size, :] = pred_betas.cpu().numpy()
smpl_camera[step * batch_size:step * batch_size + curr_batch_size, :] = pred_camera.cpu().numpy()
camera_translation_bch = torch.stack([pred_camera[:, 1], pred_camera[:, 2],2 * constants.FOCAL_LENGTH / (constants.IMG_RES * pred_camera[:, 0] + 1e-9)],dim=-1) # metric , pred_cam [-1, 1] res range , pred_camera [ z, x, y]? last dim? , render flip the x, direction, neural render not change.
idx_bs = step * batch_size # idx of the smpl
for i, pred_v in enumerate(pred_vertices): # loop bch
# if if_svImg: # only the first in batch will be saved, otherwise loop it
idx = idx_bs + i # current bs
img_RGB_t = imgs_RGB[i].cpu().numpy() # to image format
img_RGB_t = np.transpose(img_RGB_t, (1,2,0)) # 0 ~1
if if_ldImg:
                raise ValueError('not implemented')
else:
img_rd = img_RGB_t
mask2 = masks2[i] # mask 2
depth_dn_t = depths_dn[i].cpu().numpy().squeeze() # remove leading channel
# Calculate camera parameters for rendering
camera_translation_t = camera_translation_bch[i].cpu().numpy() # bch 1st
pred_vertices0 = pred_v.cpu().numpy() # single sample
img_shape, valid_mask, rend_depth = render_vis(pred_vertices0, camera_translation_t, img_rd) # 0 ~1
valid_mask = valid_mask.squeeze() # get rid of the end 1 dim
# Render side views
aroundy = cv2.Rodrigues(np.array([0, np.radians(90.), 0]))[0] # x, y ,z right, up , outward
center = pred_vertices0.mean(axis=0)
rot_vertices = np.dot((pred_vertices0 - center), aroundy) + center # rotate body
mask_u = np.logical_and(mask2.cpu().numpy(), valid_mask)
# if mask_u all false, then set 0 to error -1 filter out later, d_dn_t not change
if (~mask_u).all(): # if empty valid
err_d_t = -1. # specific label
else:
d_rend_valid = rend_depth[mask_u] # rend depth value range
d_dn_valid = depth_dn_t[mask_u] # vec only
# d_dn_mask = np.logical_and(depth_dn_t<2.11, depth_dn_t>0) # 2.1m, no boundary only bed, sligh 0.01 margin to cover all bed, depth denoised
depth_dn_t = depth_dn_t + (d_rend_valid.mean() - d_dn_valid.mean()) # the z direction, err: infant empty slice
d_dn_valid = d_dn_valid + (d_rend_valid.mean() - d_dn_valid.mean()) # the z direction
# print('idx', idx)
# print('d_rend_valid shape', d_dn_valid.shape)
err_d_t = np.absolute(d_rend_valid - d_dn_valid).mean() * 1000.
# rend_depth[rend_depth==0] = rend_depth[mask_u].max() + 0.5 # 0.5 far away,
# err infant empty, for max assume no mask is there.
# print("current d error", err_d_t)
# print('rend_depth range', rend_depth.min(), rend_depth.max())
# print("rend_depth upper corner", rend_depth[:10,:10]) # check corner area
# print("depth_dn_t rg", depth_dn_t.min(), depth_dn_t.max())
# print('mask type:', valid_mask.dtype) # mask type bool
depth_err[idx]= err_d_t # average error, to mm, broad cast 2224,224 -> 7053
if svImg_freq>0 and idx % svImg_freq == 0: # only greater than 0 save the image
trans = camera_translation_t.copy() # ptc, x,y z to world, x, -y , -z
trans[1] *= -1 # y opposite direction.
if if_ptc and err_d_t>0:
ptc = ut_t.get_ptc_mask(depth_dn_t, [constants.FOCAL_LENGTH, ] * 2,
mask=mask_u) # empty ptc give None to not render
ptc[:, 1] *= -1 # y flipped? point up?
ptc = ptc - trans # camera coordinate, to world
rot_ptc = np.dot((ptc - center), aroundy) + center
else:
ptc = None
rot_ptc = None # not render
# get the ptc version front view
img_shape, _, _ = render_vis(pred_vertices0, camera_translation_t, img_rd, ptc=None) # 0 ~1 only if want to have ptc on front view
img_shape_white, _, _ = render_vis(pred_vertices0, camera_translation_t, np.ones_like(img_rd), ptc=None) # smpl without bg f
                img_shape_side_ptc, _, _ = render_vis(rot_vertices, camera_translation_t, np.ones_like(img_rd), ptc=rot_ptc)  # side view; ptc argument reconstructed from context
"""Implements Exact Maximum Entropy IRL from my thesis"""
import numpy as np
from numba import jit
from numba import types
from numba.typed import Dict, List
from scipy.optimize import minimize
from mdp_extras import (
Linear,
Disjoint,
trajectory_reward,
DiscreteExplicitExtras,
DiscreteImplicitExtras,
)
# Placeholder for 'negative infinity' which doesn't cause NaN in log-space operations
_NINF = np.finfo(np.float64).min
@jit(nopython=True)
def nb_backward_pass_log(p0s, L, t_mat, gamma=1.0, rs=None, rsa=None, rsas=None):
"""Compute backward message passing variable in log-space
Args:
p0s (numpy array): Starting state probabilities
L (int): Maximum path length
t_mat (numpy array): |S|x|A|x|S| transition matrix
gamma (float): Discount factor
rs (numpy array): |S| array of linear state reward weights
rsa (numpy array): |S|x|A| array of linear state-action reward weights
rsas (numpy array): |S|x|A|x|S| array of linear state-action-state reward weights
Returns:
(numpy array): |S|xL array of backward message values in log space
"""
if rs is None:
rs = np.zeros(t_mat.shape[0])
if rsa is None:
rsa = np.zeros(t_mat.shape[0:2])
if rsas is None:
rsas = np.zeros(t_mat.shape[0:3])
alpha = np.zeros((t_mat.shape[0], L))
alpha[:, 0] = np.log(p0s) + rs
for t in range(L - 1):
for s2 in range(t_mat.shape[2]):
# Find maximum value among all parents of s2
m_t = _NINF
for s1 in range(t_mat.shape[0]):
for a in range(t_mat.shape[1]):
if t_mat[s1, a, s2] == 0:
continue
m_t = max(
m_t,
(
alpha[s1, t]
+ np.log(t_mat[s1, a, s2])
+ gamma ** ((t + 1) - 1) * (rsa[s1, a] + rsas[s1, a, s2])
),
)
m_t += (gamma ** (t + 1)) * rs[s2]
# Compute next column of alpha in log-space
for s1 in range(t_mat.shape[0]):
for a in range(t_mat.shape[1]):
if t_mat[s1, a, s2] == 0:
continue
alpha[s2, t + 1] += t_mat[s1, a, s2] * np.exp(
alpha[s1, t]
+ gamma ** ((t + 1) - 1) * (rsa[s1, a] + rsas[s1, a, s2])
+ (gamma ** (t + 1)) * rs[s2]
- m_t
)
alpha[s2, t + 1] = m_t + np.log(alpha[s2, t + 1])
return alpha
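# --- Usage sketch (not part of the original module) --------------------------
# Toy two-state, one-action MDP with uniform dynamics and a state-only reward;
# all values below are illustrative assumptions.
def _demo_backward_pass():
    p0s = np.array([0.6, 0.4])
    t_mat = np.full((2, 1, 2), 0.5)      # |S| x |A| x |S'| transition matrix
    rs = np.array([0.0, 1.0])            # linear state reward weights
    return nb_backward_pass_log(p0s, 3, t_mat, gamma=0.9, rs=rs)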
@jit(nopython=True)
def nb_backward_pass_log_deterministic_stateonly(
p0s, L, parents, rs, gamma=1.0, padded=False
):
"""Compute backward message passing variable in log-space
This version of the backward pass function makes extra assumptions so we can handle
some much larger problems
- Dynamics are deterministic
- Rewards are state-only
Args:
p0s (numpy array): Starting state probabilities
L (int): Maximum path length
parents (numpy array): Fixed-size parents array. Rows indices correspond to
states, and the first X elements of each row contain the parent state IDs
for that state. Any remaining elements of that row are then -1.
rs (numpy array): |S| array of linear state reward weights
gamma (float): Discount factor
padded (bool): Is this MDP padded? In which case, we need to handle the parents
array with extra caution (it won't have the auxiliary state/action included)
Returns:
(numpy array): |S|xL array of backward message values in log space
"""
num_states = len(p0s)
alpha = np.zeros((num_states, L))
    alpha[:, 0] = np.log(p0s)
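# --- Illustration (appended; the remainder of this function is truncated in
# this excerpt). Example of the fixed-size `parents` array described in the
# docstring, for a deterministic 3-state chain 0 -> 1 -> 2: state 0 has no
# parents, state 1 is reached from 0, state 2 from 1, and unused slots are -1.
#   parents = np.array([[-1, -1],
#                       [ 0, -1],
#                       [ 1, -1]])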
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Inference module utilities
"""
import numpy as np
from scipy import stats
from scipy.stats import rankdata, wilcoxon
from collections.abc import Iterable
from rsatoolbox.model import Model
from rsatoolbox.rdm import RDMs
from .matrix import pairwise_contrast
from .rdm_utils import batch_to_matrices
def input_check_model(models, theta=None, fitter=None, N=1):
""" Checks whether model related inputs to evaluations are valid and
generates an evaluation-matrix of fitting size.
Args:
model : [list of] rsatoolbox.rdm.RDMs
the models to be evaluated
theta : numpy.ndarray or list , optional
Parameter(s) for the model(s). The default is None.
fitter : [list of] function, optional
fitting function to overwrite the model default.
The default is None, i.e. keep default
N : int, optional
number of samples/rows in evaluations matrix. The default is 1.
Returns:
evaluations : numpy.ndarray
empty evaluations-matrix
theta : list
the processed and checked model parameters
fitter : [list of] functions
checked and processed fitter functions
"""
if isinstance(models, Model):
models = [models]
elif not isinstance(models, Iterable):
raise ValueError('model should be an rsatoolbox.model.Model or a list of'
+ ' such objects')
if N > 1:
evaluations = np.zeros((N, len(models)))
else:
evaluations = np.zeros(len(models))
if theta is not None:
assert isinstance(theta, Iterable), 'If a list of models is' \
+ ' passed theta must be a list of parameters'
        assert len(models) == len(theta), 'there should be equally many' \
+ ' models as parameters'
else:
theta = [None] * len(models)
if fitter is None:
fitter = [None] * len(models)
elif isinstance(fitter, Iterable):
assert len(fitter) == len(models), 'if fitters are passed ' \
+ 'there should be as many as models'
else:
fitter = [fitter] * len(models)
for k, model in enumerate(models):
if fitter[k] is None:
fitter[k] = model.default_fitter
return models, evaluations, theta, fitter
def pool_rdm(rdms, method='cosine'):
"""pools multiple RDMs into the one with maximal performance under a given
evaluation metric
rdm_descriptors of the generated rdms are empty
Args:
rdms (rsatoolbox.rdm.RDMs):
RDMs to be pooled
method : String, optional
Which comparison method to optimize for. The default is 'cosine'.
Returns:
rsatoolbox.rdm.RDMs: the pooled RDM, i.e. a RDM with maximal performance
under the chosen method
"""
rdm_vec = rdms.get_vectors()
if method == 'euclid':
rdm_vec = _nan_mean(rdm_vec)
elif method == 'neg_riem_dist':
rdm_vec = _nan_mean(rdm_vec)
elif method == 'cosine':
rdm_vec = rdm_vec / np.sqrt(np.nanmean(rdm_vec ** 2, axis=1,
keepdims=True))
rdm_vec = _nan_mean(rdm_vec)
elif method == 'corr':
rdm_vec = rdm_vec - np.nanmean(rdm_vec, axis=1, keepdims=True)
rdm_vec = rdm_vec / np.nanstd(rdm_vec, axis=1, keepdims=True)
rdm_vec = _nan_mean(rdm_vec)
rdm_vec = rdm_vec - np.nanmin(rdm_vec)
elif method == 'cosine_cov':
rdm_vec = rdm_vec / np.sqrt(np.nanmean(rdm_vec ** 2, axis=1,
keepdims=True))
rdm_vec = _nan_mean(rdm_vec)
elif method == 'corr_cov':
rdm_vec = rdm_vec - np.nanmean(rdm_vec, axis=1, keepdims=True)
rdm_vec = rdm_vec / np.nanstd(rdm_vec, axis=1, keepdims=True)
rdm_vec = _nan_mean(rdm_vec)
rdm_vec = rdm_vec - np.nanmin(rdm_vec)
    elif method == 'spearman':
rdm_vec = np.array([_nan_rank_data(v) for v in rdm_vec])
rdm_vec = _nan_mean(rdm_vec)
elif method == 'rho-a':
rdm_vec = np.array([_nan_rank_data(v) for v in rdm_vec])
rdm_vec = _nan_mean(rdm_vec)
elif method == 'kendall' or method == 'tau-b':
Warning('Noise ceiling for tau based on averaged ranks!')
rdm_vec = np.array([_nan_rank_data(v) for v in rdm_vec])
rdm_vec = _nan_mean(rdm_vec)
elif method == 'tau-a':
Warning('Noise ceiling for tau based on averaged ranks!')
rdm_vec = np.array([_nan_rank_data(v) for v in rdm_vec])
rdm_vec = _nan_mean(rdm_vec)
else:
raise ValueError('Unknown RDM comparison method requested!')
return RDMs(rdm_vec,
dissimilarity_measure=rdms.dissimilarity_measure,
descriptors=rdms.descriptors,
rdm_descriptors=None,
pattern_descriptors=rdms.pattern_descriptors)
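# --- Usage sketch (random data, not part of rsatoolbox) ----------------------
# Pool five random RDM vectors (5 conditions -> 10 dissimilarity pairs) under
# the 'corr' criterion; relies on the full _nan_mean implementation below,
# which is truncated in this excerpt.
def _demo_pool_rdm():
    rdm_vec = np.random.rand(5, 10)
    rdms = RDMs(rdm_vec)
    return pool_rdm(rdms, method='corr')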
def _nan_mean(rdm_vector):
""" takes the average over a rdm_vector with nans for masked entries
without a warning
Args:
rdm_vector(numpy.ndarray): set of rdm_vectors to be averaged
Returns:
rdm_mean(numpy.ndarray): the mean rdm
"""
nan_idx = ~np.isnan(rdm_vector[0])
    mean_values = np.mean(rdm_vector[:, nan_idx], axis=0)
# -*- coding: utf-8 -*-
"""
"""
import matplotlib.pyplot as plt
import numpy as np
import copy
import pickle
from tqdm.auto import trange
from scipy.ndimage import binary_dilation
from mpl_toolkits.axes_grid1 import make_axes_locatable
import lenstronomy.Util.data_util as data_util
import lenstronomy.Util.util as util
import lenstronomy.Plots.plot_util as plot_util
from lenstronomy.Util.param_util import phi_q2_ellipticity
from lenstronomy.SimulationAPI.sim_api import SimAPI
from lenstronomy.Workflow.fitting_sequence import FittingSequence
from lenstronomy.Plots.model_plot import ModelPlot
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LensModel.lens_model_extensions import LensModelExtensions
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
# plot settings
import seaborn as sns
# to change tex to Times New Roman in mpl
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Times New Roman'
plt.rcParams['mathtext.rm'] = 'serif'
plt.rcParams['mathtext.it'] = 'serif:italic'
plt.rcParams['mathtext.bf'] = 'serif:bold'
plt.rcParams['mathtext.fontset'] = 'custom'
def set_fontscale(font_scale=1):
sns.set(style='ticks', context=None,
font='Times New Roman',
rc={#"text.usetex": True,
#"font.family": 'serif',
#"font.serif": 'Times New Roman',
#"mathtext.rm": 'serif',
#"mathtext.it": 'serif:italic',
#"mathtext.bf": 'serif:bold',
#"mathtext.fontset": 'custom',
"xtick.direction": "in",
"ytick.direction": "in",
"axes.linewidth": 0.5*font_scale,
"axes.labelsize": 9*font_scale,
"font.size": 9*font_scale,
"axes.titlesize": 9*font_scale,
"legend.fontsize": 8*font_scale,
"xtick.labelsize": 8*font_scale,
"ytick.labelsize": 8*font_scale,
})
set_fontscale(2.)
palette = sns.color_palette('muted', 8)
palette.as_hex()
class LensingETC(object):
"""
Contains all the methods to simulate and model mock lenses, and plot the
results.
"""
def __init__(self, lens_specifications=None, filter_specifications=None,
observing_scenarios=None, psfs=None,
magnitude_distributions=None, use_pemd=False,
source_galaxy_indices=[], source_galaxy_shapelet_coeffs=None
):
"""
Setup the `LensingETC` class for simulation if arguments are provided.
It is possible to create an instance without passing any argument to
plot and examine the outputs.
:param lens_specifications: description of the lens sample
:type lens_specifications: `dict`
:param filter_specifications: description of the filters
:type filter_specifications: `dict`
:param observing_scenarios: description of the observing scenarios
:type observing_scenarios: `list`
:param psfs: PSFs for simulation and modeling
:type psfs: `dict`
:param magnitude_distributions: sampling functions for the magnitudes
:type magnitude_distributions: `dict`
:param use_pemd: if `True`, use FASTELL code, requires installation
of fastell4py package
:type use_pemd: `bool`
:param source_galaxy_indices: (optional) list of indices can be
provided to
select specific galaxy morphologies as sources, this list can be
curated by inspecting the galaxy structures with the notebook
source_galaxies/Inspect source galaxy structure.ipynb. If
source_galaxy_indices=None, then source galaxies wll be randomly
selected. If not provided, random source galaxies will be used.
:type source_galaxy_indices: `list`
:param source_galaxy_shapelet_coeffs: (optional) array containing
shapelet coefficients for source galaxies. If not provided,
a pre-existing library of galaxies will be used.
:type source_galaxy_shapelet_coeffs: `numpy.array`
"""
do_simulation = False
if np.any([a is not None for a in
[lens_specifications, filter_specifications,
observing_scenarios, magnitude_distributions]]
):
if np.any([a is None for a in
[lens_specifications, filter_specifications,
observing_scenarios, magnitude_distributions]]
):
raise ValueError("One/more from lens_specifications, "
"filter_specifications, "
"observing_scenarios, "
"magnitude_distributions is not provided!")
else:
do_simulation = True
if do_simulation:
self.num_lenses = lens_specifications['num_lenses']
self._with_point_source = lens_specifications['with_point_source']
self.filter_specifications = filter_specifications
self.observing_scenarios = observing_scenarios
self.simulation_psfs = psfs['simulation']
self.modeling_psfs = psfs['modeling']
if 'psf_uncertainty_level' in psfs:
self._psf_uncertainty_level = psfs['psf_uncertainty_level']
else:
self._psf_uncertainty_level = 0.5
self.lens_magnitude_distributions = magnitude_distributions['lens']
self.source_magnitude_distributions = magnitude_distributions['source']
if self._with_point_source:
self.quasar_magnitude_distributions = magnitude_distributions[
'quasar']
self.num_pixels = self.filter_specifications['num_pixel']
self.pixel_scales = self.filter_specifications['pixel_scale']
self.num_filters = self.filter_specifications['num_filter']
self.num_scenarios = len(self.observing_scenarios)
self._kwargs_model = {
'lens_model_list': ['PEMD' if use_pemd else 'EPL', 'SHEAR'],
'lens_light_model_list': ['SERSIC_ELLIPSE'],
'source_light_model_list': ['SHAPELETS'],
'point_source_model_list': ['SOURCE_POSITION'] if
self._with_point_source else []
}
self._kwargs_model_smooth_source = {
'lens_model_list': ['PEMD' if use_pemd else 'EPL', 'SHEAR'],
'lens_light_model_list': ['SERSIC_ELLIPSE'],
'source_light_model_list': ['SERSIC_ELLIPSE'],
'point_source_model_list': ['SOURCE_POSITION'] if
self._with_point_source else []
}
self._shapelet_coeffs = np.load(
'source_galaxy_shapelet_coefficients_nmax50.npz')['arr_0']
self._kwargs_lenses = []
self._source_positions = []
self._lens_ellipticities = []
self._source_ellipticities = []
if self._with_point_source:
self._image_positions = []
else:
self._image_positions = None
if not source_galaxy_indices:
source_galaxy_indices = np.random.randint(0,
len(self._shapelet_coeffs), self.num_lenses)
if source_galaxy_shapelet_coeffs is None:
self._source_galaxy_shapelet_coeffs = self._shapelet_coeffs[
source_galaxy_indices]
else:
self._source_galaxy_shapelet_coeffs = \
source_galaxy_shapelet_coeffs
for j in range(self.num_lenses):
q = np.random.uniform(0.7, 0.9)
phi = np.random.uniform(-90, 90)
self._lens_ellipticities.append([q, phi])
e1, e2 = phi_q2_ellipticity(phi*np.pi/180, q)
theta_E = np.random.uniform(1.2, 1.6)
self._kwargs_lenses.append([
{'theta_E': theta_E,
'gamma': np.random.uniform(1.9, 2.1),
'e1': e1,
'e2': e2,
'center_x': 0, 'center_y': 0},
{'gamma1': np.random.uniform(-0.08, 0.08),
'gamma2': np.random.uniform(-0.08, 0.08),
'ra_0': 0,
'dec_0': 0}
])
r = np.random.uniform(0.05, 0.35) * theta_E
phi = np.random.uniform(-np.pi, np.pi)
self._source_positions.append([r * np.cos(phi), r * np.sin(phi)])
self._source_ellipticities.append([
np.random.uniform(-0.3, 0.3), np.random.uniform(-0.3, 0.3)
])
if self._with_point_source:
self._image_positions.append(
self._get_point_image_positions(
self._kwargs_lenses[-1],
self._source_positions[-1]
))
self._weighted_exposure_time_maps = \
self._get_weighted_exposure_time_maps()
self.sim_apis = self._get_sim_apis(self._kwargs_model)
self.sim_apis_smooth_source = self._get_sim_apis(
self._kwargs_model_smooth_source)
self.image_sims = self._get_image_sims(self.sim_apis)
self._kwargs_light = self._get_kwargs_light()
self.simulated_data = self._simulate_data()
self._walker_ratio = 8
def _get_point_image_positions(self, kwargs_lens,
source_position):
"""
Solve the lens equation to get the image position.
:param kwargs_lens: lens model parameters in lenstronomy convention
:type kwargs_lens:
:param source_position: x and y positions of source
:type source_position: `tuple`
:return:
:rtype:
"""
lens_model = LensModel(self._kwargs_model['lens_model_list'])
lens_equation_solver = LensEquationSolver(lens_model)
x_image, y_image = lens_equation_solver.image_position_from_source(
kwargs_lens=kwargs_lens, sourcePos_x=source_position[0],
sourcePos_y=source_position[1], min_distance=0.01,
search_window=5,
precision_limit=10 ** (-10), num_iter_max=100)
return x_image, y_image
def _get_weighted_exposure_time_maps(self):
"""
Simulate cosmic ray hit map and return the weighted exposure time
map for all combinations of lenses and observing scenarios.
:return:
:rtype:
"""
weighted_exposure_time_maps = []
for j in range(self.num_lenses):
weighted_exposure_time_maps_scenarios = []
for n in range(self.num_scenarios):
weighted_exposure_time_maps_filters = []
for i in range(self.num_filters):
simulate_cosmic_ray = False
if 'simulate_cosmic_ray' in self.observing_scenarios[n]:
if not self.observing_scenarios[n][
'simulate_cosmic_ray'][i]:
simulate_cosmic_ray = False
else:
simulate_cosmic_ray = True
if self.observing_scenarios[n][
'simulate_cosmic_ray'][i]:
cosmic_ray_count_rate = 2.4e-3
else:
cosmic_ray_count_rate = \
self.observing_scenarios[n][
'simulate_cosmic_ray'][i]
if simulate_cosmic_ray:
weighted_exposure_time_maps_filters.append(
self._make_weighted_exposure_time_map(
self.observing_scenarios[n]['exposure_time'][i],
self.num_pixels[i],
self.pixel_scales[i],
self.observing_scenarios[n]['num_exposure'][i],
cosmic_ray_count_rate
)
)
else:
weighted_exposure_time_maps_filters.append(
np.ones((self.num_pixels[i], self.num_pixels[i])) *
self.observing_scenarios[n]['exposure_time'][i])
weighted_exposure_time_maps_scenarios.append(
weighted_exposure_time_maps_filters)
weighted_exposure_time_maps.append(
weighted_exposure_time_maps_scenarios)
return weighted_exposure_time_maps
@property
def walker_ratio(self):
"""
Get the emcee walker ratio.
:return:
:rtype:
"""
if hasattr(self, '_walker_ratio'):
return self._walker_ratio
else:
self._walker_ratio = 8
return self._walker_ratio
def set_walker_ratio(self, ratio):
"""
Set the emcee walker ratio.
:param ratio: walker ratio
:type ratio: `int`
:return:
:rtype:
"""
self._walker_ratio = ratio
def plot_simualated_data(self, vmax=None, vmin=None, figsize=None):
"""
Plot the montage of simulated lenses.
:param vmax: `vmax` for plotted lenses' log_10(flux).
:type vmax: `list`
:param vmin: `vmin` for plotted lenses' log_10(flux).
:type vmin: `list`
:param figsize: figure size
:type figsize: `tuple`
:return:
:rtype:
"""
nrows = self.num_lenses
ncols = self.num_scenarios * self.num_filters
fig, axes = plt.subplots(nrows=nrows,
ncols=ncols,
figsize=figsize if figsize else
(max(nrows * 3, 10), max(ncols * 5, 6))
)
if nrows == 1 and ncols == 1:
axes = [[axes]]
elif nrows == 1:
axes = [axes]
elif ncols == 1:
axes = [[ax] for ax in axes]
if vmax is None:
vmax = [2] * self.num_filters
if vmin is None:
vmin = [-4] * self.num_filters
for j in range(self.num_lenses):
for n in range(self.num_scenarios):
for i in range(self.num_filters):
axes[j][n*self.num_filters+i].matshow(
np.log10(self.simulated_data[j][n][i]),
cmap='cubehelix', origin='lower',
vmin=vmin[i],
vmax=vmax[i]
)
axes[j][n * self.num_filters + i].set_xticks([])
axes[j][n * self.num_filters + i].set_yticks([])
axes[j][n * self.num_filters + i].set_aspect('equal')
if j == 0:
axes[j][n * self.num_filters + i].set_title(
'Scenario: {}, filter: {}'.format(n+1, i+1))
if n == 0 and i == 0:
axes[j][n * self.num_filters + i].set_ylabel('Lens: '
'{}'.format(j+1))
fig.tight_layout()
return fig
def plot_exposure_maps(self, figsize=None):
"""
Plot the exposure map montage for all the combinations of lenses and
scenarios.
:param figsize: figure size
:type figsize: `tuple`
:return:
:rtype:
"""
nrows = self.num_lenses
ncols = self.num_scenarios * self.num_filters
fig, axes = plt.subplots(nrows=nrows,
ncols=ncols,
figsize=figsize if figsize else
(max(nrows*3, 10), max(ncols*5, 6))
)
if nrows == 1 and ncols == 1:
axes = [[axes]]
elif nrows == 1:
axes = [axes]
elif ncols == 1:
axes = [[ax] for ax in axes]
for j in range(self.num_lenses):
for n in range(self.num_scenarios):
for i in range(self.num_filters):
im = axes[j][n*self.num_filters+i].matshow(
self._weighted_exposure_time_maps[j][n][i] *
self.observing_scenarios[n]['num_exposure'][i],
cmap='viridis', origin='lower', vmin=0
)
divider = make_axes_locatable(axes[j][
n*self.num_filters+i])
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(im, cax=cax, label='(seconds)')
axes[j][n * self.num_filters + i].set_xticks([])
axes[j][n * self.num_filters + i].set_yticks([])
axes[j][n * self.num_filters + i].set_aspect('equal')
if j == 0:
axes[j][n * self.num_filters + i].set_title(
'Scenario: {}, filter: {}'.format(n+1, i+1))
if n == 0 and i == 0:
axes[j][n * self.num_filters + i].set_ylabel('Lens: '
'{}'.format(j+1))
fig.tight_layout()
return fig
def _simulate_data(self):
"""
Simulate data for all the combinations of lenses and scenarios.
:return:
:rtype:
"""
simulated_data_lenses = []
for j in range(self.num_lenses):
simulated_data_scenarios = []
for n in range(self.num_scenarios):
simulated_data_filters = []
for i in range(self.num_filters):
kwargs_lens_light, kwargs_source, \
kwargs_ps = self._kwargs_light[j][n][i]
simulated_image = self.image_sims[j][n][i].image(
self._kwargs_lenses[j],
kwargs_source, kwargs_lens_light, kwargs_ps,
source_add=True, lens_light_add=True,
point_source_add=True if self._with_point_source else False
)
simulated_image[simulated_image < 0] = 1e-10
simulated_image += self.sim_apis[j][n][i].noise_for_model(
model=simulated_image)
simulated_data_filters.append(simulated_image)
simulated_data_scenarios.append(simulated_data_filters)
simulated_data_lenses.append(simulated_data_scenarios)
return simulated_data_lenses
def _get_image_sims(self, sim_apis):
"""
Call the `image_model_class()` method for all the `SimAPI` class
instances for each combination of lens and scenarios.
:param sim_apis: `SimAPI` class instances
:type sim_apis: `list`
:return:
:rtype:
"""
image_sims = []
for j in range(self.num_lenses):
image_sims_scenarios = []
for n in range(self.num_scenarios):
image_sim_filters = []
for i in range(self.num_filters):
kwargs_numerics = {
'point_source_supersampling_factor':
self.filter_specifications[
'simulation_psf_supersampling_resolution'][i],
'supersampling_factor': 3
}
image_sim_filters.append(
sim_apis[j][n][i].image_model_class(kwargs_numerics)
)
image_sims_scenarios.append(image_sim_filters)
image_sims.append(image_sims_scenarios)
return image_sims
def _get_sim_apis(self, kwargs_model):
"""
Create `SimAPI` class instances for each combination of lenses and
scenarios.
:param kwargs_model:
:type kwargs_model:
:return:
:rtype:
"""
sim_apis = []
for j in range(self.num_lenses):
sim_api_scenarios = []
for n in range(self.num_scenarios):
sim_api_filters = []
kwargs_observation = self._get_filter_kwargs(j, n)
for i in range(self.num_filters):
sim_api_filters.append(SimAPI(numpix=self.num_pixels[i],
kwargs_single_band=kwargs_observation[i],
kwargs_model=kwargs_model))
sim_api_scenarios.append(sim_api_filters)
sim_apis.append(sim_api_scenarios)
return sim_apis
def _make_weighted_exposure_time_map(self, exposure_time, num_pixel,
pixel_scale, num_exposure,
cosmic_ray_count_rate=2.4e-3):
"""
Make weighted exposure time map from simulated cosmic ray hit maps.
:param exposure_time: total exposure time
:type exposure_time: `float`
:param num_pixel: number of pixels along one side
:type num_pixel: `int`
:param pixel_scale: size of pixel in arcsecond unit
:type pixel_scale: `float`
:param num_exposure: number of exposures
:type num_exposure: `int`
:param cosmic_ray_count_rate: cosmic ray count rate in
event/s/arcsec^2 unit
:type cosmic_ray_count_rate: `float`
:return:
:rtype:
"""
exposure_time_map = np.ones((num_pixel, num_pixel)) * exposure_time
cosmic_ray_weight_map = 0.
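        # Accumulate one independently simulated cosmic-ray hit map per
        # exposure; pixels hit in some exposures get a reduced effective
        # exposure time.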
for i in range(num_exposure):
cosmic_ray_count = cosmic_ray_count_rate * (num_pixel *
pixel_scale)**2 * exposure_time
cosmic_ray_weight_map += self._create_cr_hitmap(num_pixel,
pixel_scale,
cosmic_ray_count
)
exposure_time_map *= cosmic_ray_weight_map / num_exposure
# replace 0's with very small number to avoid divide by 0
exposure_time_map[exposure_time_map == 0.] = 1e-10
return exposure_time_map
def _get_filter_kwargs(self, n_lens, scenario_index):
"""
Get dictionary containing filter specifications for each filter for
one scenario.
        :param n_lens: index of lens
:type n_lens: `int`
:param scenario_index: index of observing scenario
:type scenario_index: `int`
:return:
:rtype:
"""
filter_kwargs = []
for i in range(self.num_filters):
exposure_time = self._weighted_exposure_time_maps[n_lens][
scenario_index][i]
filter_kwargs.append(
{
'read_noise': self.filter_specifications['read_noise'][i],
'ccd_gain': self.filter_specifications['ccd_gain'][i],
'sky_brightness': self.filter_specifications[
'sky_brightness'][i],
'magnitude_zero_point':
self.filter_specifications[
'magnitude_zero_point'][i],
'exposure_time': exposure_time,
'num_exposures': self.observing_scenarios[
scenario_index]['num_exposure'][i],
'seeing': self.filter_specifications['seeing'][i],
'pixel_scale': self.filter_specifications[
'pixel_scale'][i],
'psf_type': 'PIXEL',
'kernel_point_source': self.simulation_psfs[i],
'point_source_supersampling_factor': self.filter_specifications[
'simulation_psf_supersampling_resolution'][i]
})
return filter_kwargs
def _get_kwargs_light(self):
"""
Get `kwargs_light` for all lenses for lenstronomy.
:return:
:rtype:
"""
kwargs_light_lenses = []
for j in range(self.num_lenses):
kwargs_light_scenarios = []
lens_magnitudes = self.lens_magnitude_distributions()
source_magnitudes = self.source_magnitude_distributions()
if self._with_point_source:
ps_magnitudes = self.quasar_magnitude_distributions()
source_R_sersic = np.random.uniform(0.1, 0.2)
for n in range(self.num_scenarios):
kwargs_light = []
for i in range(self.num_filters):
q, phi = self._lens_ellipticities[j]
e1, e2 = phi_q2_ellipticity(phi*np.pi/180., q)
kwargs_lens_light_mag = [{
'magnitude': lens_magnitudes[i],
'R_sersic': 1.,
'n_sersic': 4,
'e1': e1, 'e2': e2,
'center_x': 0, 'center_y': 0
}]
kwargs_source_light_mag = [{
'magnitude': source_magnitudes[i],
'R_sersic': source_R_sersic,
'n_sersic': 1,
'e1': self._source_ellipticities[j][0],
                        'e2': self._source_ellipticities[j][1],
'center_x': self._source_positions[j][0],
'center_y': self._source_positions[j][1]
}]
kwargs_ps_mag = [{
'ra_source': self._source_positions[j][0],
'dec_source': self._source_positions[j][1],
'magnitude': ps_magnitudes[i]
}] if self._with_point_source else []
kwargs_lens_light, kwargs_source_smooth, kwargs_ps = \
self.sim_apis_smooth_source[j][n][
i].magnitude2amplitude(
kwargs_lens_light_mag, kwargs_source_light_mag,
kwargs_ps_mag)
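                    # The source is modeled with shapelets, so rescale the
                    # shapelet amplitudes such that the total shapelet flux
                    # matches the flux of the smooth Sersic profile carrying
                    # the requested magnitude.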
smooth_light_model = LightModel(['SERSIC_ELLIPSE'])
shapelet_light_model = LightModel(['SHAPELETS'])
x, y = util.make_grid(200, 0.01)
smooth_flux = np.sum(smooth_light_model.surface_brightness(
x, y, kwargs_source_smooth))
kwargs_source = [{
'n_max': self.filter_specifications[
'simulation_shapelet_n_max'][i],
'beta': source_R_sersic,
'amp': self._source_galaxy_shapelet_coeffs[j],
'center_x': self._source_positions[j][0],
'center_y': self._source_positions[j][1]
}]
shapelet_flux = np.sum(
shapelet_light_model.surface_brightness(
x, y, kwargs_source))
kwargs_source[0]['amp'] *= smooth_flux / shapelet_flux
kwargs_light.append([kwargs_lens_light, kwargs_source,
kwargs_ps])
kwargs_light_scenarios.append(kwargs_light)
kwargs_light_lenses.append(kwargs_light_scenarios)
return kwargs_light_lenses
def _get_kwargs_data(self, n_lens, n_scenario):
"""
Get `kwargs_data` for lenstronomy for one combination of lens and
scenario.
:param n_lens: index of lens
:type n_lens: `int`
:param n_scenario: index of scenario
:type n_scenario: `int`
:return:
:rtype:
"""
kwargs_data_list = []
for i in range(self.num_filters):
kwargs_data_list.append({
'image_data': self.simulated_data[n_lens][n_scenario][i],
'background_rms': self.sim_apis[n_lens][n_scenario][
i].background_noise,
'noise_map': None,
'exposure_time': (self._weighted_exposure_time_maps[n_lens][
n_scenario][i] *
self.observing_scenarios[n_scenario][
'num_exposure'][i]),
'ra_at_xy_0': -(self.num_pixels[i] - 1)/2. * self.pixel_scales[i],
'dec_at_xy_0': -(self.num_pixels[i] - 1)/2. * self.pixel_scales[i],
'transform_pix2angle': np.array([[self.pixel_scales[i], 0],
[0, self.pixel_scales[i]]
])
})
return kwargs_data_list
def _get_kwargs_psf(self):
"""
Get `kwargs_psf` for all filters for lenstronomy.
:return:
:rtype:
"""
kwargs_psf_list = []
for i in range(self.num_filters):
if self._psf_uncertainty_level > 0.:
max_noise = np.max(self.modeling_psfs[i]) * self._psf_uncertainty_level
exposure_time = np.max(self.modeling_psfs[i]) / max_noise**2
# F*t = (N*t)^2
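                # Model the PSF uncertainty as Poisson noise on the PSF with
                # an effective exposure time chosen so that the brightest PSF
                # pixel has a fractional error equal to the requested
                # uncertainty level.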
psf_uncertainty = np.sqrt(self.modeling_psfs[i] *
exposure_time) / exposure_time
else:
psf_uncertainty = None
kwargs_psf_list.append({
'psf_type': "PIXEL",
'kernel_point_source': self.modeling_psfs[i],
'kernel_point_source_init': self.modeling_psfs[i],
'psf_error_map': psf_uncertainty,
'point_source_supersampling_factor': self.filter_specifications[
'modeling_psf_supersampling_resolution'][i]
})
return kwargs_psf_list
def _get_kwargs_params(self, n_lens, n_scenario):
"""
Get `kwargs_params` for lenstronomy for one combination of
        lens and scenario.
:param n_lens: index of lens
:type n_lens: `int`
:param n_scenario: index of scenario
:type n_scenario: `int`
:return:
:rtype:
"""
# initial guess of non-linear parameters, starting from the truth
# for fast convergence of the MCMC
kwargs_lens_init = self._kwargs_lenses[n_lens]
kwargs_lens_light_init = [
self._kwargs_light[n_lens][n_scenario][i][0][0] for i in range(
self.num_filters)
]
kwargs_source_init = [
self._kwargs_light[n_lens][n_scenario][i][1][0] for i in range(
self.num_filters)
]
for i in range(self.num_filters):
kwargs_source_init[i]['n_max'] = self.filter_specifications[
'modeling_shapelet_n_max'][i]
kwargs_ps_init = [
self._kwargs_light[n_lens][n_scenario][0][2][0]
] if self._with_point_source else []
if self._with_point_source:
num_image = len(self._image_positions[n_lens][0])
kwargs_ps_init[0]['ra_source'] = kwargs_source_init[0]['center_x']
kwargs_ps_init[0]['dec_source'] = kwargs_source_init[0]['center_y']
# kwargs_ps_init[0]['ra_image'] = self._image_positions[n_lens][0]
# kwargs_ps_init[0]['dec_image'] = self._image_positions[n_lens][1]
# initial spread in parameter estimation
kwargs_lens_sigma = [
{'theta_E': 0.01, 'e1': 0.01, 'e2': 0.01, 'gamma': .02,
'center_x': 0.05, 'center_y': 0.05},
{'gamma1': 0.01, 'gamma2': 0.01}]
kwargs_lens_light_sigma = [
{'R_sersic': 0.05, 'n_sersic': 0.1, 'e1': 0.01, 'e2': 0.01,
'center_x': .01, 'center_y': 0.01} for _ in range(
self.num_filters)]
kwargs_source_sigma = [
{'beta': 0.01,
#'n_sersic': .05, 'e1': 0.05, 'e2': 0.05,
'center_x': 0.05, 'center_y': 0.05} for _ in range(
self.num_filters)]
kwargs_ps_sigma = [{#'ra_image': 5e-5*np.ones(num_image),
#'dec_image': 5e-5*np.ones(num_image),
'ra_source': 5e-5,
'dec_source': 5e-5
}] if self._with_point_source else []
# hard bound lower limit in parameter space
kwargs_lower_lens = [
{'theta_E': 0, 'e1': -0.5, 'e2': -0.5, 'gamma': 1.5,
'center_x': -10., 'center_y': -10},
{'gamma1': -0.5, 'gamma2': -0.5}]
kwargs_lower_source = [
{'beta': 0.001,
#'n_sersic': 0.5, 'e1': -0.5, 'e2': -0.5,
'center_x': -10, 'center_y': -10} for _ in range(
self.num_filters)]
kwargs_lower_lens_light = [
{'R_sersic': 0.001, 'n_sersic': 0.5, 'e1': -0.5, 'e2': -0.5,
'center_x': -10, 'center_y': -10} for _ in range(
self.num_filters)]
kwargs_lower_ps = [{#'ra_image': -1.5*np.ones(num_image),
#'dec_image': -1.5*np.ones(num_image),
'ra_source': -1.5,
'dec_source': -1.5
}] if self._with_point_source else []
# hard bound upper limit in parameter space
kwargs_upper_lens = [
{'theta_E': 10, 'e1': 0.5, 'e2': 0.5, 'gamma': 2.5,
'center_x': 10., 'center_y': 10},
{'gamma1': 0.5, 'gamma2': 0.5}]
kwargs_upper_source = [
{'beta': 10,
#'n_sersic': 5., 'e1': 0.5, 'e2': 0.5,
'center_x': 10, 'center_y': 10} for _ in range(self.num_filters)]
kwargs_upper_lens_light = [
{'R_sersic': 10, 'n_sersic': 5., 'e1': 0.5, 'e2': 0.5,
'center_x': 10, 'center_y': 10} for _ in range(self.num_filters)]
kwargs_upper_ps = [{#'ra_image': 1.5*np.ones(num_image),
#'dec_image': 1.5*np.ones(num_image)
'ra_source': 1.5,
'dec_source': 1.5
}] if self._with_point_source else []
# keeping parameters fixed
kwargs_lens_fixed = [{}, {'ra_0': 0, 'dec_0': 0}]
kwargs_source_fixed = [{'n_max': self.filter_specifications[
'modeling_shapelet_n_max'][i]} for i in range(
self.num_filters)]
kwargs_lens_light_fixed = [{} for _ in range(self.num_filters)]
kwargs_ps_fixed = [{}] if self._with_point_source else []
lens_params = [kwargs_lens_init, kwargs_lens_sigma, kwargs_lens_fixed,
kwargs_lower_lens, kwargs_upper_lens]
source_params = [kwargs_source_init, kwargs_source_sigma,
kwargs_source_fixed, kwargs_lower_source,
kwargs_upper_source]
lens_light_params = [kwargs_lens_light_init, kwargs_lens_light_sigma,
kwargs_lens_light_fixed, kwargs_lower_lens_light,
kwargs_upper_lens_light]
ps_params = [kwargs_ps_init, kwargs_ps_sigma, kwargs_ps_fixed,
kwargs_lower_ps, kwargs_upper_ps]
kwargs_params = {'lens_model': lens_params,
'source_model': source_params,
'lens_light_model': lens_light_params,
'point_source_model': ps_params}
return kwargs_params
def _get_multi_band_list(self, n_lens, n_scenario):
"""
Get `multi_band_list` for lenstronomy for one combination of
        lens and scenario.
:param n_lens: index of lens
:type n_lens: `int`
:param n_scenario: index of scenario
:type n_scenario: `int`
:return:
:rtype:
"""
kwargs_data_list = self._get_kwargs_data(n_lens, n_scenario)
kwargs_psf_list = self._get_kwargs_psf()
multi_band_list = []
for i in range(self.num_filters):
psf_supersampling_factor = self.filter_specifications[
'simulation_psf_supersampling_resolution'][i]
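            # Use supersampled convolution only when the PSF is provided at a
            # higher resolution than the data pixels.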
kwargs_numerics = {'supersampling_factor': 3,
'supersampling_convolution': True if
psf_supersampling_factor > 1 else False,
'supersampling_kernel_size': 5,
'point_source_supersampling_factor':
psf_supersampling_factor,
'compute_mode': 'adaptive',
}
image_band = [kwargs_data_list[i], kwargs_psf_list[i],
kwargs_numerics]
multi_band_list.append(image_band)
return multi_band_list
def _get_kwargs_constraints(self, n_lens, n_scenario):
"""
Get `kwargs_constraints` for lenstronomy for one combination of
        lens and scenario.
:param n_lens: index of lens
:type n_lens: `int`
:param n_scenario: index of scenario
:type n_scenario: `int`
:return:
:rtype:
"""
kwargs_constraints = {
'joint_lens_with_light': [[0, 0, ['center_x',
'center_y'
]]] if not
self._with_point_source else [],
'joint_lens_light_with_lens_light': [[0, i, ['center_x',
'center_y',
'e1', 'e2',
'n_sersic'
]] for i
in range(1,
self.num_filters)],
'joint_source_with_source': [[0, i, ['center_x',
'center_y',
'beta'
]] for i
in range(1, self.num_filters)],
'joint_source_with_point_source': [[0, 0]] if self._with_point_source
else [],
# 'num_point_source_list': None,
# 'solver_type': 'None'
}
if self._with_point_source:
num_images = len(self._image_positions[n_lens][0])
# kwargs_constraints['solver_type'] = 'PROFILE_SHEAR' if \
# num_images == 4 else 'CENTER'
# kwargs_constraints['num_point_source_list'] = [num_images]
return kwargs_constraints
def _get_kwargs_likelihood(self, n_lens, n_scenario):
"""
Get `kwargs_likelihood` for lenstronomy for one combination of
        lens and scenario.
:param n_lens: index of lens
:type n_lens: `int`
:param n_scenario: index of scenario
:type n_scenario: `int`
:return:
:rtype:
"""
total_exposure_times = np.array(self.observing_scenarios[n_scenario][
'exposure_time']) \
* np.array(self.observing_scenarios[n_scenario][
'num_exposure'])
bands_compute = []
for i in range(self.num_filters):
bands_compute.append(True if total_exposure_times[i] > 0 else False)
mask_list = []
for i in range(self.num_filters):
if 'simulate_cosmic_ray' in self.observing_scenarios[n_scenario]:
if self.observing_scenarios[n_scenario]['simulate_cosmic_ray'][i]:
weighted_exposure_time_map = \
self._weighted_exposure_time_maps[n_lens][n_scenario][i]
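                    # Mask out pixels flagged as cosmic-ray hits (effective
                    # exposure time ~0) so they are excluded from the
                    # likelihood.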
mask = np.ones_like(weighted_exposure_time_map)
mask[weighted_exposure_time_map <= 1e-10] = 0.
mask_list.append(mask)
else:
mask_list.append(None)
else:
mask_list.append(None)
        # for galaxy-galaxy lenses
kwargs_likelihood = {
'force_no_add_image': False,
'source_marg': False,
# 'point_source_likelihood': True,
# 'position_uncertainty': 0.00004,
# 'check_solver': False,
# 'solver_tolerance': 0.001,
'check_positive_flux': True,
'check_bounds': True,
'bands_compute': bands_compute,
'image_likelihood_mask_list': mask_list
}
return kwargs_likelihood
def _fit_one_model(self, n_lens, n_scenario, num_threads=1, n_run=500):
"""
Run MCMC chain for one combination of lens and scenario.
:param n_lens: index of lens
:type n_lens: `int`
:param n_scenario: index of scenario
:type n_scenario: `int`
:param num_threads: number of threads for multiprocessing,
if 1 multiprocessing will not be used.
:type num_threads: `int`
:param n_run: number of MCMC steps
:type n_run: `int`
:return:
:rtype:
"""
multi_band_list = self._get_multi_band_list(n_lens, n_scenario)
kwargs_data_joint = {'multi_band_list': multi_band_list,
'multi_band_type': 'multi-linear'}
kwargs_params = self._get_kwargs_params(n_lens, n_scenario)
kwargs_model = self._get_kwargs_model()
kwargs_constraints = self._get_kwargs_constraints(n_lens, n_scenario)
kwargs_likelihood = self._get_kwargs_likelihood(n_lens, n_scenario)
fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model,
kwargs_constraints, kwargs_likelihood,
kwargs_params)
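        # Walkers start in a tight ball (sigma_scale) around the truth-based
        # initial values, so no separate burn-in phase is requested here
        # (n_burn=0).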
fitting_kwargs_list = [
['MCMC',
{'n_burn': 0, 'n_run': n_run, 'walkerRatio': 8,
'sigma_scale': 1e-2, 'progress': True,
'threadCount': num_threads}]
]
chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)
kwargs_result = fitting_seq.best_fit()
return [[kwargs_data_joint, kwargs_model,
kwargs_constraints, kwargs_likelihood, kwargs_params],
[chain_list, kwargs_result]]
def _extend_chain(self, n_lens, n_scenario, run_id, num_threads=1,
n_run=500, save_directory='./temp/'):
"""
Extend MCMC chain for one combination of lens and scenario.
:param n_lens: index of lens
:type n_lens: `int`
:param n_scenario: index of scenario
:type n_scenario: `int`
        :param run_id: run ID of the previous run to be extended
:type run_id: `str`
:param num_threads: number of threads for multiprocessing,
if 1 multiprocessing will not be used
:type num_threads: `int`
:param n_run: number of new MCMC steps
:type n_run: `int`
:param save_directory: save directory, must be same with the
previous run
:type save_directory: `str`
:return:
:rtype:
"""
save_file = save_directory + '{}_lens_{}_scenario_{' \
'}.pickle'.format(run_id, n_lens,
n_scenario)
with open(save_file, 'rb') as f:
[input, output] = pickle.load(f)
[kwargs_data_joint, kwargs_model,
kwargs_constraints, kwargs_likelihood, kwargs_params] = input
chain_list = output[0]
samples_mcmc = chain_list[0][1]
n_params = samples_mcmc.shape[1]
n_walkers = self.walker_ratio * n_params
n_step = int(samples_mcmc.shape[0] / n_walkers)
print('N_step: {}, N_walkers: {}, N_params: {}'.format(n_step,
n_walkers,
n_params))
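        # Reshape the flattened (n_step * n_walkers, n_params) sample array
        # back into per-walker chains; the final walker positions seed the
        # extended run through init_samples.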
chain = np.empty((n_walkers, n_step, n_params))
for i in np.arange(n_params):
samples = samples_mcmc[:, i].T
chain[:, :, i] = samples.reshape((n_step, n_walkers)).T
fitting_seq = FittingSequence(kwargs_data_joint, kwargs_model,
kwargs_constraints, kwargs_likelihood,
kwargs_params)
fitting_kwargs_list = [
['MCMC',
{'n_burn': 0, 'n_run': n_run, 'walkerRatio': 8,
'init_samples': chain[:, -1, :],
#'sigma_scale': 3,
'progress': True,
'threadCount': num_threads}]
]
new_chain_list = fitting_seq.fit_sequence(fitting_kwargs_list)
kwargs_result = fitting_seq.best_fit()
chain_list.append(new_chain_list[0])
return [[kwargs_data_joint, kwargs_model,
kwargs_constraints, kwargs_likelihood, kwargs_params],
[new_chain_list, kwargs_result]]
def _get_kwargs_model(self):
"""
Get `kwargs_model` for lenstronomy.
:return:
:rtype:
"""
kwargs_model = copy.deepcopy(self._kwargs_model)
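        # Duplicate the lens-light and source-light profiles once per filter
        # (with index lists mapping each band to its own profile) while the
        # mass model is shared across bands.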
kwargs_model['lens_light_model_list'] = [self._kwargs_model[
'lens_light_model_list'][0] for _ in range(
self.num_filters)]
kwargs_model['source_light_model_list'] = [self._kwargs_model[
'source_light_model_list'][0] for _ in range(
self.num_filters)]
kwargs_model['index_lens_light_model_list'] = [[i] for i in range(
self.num_filters)]
kwargs_model['index_source_light_model_list'] = [[i] for i in range(
self.num_filters)]
if self._with_point_source:
kwargs_model['point_source_model_list'] = ['SOURCE_POSITION']
return kwargs_model
def fit_models(self, run_id='', num_threads=1, n_run=500,
save_directory='./temp/', start_lens=0):
"""
Run MCMC chains for all combinations of lenses and scenarios.
:param run_id: run ID to differentiate between multiple runs
:type run_id: `str`
:param num_threads: number of multiprocessing threads,
if 1 multiprocessing will not be used
:type num_threads: `int`
:param n_run: number of MCMC steps
:type n_run: `int`
:param save_directory: directory to save MCMC outputs
:type save_directory: `str`
:param start_lens: lens index to start MCMC runs from, to resume a
stopped run
:type start_lens: `int`
:return:
:rtype:
"""
for j in range(start_lens, self.num_lenses):
for n in range(self.num_scenarios):
print('Running lens: {}/{}, scenario: {}/{}'.format(
j+1, self.num_lenses, n+1, self.num_scenarios
))
model_fit = self._fit_one_model(
j, n,
num_threads=num_threads,
n_run=n_run
)
save_file = save_directory + '{}_lens_{}_scenario_{' \
'}.pickle'.format(run_id, j, n)
with open(save_file, 'wb') as f:
pickle.dump(model_fit, f)
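    # A minimal usage sketch (assuming an already constructed instance of
    # this class named `simulator`):
    #
    #     simulator.fit_models(run_id='test_run', num_threads=4, n_run=500,
    #                          save_directory='./temp/')
    #     fig = simulator.plot_lens_models('test_run', num_lens=0,
    #                                      num_scenario=0,
    #                                      num_filters=simulator.num_filters)
    #
    # Each (lens, scenario) fit is pickled separately, so an interrupted run
    # can be resumed with `start_lens` or extended later with `extend_chains`.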
def extend_chains(self, num_lenses, num_scenarios,
run_id='', extend_id='', num_threads=1, n_run=500,
save_directory='./temp/', start_lens=0):
"""
Extend chains for all combinations of lenses and scenarios.
:param num_lenses: total number of lenses in the setup
:type num_lenses: `int`
:param num_scenarios: total number of scenarios in the setup
:type num_scenarios: `int`
:param run_id: run ID to differentiate between different runs
:type run_id: `str`
:param extend_id: extension ID
:type extend_id: `str`
:param num_threads: number of multiprocessing threads,
if 1 multiprocessing will not be used
:type num_threads: `int`
:param n_run: number of MCMC steps
:type n_run: `int`
:param save_directory: directory to save outputs, must be same with
the save directory of the previous run to be extended
:type save_directory: `str`
:param start_lens: index of lens to start from, to resume a
            previously stopped call to this method
:type start_lens: `int`
:return:
:rtype:
"""
for j in range(start_lens, num_lenses):
for n in range(num_scenarios):
print('Running lens: {}/{}, scenario: {}/{}'.format(
j+1, self.num_lenses, n+1, self.num_scenarios
))
model_fit = self._extend_chain(
j, n, run_id,
num_threads=num_threads,
n_run=n_run, save_directory=save_directory
)
save_file = save_directory + '{}{}_lens_{}_scenario_{' \
'}.pickle'.format(run_id,
extend_id, j, n)
with open(save_file, 'wb') as f:
pickle.dump(model_fit, f)
@classmethod
def plot_lens_models(self, run_id, num_lens, num_scenario, num_filters=1,
save_directory='./temp/'):
"""
Plot the lens model of one combination of lens and scenario after
running the MCMC chain.
:param run_id: run ID
:type run_id: `str`
:param num_lens: index of lens
:type num_lens: `int`
:param num_scenario: index of scenario
:type num_scenario: `int`
:param num_filters: number of filters
:type num_filters: `int`
:param save_directory: directory of saved output files
:type save_directory: `str`
:return:
:rtype:
"""
save_file = save_directory + '{}_lens_{}_scenario_{' \
'}.pickle'.format(run_id, num_lens,
num_scenario)
with open(save_file, 'rb') as f:
[input, output] = pickle.load(f)
multi_band_list = input[0]['multi_band_list']
kwargs_model = input[1]
kwargs_likelihood = input[3]
kwargs_result = output[1]
lens_plot = ModelPlot(multi_band_list, kwargs_model,
kwargs_result,
arrow_size=0.02, # cmap_string=cmap,
likelihood_mask_list=kwargs_likelihood[
'image_likelihood_mask_list'],
multi_band_type='multi-linear',
cmap_string='cubehelix',
# , source_marg=True, linear_prior=[1e5, 1e5, 1e5]
)
fig, axes = plt.subplots(num_filters, 3,
figsize=(num_filters*8, 10),
sharex=False, sharey=False)
if num_filters == 1:
axes = [axes]
for i in range(num_filters):
lens_plot.data_plot(ax=axes[i][0], band_index=i,
v_max=2, v_min=-4,
text='Filter {}'.format(i+1))
lens_plot.model_plot(ax=axes[i][1], band_index=i,
v_max=2, v_min=-4)
lens_plot.normalized_residual_plot(ax=axes[i][2], band_index=i,
v_max=5, v_min=-5, cmap='RdBu')
return fig
def plot_mcmc_trace(self, run_id, n_lens, n_scenario,
save_directory='./temp/'):
"""
Plot MCMC trace for one combination of lens and scenario.
:param run_id: run ID
:type run_id: `str`
:param n_lens: index of lens
:type n_lens: `int`
:param n_scenario: index of scenario
:type n_scenario: `int`
:param save_directory: directory that has the saved MCMC output
:type save_directory: `str`
:return:
:rtype:
"""
save_file = save_directory + '{}_lens_{}_scenario_{' \
'}.pickle'.format(run_id, n_lens,
n_scenario)
with open(save_file, 'rb') as f:
[_, output] = pickle.load(f)
chain_list = output[0]
samples_mcmc = chain_list[-1][1]
param_mcmc = chain_list[-1][2]
n_params = samples_mcmc.shape[1]
n_walkers = self.walker_ratio * n_params
n_step = int(samples_mcmc.shape[0] / n_walkers)
print('N_step: {}, N_walkers: {}, N_params: {}'.format(n_step,
n_walkers,
n_params))
chain = np.empty((n_walkers, n_step, n_params))
for i in np.arange(n_params):
samples = samples_mcmc[:, i].T
chain[:, :, i] = samples.reshape((n_step, n_walkers)).T
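        # Per-step summary statistics across walkers (mean, median, 16th/84th
        # percentiles) used to draw the trace and its spread.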
mean_pos = np.zeros((n_params, n_step))
median_pos = np.zeros((n_params, n_step))
std_pos = np.zeros((n_params, n_step))
q16_pos = np.zeros((n_params, n_step))
q84_pos = np.zeros((n_params, n_step))
for i in np.arange(n_params):
for j in np.arange(n_step):
mean_pos[i][j] = np.mean(chain[:, j, i])
median_pos[i][j] = np.median(chain[:, j, i])
std_pos[i][j] = np.std(chain[:, j, i])
q16_pos[i][j] = np.percentile(chain[:, j, i], 16.)
q84_pos[i][j] = np.percentile(chain[:, j, i], 84.)
fig, ax = plt.subplots(n_params, sharex=True, figsize=(8, 6))
burnin = -1
last = n_step
medians = []
# param_values = [median_pos[0][last - 1],
# (q84_pos[0][last - 1] - q16_pos[0][last - 1]) / 2,
# median_pos[1][last - 1],
# (q84_pos[1][last - 1] - q16_pos[1][last - 1]) / 2]
for i in range(n_params):
print(param_mcmc[i],
'{:.4f} ± {:.4f}'.format(median_pos[i][last - 1], (
q84_pos[i][last - 1] - q16_pos[i][
last - 1]) / 2))
ax[i].plot(median_pos[i][:last], c='g')
ax[i].axhline(np.median(median_pos[i][burnin:last]), c='r',
lw=1)
ax[i].fill_between(np.arange(last), q84_pos[i][:last],
q16_pos[i][:last], alpha=0.4)
ax[i].set_ylabel(param_mcmc[i], fontsize=10)
ax[i].set_xlim(0, last)
medians.append(np.median(median_pos[i][burnin:last]))
fig.set_size_inches((12., 2 * len(param_mcmc)))
return fig
@staticmethod
def _create_cr_hitmap(num_pix, pixel_scale, cosmic_ray_count):
"""
Simulate a cosmic ray hit map.
:param num_pix: number of pixels
:type num_pix: `int`
:param pixel_scale: pixel size
:type pixel_scale: `float`
:param cosmic_ray_count: cosmic ray count
:type cosmic_ray_count: `int`
:return:
:rtype:
"""
map = np.ones((num_pix, num_pix))
image_size = num_pix * pixel_scale
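        # Draw the number of cosmic-ray hits from a normal approximation to a
        # Poisson distribution; retry a few times if the draw is non-positive.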
for i in range(10):
n_cr = int(np.random.normal(loc=cosmic_ray_count,
scale=np.sqrt(cosmic_ray_count)
))
if n_cr > 0:
break
if n_cr < 1:
n_cr = 0
for i in range(n_cr):
            x = np.random.randint(0, num_pix)
"""
Copyright 2021 <NAME>, <NAME>, GlaxoSmithKline plc; <NAME>, University of Oxford; <NAME>, MIT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import torch
import numpy as np
from sklearn import ensemble
from slingpy.models.torch_model import TorchModel
from slingpy.models.sklearn_model import SklearnModel
from typing import List, AnyStr, Union, Type, Optional
from slingpy.models.abstract_base_model import AbstractBaseModel
from genedisco.models.abstract_meta_models import AbstractMetaModel
from slingpy.models.pickleable_base_model import PickleableBaseModel
from slingpy.data_access.data_sources.abstract_data_source import AbstractDataSource
from genedisco.models.abstract_embedding_retrieval_model import EmbeddingRetrievalModel
class SklearnRandomForestRegressor(AbstractMetaModel, SklearnModel):
"""The random forest regressor model from sklean (non-deep models)"""
def __init__(self,
base_module: ensemble.BaseEnsemble,
num_target_samples: Optional[int] = None):
"""The wrapping model for scikit learn ensemble regressors.
The wrapping model augments the base_module with the capability to
output uncertainty along with the average prediction.
Args:
base_module: The base forest ensemble module. It can be from
{RandomForestRegressor, ExtraTreesRegressor}
"""
SklearnModel.__init__(self, base_module)
self.n_estimators = base_module.n_estimators
self.num_target_samples = num_target_samples
def save(self, file_path: AnyStr):
AbstractMetaModel.save(self, file_path)
def save_folder(self, save_folder_path, overwrite=True):
file_path = os.path.join(save_folder_path, "model.pickle")
PickleableBaseModel.save(self, file_path)
@classmethod
def load_folder(cls: Type["TarfileSerialisationBaseModel"], save_folder_path: AnyStr) -> AbstractBaseModel:
file_path = os.path.join(save_folder_path, "model.pickle")
model = PickleableBaseModel.load(file_path)
return model
def get_samples(self, x: np.ndarray, k: Optional[int] = 1):
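        # Treat individual trees of the ensemble as approximate posterior
        # samples and draw k of them at random (with replacement).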
        k = k or self.n_estimators
if k > self.n_estimators:
raise ValueError("The number of requested samples cannot exceed"+
" the number of estimators in ensemble models.")
else:
random_indices = np.random.randint(self.n_estimators, size=k)
y_samples = [self.model.estimators_[i].predict(x)
for i in random_indices]
y_samples = np.swapaxes(y_samples, 0, 1)
return y_samples
def get_model_prediction(self, data, return_std_and_margin):
if return_std_and_margin:
y_samples = self.get_samples(data, self.num_target_samples)
            if y_samples.ndim == 3 and y_samples.shape[-1] > 1:
                raise NotImplementedError("Output uncertainty is only "
                                          "implemented for 1D output.")
else:
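                # Summarize the spread of per-estimator predictions as
                # epistemic uncertainty: the standard deviation and the
                # max-minus-min margin across samples.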
y_stds = np.std(y_samples, axis=1)
y_margins = np.max(y_samples, axis=1) - np.min(y_samples, axis=1)
y_preds = self.model.predict(data)
return [y_preds, y_stds, y_margins]
else:
y_pred = self.model.predict(data)
return y_pred
def predict(self,
dataset_x: AbstractDataSource,
batch_size: int = 256,
return_std_and_margin: bool = False) -> List[np.ndarray]:
"""
Args:
dataset_x: Input dataset to be evaluated.
batch_size:
return_std_and_margin: If True, return the epistemic uncertainty of the output.
Returns: If return_std_and_margin is True, returns ([output_means], [output_stds]).
Otherwise, returns [output_means]
"""
if self.model is None:
self.model = self.build()
available_indices = dataset_x.get_row_names()
if return_std_and_margin:
all_ids, y_preds, y_stds, y_trues, y_margins = [], [], [], [], []
else:
all_ids, y_preds, y_trues = [], [], []
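        # Predict in mini-batches over the data source rows; per-batch
        # outputs are concatenated at the end.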
while len(available_indices) > 0:
current_indices = available_indices[:batch_size]
available_indices = available_indices[batch_size:]
data = self.merge_strategy_x.resolve(
dataset_x.subset(list(current_indices)).get_data())[0]
if return_std_and_margin:
y_pred, y_std, y_margin = self.get_model_prediction(data, return_std_and_margin=True)
y_preds.append(y_pred)
y_stds.append(y_std)
y_margins.append(y_margin)
else:
y_pred = self.get_model_prediction(data, return_std_and_margin=False)
y_preds.append(y_pred)
y_preds = np.concatenate(y_preds, axis=0)
if return_std_and_margin:
y_stds = np.concatenate(y_stds, axis=0)
            y_margins = np.concatenate(y_margins, axis=0)