"""
Unit tests for optimizers.
"""
import numpy as np
import pytest
from numpy.linalg import norm
from sklearn.base import BaseEstimator
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lasso
from sklearn.utils.validation import check_is_fitted
from pysindy.optimizers import ConstrainedSR3
from pysindy.optimizers import SINDyOptimizer
from pysindy.optimizers import SR3
from pysindy.optimizers import STLSQ
from pysindy.utils import supports_multiple_targets
class DummyLinearModel(BaseEstimator):
# Does not natively support multiple targets
def fit(self, x, y):
self.coef_ = np.ones(x.shape[1])
self.intercept_ = 0
return self
def predict(self, x):
return x
class DummyEmptyModel(BaseEstimator):
# Does not have fit or predict methods
def __init__(self):
self.fit_intercept = False
self.normalize = False
class DummyModelNoCoef(BaseEstimator):
# Does not set the coef_ attribute
def fit(self, x, y):
self.intercept_ = 0
return self
def predict(self, x):
return x
@pytest.mark.parametrize(
"cls, support",
[
(Lasso, True),
(STLSQ, True),
(SR3, True),
(ConstrainedSR3, True),
(DummyLinearModel, False),
],
)
def test_supports_multiple_targets(cls, support):
assert supports_multiple_targets(cls()) == support
@pytest.fixture(params=["data_derivative_1d", "data_derivative_2d"])
def data(request):
return request.getfixturevalue(request.param)
@pytest.mark.parametrize(
"optimizer",
[
STLSQ(),
SR3(),
ConstrainedSR3(),
Lasso(fit_intercept=False),
ElasticNet(fit_intercept=False),
DummyLinearModel(),
],
)
def test_fit(data, optimizer):
x, x_dot = data
if len(x.shape) == 1:
x = x.reshape(-1, 1)
opt = SINDyOptimizer(optimizer, unbias=False)
opt.fit(x, x_dot)
check_is_fitted(opt)
assert opt.complexity >= 0
if len(x_dot.shape) > 1:
assert opt.coef_.shape == (x.shape[1], x_dot.shape[1])
else:
assert opt.coef_.shape == (1, x.shape[1])
@pytest.mark.parametrize(
"optimizer",
[STLSQ(), SR3()],
)
def test_not_fitted(optimizer):
with pytest.raises(NotFittedError):
optimizer.predict(np.ones((1, 3)))
@pytest.mark.parametrize("optimizer", [STLSQ(), SR3()])
def test_complexity_not_fitted(optimizer, data_derivative_2d):
with pytest.raises(NotFittedError):
optimizer.complexity
x, _ = data_derivative_2d
optimizer.fit(x, x)
assert optimizer.complexity > 0
@pytest.mark.parametrize(
"kwargs", [{"normalize": True}, {"fit_intercept": True}, {"copy_X": False}]
)
def test_alternate_parameters(data_derivative_1d, kwargs):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = STLSQ(**kwargs)
model.fit(x, x_dot)
model.fit(x, x_dot, sample_weight=x[:, 0])
check_is_fitted(model)
@pytest.mark.parametrize("optimizer", [STLSQ, SR3, ConstrainedSR3])
@pytest.mark.parametrize("params", [dict(threshold=-1), dict(max_iter=0)])
def test_general_bad_parameters(optimizer, params):
with pytest.raises(ValueError):
optimizer(**params)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
@pytest.mark.parametrize(
"params",
[dict(nu=0), dict(tol=0), dict(trimming_fraction=-1), dict(trimming_fraction=2)],
)
def test_sr3_bad_parameters(optimizer, params):
with pytest.raises(ValueError):
optimizer(**params)
@pytest.mark.parametrize(
"error, optimizer, params",
[
(ValueError, STLSQ, dict(alpha=-1)),
(NotImplementedError, SR3, dict(thresholder="l2")),
(NotImplementedError, ConstrainedSR3, dict(thresholder="l2")),
(ValueError, ConstrainedSR3, dict(thresholder="weighted_l0", thresholds=None)),
(ValueError, ConstrainedSR3, dict(thresholder="weighted_l1", thresholds=None)),
(ValueError, ConstrainedSR3, dict(thresholds=-np.ones((5, 5)))),
],
)
def test_specific_bad_parameters(error, optimizer, params):
with pytest.raises(error):
optimizer(**params)
def test_bad_optimizers(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
with pytest.raises(AttributeError):
opt = SINDyOptimizer(DummyEmptyModel())
with pytest.raises(AttributeError):
opt = SINDyOptimizer(DummyModelNoCoef())
opt.fit(x, x_dot)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_initial_guess_sr3(optimizer):
x = np.random.standard_normal((10, 3))
x_dot = np.random.standard_normal((10, 2))
control_model = optimizer(max_iter=1).fit(x, x_dot)
initial_guess = np.random.standard_normal((x_dot.shape[1], x.shape[1]))
guess_model = optimizer(max_iter=1, initial_guess=initial_guess).fit(x, x_dot)
assert np.any(np.not_equal(control_model.coef_, guess_model.coef_))
# The different capitalizations are intentional;
# I want to make sure different versions are recognized
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
@pytest.mark.parametrize("thresholder", ["L0", "l1"])
def test_prox_functions(data_derivative_1d, optimizer, thresholder):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = optimizer(thresholder=thresholder)
model.fit(x, x_dot)
check_is_fitted(model)
def test_cad_prox_function(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = SR3(thresholder="cAd")
model.fit(x, x_dot)
check_is_fitted(model)
@pytest.mark.parametrize("thresholder", ["weighted_l0", "weighted_l1"])
def test_weighted_prox_functions(data, thresholder):
x, x_dot = data
if x.ndim == 1:
x = x.reshape(-1, 1)
thresholds = np.ones((1, 1))
else:
thresholds = np.ones((x_dot.shape[1], x.shape[1]))
model = ConstrainedSR3(thresholder=thresholder, thresholds=thresholds)
model.fit(x, x_dot)
check_is_fitted(model)
@pytest.mark.parametrize("thresholder", ["L0", "l1"])
def test_constrained_sr3_prox_functions(data_derivative_1d, thresholder):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = ConstrainedSR3(thresholder=thresholder)
model.fit(x, x_dot)
check_is_fitted(model)
def test_unbias(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
optimizer_biased = SINDyOptimizer(
STLSQ(threshold=0.01, alpha=0.1, max_iter=1), unbias=False
)
optimizer_biased.fit(x, x_dot)
optimizer_unbiased = SINDyOptimizer(
STLSQ(threshold=0.01, alpha=0.1, max_iter=1), unbias=True
)
optimizer_unbiased.fit(x, x_dot)
assert (
norm(optimizer_biased.coef_ - optimizer_unbiased.coef_)
/ norm(optimizer_unbiased.coef_)
> 1e-9
)
def test_unbias_external(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
optimizer_biased = SINDyOptimizer(
Lasso(alpha=0.1, fit_intercept=False, max_iter=1), unbias=False
)
optimizer_biased.fit(x, x_dot)
optimizer_unbiased = SINDyOptimizer(
Lasso(alpha=0.1, fit_intercept=False, max_iter=1), unbias=True
)
optimizer_unbiased.fit(x, x_dot)
assert (
norm(optimizer_biased.coef_ - optimizer_unbiased.coef_)
/ norm(optimizer_unbiased.coef_)
> 1e-9
)
import numpy as np
from scipy.optimize import linear_sum_assignment
from collections import defaultdict
from utils.utils import parse_camera_param
def global2pixel(person_coords, camera_id, camera_param_dict):
# det : X Y Z
world_coord = person_coords / camera_param_dict['discretization_factor'] + camera_param_dict['min_volume']
trans_coord = world_coord - camera_param_dict[camera_id]['Translation']
uvw = np.linalg.inv(camera_param_dict[camera_id]['Rotation']) @ trans_coord.transpose(1, 0)
uvw = uvw.transpose(1, 0)
pixel_coords = uvw / camera_param_dict[camera_id]['FInv'] / uvw[:, 2:3] + camera_param_dict[camera_id]['C']
return pixel_coords[:, :2]
def batch_euc_dist(point1, point2):
point1_reshape = point1[:, np.newaxis, :]
point2_reshape = point2[np.newaxis, :, :]
sub = point1_reshape - point2_reshape
dist = np.linalg.norm(sub, ord=2, axis=-1)
return dist
def batch_cosine_dist(feat1, feat2):
assert feat1.shape[1] == feat2.shape[1]
feat1 = feat1 / np.linalg.norm(feat1, ord=2, axis=-1, keepdims=True)
feat2 = feat2 / np.linalg.norm(feat2, ord=2, axis=-1, keepdims=True)
sim_matrix = feat1 @ feat2.T
return 1 - sim_matrix
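def _demo_batch_matching():
    # Added usage sketch (not part of the original module): pair detections with
    # ground truth by building a cost matrix with batch_euc_dist and solving the
    # assignment with the Hungarian algorithm imported above.
    dets = np.random.rand(4, 3)   # 4 detections, world XYZ
    gts = np.random.rand(5, 3)    # 5 ground-truth points, world XYZ
    cost = batch_euc_dist(dets, gts)               # pairwise distances, shape (4, 5)
    row_ind, col_ind = linear_sum_assignment(cost)
    return list(zip(row_ind, col_ind))             # matched (det, gt) index pairs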
def batch_ious(det, gt):
det[:, 2:4] += det[:, :2]
gt[:, 2:4] += gt[:, :2]
det = det[:, np.newaxis, :]
gt = gt[np.newaxis, :, :]
max_x1 = np.maximum(det[..., 0], gt[..., 0])
min_x2 = np.minimum(det[..., 2], gt[..., 2])
max_y1 = np.maximum(det[..., 1], gt[..., 1])
min_y2 = np.minimum(det[..., 3], gt[..., 3])
i = np.maximum(min_y2 - max_y1, 0) * np.maximum(min_x2 - max_x1, 0)
import numpy as np
import random
import hdnet.hopfield as hdn
import os
from scipy.sparse import csr_matrix, save_npz, load_npz
import scipy.io as spio
import math
import scipy.special as sps
import scipy.optimize as spo
import pickle
from multiprocessing import Pool
from tqdm import tqdm
from collections import deque
from sklearn.metrics import confusion_matrix
# TODO make data_type auto select, so that a folder can have multiple data types
# TODO print units for StimTimes and Ts when loading data
class ModifiedHopfieldNet:
"""
Arguments:
N = number of nodes to build Hopfield network with
in_directory = the relative path to a folder containing the raw .mat files
out_directory = the relative path to a folder that you want to store the python data. If not used, defaults to in_directory
splits: the number of times to split the data to train on each portion (slices sequentially
--> default: 3 splits = beginning, middle, end thirds of experiment)
train_percent = the percentage of each chunk of data (the number of chunks as defined by splits) that will be
used to train a Hopfield network --> default: 0.66
num_nets: The number of replications to use when training Hopfield networks --> default: 5
exp_type = 'map' to conduct a MAP estimate analysis, J to analyze the changing connectivity matrices through time.
--> default: 'J'
"""
def __init__(self, in_directory, out_directory=None, splits=3, train_percent=0.66, num_nets=50, exp_type='J',
data_type='sim', dt=1600, n_jobs=1, N=None, num_bs=1, threshold=250, stim_shift=0):
self.in_directory = in_directory
self.requested_files = None
self.type = exp_type
if out_directory is None:
self.out_directory = self.in_directory
else:
self.out_directory = out_directory
self.dt = dt
self.dats = []
self.filenames = []
if N is None:
self.N = 0
self.N = self.get_N()
else:
self.N = N
self.splits = splits
self.experiments = []
self.train_percent = train_percent
self.num_nets = num_nets
self.networks = []
self.data_type = data_type
self.n_jobs = n_jobs
self.num_bs = num_bs
self.threshold = threshold
self.bs_inds = []
self.filenames = []
self.bs_inds = []
self.thetas = []
self.Js = []
self.stim_shift = stim_shift
if (self.n_jobs == 1) & (self.type != 'MI') & (self.type != 'map'):
self.load_and_save_data(dt=self.dt)
if (self.n_jobs > 1) & (self.type == 'J'):
files = []
for file in os.listdir(self.in_directory):
filename = file[:-4] + f'_N_{self.N}_{self.dt}_sparse.npz'
if filename not in os.listdir(self.out_directory):
files.append(file)
p = Pool(self.n_jobs)
p.map(self.run_multiprocessing_J, files)
p.close()
p.join()
def get_N(self):
dats, _ = self.get_dats(get_N=True)
try:
Cs = dats[0][0]['Cs']
except:
Cs = dats[0][0]['data']['Cs']
Cs = np.array([a[0] for a in Cs.tolist()[0][0].tolist()], dtype='uint8')
N = np.max(Cs) + 1
if self.type in ['map', 'MAP', 'MI', 'mi']:
N += 1
return N
def load_and_save_data(self, **kwargs):
input_files = np.asarray(os.listdir(self.in_directory))
print("The following files are in your input directory: \n")
for k, file in enumerate(input_files):
print(f'File {k}: {file}')
requested_files = input('Please enter the index of files you would like to analyze separated by a comma. \n '
'For example, if you wanted the first and third file, type "0, 2" \n '
'If you would like to use all listed files, just press Enter')
if requested_files == '':
requested_files = input_files
else:
requested_files = requested_files.split(',')
for k, file in enumerate(requested_files):
requested_files[k] = int(file)
requested_files = input_files[requested_files]
for file in requested_files:
filename = file[:-4] + f'_N_{self.N}_{self.dt}_sparse.npz'
if filename not in os.listdir(self.out_directory):
print(f'---------------- The file {filename} was not found in the out directory -----------------')
print(f'---------------------- Importing .mat file: {file} instead ----------------------')
dat = spio.loadmat(os.path.join(self.in_directory, file))
ys = self.binaryVecs(dat, **kwargs)
self.experiments.append(ys)
y_sparse = csr_matrix(ys, dtype='uint8')
save_npz(os.path.join(self.out_directory, filename), y_sparse)
else:
print(f'------------------ Loading file: {filename} from out directory --------------------')
try:
ys = load_npz(os.path.join(self.out_directory, filename)).toarray()
except:
dat = spio.loadmat(os.path.join(self.in_directory, file))
ys = self.binaryVecs(dat, **kwargs)
y_sparse = csr_matrix(ys, dtype='uint8')
save_npz(os.path.join(self.out_directory, filename), y_sparse)
self.experiments.append(ys)
def build_and_train_networks(self):
all_accs = []
for i, memories in enumerate(self.experiments):
accs = []
print(f'---------------------- Conducting experiment: {i} ----------------------')
memories_chunked = self.chunked(memories, self.splits)
experiment_nets = []
for j, memory_chunk in enumerate(memories_chunked):
avg_acc = []
chunked_nets = []
for _ in tqdm(range(self.num_nets)):
hop = hdn.HopfieldNetMPF(N=self.N)
rand_memories = np.array(
[random.choice(memory_chunk) for _ in range(round(len(memory_chunk) * self.train_percent))])
hop.store_patterns_using_mpf(rand_memories + 0.0)
if self.type == 'map':
avg_acc.append(self.get_accuracy(memory_chunk, hop, precision_recall=False))
chunked_nets.append(hop)
experiment_nets.append(chunked_nets)
accs.append(avg_acc)
if self.type == 'map':
print(f'Experiment: {i} // Chunk: {j} // Avg Accuracy: {round(np.mean(avg_acc), 3)} +/- '
f'{round(np.std(avg_acc), 3)}')
else:
print(f'Experiment: {i} // Chunk: {j}')
all_accs.append(accs)
print(f'---------------------- Finished experiment: {i} ----------------------')
self.networks.append(experiment_nets)
return all_accs
@staticmethod
def explicitMLE(means, corrs):
n = len(means)
if n > 20:
raise ValueError('Cannot perform fitting when N>20. Reduce N and try again!')
#
def logL(x):
J = np.reshape(x[:n ** 2], [n, n])
h = x[n ** 2:]
# get first term
Z = 0
for i in range(np.power(2, n)):
x = np.asarray(list([int(j) for j in np.binary_repr(i, width=n)]))
E = -np.inner(h, x) + np.inner(x, np.dot(J, x))
Z += np.exp(E)
# combine into logL
logL = -np.sum(means * h) + np.sum(corrs * J) - np.log(Z)
return logL
# 1. Do MLE fit
# For now, assume contrastive divergence unnecessary
# record log likelihood and actual J's and h's
def jac_MLE(x):
J = np.reshape(x[:n ** 2], [n, n])
h = x[n ** 2:]
#
moments_model = np.zeros(n ** 2 + n)
Z = 0
for i in range(np.power(2, n)):
x = np.asarray(list([int(j) for j in np.binary_repr(i, width=n)]))
E = -np.inner(h, x) + np.inner(x, np.dot(J, x))
Z += np.exp(E)
moments_model[:n ** 2] += np.exp(E) * np.reshape(np.outer(x, x), [n ** 2])
moments_model[n ** 2:] += -np.exp(E) * x
moments_model /= Z
moments_data = np.hstack([np.reshape(corrs, n ** 2), -means])
return moments_data - moments_model
foo_MLE = spo.minimize(lambda x: -logL(x), x0=np.random.uniform(size=n ** 2 + n), jac=lambda x: -jac_MLE(x))
logL_MLE = -foo_MLE.fun;
J_guess = foo_MLE.x[:n ** 2];
h_guess = foo_MLE.x[n ** 2:]
return h_guess, np.reshape(J_guess, [n, n])
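# Added sketch (not part of the original code): explicitMLE enumerates all 2**n
# states, so keep n small; the means/corrs below are illustrative values only.
#   means = np.array([0.2, 0.5, 0.1])
#   corrs = np.outer(means, means)
#   h, J = ModifiedHopfieldNet.explicitMLE(means, corrs)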
def contrastiveDivergence(self, means, corrs, alpha=0.1, thresh=0.05):
n = len(means)
# choose initial J and theta
J_guess = np.random.uniform(size=[n, n])
theta_guess = np.random.uniform(size=n)
grad_norm = thresh * 2
while grad_norm > thresh:
# do gradient ascent
# get better estimate of gradient
mean_model = np.zeros(n)
corr_model = np.zeros([n, n])
"""Main module of dsmpy. Contains the classes for core dsmpy objects
and the methods for serial and parallel computation
of synthetic seismograms using the Fortran DSM."""
from dsmpy._tish import _tish, _calthetaphi
from dsmpy._tish import parameters as tish_parameters
from dsmpy._tish import _pinput as _pinput_sh
from dsmpy._tipsv import _pinput, _tipsv
from dsmpy.spc import spctime
from dsmpy.event import Event
from dsmpy.station import Station
from dsmpy.seismicmodel import SeismicModel
from obspy import Trace
from obspy.core.trace import Stats
from obspy.core.util.attribdict import AttribDict
import obspy.signal.filter
import os
import glob
import matplotlib.pyplot as plt
import pickle
import numpy as np
from mpi4py import MPI
import time
import functools
import warnings
from sqlalchemy.sql.functions import rank
default_params = dict(
re=0.01, ratc=1e-10, ratl=1e-5, omegai=0.0014053864092981234)
def _is_iterable(obj):
try:
iter(obj)
except Exception:
return False
else:
return True
class PyDSMInputFile:
"""Input file for dsmpy.
Args:
input_file (str): path of dsmpy input file
"""
def __init__(self, input_file):
self.input_file = input_file
def read(self):
params = dict()
params['verbose'] = 0
with open(self.input_file, 'r') as f:
for line in f:
if line.strip().startswith('#'):
continue
key, value = self._parse_line(line)
if key is not None:
params[key] = value
return params
def _parse_line(self, line):
key, value = line.strip().split()[:2]
if key == 'sac_files':
full_path = os.path.expanduser(value.strip())
value_parsed = list(glob.iglob(full_path))
elif key == 'tlen':
value_parsed = float(value)
elif key == 'nspc':
value_parsed = int(value)
elif key == 'sampling_hz':
value_parsed = int(value)
elif key == 'seismic_model':
value_parsed = value.strip().lower()
elif key == 'mode':
value_parsed = int(value)
elif key == 'output_folder':
full_path = os.path.expanduser(value.strip())
value_parsed = full_path
elif key == 'verbose':
value_parsed = int(value)
else:
print('Warning: key {} undefined. Ignoring.'.format(key))
return None, None
return key, value_parsed
class PyDSMOutput:
"""Output from dsmpy compute methods.
Args:
spcs (ndarray): seismic spectra computed with DSM.
Shape is (3, nr, nspc+1).
stations (ndarray of Station): stations. Shape is (nr,)
event (Event): seismic event (the source).
sampling_hz (int): sampling frequency for time-domain waveforms
tlen (float): duration of the synthetics (in seconds)
(better to be 2**n/10)
nspc (int): number of frequency points in the synthetics
(better to be 2**n)
omegai (float): numerical damping used in DSM.
Default is default_params['omegai'].
Better to leave it at its default value.
model_id (str): seismic model identifier (e.g., 'PREM', 'mod1').
(default is None).
"""
color_count = 0
colors = (
'blue', 'red', 'green', 'orange',
'purple', 'brown', 'pink', 'cyan',
'magenta', '')
def __init__(
self, spcs, stations, event,
sampling_hz, tlen, nspc,
omegai=default_params['omegai'], model_id=None):
self.spcs = spcs
self.stations = stations
self.event = event
self.sampling_hz = sampling_hz
self.tlen = tlen
self.nspc = nspc
self.omegai = omegai
self.components = ('Z', 'R', 'T')
self.dt = 1 / self.sampling_hz
self.us = None
self.ts = None
self.model_id = model_id
@classmethod
def output_from_pydsm_input(cls, spcs, pydsm_input):
return cls(
spcs, pydsm_input.stations, pydsm_input.event,
pydsm_input.sampling_hz,
pydsm_input.tlen, pydsm_input.nspc, pydsm_input.omegai)
def get_us_shape(self) -> (int, int, int):
"""Return the shape of self.us (without doing FFT)."""
spct = spctime.SpcTime(self.tlen, self.nspc,
self.sampling_hz, self.omegai,
self.event.source_time_function)
return (3, len(self.stations), spct.npts)
def to_time_domain(self, source_time_function=None):
"""Compute time domain waveforms from spetcra.
Args:
source_time_function (SourceTimeFunction):
source time function object (default is None).
"""
# if self.us is not None:
# return
if source_time_function is None:
spct = spctime.SpcTime(self.tlen, self.nspc,
self.sampling_hz, self.omegai,
self.event.source_time_function)
else:
spct = spctime.SpcTime(self.tlen, self.nspc,
self.sampling_hz, self.omegai,
source_time_function)
us = spct.spctime(self.spcs)
self.us = us
self.ts = np.linspace(0, self.tlen,
spct.npts, endpoint=False)
def free(self):
"""Free memory for the large fields self.us and self.ts.
Can be used after processing the time-domain waveform in cases
where a large number of waveforms are used, which can quickly
require a lot of memory.
Examples:
>>> output.to_time_domain()
>>> # do some operations on output.us
>>> output.free()
"""
del self.us
del self.ts
self.us = None
self.ts = None
def set_source_time_function(self, source_time_function):
"""Set the source time function for convolution.
Args:
source_time_function (SourceTimeFunction):
source time function object.
"""
self.event.source_time_function = source_time_function
def write(self, root_path, format):
"""Write to file using obspy.io.write.
Args:
root_path (str): path of root folder in which to write.
format (str): output files format ('sac').
"""
for tr in self.get_traces():
filename = '.'.join((
tr.stats.station, tr.stats.network, tr.stats.sac.kevnm,
tr.stats.component, format))
tr.write(os.path.join(root_path, filename), format=format)
def save(self, path):
"""Save self using pickle.dump().
Args:
path (str): name of the output file.
"""
with open(path, 'wb') as f:
pickle.dump(self, f)
@staticmethod
def load(path):
"""Read file into self using pickle.load().
Args:
path (str): name of the file that contains self.
Returns:
PyDSMOutput: the loaded PyDSMOutput object.
"""
with open(path, 'rb') as f:
output = pickle.load(f)
return output
def filter(self, freq, freq2=0., type='lowpass', zerophase=False):
"""Filter time-domain waveforms using obspy.signal.filter.
Args:
freq (float): filter frequency
freq2 (float): filter maximum frequency
(for bandpass filters only)
type (str): type of filter ('lowpass', 'bandpass').
zerophase (bool): use zero-phase filter (default is False).
"""
if self.us is None:
self.to_time_domain(self.event.source_time_function)
if type not in {'bandpass', 'lowpass'}:
raise ValueError('Expect "bandpass" or "lowpass" for arg "type"')
if type == 'bandpass':
assert freq2 > freq
for icomp in range(3):
for ir in range(self.us.shape[1]):
if type == 'lowpass':
self.us[icomp, ir] = obspy.signal.filter.lowpass(
self.us[icomp, ir], freq,
df=self.sampling_hz, zerophase=zerophase)
elif type == 'bandpass':
self.us[icomp, ir] = obspy.signal.filter.bandpass(
self.us[icomp, ir], freq, freq2,
df=self.sampling_hz, zerophase=zerophase)
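# Added usage sketch (not part of the original code; `output` is a hypothetical
# PyDSMOutput instance): band-pass the synthetics between 0.01 Hz and 0.1 Hz.
#   output.to_time_domain()
#   output.filter(freq=0.01, freq2=0.1, type='bandpass', zerophase=True)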
def get_traces(self):
"""Return list of obspy.Trace.
Returns:
list of obspy.Trace: traces
"""
traces = []
if self.us is None:
self.to_time_domain()
for icomp in range(3):
for ista in range(self.get_nr()):
station = self.stations[ista]
data = self.us[icomp, ista]
stats = Stats()
stats.network = station.network
stats.station = station.name
stats.sampling_rate = self.sampling_hz
stats.delta = self.dt
stats.starttime = 0.
# stats.endtime = self.tlen
stats.npts = len(data)
stats.component = self.components[icomp]
sac_header = AttribDict(**dict(
b=0, delta=self.dt, depmax=data.max(),
depmin=data.min(), depmen=data.mean(),
e=self.tlen, npts=len(data), evdp=self.event.depth,
evla=self.event.latitude, evlo=self.event.longitude,
kevnm=self.event.event_id, knetwk=station.network,
kstnm=station.name, gcarc=0.))
stats.sac = sac_header
trace = Trace(data=data, header=stats)
traces.append(trace)
return traces
def window_spcs(self, windows, window_width):
"""Window the spectra in the frequency domain
using gaussian windows to isolate portions
of time-domain waveforms.
Args:
windows (list of Window): windows.
window_width (float): duration (in seconds).
Returns:
PyDSMOutput: windowed PyDSMOutput object.
"""
gaussian_windows = [
window.get_gaussian_window_in_frequency_domain(
self.nspc, self.tlen, window_width)
for window in windows]
spcs_windowed = np.zeros_like(self.spcs)
for i in range(self.nspc + 1):
start = self.nspc - i
end = start + self.nspc + 1
spcs_windowed[:, :, i] = np.sum(
self.spcs * gaussian_windows[:, start:end], axis=2)
output = self.__copy__()
output.spcs = spcs_windowed
return output
def get_nr(self):
"""Returns the number of receivers (stations)."""
return len(self.stations)
def _normalize(self, ys, mode='self'):
if mode == 'self':
maxs = ys.max(axis=2).reshape(3, -1)
maxs = np.where(maxs == 0, np.inf, maxs)
maxs = maxs.reshape((*ys.shape[:2], 1))
return 0.5 * ys / maxs
elif mode == 'none':
# TODO ensure minimum distance
maxs = ys[:, 0, :].max(axis=1).reshape(3, 1, 1)
return .8 * ys / maxs
def _plot(
self, xs, ys, color='black', axes=None, distance_min=0.,
distance_max=np.inf, label=None, normalize='self',
xlabel='Time (s)', slowness=0.):
if axes is None:
fig, axes = plt.subplots(3, 1, sharey=True, sharex=True)
else:
assert len(axes) == 3
fig = None
PyDSMOutput.color_count += 1
distances = np.zeros(len(self.stations), dtype=np.float64)
for ir in range(len(self.stations)):
distances[ir] = self.event.get_epicentral_distance(
self.stations[ir])
if distance_min == 0:
distance_min = distances.min()
indexes = (distances >= distance_min) & (distances <= distance_max)
ys_ = self._normalize(ys[:, indexes, :], mode=normalize)
distances_ = distances[indexes]
for ir in range(indexes.sum()):
label_ = label if ir == len(self.stations) - 1 else None
# reduced time plots
reduce_time = (distances_[ir] - distance_min) * slowness
reduce_index = int(reduce_time * self.sampling_hz)
for icomp in range(3):
axes[icomp].plot(
xs[reduce_index:] - reduce_time,
(ys_[icomp, ir, reduce_index:]
+ distances_[ir]),
color=color, label=label_)
axes[icomp].set_xlabel(xlabel)
axes[icomp].set_title(self.components[icomp])
if label is not None:
axes[icomp].legend()
axes[0].set_ylabel('Distance (deg)')
return fig, axes
def plot_spc(
self, color='black', axes=None, distance_min=0.,
distance_max=np.inf, label=None, normalize='self'):
"""Plot a frequency-domain (spectra) record section.
Args:
axes (matplotlib.axes): ax on which to plot
distance_min (float): minimum epicentral distance (deg)
distance_max (float): maximum epicentral distance (deg)
label (str): label for model name
normalize (str): 'self' for self-normalization
or 'none' to see amplitude decay with distance
Returns:
figure: matplotlib figure object.
Axes: matplotlib Axes object.
"""
freqs = np.linspace(
0, self.nspc / self.tlen, self.nspc + 1, endpoint=True)
return self._plot(
freqs, np.abs(self.spcs), color=color, axes=axes,
distance_min=distance_min, distance_max=distance_max,
label=label, normalize=normalize, xlabel='Frequency (Hz)')
import numpy as np
from ..decorator import check_vector
class MAE:
def predict(self, y_predict, y):
result = np.mean(np.abs(y_predict - y))
return result
def __str__(self):
return 'mae'
class MSE:
@check_vector
def predict(self, y_predict, y):
result = np.square(y_predict - y)
result = np.mean(result)
return result
@check_vector
def d(self, y_predict, y):
diff = y_predict - y
shape = diff.shape
result = 1
result = result * np.ones(shape)/np.prod(shape)
result = result * 2 * diff
return result
def __str__(self):
return 'mse'
class BINARY_CROSS_ENTROPY:
@check_vector
def predict(self, y_predict, y):
H = self.get_H(y_predict, y)
result = np.mean(H)
return result
def relu(self, X):
return np.maximum(X,0)
def transform(self, X):
# input : the value to transform
# output : the transformed value
# applies the sigmoid in a numerically stable way
return np.exp(-self.relu(-X))/(1.0 + np.exp(-np.abs(X)))
def derv_H_sigmoid(self, logit, z): # z is the true label
return -z + self.transform(logit)
def get_H(self, X, z):
return self.relu(X) - X * z + np.log(1 + np.exp(-np.abs(X)))
@check_vector
def d(self, y_predict, y):
result = 1.0 / np.prod(y_predict.shape)
result = result * self.derv_H_sigmoid(y_predict, y)
return result
def __str__(self):
return 'BINARY_CROSS_ENTROPY'
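def _demo_bce_logits():
    # Added sketch (not part of the original module): BINARY_CROSS_ENTROPY operates on
    # raw logits. get_H is the numerically stable form relu(x) - x*z + log(1 + exp(-|x|)),
    # which equals -z*log(sigmoid(x)) - (1 - z)*log(1 - sigmoid(x)).
    bce = BINARY_CROSS_ENTROPY()
    logits = np.array([-2.0, 0.0, 3.0])
    labels = np.array([0.0, 1.0, 1.0])
    probs = bce.transform(logits)   # overflow-safe sigmoid of the logits
    return bce.get_H(logits, labels), probs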
class ACCURACY:
@check_vector
def predict(self, y_predict, y):
est_yes = np.greater(y_predict, 0.5)
ans_yes = np.greater(y, 0.5)
est_no = np.logical_not(est_yes)
ans_no = np.logical_not(ans_yes)
tp = np.sum(np.logical_and(est_yes, ans_yes))
fp = np.sum(np.logical_and(est_yes, ans_no))
fn = np.sum(np.logical_and(est_no, ans_yes))
tn = np.sum(np.logical_and(est_no, ans_no))
result = self.safe_div(tp+tn, tp+tn+fp+fn)
return result
def safe_div(self, p, q):
p, q = float(p), float(q)
if np.abs(q) < 1.0e-20: return np.sign(p)
return p / q
def __str__(self):
return 'ACCURACY'
class PRECISION:
@check_vector
def predict(self, y_predict, y):
est_yes = np.greater(y_predict, 0)
ans_yes = np.greater(y, 0.5)
est_no = np.logical_not(est_yes)
ans_no = np.logical_not(ans_yes)
tp = np.sum(np.logical_and(est_yes, ans_yes))
fp = np.sum(np.logical_and(est_yes, ans_no))
fn = np.sum(np.logical_and(est_no, ans_yes))
tn = np.sum(np.logical_and(est_no, ans_no))
result = self.safe_div(tp, tp+fp)
return result
def safe_div(self, p, q):
p, q = float(p), float(q)
if np.abs(q) < 1.0e-20: return np.sign(p)
return p / q
def __str__(self):
return 'PRECISION'
class RECALL:
@check_vector
def predict(self, y_predict, y):
est_yes = np.greater(y_predict, 0)
ans_yes = np.greater(y, 0.5)
est_no = np.logical_not(est_yes)
ans_no = np.logical_not(ans_yes)
tp = np.sum(np.logical_and(est_yes, ans_yes))
fp = np.sum(np.logical_and(est_yes, ans_no))
fn = np.sum(np.logical_and(est_no, ans_yes))
tn = np.sum(np.logical_and(est_no, ans_no))
result = self.safe_div(tp, tp+fn)
return result
def safe_div(self, p, q):
p, q = float(p), float(q)
if np.abs(q) < 1.0e-20: return np.sign(p)
return p / q
def __str__(self):
return 'RECALL'
class F1:
@check_vector
def predict(self, y_predict, y):
est_yes = np.greater(y_predict, 0)
ans_yes = np.greater(y, 0.5)
est_no = np.logical_not(est_yes)
ans_no = np.logical_not(ans_yes)
tp = np.sum(np.logical_and(est_yes, ans_yes))
fp = np.sum(np.logical_and(est_yes, ans_no))
fn = np.sum(np.logical_and(est_no, ans_yes))
tn = np.sum(np.logical_and(est_no, ans_no))
recall = self.safe_div(tp, tp+fn)
precision = self.safe_div(tp, tp+fp)
result = 2 * self.safe_div(recall*precision, recall+precision)
return result
def safe_div(self, p, q):
p, q = float(p), float(q)
if np.abs(q) < 1.0e-20: return np.sign(p)
return p / q
import numpy as np
import matplotlib.pyplot as plt
import torch
from sklearn.metrics import f1_score
import torch.nn.functional as F
import importlib
def shot_acc(preds, labels, train_data, many_shot_thr=100, low_shot_thr=20, acc_per_cls=False):
if isinstance(train_data, np.ndarray):
training_labels = np.array(train_data).astype(int)
else:
training_labels = np.array(train_data.dataset.labels).astype(int)
if isinstance(preds, torch.Tensor):
preds = preds.detach().cpu().numpy()
labels = labels.detach().cpu().numpy()
elif isinstance(preds, np.ndarray):
pass
else:
raise TypeError('Type ({}) of preds not supported'.format(type(preds)))
train_class_count = []
test_class_count = []
class_correct = []
for l in np.unique(labels):
train_class_count.append(len(training_labels[training_labels == l]))
test_class_count.append(len(labels[labels == l]))
class_correct.append((preds[labels == l] == labels[labels == l]).sum())
many_shot = []
median_shot = []
low_shot = []
for i in range(len(train_class_count)):
if train_class_count[i] > many_shot_thr:
many_shot.append((class_correct[i] / test_class_count[i]))
elif train_class_count[i] < low_shot_thr:
low_shot.append((class_correct[i] / test_class_count[i]))
else:
median_shot.append((class_correct[i] / test_class_count[i]))
if len(many_shot) == 0:
many_shot.append(0)
if len(median_shot) == 0:
median_shot.append(0)
if len(low_shot) == 0:
low_shot.append(0)
if acc_per_cls:
class_accs = [c / cnt for c, cnt in zip(class_correct, test_class_count)]
return np.mean(many_shot), np.mean(median_shot), np.mean(low_shot)
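# Added usage sketch (not part of the original module): training-set label counts decide
# which classes count as many-shot (>100 samples), low-shot (<20), or median-shot.
#   train_labels = np.array([0] * 150 + [1] * 50 + [2] * 10)
#   preds = np.array([0, 0, 1, 2, 2, 1])
#   labels = np.array([0, 1, 1, 2, 2, 2])
#   many, median, low = shot_acc(preds, labels, train_labels)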
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 10 18:03:32 2018
This file is the new version for calculating the uncertainty value in each patch
It's better because:
1. It's a dynamic way of choosing the most uncertain patch, since the provided patches can overlap on adjacent pixels
2. It can be further developed to weight the uncertainty of each patch by 1/(h*w), where h and w are the height and
width of the patch.
The things I need to be careful about are:
1. The selected most uncertain patch needs to be able to be transformed back to the binary mask
2. The uncertainty values of previously selected patches must not be considered during the selection. I think I can still set a
fixed number of patches, it's just that it will be much larger than before.
@author: s161488
"""
import numpy as np
from scipy import signal
def calculate_score_for_patch(image, kernel, stride_size, Num_Most_Uncert_Patch, crit = None, higher = True):
"""This function is used to calculate the utility score for each patch.
Args:
uncertainty_est: [Im_h, Im_w]
kernel: [k_h, k_w]
Returns:
most_uncert_image_index: [Num_Most_Selec] this should be the real image index
%most_uncert_patch_index: [Num_Most_Selec] this should be the numeric index for the selected patches
binary_mask: [Num_Most_Selec, Im_h, Im_w,1]
%pseudo_label: [Num_Most_Selec, Im_h, Im_w,1]
Op:
Before, I enter the uncert_est, I need to consider if there are already selected patches in the last acquisition step.
If there are some selected patches in the last acquisition step, then it can be annotated by the binary mask. Therefore,
before I enter the uncert_est, the uncertainty value for the selected patches should be zero.
Then the evaluation for the rest patches will be as same as below
Also, another thing needs to be considered is that if there are overlapping betweeen the new selected images and the previously
selected images, I need to aggregate the binary mask, as same as the ground truth label. This step will be as same as before.
"""
Im_h, Im_w = np.shape(image)
kh, kw = np.shape(kernel)
h_num_patch = Im_h-kh+1
w_num_patch = Im_w-kw+1
num_row_wise = h_num_patch//stride_size
num_col_wise = w_num_patch//stride_size
if stride_size == 1:
tot_num_patch_per_im = num_row_wise*num_col_wise
else:
tot_num_patch_per_im = (num_row_wise+1)*(num_col_wise+1)
patch_tot = select_patches_in_image_area(image, kernel, stride_size, num_row_wise, num_col_wise)
patch_tot = np.reshape(patch_tot, [-1])
import rospy
from std_msgs.msg import Float64MultiArray
from cv_bridge import CvBridge, CvBridgeError
import message_filters
from sensor_msgs.msg import Image
import ros_numpy
import sys
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
from utils import detector_utils as detector_utils
import tensorflow as tf
from multiprocessing import Queue, Pool
import datetime
import argparse
from scipy import ndimage
import numpy as np
from IPython import embed
import open3d as o3d
tf.debugging.set_log_device_placement(True)
frame_processed = 0
score_thresh = 0.2
# Create a worker thread that loads graph and
# does detection on images in an input queue and puts it on an output queue
rgb_img = []
depth_img = []
focalLengthX = 624.3427734375
focalLengthY = 624.3428344726562
centerX = 305.03887939453125
centerY = 244.86605834960938
cube_size = [200, 200, 200]
def display_inlier_outlier(cloud, ind):
inlier_cloud = cloud.select_down_sample(ind)
outlier_cloud = cloud.select_down_sample(ind, invert=True)
print("Showing outliers (red) and inliers (gray): ")
outlier_cloud.paint_uniform_color([1, 0, 0])
inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8])
o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud])
def callback(rgb_msg, depth_msg):
global rgb_img, depth_img
try:
rgb_img = ros_numpy.numpify(rgb_msg)
depth_img = ros_numpy.numpify(depth_msg)
except CvBridgeError as e:
rospy.logerr(e)
def calculateCoM(dpt):
"""
Calculate the center of mass
:param dpt: depth image
:return: (x,y,z) center of mass
"""
dc = dpt.copy()
dc[dc < 0] = 0
dc[dc > 10000] = 0
cc = ndimage.measurements.center_of_mass(dc > 0)
num = np.count_nonzero(dc)
com = np.array((cc[1]*num, cc[0]*num, dc.sum()), np.float)
if num == 0:
return np.array((0, 0, 0), np.float)
else:
return com/num
def clean_depth_map(depth, com, size, com_type="2D"):
if com_type == "2D":
com3d = [(com[0] + int(left) - depth.shape[1]/2) * com[2] / focalLengthX,
(com[1] + int(top) - depth.shape[0]/2) * com[2] / focalLengthY, com[2]]
else:
com3d = com
x_min = com3d[0] - size[0] / 2
x_max = com3d[0] + size[0] / 2
y_min = com3d[1] - size[1] / 2
y_max = com3d[1] + size[1] / 2
z_min = com3d[2] - size[2] / 2
z_max = com3d[2] + size[2] / 2
points = depth2pc(depth, True, left, top)
points_tmp = points.copy()
if len(points):
hand_points_ind = np.all(
np.concatenate((points[:, 0].reshape(-1, 1) > x_min, points[:, 0].reshape(-1, 1) < x_max,
points[:, 1].reshape(-1, 1) > y_min, points[:, 1].reshape(-1, 1) < y_max,
points[:, 2].reshape(-1, 1) > z_min, points[:, 2].reshape(-1, 1) < z_max), axis=1), axis=1)
points_tmp = points[hand_points_ind]
depth = pc2depth(points[hand_points_ind])
return points_tmp, depth
def jointsImgTo3D(sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((sample.shape[0], 3), np.float32)
for i in range(sample.shape[0]):
ret[i] = jointImgTo3D(sample[i])
return ret
def jointImgTo3D(sample):
"""
Normalize sample to metric 3D
:param sample: joints in (x,y,z) with x,y in image coordinates and z in mm
:return: normalized joints in mm
"""
ret = np.zeros((3,), np.float32)
# convert to metric using f
ret[0] = (sample[0]-centerX)*sample[2]/focalLengthX
ret[1] = (sample[1]-centerY)*sample[2]/focalLengthY
ret[2] = sample[2]
return ret
def depth2pc(depth, after_crop=False, left=0, top=0):
points = []
for v in range(depth.shape[0]):
for u in range(depth.shape[1]):
Z = int(depth[v, u])
if Z == 0:
continue
v_m = v
u_m = u
if after_crop:
v_m = v + int(top)
u_m = u + int(left)
X = int((u_m - centerX) * Z / focalLengthX)
Y = int((v_m - centerY) * Z / focalLengthY)
points.append([X, Y, Z])
points_np = np.array(points)
return points_np
def pc2depth(pc_local):
pc = pc_local.copy()
width = 640
height = 480
pc[:, 0] = pc[:, 0] / pc[:, 2].astype(float) * focalLengthX + centerX
pc[:, 1] = pc[:, 1] / pc[:, 2].astype(float) * focalLengthY + centerY
uvd = []
for i in range(pc.shape[0]):
if 0 < pc[i, 0] < width and 0 < pc[i, 1] < height:
uvd.append(pc[i, :].astype(int))
depth = uvd2depth(np.array(uvd), width, height)
return depth
def depth2uvd(depth):
depth = depth.squeeze()
v, u = np.where(depth != 0)
v = v.reshape(-1, 1)
u = u.reshape(-1, 1)
return np.concatenate([u, v, depth[v, u]], axis=1)
def uvd2depth(uvd, width, height):
depth = np.zeros((height, width, 1), np.uint16)
depth[uvd[:, 1], uvd[:, 0]] = uvd[:, 2].reshape(-1, 1)
return depth
def joint3DToImg(sample):
"""
Denormalize sample from metric 3D to image coordinates
:param sample: joints in (x,y,z) with x,y and z in mm
:return: joints in (x,y,z) with x,y in image coordinates and z in mm
"""
ret = np.zeros((3,), np.float32)
# convert to metric using f
if sample[2] == 0.:
ret[0] = centerX
ret[1] = centerY
return ret
ret[0] = sample[0]/sample[2]*focalLengthX+centerX
ret[1] = sample[1]/sample[2]*focalLengthY+centerY
ret[2] = sample[2]
return ret
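# Added note (not part of the original module): jointImgTo3D and joint3DToImg invert
# each other under the pinhole model u = X*fx/Z + cx, v = Y*fy/Z + cy. For example,
# joint3DToImg(jointImgTo3D(np.array([320., 240., 1000.]))) recovers (320, 240, 1000)
# up to float32 rounding, using the intrinsics defined near the top of this file.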
def worker(input_q, depth_q, output_q, cap_params, frame_processed):
global rgb_img, depth_img
print(">> loading frozen model for worker")
detection_graph, sess = detector_utils.load_inference_graph()
sess = tf.Session(graph=detection_graph)
im_width, im_height = (640, 480)
pcd = o3d.geometry.PointCloud()
pcd_crop = o3d.geometry.PointCloud()
inlier_cloud = o3d.geometry.PointCloud()
previous_center_point = np.array([0, 0, 0])
while True:
# print("> ===== in worker loop, frame ", frame_processed)
image_np = input_q.get()
depth_np = depth_q.get()
if image_np is not None:
# Actual detection. Variable boxes contains the bounding box coordinates for hands detected,
# while scores contains the confidence for each of these boxes.
# Hint: If len(boxes) > 1 , you may assume you have found at least one hand (within your score threshold)
#
boxes, scores = detector_utils.detect_objects(
image_np, detection_graph, sess)
# boxes, scores = detector_utils.gpu_detect_objects(image_np,
# detection_graph, sess)
ind = np.argmax(scores)
import numpy as np
class CEM:
def __init__(self, args, num_params, mu_init=None):
# Params inform.
self.num_params = num_params
self.mu = np.zeros(self.num_params) if (mu_init is None) else np.array(mu_init)
# Dist the inform.
self.pop_size = args.pop_size
self.cov = args.cov_init * np.ones(self.num_params)
self.cov_limit = args.cov_limit
self.damp = args.cov_init
self.alpha = args.cov_alpha
# Elitism
self.elitism = args.elitism
self.elite_param = self.mu
# Parents and weights
self.parents = (args.pop_size // 2)
self.weights = np.array([np.log((self.parents + 1) / i) for i in range(1, self.parents + 1)])
self.weights /= self.weights.sum()
# Print the inform.
print(f"\nThe weight of CEM update: {np.round(self.weights, 3)}")
print(f"ES elitism: {self.elitism}")
def sampling(self, pop_size):
# Sample perturbation from N(0, np.sqrt(cov))
epsilon = np.sqrt(self.cov) * np.random.randn(pop_size, self.num_params)
# Generate policy individuals
inds = self.mu + epsilon
# If use elitism, change last individual as previous elite individual
if self.elitism:
inds[0] = self.elite_param
return inds
def update(self, params, scores):
# Get index following ascending scores order, and determine parents
idx_sorted = np.argsort(-np.array(scores))
params_parents = params[idx_sorted[:self.parents]]
# Update cov
z = (params_parents - self.mu)
self.damp = (1 - self.alpha) * self.damp + self.alpha * self.cov_limit
self.cov = 1 / self.parents * self.weights @ (z * z) + self.damp * np.ones(self.num_params)
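# Added usage sketch (not part of the original module; `args`, `evaluate`, `model_size`,
# and `n_generations` are hypothetical): a basic CEM loop alternates sampling and update.
#   cem = CEM(args, num_params=model_size)
#   for generation in range(n_generations):
#       population = cem.sampling(cem.pop_size)
#       scores = [evaluate(individual) for individual in population]
#       cem.update(population, scores)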
#%%
import os
import sys
import numpy as np
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='')
parser.add_argument('--s_dataset', type=str, choices=['AMAZON_RO', 'AMAZON_PA', 'CERRADO_MA'], default='AMAZON_RO', help='source dataset')
parser.add_argument('--t_dataset', type=str, choices=['AMAZON_RO', 'AMAZON_PA', 'CERRADO_MA'], default='CERRADO_MA', help='target dataset')
args = parser.parse_args()
s_dataset, t_dataset = args.s_dataset, args.t_dataset
labels = []
labels.append('1) Tr:T, Ts:T, ')
labels.append('2) Tr:S, Ts:T, ')
labels.append('3) ADDA, ')
colors = []
colors.append('tab:green')
colors.append('tab:orange')
colors.append('tab:blue')
alias = {
'AMAZON_RO' : 'RO',
'AMAZON_PA' : 'PA',
'CERRADO_MA' : 'MA',
}
def correct_nan_values(arr, before_value, last_value):
before = np.zeros_like(arr)
after = np.zeros_like(arr)
arr_ = arr.copy()
index = 0
if before_value == 1: before_value = arr[~np.isnan(arr)][0]
if last_value == 1: last_value = arr[~np.isnan(arr)][-1]
#!/usr/bin/env python3
from asist_nsf_2018.experiments import experiments
from asist.wave_probe import read_wave_probe_csv
from asist.utility import running_mean
from asist.pressure import read_pressure_from_netcdf
import matplotlib.pyplot as plt
from matplotlib.dates import num2date, date2num
import numpy as np
import os
from scipy.io import loadmat
from scipy.signal import detrend
from datetime import datetime, timedelta
from dispersion import w2k
from netCDF4 import Dataset
from process_leg import leg1, leg2, leg_slope
plt.rcParams.update({'font.size': 12})
def get_run_elevations(eta, start_index, n, run_length=360, frequency=100, offset=30):
n0 = start_index + n * run_length * frequency + offset * frequency
n1 = n0 + run_length * frequency - 2 * offset * frequency
return eta[n0:n1]
def demean(x):
return x - np.mean(x)
def variance_spectrum(eta, sampling_rate, fmin=0.1, fmax=100):
e = demean(detrend(eta))
n = e.size
f = np.fft.fftfreq(n, 1 / sampling_rate)[:n//2]
#df = 2 * sampling_rate / e.size
df = sampling_rate / e.size
ai = 2 * np.abs(np.fft.fft(e)[:n//2]) / n
F = ai**2 / 2 / df
mask = (f >= fmin) & (f < fmax)
return F[mask], f[mask], df
def cp_cg(F, f, df, depth):
w = 2 * np.pi * f
k = w2k(w, depth)[0]
cp = w[1:] / k[1:]
cg = np.diff(w) / np.diff(k)
return cp, cg
def sig_wave_height(F, df):
"""Significant wave height [m]."""
return 4 * np.sqrt(np.sum(F * df))
def mean_wave_period(F, f, df):
"""First-order mean wave period [s]."""
return np.sum(F * df) / np.sum(F * f * df)
def wave_energy(F, df, rhow=1000, g=9.8):
"""Returns total wave energy."""
return rhow * g * np.sum(F * df)
def radiation_stress(F, f, df, depth, rhow=1000, g=9.8):
"""Returns radiation stress."""
cp, cg = cp_cg(F, f, df, depth)
rad_stress_fac = 2 * cg / cp - 0.5
return rhow * g * np.sum(rad_stress_fac * F[1:] * df)
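# Added worked example (not part of the original script): for a monochromatic wave
# eta = a*sin(2*pi*f0*t) the one-sided variance spectrum integrates to a**2/2, so
# sig_wave_height returns 4*sqrt(a**2/2) = 2*sqrt(2)*a and mean_wave_period returns 1/f0.
#   t = np.arange(0, 60, 0.01)
#   F, f, df = variance_spectrum(0.05 * np.sin(2 * np.pi * 1.5 * t), 100)
#   Hs = sig_wave_height(F, df)        # approximately 0.14 m
#   Tm = mean_wave_period(F, f, df)    # approximately 0.67 s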
def mean_water_height(eta, exp, start_index):
fan, h = [], []
for n, run in enumerate(exp.runs[:-1]):
fan.append(run.fan)
e = get_run_elevations(eta, start_index, n)
if n == 0:
offset = np.mean(e)
h.append(0)
else:
h.append(np.mean(e) - offset)
return np.array(fan), np.array(h)
def mean_slope(h1, h2, dx, rhow=1000, g=9.8, depth=0.42):
h1, h2 = np.array(h1), np.array(h2)
hmean = 0.5 * (h1 + h2) + depth
return rhow * g * hmean * (h2 - h1) / dx
def get_wave_properties(eta, exp, start_index):
fan, swh, mwp, Sxx = [], [], [], []
for n, run in enumerate(exp.runs[:-1]):
e = get_run_elevations(eta, start_index, n)
F, f, df = variance_spectrum(e, 100)
fan.append(run.fan)
swh.append(sig_wave_height(F, df))
mwp.append(mean_wave_period(F, f, df))
Sxx.append(radiation_stress(F, f, df, 0.42))
return np.array(fan), np.array(swh), np.array(mwp), np.array(Sxx)
path = os.environ['WAVEPROBE_DATA_PATH']
L2_DATA_PATH = os.environ['L2_DATA_PATH']
hidrag_path = '/home/milan/Work/sustain/data/hidrag'
frequency = 100 # Hz
run_length = 360 # s
fetch1_c18 = 6.02
fetch2_c18 = 8.71
fetch1_d04 = 4.592
fetch2_d04 = 8.991
dx_c18 = fetch2_c18 - fetch1_c18
dx_d04 = fetch2_d04 - fetch1_d04
# experiments to process
exp_name = 'asist-windonly-fresh'
exp = experiments[exp_name]
known_index = 241500
start_index_fan = 10
_, time, eta3 = read_wave_probe_csv(path + '/' + exp_name + '/ch3.csv')
_, time, eta4 = read_wave_probe_csv(path + '/' + exp_name + '/ch4.csv')
_, time, eta6 = read_wave_probe_csv(path + '/' + exp_name + '/ch6.csv')
start_index = known_index - (start_index_fan // 5)\
* run_length * frequency
start_index = 0 if start_index < 0 else start_index
fan, swh3, mwp3, Sxx3 = get_wave_properties(eta3, exp, start_index)
fan, swh4, mwp4, Sxx4 = get_wave_properties(eta4, exp, start_index)
fan, swh6, mwp6, Sxx6 = get_wave_properties(eta6, exp, start_index)
# radiation stress
Sxx3, Sxx4, Sxx6 = map(np.array, [Sxx3, Sxx4, Sxx6])
# radiation stress gradient
dSdx3 = (Sxx6 - Sxx3) / dx_c18
dSdx4 = (Sxx6 - Sxx4) / dx_c18
fan, h3 = mean_water_height(eta3, exp, start_index)
fan, h4 = mean_water_height(eta4, exp, start_index)
fan, h6 = mean_water_height(eta6, exp, start_index)
# mean slopes
s3 = (h6 - h3) / dx_c18
s4 = (h6 - h4) / dx_c18
# air pressure gradient
with Dataset(L2_DATA_PATH + '/air-pressure_asist-christian-shadowgraph.nc') as nc:
seconds = nc.variables['Time'][:]
seconds -= seconds[0]
origin = datetime.strptime(nc.variables['Time'].origin, '%Y-%m-%dT%H:%M:%S')
time_air = np.array([origin + timedelta(seconds=s) for s in seconds])
dpdx_air = nc.variables['dpdx'][:]
fan_air = nc.variables['fan'][:]
exp = experiments['asist-christian-shadowgraph']
dpdx_c18 = []
for run in exp.runs[:-1]:
t0 = run.start_time + timedelta(seconds=30)
t1 = run.end_time - timedelta(seconds=30)
mask = (time_air > t0) & (time_air < t1)
dpdx_c18.append(np.mean(dpdx_air[mask]))
rhow = 1000
rhoa = 1.15
g = 9.8
depth = 0.42
dpdx_c18 = - np.array(dpdx_c18) / (rhow * g)
### HIDRAG data
# Location of probes from entrance to tank
ps1 = 3.014
ps2 = 7.012
ps3 = 11.009
mat = loadmat(hidrag_path + '/uwvsu2-24.mat')
U = mat['ups'][0]
uw = mat['uw'][0]
LEG3 = mat['LEG3'][0] * 1e-2
LEG1 = mat['LEG1'][0] * 1e-2
M1 = mat['M1'][0]
M3 = mat['M3'][0]
ps13 = - mat['ps13'][0] * 1e-2
ps12 = - mat['ps12'][0] * 1e-2
dpdx = ps13 / (ps3 - ps1)
dpdx -= dpdx[0]
LEG3 -= LEG3[0]
LEG1 -= LEG1[0]
U = np.array([0] + list(U))
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, xlim=(0, 25))
plt.plot(U[1:], LEG1, 'b--', marker='o', ms=5, label='D04, #1, 4.6 m')
plt.plot(U[1:], LEG3, 'b-', marker='o', ms=5, label='D04, #2, 9.0 m')
plt.plot(U, h4, 'r--', marker='o', ms=5, label='C18, #1, 6.0 m')
plt.plot(U, h6, 'r-', marker='o', ms=5, label='C18, #2, 8.7 m')
plt.plot(U, leg1, 'r--', marker='*', ms=10, label='C18, LEG1')
plt.plot(U, leg2, 'r-', marker='*', ms=10, label='C18, LEG2')
plt.plot([0, 50], [0, 0], 'k--')
plt.legend(loc='lower left', fancybox=True, shadow=True)
plt.grid()
plt.xlabel('Wind speed [m/s]')
plt.ylabel('Mean surface elevation [m]')
plt.title('Mean elevation as function of wind speed')
plt.savefig('HIDRAG_elevation.png', dpi=100)
plt.close(fig)
slope_d04 = (LEG3 - LEG1) / dx_d04
slope_c18 = (h6 - h4) / dx_c18
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, xlim=(0, 25))
plt.plot(U[1:], slope_d04, 'b-', marker='o', ms=5, label='D04 dh/dx')
plt.plot(U[1:], dpdx, 'b-', marker='*', ms=8, label='D04 dp/dx')
plt.plot(U, slope_c18, 'r-', marker='o', ms=5, label='C18 dh/dx')
plt.plot(U, leg_slope, 'r-', marker='s', ms=5, label='C18 LEG dh/dx')
plt.plot(U[1:], dpdx_c18[1:], 'r-', marker='*', ms=8, label='C18 dp/dx')
plt.plot([0, 50], [0, 0], 'k--')
plt.legend(loc='upper left', fancybox=True, shadow=True)
plt.grid()
plt.xlabel('Wind speed [m/s]')
plt.ylabel('Elevation and pressure slope')
plt.title('Elevation and pressure slope vs wind speed')
plt.savefig('HIDRAG_slope.png', dpi=100)
plt.close(fig)
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, xlim=(0, 25))
plt.plot(U[1:], M1, 'b--', marker='o', ms=5, label='D04, #1, 4.6 m')
plt.plot(U[1:], M3, 'b-', marker='o', ms=5, label='D04, #2, 9.0 m')
plt.plot(U, Sxx4, 'r--', marker='o', ms=5, label='C18, #1, 6.0 m')
plt.plot(U, Sxx6, 'r-', marker='o', ms=5, label='C18, #2, 8.7 m')
plt.legend(loc='upper left', fancybox=True, shadow=True)
plt.grid()
plt.plot([0, 50], [0, 0], 'k--')
plt.xlabel('Wind speed [m/s]')
plt.ylabel(r'$S_{xx}$ [$kg/s^3$]')
plt.title('Radiation stress $S_{xx}$ vs wind speed')
plt.savefig('HIDRAG_Sxx.png', dpi=100)
plt.close(fig)
dSdx_d04 = (M3 - M1) / dx_d04
dSdx_c18 = (Sxx6 - Sxx4) / dx_c18
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111, xlim=(0, 25))
plt.plot(U[1:], dSdx_d04, 'b-', marker='o', ms=5, label='D04')
plt.plot(U, dSdx_c18, 'r-', marker='o', ms=5, label='C18')
plt.legend(loc='upper left', fancybox=True, shadow=True)
plt.grid()
plt.plot([0, 50], [0, 0], 'k--')
plt.xlabel('Wind speed [m/s]')
plt.ylabel(r'$dS_{xx}/dx$ [$N/m^2$]')
plt.title('Radiation stress gradient $dS_{xx}/dx$ vs wind speed')
plt.savefig('HIDRAG_dSdx.png', dpi=100)
plt.close(fig)
# Bottom stress from Brian
taub = rhow * np.array([.0007, .0014, .0013, .0025, .0030, .0038, .0054, .0040, .0061, .01, .0052, 0.0046])
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017-2018 <NAME> and collaborators.
# Licensed under the MIT License.
"""Train and use artificial neural network approximations ("regressions") of
synchrotron radiative transfer coefficients as a function of various physical
input parameters.
The meat of the neural network code is in the ``impl`` module to avoid
importing Keras unless needed.
"""
from __future__ import absolute_import, division, print_function
__all__ = '''
AbsLogMapping
DirectMapping
DomainRange
LogMapping
LogitMapping
Mapping
NegLogMapping
NinthRootMapping
SampleData
SignMapping
basic_load
detrivialize_stokes_basis
mapping_from_dict
mapping_from_samples
'''.split()
from collections import OrderedDict
from six.moves import range
import numpy as np
import pandas as pd
from pwkit.io import Path
class Mapping(object):
"""An abstract base class for parameter transformations.
Mapping classes are used to translate the physical parameters fed into
*neurosynchro* into normalized values that are easier to work with
numerically. You will not normally need to use them directly.
"""
desc = None # set by subclasses
trainer = None
phys_bounds_mode = 'empirical'
"Blah blah"
normalization_mode = 'gaussian'
out_of_sample = 'ignore'
def __init__(self, name):
self.name = name
@classmethod
def from_info_and_samples(cls, info, phys_samples):
"""Create a new :class:`Mapping` from a dictionary of information and a set of samples.
**Call signature**
*info*
A dictionary of attributes, passed into :meth:`Mapping.from_dict`.
*phys_samples*
A 1D Numpy array of samples of this parameter, in no particular order.
Return value
A new instance of :class:`Mapping` (or one of its subclasses) with initialized
bounds parameters.
"""
inst = cls.from_dict(info, load_bounds=False)
valid = np.isfinite(phys_samples) & inst._is_valid(phys_samples)
n_rej = phys_samples.size - valid.sum()
print('%s: rejecting %d samples out of %d' % (inst.name, n_rej, phys_samples.size))
phys_samples = phys_samples[valid]
if phys_samples.size < 3:
raise Exception('not enough valid samples for %s' % inst.name)
if inst.phys_bounds_mode == 'empirical':
inst.p_min = phys_samples.min()
inst.p_max = phys_samples.max()
elif inst.phys_bounds_mode == 'theta':
inst.p_min = 0.
inst.p_max = 0.5 * np.pi
else:
raise ValueError('unrecognized phys_bounds_mode value %r for %s' %
(inst.phys_bounds_mode, inst.name))
# Pluggable "transform"
transformed = inst._to_xform(phys_samples)
if inst.normalization_mode == 'gaussian':
inst.x_mean = transformed.mean()
inst.x_std = transformed.std()
elif inst.normalization_mode == 'unit_interval':
# Maps the physical values to the unit interval [0, 1].
inst.x_mean = transformed.min()
inst.x_std = transformed.max() - inst.x_mean
if inst.x_std == 0:
inst.x_std = 1.
else:
raise ValueError('unrecognized normalization_mode value %r for %s' %
(inst.normalization_mode, inst.name))
# Normalize
normed = (transformed - inst.x_mean) / inst.x_std
inst.n_min = normed.min()
inst.n_max = normed.max()
return inst
def __repr__(self):
return '<Mapping %s %s mean=%r sd=%r>' % (self.name, self.desc, self.x_mean, self.x_std)
def phys_to_norm(self, phys):
"""Map "physical" parameters to normalized values
**Argument**
*phys*
An array of "physical" input values (see :ref:`transformations`).
**Return values**
This method returns a tuple ``(normalized, oos)``.
*normalized*
The normalized versions of the input data.
*oos*
An array of booleans of the same shape as the input data. True
values indicate inputs that were out of the sample that was used to
define the mapping.
"""
# note: using prefix ~ instead of np.logical_not fails for scalars
oos = np.logical_not((phys >= self.p_min) & (phys <= self.p_max)) # catches NaNs
any_oos = np.any(oos)
if any_oos:
if self.out_of_sample == 'ignore':
pass
elif self.out_of_sample == 'clip':
phys = np.clip(phys, self.p_min, self.p_max)
elif self.out_of_sample == 'nan':
phys = phys.copy()
phys[oos] = np.nan
else:
raise Exception('unrecognized out-of-sample behavior %r' % self.out_of_sample)
return (self._to_xform(phys) - self.x_mean) / self.x_std, any_oos
def norm_to_phys(self, norm):
"""Map "normalized" parameters to "physical" values
**Argument**
*norm*
An array of "normalized" input values (see :ref:`transformations`).
**Return values**
This method returns a tuple ``(phys, oos)``.
*phys*
The physical versions of the input data.
*oos*
An array of booleans of the same shape as the input data. True
values indicate inputs that were out of the sample that was used to
define the mapping.
"""
oos = np.logical_not((norm >= self.n_min) & (norm <= self.n_max)) # catches NaNs
any_oos = np.any(oos)
if any_oos:
if self.out_of_sample == 'ignore':
pass
elif self.out_of_sample == 'clip':
norm = np.clip(norm, self.n_min, self.n_max)
elif self.out_of_sample == 'nan':
norm = norm.copy()
norm[oos] = np.nan
else:
raise Exception('unrecognized out-of-sample behavior %r' % self.out_of_sample)
return self._from_xform(norm * self.x_std + self.x_mean), any_oos
def to_dict(self):
"""Serialize this :class:`Mapping` into an ordered dictionary."""
d = OrderedDict()
d['name'] = self.name
d['maptype'] = self.desc
if self.phys_bounds_mode is not None:
d['phys_bounds_mode'] = self.phys_bounds_mode
if self.normalization_mode is not None:
d['normalization_mode'] = self.normalization_mode
if self.trainer is not None:
d['trainer'] = self.trainer
if self.out_of_sample is not None:
d['out_of_sample'] = self.out_of_sample
d['x_mean'] = self.x_mean
d['x_std'] = self.x_std
d['phys_min'] = self.p_min
d['phys_max'] = self.p_max
d['norm_min'] = self.n_min
d['norm_max'] = self.n_max
return d
@classmethod
def from_dict(cls, info, load_bounds=True):
"""Deserialize an ordered dictionary into a new :class:`Mapping` instance.
**Call signature**
*info*
A dictionary of parameters, as generated by :meth:`Mapping.to_dict`.
*load_bounds* (default: :const:`True`)
If true, deserialize bounds information such as the maximum and minimum
observed physical values. If :const:`False`, these are left uninitialized.
Return value
A new :class:`Mapping` instance.
"""
if str(info['maptype']) != cls.desc:
raise ValueError('info is for maptype %s but this class is %s' % (info['maptype'], cls.desc))
inst = cls(str(info['name']))
if 'phys_bounds_mode' in info:
inst.phys_bounds_mode = info['phys_bounds_mode']
if 'normalization_mode' in info:
inst.normalization_mode = info['normalization_mode']
if 'trainer' in info:
inst.trainer = info['trainer']
if 'out_of_sample' in info:
inst.out_of_sample = info['out_of_sample']
if load_bounds:
inst.x_mean = float(info['x_mean'])
inst.x_std = float(info['x_std'])
inst.p_min = float(info['phys_min'])
inst.p_max = float(info['phys_max'])
inst.n_min = float(info['norm_min'])
inst.n_max = float(info['norm_max'])
return inst
class AbsLogMapping(Mapping):
desc = 'abs_log'
def _to_xform(self, p):
return np.log10(np.abs(p))
def _from_xform(self, x):
return 10**x # XXX not invertible!
def _is_valid(self, p):
return p != 0
class DirectMapping(Mapping):
desc = 'direct'
def _to_xform(self, p):
return p
def _from_xform(self, x):
return x
def _is_valid(self, p):
return np.ones(p.shape, dtype=bool)
class LogMapping(Mapping):
desc = 'log'
def _to_xform(self, p):
return np.log10(p)
def _from_xform(self, x):
return 10**x
def _is_valid(self, p):
return (p > 0)
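# Illustrative sketch (added for clarity, not part of the original module): how a
# fitted LogMapping converts physical values to normalized space and back. The
# bound/moment attributes are filled in by hand purely for demonstration -- in real
# use they are produced by the training code or by Mapping.from_dict(); the example
# also assumes the constructor takes just a name, as from_dict() implies.
def _log_mapping_example():
    m = LogMapping('kappa')
    m.out_of_sample = 'clip'
    m.x_mean, m.x_std = 0.0, 1.0       # moments of log10(phys) over the training sample
    m.p_min, m.p_max = 1e-2, 1e2       # observed physical range
    m.n_min, m.n_max = -2.0, 2.0       # corresponding normalized range
    normed, oos = m.phys_to_norm(np.array([0.1, 10.0, 1e3]))   # 1e3 is out of sample
    phys, _ = m.norm_to_phys(normed)   # round-trips to [0.1, 10.0, 100.0] after clipping
    return normed, oos, phys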
class LogitMapping(Mapping):
desc = 'logit'
def _to_xform(self, p):
return np.log(p / (1. - p))
def _from_xform(self, x):
return np.exp(x) / (np.exp(x) + 1)
def _is_valid(self, p):
# Infinities are hard to deal with so we don't allow p = 0 or p = 1.
return (p > 0) & (p < 1)
class NegLogMapping(Mapping):
desc = 'neg_log'
def _to_xform(self, p):
return np.log10(-p)
def _from_xform(self, x):
return -(10**x)
def _is_valid(self, p):
return (p < 0)
class NinthRootMapping(Mapping):
desc = 'ninth_root'
def _to_xform(self, p):
return np.cbrt(np.cbrt(p))
def _from_xform(self, x):
return x**9
def _is_valid(self, p):
return np.ones(p.shape, dtype=bool)
class SignMapping(Mapping):
desc = 'sign'
def _to_xform(self, p):
return np.sign(p)
import util
import numpy as np
import tensorflow as tf
from keras.utils.np_utils import *
import riemannian
from scipy import signal
import pyriemann
from pyriemann.utils.mean import mean_covariance
MOVEMENT_START = 1 * 160 # MI starts 1s after trial begin
MOVEMENT_END = 5 * 160 # MI lasts 4 seconds
NOISE_LEVEL = 0.01
clas = 4
fc = 160
aug = 40
ntrials = 84
def load_raw_data(electrodes, subject=None, num_classes=2, long_edge=False):
# load from file
trials = []
labels = []
if subject is None:
# subject_ids = range(1, 110)
subject_ids = range(1, 11)
else:
try:
subject_ids = [int(subject)]
except:
subject_ids = subject
for subject_id in subject_ids:
print("load subject %d" % (subject_id,))
t, l, loc, fs = util.load_physionet_data(subject_id, num_classes, long_edge=long_edge)
if num_classes == 2 and t.shape[0] != 42:
# drop subjects with less trials
continue
trials.append(t[:, :, electrodes])
labels.append(l)
return np.array(trials).reshape((len(trials),) + trials[0].shape), np.array(labels)
def split_idx( idx,a,b):
"""
Shuffle and split a list of indices into training, validation and test sets.
idx: array of indices to split (shuffled in place)
a: number of tenths of the data used for training
b: number of tenths of the data used for testing; the remainder is validation
Note: the RandomState below is not seeded, so the split changes between runs.
"""
rs = np.random.RandomState()
rs.shuffle(idx)
start = int(a / 10. * len(idx))
end = int((b+a) / 10. * len(idx))
train_idx = idx[0:start]
test_idx = idx[start:end]
val_idx = idx[end:]
return train_idx, val_idx, test_idx
# return train_idx, test_idx
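# Illustrative sketch (added for clarity, not part of the original file): splitting
# 100 trial indices with 6 tenths for training and 2 tenths for testing; the
# remaining 2 tenths become validation. Because the RandomState is unseeded, the
# membership of each split changes from run to run, but the sizes do not.
def _split_idx_example():
    idx = np.arange(100)
    train_idx, val_idx, test_idx = split_idx(idx, 6, 2)
    return len(train_idx), len(val_idx), len(test_idx)   # -> (60, 20, 20)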
def n_classfilter(x,y,arg):
# x = np.squeeze(x)
signal = np.zeros((5,)+x.shape)
label = np.zeros((5,)+y.shape)
signal[0,:] = filter(x,0.5,4,arg)
label[0,:] = y
signal[1, :] = filter(x, 4, 8,arg)
label[1, :] = y
signal[2, :] = filter(x, 8, 13,arg)
label[2, :] = y
signal[3, :] = filter(x, 13, 32,arg)
label[3, :] = y
signal[4, :] = filter(x, 32, 50,arg)
label[4, :] = y
return signal,label
def filter(x,low_filter,high_filter, aru):
Wn = [low_filter*2/fc,high_filter*2/fc]
b, a = signal.butter(3, Wn, 'bandpass')
# x = x.transpose((0, 1, 2, 4, 3))
fdata = np.zeros(x.shape)
if aru:
for i in range(len(x)):
for j in range(x.shape[1]):
for k in range(x.shape[2]):
for l in range(x.shape[4]):
fdata[i, j, k, :, l] = signal.filtfilt(b, a, x[i, j, k, :, l])
# fdata = fdata.transpose((0, 1, 2, 4, 3))
return fdata
else:
for i in range(len(x)):
for j in range(x.shape[1]):
for l in range(x.shape[3]):
fdata[i, j, :, l] = signal.filtfilt(b, a, x[i, j, :, l])
# fdata = fdata.transpose((0, 1, 2, 4, 3))
return fdata
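# Illustrative sketch (added for clarity, not part of the original file): the same
# 3rd-order Butterworth band-pass that filter() applies, shown on one synthetic
# channel. For the 8-13 Hz band at fc = 160 Hz the normalized band edges are
# Wn = [8*2/160, 13*2/160] = [0.1, 0.1625].
def _bandpass_example():
    t = np.arange(0, 4, 1. / fc)                                   # 4 s sampled at 160 Hz
    x = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 50 * t)    # 10 Hz passes, 50 Hz is removed
    b, a = signal.butter(3, [8 * 2 / fc, 13 * 2 / fc], 'bandpass')
    return signal.filtfilt(b, a, x)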
def n_class_signal_mapping(x_train, y_train, x_val, y_val):
x1_train = np.zeros((x_train.shape[0:4] + (64,64, )))
y1_train = np.zeros((y_train.shape))
x1_val = np.zeros((x_val.shape[0:3] + (64, 64,)))
y1_val = np.zeros((y_val.shape))
for j in range(len(x_train)):
x1_train[j, :], y1_train[j, :], x1_val[j, :], y1_val[j, :] = signal_mapping(x_train[j], y_train[j], x_val[j], y_val[j])
print("yes")
x1_train = x1_train.transpose(1, 2, 3, 4, 5, 0)
x1_val = x1_val.transpose(1, 2, 3, 4, 0)
# y1_train = y1_train[0]
# y1_val = y1_val[0]
return x1_train, y1_train, x1_val, y1_val
def signal_mapping(x_train, y_train, x_val, y_val):
# training set
signals1,core = Signals_Covariance(x_train,None,mean_all=True)
# validation/test set
signals2 = Signals_Covariance(x_val, core, mean_all=False)
# y_test = y_val.reshape((-1,))
# y_train = y_train.reshape((-1,))
return signals1,y_train,signals2,y_val
def Signals_Covariance(signals,core_test,mean_all=True):
if mean_all:
signal = signals.reshape((-1,) + (signals.shape[-2:]))
signal = np.transpose(signal, axes=[0, 2, 1])
x_out = pyriemann.estimation.Covariances().fit_transform(signal)
core = mean_covariance(x_out, metric='riemann')
# core = training_data_cov_means(X,y,num_classes=4)
core = core ** (-1 / 2)
signal1, core = signal_covar(signals, core, mean_all)
return signal1,core
else:
core = core_test ** (-1 / 2)
signal1 = signal_covar(signals, core, mean_all)
return signal1
# Compute covariances of the input array and return a result of matching dimensionality: [n,84,q,960,64] -> [n,84,q,64,960] -> [n,84,q,64,64]
# [n, 84, 960, 64] -> [n, 84, 64, 960] -> [n, 84, 64, 64]
def signal_covar(signal,core, mean_all):
if mean_all:
signal = np.transpose(signal, axes=[0, 1, 2, 4, 3])
signals = np.zeros((signal.shape[0:4])+(64,))
for i in range(len(signal)):
for j in range(signal.shape[1]):
signal1 = pyriemann.estimation.Covariances().fit_transform(signal[i, j, :])
signal2 = core * signal1 * core
signals[i, j, :] = np.log(signal2)
return signals, core
else:
signal = np.transpose(signal, axes=[0, 1, 3, 2])
signals = np.zeros((signal.shape[0:3]) + (64,))
for i in range(len(signal)):
signal1 = pyriemann.estimation.Covariances().fit_transform(signal[i, :])
signal2 = core * signal1 * core
signals[i, :] = np.log(signal2)
return signals
# Gaussian augmentation of the training set, then split the data into frequency bands
def AugmentG(X, y, augmulitple,train_idx, val_idx, test_idx):
x_train, y_train = AddGussio(X[train_idx], y[train_idx], 0, 0.01, augmulitple=augmulitple)
# x_train, y_train = n_classfilter(x_train, y_train, arg = True)
x_val = X[val_idx]
# x_val, y_val = n_classfilter(X[val_idx], y[val_idx], arg = False)
y_val = y[val_idx]
# x_train = np.transpose(x_train, axes=[1, 2, 3, 4, 5, 0])
# y_train = np.transpose(y_train,axes=[1,2,3,0])
# x_val = np.transpose(x_val,axes=[1,2,3,4,0])
# y_val = np.transpose(y_val,axes=[1,2,0])
x_test = X[test_idx]
y_test = y[test_idx]
return x_train, y_train, x_val, y_val, x_test, y_test
def AugmentAver(X, y, augmulitple,train_idx, val_idx, test_idx):
x_train = X[train_idx]
y_train = y[train_idx]
x_val = X[val_idx]
y_val = y[val_idx]
x_test = X[test_idx]
y_test = y[test_idx]
xout = np.zeros((x_train.shape[:2]+(augmulitple,)+x_train.shape[-2:]))
yout = np.zeros((x_train.shape[:2]+(augmulitple,)))
for i in range(len(x_train)):
for j in range(x_train.shape[1]):
k = 0
while k < augmulitple:
subject = np.random.choice(len(x_train))
trial = np.random.randint(0, x_train.shape[1])
if y_train[i,j] == y_train[subject,trial]:
xout[i, j, k, :] = (x_train[i,j,:] + x_train[subject,trial,:])/2.
yout[i, j, k] = y_train[i, j]
k = k+1
return xout, yout, x_val, y_val, x_test, y_test
def AddGussio(x,y,sigam,mu,augmulitple):
# x = np.squeeze(x)
signal = np.zeros((len(x),) + (x.shape[1],) + (augmulitple,) + x.shape[-2:])
labels=np.zeros((len(x),)+((x.shape[1]),) + (augmulitple,))
for i in range(augmulitple):
# TODO: augment the (960, 64) sample points; add two for loops
for j in range(len(x)):
for k in range(x.shape[1]):
x1 = np.random.normal(loc=sigam, scale=mu, size=(960,64))
# ix = i*int(x.shape[1]) +k
signal[j,k,i, :,:] = x[j,k,:,:] + x1
labels[j,k,i] = y[j,k]
return signal,labels
def crossval_gen(X, y, batch_size,nsample):
"""
Generator that produces training batches in an infinite loop by
randomly selecting them from the training data, normalizing them,
and adding a little noise
"""
while True:
# X = X.reshape((X.shape+(1,)))
Xout = np.zeros((batch_size,nsample,64))
yout = np.zeros((batch_size))
for i in range(batch_size):
# randomly choose subject and trial
subject = np.random.choice(len(X))
trial = np.random.randint(0, X.shape[1])
# Xout[i,:] = X[subject, trial, :,:]
augment = np.random.randint(0, X.shape[2])
import numpy as np
import torch
import warnings
import time
from PIL import Image
import random
def noise_matrix(mixing_ratio, num_classes, noise_type, class_to_idx):
if noise_type == 'unif':
return uniform_mix_C(mixing_ratio, num_classes)
elif noise_type == 'flip':
return flip_labels_C(mixing_ratio, num_classes)
elif noise_type == 'flip2':
return flip_labels_C_two(mixing_ratio, num_classes)
elif noise_type == 'asym':
return asym_noise(mixing_ratio, num_classes, class_to_idx)
else:
raise NotImplementedError
def asym_noise(mixing_ratio, num_classes, class_to_idx=None):
if num_classes == 10:
P = np.eye(10)
# automobile <- truck
P[9, 9], P[9, 1] = 1. - mixing_ratio, mixing_ratio
# bird -> airplane
P[2, 2], P[2, 0] = 1. - mixing_ratio, mixing_ratio
# cat <-> dog
P[3, 3], P[3, 5] = 1. - mixing_ratio, mixing_ratio
P[5, 5], P[5, 3] = 1. - mixing_ratio, mixing_ratio
# deer -> horse
P[4, 4], P[4, 7] = 1. - mixing_ratio, mixing_ratio
return P
else:
super_class = {}
super_class['aquatic mammals'] = [
'beaver', 'dolphin', 'otter', 'seal', 'whale']
super_class['fish'] = ['aquarium_fish',
'flatfish', 'ray', 'shark', 'trout']
super_class['flowers'] = [
'orchid', 'poppy', 'rose', 'sunflower', 'tulip']
super_class['food containers'] = [
'bottle', 'bowl', 'can', 'cup', 'plate']
super_class['fruit and vegetables'] = [
'apple', 'mushroom', 'orange', 'pear', 'sweet_pepper']
super_class['household electrical devices'] = [
'clock', 'keyboard', 'lamp', 'telephone', 'television']
super_class['household furniture'] = [
'bed', 'chair', 'couch', 'table', 'wardrobe']
super_class['insects'] = ['bee', 'beetle',
'butterfly', 'caterpillar', 'cockroach']
super_class['large carnivores'] = [
'bear', 'leopard', 'lion', 'tiger', 'wolf']
super_class['large man-made outdoor things'] = ['bridge',
'castle', 'house', 'road', 'skyscraper']
super_class['large natural outdoor scenes'] = [
'cloud', 'forest', 'mountain', 'plain', 'sea']
super_class['large omnivores and herbivores'] = [
'camel', 'cattle', 'chimpanzee', 'elephant', 'kangaroo']
super_class['medium mammals'] = [
'fox', 'porcupine', 'possum', 'raccoon', 'skunk']
super_class['non-insect invertebrates'] = ['crab',
'lobster', 'snail', 'spider', 'worm']
super_class['people'] = ['baby', 'boy', 'girl', 'man', 'woman']
super_class['reptiles'] = ['crocodile',
'dinosaur', 'lizard', 'snake', 'turtle']
super_class['small mammals'] = ['hamster',
'mouse', 'rabbit', 'shrew', 'squirrel']
super_class['trees'] = ['maple_tree', 'oak_tree',
'palm_tree', 'pine_tree', 'willow_tree']
super_class['vehicles 1'] = ['bicycle', 'bus',
'motorcycle', 'pickup_truck', 'train']
super_class['vehicles 2'] = ['lawn_mower',
'rocket', 'streetcar', 'tank', 'tractor']
pass
P = np.eye(100)
for k, v in super_class.items():
for idx in range(5):
src_class, tgt_class = class_to_idx[v[idx]], class_to_idx[v[(
idx+1) % 5]]
P[src_class, src_class], P[src_class, tgt_class] = 1. - \
mixing_ratio, mixing_ratio
return P
def uniform_mix_C(mixing_ratio, num_classes):
'''
returns a linear interpolation of a uniform matrix and an identity matrix
'''
return mixing_ratio * np.full((num_classes, num_classes), 1 / num_classes) + \
(1 - mixing_ratio) * np.eye(num_classes)
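# Illustrative sketch (added for clarity, not part of the original file): with two
# classes and a mixing ratio of 0.4 the transition matrix is
# 0.4 * [[0.5, 0.5], [0.5, 0.5]] + 0.6 * I = [[0.8, 0.2], [0.2, 0.8]],
# i.e. every label keeps its true class with probability 0.8.
def _uniform_mix_example():
    return uniform_mix_C(0.4, 2)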
def flip_labels_C(corruption_prob, num_classes):
'''
returns a matrix with (1 - corruption_prob) on the diagonals, and corruption_prob
concentrated in only one other entry for each row
'''
# np.random.seed(seed)
C = np.eye(num_classes) * (1 - corruption_prob)
row_indices = np.arange(num_classes)
for i in range(num_classes):
C[i][np.random.choice(row_indices[row_indices != i])] = corruption_prob
return C
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 10:45:47 2020
@author: ben05
"""
import numpy as np
from scipy import stats
import sys, os, h5py, glob, csv
import io, re
import pointCollection as pc
import importlib.resources
from netCDF4 import Dataset
import matplotlib.pyplot as plt
from ATL14_attrs_meta import write_atl14meta
#from ATL11.h5util import create_attribute
def ATL14_write2nc(args):
dz_dict ={'x':'x', # ATL14 varname : z0.h5 varname
'y':'y',
'h':'z0',
'h_sigma':'sigma_z0',
'cell_area':'cell_area',
'ice_mask':'mask',
'data_count':'count',
'misfit_rms':'misfit_rms',
'misfit_scaled_rms':'misfit_scaled_rms',
}
nctype = {'float64':'f8',
'float32':'f4',
'int8':'i1'}
# establish output file
fileout = args.base_dir.rstrip('/') + '/ATL14_' + args.region + '_' + args.cycles + '_100m_' + args.Release + '_' + args.version +'.nc'
print('output file:',fileout)
with Dataset(fileout,'w',clobber=True) as nc:
nc.setncattr('GDAL_AREA_OR_POINT','Area')
nc.setncattr('Conventions','CF-1.6')
if args.region in ['AK','CN','CS','GL','IC','SV','RU']:
crs_var = nc.createVariable('Polar_Stereographic',np.byte,())
crs_var.standard_name = 'Polar_Stereographic'
crs_var.grid_mapping_name = 'polar_stereographic'
crs_var.straight_vertical_longitude_from_pole = -45.0
crs_var.latitude_of_projection_origin = 90.0
crs_var.standard_parallel = 70.0
crs_var.scale_factor_at_projection_origin = 1.
crs_var.false_easting = 0.0
crs_var.false_northing = 0.0
crs_var.semi_major_axis = 6378.137
crs_var.semi_minor_axis = 6356.752
crs_var.inverse_flattening = 298.257223563
crs_var.spatial_epsg = '3413'
crs_var.spatial_ref = 'PROJCS["WGS 84 / NSIDC Sea Ice Polar Stereographic North",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",70],PARAMETER["central_meridian",-45],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3413"]]'
crs_var.crs_wkt = ('PROJCS["WGS 84 / NSIDC Sea Ice Polar Stereographic North",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",70],PARAMETER["central_meridian",-45],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["X",EAST],AXIS["Y",NORTH],AUTHORITY["EPSG","3413"]]')
elif args.region == 'AA':
crs_var = nc.createVariable('Polar_Stereographic',np.byte,())
crs_var.standard_name = 'Polar_Stereographic'
crs_var.grid_mapping_name = 'polar_stereographic'
crs_var.straight_vertical_longitude_from_pole = 0.0
crs_var.latitude_of_projection_origin = -90.0
crs_var.standard_parallel = -71.0
crs_var.scale_factor_at_projection_origin = 1.
crs_var.false_easting = 0.0
crs_var.false_northing = 0.0
crs_var.semi_major_axis = 6378.137
crs_var.semi_minor_axis = 6356.752
crs_var.inverse_flattening = 298.257223563
crs_var.spatial_epsg = '3031'
crs_var.spatial_ref = 'PROJCS["WGS 84 / Antarctic Polar Stereographic",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",-71],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","3031"]]'
crs_var.crs_wkt = ('PROJCS["WGS 84 / Antarctic Polar Stereographic",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]],PROJECTION["Polar_Stereographic"],PARAMETER["latitude_of_origin",-71],PARAMETER["central_meridian",0],PARAMETER["scale_factor",1],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AXIS["Easting",EAST],AXIS["Northing",NORTH],AUTHORITY["EPSG","3031"]]')
# make tile_stats group (ATBD 4.1.2.1, Table 3)
tilegrp = nc.createGroup('tile_stats')
tile_stats = {'x': { 'data': [], 'description':'tile-center x-coordinate, in projected coordinates', 'mapped':np.array(())},
'y': { 'data': [], 'description':'tile-center y-coordinate, in projected coordinates', 'mapped':np.array(())},
'N_data': { 'data': [], 'description':'number of data used in fit', 'mapped':np.array(())},
'RMS_data': { 'data': [], 'description':'root mean of squared, scaled data misfits', 'mapped':np.array(())},
'RMS_bias': { 'data': [], 'description':'root mean of squared, scaled bias values', 'mapped':np.array(())},
'N_bias': { 'data': [], 'description':'number of bias values solved for', 'mapped':np.array(())},
'RMS_d2z0dx2': { 'data': [], 'description':'root mean square of the constraint equation residuals for the second spatial derivative of z0', 'mapped':np.array(())},
'RMS_d2zdt2': { 'data': [], 'description':'root mean square of the constraint equation residuals for the second temporal derivative of dz', 'mapped':np.array(())},
'RMS_d2zdx2dt' : { 'data': [], 'description':'root mean square of the constraint equation residuals for the second temporal derivative of dz/dt', 'mapped':np.array(())}
}
# work through the tiles in all three subdirectories
for sub in ['centers','edges','corners']:
files = os.listdir(os.path.join(args.base_dir,sub))
for file in files:
try:
tile_stats['x']['data'].append(int(re.match(r'^.*E(.*)\_.*$',file).group(1)))
except Exception as e:
print(f"problem with [ {file} ], skipping")
continue
tile_stats['y']['data'].append(int(re.match(r'^.*N(.*)\..*$',file).group(1)))
with h5py.File(os.path.join(args.base_dir,sub,file),'r') as h5:
tile_stats['N_data']['data'].append( np.sum(h5['data']['three_sigma_edit'][:]) )
tile_stats['RMS_data']['data'].append( h5['RMS']['data'][()] ) # use () for getting a scalar.
tile_stats['RMS_bias']['data'].append( np.sqrt(np.mean((h5['bias']['val'][:]/h5['bias']['expected'][:])**2)) )
tile_stats['N_bias']['data'].append( len(h5['bias']['val'][:]) ) #### or all BUT the zeros.
tile_stats['RMS_d2z0dx2']['data'].append( h5['RMS']['grad2_z0'][()] )
tile_stats['RMS_d2zdt2']['data'].append( h5['RMS']['d2z_dt2'][()] )
tile_stats['RMS_d2zdx2dt']['data'].append( h5['RMS']['grad2_dzdt'][()] )
# establish output grids
for key in tile_stats.keys():
if 'x' == key or 'y' == key or 'N_data' == key: #integers
tile_stats[key]['mapped'] = np.zeros( [len(np.arange(np.min(tile_stats['y']['data']),np.max(tile_stats['y']['data'])+40,40)),
len(np.arange(np.min(tile_stats['x']['data']),np.max(tile_stats['x']['data'])+40,40))],
dtype=int)
else:
tile_stats[key]['mapped'] = np.zeros( [len(np.arange(np.min(tile_stats['y']['data']),np.max(tile_stats['y']['data'])+40,40)),
len(np.arange(np.min(tile_stats['x']['data']),np.max(tile_stats['x']['data'])+40,40))],
dtype=float)
# fill grids
for i, (yt,xt) in enumerate(zip(tile_stats['y']['data'],tile_stats['x']['data'])):
for key in tile_stats.keys():
# divide the km offsets by the 40-km tile spacing to get grid indices
if 'x' not in key and 'y' not in key:
tile_stats[key]['mapped'][int((yt-np.min(tile_stats['y']['data']))/40),int((xt-np.min(tile_stats['x']['data']))/40)] = \
tile_stats[key]['data'][i]
tile_stats[key]['mapped'] = np.ma.masked_where(tile_stats[key]['mapped'] == 0, tile_stats[key]['mapped'])
# make dimensions, fill them as variables
tilegrp.createDimension('y',len(np.arange(np.min(tile_stats['y']['data']),np.max(tile_stats['y']['data'])+40,40)))
y = tilegrp.createVariable('y', np.dtype('int32'), ('y',))
y[:]=np.arange(np.min(tile_stats['y']['data']),np.max(tile_stats['y']['data'])+40,40) * 1000 # convert from km to meter
y.units = 'meter'
y.description = tile_stats['y']['description']
y.grid_mapping = 'Polar_Stereographic'
tilegrp.createDimension('x',len(np.arange(np.min(tile_stats['x']['data']),np.max(tile_stats['x']['data'])+40,40)))
x = tilegrp.createVariable('x', np.dtype('int32'), ('x',))
x[:]=np.arange(np.min(tile_stats['x']['data']),np.max(tile_stats['x']['data'])+40,40) * 1000 # convert from km to meter
import os
import numpy as np
import time
import matplotlib.pyplot as plt; plt.ion()
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import Planner
def tic():
return time.time()
def toc(tstart, nm=""):
print('%s took: %s sec.\n' % (nm,(time.time() - tstart)))
def load_map(fname):
'''
Loads the bounady and blocks from map file fname.
boundary = [['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b']]
blocks = [['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b'],
...,
['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b']]
'''
mapdata = np.loadtxt(fname,dtype={'names': ('type', 'xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b'),\
'formats': ('S8','f', 'f', 'f', 'f', 'f', 'f', 'f','f','f')})
blockIdx = mapdata['type'] == b'block'
boundary = mapdata[~blockIdx][['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b']].view('<f4').reshape(-1,11)[:,2:]
blocks = mapdata[blockIdx][['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax','r','g','b']].view('<f4').reshape(-1,11)[:,2:]
return boundary, blocks
def draw_map(boundary, blocks, start, goal):
'''
Visualization of a planning problem with environment boundary, obstacle blocks, and start and goal points
'''
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
hb = draw_block_list(ax,blocks)
hs = ax.plot(start[0:1],start[1:2],start[2:],'ro',markersize=7,markeredgecolor='k')
hg = ax.plot(goal[0:1],goal[1:2],goal[2:],'go',markersize=7,markeredgecolor='k')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_xlim(boundary[0,0],boundary[0,3])
ax.set_ylim(boundary[0,1],boundary[0,4])
ax.set_zlim(boundary[0,2],boundary[0,5])
return fig, ax, hb, hs, hg
def draw_block_list(ax,blocks):
'''
Subroutine used by draw_map() to display the environment blocks
'''
v = np.array([[0,0,0],[1,0,0],[1,1,0],[0,1,0],[0,0,1],[1,0,1],[1,1,1],[0,1,1]],dtype='float')
f = np.array([[0,1,5,4],[1,2,6,5],[2,3,7,6],[3,0,4,7],[0,1,2,3],[4,5,6,7]])
clr = blocks[:,6:]/255
n = blocks.shape[0] # n obstacles
d = blocks[:,3:6] - blocks[:,:3]
vl = np.zeros((8*n,3)) # 8 vertices
fl = np.zeros((6*n,4),dtype='int64') # 6 faces
fcl = np.zeros((6*n,3)) # face color
for k in range(n):
vl[k*8:(k+1)*8,:] = v * d[k] + blocks[k,:3]
fl[k*6:(k+1)*6,:] = f + k*8
fcl[k*6:(k+1)*6,:] = clr[k,:]
if type(ax) is Poly3DCollection:
ax.set_verts(vl[fl])
else:
pc = Poly3DCollection(vl[fl], alpha=0.25, linewidths=1, edgecolors='k')
pc.set_facecolor(fcl)
h = ax.add_collection3d(pc)
return h
def runtest(mapfile, start, goal, verbose, **kwargs):
'''
# res=1, eps=10, stopping_criteria="res"
This function:
* load the provided mapfile
* creates a motion planner
* plans a path from start to goal
* checks whether the path is collision free and reaches the goal
* computes the path length as a sum of the Euclidean norm of the path segments
'''
# Load a map and instantiate a motion planner
boundary, blocks = load_map(mapfile)
MP = Planner.AStarPlanner(boundary, blocks) # TODO: replace this with your own planner implementation
# Display the environment
if verbose:
fig, ax, hb, hs, hg = draw_map(boundary, blocks, start, goal)
# Call the motion planner
t0 = tic()
path, closed = MP.plan(start, goal, **kwargs)
toc(t0,f"Planning for {os.path.basename(mapfile).split('.')[0]}")
print(f"Expanded {len(closed)} nodes")
# Plot the path
if verbose:
# x,y,z = zip(*closed)
# ax.scatter(x,y,z)
ax.plot(path[:,0],path[:,1],path[:,2],'r-')
# TODO: You should verify whether the path actually intersects any of the obstacles in continuous space
# TODO: You can implement your own algorithm or use an existing library for segment and
# axis-aligned bounding box (AABB) intersection
collision = False
goal_reached = sum((path[-1]-goal)**2) <= 0.1
success = (not collision) and goal_reached
pathlength = np.sum(np.sqrt(np.sum(np.diff(path,axis=0)**2,axis=1)))
return success, pathlength
def test_single_cube(verbose = False):
print('Running single cube test...\n')
start = np.array([2.3, 2.3, 1.3])
goal = np.array([7.0, 7.0, 5.5])
success, pathlength = runtest('./maps/single_cube.txt', start, goal, verbose, res=1, eps=10)
print('Success: %r'%success)
print('Path length: %d'%pathlength)
print('\n')
def test_maze(verbose = False):
print('Running maze test...\n')
start = np.array([0.0, 0.0, 1.0])
goal = np.array([12.0, 12.0, 5.0])
#!/usr/bin/env python
# In case of poor (Sh***y) commenting contact <EMAIL>
# Basic
import sys
# import os
# Testing
# import pdb
# import time, timeit
# import line_profiler
# Analysis
from scipy.integrate import dblquad, odeint
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import h5py
import yaml
# from math import *
# Speed
from numba import jit, njit
# Other importing
# sys.path.append(os.path.join(os.path.dirname(__file__), '[PATH]'))
from FP_helpers import spring_torque_ang
"""@package docstring
File: TB_ODE_solver.py
Author: <NAME>
Email: <EMAIL>
Description:
"""
@jit
def xlink_stretch_ang(s1, s2, phi):
"""!TODO: Docstring for xlink_stretch_ang.
@param s1: TODO
@param s2: TODO
@param phi: TODO
@return: TODO
"""
return np.sqrt((s1 * s1) + (s2 * s2) - (2. * s1 * s2 * np.cos(phi)))
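# Illustrative sketch (added for clarity, not part of the original file):
# xlink_stretch_ang is just the law of cosines. With s1 = s2 = 1 the stretch is
# sqrt(2 - 2*cos(phi)): 2 for antiparallel arms (phi = pi) and 0 for phi = 0.
def _xlink_stretch_example():
    return xlink_stretch_ang(1.0, 1.0, np.pi), xlink_stretch_ang(1.0, 1.0, 0.0)   # -> (2.0, 0.0)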
import os
import numpy as np
import h5py
import random as rn
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms, utils
import librosa
import cv2
import albumentations as A
from torchvision import transforms
from scipy.spatial import procrustes
from scipy import signal
import scipy.ndimage.filters as fi
def gkern2(means, nsig=9):
"""Returns a 2D Gaussian kernel array."""
inp = np.zeros((128, 128))
if int(means[1]) > 127 or int(means[0]) > 127:
inp[92, 92] = 1
else:
inp[int(means[1]), int(means[0])] = 1
return fi.gaussian_filter(inp, nsig)
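# Illustrative sketch (added for clarity, not part of the original file): building
# the 128x128 attention map for a landmark at (x, y) = (64, 70) and normalizing it
# to a peak of 1, mirroring how FaceDset.__getitem__ uses gkern2 further below.
def _gkern2_example():
    att = gkern2(np.array([64.0, 70.0]))
    att = att / np.max(att)            # same normalization as in the dataset class
    return att.shape, att[70, 64]      # -> ((128, 128), 1.0); the peak sits at (row=y, col=x)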
emotion_dict = {'ANG':0, 'DIS':1, 'FEA':2, 'HAP':3, 'NEU':4, 'SAD':5}
intensity_dict = {'XX':0, 'LO':1, 'MD':2, 'HI':3}
class DatasetContainer():
def __init__(self, args, val=False):
self.args = args
self.filelist = []
if not val:
path = self.args.in_path
else:
path = self.args.val_path
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
if os.path.splitext(filename)[1] == '.hdf5':
labels = os.path.splitext(filename)[0].split('_')
emotion = emotion_dict[labels[2]]
emotion_intensity = intensity_dict[labels[3]]
if val:
if emotion_intensity != 3:
continue
self.filelist.append((root, filename, emotion, emotion_intensity))
self.filelist = np.array(self.filelist)
print('Num files: ', len(self.filelist))
def getDset(self):
return FaceDset(self.filelist, self.args)
class FaceDset(Dataset):
def __init__(self, filelist, args):
self.filelist = filelist
self.args = args
self.transform = transforms.Compose([transforms.ToTensor()])
target = {}
for i in range(1, self.args.num_frames):
target['image' + str(i)] = 'image'
self.augments = A.Compose([
A.RandomBrightnessContrast(p=0.2),
A.RandomGamma(p=0.2),
A.CLAHE(p=0.2),
A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=0.2),
A.ChannelShuffle(p=0.2),
A.RGBShift(p=0.2),
A.RandomBrightness(p=0.2),
A.RandomContrast(p=0.2),
# A.HorizontalFlip(p=0.5),
A.GaussNoise(var_limit=(10.0, 50.0), p=0.25)
], additional_targets=target, p=0.8)
self.c_augments = A.Compose([A.GaussNoise(p=1),
], p=0.5)
self.normTransform = A.Compose([
A.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), always_apply=True)
], additional_targets=target, p=1)
def __len__(self):
return len(self.filelist)
# def normFrame(self, frame):
# normTransform = self.normTransform(image=frame)
# frame = self.transform(normTransform['image'])
# return frame
def normFrame(self, frame):
normTransform = self.normTransform(image=frame)
frame = normTransform['image']
frame = np.moveaxis(frame, 2, 0)
return torch.from_numpy(frame)
def augmentVideo(self, video):
args = {}
args['image'] = video[0, :, :, :]
for i in range(1, self.args.num_frames):
args['image' + str(i)] = video[i, :, :, :]
result = self.augments(**args)
video[0, :, :, :] = result['image']
for i in range(1, self.args.num_frames):
video[i, :, :, :] = result['image' + str(i)]
return video
def __getitem__(self, idx):
filename = self.filelist[idx]
emotion = int(filename[2])
emotion = to_categorical(emotion, num_classes=6)
emotion_intensity = int(filename[3]) # We don't use this info
filename = filename[:2]
dset = h5py.File(os.path.join(*filename), 'r')
try:
idx = np.random.randint(dset['video'].shape[0]-self.args.num_frames, size=1)[0]
except:
return self.__getitem__(np.random.randint(len(self.filelist)-1, size=1)[0])
video = dset['video'][idx:idx+self.args.num_frames, :, :, :]
lmarks = dset['lmarks'][idx:idx+self.args.num_frames, 48:, :]
lmarks = np.mean(lmarks, axis=1)
video = self.augmentVideo(video)
att_list = []
video_normed = []
for i in range(video.shape[0]):
video_normed.append(self.normFrame(video[i, :, :, :]))
att = gkern2(lmarks[i, :])
att = att / np.max(att)
att_list.append(att)
video_normed = torch.stack(video_normed, 0)
att_list = np.array(att_list)
speech = dset['speech'][:]
speech = speech/np.max(np.abs(speech))
speech = speech[ int(idx*self.args.increment): int((idx+self.args.num_frames)*self.args.increment)]
speech = np.reshape(speech, (1, speech.shape[0]))
if speech.shape[1] != self.args.increment*self.args.num_frames:
return self.__getitem__(np.random.randint(len(self.filelist)-1, size=1)[0])
return speech, video_normed, att_list, emotion
def to_categorical(y, num_classes=None, dtype='float32'):
"""Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
(integers from 0 to num_classes).
num_classes: total number of classes.
dtype: The data type expected by the input, as a string
(`float32`, `float64`, `int32`...)
# Returns
A binary matrix representation of the input. The classes axis
is placed last.
# Example
```python
# Consider an array of 5 labels out of a set of 3 classes {0, 1, 2}:
> labels
array([0, 2, 1, 2, 0])
# `to_categorical` converts this into a matrix with as many
# columns as there are classes. The number of rows
# stays the same.
> to_categorical(labels)
array([[ 1., 0., 0.],
[ 0., 0., 1.],
[ 0., 1., 0.],
[ 0., 0., 1.],
[ 1., 0., 0.]], dtype=float32)
```
"""
y = np.array(y, dtype='int')
input_shape = y.shape
if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:
input_shape = tuple(input_shape[:-1])
y = y.ravel()
if not num_classes:
num_classes = np.max(y) + 1
n = y.shape[0]
categorical = np.zeros((n, num_classes), dtype=dtype)
categorical[np.arange(n), y] = 1
output_shape = input_shape + (num_classes,)
categorical = np.reshape(categorical, output_shape)
return categorical
#!/usr/bin/env python
# coding: utf-8
import os
import numpy as np
import random
import math
import pydicom
import pandas as pd
import shutil
import tensorflow as tf
import xml.etree.ElementTree as ET
from functools import partial, update_wrapper
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
os.environ['TF_ENABLE_AUTO_MIXED_PRECISION'] = '1'
# Retrieving blocks of a numpy array
from skimage.util import view_as_blocks
# Retrieving blocks of a numpy array with given stride sizes
from skimage.util.shape import view_as_windows
from random import randint
from tqdm import tqdm
from random import randint
import matplotlib.pyplot as plt
from keras.models import Model, load_model
from keras.layers import Input, Flatten, Dense, concatenate, Conv2D, Conv3D, MaxPooling2D, MaxPooling3D, \
Conv2DTranspose, Conv3DTranspose
from keras.layers import Activation, add, multiply, Lambda
from keras.layers import AveragePooling2D, AveragePooling3D, average, UpSampling2D, UpSampling3D, Dropout
from keras.optimizers import Adam, SGD, RMSprop
from keras.initializers import glorot_normal, random_normal, random_uniform
from keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from tensorflow.keras.callbacks import *
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.losses import binary_crossentropy
import numpy as np
from hyperopt import fmin, hp, tpe, Trials, space_eval
from hyperopt.pyll import scope as ho_scope
from hyperopt.pyll.stochastic import sample as ho_sample
class plaquetypes:
limits = pd.DataFrame(
[['DenseCalcium', 351, 10000], ['Fibrous', 131, 350], ['FibrousFatty', 76, 130], ['NecroticCore', -30, 75],
['NonCalcified', -1000, 350]], columns=['type', 'lower', 'upper'])
# ,['MedisDenseCalcium',351,10000],['MedisFibrous',151,350],['MedisFibrousFatty',31,150],['MedisNecroticCore',-100,30]
def wrapped_partial(func, *args, **kwargs):
partial_func = partial(func, *args, **kwargs)
update_wrapper(partial_func, func)
return partial_func
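# Illustrative sketch (added for clarity, not part of the original file):
# wrapped_partial binds the hyper-parameter namespace into the two-argument form
# Keras expects for losses/metrics while keeping the original __name__, which is
# what shows up in training logs. The _Args class here is a stand-in for the real
# argument object passed around in this script.
def _wrapped_partial_example():
    class _Args:
        smooth = 1
    metric = wrapped_partial(dsc, args=_Args())
    return metric.__name__             # -> 'dsc'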
def get_pairs(vesel_list):
return_list = []
for ID in vesel_list:
if ID.endswith('NoContour.dcm'):
return_list.append([ID, ID[:-13] + "Contour1.dcm", ID[:-13] + "Contour2.dcm", ID[:8]])
return return_list
def find_included_segment(xml_file):
try:
doc = ET.parse(xml_file)
root = doc.getroot()
info = 0
included = False
while not (included):
info += 1
if (root[2][info][25].attrib['value'] == 'SEGMENT_TYPE_NORMAL'):
breakpoints = [root[2][info][24].attrib['value'], root[2][info][1].attrib['value']]
included = True
except:
doc = ET.parse(xml_file)
root = doc.getroot()
info = 0
included = False
while not (included):
info += 1
if (root[3][info][25].attrib['value'] == 'SEGMENT_TYPE_NORMAL'):
breakpoints = [root[3][info][24].attrib['value'], root[3][info][1].attrib['value']]
included = True
return breakpoints
def get_xml(basic_path2, dicom):
dicom = dicom.replace('_', '/')
dicom = dicom[:-13] + 'data_model.xml'
xml_file = os.path.join(basic_path2, dicom)
return xml_file
def get_image_mask_for_ID_tuple(ID_tuple, basic_path, basic_path2, dir_to_save, plaques_only):
image_path = os.path.join(basic_path, ID_tuple[0])
mask_1_path = os.path.join(basic_path, ID_tuple[1])
mask_2_path = os.path.join(basic_path, ID_tuple[2])
image = pydicom.dcmread(image_path)
mask_1 = pydicom.dcmread(mask_1_path)
mask_2 = pydicom.dcmread(mask_2_path)
if plaques_only==True:
xml_file = get_xml(basic_path2, ID_tuple[0])
breakpoints = np.array(find_included_segment(xml_file))
breakpoints = breakpoints.astype(float)
breakpoints = breakpoints / image.SpacingBetweenSlices
if 'Plakk' in dir_to_save:
image_array = (image.pixel_array)[int(breakpoints[0]):int(breakpoints[1]), :, :]
mask_1_array = (mask_1.pixel_array)[int(breakpoints[0]):int(breakpoints[1]), :, :]
mask_2_array = (mask_2.pixel_array)[int(breakpoints[0]):int(breakpoints[1]), :, :]
else:
image_array = (image.pixel_array)[int(breakpoints[0]):, :, :]
mask_1_array = (mask_1.pixel_array)[int(breakpoints[0]):, :, :]
mask_2_array = (mask_2.pixel_array)[int(breakpoints[0]):, :, :]
else:
image_array = (image.pixel_array)
mask_1_array = (mask_1.pixel_array)
mask_2_array = (mask_2.pixel_array)
return [image_array, mask_1_array, mask_2_array]
def apply_breakpoints(image_array, breakpoints, dir_to_save):
if 'Plakk' in dir_to_save:
image_array = (image_array)[int(breakpoints[0]):int(breakpoints[1]), :, :]
else:
image_array = (image_array)[int(breakpoints[0]):, :, :]
return image_array
def osszefuz(x):
x.append(x[0][:7])
x.append(x[0][16:18])
return x
def save_all_patch_for_image_mask_pair(ID_tuple,
dir_to_save,
patch_shape,
stride_size,
train_val_test,
basic_path,
basic_path2,
truncate=True,
plaques_only=False
):
"""Saves all 3 dimensional patches
Arguments
-----------
ID_tuple
dir_to_save : string
Folder to save the patches.
train_val_test : string
possible values: 'train', 'val', or 'test'.
Subfolders for dataset split.
Outputs
-----------
None
"""
image_array, mask_1, mask_2 = get_image_mask_for_ID_tuple(ID_tuple, basic_path, basic_path2, dir_to_save,plaques_only)
mask_1 = np.where(mask_1 > 0, 1, 0)
mask_2 = np.where(mask_2 > 0, 1, 0)
dif_array = np.where(mask_2 - mask_1 == 1, 1, 0)
# dif_array = get_all_plaques(dif_array)
# Count saved patches
total_patches_for_ID = 0
image_to_pad = image_array
nodule_to_pad = mask_1
lung_to_pad = mask_2
dif_to_pad = dif_array
# Order of the saved patch, appended to filename
patch_count = 0
# Shape of original images
size_X = image_to_pad.shape[2]
size_Y = image_to_pad.shape[1]
size_Z = image_to_pad.shape[0]
image_to_block = np.zeros((size_Z + patch_shape[2],
size_Y,
size_X))
image_to_block[:size_Z, :size_Y, :size_X] = image_to_pad
nodule_to_block = np.zeros((size_Z + patch_shape[2],
size_Y,
size_X))
nodule_to_block[:size_Z, :size_Y, :size_X] = nodule_to_pad
lung_to_block = np.zeros((size_Z + patch_shape[2],
size_Y,
size_X))
lung_to_block[:size_Z, :size_Y, :size_X] = lung_to_pad
dif_to_block = np.zeros((size_Z + patch_shape[2],
size_Y,
size_X))
dif_to_block[:size_Z, :size_Y, :size_X] = dif_to_pad
# patch_shape is originally in order XYZ, however for view as window we need it in ZYX
patch_shape_ZYX = [patch_shape[2], patch_shape[1], patch_shape[0]]
# Same as patch_shape
stride_size_ZYX = [stride_size[2], stride_size[1], stride_size[0]]
# Create blocks of the numpy arrays using view_as_blocks from skimage.util
image_patches = view_as_windows(image_to_block, window_shape=patch_shape_ZYX, step=stride_size_ZYX)
nodule_patches = view_as_windows(nodule_to_block, window_shape=patch_shape_ZYX, step=stride_size_ZYX)
lung_patches = view_as_windows(lung_to_block, window_shape=patch_shape_ZYX, step=stride_size_ZYX)
dif_patches = view_as_windows(dif_to_block, window_shape=patch_shape_ZYX, step=stride_size_ZYX)
# view_as_windows creates 6 dimensional numpy arrays:
# first 3 dimensions encode the position of the patch, last 3 dimensions encode patch shape.
# We will iterate through the number of patches
number_of_patches = image_patches.shape[0] * image_patches.shape[1] * image_patches.shape[2]
for counter in range(number_of_patches):
patch_coor_1 = int(counter // (image_patches.shape[1] * image_patches.shape[2]))
patch_coor_2 = int(((counter - patch_coor_1 * image_patches.shape[1] * image_patches.shape[2])
// image_patches.shape[2]))
patch_coor_3 = int(counter - patch_coor_1 * image_patches.shape[1] * image_patches.shape[2]
- patch_coor_2 * image_patches.shape[2])
image_patch = image_patches[patch_coor_1][patch_coor_2][patch_coor_3]
nodule_patch = nodule_patches[patch_coor_1][patch_coor_2][patch_coor_3]
lung_patch = lung_patches[patch_coor_1][patch_coor_2][patch_coor_3]
dif_patch = dif_patches[patch_coor_1][patch_coor_2][patch_coor_3]
if truncate == True:
# crop out the central 16:48 region
image_patch = image_patch[:, 16:48, 16:48]
nodule_patch = nodule_patch[:, 16:48, 16:48]
lung_patch = lung_patch[:, 16:48, 16:48]
dif_patch = dif_patch[:, 16:48, 16:48]
if plaques_only and np.count_nonzero(dif_patch) > 0:
image_patch_file = os.path.join(dir_to_save, train_val_test, "images",
ID_tuple[0][:-13] + str(patch_count) + '.npy')
np.save(image_patch_file, image_patch.astype(np.float32))
nodule_patch_file = os.path.join(dir_to_save, train_val_test, "masks_1",
ID_tuple[0][:-13] + str(patch_count) + '.npy')
np.save(nodule_patch_file, nodule_patch.astype(np.uint8))
lung_patch_file = os.path.join(dir_to_save, train_val_test, "masks_2",
ID_tuple[0][:-13] + str(patch_count) + '.npy')
np.save(lung_patch_file, lung_patch.astype(np.uint8))
plaque_patch_file = os.path.join(dir_to_save, train_val_test, "plaques",
ID_tuple[0][:-13] + str(patch_count) + '.npy')
np.save(plaque_patch_file, dif_patch.astype(np.uint8))
patch_count += 1
total_patches_for_ID += 1
if plaques_only == False:
image_patch_file = os.path.join(dir_to_save, train_val_test, "images",
ID_tuple[0][:-13] + str(patch_count) + '.npy')
np.save(image_patch_file, image_patch.astype(np.float32))
nodule_patch_file = os.path.join(dir_to_save, train_val_test, "masks_1",
ID_tuple[0][:-13] + str(patch_count) + '.npy')
np.save(nodule_patch_file, nodule_patch.astype(np.uint8))
lung_patch_file = os.path.join(dir_to_save, train_val_test, "masks_2",
ID_tuple[0][:-13] + str(patch_count) + '.npy')
np.save(lung_patch_file, lung_patch.astype(np.uint8))
plaque_patch_file = os.path.join(dir_to_save, train_val_test, "plaques",
ID_tuple[0][:-13] + str(patch_count) + '.npy')
np.save(plaque_patch_file, dif_patch.astype(np.uint8))
patch_count += 1
total_patches_for_ID += 1
def save_all_patch(ID_tuple_list,
dir_to_save,
patch_shape,
stride_size,
basic_path,
basic_path2,
truncate=True,
train_val_test_split=[0.8, 0.2, 0.0],
plaques_only=True,
val_patients=None,
test_patients=None):
# First delete the directory, where we would like to save the patches to avoid naming collisions
if os.path.exists(dir_to_save):
shutil.rmtree(dir_to_save)
# Create parent directory
os.mkdir(dir_to_save)
os.mkdir(os.path.join(dir_to_save, "file_logs"))
# Then create folders train, test, val containing images and masks folders.
train_dir, test_dir, val_dir = [os.path.join(dir_to_save, "train"),
os.path.join(dir_to_save, "test"),
os.path.join(dir_to_save, "val")]
# Create train_dir
os.mkdir(train_dir)
os.mkdir(os.path.join(train_dir, "images"))
os.mkdir(os.path.join(train_dir, "plaques"))
os.mkdir(os.path.join(train_dir, "masks_1"))
os.mkdir(os.path.join(train_dir, "masks_2"))
# Create test_dir
os.mkdir(test_dir)
os.mkdir(os.path.join(test_dir, "images"))
os.mkdir(os.path.join(test_dir, "plaques"))
os.mkdir(os.path.join(test_dir, "masks_1"))
os.mkdir(os.path.join(test_dir, "masks_2"))
# Create val_dir
os.mkdir(val_dir)
os.mkdir(os.path.join(val_dir, "images"))
os.mkdir(os.path.join(val_dir, "plaques"))
os.mkdir(os.path.join(val_dir, "masks_1"))
os.mkdir(os.path.join(val_dir, "masks_2"))
total_number_of_IDs = len(ID_tuple_list)
# Create thresholds for train-val-test split
number_of_IDs_train = int(train_val_test_split[0] * total_number_of_IDs)
number_of_IDs_val = int(train_val_test_split[1] * total_number_of_IDs)
number_of_IDs_test = int(train_val_test_split[2] * total_number_of_IDs)
patients = []
for counter, ID_tuple in tqdm(enumerate(ID_tuple_list)):
patients.append(ID_tuple[3])
patients = np.unique(patients)
random.seed(42)
# pick the validation patients first so that, when both arguments are None, the
# test patients can be drawn from the remaining pool without passing None to set()
if val_patients is None:
val_patients = random.sample(set(patients), int(len(patients) * train_val_test_split[1]))
if test_patients is None:
test_patients = random.sample(set(patients).difference(set(val_patients)),
int(len(patients) * train_val_test_split[2]))
# Save images to the corresponding subfolders using the functions above.
for counter, ID_tuple in tqdm(enumerate(ID_tuple_list)):
if ID_tuple[3].rstrip('_') in val_patients:
train_val_test = "val"
elif ID_tuple[3].rstrip('_') in test_patients:
train_val_test = "test"
else:
train_val_test = "train"
save_all_patch_for_image_mask_pair(ID_tuple,
patch_shape=patch_shape,
stride_size=stride_size,
truncate=truncate,
dir_to_save=dir_to_save,
train_val_test=train_val_test,
basic_path=basic_path,
basic_path2=basic_path2,
plaques_only=plaques_only)
return val_patients
epsilon = 1e-5
smooth = 1
def dsc(y_true, y_pred, args):
smooth = args.smooth
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred, args):
loss = 1 - dsc(y_true, y_pred, args)
return loss
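# Illustrative sketch (added for clarity, not part of the original file): the same
# Dice formula as dsc()/dice_loss(), written with plain NumPy so the arithmetic is
# easy to check: 2 overlapping voxels, 4 true and 3 predicted positives, smooth = 1,
# so Dice = (2*2 + 1) / (4 + 3 + 1) = 0.625 and the corresponding loss is 0.375.
def _dice_numpy_example():
    y_true = np.array([1., 1., 1., 1., 0., 0.])
    y_pred = np.array([1., 1., 0., 0., 1., 0.])
    smooth = 1.
    intersection = np.sum(y_true * y_pred)                                             # 2
    return (2. * intersection + smooth) / (np.sum(y_true) + np.sum(y_pred) + smooth)   # 0.625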
def tp(y_true, y_pred, args):
smooth = args.smooth
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pos = K.round(K.clip(y_true, 0, 1))
tp = (K.sum(y_pos * y_pred_pos) + smooth) / (K.sum(y_pos) + smooth)
return tp
def tn(y_true, y_pred, args):
smooth = args.smooth
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tn = (K.sum(y_neg * y_pred_neg) + smooth) / (K.sum(y_neg) + smooth)
return tn
def fp(y_true, y_pred, args):
smooth = args.smooth
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
fp = (K.sum(y_neg * y_pred_pos) + smooth) / (K.sum(y_pred) + smooth)
return fp
def fn(y_true, y_pred, args):
smooth = args.smooth
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
fn = (K.sum(y_pos * y_pred_neg) + smooth) / (K.sum(y_pred_neg) + smooth)
return fn
def tversky(y_true, y_pred, args):
smooth = args.smooth
y_true_pos = K.flatten(y_true)
y_pred_pos = K.flatten(y_pred)
true_pos = K.sum(y_true_pos * y_pred_pos)
false_neg = K.sum(y_true_pos * (1 - y_pred_pos))
false_pos = K.sum((1 - y_true_pos) * y_pred_pos)
alpha = args.alpha
return (true_pos + smooth) / (true_pos + alpha * false_neg + (1 - alpha) * false_pos + smooth)
def tversky_loss(y_true, y_pred, args):
return 1 - tversky(y_true, y_pred, args)
def focal_tversky(y_true, y_pred, args):
gamma = args.gamma = 0.75
pt_1 = tversky(y_true, y_pred, args)
return K.pow((1 - pt_1), gamma)
def multiloss(y_true, y_pred, args):
if args.loss_type == 1:
return focal_tversky(y_true, y_pred, args)
else:
return dice_loss(y_true, y_pred, args)
# Visualization functions
def gray_to_colored(im):
colored = np.repeat(np.expand_dims(im, axis=-1), 3, axis=-1).astype(float)
colored = 1 * (colored - np.amin(colored)) / (np.amax(colored) - np.amin(colored))
return colored
def superimpose_mask(image_array, mask_array, opacity=0.8):
superimposed = gray_to_colored(image_array)
reds = np.zeros(mask_array.shape + (3,)).astype(bool)
reds[:, :, 0] = mask_array == 1
superimposed[reds] = opacity * 1 + (1 - opacity) * superimposed[reds]
return superimposed
def visualize_slice_mask_pair(image_array, mask_1_array, mask_2_array, plaque_array, opacity=0.8, name=""):
ax, plots = plt.subplots(2, 4, figsize=(25, 10))
ax.suptitle(name)
plots[0, 0].axis('off')
plots[0, 0].imshow(mask_2_array - mask_1_array, cmap=plt.cm.bone)
plots[0, 1].axis('off')
plots[0, 1].imshow(mask_1_array, cmap=plt.cm.bone)
plots[0, 2].axis('off')
plots[0, 2].imshow(mask_2_array, cmap=plt.cm.bone)
plots[0, 3].axis('off')
plots[0, 3].imshow(plaque_array, cmap=plt.cm.bone)
plots[1, 0].axis('off')
plots[1, 0].imshow(superimpose_mask(image_array, mask_2_array - mask_1_array, opacity=opacity))
plots[1, 1].axis('off')
plots[1, 1].imshow(superimpose_mask(image_array, mask_1_array, opacity=opacity))
plots[1, 2].axis('off')
plots[1, 2].imshow(superimpose_mask(image_array, mask_2_array, opacity=opacity))
plots[1, 3].axis('off')
plots[1, 3].imshow(superimpose_mask(image_array, plaque_array, opacity=opacity))
plt.show()
def expand_as_3d(tensor, rep, name):
my_repeat = Lambda(lambda x, repnum: K.repeat_elements(x, repnum, axis=4), arguments={'repnum': rep},
name='psi_up' + name)(tensor)
return my_repeat
def AttnGatingBlock3D(x, g, inter_shape, name):
'''
Analogous implementation of the 3D attention gate used in the Attention U-Net 3D.
'''
shape_x = K.int_shape(x) # 32
shape_g = K.int_shape(g) # 16
theta_x = Conv3D(inter_shape, (2, 2, 2), strides=(2, 2, 2), padding='same', name='xl' + name)(x) # 16
shape_theta_x = K.int_shape(theta_x)
phi_g = Conv3D(inter_shape, (1, 1, 1), padding='same')(g)
upsample_g = Conv3DTranspose(inter_shape, (3, 3, 3), strides=(
shape_theta_x[1] // shape_g[1], shape_theta_x[2] // shape_g[2], shape_theta_x[3] // shape_g[3]), padding='same',
name='g_up' + name)(phi_g) # 16
concat_xg = add([upsample_g, theta_x])
act_xg = Activation('relu')(concat_xg)
psi = Conv3D(1, (1, 1, 1), padding='same', name='psi' + name)(act_xg)
sigmoid_xg = Activation('sigmoid')(psi)
shape_sigmoid = K.int_shape(sigmoid_xg)
upsample_psi = UpSampling3D(
size=(shape_x[1] // shape_sigmoid[1], shape_x[2] // shape_sigmoid[2], shape_x[3] // shape_sigmoid[3]))(
sigmoid_xg) # 32
upsample_psi = expand_as_3d(upsample_psi, shape_x[4], name)
y = multiply([upsample_psi, x], name='q_attn' + name)
result = Conv3D(shape_x[4], (1, 1, 1), padding='same', name='q_attn_conv' + name)(y)
result_bn = BatchNormalization(name='q_attn_bn' + name)(result)
return result_bn
def UnetConv3D(input, outdim, is_batchnorm, name):
'''
Analogous implementation of the pair of convolutional layers used by the U-Net 3D.
'''
x = Conv3D(outdim, (3, 3, 3), strides=(1, 1, 1), kernel_initializer="glorot_normal", padding="same",
name=name + '_1')(input)
if is_batchnorm:
x = BatchNormalization(name=name + '_1_bn')(x)
x = Activation('relu', name=name + '_1_act')(x)
x = Conv3D(outdim, (3, 3, 3), strides=(1, 1, 1), kernel_initializer="glorot_normal", padding="same",
name=name + '_2')(x)
if is_batchnorm:
x = BatchNormalization(name=name + '_2_bn')(x)
x = Activation('relu', name=name + '_2_act')(x)
return x
def UnetGatingSignal3D(input, is_batchnorm, name):
'''
Implementation of the gating signal appearing in the upsampling branch of the Attention U-Net 3D:
simply a 1x1 convolution followed by batch normalization and ReLU.
'''
shape = K.int_shape(input)
x = Conv3D(shape[4] * 1, (1, 1, 1), strides=(1, 1, 1), padding="same", kernel_initializer="glorot_normal",
name=name + '_conv')(input)
if is_batchnorm:
x = BatchNormalization(name=name + '_bn')(x)
x = Activation('relu', name=name + '_act')(x)
return x
def tiny_attn_unet3D(opt, input_size, args):
'''
Analogue of the above-defined attn_unet3D with fewer layers, resulting in a smaller U shape.
'''
inputs = Input(shape=input_size)
conv1 = UnetConv3D(inputs, 8*args.kernel_power, is_batchnorm=True, name='conv1')
pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
pool1 = Dropout(args.dropout)(pool1)
conv2 = UnetConv3D(pool1, 8*args.kernel_power, is_batchnorm=True, name='conv2')
pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
pool2 = Dropout(args.dropout)(pool2)
center = UnetConv3D(pool2, 16*args.kernel_power, is_batchnorm=True, name='center')
g3 = UnetGatingSignal3D(center, is_batchnorm=True, name='g3')
attn3 = AttnGatingBlock3D(conv2, g3, 8*args.kernel_power, '_3')
up3 = concatenate([Conv3DTranspose(8*args.kernel_power, (3, 3, 3), strides=(2, 2, 2), padding='same', activation='relu',
kernel_initializer="glorot_normal")(center), attn3], name='up3')
up4 = concatenate([Conv3DTranspose(8*args.kernel_power, (3, 3, 3), strides=(2, 2, 2), padding='same', activation='relu',
kernel_initializer="glorot_normal")(up3), conv1], name='up4')
mask_1 = Conv3D(1, (1, 1, 1), activation='sigmoid', name='mask_1')(up4)
mask_2 = Conv3D(1, (1, 1, 1), activation='sigmoid', name='mask_2')(up4)
dif = Conv3D(1, (1, 1, 1), activation='sigmoid', name='dif')(up4)
model = Model(inputs=[inputs], outputs=[mask_1, mask_2, dif])
model.compile(optimizer=opt,
loss=[wrapped_partial(dice_loss, args=args), wrapped_partial(dice_loss, args=args),
wrapped_partial(multiloss, args=args)],
loss_weights=[0.1, 0.1, 0.8],
metrics=[[wrapped_partial(dsc, args=args)], [wrapped_partial(dsc, args=args)],
[wrapped_partial(dsc, args=args), wrapped_partial(tp, args=args),
wrapped_partial(tn, args=args)]])
return model
def full_ct_model_evaluation(image, model, z_stride, which_prediction):
# Shape of original images
size_X = image.shape[2]
size_Y = image.shape[1]
size_Z = image.shape[0]
image_paded = np.zeros((size_Z + 24,
size_Y,
size_X))
image_paded[:size_Z, :size_Y, :size_X] = image / 512
prediction_array = np.zeros((size_Z + 24,
size_Y,
size_X))
coverage_array = np.zeros((size_Z + 24,
size_Y,
size_X))
# Containers for batch predictions
patch_boundaries_list = []
counter = 0
# Stride along Z axis:
for z_start in range(0, prediction_array.shape[2], z_stride):
z_end = z_start + 24
if (np.count_nonzero(image[z_start:z_end, :, :]) > 1):
patch_boundaries_list.append([z_start, z_end])
for patch_index in range(0, len(patch_boundaries_list)):
# patch_boundaries in current batch
temporal_boundaries = patch_boundaries_list[patch_index]
temp_patches = []
# Extracting patches for prediction
current_patch = image_paded[temporal_boundaries[0]:temporal_boundaries[1],
16:48,
16:48]
current_patch = np.expand_dims(current_patch, axis=0)
# Updating prediction_array and coverage_array
predicted_patch = model.predict(np.expand_dims(current_patch, axis=-1))
# 0: inner mask, 1: outer mask, 2: difference
prediction = predicted_patch[which_prediction]
prediction = np.reshape(prediction, [24, 32, 32])
prediction_array[temporal_boundaries[0]:temporal_boundaries[1],
16:48,
16:48] += prediction
# print(prediction_array[32, 32, 32])
coverage_array[temporal_boundaries[0]:temporal_boundaries[1],
:,
:] += 1
coverage_array = np.maximum(coverage_array, np.ones(coverage_array.shape))
# Taking the average prediction value for the pixels
prediction_array = np.divide(prediction_array, coverage_array)
# print(prediction_array[32,32,32])
# Removing the prediction values outside of the CT
prediction_array = prediction_array[0:size_Z, 0:size_Y, 0:size_X]
# The average prediction value is continuous between 0 and 1,
# so for the segmentation we have to threshold it
prediction_array = (prediction_array > 1 / 2) * 1
return prediction_array
def mask_from_dicom(contour_file_name):
ds = pydicom.dcmread(contour_file_name)
pixels = np.array(ds.pixel_array)
pixels = np.where(pixels > 0, 1, 0)
return pixels
from yolo_top import yolov3
import numpy as np
import tensorflow as tf
from config import cfg, ckpt_dir, testset, result_dir, score_threshold
from PIL import Image, ImageDraw, ImageFont
from draw_boxes import draw_boxes
import cv2
import matplotlib.pyplot as plt
import os
import glob
import xml.etree.cElementTree as ET
# IMG_ID ='008957'
repo_dir = str(os.getcwd())
os.chdir(testset)
# collect every image of the configured format in the test-set directory
image_ids = glob.glob('*.' + cfg.image_format)
os.chdir(repo_dir)
def indent(elem, level=0):
i = "\n" + level * " "
j = "\n" + (level - 1) * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for subelem in elem:
indent(subelem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = j
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = j
return elem
def draw_xml(image, boxes, box_classes, class_names, scores=None, image_id=''):
image = Image.fromarray(np.floor(image * 255 + 0.5).astype('uint8'))
root = ET.Element("annotation")
folder = ET.SubElement(root, "folder")
folder.text = ''
filename = ET.SubElement(root, "filename")
filename.text = str(image_id)
source = ET.SubElement(root, "source")
database = ET.SubElement(source, "database")
database.text = 'ILSVRC_2014'
size = ET.SubElement(root, "size")
width = ET.SubElement(size, "width")
width.text = str(image.size[0])
height = ET.SubElement(size, "height")
height.text = str(image.size[1])
depth = ET.SubElement(size, "depth")
depth.text = '3'
ET.SubElement(root, "segmented").text = '0'
for box, cls_idx in zip(boxes, box_classes):
obj = ET.SubElement(root, "object")
ET.SubElement(obj, "name").text = str(class_names[cls_idx])
ET.SubElement(obj, "pose").text = 'Unspecified'
ET.SubElement(obj, "truncated").text = '0'
ET.SubElement(obj, "difficult").text = '0'
bndbox = ET.SubElement(obj, "bndbox")
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5))
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 05 14:05:24 2013
Aug 15 2020: add brunnermunzel, rank_compare_2indep
Author: <NAME>
"""
from statsmodels.compat.python import lzip
import numpy as np
from numpy.testing import (assert_allclose, assert_almost_equal,
assert_approx_equal, assert_)
from scipy import stats
import pytest
from statsmodels.stats.contingency_tables import (
mcnemar, cochrans_q, SquareTable)
from statsmodels.sandbox.stats.runs import (Runs,
runstest_1samp, runstest_2samp)
from statsmodels.sandbox.stats.runs import mcnemar as sbmcnemar
from statsmodels.stats.nonparametric import (
rank_compare_2indep, rank_compare_2ordinal, prob_larger_continuous,
cohensd2problarger)
from statsmodels.tools.testing import Holder
def _expand_table(table):
'''expand a 2 by 2 contingency table to observations
'''
return np.repeat([[1, 1], [1, 0], [0, 1], [0, 0]], table.ravel(), axis=0)
def test_mcnemar_exact():
f_obs1 = np.array([[101, 121], [59, 33]])
f_obs2 = np.array([[101, 70], [59, 33]])
f_obs3 = np.array([[101, 80], [59, 33]])
f_obs4 = np.array([[101, 30], [60, 33]])
f_obs5 = np.array([[101, 10], [30, 33]])
f_obs6 = np.array([[101, 10], [10, 33]])
#vassar college online computation
res1 = 0.000004
res2 = 0.378688
res3 = 0.089452
res4 = 0.00206
res5 = 0.002221
res6 = 1.
stat = mcnemar(f_obs1, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res1], decimal=6)
stat = mcnemar(f_obs2, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res2], decimal=6)
stat = mcnemar(f_obs3, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [59, res3], decimal=6)
stat = mcnemar(f_obs4, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [30, res4], decimal=6)
stat = mcnemar(f_obs5, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [10, res5], decimal=6)
stat = mcnemar(f_obs6, exact=True)
assert_almost_equal([stat.statistic, stat.pvalue], [10, res6], decimal=6)
def test_mcnemar_chisquare():
f_obs1 = np.array([[101, 121], [59, 33]])
f_obs2 = np.array([[101, 70], [59, 33]])
f_obs3 = np.array([[101, 80], [59, 33]])
#> mcn = mcnemar.test(matrix(c(101, 121, 59, 33),nrow=2))
res1 = [2.067222e01, 5.450095e-06]
res2 = [0.7751938, 0.3786151]
res3 = [2.87769784, 0.08981434]
stat = mcnemar(f_obs1, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res1, rtol=1e-6)
stat = mcnemar(f_obs2, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res2, rtol=1e-6)
stat = mcnemar(f_obs3, exact=False)
assert_allclose([stat.statistic, stat.pvalue], res3, rtol=1e-6)
# test correction = False
res1 = [2.135556e01, 3.815136e-06]
res2 = [0.9379845, 0.3327967]
res3 = [3.17266187, 0.07488031]
res = mcnemar(f_obs1, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res1, rtol=1e-6)
res = mcnemar(f_obs2, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res2, rtol=1e-6)
res = mcnemar(f_obs3, exact=False, correction=False)
assert_allclose([res.statistic, res.pvalue], res3, rtol=1e-6)
def test_mcnemar_vectorized(reset_randomstate):
ttk = np.random.randint(5,15, size=(2,2,3))
with pytest.deprecated_call():
res = sbmcnemar(ttk, exact=False)
with pytest.deprecated_call():
res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=False) for i in range(3)])
assert_allclose(res, res1, rtol=1e-13)
with pytest.deprecated_call():
res = sbmcnemar(ttk, exact=False, correction=False)
with pytest.deprecated_call():
res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=False, correction=False)
for i in range(3)])
assert_allclose(res, res1, rtol=1e-13)
with pytest.deprecated_call():
res = sbmcnemar(ttk, exact=True)
with pytest.deprecated_call():
res1 = lzip(*[sbmcnemar(ttk[:, :, i], exact=True) for i in range(3)])
assert_allclose(res, res1, rtol=1e-13)
def test_symmetry_bowker():
table = np.array([0, 3, 4, 4, 2, 4, 1, 2, 4, 3, 5, 3, 0, 0, 2, 2, 3, 0, 0,
1, 5, 5, 5, 5, 5]).reshape(5, 5)
res = SquareTable(table, shift_zeros=False).symmetry()
mcnemar5_1 = dict(statistic=7.001587, pvalue=0.7252951, parameters=(10,),
distr='chi2')
assert_allclose([res.statistic, res.pvalue],
[mcnemar5_1['statistic'], mcnemar5_1['pvalue']],
rtol=1e-7)
res = SquareTable(1 + table, shift_zeros=False).symmetry()
mcnemar5_1b = dict(statistic=5.355988, pvalue=0.8661652, parameters=(10,),
distr='chi2')
assert_allclose([res.statistic, res.pvalue],
[mcnemar5_1b['statistic'], mcnemar5_1b['pvalue']],
rtol=1e-7)
table = np.array([2, 2, 3, 6, 2, 3, 4, 3, 6, 6, 6, 7, 1, 9, 6, 7, 1, 1, 9,
8, 0, 1, 8, 9, 4]).reshape(5, 5)
res = SquareTable(table, shift_zeros=False).symmetry()
mcnemar5_2 = dict(statistic=18.76432, pvalue=0.04336035, parameters=(10,),
distr='chi2')
assert_allclose([res.statistic, res.pvalue],
[mcnemar5_2['statistic'], mcnemar5_2['pvalue']],
rtol=1.5e-7)
res = SquareTable(1 + table, shift_zeros=False).symmetry()
mcnemar5_2b = dict(statistic=14.55256, pvalue=0.1492461, parameters=(10,),
distr='chi2')
assert_allclose([res.statistic, res.pvalue],
[mcnemar5_2b['statistic'], mcnemar5_2b['pvalue']],
rtol=1e-7)
def test_cochransq():
#example from dataplot docs, Conover p. 253
#http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/cochran.htm
x = np.array([[1, 1, 1],
[1, 1, 1],
[0, 1, 0],
[1, 1, 0],
[0, 0, 0],
[1, 1, 1],
[1, 1, 1],
[1, 1, 0],
[0, 0, 1],
[0, 1, 0],
[1, 1, 1],
[1, 1, 1]])
res_qstat = 2.8
res_pvalue = 0.246597
res = cochrans_q(x)
assert_almost_equal([res.statistic, res.pvalue], [res_qstat, res_pvalue])
#equivalence of mcnemar and cochranq for 2 samples
a,b = x[:,:2].T
res = cochrans_q(x[:, :2])
with pytest.deprecated_call():
assert_almost_equal(sbmcnemar(a, b, exact=False, correction=False),
[res.statistic, res.pvalue])
def test_cochransq2():
# from an example found on web, verifies 13.286
data = np.array('''
0 0 0 1
0 0 0 1
0 0 0 1
1 1 1 1
1 0 0 1
0 1 0 1
1 0 0 1
0 0 0 1
0 1 0 0
0 0 0 0
1 0 0 1
0 0 1 1'''.split(), int).reshape(-1, 4)
res = cochrans_q(data)
assert_allclose([res.statistic, res.pvalue], [13.2857143, 0.00405776], rtol=1e-6)
def test_cochransq3():
# another example compared to SAS
# in frequency weight format
dt = [('A', 'S1'), ('B', 'S1'), ('C', 'S1'), ('count', int)]
dta = np.array([('F', 'F', 'F', 6),
('U', 'F', 'F', 2),
('F', 'F', 'U', 16),
('U', 'F', 'U', 4),
('F', 'U', 'F', 2),
('U', 'U', 'F', 6),
('F', 'U', 'U', 4),
('U', 'U', 'U', 6)], dt)
cases = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 1],
[0, 1, 0],
[1, 1, 0],
[0, 1, 1],
[1, 1, 1]])
count = np.array([ 6, 2, 16, 4, 2, 6, 4, 6])
data = np.repeat(cases, count, 0)
res = cochrans_q(data)
assert_allclose([res.statistic, res.pvalue], [8.4706, 0.0145], atol=5e-5)
def test_runstest(reset_randomstate):
#comparison numbers from R, tseries, runs.test
#currently only 2-sided used
x = np.array([1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1])
z_twosided = 1.386750
pvalue_twosided = 0.1655179
z_greater = 1.386750
pvalue_greater = 0.08275893
z_less = 1.386750
pvalue_less = 0.917241
#print Runs(x).runs_test(correction=False)
assert_almost_equal(np.array(Runs(x).runs_test(correction=False)),
[z_twosided, pvalue_twosided], decimal=6)
# compare with runstest_1samp which should have same indicator
assert_almost_equal(runstest_1samp(x, correction=False),
[z_twosided, pvalue_twosided], decimal=6)
x2 = x - 0.5 + np.random.uniform(-0.1, 0.1, size=len(x))
assert_almost_equal(runstest_1samp(x2, cutoff=0, correction=False),
[z_twosided, pvalue_twosided], decimal=6)
assert_almost_equal(runstest_1samp(x2, cutoff='mean', correction=False),
[z_twosided, pvalue_twosided], decimal=6)
assert_almost_equal(runstest_1samp(x2, cutoff=x2.mean(), correction=False),
[z_twosided, pvalue_twosided], decimal=6)
# check median
assert_almost_equal(runstest_1samp(x2, cutoff='median', correction=False),
runstest_1samp(x2, cutoff=np.median(x2), correction=False),
decimal=6)
def test_runstest_2sample():
# regression test, checked with MonteCarlo and looks reasonable
x = [31.8, 32.8, 39.2, 36, 30, 34.5, 37.4]
y = [35.5, 27.6, 21.3, 24.8, 36.7, 30]
y[-1] += 1e-6 #avoid tie that creates warning
groups = np.concatenate((np.zeros(len(x)), np.ones(len(y))))
res = runstest_2samp(x, y)
res1 = (0.022428065200812752, 0.98210649318649212)
assert_allclose(res, res1, rtol=1e-6)
# check as stacked array
res2 = runstest_2samp(x, y)
assert_allclose(res2, res, rtol=1e-6)
xy = np.concatenate((x, y))
res_1s = runstest_1samp(xy)
assert_allclose(res_1s, res1, rtol=1e-6)
# check cutoff
res2_1s = runstest_1samp(xy, xy.mean())
assert_allclose(res2_1s, res_1s, rtol=1e-6)
def test_brunnermunzel_one_sided():
# copied from scipy with adjustment
x = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
significant = 13
# reverse direction to match our definition
x, y = y, x
# Results are compared with R's lawstat package.
u1, p1 = rank_compare_2indep(x, y
).test_prob_superior(alternative='smaller')
u2, p2 = rank_compare_2indep(y, x
).test_prob_superior(alternative='larger')
u3, p3 = rank_compare_2indep(x, y
).test_prob_superior(alternative='larger')
u4, p4 = rank_compare_2indep(y, x
).test_prob_superior(alternative='smaller')
assert_approx_equal(p1, p2, significant=significant)
assert_approx_equal(p3, p4, significant=significant)
assert_(p1 != p3)
assert_approx_equal(u1, 3.1374674823029505,
significant=significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=significant)
assert_approx_equal(u3, 3.1374674823029505,
significant=significant)
assert_approx_equal(u4, -3.1374674823029505,
significant=significant)
# Note: scipy and lawstat tail is reversed compared to test statistic
assert_approx_equal(p3, 0.0028931043330757342,
significant=significant)
assert_approx_equal(p1, 0.99710689566692423,
significant=significant)
def test_brunnermunzel_two_sided():
# copied from scipy with adjustment
x = [1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4, 1, 1]
y = [3, 3, 4, 3, 1, 2, 3, 1, 1, 5, 4]
significant = 13
# reverse direction to match our definition
x, y = y, x
# Results are compared with R's lawstat package.
res1 = rank_compare_2indep(x, y)
u1, p1 = res1
t1 = res1.test_prob_superior(alternative='two-sided')
res2 = rank_compare_2indep(y, x)
u2, p2 = res2
t2 = res2.test_prob_superior(alternative='two-sided')
assert_approx_equal(p1, p2, significant=significant)
assert_approx_equal(u1, 3.1374674823029505,
significant=significant)
assert_approx_equal(u2, -3.1374674823029505,
significant=significant)
assert_approx_equal(p2, 0.0057862086661515377,
significant=significant)
assert_allclose(t1[0], u1, rtol=1e-13)
assert_allclose(t2[0], u2, rtol=1e-13)
assert_allclose(t1[1], p1, rtol=1e-13)
assert_allclose(t2[1], p2, rtol=1e-13)
def test_rank_compare_2indep1():
# Example from Munzel and Hauschke 2003
# data is given by counts, expand to observations
levels = [-2, -1, 0, 1, 2]
new = [24, 37, 21, 19, 6]
active = [11, 51, 22, 21, 7]
x1 = np.repeat(levels, new)
x2 = np.repeat(levels, active)
# using lawstat
# > brunner.munzel.test(xn, xa) #brunnermunzel.test(x, y)
res2_t = Holder(statistic=1.1757561456582,
df=204.2984239868,
pvalue=0.2410606649547,
ci=[0.4700629827705593, 0.6183882855872511],
prob=0.5442256341789052)
res = rank_compare_2indep(x1, x2, use_t=False)
assert_allclose(res.statistic, -res2_t.statistic, rtol=1e-13)
assert_allclose(res.prob1, 1 - res2_t.prob, rtol=1e-13)
assert_allclose(res.prob2, res2_t.prob, rtol=1e-13)
tt = res.test_prob_superior()
# TODO: return HolderTuple
# assert_allclose(tt.statistic, res2_t.statistic)
# TODO: check sign/direction in lawstat
assert_allclose(tt[0], -res2_t.statistic, rtol=1e-13)
ci = res.conf_int(alpha=0.05)
# we compare normal confint with t confint, lower rtol
assert_allclose(ci, 1 - np.array(res2_t.ci))
from datetime import timedelta as delta
from os import path
from glob import glob
import numpy as np
import dask
import math
import xarray as xr
from netCDF4 import Dataset
import warnings
import matplotlib.pyplot as plt
warnings.simplefilter('ignore', category=xr.SerializationWarning)
from parcels import AdvectionRK4
from parcels import Field
from parcels import FieldSet
from parcels import JITParticle
from parcels import ParticleFile
from parcels import ParticleSet
from parcels import Variable
#input
wstokes = False #False || True
data_in_waves = "/projects/0/topios/hydrodynamic_data"
data_in_mit = "/home/sypmauu/GalapagosParcels/input/MIT4km"
data_out = "/home/sypmauu/GalapagosParcels/output"
filename_out = "AgridRelease_200727"
galapagos_domain = [-94, -87, -3.5, 3]
seeding_distance = 1 #unit: lon/lat degree
seeding_resolution = 4 #unit: gridpoints
seeding_frequency = 5 #unit: days
advection_duration = 90 #unit: days
output_frequency = 6 #unit: hours
length_simulation = 4*365 #unit: days
#Get indices for Galapagos domain to run simulation
def getclosest_ij(lats,lons,latpt,lonpt):
"""Function to find the index of the closest point to a certain lon/lat value."""
dist_lat = (lats-latpt)**2 # find squared distance of every point on grid
dist_lon = (lons-lonpt)**2
minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element
minindex_lon = dist_lon.argmin()
return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index
dfile = Dataset(data_in_mit+'/RGEMS3_Surf_grid.nc')
lon = dfile.variables['XC'][:]
lat = dfile.variables['YC'][:]
depth = dfile.variables['Depth'][:]
iy_min, ix_min = getclosest_ij(lat, lon, galapagos_domain[2], galapagos_domain[0])
iy_max, ix_max = getclosest_ij(lat, lon, galapagos_domain[3], galapagos_domain[1])
#MITgcm field
varfiles = sorted(glob(data_in_mit + "/RGEMS_20*.nc"))
meshfile = glob(data_in_mit+"/RGEMS3_Surf_grid.nc")
files_MITgcm = {'U': {'lon': meshfile, 'lat': meshfile, 'data': varfiles},
'V': {'lon': meshfile, 'lat': meshfile, 'data': varfiles}}
variables_MITgcm = {'U': 'UVEL', 'V': 'VVEL'}
dimensions_MITgcm = {'lon': 'XC', 'lat': 'YC', 'time': 'time'}
indices_MITgcm = {'lon': range(ix_min,ix_max), 'lat': range(iy_min,iy_max)}
fieldset_MITgcm = FieldSet.from_netcdf(files_MITgcm,
variables_MITgcm,
dimensions_MITgcm,
indices = indices_MITgcm)
#Stokes Field
if wstokes:
files_stokes = sorted(glob(data_in_waves + "/WaveWatch3data/CFSR/WW3-GLOB-30M_20[08-12]*_uss.nc"))
variables_stokes = {'U': 'uuss',
'V': 'vuss'}
dimensions_stokes = {'U': {'lon': 'longitude', 'lat': 'latitude', 'time': 'time'},
'V': {'lon': 'longitude', 'lat': 'latitude', 'time': 'time'}}
indices_stokes = {'lon': range(120, 220), 'lat': range(142, 170)}
fieldset_stokes = FieldSet.from_netcdf(files_stokes,
variables_stokes,
dimensions_stokes,
indices=indices_stokes)
fieldset_stokes.add_periodic_halo(zonal=True, meridional=False, halosize=5)
fieldset = FieldSet(U=fieldset_MITgcm.U + fieldset_stokes.U,
V=fieldset_MITgcm.V + fieldset_stokes.V)
fname = path.join(data_out, filename_out + "_wstokes.nc")
else:
fieldset = fieldset_MITgcm
fname = path.join(data_out, filename_out + ".nc")
fU=fieldset_MITgcm.U
# get all lon, lat that are land
fieldset_MITgcm.computeTimeChunk(fU.grid.time[0], 1)
lon = np.array(fU.lon[:])
lat = np.array(fU.lat[:])
LandMask = fU.data[0,:,:]
LandMask = np.array(LandMask)
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
plotly.tools.set_credentials_file(username='mickisgay', api_key='FaiGQJE3E8ldYHgn5tC6')
fp = open('sphere.txt', 'r')
data = fp.readlines()
x = []
y = []
z = []
for line in data[1:]:
t = line.split(',')
x.append(t[0])
y.append(t[1])
z.append(t[2])
X = np.array(x)
import numpy as np
from numpy.random import default_rng # random sampling, sampling without replacement
import random # weighted random choice
import sys
answer = sys.argv[1]
low = 65
high = 123
dawkins = np.array(
[ord(x) for x in answer if x != ' ']
)
space_position = [x[0] for x in enumerate(answer) if x[1] == ' ']
def chromosomes():
return np.random.randint(
low=low, high=high, size=len(dawkins)
)
def init_population(r: int):
return np.array([
chromosomes() for _ in range(r)
])
def diff(x: np.ndarray):
return np.subtract(dawkins, x)
def std(dif: np.ndarray, r=4):
return round(np.std(dif), r)
def fitness(population: np.ndarray):
return np.array([std(diff(x)) for x in population])
def description(label: str, min_loss: float, master_string: str):
print('--------------------------------------------')
print(label, 'min loss = ', min_loss)
print(label, 'master = ', master_string)
print('--------------------------------------------')
def ranking_selection(population: np.ndarray, fargs: np.ndarray, n: int):
return population[fargs][:n]
def sex(s1: np.ndarray, s2: np.ndarray, mw: int = 99):
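# Crossover with mutation: for each gene position, keep whichever parent's character is closer
# to the target string (weasel-program style selection); with probability 1/(mw+1) the gene is
# instead replaced by a random character.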
p = np.zeros(len(s1), int)
d1 = diff(s1)
d2 = diff(s2)
for i in range(len(s1)):
[m] = random.choices(range(0, 2), weights=[mw, 1])
if m == 0: # no mutation occurs
p[i] = s1[i] if abs(d1[i]) < abs(d2[i]) else s2[i]
else: # mutation occurs
p[i] = np.random.randint(low, high)
return p
def crossover(selection: np.ndarray, r: int, mw: int = 99):
high = len(selection)
offspring = np.empty((0, len(selection[0])), int)
while r > 0:
rng = default_rng()
x, y = rng.choice(high, size=2, replace=False)
child = sex(selection[x], selection[y], mw)
offspring = np.vstack((offspring, child))
r -= 1
return offspring
def ascii_to_str(gene: np.ndarray):
a = list(map(chr, gene))
for s in space_position:
a.insert(s, ' ')
return ''.join(a)
gen = 1 # generation
n = 1000 # population size
m = 100 # number selected
mw = 99 # mutation weight (mw:1)
population = init_population(n) # initialize population
while True:
fit = fitness(population) # compute fitness
fargs = np.argsort(fit)
# Script for training the ContrastSourceNet
# Author: <NAME>
# A FEW NOTES ABOUT THE CODE TO UNDERSTAND IT BETTER
# The network takes as input the contrast source row-space components and outputs (or tries to output) the true contrast source.
# As a result it has estimated the null-space components, and denoised the row-space components
# CONTRAST SOURCE VECTORS AND HOW IT IS FED INTO THE NETWORK
# PyTorch doesn't have any functionality for complex numbers,
# so each contrast source vector for a view is reshaped into an image with a single channel and then split into 2 channels, one for real and other imaginary parts
# So whenever the contrast source is fed into the network, you can see the following code
# CSImage = util_functions.convert_w_to_CSImage(w)
# This CSImage has dimensions: [2V X L X L] and the network takes the input into the form: [BATCH_SIZE X 2V X L X L]
# So if only a single contrast source is fed into the input you need to expand the first dimension as follows:
# CSImage_input = np.expand_dims(CSImage, axis=0)
# This line will change dimension from [2V X L X L] to [1 X 2V X L X L]
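# --- Illustrative sketch (assumptions stated below; not the pipeline's own code) ---
# A minimal, hedged example of the real/imaginary channel split described above. The exact
# layout of w is an assumption here (complex array of shape (L*L, V), one column per view);
# the pipeline itself does this via util_functions.convert_w_to_CSImage.
import numpy as np

def _w_to_csimage_sketch(w, L):
    """Reshape a complex contrast source (L*L, V) into a real-valued (2V, L, L) array."""
    V = w.shape[1]
    imgs = w.T.reshape(V, L, L)  # (V, L, L), complex
    # stack real and imaginary parts along the channel axis -> (2V, L, L)
    return np.concatenate([imgs.real, imgs.imag], axis=0).astype(np.float32)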
import sys
sys.path.insert(0, './utility')
sys.path.insert(0, './SOM_CSI')
import numpy as np
import setup_functions
import util_functions
import generate_shapes
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torch.optim as optim
import torchvision.transforms as transforms
from ContrastSourceNet import ContrastSourceNet_16_MultiScale_2
# Setting up parameters for Inverse Scattering
wavelength = 0.75
k = 2*np.pi/wavelength # Wavevector
d = 2 # Size of imaging domain (in SI Units)
L = 16
n = d/L
R = 4
M = 32
V = 16
pos_D, pos_S, pos_Tx = setup_functions.gen_pos_D(d,L,n), setup_functions.gen_pos_S(R, M, d), setup_functions.gen_pos_Tx(R*1.5, V, d)
e = setup_functions.gen_e(k, pos_D, pos_Tx)
G_D, G_S = util_functions.construct_G_D(pos_D, k, n), util_functions.construct_G_S(pos_D, pos_S, k, n)
# Hyperparameters for network training
BATCH_SIZE = 40
BATCH_SIZE_TEST = 400
RESTART = True # Set to True if you want to retrain the network from scratch
MAX_EPOCH = 50
max_contrast = 7.0
min_contrast = 1.0
LEARN_RATE = 1e-4
SNR = 25
sing_values = 19 # Determined from Morozov's principle. Should be modified for a different SNR value
# Initializing a network
cs_net = ContrastSourceNet_16_MultiScale_2(V)
MODEL_L16_FILE = './best_models_yet/ContrastSourceNet_noisydata_25SNR_L16_8.pth'
if not RESTART:
cs_net.load_state_dict(torch.load(MODEL_L16_FILE))
# Loading the test and train dataset
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0,0),(1.0, 1.0))])
trainset = torchvision.datasets.MNIST(root='./data', train = True, download = True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE, shuffle=True, num_workers=1)
testset = torchvision.datasets.MNIST(root='./data', train = False, download = True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE_TEST, shuffle=False, num_workers=1)
# Obtain a batch from the test dataset
for i, data in enumerate(testloader):
batch_test, _ = data
batch_test_numpy = batch_test.numpy()
break
# SVD of G_S matrix
U1, s1, V1h = np.linalg.svd(G_S, full_matrices=False)
S1 = np.diag(s1)
V1 = V1h.conj().T
# Training code
# Loss function used: Mean Squared Error between the true and predicted CS, with each true CS L2 norm in the denominator
# for normalization
loss = nn.MSELoss()
def loss_fn(pred_cs, true_cs):
true_cs_flat = true_cs.view(-1,2*V*L*L)
pred_cs_flat = pred_cs.view(-1,2*V*L*L)
return loss(pred_cs_flat, true_cs_flat)
# Optimizer to be used: Adam
optimizer = optim.Adam(cs_net.parameters(),lr = LEARN_RATE)
# Defining placeholders for input of the network
CSImage_input = np.empty((BATCH_SIZE,2*V,L,L),dtype=np.float32)
loss_list = [] # Keep track of the training loss, used in deciding whether or not to decrease the learning rate
# Training begins here!
iteration = 0
loss_avg = np.inf
cs_net.train()
for epoch in range(MAX_EPOCH):
if epoch == 40:
LEARN_RATE *= 0.2
optimizer = optim.Adam(cs_net.parameters(),lr = LEARN_RATE)
for i, data in enumerate(trainloader):
# Obtain a batch from the train dataset
batch, _ = data
batch_numpy = batch.numpy()
# Calculate the true contrast source and measurement data from the batch
CSImage_true, Y = util_functions.convert_batch_to_CSImageandY(batch_numpy, L, G_S, G_D, e, max_contrast, min_contrast, True)
# Convert the true and input contrast source into real images for it to be made ready for the network
# Nothing fancy
for idx in range(BATCH_SIZE):
Y[idx,:,:] = util_functions.add_noise(Y[idx,:,:], SNR)
CSImage_input[idx,:,:,:] = util_functions.convert_w_to_CSImage(util_functions.SOM_Stage_I(U1,S1,V1,Y[idx,:,:],sing_values))
# Set all the gradients equal to zero
optimizer.zero_grad()
# Forward pass the batch to network
CSImage_output = cs_net(torch.Tensor(CSImage_input))
# Loss function calculation
loss_value = loss_fn(CSImage_output,torch.Tensor(CSImage_true))
# Calculate gradients for all network parameters
loss_value.backward()
# Perform gradient update steps
optimizer.step()
loss_list.append(loss_value)
if np.mod(iteration,10) == 0:
print('Iteration: %d, Loss: %.5f'%(iteration,loss_value))
if np.mod(iteration, 200) == 0:
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 4 13:01:02 2016
@author: rob
"""
import numpy as np
from sklearn.metrics import confusion_matrix
acc_weights = np.array([0.964, 0, 0.958, 0, 0.964, 0])
test_weights = np.array([0.964, 0.958, 0.964])
#Load the assemble dataset
data_ass = np.genfromtxt('assembling_data.csv',delimiter = ',',skip_header=1)
y_ass = data_ass[:,1]
"""Save all the individual logits in a dictionary"""
# Note that every load-statement has different settings with respect to starting
# columns and rows. Adapt this to your need
logits = {}
logits_test = {}
logits['logits_1'] = np.genfromtxt('logits_nn_1hidden6apr.csv')
logits['logits_2'] = np.genfromtxt('Probabilities_Classes_LassoR1.csv',delimiter = ',',skip_header=1)[:,1:]
logits['logits_3'] = np.genfromtxt('predicted_probabilities_random_forest.csv',delimiter = ',',skip_header=1)[:,1:]
logits['logits_4'] = np.genfromtxt('predicted_probabilities_boosting.csv',delimiter = ',',skip_header=1)[:,1:]
logits['logits_5'] = np.genfromtxt('predicted_probabilities_bagging.csv',delimiter = ',',skip_header=1)[:,1:]
logits['logits_6'] = np.genfromtxt('Probabilities_Classes_RidgeR.csv',delimiter = ',',skip_header=1)[:,1:]
logits_test['logits_1'] = np.genfromtxt('logits_nn_1hidden_test8april.csv')
logits_test['logits_2'] = np.genfromtxt('random_forest_prediction_test2.csv',delimiter = ',',skip_header=1)[:,1:]
logits_test['logits_3'] = np.genfromtxt('bagging_probabilities_modelingTest.csv',delimiter = ',',skip_header=1)[:,1:]
Ntest = logits_test['logits_1'].shape[0]
#Expected sizes
D = 7
N = 3000
#Check these expected sizes
assert logits['logits_1'].shape == (N,D), 'Wrong size of logits_1'
assert logits['logits_2'].shape == (N,D), 'Wrong size of logits_2'
assert logits['logits_3'].shape == (N,D), 'Wrong size of logits_3'
assert logits['logits_4'].shape == (N,D), 'Wrong size of logits_4'
assert logits['logits_5'].shape == (N,D), 'Wrong size of logits_5'
#Perform weighted sum over individual logits
logits_weighted_sum = np.zeros((N,D))
for n in xrange(len(acc_weights)):
logits_weighted_sum += acc_weights[n]*logits['logits_'+str(n+1)]
logits_weighted_sum /= np.sum(acc_weights)
#Perform weighted sum over individual logits over testset
logits_test_sum = np.zeros((Ntest,D))
for n in xrange(len(test_weights)):
logits_test_sum += test_weights[n]*logits_test['logits_'+str(n+1)]
logits_test_sum /= np.sum(test_weights)
#Make predictions
pred = {}
acc = {}
conf = {}
ytrue = np.expand_dims(y_ass,axis=1)
for n in xrange(len(acc_weights)):
logits_n = logits['logits_'+str(n+1)]
pp = np.argmax(logits_n,axis=1)
pred['classifier_'+str(n+1)] = pp
ypp = np.expand_dims(pp,axis=1)
print('Confusion matrix for classifier %s'%(n+1))
print(confusion_matrix(ytrue,ypp))
#Save the accuracy for later printing
acc['classifier_'+str(n+1)] = np.mean(ytrue==ypp)
#Calculate the average confidence at the falsely classified samples
ind_false = np.where(ytrue!=ypp)
ind_false = ind_false[0]
class_false = np.squeeze(ytrue[ind_false]).astype(int)
conf_false = logits_n[ind_false,class_false]
conf['classifier_'+str(n+1)] = np.mean(conf_false)
#Print the accuracies
for n in xrange(len(acc_weights)):
print('Accuracy for classifier %s is %.3f'%(n+1,acc['classifier_'+str(n+1)]))
#Print the confidences
for n in xrange(len(acc_weights)):
print('Average confidence at misclassified samples for classifier %s is %.3f'%(n+1,conf['classifier_'+str(n+1)]))
#Check if the weighted sum makes sense
assert np.linalg.norm(np.sum(logits_weighted_sum,axis=1)-1) < 0.001,'The weighted sum seems not to result in a probability distribution'
ensemble_pred = np.argmax(logits_weighted_sum,axis=1)
ensemble_pred = np.expand_dims(ensemble_pred,axis=1)
acc_ens = np.mean(ensemble_pred == ytrue)
assert len(ensemble_pred) == N, 'Something in the sizes of argmax faulted'
print('Ensemble accuracy is %.3f'%(acc_ens))
#Make predictions on the testset
test_pred = np.argmax(logits_test_sum, axis=1)
from __future__ import print_function
import numpy as np
from scipy import sparse
from scipy.interpolate import griddata
def fast_histogram2d(x, y, bins=10, weights=None, reduce_w=None, NULL=None,
reinterp=None):
"""
Compute the sparse bi-dimensional histogram of two data samples where *x*,
and *y* are 1-D sequences of the same length. If *weights* is None
(default), this is a histogram of the number of occurences of the
observations at (x[i], y[i]).
If *weights* is specified, it specifies values at the coordinate (x[i],
y[i]). These values are accumulated for each bin and then reduced according
to *reduce_w* function, which defaults to numpy's sum function (np.sum).
(If *weights* is specified, it must also be a 1-D sequence of the same
length as *x* and *y*.)
Parameters
------
x: ndarray[ndim=1]
first data sample coordinates
y: ndarray[ndim=1]
second data sample coordinates
bins: int or [int, int]
int, the number of bins for the two dimensions (nx=ny=bins)
or [int, int], the number of bins in each dimension (nx, ny = bins)
weights: ndarray[ndim=1]
values *w_i* weighing each sample *(x_i, y_i)*
accumulated and reduced (using reduced_w) per bin
reduce_w: callable
function that will reduce the *weights* values accumulated per bin
defaults to numpy's sum function (np.sum)
NULL: value type
filling missing data value
reinterp: str in {'linear', 'nearest', 'cubic'}, optional
Method of interpolation.
if set, reinterpolation is made using scipy.interpolate.griddata to
fill missing data within the convex polygone that encloses the data
Returns
-------
B: ndarray[ndim=2]
bi-dimensional histogram
extent: tuple(4)
(xmin, xmax, ymin, ymax) extension of the histogram
steps: tuple(2)
(dx, dy) bin size in x and y direction
"""
# define the bins (do anything you want here but needs edges and sizes of
# the 2d bins)
try:
nx, ny = bins
except TypeError:
nx = ny = bins
# values you want to be reported
if weights is None:
weights = np.ones(x.size)
if reduce_w is None:
reduce_w = np.sum
else:
if not hasattr(reduce_w, '__call__'):
raise TypeError('reduce function is not callable')
# culling nans
finite_inds = (np.isfinite(x) & np.isfinite(y) & np.isfinite(weights))
_x = np.asarray(x)[finite_inds]
_y = np.asarray(y)[finite_inds]
_w = np.asarray(weights)[finite_inds]
import cv2
import numpy as np
import IPython
def _cvt_2uint8(array, clip=True):
output = np.clip(array, 0, 255) if clip else array
output = output.astype('uint8')
return output
def jupyter_img_show(img):
_, i_img = cv2.imencode('.png', img)
IPython.display.display(IPython.display.Image(data=i_img))
"""
********************************
Intensity transformation
********************************
"""
def gamma_correction(img, gamma, c=1):
"""
Apply gamma correction on input image.
s = 255 * c * (r / 255) ^ gamma
Args:
img: input image array
gamma: the gamma value of the gamma correction, which is between 0 and 1.
c: the constant of gamma_correction, which is between 0 and 1.
Returns:
a transformed image array.
"""
trans_img = 255 * (c * (img / 255) ** gamma)
# clip
trans_img = _cvt_2uint8(trans_img)
return trans_img
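# Worked example (illustrative): with c = 1 and gamma = 0.5, a mid-gray pixel r = 128 maps to
# s = 255 * (128 / 255) ** 0.5 ≈ 181, so gamma < 1 brightens the darker tones.
# e.g. brightened = gamma_correction(img, gamma=0.5)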
def histogram_equalization(img) :
"""
Apply histogram equalization on input image.
s = (L - 1) \sum_{j=0}^{k} p_{j}(r_{j})
Args:
img: input image array
Returns:
a transformed image array.
"""
# tuning dimension
is_grayscale = len(img.shape) == 2
img_3dim = np.expand_dims(img, axis=2) if is_grayscale else img
# count
count = np.zeros((256, img_3dim.shape[2]), dtype='int32')
for c in range(img_3dim.shape[2]):
for x in range(img_3dim.shape[0]):
for y in range(img_3dim.shape[1]):
count[img_3dim[x][y][c]][c] += 1
# Build lookup table
lookup_table = _cvt_2uint8(255 * np.cumsum(count, axis=0) / (img_3dim.shape[0] * img_3dim.shape[1]), False)
# apply transform
trans_img_3dim = np.zeros(img_3dim.shape, dtype='float32')
for x in range(img_3dim.shape[0]):
for y in range(img_3dim.shape[1]):
for c in range(img_3dim.shape[2]):
trans_img_3dim[x][y][c] = lookup_table[img_3dim[x][y][c]][c]
# tuning dimension
trans_img = np.squeeze(trans_img_3dim, axis=2) if is_grayscale else trans_img_3dim
# clip
trans_img = _cvt_2uint8(trans_img)
return trans_img
def piecewise_linear_transformation(img, funcs, break_points):
"""
Apply piecewise linear transformation on input image.
The following conditions should be satisfied
1. each function is an increasing linear function
2. len(funcs) - len(break_points) = 1
3. for each element b in break_points, 0 < b < 255
4. 2 neighbor function must have same value at their common break point.
Args:
img: input image array
funcs: a list of functions those are used on transformation
break_points: a list of break point.
Returns:
a transformed image array.
"""
def binary_search(array, target):
start = 0
end = len(array)
while end - start > 1:
mid = (start + end) // 2
if array[mid] == target:
return mid
elif array[mid] > target:
end = mid
else:
start = mid
return start
# tuning dimension
is_grayscale = len(img.shape) == 2
img_3dim = np.expand_dims(img, axis=2) if is_grayscale else img
# apply transformation
trans_img_3dim = np.zeros(img_3dim.shape, dtype='float32')
for x in range(trans_img_3dim.shape[0]):
for y in range(trans_img_3dim.shape[1]):
for c in range(trans_img_3dim.shape[2]):
func = funcs[binary_search([0] + break_points, img_3dim[x][y][c])]
trans_img_3dim[x][y][c] = func(img_3dim[x][y][c])
# tuning dimension
trans_img = np.squeeze(trans_img_3dim, axis=2) if is_grayscale else trans_img_3dim
# clip
trans_img = _cvt_2uint8(trans_img)
return trans_img
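# Illustrative usage sketch for piecewise_linear_transformation (the two segment slopes below
# are assumptions, chosen only to satisfy the conditions listed in the docstring: both
# increasing and equal at the single break point r = 128).
def _demo_piecewise_linear():
    demo = np.random.randint(0, 256, size=(4, 4)).astype('uint8')
    low = lambda r: 0.5 * r            # increasing on [0, 128], reaches 64 at the break point
    high = lambda r: 1.5 * r - 128.0   # increasing on [128, 255], also equals 64 at r = 128
    return piecewise_linear_transformation(demo, funcs=[low, high], break_points=[128])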
def negative_transformation(img):
"""
Apply negative transformation on input image.
Args:
img: input image array
Returns:
a transformed image array.
"""
trans_img = 255 - img
return trans_img
"""
**************
filter
**************
"""
def custom_filter(img, is_order_statistics, mode='constant', clip=True, **kwargs):
"""
Apply custom filter on input image.
Args:
img: input image array
is_order_statistics: whether this filter is order statistics filter
mode: str or function, optional (used for numpy.pad)
clip: whether clip value so that the range of value is between 0 and 255
kwargs:
kernel: custom filter whose shape should be (odd integer, odd integer), only used when is_order_statistics=False
k_size: a tuple which contains kernel sizes, only used when is_order_statistics=True
filtering: a function which acts on each filtering step, only used when is_order_statistics=True
Returns:
a filtered image array.
"""
# tuning dimension
is_grayscale = len(img.shape) == 2
img_3dim = np.expand_dims(img, axis=2) if is_grayscale else img
# create output skeleton
filter_img_3dim = np.zeros(img_3dim.shape, dtype='float32')
# create kwargs for np.pad
pad_kwargs = dict()
for kwarg in ['stat_length', 'constant_values', 'end_values', 'reflect_type']:
if kwarg in kwargs:
pad_kwargs[kwarg] = kwargs[kwarg]
if is_order_statistics:
# extract parameters
k_size = kwargs['k_size']
if isinstance(k_size, int):
k_dim_0 = k_size
k_dim_1 = k_size
else:
k_dim_0 = k_size[0]
k_dim_1 = k_size[1]
filtering = kwargs['filtering']
# pad img
pad_dim_0, pad_dim_1 = k_dim_0//2, k_dim_1//2
pad_img = np.pad(img_3dim, ((pad_dim_0, pad_dim_0), (pad_dim_1, pad_dim_1), (0, 0)), mode, **pad_kwargs)
# apply filter
for c in range(filter_img_3dim.shape[2]):
for x in range(filter_img_3dim.shape[0]):
for y in range(filter_img_3dim.shape[1]):
sliding_window = pad_img[x:x+k_dim_0, y:y+k_dim_1, c]
filter_img_3dim[x][y][c] = filtering(sliding_window)
else:
# extract parameters
kernel = kwargs['kernel']
# pad img
pad_dim_0, pad_dim_1 = kernel.shape[0]//2, kernel.shape[1]//2
pad_img = np.pad(img_3dim, ((pad_dim_0, pad_dim_0), (pad_dim_1, pad_dim_1), (0, 0)), mode, **pad_kwargs)
# apply filter
for c in range(filter_img_3dim.shape[2]):
for x in range(filter_img_3dim.shape[0]):
for y in range(filter_img_3dim.shape[1]):
sliding_window = pad_img[x:x+kernel.shape[0], y:y+kernel.shape[1], c]
filter_img_3dim[x][y][c] = np.sum(sliding_window * kernel)
# tuning dimension
filter_img = np.squeeze(filter_img_3dim, axis=2) if is_grayscale else filter_img_3dim
# clip
filter_img = _cvt_2uint8(filter_img) if clip else filter_img
return filter_img
def laplacian_filter(img, add_original=True, mode='constant', **kwargs):
"""
Apply Laplacian filter on input image.
Args:
img: input image array
add_original: whether adding original image after applying laplacian filter
mode: str or function, optional (used for numpy.pad)
kwargs:
constant_values: Used in 'constant'. The values to set the padded values for each axis.
Returns:
a filtered image array.
"""
# create kernel
kernel = np.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]])
# apply filter
kwargs['kernel'] = kernel
if add_original:
filter_img = img + custom_filter(img, False, mode, False, **kwargs)
filter_img = _cvt_2uint8(filter_img)
else:
filter_img = custom_filter(img, False, mode, **kwargs)
return filter_img
def gaussian_filter(img, k_size, sigma=1, mode='constant', **kwargs):
"""
Apply gaussian filter on input image.
Args:
img: input image array
k_size: kernel size, must be an odd integer
sigma: variance of gaussian distribution
mode: str or function, optional (used for numpy.pad)
kwargs:
constant_values: Used in 'constant'. The values to set the padded values for each axis.
Returns:
a filtered image array.
"""
# create kernel
start, end = -1 * (k_size // 2), k_size // 2 + 1
x, y = np.mgrid[start:end, start:end]
kernel = np.exp((x**2 + y**2)/(-2 * sigma**2)) / (2 * np.pi * sigma**2)
# apply filter
kwargs['kernel'] = kernel
filter_img = custom_filter(img, False, mode, **kwargs)
return filter_img
def median_filter(img, k_size, mode='constant', **kwargs):
"""
Apply median filter on input image.
Args:
img: input image array
k_size: kernel size tuple, must be odd integers
mode: str or function, optional (used for numpy.pad)
kwargs:
constant_values: Used in 'constant'. The values to set the padded values for each axis.
Returns:
a filtered image array.
"""
# define single step filtering
def filtering(sliding_window):
return np.median(sliding_window)
import numpy as np
from copy import deepcopy
# assuming J = 1, kB = 1
rng = np.random.default_rng()  # random generator used below (assumed; its definition is not shown in this excerpt)
def Mag(lat):
return lat.sum()
def MagDelta(cluster, updown):
return -2*updown*len(cluster)
def EDelta(lat, L, x, y):
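# Energy change for flipping the spin at (x, y): dE = 2 * s_xy * (sum of the four neighbours),
# with J = 1 and periodic boundaries handled by the modulo indexing.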
return 2*lat[x][y]*(lat[(x-1)%L][y]+ lat[(x+1)%L][y]+ lat[x][(y-1)%L]+ lat[x][(y+1)%L])
def SpinFlip(lat, cluster, updown):
for i in cluster:
lat[i[0]][i[1]] = -updown
def Cluster(lat, L, seed, beta=1):
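# Wolff cluster growth: neighbours with the same spin as the seed join the cluster with the
# standard bond probability p = 1 - exp(-2*beta) (J = 1); the finished cluster is flipped afterwards.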
# seed: tuple
tx,ty = seed[0],seed[1]
updown = lat[tx][ty]
lat[tx][ty] = 0
cluster = [] # cluster index
cbound = [seed]
cout = []
while True:
for b in cbound:
x,y = b[0],b[1]
# left
rand = rng.random()
if lat[(x-1)%L][y]==updown and rand<1-np.exp(-2*beta):
lat[(x-1)%L][y] = 0
cout.append(((x-1)%L,y))
# right
rand = rng.random()
if lat[(x+1)%L][y]==updown and rand<1-np.exp(-2*beta):
lat[(x+1)%L][y] = 0
cout.append(((x+1)%L,y))
# top
rand = rng.random()
if lat[x][(y-1)%L]==updown and rand<1-np.exp(-2*beta):
lat[x][(y-1)%L] = 0
cout.append((x,(y-1)%L))
# bottom
rand = rng.random()
if lat[x][(y+1)%L]==updown and rand<1-np.exp(-2*beta):
lat[x][(y+1)%L] = 0
cout.append((x,(y+1)%L))
if len(cout)==0:
cluster = cluster + cbound
break
else:
cluster = cluster + cbound
cbound = cout
cout = []
if len(cluster)>=L**2:
break
return cluster, updown
def Wolff(lat, L, beta=1):
posx,posy = rng.integers(0,L,endpoint=False), rng.integers(0,L,endpoint=False)
seed = (posx,posy)
cluster, updown = Cluster(lat, L, seed, beta)
# SpinFlip
SpinFlip(lat, cluster, updown)
# MagDelta
dmag = -2*updown*len(cluster)
return dmag, len(cluster)
def Metropolis(lat, L, beta=1):
posx,posy = rng.integers(0,L,endpoint=False), rng.integers(0,L,endpoint=False)
dE = EDelta(lat, L, posx, posy)
rand = rng.random()
dmag = 0
if dE<=0 or rand<np.exp(-beta*dE):
dmag = -2*lat[posx][posy]
lat[posx][posy] = -lat[posx][posy]
return dmag
def bondSW(lat, L, beta):
bondx = np.zeros((L, L), dtype=np.bool8)
# Test the Correlation module
from UQpy.transformations import Decorrelate
import numpy as np
import pytest
def test_samples():
samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
rz = np.array([[1.0, 0.8], [0.8, 1.0]])
ntf_obj = Decorrelate(samples_z=samples_z, corr_z=rz)
np.testing.assert_allclose(ntf_obj.samples_u, [[0.3, 0.19999999999999998], [0.2, 2.4000000000000004]], rtol=1e-09)
def test_samples_u():
samples_z = np.array([[0.3, 0.36], [0.2, 1.6]])
import sys,glob,os,time,copy
import numpy as np
import pickle as pickle
import multiprocessing as mp
from .. import get_img_info, corrections, visual_tools, spot_tools, domain_tools
from .. import _correction_folder, _corr_channels, _temp_folder,_distance_zxy,\
_sigma_zxy,_image_size, _allowed_colors, _num_buffer_frames, _num_empty_frames, _image_dtype
from ..External import Fitting_v3
from scipy import ndimage, stats
from scipy.spatial.distance import pdist,cdist,squareform
from skimage import morphology
from skimage.segmentation import random_walker
from functools import partial
import matplotlib
import matplotlib.pyplot as plt
import h5py
import ast
_allowed_kwds = {'combo': 'c',
'decoded':'d',
'unique': 'u',
'relabeled_unique':'l',
'merfish': 'm',
#'rna-unique':'r',
'rna': 'r', # long term used label, because "-" is creating issue in python
'gene':'g',
'protein':'p',
}
_max_num_seeds = 4000
_min_num_seeds = 50
_spot_seeding_th = 200
from . import batch_functions
from . import field_of_view
# initialize pool
init_dic = {}
def _init_unique_pool(_ic_profile_dic, _cac_profile_dic, _ic_shape, _cac_shape):
"""initialize pool, function used to put data into shared memory"""
print(f"- Initialize core with illumination correction profiles for {list(_ic_profile_dic.keys())}")
init_dic['illumination'] = _ic_profile_dic
print(f"- Initialize core with chromatic correction profiles for {list(_cac_profile_dic.keys())}")
init_dic['chromatic'] = _cac_profile_dic
init_dic['ic_shape'] = _ic_shape
init_dic['cac_shape'] = _cac_shape
def _fit_single_image(_im, _id, _chrom_coords, _seeding_args, _fitting_args, _check_fitting=True,
_normalization=True, _verbose=False):
if _verbose:
print(f"+++ fitting for region:{_id}")
_spots_for_chrom = []
if _normalization:
_norm_cst = np.nanmedian(_im)
for _chrom_coord in _chrom_coords:
if _im is None:
_spots_for_chrom.append(np.array([]))
else:
# seeding
_seeds = visual_tools.get_seed_in_distance(_im, _chrom_coord, *_seeding_args)
if len(_seeds) == 0:
# no seed found, return empty array
_spots_for_chrom.append(np.array([]))
continue
# fit
_fitter = Fitting_v3.iter_fit_seed_points(
_im, _seeds.T, *_fitting_args)
_fitter.firstfit()
# if check-fitting
if _check_fitting:
_fitter.repeatfit()
#_fits = visual_tools.fit_multi_gaussian(_im, _seeds, *_fitting_args)
_spots = np.array(_fitter.ps)
if _normalization:
_spots[:,0] = _spots[:,0] / _norm_cst
_spots_for_chrom.append(_spots)
return _spots_for_chrom
# function to allow multi-processing pick spots
def _pick_spot_in_batch(_cell, _data_type='unique', _pick_type='EM',
_use_chrom_coords=True,
_sel_ids=None, _num_iters=10, _terminate_th=0.003,
_intensity_th=1, _hard_intensity_th=True, _spot_num_th=100,
_ref_spot_list=None, _ref_spot_ids=None, _ref_pick_type='EM',
_ignore_ids=False, _ref_dist_metric='median', _score_metric='linear',
_local_size=5, _w_ctdist=2, _w_lcdist=1, _w_int=1, _w_nbdist=2,
_save_inter_plot=False, _save_to_info=True, _save_plot=True,
_check_spots=True, _check_th=-3.5, _check_percentile=10.,
_hard_dist_th=6000, _distance_limits=[0, np.inf],
_ignore_nan=True, _nan_mask=0., _inf_mask=-1000.,
_chrom_share_spots=False, _plot_limits=[0, 1500], _cmap='seismic_r', _fig_dpi=300, _fig_size=4,
_overwrite=False, _verbose=True):
"""_cell: Cell_Data class"""
if _verbose:
print(f"-- {_pick_type} pick spots for fov:{_cell.fov_id}, cell:{_cell.cell_id}")
# notice: always load in attributes, never return indices in batch format
_picked_spots = _cell._pick_spots(_data_type=_data_type,
_pick_type=_pick_type,
_use_chrom_coords=_use_chrom_coords, _sel_ids=_sel_ids,
_num_iters=_num_iters, _terminate_th=_terminate_th,
_intensity_th=_intensity_th, _hard_intensity_th=_hard_intensity_th,
_spot_num_th=_spot_num_th, _ref_spot_list=_ref_spot_list,
_ref_spot_ids=_ref_spot_ids, _ref_pick_type=_ref_pick_type,
_ignore_ids=_ignore_ids,
_ref_dist_metric=_ref_dist_metric, _score_metric=_score_metric,
_local_size=_local_size, _w_ctdist=_w_ctdist, _w_lcdist=_w_lcdist,
_w_int=_w_int, _w_nbdist=_w_nbdist,
_distance_limits=_distance_limits, _ignore_nan=_ignore_nan,
_nan_mask=_nan_mask, _inf_mask=_inf_mask, _chrom_share_spots=_chrom_share_spots,
_check_spots=_check_spots, _check_th=_check_th,
_check_percentile=_check_percentile, _hard_dist_th=_hard_dist_th,
_save_inter_plot=_save_inter_plot, _save_to_attr=True, _save_to_info=_save_to_info,
_return_indices=False, _overwrite=_overwrite,
_verbose=_verbose)
_distmaps = _cell._generate_distance_map(_data_type=_data_type,
_pick_type=_pick_type,
_sel_ids=_sel_ids, _save_info=_save_to_info, _save_plot=_save_plot,
_limits=_plot_limits, _cmap=_cmap,
_fig_dpi=_fig_dpi, _fig_size=_fig_size,
_overwrite=_overwrite, _verbose=_verbose)
return _cell
def _load_cell_in_batch(_cell, _data_type='all', _save_folder=None,
_decoded_flag=None, _distmap_data='unique', _distmap_pick='EM',
_load_attrs=[], _exclude_attrs=[], _overwrite=False, _verbose=True):
"""Function to allow batch loading"""
_cell._load_from_file(_data_type=_data_type, _save_folder=_save_folder,
_decoded_flag=_decoded_flag,
_distmap_data=_distmap_data, _distmap_pick=_distmap_pick,
_load_attrs=_load_attrs, _exclude_attrs=_exclude_attrs,
_overwrite=_overwrite, _verbose=_verbose)
return _cell
def _save_cell_in_batch(_cell, _data_type='cell_info', _save_dic={}, _save_folder=None,
_unsaved_attrs=None, _clear_old_attrs=False,
_overwrite=False, _verbose=True):
"""Function to allow batch saving"""
_cell._save_to_file(_data_type=_data_type, _save_dic=_save_dic, _save_folder=_save_folder,
_unsaved_attrs=_unsaved_attrs, _clear_old_attrs=_clear_old_attrs,
_overwrite=_overwrite, _verbose=_verbose)
# batch merge cells
def _merge_RNA_to_DNA_in_batch(_cell, _source_cell_data, _merge_type='cell_info',
_attr_feature='rna-',
_load_in_ram=True, _save_to_file=True,
_overwrite=False, _verbose=True):
"""Function to allow batch cell_data merging"""
_cell._merge_RNA_to_DNA(_source_cell_data=_source_cell_data,
_merge_type=_merge_type,
_attr_feature=_attr_feature,
_load_in_ram=_load_in_ram, _save_to_file=_save_to_file,
_overwrite=_overwrite, _verbose=_verbose)
return _cell
class Cell_List():
"""
Class Cell_List:
this is a typical data structure of cells within one chromosome with images in multiple independent color-channels and decoding-groups.
"""
# initialize
def __init__(self, parameters, _chosen_fovs=[], _exclude_fovs=[],
_load_all_attr=False, _load_reference_info=True,
_color_filename='Color_Usage'):
if not isinstance(parameters, dict):
raise TypeError('wrong input type of parameters, should be a dictionary containing essential info.')
## required parameters: data folder (list)
if isinstance(parameters['data_folder'], list):
self.data_folder = [str(_fd) for _fd in parameters['data_folder']]
else:
self.data_folder = [str(parameters['data_folder'])]
## extract hybe folders and field-of-view names
self.folders = []
for _fd in self.data_folder:
_hyb_fds, _fovs = get_img_info.get_folders(_fd, feature='H', verbose=True)
self.folders += _hyb_fds
self.fovs = _fovs
## experiment_type, default is DNA
if 'experiment_type' in parameters:
setattr(self, 'experiment_type', parameters['experiment_type'])
else:
setattr(self, 'experiment_type', 'DNA')
# experiment_folder
if 'experiment_folder' in parameters:
self.experiment_folder = parameters['experiment_folder']
else:
self.experiment_folder = os.path.join(self.data_folder[0], 'Experiment')
# experiment type
if 'experiment_type' in parameters:
self.experiment_type = parameters['experiment_type']
else:
self.experiment_type = 'DNA'
## analysis_folder, segmentation_folder, save_folder, correction_folder,map_folder
if 'analysis_folder' in parameters:
self.analysis_folder = str(parameters['analysis_folder'])
else:
self.analysis_folder = self.data_folder[0]+os.sep+'Analysis'
if 'segmentation_folder' in parameters:
self.segmentation_folder = parameters['segmentation_folder']
else:
self.segmentation_folder = self.analysis_folder+os.sep+'segmentation'
if 'save_folder' in parameters:
self.save_folder = parameters['save_folder']
else:
self.save_folder = self.analysis_folder+os.sep+'5x10'
if 'correction_folder' in parameters:
self.correction_folder = parameters['correction_folder']
else:
self.correction_folder = _correction_folder
if 'drift_folder' in parameters:
self.drift_folder = parameters['drift_folder']
else:
self.drift_folder = self.analysis_folder+os.sep+'drift'
if 'map_folder' in parameters:
self.map_folder = parameters['map_folder']
else:
self.map_folder = self.analysis_folder+os.sep+'distmap'
# number of num_threads
if 'num_threads' in parameters:
self.num_threads = parameters['num_threads']
else:
self.num_threads = int(os.cpu_count() / 4) # default: use one quarter of the cpus.
# other shared_parameters for imaging processing, etc
if "shared_parameters" in parameters:
self.shared_parameters = parameters['shared_parameters']
else:
self.shared_parameters = {}
## if loading all remaining attr in parameter
if _load_all_attr:
for _key, _value in parameters.items():
if not hasattr(self, _key):
setattr(self, _key, _value)
## list to store Cell_data
self.cells = []
# distance from pixel to nm:
if 'distance_zxy' not in self.shared_parameters:
self.shared_parameters['distance_zxy'] = _distance_zxy
if 'sigma_zxy' not in self.shared_parameters:
self.shared_parameters['sigma_zxy'] = _sigma_zxy
if 'single_im_size' not in self.shared_parameters:
self.shared_parameters['single_im_size'] = _image_size
if 'num_buffer_frames' not in self.shared_parameters:
self.shared_parameters['num_buffer_frames'] = _num_buffer_frames
if 'num_empty_frames' not in self.shared_parameters:
self.shared_parameters['num_empty_frames'] = _num_empty_frames
if 'normalization' not in self.shared_parameters:
self.shared_parameters['normalization'] = False
if 'corr_bleed' not in self.shared_parameters:
self.shared_parameters['corr_bleed'] = True
if 'corr_Z_shift' not in self.shared_parameters:
self.shared_parameters['corr_Z_shift'] = True
if 'corr_hot_pixel' not in self.shared_parameters:
self.shared_parameters['corr_hot_pixel'] = True
if 'corr_illumination' not in self.shared_parameters:
self.shared_parameters['corr_illumination'] = True
if 'corr_chromatic' not in self.shared_parameters:
self.shared_parameters['corr_chromatic'] = True
if 'allowed_kwds' not in self.shared_parameters:
self.shared_parameters['allowed_data_types'] = _allowed_kwds
## chosen field of views
if len(_chosen_fovs) == 0: # no specification
_chosen_fovs = np.arange(len(_fovs))
if len(_chosen_fovs) > 0: # there are specifications
_chosen_fovs = [_i for _i in _chosen_fovs if _i <= len(_fovs)]
_chosen_fovs = list(np.array(np.unique(_chosen_fovs), dtype=np.int))
# exclude fovs
if len(_exclude_fovs) > 0: #exclude any fov:
for _i in _exclude_fovs:
if _i in _chosen_fovs:
_chosen_fovs.pop(_chosen_fovs.index(_i))
# save values to the class
self.fov_ids = _chosen_fovs
self.chosen_fovs = list(np.array(self.fovs)[np.array(self.fov_ids, dtype=np.int)])
# read color-usage and encodding-scheme
if not hasattr(self, 'color_dic') or not hasattr(self, 'channels'):
self._load_color_info(_color_filename=_color_filename)
# load extra info for DNA / RNA
if _load_reference_info:
if getattr(self, 'experiment_type') == 'RNA' and not hasattr(self, 'rna-info_dic'):
self._load_rna_info()
elif getattr(self, 'experiment_type') == 'DNA' and not hasattr(self, 'region_dic'):
self._load_genomic_regions()
# get annotated folders by color usage
self.annotated_folders = []
for _hyb_fd, _info in self.color_dic.items():
_matches = [_fd for _fd in self.folders if _hyb_fd == _fd.split(os.sep)[-1]]
if len(_matches)==1:
self.annotated_folders.append(_matches[0])
print(f"{len(self.annotated_folders)} folders are found according to color-usage annotation.")
# tool for iteration
self.index = 0
# allow print info of Cell_List
def __str__(self):
if hasattr(self, 'data_folder'):
print("Data folder:", self.data_folder)
if hasattr(self, 'cells'):
print("Number of cells in this list:", len(self.cells))
return 'test'
# allow iteration of Cell_List
def __iter__(self):
return self.cells
def __next__(self):
if not hasattr(self, 'cells') or not hasattr(self, 'index'):
raise StopIteration
elif self.index == 0:
raise StopIteration
else:
self.index -= 1
return self.cells[self.index]
## Load basic info
def _load_color_info(self, _color_filename='Color_Usage', _color_format='csv', _save_color_dic=True):
_color_dic, _use_dapi, _channels = get_img_info.Load_Color_Usage(self.analysis_folder,
color_filename=_color_filename,
color_format=_color_format,
return_color=True)
# need-based store color_dic
if _save_color_dic:
self.color_dic = _color_dic
# store other info
self.use_dapi = _use_dapi
self.channels = [str(ch) for ch in _channels]
# channel for beads
_bead_channel = get_img_info.find_bead_channel(_color_dic)
self.bead_channel_index = _bead_channel
_dapi_channel = get_img_info.find_dapi_channel(_color_dic)
self.dapi_channel_index = _dapi_channel
return _color_dic
## load RNA
def _load_rna_info(self, _filename='RNA_Info', _table_format='csv',
_match_to_genomic_region=True, _verbose=True):
"""Load RNA information"""
_rna_dic = get_img_info.Load_RNA_Info(self.analysis_folder, filename=_filename,
table_format=_table_format, verbose=_verbose)
if _match_to_genomic_region:
_region_dic = self._load_genomic_regions(_verbose=_verbose)
_rna_dic = get_img_info.match_RNA_to_DNA(_rna_dic, _region_dic)
# set to attribute
setattr(self, 'rna-info_dic', _rna_dic)
return _rna_dic
## load Gene
def _load_gene_info(self, _filename='Gene_Info', _table_format='csv',
_match_to_genomic_region=True, _verbose=True):
"""Load RNA information"""
_gene_dic = get_img_info.Load_Gene_Info(self.analysis_folder, filename=_filename,
table_format=_table_format, verbose=_verbose)
if _match_to_genomic_region:
_region_dic = self._load_genomic_regions(_verbose=_verbose)
_gene_dic = get_img_info.match_Gene_to_DNA(_gene_dic, _region_dic)
# set to attribute
setattr(self, 'gene_dic', _gene_dic)
return _gene_dic
## load genomic regions
def _load_genomic_regions(self, _filename='Region_Positions', _table_format='csv', _verbose=True):
"""Function to load Genomic Positions etc."""
_region_dic = get_img_info.Load_Region_Positions(self.analysis_folder, filename=_filename,
table_format=_table_format, verbose=_verbose)
setattr(self, 'region_dic', _region_dic)
return _region_dic
def _load_encoding_scheme(self, _encoding_filename='Encoding_Scheme', _encoding_format='csv', _save_encoding_scheme=True):
_encoding_scheme, self.hyb_per_group, self.reg_per_group, \
self.encoding_colors, self.encoding_group_nums \
= get_img_info.Load_Encoding_Scheme(self.analysis_folder,
encoding_filename=_encoding_filename,
encoding_format=_encoding_format,
return_info=True)
# need-based encoding scheme saving
if _save_encoding_scheme:
self.encoding_scheme = _encoding_scheme
return _encoding_scheme
## Pick segmentations info for all fovs
def _pick_cell_segmentations(self, _num_threads=None, _allow_manual=True,
_min_shape_ratio=0.036, _signal_cap_ratio=0.2, _denoise_window=5,
_shrink_percent=13, _max_conv_th=0, _min_boundary_th=0.48,
_load_in_ram=True, _save=True, _save_npy=True, _save_postfix='_segmentation',
_cell_coord_fl='cell_coords.pkl', _overwrite=False, _verbose=True):
## load segmentation
# check attributes
if not hasattr(self, 'channels') or not hasattr(self, 'color_dic'):
self._load_color_info()
if _num_threads is None:
if not hasattr(self, 'num_threads'):
raise AttributeError('No num_threads given in funtion kwds and class attributes')
else:
_num_threads = self.num_threads
# find the folder name for dapi
_select_dapi = False # not select dapi fd yet
for _fd, _info in self.color_dic.items():
if len(_info) >= self.dapi_channel_index+1 and _info[self.dapi_channel_index] == 'DAPI':
_dapi_fd = [_full_fd for _full_fd in self.annotated_folders if os.path.basename(_full_fd) == _fd]
if len(_dapi_fd) == 1:
if _verbose:
print(f"-- choose dapi images from folder: {_dapi_fd[0]}.")
_dapi_fd = _dapi_fd[0]
_select_dapi = True # successfully selected dapi
if not _select_dapi:
raise ValueError("No DAPI folder detected in annotated_folders, stop!")
# prepare filenames for images to do segmentation
if _verbose:
print(f"{len(self.chosen_fovs)} of field-of-views are selected to load segmentation.")
_chosen_files = [os.path.join(_dapi_fd, _fov) for _fov in self.chosen_fovs]
# do segmentation
_segmentation_labels, _dapi_ims = visual_tools.DAPI_convoluted_segmentation(
_chosen_files, self.channels[self.dapi_channel_index], num_threads=_num_threads,
single_im_size=self.shared_parameters['single_im_size'],
all_channels=self.channels,
num_buffer_frames=self.shared_parameters['num_buffer_frames'],
num_empty_frames=self.shared_parameters['num_empty_frames'],
correction_folder=self.correction_folder,
illumination_correction=self.shared_parameters['corr_illumination'],
min_shape_ratio=_min_shape_ratio, signal_cap_ratio=_signal_cap_ratio,
denoise_window=_denoise_window, shrink_percent=_shrink_percent,
max_conv_th=_max_conv_th, min_boundary_th=_min_boundary_th,
make_plot=False, return_images=True,
save=_save, save_npy=_save_npy, save_folder=self.segmentation_folder,
save_postfix=_save_postfix, force=_overwrite, verbose=_verbose)
## pick(exclude) cells from previous result
if _allow_manual:
# generate coordinates
_coord_list, _index_list = [],[]
for _i, _label in enumerate(_segmentation_labels):
for _j in range(np.max(_label)):
_center = np.round(ndimage.measurements.center_of_mass(_label==_j+1))
_center = list(np.flipud(_center))
_center.append(_dapi_ims[0].shape[0]/2)
_coord_list.append(_center)
_index_list.append(_i)
# wrap into a dic
_cell_coord_dic = {'coords': _coord_list,
'class_ids': _index_list,
'pfits':{},
'dec_text':{},
}
self.cell_coord_dic = copy.deepcopy(_cell_coord_dic)
# use visual tools to pick
_cell_coord_savefile = self.segmentation_folder + os.sep + _cell_coord_fl
_cell_viewer = visual_tools.imshow_mark_3d_v2(_dapi_ims, image_names=self.chosen_fovs,
save_file=_cell_coord_savefile,
given_dic=_cell_coord_dic)
return _cell_viewer
else:
return _segmentation_labels, _dapi_ims
def _update_cell_segmentations(self, _cell_coord_fl='cell_coords.pkl',
_overwrite_segmentation=False,
_marker_displace_th = 50,
_append_new=True, _append_radius=100,
_overlap_percent=60,
_save_npy=True, _save_postfix="_segmentation",
_make_plot=True, _return_all=False, _verbose=True):
"""Function to update cell segmentation info from saved file,
- usually do this after automatic segmentation
Inputs:
_cell_coord_fl: cell coordinate file generated by _pick_cell_segmentations, str
_overwrite_segmentation: whether overwrite previous segmentation files, bool (default: True)
_marker_displace_th: overall displacement of picked cellcenter to previous ones, int (default:300)
_append_new: whether append manually picked spots, bool (default: True)
_append_radius: the radii of circled-shape label appended manually, int (default:90)
_overlap_percent: percentage of manual labels allowed to overlap with existing labels, float (default:60)
_save_npy: whether save .npy file or .pkl file, bool (default: True)
_save_postfix: filename postfix for saved segmentation files, str
_make_plot: whether make plots for new segmentation labels, bool (default: True)
_return_all: whether return all info, bool (default: False)
_verbose: say something!, bool (default: True)
Outputs:
_new_seg_labels, _remove_cts, _append_cts"""
## decide save_handle
if _save_npy:
_file_type = '.npy'
else:
_file_type = '.pkl'
print(f"- Update segmentation information for file type: {_file_type}")
## check saved cell_coord.pkl file, which was generated by _pick_cell_segmentations
_cell_coord_savefile = self.segmentation_folder + os.sep + _cell_coord_fl
if not os.path.exists(_cell_coord_savefile):
raise IOError(f'{_cell_coord_savefile} does not exist, exit')
# open cell_coord.pkl
with open(_cell_coord_savefile, 'rb') as handle:
_new_cell_coord_dic = pickle.load(handle)
# parse
_new_ccd = visual_tools.partition_map(_new_cell_coord_dic['coords'], _new_cell_coord_dic['class_ids'])
## check if cell_coord for automatic file existed, otherwise load
if not hasattr(self, 'cell_coord_dic'):
# check if all segmentation files exists
_segmentation_filenames = [os.path.join(self.segmentation_folder, _fov.replace('.dax', _save_postfix + _file_type)) for _fov in self.chosen_fovs]
_missed_segmentation_files = [_fl for _fl in _segmentation_filenames if not os.path.isfile(_fl)]
if len(_missed_segmentation_files) > 0:
raise IOError(f"Not full segmentation results were found, {_missed_segmentation_files} are missing!")
else:
# generate coordinates
_coord_list, _index_list = [],[]
for _i, _label_file in enumerate(_segmentation_filenames):
# load segmentation labels
_label = np.load(_label_file)
# get centers
for _j in range(np.max(_label)):
_center = np.round(ndimage.measurements.center_of_mass(_label==_j+1))
_center = list(np.flipud(_center))
_center.append(self.shared_parameters['single_im_size'][0]/2)  # assumption: mid z-plane of the image; _image_size was not defined in this scope
_coord_list.append(_center)
_index_list.append(_i)
# wrap into a dic
_cell_coord_dic = {'coords': _coord_list,
'class_ids': _index_list,
'pfits':{},
'dec_text':{},
}
# save to cell-list
self.cell_coord_dic = _cell_coord_dic
# parse
_ccd = visual_tools.partition_map(self.cell_coord_dic['coords'], self.cell_coord_dic['class_ids'])
# initialize
_new_seg_labels, _dapi_ims = [], []
_remove_cts, _append_cts = [], []
for _i, (_cell_coords, _new_cell_coords) in enumerate(zip(_ccd, _new_ccd)):
# now we are taking care of one specific field of view
if _verbose:
print(f"-- fov-{_i}, match manually picked cell with sgementation ")
# load fov image
_seg_file = os.path.join(self.segmentation_folder, self.chosen_fovs[_i].replace('.dax', _save_postfix+_file_type))
if _save_npy:
_seg_label = np.load(_seg_file)
if not _overwrite_segmentation:
# save original seg label into another file
_old_seg_folder = os.path.join(os.path.dirname(_seg_file), 'old')
if not os.path.exists(_old_seg_folder):
os.makedirs(_old_seg_folder)
_old_seg_file = os.path.join(os.path.dirname(_seg_file),
'old',
os.path.basename(_seg_file).replace(_save_postfix+_file_type, _save_postfix))
# notice: _file_type .npy was not added to _old_seg_file because np.save automatically adds postfix
np.save(_old_seg_file, _seg_label)
else:
_seg_label, _dapi_im = pickle.load(open(_seg_file, 'rb'))
if not _overwrite_segmentation:
# save original seg label into another file
_old_seg_file = _seg_file.replace(_save_postfix+_file_type, _save_postfix+'_old'+_file_type)
pickle.dump([_seg_label, _dapi_im], open(_old_seg_file, 'wb'))
# keep record of removed labels
_remove = 0
# keep cells in original segmentation with markers
for _l, _coord in enumerate(_cell_coords):
_dist = [np.sum((_c-_coord)**2) for _c in _new_cell_coords]
_match = [_d < _marker_displace_th for _d in _dist]
if sum(_match) == 0:
_seg_label[_seg_label==_l+1-_remove] = -1
_seg_label[_seg_label >_l+1-_remove] -= 1
_remove += 1
if _append_new:
_append = 0
if _verbose:
print(f"-- Appending manually added markers with radius={_append_radius}")
# local function used to add a new marker to label
def _add_round_marker(_label, _center, _radius, _overlap_percent=60, overwrite_marker=False):
"""Function to add round-marker with given center and radius"""
if len(_label.shape) != len(_center):
raise ValueError(
"Dimension of label and center doesn't match")
# convert format
_center = np.array(_center, dtype=np.int)
_radius = np.int(_radius)
# generate mask
_shape_lst = (list(range(_label.shape[i]))
for i in range(len(_label.shape)))
_coord_lst = np.meshgrid(*_shape_lst, indexing='ij')
# -*- coding: utf-8 -*-
"""
This script saves the input file for the horseshoe problem.
"""
__version__ = '1.0'
__author__ = '<NAME>'
import sys
import numpy as np
import numpy.matlib
sys.path.append(r'C:\BELLA')
from src.divers.excel import autofit_column_widths
from src.divers.excel import delete_file
from src.BELLA.save_set_up import save_constraints_BELLA
from src.BELLA.save_set_up import save_objective_function_BELLA
from src.BELLA.save_set_up import save_multipanel
from src.BELLA.save_set_up import save_materials
from src.BELLA.panels import Panel
from src.BELLA.multipanels import MultiPanel
from src.BELLA.constraints import Constraints
from src.BELLA.obj_function import ObjFunction
from src.BELLA.materials import Material
filename = 'input_file_horseshoe.xlsx'
# check for authorisation before overwriting
delete_file(filename)
n_panels = 18
### Design guidelines ---------------------------------------------------------
constraints_set = 'C0'
constraints_set = 'C1'
# constraints_set == 'C0' ->
# - ply-drop spacing rule enforced with a minimum of
# constraints.min_drop plies between ply drops at panel boundaries
# - covering rule enforced by preventing the drop of the
# constraints.n_covering outermost plies on each laminate surface
# - symmetry rule enforced, no other lay-up rules
#
# constraints_set == 'C1' ->
# - ply-drop spacing rule enforced with a minimum of
# constraints.min_drop plies between ply drops at panel boundaries
# - covering rule enforced by preventing the drop of the
# constraints.n_covering outermost plies on each laminate surface
# - symmetry rule enforced
# - 10% rule enforced
# if rule_10_Abdalla == True the rule is applied by restricting LPs instead of
# ply percentages and percent_Abdalla is the percentage limit of the
# rule
# otherwise:
# if combined_45_135 == True the restrictions are:
# - a maximum percentage of constraints.percent_0 0 deg plies
# - a maximum percentage of constraints.percent_90 90 deg plies
# - a maximum percentage of constraints.percent_45_135 +-45 deg plies
# if combined_45_135 == False the restrictions are:
# - a maximum percentage of constraints.percent_0 0 deg plies
# - a maximum percentage of constraints.percent_90 90 deg plies
# - a maximum percentage of constraints.percent_45 45 deg plies
# - a maximum percentage of constraints.percent_135 -45 deg plies
# - disorientation rule enforced with variation of fibre angle between
# adjacent plies limited to a maximum value of constraints.delta_angle
# degrees
# - contiguity rule enforced with no more than constraints.n_contig
# adjacent plies with same fibre angle
# - damage tolerance rule enforced
# if constraints.dam_tol_rule == 1 the restrictions are:
# - one outer ply at + or -45 deg at the laminate surfaces
# (2 plies in total)
# if constraints.dam_tol_rule == 2 the restrictions are:
# - [+45, -45] or [-45, +45] at the laminate surfaces
# (4 plies in total)
# if constraints.dam_tol_rule == 3 the restrictions are:
# - [+45,-45] [-45,+45] [+45,+45] or [-45,-45] at the laminate
# surfaces (4 plies in total)
# - out-of-plane orthotropy rule enforced to have small absolutes values
# of LP_11 and LP_12 such that the values of D16 and D26 are small too
## lay-up rules
# set of admissible fibre orientations
set_of_angles = np.array([-45, 0, 45, 90], dtype=int)
set_of_angles = np.array([
-45, 0, 45, 90, +30, -30, +60, -60, 15, -15, 75, -75], dtype=int)
sym = True # symmetry rule
oopo = False # out-of-plane orthotropy requirements
if constraints_set == 'C0':
bal = False # balance rule
rule_10_percent = False # 10% rule
diso = False # disorientation rule
contig = False # contiguity rule
dam_tol = False # damage-tolerance rule
else:
bal = True
rule_10_percent = True
diso = True
contig = True
dam_tol = True
rule_10_Abdalla = True # 10% rule restricting LPs instead of ply percentages
percent_Abdalla = 10 # percentage limit for the 10% rule applied on LPs
combine_45_135 = True # True if restriction on +-45 plies combined for 10% rule
percent_0 = 10 # percentage used in the 10% rule for 0 deg plies
percent_45 = 0 # percentage used in the 10% rule for +45 deg plies
percent_90 = 10 # percentage used in the 10% rule for 90 deg plies
percent_135 = 0 # percentage used in the 10% rule for -45 deg plies
percent_45_135 = 10 # percentage used in the 10% rule for +-45 deg plies
delta_angle = 45 # maximum angle difference for adjacent plies
n_contig = 5 # maximum number of adjacent plies with the same fibre orientation
dam_tol_rule = 1 # type of damage tolerance rule
## ply-drop rules
covering = True # covering rule
n_covering = 1 # number of plies ruled by covering rule at laminate surfaces
pdl_spacing = True # ply drop spacing rule
min_drop = 2 # Minimum number of continuous plies between ply drops
constraints = Constraints(
sym=sym,
bal=bal,
oopo=oopo,
dam_tol=dam_tol,
dam_tol_rule=dam_tol_rule,
covering=covering,
n_covering=n_covering,
rule_10_percent=rule_10_percent,
rule_10_Abdalla=rule_10_Abdalla,
percent_Abdalla=percent_Abdalla,
percent_0=percent_0,
percent_45=percent_45,
percent_90=percent_90,
percent_135=percent_135,
percent_45_135=percent_45_135,
combine_45_135=combine_45_135,
diso=diso,
contig=contig,
n_contig=n_contig,
delta_angle=delta_angle,
set_of_angles=set_of_angles,
min_drop=min_drop,
pdl_spacing=pdl_spacing)
### Objective function parameters ---------------------------------------------
# Coefficient for the 10% rule penalty
coeff_10 = 1
# Coefficient for the contiguity constraint penalty
coeff_contig = 1
# Coefficient for the disorientation constraint penalty
coeff_diso = 10
# Coefficient for the out-of-plane orthotropy penalty
coeff_oopo = 1
# Lamination-parameter weightings in panel objective functions
# (In practice these weightings can be different for each panel)
lampam_weightings = np.array([0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0])
## Multi-panel objective function
# Weightings of the panels in the multi-panel objective function
panel_weightings = np.ones((n_panels,), float)
# Coefficient for the ply drop spacing guideline penalty
coeff_spacing = 1
obj_func_param = ObjFunction(
constraints=constraints,
coeff_contig=coeff_contig,
coeff_diso=coeff_diso,
coeff_10=coeff_10,
coeff_oopo=coeff_oopo,
coeff_spacing=coeff_spacing)
### Multi-panel composite laminate layout -------------------------------------
# panel IDs
ID = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
# number of panels
n_panels = len(ID)
# panel number of plies
n_plies = [32, 28, 20, 18, 16, 22, 18, 24, 38,
34, 30, 28, 22, 18, 24, 30, 18, 22]
# panels adjacency
neighbour_panels = {
1 : [2, 9],
2 : [1, 3, 6, 10],
3 : [2, 4, 6],
4 : [3, 5, 7],
5 : [4, 8],
6 : [2, 3, 7],
7 : [4, 6, 8],
8 : [5, 7],
9 : [1, 10, 11],
10 : [2, 9, 12],
11 : [9, 12],
12 : [10, 11, 13, 16],
13 : [12, 14, 16],
14 : [13, 15, 17],
15 : [14, 18],
16 : [12, 13, 17],
17 : [14, 16, 18],
18 : [15, 17]}
# boundary weights
boundary_weights = {(1, 2) : 0.610,
(1, 9) : 0.457,
(2, 3) : 0.305,
(2, 6) : 0.305,
(2, 10) : 0.457,
(3, 4) : 0.305,
(3, 6) : 0.508,
(4, 5) : 0.305,
(4, 7) : 0.508,
(5, 8) : 0.508,
(6, 7) : 0.305,
(7, 8) : 0.305,
(9, 10) : 0.610,
(9, 11) : 0.457,
(10, 12) : 0.457,
(11, 12) : 0.610,
(12, 13) : 0.305,
(12, 16) : 0.305,
(13, 14) : 0.305,
(13, 16) : 0.508,
(14, 15) : 0.305,
(14, 17) : 0.508,
(15, 18) : 0.508,
(16, 17) : 0.305,
(17, 18) : 0.305}
# panel length in the x-direction (m)
length_x = (25.40/1000)*np.array([18, 18, 20, 20, 20, 20, 20, 20,
18, 18, 18, 18, 20, 20, 20, 20, 20, 20])
#!/usr/bin/env python
# --------------------------------------------------------
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# for testing
import argparse
import datetime
import numpy as np
import itertools
from core.bc import BC
from core.ddpg import DDPG
from tensorboardX import SummaryWriter
from experiments.config import *
from core.replay_memory import BaseMemory as ReplayMemory
from core import networks
from core.utils import *
import IPython
import matplotlib.pyplot as plt
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.utils.data
import cv2
import torch.nn as nn
import threading
import argparse
import pprint
import time, os, sys
import os.path as osp
import numpy as np
import copy
from core.env_planner import EnvPlanner
from OMG.omg.config import cfg as planner_cfg
# try: # ros
import tf
import tf2_ros
import rosnode
import message_filters
import _init_paths
import rospy
import tf.transformations as tra
import std_msgs.msg
from sensor_msgs.msg import Image, CameraInfo
from sensor_msgs.msg import PointCloud2, PointField
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Pose, PoseArray, Point, Quaternion
from sensor_msgs import point_cloud2
from cv_bridge import CvBridge, CvBridgeError
lock = threading.Lock()
# for real robot
from lula_franka.franka import Franka
from joint_listener import JointListener
from moveit import MoveitBridge
# use posecnn layer for backprojection
import posecnn_cuda
# graspnet
import tensorflow
sys.path.insert(0, '6dof-graspnet')
# set policy mode
GA_DDPG_ONLY = True
GRASPNET_ONLY = False
COMBINED = False
RANDOM_TARGET = False
USE_LOOK_AT = False
CONTACT_GRASPNET = False
PUT_BIN = False
# contact graspnet
from grasp_estimator import GraspEstimator, get_graspnet_config, joint_config
if CONTACT_GRASPNET:
sys.path.insert(0, 'contact_graspnet')
sys.path.insert(0, 'contact_graspnet/contact_graspnet')
from inference_edit import get_graspnet_config as get_graspnet_config_contact
from contact_grasp_estimator import GraspEstimator as GraspEstimatorContact
import config_utils
# compute look at pose according to object pose
def compute_look_at_pose(pose_listener, center_object, angle, distance, psi=0):
# find the hand camera to hand transformation
try:
tf_pose = pose_listener.lookupTransform('measured/camera_color_optical_frame', 'measured/right_gripper', rospy.Time(0))
pose_camera = make_pose(tf_pose)
except (tf2_ros.LookupException,
tf2_ros.ConnectivityException,
tf2_ros.ExtrapolationException):
pose_camera = None
if pose_camera is not None:
pose_camera[:3, :3] = np.eye(3)
pose_camera[:3, 3] *= -1
else:
print('cannot find camera to hand transformation')
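# convert psi and the elevation angle from degrees to radians (57.3 ~ 180/pi)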
psi /= 57.3
theta = angle / 57.3
r = distance
position_robot = center_object + np.array([-r * np.cos(theta) * np.cos(psi),
-r * np.cos(theta) * np.sin(psi),
r * np.sin(theta)], dtype=np.float32)
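# build the look-at rotation: Z points from the gripper position toward the object center,
# Y is horizontal (set by the azimuth psi), and X = Y x Z completes the right-handed frame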
Z_BG = center_object - position_robot
Z_BG /= np.linalg.norm(Z_BG)
Y_BG = np.array([-np.sin(psi), np.cos(psi), 0], dtype=np.float32)
X_BG = np.cross(Y_BG, Z_BG)
R_BG = np.zeros((3, 3), dtype=np.float32)
R_BG[:, 0] = X_BG
R_BG[:, 1] = Y_BG
R_BG[:, 2] = Z_BG
pose_robot = np.eye(4, dtype=np.float32)
pose_robot[:3, 3] = position_robot
pose_robot[:3, :3] = R_BG[:3, :3]
# adjust for camera offset
if pose_camera is not None:
pose_robot = np.dot(pose_camera, pose_robot)
return pose_robot
class ImageListener:
def __init__(self, agent, graspnet, graspnet_contact):
franka = Franka(is_physical_robot=True)
self.moveit = MoveitBridge(franka)
self.moveit.retract()
# self.moveit.close_gripper()
self.moveit.open_gripper()
self.joint_listener = JointListener()
self.pose_listener = tf.TransformListener()
print('sleep a short time')
rospy.sleep(2.0)
print('current robot joints')
print(self.joint_listener.joint_position)
tf_pose = self.pose_listener.lookupTransform('measured/panda_hand', 'measured/right_gripper', rospy.Time(0))
self.grasp_offset = make_pose(tf_pose)
print('grasp offset', self.grasp_offset)
self.agent = agent
self.graspnet = graspnet
self.graspnet_contact = graspnet_contact
self.cv_bridge = CvBridge()
self.im = None
self.depth = None
self.rgb_frame_id = None
self.rgb_frame_stamp = None
self.im_ef_pose = None
self.acc_points = np.zeros([4, 0])
self.depth_threshold = 1.2
self.table_height = 0.0
self.initial_joints = initial_joints
self.num_initial_joints = initial_joints.shape[0]
self.index_joints = 0
self.target_obj_id = 1 # target object ID
# publish object points for visualization
self.empty_msg = PointCloud2()
self.object_points2_target_pub = rospy.Publisher('/gaddpg_object_points2_target', PointCloud2, queue_size=10)
self.object_points2_obstacle_pub = rospy.Publisher('/gaddpg_object_points2_obstacle', PointCloud2, queue_size=10)
# initialize a node
self.label_sub = message_filters.Subscriber('seg_label', Image, queue_size=1)
self.hand_finger_point = np.array([ [ 0., 0., 0. , -0. , 0. , -0. ],
[ 0., 0., 0.053, -0.053, 0.053, -0.053],
[ 0., 0., 0.075, 0.075, 0.105, 0.105]])
self.bin_conf_1 = np.array([0.7074745589850109, 0.361727706885124, 0.38521270434333,
-1.1754794559646125, -0.4169872830046795, 1.7096866963969337, 1.654512471818922]).astype(np.float32)
self.bin_conf_2 = np.array([0.5919747534674433, 0.7818432665691674, 0.557417382701195,
-1.1647884021323738, -0.39191044586242046, 1.837464805311654, 1.9150514982533562]).astype(np.float32)
if cfg.ROS_CAMERA == 'D415':
# use RealSense D435
self.base_frame = 'measured/base_link'
camera_name = 'cam_2'
rgb_sub = message_filters.Subscriber('/%s/color/image_raw' % camera_name, Image, queue_size=1)
depth_sub = message_filters.Subscriber('/%s/aligned_depth_to_color/image_raw' % camera_name, Image, queue_size=1)
msg = rospy.wait_for_message('/%s/color/camera_info' % camera_name, CameraInfo)
self.camera_frame = 'measured/camera_color_optical_frame'
self.target_frame = self.base_frame
elif cfg.ROS_CAMERA == 'Azure':
self.base_frame = 'measured/base_link'
rgb_sub = message_filters.Subscriber('/k4a/rgb/image_raw', Image, queue_size=1)
depth_sub = message_filters.Subscriber('/k4a/depth_to_rgb/image_raw', Image, queue_size=1)
msg = rospy.wait_for_message('/k4a/rgb/camera_info', CameraInfo)
self.camera_frame = 'rgb_camera_link'
self.target_frame = self.base_frame
else:
# use kinect
self.base_frame = '%s_rgb_optical_frame' % (cfg.ROS_CAMERA)
rgb_sub = message_filters.Subscriber('/%s/rgb/image_color' % (cfg.ROS_CAMERA), Image, queue_size=1)
depth_sub = message_filters.Subscriber('/%s/depth_registered/image' % (cfg.ROS_CAMERA), Image, queue_size=1)
msg = rospy.wait_for_message('/%s/rgb/camera_info' % (cfg.ROS_CAMERA), CameraInfo)
self.camera_frame = '%s_rgb_optical_frame' % (cfg.ROS_CAMERA)
self.target_frame = self.base_frame
# update camera intrinsics
intrinsics = np.array(msg.K).reshape(3, 3)
self.fx = intrinsics[0, 0]
self.fy = intrinsics[1, 1]
self.px = intrinsics[0, 2]
self.py = intrinsics[1, 2]
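# fx, fy are the focal lengths in pixels; px, py are the principal point of the color camera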
print(intrinsics)
queue_size = 1
slop_seconds = 0.4
ts = message_filters.ApproximateTimeSynchronizer([rgb_sub, depth_sub, self.label_sub], queue_size, slop_seconds)
ts.registerCallback(self.callback_rgbdm)
# set global intrinsics and extrinsics
global INTRINSICS, EXTRINSICS
INTRINSICS = intrinsics
EXTRINSICS = np.zeros([4, 4])# from camera to end effector
EXTRINSICS[:3, 3] = (np.array([0.05253322227958818, -0.05414890498307623, 0.06035263861136299])) # camera offset
EXTRINSICS[:3, :3] = quat2mat([0.7182116422267757, 0.016333297635292354, 0.010996322012974747, 0.6955460741463947])
self.remaining_step = cfg.RL_MAX_STEP
# start publishing thread
self.start_publishing_tf()
self.planner = EnvPlanner()
self.expert_plan = []
self.standoff_idx = -1
self.has_plan = False
self.num_trial = 0
# threshold to close gripper
self.grasp_score_threshold = 0.4
def compute_plan_with_gaddpg(self, state, ef_pose, vis=False):
"""
generate initial expert plan
"""
joints = get_joints(self.joint_listener)
gaddpg_grasps_from_simulate_view(self.agent, state, cfg.RL_MAX_STEP, ef_pose)
print('finish simulate views')
# can use remaining timesteps to replan. Set vis to visualize collision and traj
self.expert_plan, self.standoff_idx = self.planner.expert_plan(cfg.RL_MAX_STEP, joints, ef_pose, state[0][0], vis=vis)
print('expert plan', self.expert_plan.shape)
print('standoff idx', self.standoff_idx)
def start_publishing_tf(self):
self.stop_event = threading.Event()
self.tf_thread = threading.Thread(target=self.publish_point_cloud)
self.tf_thread.start()
def publish_point_cloud(self):
rate = rospy.Rate(30.)
fields = [
PointField('x', 0, PointField.FLOAT32, 1),
PointField('y', 4, PointField.FLOAT32, 1),
PointField('z', 8, PointField.FLOAT32, 1)]
while not self.stop_event.is_set() and not rospy.is_shutdown():
header = std_msgs.msg.Header()
header.stamp = rospy.Time.now()
header.frame_id = self.base_frame
out_xyz = self.acc_points[:3, :].T
label = self.acc_points[3, :].flatten()
target_xyz = out_xyz[label == 0, :]
obj_pc2_target = point_cloud2.create_cloud(header, fields, target_xyz)
self.object_points2_target_pub.publish(obj_pc2_target)
obstacle_xyz = out_xyz[label == 1, :]
obj_pc2_obstacle = point_cloud2.create_cloud(header, fields, obstacle_xyz)
self.object_points2_obstacle_pub.publish(obj_pc2_obstacle)
# if out_xyz.shape[0] > 0:
# print('publish points')
# print(out_xyz.shape)
rate.sleep()
def callback_rgbdm(self, rgb, depth, mask):
ef_pose = get_ef_pose(self.pose_listener)
if depth.encoding == '32FC1':
depth_cv = self.cv_bridge.imgmsg_to_cv2(depth)
elif depth.encoding == '16UC1':
depth_cv = self.cv_bridge.imgmsg_to_cv2(depth).copy().astype(np.float32)
depth_cv /= 1000.0
else:
rospy.logerr_throttle(
1, 'Unsupported depth type. Expected 16UC1 or 32FC1, got {}'.format(
depth.encoding))
return
im = self.cv_bridge.imgmsg_to_cv2(rgb, 'bgr8')
mask = self.cv_bridge.imgmsg_to_cv2(mask, 'mono8')
# rescale image if necessary
# Lirui: consider rescaling to 112 x 112 which is used in training (probably not necessary)
if cfg.SCALES_BASE[0] != 1:
im_scale = cfg.SCALES_BASE[0]
im = pad_im(cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR), 16)
depth_cv = pad_im(cv2.resize(depth_cv, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_NEAREST), 16)
mask = pad_im(cv2.resize(mask, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_NEAREST), 16)
with lock:
self.im = im.copy()
self.im_ef_pose = ef_pose.copy()
self.mask = mask.copy()
self.depth = depth_cv.copy()
self.rgb_frame_id = rgb.header.frame_id
self.rgb_frame_stamp = rgb.header.stamp
def show_segmentation_result(self, color, mask, mask_ids):
image = color.copy()
for i in range(len(mask_ids)):
mask_id = mask_ids[i]
index = np.where(mask == mask_id)
x = int(np.mean(index[1]))
y = int(np.mean(index[0]))
image = cv2.putText(image, str(i+1), (x, y), cv2.FONT_HERSHEY_SIMPLEX, 3, (0, 255, 0), 2, cv2.LINE_AA)
cv2.namedWindow("Display 1")
cv2.imshow("Display 1", image)
cv2.waitKey(0)
cv2.destroyAllWindows()
value = input('Please enter which object to pick up: ')
return int(value)
def find_target_object(self, depth, mask, mask_ids, ef_pose, remaining_step, vis=False):
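# Data association: project the accumulated target points into the current
# segmentation mask, take the majority label as the target id, then reject the
# match if the bounding-box overlap or the projected-vs-measured depth consistency is poor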
# select target points
target_mask = get_target_mask(self.acc_points)
points = self.acc_points[:3, target_mask]
# sample points
points = regularize_pc_point_count(points.T, 1024, use_farthest_point=True).T
# base to hand
points = se3_transform_pc(se3_inverse(ef_pose), points)
# hand to camera
offset_pose = se3_inverse(EXTRINSICS)
xyz_points = offset_pose[:3, :3].dot(points) + offset_pose[:3, [3]]
# projection to image
p_xyz = INTRINSICS.dot(xyz_points)
index = p_xyz[2] > 0.03
p_xyz = p_xyz[:, index]
xyz_points = xyz_points[:, index]
x, y = (p_xyz[0] / p_xyz[2]).astype(np.int), (p_xyz[1] / p_xyz[2]).astype(np.int)
# bounding box
x1 = np.min(x)
x2 = np.max(x)
y1 = np.min(y)
y2 = np.max(y)
area = (x2 - x1 + 1) * (y2 - y1 + 1)
# check labels
valid_idx_mask = (x > 0) * (x < mask.shape[1] - 1) * (y > 0) * (y < mask.shape[0] - 1)
labels = mask[y[valid_idx_mask], x[valid_idx_mask]]
labels_nonzero = labels[labels > 0]
xyz_points = xyz_points[:, valid_idx_mask]
# find the majority label
if float(len(labels_nonzero)) / float((len(labels) + 1)) < 0.5:
print('overlap to background')
target_id = -1
else:
target_id = np.bincount(labels_nonzero).argmax()
# check bounding box overlap
I = np.where(mask == target_id)
x11 = np.min(I[1])
x22 = np.max(I[1])
y11 = np.min(I[0])
y22 = np.max(I[0])
area1 = (x22 - x11 + 1) * (y22 - y11 + 1)
xx1 = np.maximum(x1, x11)
yy1 = np.maximum(y1, y11)
xx2 = np.minimum(x2, x22)
yy2 = np.minimum(y2, y22)
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (area + area1 - inter)
print('overlap', ovr)
if ovr < 0.3:
target_id = -1
# projected depth
depths = depth[y[valid_idx_mask], x[valid_idx_mask]]
# computed depth
z = xyz_points[2, :]
diff = np.mean(np.absolute(depths - z))
print('mean depth diff', diff)
if diff > 0.15:
target_id = -1
# if remaining_step == cfg.RL_MAX_STEP - 1 and target_id != -1:
# self.acc_points = np.zeros([4, 0])
if vis:
# show image
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
plt.imshow(mask)
plt.scatter(x[valid_idx_mask], y[valid_idx_mask], s=10)
# plt.show()
plt.show(block=False)
plt.pause(1)
plt.close()
return target_id
def print_joint(self, joint):
num = len(joint)
s = ''
for i in range(num):
s += '%.6f, ' % rad2deg(joint[i])
print(s)
def process_label(self, foreground_labels):
""" Process foreground_labels
- Map the foreground_labels to {0, 1, ..., K-1}
@param foreground_labels: a [H x W] numpy array of labels
@return: foreground_labels
"""
# Find the unique (nonnegative) foreground_labels, map them to {0, ..., K-1}
unique_nonnegative_indices = np.unique(foreground_labels)
mapped_labels = foreground_labels.copy()
for k in range(unique_nonnegative_indices.shape[0]):
mapped_labels[foreground_labels == unique_nonnegative_indices[k]] = k
foreground_labels = mapped_labels
return foreground_labels
def compute_grasp_object_distance(self, RT_grasp):
T = RT_grasp[:3, 3].reshape((3, 1))
# target points
index = self.acc_points[3, :] == 0
points = self.acc_points[:3, index]
n = points.shape[1]
hand = np.repeat(T, n, axis=1)
distances = np.linalg.norm(hand - points, axis=0)
return np.min(distances)
def run_network(self):
# sample an initial joint
if self.remaining_step == cfg.RL_MAX_STEP:
print('use initial joint %d' % (self.index_joints))
initial_joints = self.initial_joints[self.index_joints, :]
self.moveit.go_local(q=initial_joints, wait=True)
rospy.sleep(1.0)
with lock:
if listener.im is None:
print('no image')
return
color = self.im.copy()
depth = self.depth.copy()
mask = self.mask.copy()
im_ef_pose = self.im_ef_pose.copy()
rgb_frame_id = self.rgb_frame_id
rgb_frame_stamp = self.rgb_frame_stamp
print('===========================================')
# process mask
mask = self.process_label(mask)
mask_ids = np.unique(mask)
if mask_ids[0] == 0:
mask_ids = mask_ids[1:]
num = mask_ids.shape[0]
mask_failure = (num == 0)
# no mask for the first frame
if mask_failure and self.remaining_step == cfg.RL_MAX_STEP:
print('no object segmented')
raw_input('put objects in the scene?')
return
count = np.zeros((num, ), dtype=np.int32)
for i in range(num):
count[i] = len(np.where(mask == mask_ids[i])[0])
# show the segmentation
start_time = time.time()
if self.remaining_step == cfg.RL_MAX_STEP:
print('%d objects segmented' % num)
print(mask_ids)
if not RANDOM_TARGET:
label_max = np.argmax(count)
target_id = mask_ids[label_max]
else:
target_id = self.show_segmentation_result(color, mask, mask_ids)
'''
while True:
target_id = np.random.choice(mask_ids)
# check number of pixels for the target
num_pixels = np.sum(mask == target_id)
if num_pixels > 500:
print('%d target pixels' % num_pixels)
break
'''
elif num > 0:
# data association to find the target id for the current frame
target_id = self.find_target_object(depth, mask, mask_ids, im_ef_pose, self.remaining_step, vis=False)
else:
target_id = -1
self.target_obj_id = target_id
print('target id is %d' % target_id)
print("---select target time %s seconds ---" % (time.time() - start_time))
if self.remaining_step == cfg.RL_MAX_STEP and not args.fix_initial_state:
self.index_joints += 1
if self.index_joints >= self.num_initial_joints:
self.index_joints = 0
# process target mask
start_time = time.time()
mask_background = np.zeros_like(mask)
mask_background[mask == 0] = 1
if num > 0:
# update this for 0 background and 1-N for other target
mask_target = np.zeros_like(mask)
mask_target[mask == target_id] = 1
# erode target mask
mask_target = cv2.erode(mask_target, np.ones((7, 7), np.uint8), iterations=3)
num_pixels = np.sum(mask_target)
print('finish mask, %d foreground pixels' % (num_pixels))
# build the final mask
mask[(mask == target_id) & (mask_target == 0)] = 0
mask_final = mask.copy()
else:
mask_final = np.zeros_like(mask)
print("---process mask time %s seconds ---" % (time.time() - start_time))
# compute state
start_time = time.time()
depth = depth[...,None]
agg = (not mask_failure) and (self.remaining_step >= cfg.RL_MAX_STEP - 1)
state, point_background = self.camera_image_to_state( color, depth, mask_final, mask_background, im_ef_pose,
cfg.RL_MAX_STEP - self.remaining_step,
agg=agg, vis=False)
print('after camera image to state', state[0].shape)
print('background point shape', point_background.shape)
print("---compute state time %s seconds ---" % (time.time() - start_time))
# compute action
state = [state, None, None, None]
# look at target
if self.remaining_step == cfg.RL_MAX_STEP and USE_LOOK_AT:
index = self.acc_points[3, :] == 0
points = self.acc_points[:3, index]
center = np.mean(points, axis=1)
angle = 60
T_lookat = compute_look_at_pose(self.pose_listener, center, angle=angle, distance=0.45)
self.moveit.go_local(T_lookat, wait=True)
self.remaining_step = max(self.remaining_step-1, 1)
rospy.sleep(0.5)
return
# GRASPNET + OMG + GA-DDPG
# run graspnet
if (not self.has_plan and COMBINED) or (GRASPNET_ONLY and not GA_DDPG_ONLY):
point_state = state[0][0].copy() # avoid aggregation
print('point_state', point_state.shape)
target_mask = point_state[3, :] == 0
target_pt = point_state[:3, target_mask].T
print('target_pt', target_pt.shape)
if CONTACT_GRASPNET: # only for target
# pc_full: (493949, 3), pc_colors: (493949, 3), pc_segments: dict (idx: (13481, 3)), local_regions True filter_grasps True forward_passes 1
pc_segments = {'0': target_pt}
point_full = point_state[:3,6:-500].T
print('point_full', point_full.shape)
# all points. You need to add table point here
pred_grasps_cam, scores, contact_pts, _ = self.graspnet_contact.predict_scene_grasps(sess_contact, point_full,
pc_segments=pc_segments,
local_regions=True,
filter_grasps=True,
forward_passes=1)
# pred_grasps_cam: dict (idx: (N, 4, 4)), scores: dict (idx: (N, 1)), contact_pts: dict (idx: (N, 3))
generated_grasps = pred_grasps_cam['0']
generated_scores = scores['0']
print('generated contact grasps', generated_grasps.shape)
else:
latents = self.graspnet.sample_latents()
generated_grasps, generated_scores, _ = self.graspnet.predict_grasps(
sess,
target_pt.copy(),
latents,
num_refine_steps=10,
)
# select grasps
top_num = 100 # grasp num
sorted_idx = list(np.argsort(generated_scores))[::-1]
select_grasp = [generated_grasps[idx] for idx in sorted_idx[:top_num]]
select_grasp_score = [generated_scores[idx] for idx in sorted_idx[:top_num]]
print('mean select grasp score: {:.3f}'.format(np.mean(np.round(select_grasp_score, 3))))
goal_states = np.array([im_ef_pose.dot(g.dot(rotZ(np.pi / 2))) for g in select_grasp]) # might not need rotate
print(goal_states.shape)
if goal_states.shape[0] == 0:
return
# use OMG in this repo
planner_cfg.use_external_grasp = True
planner_cfg.external_grasps = goal_states # this sets the grasps in base coordinate
joints = get_joints(self.joint_listener)
# construct scene points
num = point_state.shape[1] + point_background.shape[1]
scene_points = np.ones((4, num), dtype=np.float32)
scene_points[:, :point_state.shape[1]] = point_state.copy()
scene_points[:3, point_state.shape[1]:] = point_background.copy()
step = 30
plan, standoff_idx = self.planner.expert_plan(step, joints, im_ef_pose, scene_points, vis=False)
self.has_plan = True
print('expert plan', plan.shape)
# execute plan to standoff
if COMBINED:
self.moveit.execute(plan[:standoff_idx-5])
self.remaining_step = 10
print('*****************switch to gaddpg****************')
rospy.sleep(1.0)
else:
self.moveit.execute(plan[:standoff_idx])
self.moveit.execute(plan[standoff_idx:])
rospy.sleep(1.0)
if PUT_BIN:
self.put_bin()
else:
self.retract()
self.acc_points = np.zeros([4, 0])
self.remaining_step = cfg.RL_MAX_STEP
else:
if self.termination_heuristics(state) or self.num_trial >= 5:
if self.num_trial >= 5:
print('********************trial exceed********************')
if PUT_BIN:
self.put_bin()
else:
self.retract()
# reset
self.acc_points = np.zeros([4, 0])
self.remaining_step = cfg.RL_MAX_STEP
self.has_plan = False
self.num_trial = 0
return
# run ga-ddpg
print('use ga-ddpg')
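# one closed-loop GA-DDPG step: predict a relative end-effector transform from the
# target points and the remaining time step, then execute it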
target_state = select_target_point(state) # only target points
action, _, _, aux_pred = self.agent.select_action(target_state, remain_timestep=self.remaining_step)
print('finish network')
pose_delta = unpack_action(action)
ef_pose = get_ef_pose(self.pose_listener)
ef_pose = ef_pose.dot(pose_delta)
RT_grasp = ef_pose.dot(self.grasp_offset)
vis_pose = ef_pose.copy()
# send_transform(RT_grasp, vis_pose, 'GADDPG_action')
self.moveit.go_local(RT_grasp, wait=True)
print('remaining step: {} aggr. point: {}'.format(self.remaining_step, self.acc_points.shape[1]))
# raw_input('next step?')
self.remaining_step = max(self.remaining_step-1, 1)
if self.remaining_step == 1:
self.remaining_step += 5
self.num_trial += 1
def retract(self):
"""
close finger and lift
"""
# close the gripper
self.moveit.close_gripper(force=60)
rospy.sleep(1.0)
# lift object
delta = 0.20
joints = get_joints(self.joint_listener)
T = self.moveit.forward_kinematics(joints[:-2])
print('T in retract', T)
T_lift = T.copy()
T_lift[2, 3] += delta
self.moveit.go_local(T_lift, wait=True)
# wait a few seconds
rospy.sleep(2.0)
# put object down
T_put = T.copy()
T_put[2, 3] += 0.01
self.moveit.go_local(T_put, wait=True)
self.moveit.open_gripper()
self.moveit.go_local(T_lift, wait=True)
if GA_DDPG_ONLY:
self.moveit.retract()
else:
step = 20
joint_position = get_joints(self.joint_listener)
end_conf = np.append(self.moveit.home_q, joint_position[7:])
traj = self.planner.plan_to_conf(step, joint_position, end_conf, vis=False)[::2, :]
self.moveit.execute(traj)
raw_input('finished. Try again?')
# grasp object and put object into a bin with goal conf
def put_bin(self):
force_before = self.joint_listener.robot_force
print('force before grasping', force_before)
# close the gripper
self.moveit.close_gripper(force=60)
rospy.sleep(0.5)
# lift object a bit
delta = 0.05
joints = get_joints(self.joint_listener)
T = self.moveit.forward_kinematics(joints[:-2])
print('T in retract', T)
T_lift = T.copy()
T_lift[2, 3] += delta
self.moveit.go_local(T_lift, wait=True)
force_after = self.joint_listener.robot_force
print('force after grasping', force_after)
force_diff = np.linalg.norm(force_before - force_after)
print('force diff norm', force_diff)
# lift object more
delta = 0.30
joints = get_joints(self.joint_listener)
T = self.moveit.forward_kinematics(joints[:-2])
print('T in retract', T)
T_lift = T.copy()
T_lift[2, 3] += delta
self.moveit.go_local(T_lift, wait=True)
# check grasp success
joint_position = self.joint_listener.joint_position
print('check success', joint_position)
if joint_position[-1] > 0.002 or force_diff > 0.5 or force_diff == 0:
success = True
print('grasp success')
else:
success = False
print('grasp fail')
# plan to goal conf
step = 20
if success:
joint_position = get_joints(self.joint_listener)
end_conf = np.append(self.bin_conf_1, joint_position[7:])
traj = self.planner.plan_to_conf(step, joint_position, end_conf, vis=False)[::2, :]
self.moveit.execute(traj)
joint_position = get_joints(self.joint_listener)
end_conf = np.append(self.bin_conf_2, joint_position[7:])
traj = self.planner.plan_to_conf(step, joint_position, end_conf, vis=False)[::2, :]
self.moveit.execute(traj)
self.moveit.open_gripper()
joint_position = get_joints(self.joint_listener)
end_conf = np.append(self.moveit.home_q, joint_position[7:])
traj = self.planner.plan_to_conf(step, joint_position, end_conf, vis=False)[::2, :]
self.moveit.execute(traj)
self.moveit.open_gripper()
def bias_target_pc_regularize(self, point_state, total_point_num, target_pt_num=1024, use_farthest_point=True):
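# Sample at most target_pt_num target points (mask 0) and fill the rest of the
# total_point_num budget with non-target points, using farthest-point sampling for both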
target_mask = point_state[3, :] == 0
target_pt = point_state[:, target_mask]
nontarget_pt = point_state[:, ~target_mask]
print(target_pt.shape, nontarget_pt.shape)
if target_pt.shape[1] > 0:
target_pt = regularize_pc_point_count(target_pt.T, target_pt_num, use_farthest_point).T
if nontarget_pt.shape[1] > 0:
effective_target_pt_num = min(target_pt_num, target_pt.shape[1])
nontarget_pt = regularize_pc_point_count(nontarget_pt.T, total_point_num - effective_target_pt_num, use_farthest_point).T
return np.concatenate((target_pt, nontarget_pt), axis=1)
# new_points is in hand coordinate
# ACC_POINTS is in base
def update_curr_acc_points(self, new_points, ef_pose, step):
"""
Update accumulated points in world coordinate
"""
new_points = se3_transform_pc(ef_pose, new_points)
# the number below can be adjusted for efficiency and robustness
aggr_sample_point_num = min(int(CONFIG.pt_accumulate_ratio**step * CONFIG.uniform_num_pts), new_points.shape[1])
index = np.random.choice(range(new_points.shape[1]), size=aggr_sample_point_num, replace=False).astype(np.int)
new_points = new_points[:,index]
print('new points before filtering with table height', new_points.shape)
index = new_points[2, :] > self.table_height
new_points = new_points[:, index]
print('new points {} total point {}'.format(new_points.shape, self.acc_points.shape))
self.acc_points = np.concatenate((new_points, self.acc_points), axis=1) #
self.acc_points = regularize_pc_point_count(self.acc_points.T, 4096, use_farthest_point=True).T
# if it still grows too much, can limit points by call regularize pc point count
# self.planner.expert_plan can also be called with these dense points directly
def goal_closure(self, action, goal):
action_2 = np.zeros(7)
action_2[-3:] = action[:3]
action_2[:-3] = mat2quat(euler2mat(action[3], action[4], action[5])) # euler to quat
point_dist = float(agent.goal_pred_loss(torch.from_numpy(goal)[None].float().cuda(),
torch.from_numpy(action_2)[None].float().cuda()))
print('point dist: {:.3f}'.format(point_dist))
return point_dist < 0.008
def graspnet_closure(self, point_state):
"""
Compute grasp quality from tf grasp net.
"""
score = self.graspnet.compute_grasps_score(sess, point_state)
print('grasp closure score:', score)
return score > self.grasp_score_threshold # tuned threshold
# point_state is in hand coordinate
def process_pointcloud(self, point_state, im_ef_pose, step, agg=True, use_farthest_point=False):
"""
Process the cluttered scene point_state
[0 - 6]: random or gripper points with mask -1
[6 - 1030]: target point with mask 0
[1030 - 5002]: obstacle point with mask 1
[5002 - 5502]: robot points with mask 2 can be random or generated with get_collision_points and transform with joint
"""
# accumulate all point state in base
# set the mask 0 as target, 1 as other objects
index_target = point_state[3, :] == self.target_obj_id
index_other = point_state[3, :] != self.target_obj_id
point_state[3, index_target] = 0.
point_state[3, index_other] = 1.
if agg:
self.update_curr_acc_points(point_state, im_ef_pose, step)
# base to hand
inv_ef_pose = se3_inverse(im_ef_pose)
point_state = se3_transform_pc(inv_ef_pose, self.acc_points)
point_state = self.bias_target_pc_regularize(point_state, CONFIG.uniform_num_pts)
hand_finger_point = np.concatenate([self.hand_finger_point, np.ones((1, self.hand_finger_point.shape[1]), dtype=np.float32)], axis=0)
point_state = np.concatenate([hand_finger_point, point_state], axis=1)
import os
import string
import random
import numpy as np
from sklearn.metrics import accuracy_score
from tqdm.notebook import tqdm
from sklearn.base import TransformerMixin
from sklearn.naive_bayes import GaussianNB, CategoricalNB
import nltk
from nltk import word_tokenize, WordNetLemmatizer
from nltk.stem import WordNetLemmatizer
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
wordnet_lemmatizer: WordNetLemmatizer = WordNetLemmatizer()
def clean_text(text: str) -> str:
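# e.g. clean_text("Dogs barked loudly!") -> "dog barked loudly"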
# removes upper cases
text = text.lower()
# removes punctuation
for char in string.punctuation:
text = text.replace(char, "")
# lemmatize the words and join back into string text
text = " ".join([wordnet_lemmatizer.lemmatize(word) for word in word_tokenize(text)])
return text
class DenseTransformer(TransformerMixin):
def fit(self, x, y=None, **fit_params):
return self
@staticmethod
def transform(x, y=None, **fit_params):
return x.todense()
def __str__(self):
return "DenseTransformer()"
def __repr__(self):
return self.__str__()
class CleanTextTransformer(TransformerMixin):
def fit(self, x, y=None, **fit_params):
return self
@staticmethod
def transform(x, y=None, **fit_params):
return np.vectorize(clean_text)(x)
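# Illustrative sketch (not part of the original script): one way these transformers
# could be chained with a bag-of-words model. Pipeline and CountVectorizer are the
# standard scikit-learn components; the train/test variables in the usage comment are
# hypothetical placeholders, not data defined elsewhere in this file.
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer

example_text_clf = Pipeline([
    ("clean", CleanTextTransformer()),   # lowercase, strip punctuation, lemmatize
    ("vectorize", CountVectorizer()),    # bag-of-words counts (sparse matrix)
    ("densify", DenseTransformer()),     # GaussianNB expects a dense array
    ("classify", GaussianNB()),
])
# Usage with your own data:
# example_text_clf.fit(train_texts, train_labels)
# print(accuracy_score(test_labels, example_text_clf.predict(test_texts)))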
import itertools
import logging
import os.path as osp
import tempfile
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable
from .builder import DATASETS
from .coco import CocoDataset
# DATASETS.register_module(name='LVISDataset', module=LVISDataset)
# LVISDataset = LVISV05Dataset
# DATASETS.register_module(name='LVISDataset', module=LVISDataset)
@DATASETS.register_module()
class LVISV1Dataset(CocoDataset):
CLASSES = (
'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol',
'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna',
'apple', 'applesauce', 'apricot', 'apron', 'aquarium',
'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',
'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',
'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',
'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',
'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',
'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',
'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',
'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',
'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',
'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',
'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',
'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',
'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',
'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',
'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',
'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',
'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',
'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',
'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',
'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',
'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',
'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)',
'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box',
'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',
'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase',
'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts',
'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer',
'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn',
'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card',
'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',
'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',
'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',
'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar',
'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup',
'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',
'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',
'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',
'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',
'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower',
'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone',
'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier',
'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard',
'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',
'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',
'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',
'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',
'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet',
'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine',
'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock',
'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster',
'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach',
'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',
'coffeepot', 'coil', 'coin', 'colander', 'coleslaw',
'coloring_material', 'combination_lock', 'pacifier', 'comic_book',
'compass', 'computer_keyboard', 'condiment', 'cone', 'control',
'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',
'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',
'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',
'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',
'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',
'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',
'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',
'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',
'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',
'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',
'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',
'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',
'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',
'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',
'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',
'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',
'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',
'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',
'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)',
'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell',
'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring',
'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',
'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',
'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',
'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',
'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',
'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',
'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl',
'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',
'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',
'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',
'folding_chair', 'food_processor', 'football_(American)',
'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',
'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',
'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',
'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',
'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator',
'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture',
'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',
'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',
'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat',
'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly',
'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',
'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock',
'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',
'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',
'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband',
'headboard', 'headlight', 'headscarf', 'headset',
'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',
'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',
'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',
'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',
'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',
'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',
'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',
'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',
'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',
'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',
'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',
'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',
'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',
'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',
'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',
'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce',
'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',
'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',
'lizard', 'log', 'lollipop', 'speaker_(stero_equipment)', 'loveseat',
'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',
'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger',
'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato',
'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox',
'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine',
'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone',
'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror',
'mitten', 'mixer_(kitchen_tool)', 'money',
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',
'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)',
'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',
'music_stool', 'musical_instrument', 'nailfile', 'napkin',
'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper',
'newsstand', 'nightshirt', 'nosebag_(for_animals)',
'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',
'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',
'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich',
'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad',
'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas',
'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake',
'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book',
'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol',
'parchment', 'parka', 'parking_meter', 'parrot',
'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',
'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',
'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',
'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',
'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',
'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',
'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',
'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',
'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',
'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',
'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',
'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',
'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',
'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',
'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',
'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel',
'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',
'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',
'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',
'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',
'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',
'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',
'recliner', 'record_player', 'reflector', 'remote_control',
'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',
'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',
'rolling_pin', 'root_beer', 'router_(computer_equipment)',
'rubber_band', 'runner_(carpet)', 'plastic_bag',
'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',
'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',
'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',
'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',
'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',
'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',
'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',
'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',
'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',
'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',
'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',
'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',
'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',
'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',
'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',
'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',
'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',
'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',
'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',
'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',
'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',
'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',
'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',
'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',
'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer',
'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign',
'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl',
'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses',
'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',
'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword',
'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',
'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',
'tambourine', 'army_tank', 'tank_(storage_vessel)',
'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',
'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',
'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',
'telephone_pole', 'telephoto_lens', 'television_camera',
'television_set', 'tennis_ball', 'tennis_racket', 'tequila',
'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',
'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',
'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',
'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',
'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',
'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',
'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',
'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle',
'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat',
'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
'washbasin', 'automatic_washer', 'watch', 'water_bottle',
'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
'water_gun', 'water_scooter', 'water_ski', 'water_tower',
'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',
'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',
'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',
'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',
'yoke_(animal_equipment)', 'zebra', 'zucchini')
# def load_annotations(self, ann_file):
# try:
# import lvis
# assert lvis.__version__ >= '10.5.3'
# from lvis import LVIS
# except AssertionError:
# raise AssertionError('Incompatible version of lvis is installed. '
# 'Run pip uninstall lvis first. Then run pip '
# 'install mmlvis to install open-mmlab forked '
# 'lvis. ')
# except ImportError:
# raise ImportError('Package lvis is not installed. Please run pip '
# 'install mmlvis to install open-mmlab forked '
# 'lvis.')
# self.coco = LVIS(ann_file)
# # assert not self.custom_classes, 'LVIS custom classes is not supported'
# self.cat_ids = self.coco.get_cat_ids()
# self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
# self.img_ids = self.coco.get_img_ids()
# data_infos = []
# for i in self.img_ids:
# info = self.coco.load_imgs([i])[0]
# # coco_url is used in LVISv1 instead of file_name
# # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
# # train/val split in specified in url
# info['filename'] = info['coco_url'].replace(
# 'http://images.cocodataset.org/', '')
# data_infos.append(info)
# return data_infos
def load_annotations(self, ann_file):
try:
import lvis
assert lvis.__version__ >= '10.5.3'
from lvis import LVIS
except AssertionError:
raise AssertionError('Incompatible version of lvis is installed. '
'Run pip uninstall lvis first. Then run pip '
'install mmlvis to install open-mmlab forked '
'lvis. ')
except ImportError:
raise ImportError('Package lvis is not installed. Please run pip '
'install mmlvis to install open-mmlab forked '
'lvis.')
self.lvis = LVIS(ann_file)
self.full_cat_ids = self.lvis.get_cat_ids()
self.full_cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.full_cat_ids)
}
self.CLASSES = tuple([item['name'] for item in self.lvis.dataset['categories']])
self.cat_ids = self.lvis.get_cat_ids()
self.cat2label = {
cat_id: i + 1
for i, cat_id in enumerate(self.cat_ids)
}
self.img_ids = self.lvis.get_img_ids()
self.img_infos = []
for i in self.img_ids:
info = self.lvis.load_imgs([i])[0]
info['filename'] = info['coco_url'].replace(
'http://images.cocodataset.org/', '')
self.img_infos.append(info)
return self.img_infos
def get_ann_info(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
ann_info = self.lvis.load_anns(ann_ids)
return self._parse_ann_info(self.img_infos[idx], ann_info)
def get_ann_info_withoutparse(self, idx):
img_id = self.img_infos[idx]['id']
ann_ids = self.lvis.get_ann_ids(img_ids=[img_id])
ann_info = self.lvis.load_anns(ann_ids)
return ann_info
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
ids_with_ann = set(_['image_id'] for _ in self.lvis.anns.values())
for i, img_info in enumerate(self.img_infos):
if self.img_ids[i] not in ids_with_ann:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
            img_info (dict): Information of the corresponding image.
            ann_info (list[dict]): Annotation info of an image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,
labels, masks, mask_polys, poly_lens.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
# Two formats are provided.
# 1. mask: a binary map of the same size of the image.
# 2. polys: each mask consists of one or several polys, each poly is a
# list of float.
gt_masks = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
if ann['area'] <= 0 or w < 1 or h < 1:
continue
bbox = [x1, y1, x1 + w - 1, y1 + h - 1]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks.append(self.lvis.ann_to_mask(ann))
if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
import copy
import numpy as np
import chess as python_chess
from collections import deque
from typing import Optional, List
from src.config import ConfigChess
from src.chess.move import Move
class Board(python_chess.Board):
def __init__(
self,
board_fen: Optional[str] = None,
array: Optional[np.ndarray] = None,
history_size: int = 8,
):
self.board_size = ConfigChess.board_size
self.number_unique_pieces = ConfigChess.number_unique_pieces
if array is not None:
assert isinstance(array, np.ndarray)
assert all([dim == self.board_size for dim in array.shape])
assert np.unique(array).size <= self.number_unique_pieces + 1
self.array = array.astype("int8")
board_fen = self.array_to_board_fen(self.array)
fen = self.get_fen(board_fen)
python_chess.Board.__init__(self, fen=fen)
else:
board_fen = (
ConfigChess.initial_board_fen if board_fen is None else board_fen
)
fen = self.get_fen(board_fen)
python_chess.Board.__init__(self, fen=fen)
self.array = self.board_fen_to_array(self.board_fen())
self.state_history = deque(maxlen=history_size)
for time_step in range(history_size):
self.state_history.append(np.zeros(self.state.shape))
self.state_history.append(self.state)
@property
def array_one_hot(self) -> np.ndarray:
return np.eye(self.number_unique_pieces + 1)[self.array]
@property
def moves(self) -> List[Move]:
return [Move(uci=move.uci()) for move in self.legal_moves]
@property
def state(self) -> np.ndarray:
return np.dstack(
[
self.array_one_hot,
np.full((self.board_size, self.board_size), self.is_repetition()),
]
)
@property
def full_state(self) -> np.ndarray:
return np.dstack(
[np.dstack(self.state_history)]
+ [
np.full((self.board_size, self.board_size), feature)
for feature in [
self.has_queenside_castling_rights(self.turn),
self.has_kingside_castling_rights(self.turn),
self.has_queenside_castling_rights(not self.turn),
self.has_kingside_castling_rights(not self.turn),
self.fullmove_number,
self.halfmove_clock,
]
]
)
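    # Rough shape arithmetic for full_state (illustrative; the exact numbers
    # depend on ConfigChess): each history entry stacks number_unique_pieces + 1
    # one-hot planes plus one repetition plane. Assuming the usual chess setup of
    # number_unique_pieces = 12 and the default history_size = 8, that is
    # 8 * (12 + 2) = 112 planes, plus the 6 scalar feature planes appended above,
    # giving a (board_size, board_size, 118) tensor.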
@staticmethod
def get_fen(board_fen: str):
return " ".join(
[
board_fen,
ConfigChess.initial_turn,
ConfigChess.initial_castling_rights,
ConfigChess.initial_ep_quare,
ConfigChess.initial_halfmove_clock,
ConfigChess.initial_fullmove_number,
]
)
@staticmethod
def from_one_hot(array_oh: np.ndarray) -> np.ndarray:
array = np.argmax(array_oh, axis=-1)
array[np.where(array > ConfigChess.number_unique_pieces / 2)] = (
array[np.where(array > ConfigChess.number_unique_pieces / 2)]
- ConfigChess.number_unique_pieces
+ 1
)
return array
@staticmethod
def piece_symbol_to_int(piece_symbol: Optional[str]) -> int:
if piece_symbol is None:
return 0
piece_int = ConfigChess.piece_symbols.index(piece_symbol.lower())
player = 1 if piece_symbol.isupper() else -1
return player * piece_int
@staticmethod
def int_to_piece_symbol(piece_int: int) -> Optional[str]:
player, piece_symbol = (
np.sign(piece_int),
            ConfigChess.piece_symbols[np.abs(piece_int)]
import numpy as np
import numpy.testing as npt
import pytest
import freud
class TestAngularSeparationGlobal:
def test_getN(self):
boxlen = 10
N = 500
box, points = freud.data.make_random_system(boxlen, N, is2D=True)
_, query_points = freud.data.make_random_system(boxlen, N // 3, is2D=True)
ang = freud.environment.AngularSeparationGlobal()
# test access
with pytest.raises(AttributeError):
ang.angles
def test_compute(self):
# Going to make sure that the use of equivalent_orientations captures
# both of the global reference orientations
global_ors = np.array([[1, 0, 0, 0], [0, 1, 0, 0]], dtype=np.float32)
equivalent_orientations = np.array(
[[1, 0, 0, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, -1, 0, 0]], dtype=np.float32
)
ors = [[1, 0, 0, 0]]
ors.append([0, 1, 0, 0])
# The following two quaternions correspond to rotations of the above
# by pi/16
ors.append([0.99518473, 0.0, 0.0, 0.09801714])
ors.append([0.0, 0.99518473, -0.09801714, 0.0])
ors = np.asarray(ors, dtype=np.float32)
ang = freud.environment.AngularSeparationGlobal()
ang.compute(global_ors, ors, equivalent_orientations)
# Each orientation should be either equal to or pi/16 away from the
# global reference quaternion
for i in [0, 1]:
for j in [0, 1]:
npt.assert_allclose(ang.angles[i][j], 0, atol=1e-6)
for i in [2, 3]:
for j in [0, 1]:
npt.assert_allclose(ang.angles[i][j], np.pi / 16, atol=1e-6)
def test_repr(self):
ang = freud.environment.AngularSeparationGlobal()
assert str(ang) == str(eval(repr(ang)))
class TestAngularSeparationNeighbor:
def test_getN(self):
boxlen = 10
N = 500
box, points = freud.data.make_random_system(boxlen, N, is2D=True)
_, query_points = freud.data.make_random_system(boxlen, N // 3, is2D=True)
ang = freud.environment.AngularSeparationNeighbor()
# test access
with pytest.raises(AttributeError):
ang.angles
def test_nlist(self):
"""Check that the internally generated NeighborList is correct."""
boxlen = 4
num_neighbors = 1
r_guess = 2
box = freud.box.Box.square(boxlen)
# Create three points in a line.
points = np.asarray([[0, 0, 0], [1, 0, 0], [1.5, 0, 0]], dtype=np.float32)
# Use two separate orientations. The second orientation is a pi/3
# rotation from the identity quaternion
ors = np.asarray(
[
[1, 0, 0, 0],
[np.cos(np.pi / 6), np.sin(np.pi / 6), 0, 0],
[np.cos(np.pi / 6), np.sin(np.pi / 6), 0, 0],
],
dtype=np.float32,
)
equivalent_orientations = np.asarray(
[[1, 0, 0, 0], [-1, 0, 0, 0]], dtype=np.float32
)
ang = freud.environment.AngularSeparationNeighbor()
qargs = dict(num_neighbors=num_neighbors, r_guess=r_guess, exclude_ii=True)
ang.compute(
(box, points),
ors,
equiv_orientations=equivalent_orientations,
neighbors=qargs,
)
aq = freud.locality.AABBQuery(box, points)
nlist = aq.query(points, qargs).toNeighborList()
npt.assert_array_equal(nlist[:], ang.nlist[:])
def test_compute(self):
boxlen = 4
num_neighbors = 1
r_guess = 2
box = freud.box.Box.square(boxlen)
# Create three points in a line.
points = np.asarray([[0, 0, 0], [1, 0, 0], [1.5, 0, 0]], dtype=np.float32)
# Use two separate orientations. The second orientation is a pi/3
# rotation from the identity quaternion
ors = np.asarray(
[
[1, 0, 0, 0],
[np.cos(np.pi / 6), np.sin(np.pi / 6), 0, 0],
                [np.cos(np.pi / 6), np.sin(np.pi / 6), 0, 0],
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 3 17:27:46 2018
@author: <NAME>
Implementation of information representation based multi-layer classifier using GFMM
Note: Currently, all samples in the dataset must be normalized to the range of [0, 1] before using this class
"""
import sys, os
sys.path.insert(0, os.path.pardir)
import numpy as np
import math
import ast
import time
import multiprocessing
from functionhelper.bunchdatatype import Bunch
from functionhelper.membershipcalc import memberG, asym_similarity_one_many
from functionhelper.preprocessinghelper import read_file_in_chunks_group_by_label, read_file_in_chunks, string_to_boolean, loadDataset
from functionhelper.hyperboxadjustment import isOverlap, hyperboxOverlapTest, modifiedIsOverlap, hyperboxContraction
from concurrent.futures import ProcessPoolExecutor, as_completed
def get_num_cpu_cores():
num_cores = multiprocessing.cpu_count()
if num_cores >= 4:
num_cores = num_cores - 2
return num_cores
class Info_Presentation_Multi_Layer_Classifier_GFMM(object):
def __init__(self, teta = [0.1, 0.5], gamma = 1, simil_thres = 0.5, oper = 'min'):
self.gamma = gamma
self.teta_onl = teta[0]
self.higher_teta = teta[1:]
self.oper = oper
self.simil_thres = simil_thres
def homogeneous_hyperbox_expansion(self, X_l, X_u, patClassId, current_hyperboxes):
"""
        Expand the current hyperboxes to cover the input patterns; all input samples share the same label, which also matches the current hyperboxes (if any exist)
Update the number of patterns contained in the hyperboxes and their centroids of samples
INPUT
Xl Input data lower bounds (rows = objects, columns = features)
Xu Input data upper bounds (rows = objects, columns = features)
patClassId Input data class labels (crisp). patClassId[i] = 0 corresponds to an unlabeled item
current_hyperboxes A list of current hyperboxes in the Bunch datatype (properties: lower, upper, classId, no_pat, centroid)
OUTPUT
            result    A Bunch containing the lower and upper bounds, class labels, numbers of covered patterns and centroids of the hyperboxes
"""
yX = X_l.shape[0]
V = current_hyperboxes.lower
W = current_hyperboxes.upper
classId = current_hyperboxes.classId
no_Pats = current_hyperboxes.no_pat
centroid = current_hyperboxes.centroid
# for each input sample
for i in range(yX):
classOfX = patClassId[i]
if V.size == 0: # no model provided - starting from scratch
V = np.array([X_l[i]])
W = np.array([X_u[i]])
classId = np.array([patClassId[i]])
no_Pats = np.array([1])
centroid = np.array([(X_l[i] + X_u[i]) / 2])
else:
b = memberG(X_l[i], X_u[i], V, W, self.gamma, self.oper)
index = np.argsort(b)[::-1]
                bSort = b[index]
if bSort[0] != 1:
adjust = False
for j in index:
# test violation of max hyperbox size and class labels
if ((np.maximum(W[j], X_u[i]) - np.minimum(V[j], X_l[i])) <= self.teta_onl).all() == True:
# adjust the j-th hyperbox
V[j] = np.minimum(V[j], X_l[i])
W[j] = np.maximum(W[j], X_u[i])
no_Pats[j] = no_Pats[j] + 1
centroid[j] = centroid[j] + (((X_l[i] + X_u[i]) / 2) - centroid[j]) / no_Pats[j]
adjust = True
if classOfX != 0 and classId[j] == 0:
classId[j] = classOfX
break
# if i-th sample did not fit into any existing box, create a new one
if not adjust:
V = np.concatenate((V, X_l[i].reshape(1, -1)), axis = 0)
W = np.concatenate((W, X_u[i].reshape(1, -1)), axis = 0)
classId = np.concatenate((classId, [classOfX]))
no_Pats = np.concatenate((no_Pats, [1]))
new_Central_Sample = (X_l[i] + X_u[i]) / 2
centroid = np.concatenate((centroid, new_Central_Sample.reshape(1, -1)), axis = 0)
return Bunch(lower=V, upper=W, classId=classId, no_pat=no_Pats, centroid=centroid)
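    # Usage sketch (illustrative values only, not executed): starting from an
    # empty set of hyperboxes, two same-class patterns are absorbed into boxes
    # whose size is bounded by teta_onl.
    #   clf = Info_Presentation_Multi_Layer_Classifier_GFMM(teta=[0.3, 0.5])
    #   empty = Bunch(lower=np.array([]), upper=np.array([]),
    #                 classId=np.array([]), no_pat=0, centroid=np.array([]))
    #   X = np.array([[0.10, 0.20], [0.15, 0.25]])
    #   boxes = clf.homogeneous_hyperbox_expansion(X, X, np.array([1, 1]), empty)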
def heterogeneous_hyperbox_expansion(self, X_l, X_u, patClassId, current_hyperboxes):
"""
        Expand the current hyperboxes to cover the input patterns; the input samples may contain different class labels
Update the number of patterns contained in the hyperboxes and their centroids of samples
INPUT
Xl Input data lower bounds (rows = objects, columns = features)
Xu Input data upper bounds (rows = objects, columns = features)
patClassId Input data class labels (crisp). patClassId[i] = 0 corresponds to an unlabeled item
current_hyperboxes A list of current hyperboxes in the Bunch datatype (properties: lower, upper, classId, no_pat, centroid)
OUTPUT
            result    A Bunch containing the lower and upper bounds, class labels, numbers of covered patterns and centroids of the hyperboxes
"""
yX = X_l.shape[0]
V = current_hyperboxes.lower
W = current_hyperboxes.upper
classId = current_hyperboxes.classId
no_Pats = current_hyperboxes.no_pat
centroid = current_hyperboxes.centroid
# for each input sample
for i in range(yX):
classOfX = patClassId[i]
if V.size == 0: # no model provided - starting from scratch
V = np.array([X_l[0]])
W = np.array([X_u[0]])
classId = np.array([patClassId[0]])
no_Pats = np.array([1])
centroid = np.array([(X_l[0] + X_u[0]) / 2])
else:
id_lb_sameX = np.logical_or(classId == classOfX, classId == 0)
if id_lb_sameX.any() == True:
V_sameX = V[id_lb_sameX]
W_sameX = W[id_lb_sameX]
lb_sameX = classId[id_lb_sameX]
id_range = np.arange(len(classId))
id_processing = id_range[id_lb_sameX]
b = memberG(X_l[i], X_u[i], V_sameX, W_sameX, self.gamma, self.oper)
index = np.argsort(b)[::-1]
bSort = b[index]
if bSort[0] != 1 or (classOfX != lb_sameX[index[0]] and classOfX != 0):
adjust = False
for j in id_processing[index]:
# test violation of max hyperbox size and class labels
if (classOfX == classId[j] or classId[j] == 0 or classOfX == 0) and ((np.maximum(W[j], X_u[i]) - np.minimum(V[j], X_l[i])) <= self.teta_onl).all() == True:
# adjust the j-th hyperbox
V[j] = np.minimum(V[j], X_l[i])
W[j] = np.maximum(W[j], X_u[i])
no_Pats[j] = no_Pats[j] + 1
centroid[j] = centroid[j] + (((X_l[i] + X_u[i]) / 2) - centroid[j]) / no_Pats[j]
adjust = True
if classOfX != 0 and classId[j] == 0:
classId[j] = classOfX
break
# if i-th sample did not fit into any existing box, create a new one
if not adjust:
V = np.concatenate((V, X_l[i].reshape(1, -1)), axis = 0)
W = np.concatenate((W, X_u[i].reshape(1, -1)), axis = 0)
classId = np.concatenate((classId, [classOfX]))
no_Pats = np.concatenate((no_Pats, [1]))
new_Central_Sample = (X_l[i] + X_u[i]) / 2
centroid = np.concatenate((centroid, new_Central_Sample.reshape(1, -1)), axis = 0)
else:
# new class lable => create new pattern
V = np.concatenate((V, X_l[i].reshape(1, -1)), axis = 0)
W = np.concatenate((W, X_u[i].reshape(1, -1)), axis = 0)
classId = np.concatenate((classId, [classOfX]))
no_Pats = np.concatenate((no_Pats, [1]))
new_Central_Sample = (X_l[i] + X_u[i]) / 2
centroid = np.concatenate((centroid, new_Central_Sample.reshape(1, -1)), axis = 0)
return Bunch(lower=V, upper=W, classId=classId, no_pat=no_Pats, centroid=centroid)
def homogeneous_worker_distribution_chunk_by_class(self, chunk_data, dic_current_hyperboxes, nprocs):
"""
Distribute data in the current chunk to each worker according to class labels in turn
INPUT
            chunk_data                 a dictionary mapping each class label to its Bunch of input data (properties: data, label)
            dic_current_hyperboxes     a dictionary mapping each label to a list of nprocs Bunches holding the current hyperbox coordinates
            nprocs                     number of worker processes to spawn
        OUTPUT
            dic_results                a dictionary mapping each label to a list of nprocs Bunches holding the new hyperbox coordinates
"""
dic_results = dic_current_hyperboxes
with ProcessPoolExecutor(max_workers=nprocs) as executor:
for key in chunk_data:
futures = []
# get list of current hyperboxes or initialize empty list if not exist list or input key
if len(dic_current_hyperboxes) > 0 and (key in dic_current_hyperboxes):
boxes = dic_current_hyperboxes[key]
else:
boxes = np.empty(nprocs, dtype=Bunch)
for j in range(nprocs):
boxes[j] = Bunch(lower=np.array([]), upper=np.array([]), classId=np.array([]), no_pat=0, centroid=np.array([]))
values = chunk_data[key]
num_samples = len(values.data)
if num_samples >= nprocs:
chunksize = int(math.ceil(num_samples / float(nprocs)))
for i in range(nprocs):
X_l = values.data[(chunksize * i) : (chunksize * (i + 1))]
X_u = values.data[(chunksize * i) : (chunksize * (i + 1))]
patClassId = values.label[(chunksize * i) : (chunksize * (i + 1))]
futures.append(executor.submit(self.homogeneous_hyperbox_expansion, X_l, X_u, patClassId, boxes[i]))
else:
                    futures.append(executor.submit(self.homogeneous_hyperbox_expansion, values.data, values.data, values.label, boxes[0]))
# Instruct workers to process results as they come, when all are completed
as_completed(futures) # wait all workers completed
lst_current_boxes = []
for future in futures:
lst_current_boxes.append(future.result())
dic_results[key] = lst_current_boxes
return dic_results
def heterogeneous_worker_distribution_chunk(self, lst_chunk_data, lst_current_hyperboxes, nprocs):
"""
Distribute data in the current chunk to each worker according to the order of patterns
INPUT
            lst_chunk_data             a list of Bunch input data (properties: data, label)
            lst_current_hyperboxes     a list of Bunches holding the current hyperbox coordinates (the list length matches the number of initial cores)
nprocs number of processes needs to be generated
OUTPUT
lst_result a list of newly generated coordinates of hyperboxes
"""
lst_results = []
futures = []
if len(lst_current_hyperboxes) == 0:
lst_current_hyperboxes = np.empty(nprocs, dtype=Bunch)
for j in range(nprocs):
                lst_current_hyperboxes[j] = Bunch(lower=np.array([]), upper=np.array([]), classId=np.array([]), no_pat=0, centroid=np.array([]))
import numpy as np
import scipy.sparse as sp
from tqdm import tqdm
from recsys.mf.core import CoreMF
class BPR(CoreMF):
def __init__(self, iterations, factors, learning_rate, alpha, seed):
super().__init__(iterations, factors, learning_rate, alpha, seed=seed, beta=0, calculate_loss=False)
self.positives = {}
self.negatives = {}
def negative_choice(self, user):
return np.random.choice(self.negatives[user])
def fit(self, user_to_item: sp.csr_matrix):
self.__fit_preparation__(user_to_item)
implicit_values = user_to_item.toarray()
n_users, n_items = user_to_item.shape
items_range = np.arange(n_items)
        users_range = np.unique(self.user_indices)
# import modules
import os
import numpy as np
import torch, torchvision
import torch.nn as nn
import torch.nn.functional as F
import subprocess
from collections import defaultdict, Counter
from tqdm import tqdm
import math
# import mmcv functionalities
from mmdet.models import build_detector
from mmcv.runner import load_checkpoint
from mmcv.parallel import MMDataParallel, collate, scatter
from mmdet.apis import single_gpu_test, train_detector, init_detector, inference_detector, show_result_pyplot
from mmdet.datasets import (build_dataloader, build_dataset, replace_ImageToTensor)
from mmdet.datasets.dataset_wrappers import (ConcatDataset, RepeatDataset, ClassBalancedDataset)
from mmcv.ops import RoIPool
from mmdet.core import get_classes, bbox2roi, bbox_mapping, merge_aug_bboxes, merge_aug_masks, multiclass_nms
from mmdet.datasets.pipelines import Compose
#---------------------------------------------------------------------------#
#----------- Custom function to load dataset and return class --------------#
#---------------- wise object and image level statistics -------------------#
#---------------------------------------------------------------------------#
def get_class_statistics(dataset, indices):
class_objects = {} # define empty dict to hold class wise ground truths
for i in range(len(dataset.CLASSES)):
class_objects[i] = list()
for i in indices:
img_data, index = dataset[i]
gt_labels = img_data['gt_labels'].data.numpy()
for label in gt_labels:
class_objects[label].append(index)
#------------ print statistics -------------#
    print("Class".ljust(15), "No. of objects".ljust(15), "No. of images")
print("-"*40)
for key, val in class_objects.items():
print(dataset.CLASSES[key].ljust(15), str(len(val)).ljust(15), len(set(val)))
return class_objects
#---------------------------------------------------------------------------#
#--------------- Custom function to create Class Imbalance -----------------#
#---------------------------------------------------------------------------#
def create_custom_dataset(fullSet, all_indices, rare_class_budget, unrare_class_budget, imbalanced_classes, all_classes):
labelled_budget = {}
labelled_indices, unlabelled_indices = list(), list()
exhausted_rare_classes = set()
# initialize budget for rare and unrare class from the split_config file
for i in range(len(fullSet.CLASSES)):
if i in imbalanced_classes:
labelled_budget[i] = rare_class_budget
else:
labelled_budget[i] = unrare_class_budget
# iterate through whole dataset to select images class wise
for i in all_indices:
img_data, index = fullSet[i]
#print(img_data)
gt_labels = img_data['gt_labels'].data.numpy()
# skip image if it does not contain classes with budget left
if exhausted_rare_classes & set(gt_labels) or not (all_classes & set(gt_labels)):
continue
# else add image to the labelled pool and decrease budget class wise
for label, no_of_objects in Counter(gt_labels).items():
labelled_budget[label] -= no_of_objects # decrease budget
if label in all_classes and labelled_budget[label] <= 0: # budget exhausted
#print(fullSet.CLASSES[label]," class exhausted...")
all_classes.remove(label)
if label in imbalanced_classes: # if rare class
#print("added to rare class list")
exhausted_rare_classes.add(label) # add to exhausted list of rare_classes
labelled_indices.append(index) # add image to labelled pool
if not len(all_classes): # if budget exceeded for all the classes, stop & return dataset
#print("\nall class budget exhausted...")
break
# remove labelled indices from the full list
labelled_indices = np.asarray(labelled_indices)
unlabelled_indices = np.setdiff1d(all_indices, labelled_indices)
# print dataset statistics
stats = get_class_statistics(fullSet, labelled_indices)
return labelled_indices, unlabelled_indices
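# Usage sketch for the imbalance split above (all names and budgets are
# illustrative, not part of this file):
#   trn_dataset = build_dataset(cfg.data.train)
#   all_indices = np.arange(len(trn_dataset))
#   rare = {3, 8, 14}   # classes to keep under-represented
#   lab_idx, unlab_idx = create_custom_dataset(
#       trn_dataset, all_indices, rare_class_budget=50, unrare_class_budget=500,
#       imbalanced_classes=rare, all_classes=set(range(len(trn_dataset.CLASSES))))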
#---------------------------------------------------------------------------#
#------------ Custom function to extract proposals from images -------------#
#---------------------------------------------------------------------------#
def extract_proposal_features(model, features, img_metas):
assert model.with_bbox, 'Bbox head must be implemented.'
proposal_list = model.rpn_head.simple_test_rpn(features, img_metas)
return proposal_list
#---------------------------------------------------------------------------#
#---------------- Custom function to extract resized features --------------#
#------------------ from different layers after RoI Pooling ----------------#
#---------------------------------------------------------------------------#
def get_RoI_features(model, features, proposals, with_shared_fcs=False, only_cls_scores=False):
""" Extract features from either the RoI pooling layers or shared Fully-connected layers
or directly return the class_scores from the final class predictor itself.
Args:
model (nn.Module): The loaded detector.
features (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor.
proposals (list[Tensor]): Either predicted proposals from RPN layers (unlabelled images)
or transformed proposals from ground truth bounding boxes (query set).
with_shared_fcs (Bool): if True, return the features from shared FC layer; default is False
only_cls_scores (Bool): if True, return the class_scores from the final predictor; default is False
Returns:
(List[Tensor]) : If 'only_cls_scores' flag is set, class_scores from the final predictor for each
proposal will be returned, otherwise return the feature maps after flattening out.
"""
device = next(model.parameters()).device # model device
rois = bbox2roi(proposals).to(device=device) # convert proposals to Region of Interests
bbox_feats = model.roi_head.bbox_roi_extractor(
features[:model.roi_head.bbox_roi_extractor.num_inputs], rois)
if model.roi_head.with_shared_head:
bbox_feats = model.roi_head.shared_head(bbox_feats)
#print("Features shape from RoI Pooling Layer: ",bbox_feats.shape) # [no_of_proposals, 256, 7, 7]
x = bbox_feats.flatten(1) # flatten the RoI Pooling features
if with_shared_fcs or only_cls_scores: # extract flattened features from shared FC layers
for fc in model.roi_head.bbox_head.shared_fcs:
x = model.roi_head.bbox_head.relu(fc(x))
if only_cls_scores: # if cls_scores flag is set
cls_scores = model.roi_head.bbox_head.fc_cls(x) if model.roi_head.bbox_head.with_cls else None
return cls_scores # return class scores from the final class predictors
# else return output from the shared_fc layers
return x
# else return features from the RoI pooling layer
return bbox_feats
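# Minimal usage sketch (assumes a loaded detector ``model`` plus a batch of
# ``imgs`` and matching ``img_metas``; variable names are illustrative only):
#   with torch.no_grad():
#       feats = model.extract_feat(imgs)
#   proposals = extract_proposal_features(model, feats, img_metas)
#   roi_feats = get_RoI_features(model, feats, proposals, with_shared_fcs=True)
#   cls_scores = get_RoI_features(model, feats, proposals, only_cls_scores=True)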
#---------------------------------------------------------------------------#
#-------- Define custom Uncertainty Score function : Score each image ------#
#-------- on the basis of Entropy and select the images with topmost -------#
#-------- Uncertainty scores for Next round of Uncertainty Sampling --------#
#---------------------------------------------------------------------------#
def get_uncertainty_scores(model, img_loader, no_of_imgs, imb_classes=None):
uncertainty_scores = torch.zeros(no_of_imgs)
device = next(model.parameters()).device # model device
if imb_classes is not None:
print('using imbalanced classes ', imb_classes)
for i, data_batch in enumerate(tqdm(img_loader)): # for each batch
# split the dataloader output into image_data and dataset indices
img_data, indices = data_batch[0], data_batch[1].numpy()
imgs, img_metas = img_data['img'].data[0].to(device=device), img_data['img_metas'].data[0]
# extract image features from backbone + FPN neck
with torch.no_grad():
features = model.extract_feat(imgs)
# get batch proposals from RPN Head and extract class scores from RoI Head
batch_proposals = extract_proposal_features(model, features, img_metas)
batch_cls_scores = get_RoI_features(model, features, batch_proposals, only_cls_scores=True)
# normalize class_scores for each image to range between (0,1) which indicates
# probability whether an object of that class has a bounding box centered there
batch_cls_scores = batch_cls_scores.softmax(-1)
# calculate class_entropies from the class probabilities
# formula : entropy(p) = -[(p * logp) + {(1-p) * log(1-p)}] => (-p * logp) + {p * log(1-p)} - log(1-p)
logp = torch.log2(batch_cls_scores)
negp = torch.neg(batch_cls_scores)
logOneMinusP = torch.log2(torch.add(negp, 1))
batch_cls_scores = torch.add((negp * logp), torch.sub((batch_cls_scores * logOneMinusP),logOneMinusP))
# split class_entropies as per no. of proposals in each image within batch
num_proposals_per_img = tuple(len(p) for p in batch_proposals)
batch_cls_scores = batch_cls_scores.split(num_proposals_per_img, 0)
# for each image, take the max of class_entropies per proposal and aggregate over all proposals (average-max)
for j, img_cls_scores in enumerate(batch_cls_scores):
if imb_classes is not None: # use imbalanced class scores only for uncertainty score calculation
imb_scores = torch.zeros(len(imb_classes))
for k, imb_cls in enumerate(imb_classes):
imb_scores[k] = torch.mean(img_cls_scores[:, imb_cls]) # average of each imb class over all proposals
final_score = torch.max(imb_scores) # take max over all imb class averages
else: # use all class scores for uncertainty score calculation
max_scores_per_proposal, _ = torch.max(img_cls_scores, dim=1) # take max of all class scores per proposal
final_score = torch.mean(max_scores_per_proposal,dim=0) # average over all proposals (avg-max implement)
# store final uncertainty score for current image
uncertainty_scores[indices[j]] = round(final_score.item(), 4)
return uncertainty_scores
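# Hedged example of consuming the scores (the budget value is illustrative):
# higher entropy means a more uncertain image, so the acquisition step can
# simply take the top-k entries of the returned vector.
#   scores = get_uncertainty_scores(model, unlabelled_loader, len(unlabelled_set))
#   _, acquired = torch.topk(scores, k=100)   # dataset indices to label next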
#---------------------------------------------------------------------------#
#------ Custom function to extract RoI features from Unlabelled set --------#
#---------------------------------------------------------------------------#
def get_unlabelled_RoI_features(model, unlabelled_loader, feature_type):
device = next(model.parameters()).device # model device
unlabelled_indices = list()
unlabeled_features = []
if(feature_type == "fc"):
fc_features = True
for i, data_batch in enumerate(tqdm(unlabelled_loader)): # for each batch
# split the dataloader output into image_data and dataset indices
img_data, indices = data_batch[0], data_batch[1].numpy()
imgs, img_metas = img_data['img'].data[0].to(device=device), img_data['img_metas'].data[0]
# extract image features from backbone + FPN neck
with torch.no_grad():
features = model.extract_feat(imgs)
# get batch proposals from RPN Head and extract class scores from RoI Head
batch_proposals = extract_proposal_features(model, features, img_metas)
batch_roi_features = get_RoI_features(model, features, batch_proposals, with_shared_fcs=fc_features)
num_proposals_per_img = tuple(len(p) for p in batch_proposals)
batch_roi_features = batch_roi_features.split(num_proposals_per_img, 0)
for j, img_roi_features in enumerate(batch_roi_features):
# print(indices[j], img_roi_features.shape)
unlabelled_indices.append(indices[j]) # add image index to list
xf = img_roi_features.detach().cpu().numpy()
unlabeled_features.append(xf)
# xf = np.expand_dims(xf, axis=0)
# if(len(unlabeled_features.shape)==1):
# unlabeled_features = xf
# else:
# unlabeled_features = np.vstack((unlabeled_features, xf))
unlabeled_features = np.stack(unlabeled_features, axis=0)
return unlabeled_features, unlabelled_indices
#---------------------------------------------------------------------------#
#-------------- Custom function to Select Top-K Proposals ------------------#
#---------------------------------------------------------------------------#
def select_top_k_proposals(fg_cls_scores, fg_classes_with_max_score, fg_classes, proposal_budget):
# get the indices in order which sorts the foreground class proposals scores in descending order
max_score_order = torch.argsort(fg_cls_scores, descending=True).tolist()
selected_prop_indices = list()
# loop through until proposal budget is exhausted
while proposal_budget:
cls_budget, per_cls_budget, next_round_max_score_order = dict(), (proposal_budget // len(fg_classes)) + 1, list()
# assign budget to each foreground class
for cls in fg_classes:
cls_budget[cls.item()] = per_cls_budget
# loop through the ordered list
for idx in max_score_order:
curr_class = fg_classes_with_max_score[idx].item()
if cls_budget[curr_class]: # if budget permits
selected_prop_indices.append(idx) # add index to selection list
cls_budget[curr_class] -= 1 # reduce class budget
proposal_budget -= 1 # reduce proposal budget
if not proposal_budget: # stop if proposal budget exhausted
break
else:
next_round_max_score_order.append(idx)
# limit the order_list to indices not chosen in current iteration
max_score_order = next_round_max_score_order
return selected_prop_indices
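# Quick sanity check of the round-robin selection above (toy tensors only; not
# part of the pipeline and safe to delete). With a budget of 3, the two best
# class-0 proposals and the best class-1 proposal are kept, i.e. indices [0, 1, 2].
_toy_scores = torch.tensor([0.9, 0.8, 0.7, 0.6, 0.5])
_toy_classes = torch.tensor([0, 0, 1, 0, 1])   # predicted class per proposal
_toy_present = torch.tensor([0, 1])            # foreground classes present
assert select_top_k_proposals(_toy_scores, _toy_classes, _toy_present, 3) == [0, 1, 2]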
#---------------------------------------------------------------------------#
#---------------- Custom function to extract RoI features ------------------#
#---------------- from Unlabelled set with Top-K Proposals -----------------#
#---------------------------------------------------------------------------#
def get_unlabelled_top_k_RoI_features(model, unlabelled_loader, proposal_budget, feature_type):
device = next(model.parameters()).device # model device
unlabelled_indices = list()
unlabelled_roi_features = list()
if(feature_type == "fc"):
fc_features = True
for i, data_batch in enumerate(tqdm(unlabelled_loader)): # for each batch
# split the dataloader output into image_data and dataset indices
img_data, img_indices = data_batch[0], data_batch[1].numpy()
imgs, img_metas = img_data['img'].data[0].to(device=device), img_data['img_metas'].data[0]
# extract image features from backbone + FPN neck
with torch.no_grad():
features = model.extract_feat(imgs)
# get batch proposals from RPN Head and extract class scores from RoI Head
batch_proposals = extract_proposal_features(model, features, img_metas)
batch_roi_features = get_RoI_features(model, features, batch_proposals, with_shared_fcs=True)
batch_cls_scores = get_RoI_features(model, features, batch_proposals, only_cls_scores=True)
# normalize class_scores for each image to range between (0,1) which indicates
# probability whether an object of that class has a bounding box centered there
batch_cls_scores = batch_cls_scores.softmax(-1)
# split features and cls_scores
num_proposals_per_img = tuple(len(p) for p in batch_proposals)
batch_cls_scores = batch_cls_scores.split(num_proposals_per_img, 0)
batch_roi_features = batch_roi_features.split(num_proposals_per_img, 0)
# for each image, select the top-k proposals where k = proposal_budget
for j, img_cls_scores in enumerate(batch_cls_scores):
img_roi_features = batch_roi_features[j]
max_score_per_proposal, max_score_classes = torch.max(img_cls_scores, dim=1) # take max of all class scores per proposal
classes, indices, counts = torch.unique(max_score_classes, return_inverse=True, return_counts=True)
bg_class_index, bg_count, num_proposals = len(classes) - 1, counts[-1], len(indices)
fg_indices = indices != bg_class_index
#print(classes, indices, counts)
fg_img_cls_scores = max_score_per_proposal[fg_indices]
fg_classes_with_max_score = max_score_classes[fg_indices]
fg_img_roi_features = img_roi_features[fg_indices]
#print(fg_img_roi_features.shape)
if bg_count > num_proposals - proposal_budget: # no. of foreground proposals < proposal_budget
#print("augment some background imgs")
bg_indices = indices == bg_class_index
bg_img_roi_features = img_roi_features[bg_indices][:bg_count - num_proposals + proposal_budget]
selected_roi_features = torch.cat((fg_img_roi_features, bg_img_roi_features)).detach().cpu().numpy()
del bg_indices, bg_img_roi_features
elif bg_count == num_proposals - proposal_budget: # no. of foreground proposals = proposal_budget
#print("no need to augment or select")
selected_roi_features = fg_img_roi_features.detach().cpu().numpy()
else: # no. of foreground proposals > proposal_budget
#print("select from foreground imgs")
top_k_indices = select_top_k_proposals(fg_img_cls_scores, fg_classes_with_max_score, classes[:-1], proposal_budget)
#print(fg_classes_with_max_score[top_k_indices])
selected_roi_features = fg_img_roi_features[top_k_indices].detach().cpu().numpy()
# append to unlebelled_roi_features list
unlabelled_roi_features.append(selected_roi_features)
unlabelled_indices.append(img_indices[j]) # add image index to list
# free up gpu_memory
del max_score_per_proposal, max_score_classes, classes, indices, counts, bg_class_index, bg_count, num_proposals,fg_indices, fg_img_cls_scores, fg_classes_with_max_score, fg_img_roi_features
unlabelled_features = np.stack(unlabelled_roi_features, axis=0)
return unlabelled_features, unlabelled_indices
#---------------------------------------------------------------------------#
#--------- Custom function to extract RoI features from Query set ----------#
#---------------------------------------------------------------------------#
def get_query_RoI_features(model, query_loader, imbalanced_classes, feature_type):
device = next(model.parameters()).device # model device
query_indices = list()
query_features = []
if(feature_type == "fc"):
fc_features = True
for i, data_batch in enumerate(tqdm(query_loader)): # for each batch
# split the dataloader output into image_data and dataset indices
img_data, indices = data_batch[0], data_batch[1].numpy()
imgs, img_metas = img_data['img'].data[0].to(device=device), img_data['img_metas'].data[0]
batch_gt_bboxes = img_data['gt_bboxes'].data[0] # extract gt_bboxes from data batch
batch_gt_labels = img_data['gt_labels'].data[0] # extract gt_labels from data batch
gt_bboxes, gt_labels = list(), list()
# filter only the imbalanced class bboxes and labels
for img_gt_bboxes, img_gt_labels in zip(batch_gt_bboxes, batch_gt_labels):
#print(img_gt_bboxes, img_gt_labels)
imb_cls_indices = torch.zeros(len(img_gt_labels), dtype=torch.bool)
for imb_class in imbalanced_classes:
imb_cls_indices = (imb_cls_indices | torch.eq(img_gt_labels, imb_class))
#print('rare class:',img_gt_labels[imb_cls_indices], img_gt_bboxes[imb_cls_indices])
gt_bboxes.append(img_gt_bboxes[imb_cls_indices])
gt_labels.append(img_gt_labels[imb_cls_indices])
num_gts_per_img = tuple(len(p) for p in gt_bboxes) # store how many bboxes per img
#print(num_gts_per_img)
#print(gt_bboxes, gt_labels)
gt_bboxes = torch.cat(gt_bboxes) # stack all bboxes across batch of imgs
gt_labels = torch.cat(gt_labels) # stack all labels across batch of imgs
#print(gt_bboxes, gt_labels)
# append confidence score of 1.0 to each gt_bboxes
batch_proposals = torch.cat((gt_bboxes, torch.ones(gt_bboxes.shape[0], 1)), 1)
# return batch proposals to original shape as were in batch
batch_proposals = batch_proposals.split(num_gts_per_img, 0)
# extract image features from backbone + FPN neck
with torch.no_grad():
features = model.extract_feat(imgs)
batch_roi_features = get_RoI_features(model, features, batch_proposals, with_shared_fcs=fc_features)
batch_roi_features = batch_roi_features.split(num_gts_per_img, 0)
for j, img_roi_features in enumerate(batch_roi_features):
#print(indices[j], img_roi_features.shape)
query_indices.append(indices[j]) # add image index to list
xf = img_roi_features.detach().cpu().numpy()
query_features.append(xf)
# query_features = np.stack(query_features, axis=0)
return query_features, query_indices
#---------------------------------------------------------------------------#
#------- Custom function to prepare Validation set from labelled set -------#
#---------------------------------------------------------------------------#
def prepare_val_file(trn_dataset, indices, filename_07='trainval_07.txt', filename_12='trainval_12.txt', strat_dir='.'):
trnval_07_file = open(os.path.join(strat_dir, filename_07), 'w')
trnval_12_file = open(os.path.join(strat_dir,filename_12), 'w')
for i, index in enumerate(indices):
img_prefix = trn_dataset[index][0]['img_metas'].data['filename'].split('/')[2]
img_name = trn_dataset[index][0]['img_metas'].data['filename'].split('/')[-1].split('.')[0]
if img_prefix == 'VOC2007':
trnval_07_file.write(img_name + '\n')
else:
trnval_12_file.write(img_name + '\n')
trnval_07_file.close()
trnval_12_file.close()
return [trnval_07_file.name, trnval_12_file.name]
#---------------------------------------------------------------------------#
#----------- Custom function for Query-Query kernel computation ------------#
#---------------------------------------------------------------------------#
def compute_queryQuery_kernel(query_dataset_feat):
query_query_sim = []
for i in range(len(query_dataset_feat)):
query_row_sim = []
for j in range(len(query_dataset_feat)):
query_feat_i = query_dataset_feat[i] #(num_proposals, num_features)
query_feat_j = query_dataset_feat[j]
query_feat_i = l2_normalize(query_feat_i)
query_feat_j = l2_normalize(query_feat_j)
dotp = np.tensordot(query_feat_i, query_feat_j, axes=([1],[1])) #compute the dot product along the feature dimension, i.e between every GT bbox of rare class in the query image
            max_match_queryGt_queryGt = np.amax(dotp, axis=(0,1))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pathlib
from collections import OrderedDict
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import pandas as pd
import scipy.stats
import math
import matplotlib.pyplot as plt
from baselines.common.schedules import LinearSchedule
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
import rddlgym  # provides rddlgym.Trajectory used in the training and test loops below
tf.enable_eager_execution()
tf.keras.backend.set_floatx('float32')
# Gaussian Noise Class (Diagonal Covariance Matrix)
class GaussActionNoise:
def __init__(self, mean, std_deviation, dim = 2):
self.mean = mean
self.std_dev = std_deviation
self.dim = dim
def __call__(self):
x = np.random.normal(self.mean, self.std_dev, self.dim)
return x
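# Small usage sketch: a zero-mean Gaussian noise source for a 3-dimensional
# action vector; each call draws an independent sample of shape (3,).
_example_noise = GaussActionNoise(mean=0.0, std_deviation=0.2, dim=3)
assert _example_noise().shape == (3,)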
# Parent Buffer class
class Buffer:
def __init__(self, buffer_capacity=100000, batch_size=64, prioritized_replay_eps=1e-6):
# Number of "experiences" to store at max
self.buffer_capacity = buffer_capacity
# Num of tuples to train on.
self.batch_size = batch_size
self.prioritized_replay_eps = prioritized_replay_eps
def __len__(self):
return len(self.buffer)
# Takes (s,a,r,s') obervation tuple as input
def record(self, obs, action, rew, new_obs, done):
self.buffer.add(obs, action, rew, new_obs, float(done))
def learn(self, beta):
experience = self.buffer.sample(self.batch_size, beta = beta)
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
rewards = tf.expand_dims(rewards, 1)
update_metrics = self.update(obses_t, actions, rewards, obses_tp1, dones, weights.astype(np.float32))
td_errors = update_metrics[0]
# update priorities
new_priorities = np.abs(td_errors) + self.prioritized_replay_eps
self.buffer.update_priorities(batch_idxes, new_priorities)
return update_metrics
# Q(s,a) Buffer
class QsaBuffer(Buffer):
def __init__(self, buffer_capacity=100000, batch_size=64, alpha = 0.6):
super(QsaBuffer, self).__init__(buffer_capacity, batch_size, prioritized_replay_eps = 1e-6)
self.buffer = PrioritizedReplayBuffer(self.buffer_capacity, alpha = alpha)
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch, dones_batch, impt_weights_batch
):
with tf.GradientTape() as tape:
target_actions = mm_target_actor(next_state_batch, training=True)
y = reward_batch + gamma * mm_target_qsa(
[next_state_batch, target_actions], training=True
)
qsa_value = mm_qsa([state_batch, action_batch], training=True)
td_errors = y - qsa_value
qsa_loss = tf.math.reduce_mean(impt_weights_batch * tf.math.square(td_errors))
qsa_grad = tape.gradient(qsa_loss, mm_qsa.trainable_variables)
mm_qsa_optimizer.apply_gradients(
zip(qsa_grad, mm_qsa.trainable_variables)
)
qsa_grad_list = []
for grad in qsa_grad:
qsa_grad_list.append(tf.math.reduce_mean(tf.abs(grad)))
return td_errors, qsa_loss, tf.math.reduce_mean(qsa_grad_list)
# MM Actor Buffer Class
class ActorBuffer(Buffer):
def __init__(self, buffer_capacity=100000, batch_size=64):
super(ActorBuffer, self).__init__(buffer_capacity, batch_size)
self.buffer = ReplayBuffer(self.buffer_capacity)
# Eager execution is turned on by default in TensorFlow 2. Decorating with tf.function allows
# TensorFlow to build a static graph out of the logic and computations in our function.
# This provides a large speed up for blocks of code that contain many small TensorFlow operations such as this one.
@tf.function
def update(
self, state_batch, action_batch, reward_batch, next_state_batch,
):
with tf.GradientTape() as tape:
actions = mm_actor(state_batch, training = True)
nextState_Mean = state_batch \
+ time_delta / cap * (actions * cap_air * (temp_air - state_batch)) * is_room \
+ (tf.linalg.matmul(state_batch, tf.cast(adj_matrix / r_wall_matrix, tf.float32)) \
- tf.math.multiply(state_batch, tf.cast(tf.math.reduce_sum(adj_matrix / r_wall_matrix, 1), tf.float32))) \
+ adj_out * temp_out_mean / r_outside \
- adj_out * state_batch / r_outside \
+ adj_hall * temp_hall_mean / r_hall \
- adj_hall * state_batch / r_hall
ndim = state_batch.get_shape().as_list()[1]
diffMatrix = next_state_batch - nextState_Mean
prob_nextState = tf.math.exp(-0.5 * tf.reduce_sum(tf.matmul(diffMatrix, tf.cast(tf.linalg.inv(gaussian_cov), tf.float32)) * diffMatrix, 1)) / tf.math.sqrt((2 * math.pi)**ndim * tf.cast(tf.linalg.det(gaussian_cov), tf.float32))
prob_nextState /= max_pdf # Divide by maximum possible pdf (normalize to below 1)
prob_nextState += 1e-12 # Small value tolerance (avoid pdf being 0 due to limited accuracy)
#next_state_actions = tf.dtypes.cast(actor_model(next_state_batch, training = True), tf.float64)
#next_state_actions = tf.dtypes.cast(target_actor(next_state_batch, training = True), tf.float64)
V_currState = mm_qsa([state_batch, actions], training=True)
next_state_actions = mm_lag_actor(next_state_batch, training = True)
V_nextState = mm_qsa([next_state_batch, next_state_actions], training=True)
out_of_range_bool = (state_batch < temp_low_vec) | (state_batch > temp_up_vec)
reward_sa = -tf.math.reduce_sum(is_room * (actions * cost_air_var \
+ tf.cast(out_of_range_bool, tf.float32) * (penalty_var) \
+ 10.0 * tf.math.abs((temp_up_vec + temp_low_vec) / 2.0 - state_batch)),
axis = 1)
#actor_loss = -tf.math.reduce_mean(gamma * V_nextState * tf.math.log(prob_nextState))
actor_loss = -tf.math.reduce_mean(reward_sa + gamma * (V_nextState - V_currState) * tf.math.log(prob_nextState))
actor_grad = tape.gradient(actor_loss, mm_actor.trainable_variables)
mm_actor_optimizer.apply_gradients(
zip(actor_grad, mm_actor.trainable_variables)
)
actor_grad_list = []
for grad in actor_grad:
actor_grad_list.append(tf.math.reduce_mean(tf.abs(grad)))
return actor_loss, tf.math.reduce_mean(actor_grad_list)
# For Actor buffer
def learn(self):
obses_t, actions, rewards, obses_tp1, dones = self.buffer.sample(self.batch_size)
rewards = tf.expand_dims(rewards, 1)
update_metrics = self.update(obses_t, actions, rewards, obses_tp1)
return update_metrics
# Actor Network Architecture
def get_actor():
# Initialize weights between -3e-3 and 3-e3
last_init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
inputs = layers.Input(shape=(num_states,))
out = layers.Dense(2048, activation = "relu")(inputs)
out = layers.LayerNormalization()(out)
outputs = layers.Dense(num_actions, activation="sigmoid",
kernel_initializer=last_init)(out)
outputs = outputs * air_max_vec
model = tf.keras.Model(inputs, outputs)
return model
# Qsa Network Architecture
def get_qsa():
last_init = tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3)
# State as input
state_input = layers.Input(shape=(num_states))
state_out = layers.Dense(32, activation = "relu")(state_input)
state_out = layers.LayerNormalization()(state_out)
# Action as input
action_input = layers.Input(shape=(num_actions))
action_out = layers.Dense(32, activation = "relu")(action_input)
action_out = layers.LayerNormalization()(action_out)
# Both are passed through seperate layer before concatenating
concat = layers.Concatenate()([state_out, action_out])
out = layers.Dense(2048, activation = "relu")(concat)
out = layers.LayerNormalization()(out)
outputs = layers.Dense(1,
activation="relu",
kernel_initializer=last_init)(out)
# Try RELU output to make it +ve (QMix)
outputs = outputs * -1.0
# Outputs single value for give state-action
model = tf.keras.Model([state_input, action_input], outputs)
return model
# Policy Function
def policy(actor_model, state, noise_object, t):
sampled_actions = tf.squeeze(actor_model(state))
noise = noise_object()
# Adding noise to action
sampled_actions = sampled_actions.numpy() + (noise * air_max_vec)
# We make sure action is within bounds
legal_action = np.clip(sampled_actions, 0, air_max_vec)
return np.squeeze(legal_action)
# Calculate Cumulative Discounted Rewards
def calcDiscRewards(traj_rewards, gamma):
i, total_reward = 0, 0
for r in traj_rewards:
total_reward += ((gamma**i) * r)
i += 1
return total_reward
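# Tiny worked example (plain Python, safe to run anywhere): three rewards of 1.0
# discounted with gamma = 0.5 accumulate to 1 + 0.5 + 0.25 = 1.75.
assert abs(calcDiscRewards([1.0, 1.0, 1.0], 0.5) - 1.75) < 1e-9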
# This updates target parameters slowly
# Based on rate `tau`, which is much less than one.
@tf.function
def update_target(target_weights, weights, tau):
for (a, b) in zip(target_weights, weights):
a.assign(b * tau + a * (1 - tau))
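# For example, with tau = 0.005 each target weight w_t is nudged towards its
# online counterpart w as w_t <- 0.005 * w + 0.995 * w_t, i.e. an exponential
# moving average that keeps the target networks changing slowly.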
@tf.function
def update_lag_actor(lag_actor_weights, weights):
for (a, b) in zip(lag_actor_weights, weights):
a.assign(b)
def set_start_state(rddl_env, start_state):
# Initialize Environment
state, t = rddl_env.reset()
# Set Start State
state['temp/1'] = np.array(start_state)
    rddl_env._state['temp/1'] = state['temp/1']
return rddl_env, state, t
def simulate_test(rddl_env, start_state, total_episodes, seed):
# list of trajectories
listTrajectory = []
listTestTrajectories = []
disc_rewards_arr, mean_qsa_loss_arr, mean_actor_loss_arr, mean_qsa_grad_arr, mean_actor_grad_arr = [], [], [], [], []
beta_schedule = LinearSchedule(total_episodes * horizon,
initial_p = 1.0,
final_p = 1.0)
noise_schedule = LinearSchedule(total_episodes * horizon,
initial_p = 0.2,
final_p = 0.0)
t_iter, best_mean_undisc_reward = 0, float('-inf')
for ep in range(total_episodes):
# initialize environament
if ep == 0:
rddl_env, state, t = set_start_state(rddl_env, start_state)
else:
rddl_env, state, t = set_start_state(rddl_env, np.random.uniform([0] * num_states, [30] * num_states))
done = False
# create a trajectory container
trajectory = rddlgym.Trajectory(rddl_env)
qsa_loss_arr, actor_loss_arr, qsa_grad_arr, actor_grad_arr = [], [], [], []
while not done:
curr_state = state['temp/1'].astype(np.float32)
tf_state = tf.expand_dims(tf.convert_to_tensor(curr_state), 0)
action = OrderedDict({'air/1': policy(mm_actor,
tf_state,
GaussActionNoise(mean = 0,
std_deviation = noise_schedule.value(t_iter),
dim = num_actions),
t)})
next_state, reward, done, info = rddl_env.step(action)
reward, nextState = reward.astype(np.float32), next_state['temp/1'].astype(np.float32)
# Reward scaling for HVAC-6, for training only
scaled_reward = reward / np.abs(penalty_var) * 10.0
scaled_reward = scaled_reward.astype(np.float32)
q_buffer.record(curr_state, action['air/1'].astype(np.float32), scaled_reward, nextState, done)
actor_buffer.record(curr_state, action['air/1'].astype(np.float32), scaled_reward, nextState, done)
if len(q_buffer) > q_buffer.batch_size:
td_errors, qsa_loss, ave_qsa_grad = q_buffer.learn(beta = beta_schedule.value(t_iter))
qsa_loss_arr.append(qsa_loss)
qsa_grad_arr.append(ave_qsa_grad)
update_target(mm_target_qsa.variables, mm_qsa.variables, tau)
if (len(actor_buffer) > actor_buffer.batch_size):
actor_loss, ave_actor_grad = actor_buffer.learn()
actor_loss_arr.append(actor_loss)
actor_grad_arr.append(ave_actor_grad)
update_target(mm_target_actor.variables, mm_actor.variables, tau)
update_lag_actor(mm_lag_actor.variables, mm_actor.variables)
trajectory.add_transition(t, state, action, reward, next_state, info, done)
state = next_state
t = rddl_env.timestep
t_iter += 1
disc_rewards = calcDiscRewards(trajectory.as_dataframe().reward, gamma)
disc_rewards_arr.append(disc_rewards)
if len(qsa_loss_arr) == 0:
mean_qsa_loss = None
mean_qsa_loss_arr.append(float('nan'))
else:
mean_qsa_loss = np.mean(qsa_loss_arr)
mean_qsa_loss_arr.append(mean_qsa_loss)
if len(actor_loss_arr) == 0:
mean_actor_loss = None
mean_actor_loss_arr.append(float('nan'))
else:
mean_actor_loss = np.mean(actor_loss_arr)
mean_actor_loss_arr.append(mean_actor_loss)
if len(qsa_grad_arr) == 0:
mean_qsa_grad = None
mean_qsa_grad_arr.append(float('nan'))
else:
mean_qsa_grad = np.mean(qsa_grad_arr)
mean_qsa_grad_arr.append(mean_qsa_grad)
if len(actor_grad_arr) == 0:
mean_actor_grad = None
mean_actor_grad_arr.append(float('nan'))
else:
mean_actor_grad = np.mean(actor_grad_arr)
mean_actor_grad_arr.append(mean_actor_grad)
print("Episode * {} * Total Reward is ==> {}".format(ep, disc_rewards))
print("Qsa loss: {}".format(mean_qsa_loss))
print("Actor loss: {}".format(mean_actor_loss))
print("Average Qsa gradient: {}".format(mean_qsa_grad))
print("Average actor gradient: {}".format(mean_actor_grad))
print()
listTrajectory.append(trajectory.as_dataframe())
if (ep + 1) % test_interval == 0:
l_test_trajs, mean_disc_r, mean_undisc_r = test_actor_loop(folderName + '/' + 'mm_test_log_' + str(ep + 1) + '.csv', env, start_state)
listTestTrajectories.append(l_test_trajs)
if mean_undisc_r > best_mean_undisc_reward:
best_mm_actor.set_weights(mm_actor.get_weights())
best_mm_qsa.set_weights(mm_qsa.get_weights())
best_mean_undisc_reward = mean_undisc_r
return disc_rewards_arr, mean_qsa_loss_arr, mean_actor_loss_arr, mean_qsa_grad_arr, mean_actor_grad_arr, listTrajectory, listTestTrajectories
def test_actor_loop(filename, rddl_env, start_state):
list_traj_df, list_disc_reward, list_undisc_reward = [], [], []
for i in range(test_loops):
# initialize environament
rddl_env, state, t = set_start_state(rddl_env, start_state)
done = False
test_trajectory = rddlgym.Trajectory(rddl_env)
while not done:
curr_state = state['temp/1'].astype(np.float32)
tf_state = tf.expand_dims(tf.convert_to_tensor(curr_state), 0)
action = OrderedDict({'air/1': policy(mm_actor, tf_state, lambda : np.array([0] * num_actions), t)})
next_state, reward, done, info = rddl_env.step(action)
test_trajectory.add_transition(t, state, action, reward, next_state, info, done)
state = next_state
t = rddl_env.timestep
test_log_df = test_trajectory.as_dataframe()
disc_reward = calcDiscRewards(test_log_df.reward, gamma)
test_log_df['Total Discounted Rewards'] = [disc_reward for i in range(test_log_df.shape[0])]
undisc_reward = calcDiscRewards(test_log_df.reward, 1.0)
test_log_df['Total Undiscounted Rewards'] = [undisc_reward for i in range(test_log_df.shape[0])]
list_traj_df.append(test_log_df)
list_disc_reward.append(disc_reward)
list_undisc_reward.append(undisc_reward)
return list_traj_df, np.mean(list_disc_reward), np.mean(list_undisc_reward)
def log_learn(folderName, lDiscRewards, lQsaLoss, lActorLoss, lQsaGrad, lActorGrad):
learn_log_df = pd.DataFrame({'Episode': [i for i in range(len(lDiscRewards))],
'Discounted Rewards': lDiscRewards,
'Qsa Loss': lQsaLoss,
'Actor Loss': lActorLoss,
'Qsa Gradient': lQsaGrad,
'Actor Gradient': lActorGrad})
learn_log_df.to_csv(folderName + 'learn_log.csv', index = False)
def log_trajectories(folderName, lTrainTraj, lListTestTraj):
for i in range(len(lTrainTraj)):
lTrainTraj[i].to_csv(folderName + 'E' + str(i + 1) + '.csv', index = False)
testTrajFolder = folderName + 'test_trajs/'
for i in range(len(lListTestTraj)):
testTraj_subFolder = testTrajFolder + 'E' + str((i + 1) * test_interval) + '/'
pathlib.Path(testTraj_subFolder).mkdir(parents = True, exist_ok = True)
for j in range(len(lListTestTraj[i])):
lListTestTraj[i][j].to_csv(testTraj_subFolder + str(j + 1) + '.csv', index = False)
def plot_graphs(dirName, lQsaLoss, lActorLoss, lQsaGrad, lActorGrad, lTrajList):
numEpisodes = len(lQsaLoss)
plt.figure()
plt.plot(range(numEpisodes), lQsaLoss)
plt.xlabel("Episode")
plt.ylabel("Average Qsa Loss Across Minibatches")
plt.savefig(dirName + 'qsa_loss.png')
plt.close()
# Plot mean_actor_loss_arr
plt.figure()
plt.plot(range(numEpisodes), lActorLoss)
plt.xlabel("Episode")
plt.ylabel("Average Actor Loss Across Minibatches")
plt.savefig(dirName + 'actor_loss.png')
plt.close()
# Plot mean_qsa_grad_arr
plt.figure()
plt.plot(range(numEpisodes), lQsaGrad)
plt.xlabel("Episode")
plt.ylabel("Average Qsa Gradient Across Minibatches")
plt.savefig(dirName + 'qsa_grad.png')
plt.close()
# Plot mean_actor_grad_arr
plt.figure()
plt.plot(range(numEpisodes), lActorGrad)
plt.xlabel("Episode")
plt.ylabel("Average Actor Gradient Across Minibatches")
plt.savefig(dirName + 'actor_grad.png')
plt.close()
ave_disc_rewards = []
ave_undisc_rewards = []
for TrajList in lTrajList:
ave_disc_rewards.append(np.mean([df['Total Discounted Rewards'][0] for df in TrajList]))
import numpy as np
from PuzzleLib import Config
from PuzzleLib.Backend import gpuarray
from PuzzleLib.Modules.Module import ModuleError
from PuzzleLib.Modules.DeconvND import DeconvND
class Deconv1D(DeconvND):
def __init__(self, inmaps, outmaps, size, stride=1, pad=0, dilation=1, postpad=0, wscale=1.0, useBias=True,
name=None, initscheme=None, empty=False, groups=1):
super().__init__(
2, inmaps, outmaps, (1, size), (1, stride), (0, pad), (1, dilation), (0, postpad), wscale, useBias,
name, initscheme, empty, groups
)
self.registerBlueprint(locals())
def optimizeForShape(self, shape, memlimit=None):
shape = shape[:2] + (1, ) + shape[2:]
super().optimizeForShape(shape, memlimit)
def updateData(self, data):
data = data.reshape(*data.shape[:2], 1, *data.shape[2:])
super().updateData(data)
self.data = self.data.reshape(*self.data.shape[:2], *self.data.shape[3:])
def updateGrad(self, grad):
grad = grad.reshape(*grad.shape[:2], 1, *grad.shape[2:])
data = self.inData
self.inData = data.reshape(*data.shape[:2], 1, *data.shape[2:])
super().updateGrad(grad)
self.inData = data
self.grad = self.grad.reshape(*self.grad.shape[:2], *self.grad.shape[3:])
def accGradParams(self, grad, scale=1.0, momentum=0.0):
grad = grad.reshape(*grad.shape[:2], 1, *grad.shape[2:])
data = self.inData
self.inData = data.reshape(*data.shape[:2], 1, *data.shape[2:])
super().accGradParams(grad, scale, momentum)
self.inData = data
def checkDataShape(self, shape):
if len(shape) != 3:
raise ModuleError("Data must be 3d tensor")
_, inmaps, _ = shape
if inmaps != self.W.shape[0]:
raise ModuleError("Data has %d maps (expected: %d)" % (inmaps, self.W.shape[0]))
def dataShapeFrom(self, shape):
batchsize, inmaps, insize = shape
_, outmaps, _, fsize = self.W.shape
_, pad = self.pad
_, postpad = self.postpad
_, dilation = self.dilation
_, stride = self.stride
outmaps *= self.groups
outsize = (insize - 1) * stride + dilation * (fsize - 1) - 2 * pad + 1 + postpad
return batchsize, outmaps, outsize
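# Worked example of the formula above, using the values from multiMapsWithPadsTest below:
# insize=2, stride=2, fsize=2, dilation=2, pad=1, postpad=0
#   outsize = (2 - 1) * 2 + 2 * (2 - 1) - 2 * 1 + 1 + 0 = 3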
def checkGradShape(self, shape):
if len(shape) != 3:
raise ModuleError("Grad must be 3d tensor")
_, outmaps, size = shape
if outmaps != self.W.shape[1] * self.groups:
raise ModuleError("Grad has %d maps (expected: %d)" % (outmaps, self.W.shape[1] * self.groups))
if size + 2 * self.pad[1] < self.dilation[1] * (self.W.shape[3] - 1) + 1:
raise ModuleError(
"Grad maps height is too small (got %d, expected at least %d)" %
(size + 2 * self.pad[1], self.dilation[1] * (self.W.shape[3] - 1) + 1)
)
def gradShapeFrom(self, shape):
batchsize, outmaps, outsize = shape
inmaps, _, _, fsize = self.W.shape
_, pad = self.pad
_, dilation = self.dilation
_, stride = self.stride
insize = (outsize + 2 * pad - dilation * (fsize - 1) - 1) // stride + 1
return batchsize, inmaps, insize
def unittest():
if Config.backend in {Config.Backend.cuda, Config.Backend.hip}:
multiMapsWithPadsTest()
trainTest()
def multiMapsWithPadsTest():
batchsize, inmaps, size = 5, 4, 2
outmaps, fsize, stride, pad, dilation = 4, 2, 2, 1, 2
hostData = np.random.randn(batchsize, inmaps, size).astype(np.float32)
data = gpuarray.to_gpu(hostData)
deconv = Deconv1D(inmaps, outmaps, size=size, stride=stride, pad=pad, dilation=dilation, initscheme="gaussian")
deconv(data)
hostW, hostBias = deconv.W.get(), deconv.b.get()
hostOutData = np.zeros(deconv.data.shape[:2]+(deconv.data.shape[2]+2*pad, ), dtype=np.float32)
for c in range(outmaps):
hostOutData[:, c, :] = hostBias[0, c, 0, 0]
for b in range(batchsize):
for oc in range(outmaps):
for ic in range(inmaps):
for x in range(size):
for dx in range(fsize):
hostOutData[b, oc, x * stride + dx * dilation] += hostW[ic, oc, 0, dx] * hostData[b, ic, x]
assert np.allclose(hostOutData[:, :, pad:-pad], deconv.data.get())
hostGrad = np.random.randn(*deconv.data.shape).astype(np.float32)
grad = gpuarray.to_gpu(hostGrad)
deconv.backward(grad)
hostExtGrad = np.zeros(grad.shape[:2] + (grad.shape[2] + 2 * pad, ), dtype=np.float32)
hostExtGrad[:, :, pad:-pad] = hostGrad
hostGrad = hostExtGrad
hostInGrad = np.zeros(hostData.shape, dtype=np.float32)
for b in range(batchsize):
for ic in range(inmaps):
for oc in range(outmaps):
for x in range(size):
for dx in range(fsize):
hostInGrad[b, ic, x] += hostGrad[b, oc, x * stride + dx * dilation] * hostW[ic, oc, 0, dx]
assert np.allclose(hostInGrad, deconv.grad.get())
hostWGrad = np.zeros(deconv.getVar("W").grad.shape, dtype=np.float32)
for b in range(batchsize):
for ic in range(inmaps):
for oc in range(outmaps):
for dx in range(fsize):
for x in range(size):
hostWGrad[ic, oc, 0, dx] += hostGrad[b, oc, x * stride + dx * dilation] * hostData[b, ic, x]
assert np.allclose(hostWGrad, deconv.getVar("W").grad.get())
hostBGrad = np.empty(hostBias.shape, dtype=np.float32)
for oc in range(outmaps):
hostBGrad[0, oc, 0, 0] = np.sum(hostGrad[:, oc, :])
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 16 16:39:02 2022
@author: James
"""
import torch
import torch.nn as nn
from torch.nn import init
from torch.autograd import Variable
from torch.nn.utils import clip_grad_norm_
import numpy as np
import time
import random
from sklearn.metrics import f1_score
import os
from utils import load_cora
from models import Encoder,SupervisedGraphSage,find_feature
if torch.cuda.is_available():
device = torch.device('cuda')
# device=torch.device('cpu')
seed=1
def set_seed(seed):
torch.manual_seed(seed)
np.random.seed(seed)
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as pat
import scipy.stats as stats
from models import ConstantVelocity2d
from tracker import PDAF, NNKF
from clutter import PoissonClutter2d
from utils import GaussianNoise, create_ellipse
def parse_arguments():
"""Parse optional arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--tracker', type=str, default='pdaf',
choices=['pdaf', 'nnkf'],
help='type of single target tracker.')
parser.add_argument('--steps', type=int, default=15,
help='number of simulation steps')
parser.add_argument('--PD', type=float, default=0.8,
help='detection probability.')
parser.add_argument('--PG', type=float, default=0.9997,
help='validation gate probability.')
parser.add_argument('--clutter_density', type=float, default=3*0.01)
parser.add_argument('--clutter_range', type=float, default=10)
parser.add_argument('--sigma_w', type=float, default=2,
help='covariance of process noise [m/s^2].')
parser.add_argument('--sigma_v', type=float, default=0.3,
help='covariance of observation noise [m/s^2].')
parser.add_argument('--initial_point', type=float, nargs=4,
default=[-100, 28, 100, -28],
help='[m, m/s, m, m/s].')
parser.add_argument('--initial_covariance', type=float, nargs=4,
default=[1, 10, 1, 10])
parser.add_argument('--dt', type=float, default=0.2,
help='sampling period [sec]')
parser.add_argument('--seed', type=int, default=-1,
help='seed of random variables.')
return parser.parse_args()
def create_tracker(args, target_model):
"""Create a target tracker."""
cov_w = np.diag([args.sigma_w**2, args.sigma_w**2])
cov_v = np.diag([args.sigma_v**2, args.sigma_v**2])
x = np.array(args.initial_point)
cov_x = np.diag(args.initial_covariance)
z = np.array([x[0], x[2]])
"""
python -m unittest pwseqdist/tests/test_distances.py
"""
import sys
import unittest
import numpy as np
from scipy.spatial.distance import squareform
import parasail
import pytest
import pwseqdist as pwsd
mixed_seqs = ['CACADLGAYPDKLIF',
'CACDALLAYTDKLIF',
'CACDAVGDTLDKLIF',
'CACDDVTEVEGDKLIF',
'CACDFISPSNWGIQSGRNTDKLIF',
'CACDILLGDTADKLIF',
'CACDIVLSGGLDTRQMFF',
'CACDLLLRQSSTDKLIF',
'CACDNLSETTDKLIF',
'CACDPLGTDKLIF',
'CACDPMGGSGGLSWDTRQMFF',
'CACDPVLGDTRLTDKLIF',
'CACDPVQGYSGQNRAYTDKLIF',
'CACDSILGDTLYTDKLIF',
'CACDSLTSHTGGFGPDKLIF',
'CACDSTGDLSSWDTRQMFF',
'CACDSVESRNVLGDPTTDKLIF',
'CACDSVLSRDLGDSELIF',
'CACDTAAGGYASSWDTRQMFF',
'CACDTAPHGGRTWDTRQMFF',
'CACDTGGYVNWDTRQMFF',
'CACDTGRLLGDTADTRQMFF',
'CACDTIRGFSSWDTRQMFF',
'CACDTIVAPALDKLIF',
'CACDTLFLGEDTPTDKLIF',
'CACDTLGDLSLTAQLFF',
'CACDTLGDPPHTDKLIF',
'CACDTLGDYTQSDKLIF',
'CACDTLGGYPWDTRQMFF',
'CACDTLGKTDKLIF',
'CACDTLPLKTGGPLYTDKLIF',
'CACDTLRLGDPLNTDKLIF',
'CACDTVALGDTESSWDTRQMFF',
'CACDTVGAVLGDPKGTDKLIF',
'CACDTVGDGPDTDKLIF',
'CACDTVGDTADKLIF',
'CACDTVGDTHSWDTRQMFF',
'CACDTVGGSTDKLIF',
'CACDTVGIPPDKLIF',
'CACDTVGYGEGDTDKLIF',
'CACDTVISSNRRGGDKLIF',
'CACDTVPPGDTGTDKLIF',
'CACDTVRFTGGYENTDKLIF',
'CACDYVLGAEDKLIF',
'CACEGILKSEPLGIDKLIF',
'CACEMLGHPPGDKLIF',
'CACVSLDLSYTDKLIF',
'CALGEIAFRSRTGGPPYTDKLIF',
'CALGTAYFLRDPGADKLIF',
'CAVKVPLTSSPREGPTVLHDKLIF']
seqs = [s[:10] for s in mixed_seqs]
class TestDistances(unittest.TestCase):
def test_haming_metric(self):
self.assertTrue(pwsd.metrics.hamming_distance(seqs[0], seqs[1]) == 4)
self.assertTrue(pwsd.metrics.hamming_distance(seqs[0], seqs[0]) == 0)
def test_subst(self):
subst_dict = pwsd.matrices.dict_from_matrix(parasail.blosum62)
for s1, s2 in zip(seqs[-10:], seqs[:10]):
str_d = pwsd.metrics.str_subst_metric(s1, s2, subst_dict, as_similarity=False, na_penalty=None)
np_d = pwsd.metrics.np_subst_metric(pwsd.matrices.seq2vec(s1),
pwsd.matrices.seq2vec(s2),
parasail.blosum62.matrix, as_similarity=False)
# print('%s\t%s\t%1.0f\t%1.0f' % (s1, s2, str_d, np_d))
self.assertTrue(str_d == np_d)
def test_nw_metric(self):
subst_dict = pwsd.matrices.dict_from_matrix(parasail.blosum62)
nw_d = pwsd.metrics.nw_metric(mixed_seqs[0], mixed_seqs[1], matrix='blosum62', open=3, extend=3)
for s1, s2 in zip(seqs[-10:], seqs[:10]):
nw_d = pwsd.metrics.nw_metric(s1, s2, matrix='blosum62', open=30, extend=30)
str_d = pwsd.metrics.str_subst_metric(s1, s2, subst_dict, as_similarity=False, na_penalty=None)
self.assertTrue(nw_d == str_d)
def test_nw_hamming_metric(self):
subst_dict = pwsd.matrices.dict_from_matrix(parasail.blosum62)
nw_d = pwsd.metrics.nw_hamming_metric(mixed_seqs[0], mixed_seqs[1], matrix='blosum62', open=3, extend=3)
for s1, s2 in zip(seqs[-10:], seqs[:10]):
nw_d = pwsd.metrics.nw_hamming_metric(s1, s2, matrix='blosum62', open=30, extend=30)
str_d = pwsd.metrics.hamming_distance(s1, s2)
# print('%s\t%s\t%1.0f\t%1.0f' % (s1, s2, str_d, nw_d))
self.assertTrue(nw_d == str_d)
class TestApply(unittest.TestCase):
def test_pw_sq(self):
dvec = pwsd.apply_pairwise_sq(seqs[:10], pwsd.metrics.hamming_distance, ncpus=1)
dmat = squareform(dvec)
self.assertTrue(dmat.shape[0] == 10 and dmat.shape[1] == 10)
def test_pw_sq_subst(self):
subst_dict = pwsd.matrices.dict_from_matrix(parasail.blosum62)
dvec = pwsd.apply_pairwise_sq(seqs[:10], pwsd.metrics.str_subst_metric, subst_dict=subst_dict, ncpus=1)
dmat = squareform(dvec)
self.assertTrue(dmat.shape[0] == 10 and dmat.shape[1] == 10)
def test_pw_sq_nonuniq(self):
dvec = pwsd.apply_pairwise_sq(seqs[:10], pwsd.metrics.hamming_distance, ncpus=1)
dmat = squareform(dvec)
dvec2 = pwsd.apply_pairwise_sq(seqs[:10] + seqs[:10], pwsd.metrics.hamming_distance, ncpus=1)
dmat2 = squareform(dvec2)
self.assertTrue(np.all(dmat2[:10, :][:, :10] == dmat))
import collections
import copy
import os
import platform
from typing import Dict, Union, List, Any, Optional, Tuple
import numpy as np
import gym
import gym.spaces
from cffi import FFI
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ACTION_KEY = "action"
STATE_NEEDS_RESET = "needs_reset"
STATE_WAIT_ACT = "wait_act"
STATE_WAIT_WAIT = "step_wait"
class CVecEnv:
"""
An environment instance created by an EnvLib, uses the VecEnv interface.
https://github.com/openai/baselines/blob/master/baselines/common/vec_env/__init__.py
Args:
num_envs: number of environments to create
lib_dir: a folder containing either lib{name}.so (Linux), lib{name}.dylib (Mac), or {name}.dll (Windows)
lib_name: name of the library (minus the lib part)
c_func_defs: list of cdefs that are passed to FFI in order to define custom functions that can then be called with env.call_func()
options: options to pass to the libenv_make() call for this environment
debug: if set to True, check array data to make sure it matches the provided spaces
reuse_arrays: reduce allocations by using the same numpy arrays for each reset(), step(), and render() call
"""
def __init__(
self,
num_envs: int,
lib_dir: str,
lib_name: str = "env",
c_func_defs: Optional[List[str]] = None,
options: Optional[Dict] = None,
debug: bool = False,
reuse_arrays: bool = False,
) -> None:
self._debug = debug
self._reuse_arrays = reuse_arrays
if options is None:
options = {}
options = copy.deepcopy(options)
if platform.system() == "Linux":
lib_filename = f"lib{lib_name}.so"
elif platform.system() == "Darwin":
lib_filename = f"lib{lib_name}.dylib"
elif platform.system() == "Windows":
lib_filename = f"{lib_name}.dll"
else:
raise Exception(f"unrecognized platform {platform.system()}")
if c_func_defs is None:
c_func_defs = []
# load cdef for libenv.h
libenv_cdef = ""
with open(os.path.join(SCRIPT_DIR, "libenv.h")) as f:
inside_cdef = False
for line in f:
if line.startswith("// BEGIN_CDEF"):
inside_cdef = True
elif line.startswith("// END_CDEF"):
inside_cdef = False
elif line.startswith("#if") or line.startswith("#endif"):
continue
if inside_cdef:
line = line.replace("LIBENV_API", "")
libenv_cdef += line
self._ffi = FFI()
self._ffi.cdef(libenv_cdef)
for cdef in c_func_defs:
self._ffi.cdef(cdef)
self._lib_path = os.path.join(lib_dir, lib_filename)
assert os.path.exists(self._lib_path), f"lib not found at {self._lib_path}"
# unclear if this is necessary, but nice to not have symbols conflict if possible
dlopen_flags = (
self._ffi.RTLD_NOW | self._ffi.RTLD_LOCAL # pylint: disable=no-member
)
if platform.system() == "Linux":
dlopen_flags |= self._ffi.RTLD_DEEPBIND # pylint: disable=no-member
self._c_lib = self._ffi.dlopen(name=self._lib_path, flags=dlopen_flags)
# dlclose will be called automatically when the library goes out of scope
# https://cffi.readthedocs.io/en/latest/cdef.html#ffi-dlopen-loading-libraries-in-abi-mode
# on mac os x, the library may not always be unloaded when you expect
# https://developer.apple.com/videos/play/wwdc2017/413/?time=1776
# loading/unloading the library all the time can be slow
# it may be useful to keep a reference to an environment (and thus the c_lib object)
# to avoid this happening
self._options = options
self._state = STATE_NEEDS_RESET
c_options, self._options_keepalives = self._convert_options(
self._ffi, self._c_lib, options
)
self._c_env = self._c_lib.libenv_make(num_envs, c_options[0])
self.reward_range = (float("-inf"), float("inf"))
self.spec = None
self.num_envs = num_envs
self.observation_space = self._get_spaces(self._c_lib.LIBENV_SPACES_OBSERVATION)
self._action_space = self._get_spaces(self._c_lib.LIBENV_SPACES_ACTION)
self._info_space = self._get_spaces(self._c_lib.LIBENV_SPACES_INFO)
self._render_space = self._get_spaces(self._c_lib.LIBENV_SPACES_RENDER)
# allocate buffers
self._observations, self._observation_buffers = self._allocate_dict_space(
self.num_envs, self.observation_space
)
# we only use dict spaces for consistency, but action is always a single space
# the private version is the dict space, while the public version is a single space
assert len(self._action_space.spaces) == 1, "action space can only be 1 element"
assert list(self._action_space.spaces.keys())[0] == ACTION_KEY
self.action_space = self._action_space.spaces[ACTION_KEY]
dict_actions, self._action_buffers = self._allocate_dict_space(
self.num_envs, self._action_space
)
self._actions = dict_actions[ACTION_KEY]
self._renders, self._renders_buffers = self._allocate_dict_space(
self.num_envs, self._render_space
)
self.metadata = {"render.modes": list(self._render_space.spaces.keys())}
self._infos, self._infos_buffers = self._allocate_dict_space(
self.num_envs, self._info_space
)
self._rews, self._rews_buffer = self._allocate_array(
self.num_envs, np.dtype("float32")
)
self._dones, self._dones_buffer = self._allocate_array(
self.num_envs, np.dtype("bool")
)
assert np.dtype("bool").itemsize == 1
c_step = self._ffi.new("struct libenv_step *")
c_step.obs = self._observation_buffers
# cast the pointer to the buffer to avoid a warning from cffi
c_step.rews = self._ffi.cast(
self._ffi.typeof(c_step.rews).cname, self._rews_buffer
)
c_step.dones = self._ffi.cast(
self._ffi.typeof(c_step.dones).cname, self._dones_buffer
)
c_step.infos = self._infos_buffers
self._c_step = c_step
self.closed = False
self.viewer = None
def __repr__(self):
return f"<CVecEnv lib_path={self._lib_path} options={self._options}>"
def _numpy_aligned(self, shape, dtype, align=64):
"""
Allocate an aligned numpy array, based on https://github.com/numpy/numpy/issues/5312#issuecomment-299533915
"""
n_bytes = np.prod(shape) * dtype.itemsize
arr = np.zeros(n_bytes + (align - 1), dtype=np.uint8)
data_align = arr.ctypes.data % align
offset = 0 if data_align == 0 else (align - data_align)
view = arr[offset : offset + n_bytes].view(dtype)
return view.reshape(shape)
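# Illustrative check of the alignment guarantee (hypothetical shape/dtype):
#   arr = self._numpy_aligned(shape=(8, 84, 84), dtype=np.dtype("float32"), align=64)
#   assert arr.ctypes.data % 64 == 0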
def _allocate_dict_space(
self, num_envs: int, dict_space: gym.spaces.Dict
) -> Tuple[collections.OrderedDict, Any]:
"""
Allocate arrays for a space, returns an OrderedDict of numpy arrays along with a backing bytearray
"""
result = collections.OrderedDict() # type: collections.OrderedDict
length = len(dict_space.spaces) * num_envs
buffers = self._ffi.new(f"void *[{length}]")
for space_idx, (name, space) in enumerate(dict_space.spaces.items()):
actual_shape = (num_envs,) + space.shape
arr = self._numpy_aligned(shape=actual_shape, dtype=space.dtype)
result[name] = arr
for env_idx in range(num_envs):
buffers[space_idx * num_envs + env_idx] = self._ffi.from_buffer(
arr.data[env_idx:]
)
return result, buffers
def _allocate_array(self, num_envs: int, dtype: np.dtype) -> Tuple[np.ndarray, Any]:
arr = self._numpy_aligned(shape=(num_envs,), dtype=dtype)
return arr, self._ffi.from_buffer(arr.data)
@staticmethod
def _convert_options(ffi: Any, c_lib: Any, options: Dict) -> Any:
"""
Convert a dictionary to libenv_options
"""
keepalives = (
[]
) # add variables to here to keep them alive after this function returns
c_options = ffi.new("struct libenv_options *")
c_option_array = ffi.new("struct libenv_option[%d]" % len(options))
for i, (k, v) in enumerate(options.items()):
name = str(k).encode("utf8")
assert (
len(name) < c_lib.LIBENV_MAX_NAME_LEN - 1
), "length of options key is too long"
if isinstance(v, bytes):
c_data = ffi.new("char[]", v)
dtype = c_lib.LIBENV_DTYPE_UINT8
count = len(v)
elif isinstance(v, str):
c_data = ffi.new("char[]", v.encode("utf8"))
dtype = c_lib.LIBENV_DTYPE_UINT8
count = len(v)
elif isinstance(v, bool):
c_data = ffi.new("uint8_t*", v)
dtype = c_lib.LIBENV_DTYPE_UINT8
count = 1
elif isinstance(v, int):
assert -2 ** 31 < v < 2 ** 31
c_data = ffi.new("int32_t*", v)
dtype = c_lib.LIBENV_DTYPE_INT32
count = 1
elif isinstance(v, float):
c_data = ffi.new("float*", v)
dtype = c_lib.LIBENV_DTYPE_FLOAT32
count = 1
elif isinstance(v, np.ndarray):
c_data = ffi.new("char[]", v.tobytes())
if v.dtype == np.dtype("uint8"):
dtype = c_lib.LIBENV_DTYPE_UINT8
elif v.dtype == np.dtype("int32"):
# Output GT heatmap as an auxiliary input
# Single object only
# Additionally output the previous image crop
# at the same location (but centered on the (current?) object)
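# Rough sketch of the per-keypoint Gaussian target described above (illustrative
# only; the names cx, cy, heatmap_w, heatmap_h and sigma here are assumptions,
# not this module's API):
#   ys, xs = np.mgrid[0:heatmap_h, 0:heatmap_w]
#   heatmap = np.exp(-((xs - cx) ** 2 + (ys - cy) ** 2) / (2 * sigma ** 2))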
import torch
import torchvision
from .abstract_datasets import DetectionDataset
import cv2
import os
import numpy as np
import json
import math
class Surgical_Hands_v2(DetectionDataset):
"""
Data annotated from publicly available surgical hand videos
x training samples
x testing samples
"""
def __init__(self, *args, **kwargs):
super(Surgical_Hands_v2, self).__init__(*args, **kwargs)
self.load_type = kwargs['load_type']
self.json_path = kwargs['json_path']
# Maximum number of annotated object present in a single frame in entire dataset
# Dictates the return size of annotations in __getitem__
self.max_objects = 1
self.sigma = kwargs['gaussian_sigma']
self.heatmap_size = kwargs['heatmap_size']
self.image_height = self.final_shape[0]
self.image_width = self.final_shape[1]
self.stride = (self.image_width / self.heatmap_size[0],
self.image_height / self.heatmap_size[1]) # effective stride of the entire network
self.num_keypoints = 21 # 21 annotated hand keypoints
self.sc = kwargs['sc']
self.mask_occ = False # Treat occluded keypoints as un-annotated, if False treat them as GT labels
self.joint_names = ['wrist', 'thumb_k', 'thumb_b', 'thumb_m', 'thumb_t', \
'index_k', 'index_b', 'index_m', 'index_t', \
'middle_k', 'middle_b', 'middle_m', 'middle_t', \
'ring_k', 'ring_b', 'ring_m', 'ring_t', \
'pinky_k', 'pinky_b', 'pinky_m', 'pinky_t']
self.neighbor_link = [[0, 1], [1, 2], [2, 3], [3, 4],
[0, 5], [5, 6], [6, 7], [7, 8],
[0, 9], [9, 10], [10, 11], [11, 12],
[0, 13], [13, 14], [14, 15], [15, 16],
[0, 17], [17, 18], [18, 19], [19, 20]]
# Colors RGB
self.colors = [[187, 38, 26], [187, 38, 26], [187, 38, 26], [187, 38, 26],
[172, 201, 63], [172, 201, 63], [172, 201, 63], [172, 201, 63],
[92, 200, 97], [92, 200, 97], [92, 200, 97], [92, 200, 97],
[28, 84, 197], [28, 84, 197], [28, 84, 197], [28, 84, 197],
[149, 40, 197], [149, 40, 197], [149, 40, 197], [149, 40, 197]]
self.categories = {'supercategory': 'hand',
'id': 2,
'name': 'hand', # maybe distinguish between left/right hand?
'keypoints': self.joint_names,
'skeleton': torch.Tensor(self.neighbor_link)}
self.viz = kwargs['viz']
if self.load_type == 'train':
self.transforms = kwargs['model_obj'].train_transforms
else:
self.transforms = kwargs['model_obj'].test_transforms
# Track statistics of hand positions through dataset
avg_hand_pts = np.zeros((self.num_keypoints, 2))
num_hand_pts = np.zeros((self.num_keypoints, 1))
print('{} samples in {}'.format(len(self.samples), self.load_type))
self.new_samples = []
self.img_id_to_kpts = {} # Mapping between images and keypoints within them
self.t1_to_t0 = {} # Point to the previous image. First image points to itself
kwargs.get('min_temporal_distance', 4)
min_temporal_dist = kwargs.get('min_temporal_dist', 4) # final name
vid_id_to_frames = {} # all the labeled frames in each vid_id
vid_id_to_path = {}
prev_vid_id = None
prev_frame_id = None
for idx, item in enumerate(self.samples):
width, height = item['frame_size']
vid_id = item['frames'][0]['vid_id']
labeled_frames = vid_id_to_frames.get(vid_id, [])
lbl_frame_paths = vid_id_to_path.get(vid_id, [])
for frm in item['frames']:
bbox_data = []
if not frm['is_labeled']:
continue
frame_id = int(frm['frame_id'])
frame_pth = frm['img_path']
labeled_frames.append(frame_id)
lbl_frame_paths.append(frame_pth)
if frame_id not in self.img_id_to_kpts:
self.img_id_to_kpts[frame_id] = {}
for obj in frm['objs']:
kpts = np.array(obj['hand_pts'])
from __future__ import print_function
import pickle
import numpy as np
import os
import gzip
import matplotlib.pyplot as plt
from random import randint
from sklearn.utils import shuffle
from model import Model
from utils import *
from tensorboard_evaluation import Evaluation
import tensorflow as tf
from datetime import datetime
def read_data(datasets_dir="./data", frac = 0.1):
"""
This method reads the states and actions recorded in drive_manually.py
and splits it into training/ validation set.
"""
print("... read data")
data_file = os.path.join(datasets_dir, 'data.pkl.gzip')
f = gzip.open(data_file, 'rb')
data = pickle.load(f)
# get images as features and actions as targets
X = np.array(data["state"])
import numpy as np
# import scipy as sp #TODO doesn't work
from matplotlib.pyplot import *
import itertools as itools
from functools import reduce
# Constants
epsilon = np.finfo(float).eps
#########################################################
# Joint probability table
# Order of dimensions X,Y,Z,U (4x4x2x2)
Ptable = np.zeros((4,4,2,2))
Ptable[0,0,0,0] = Ptable[1,1,0,0] = Ptable[0,1,1,0] = Ptable[1,0,1,0] = 1.0/8.0
Ptable[2,2,0,1] = Ptable[3,3,1,1] = 1.0/4.0
# Table for P_XY
PP = np.sum(Ptable, axis=(2,3))
def create_table(n=4):
""" Create a n-dimensions-joint probability table for X,Y
X and Y have range n
Input:
n: range of varaibles X and Y
Output:
Ptable: Table of joint probability for X,Y. Dimension is nxn
"""
Ptable = np.zeros((n,n))
h = n//2
Ptable[:h,:h] = 1.0/(2*h*h)
Ptable[h:,h:] = np.eye(h)*1.0/(2*h)
return Ptable
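# Note: create_table(4) reproduces the P(X,Y) table PP defined at the top of this file
# (four cells of 1/8 in the upper-left 2x2 block, and 1/4 on the lower-right diagonal).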
#########################################################
#########################################################
# Utilities on probabilities
def marginals(Ptable):
"""Compute the value of marginals for a joint probability
Input:
Ptable: table of joint probability
Output:
res: list of probabilities of the marginals, in the same order
as listed in np.shape(Ptable)
"""
res = []
sh = np.shape(Ptable)
numDim = np.shape(sh)[0] # number of dimensions of the table
# generate numDim tuples for sum
# (1,2,3), (2,3,0), (3,0,1), (0,1,2)
ll = list(range(0,numDim))
tuples = []
for i in range(numDim):
tuples.append(tuple(ll[1:]))
ll = ll[1:] + [ll[0]]
# reduce Ptable with `sum` and get the marginals
for tup in tuples:
res.append(np.sum(Ptable, axis=tup))
return res
def marginal_Z(Ptable):
"""Values of the marginal Z, derived from P(X,Y)
Input:
Ptable: Table of joint probability P(X,Y)
Output:
res: array of probabilities for marginal Z
"""
sh = np.shape(Ptable)
n = sh[0]//2
res = np.zeros(n)
# get all combinations of indices for Ptable
combinations = [(x,y) for x in range(sh[0]) for y in range(sh[1]) if x<n and y<n]
# combinations = list(filter(lambda t: (t[0]<n) and (t[1]<n), combinations))
# get marginal of X
m = marginals(Ptable)
px = m[0]
for z in range(n):
# create a filter for this value of Z
f = lambda t: (t[0] + t[1])%n == z
# get only the indices that satisfy X+Y mod n = z
# where z is the value of Z (also the index in res)
ls = list(filter(f, combinations))
# P(Z=z) = sum_{X+Ymod2=z} P(X,Y) + P(X=n+z)
for idx in ls:
res[z]+= Ptable[idx]
res[z]+= px[n+z]
return res
def evalZ(x,y):
"""value of Z based on values of X and Y
Input:
x,y: values of x and y
Output:
res: Z
"""
if (x >= 2):
return x%2
else:
return (x+y)%2
#########################################################
#########################################################
# Utilities on Mutual Information measure
# Note: different behaviour using
# math.log2(p) and np.log(p)/np.log(2)
h = lambda p: -(p*np.log2(p) + (1-p)*np.log2(1-p))
def entropy(px):
"""Entropy of random variable x
Input:
px: list containing probabilities of random variable X
Output:
res: entropy of random varaible
"""
res = 0.0
for p in px:
res += p*np.log2(p)
return -res
def I(Ptable):
"""Mutual information of r.v. X and Y
Input:
Ptable: array-like table of joint probability for X and Y
Output:
res: mutual information I(X;Y)
"""
m = marginals(Ptable)
px = m[0]
py = m[1]
res = 0.0
#TODO
# mask = Ptable != 0
# Ptable[mask] = Ptable[mask]*np.log2(Ptable[mask]/(x*y))
# A[A!=0] += np.log2(A[A!=0]/(x*y))
for ix,x in enumerate(px):
for iy,y in enumerate(py):
temp = Ptable[ix,iy]
if temp > epsilon:
res += temp*np.log2(temp/(x*y))
return res
#########################################################
#########################################################
# Utilities on interpolation towards uniform distribution
#
def step_linear(Ptable, n=10):
"""Linear stepping
Input:
Ptable: table of joint probability
n: number of steps to interpolate. Default n=10
Output:
PPs: list of tables, interpolated from Ptable
towards uniform ditribution
"""
alpha,stepsize = np.linspace(0,1,num=n, retstep=True)
sh = np.shape(Ptable)
norm = 1.0 / np.product(sh)
import h5py
import numpy as np
import include.diag as diag
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
def angular_derivative(array, wvn):
return np.fft.ifft(1j * wvn * np.fft.fft(array))
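# (Spectral derivative on a periodic grid: transform to Fourier space, multiply each
#  mode by i*k, transform back. E.g. for f = sin(k*theta) this returns ~ k*cos(k*theta).)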
quench_rates = [100, 150, 200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700, 750, 800, 850]
spin_winding_list = []
for quench in quench_rates:
filename = '../../data/1d_kibble-zurek/single_runs/1d_polar-BA-FM_{}.hdf5'.format(quench)
with h5py.File(filename, 'r') as data_file:
# Load in data:
x = data_file['grid/x']
Nx = len(x)
dx = x[1] - x[0]
dkx = 2 * np.pi / (Nx * dx)
Kx = np.fft.fftshift(np.arange(-Nx // 2, Nx // 2) * dkx)
dt = data_file['time/dt'][...]
Nframe = data_file['time/Nframe'][...]
frame = int(quench / (Nframe * dt))
psi_plus = data_file['wavefunction/psi_plus'][:, frame]
psi_0 = data_file['wavefunction/psi_0'][:, frame]
psi_minus = data_file['wavefunction/psi_minus'][:, frame]
n = abs(psi_plus) ** 2 + abs(psi_0) ** 2 + abs(psi_minus) ** 2
# Calculate spin vectors:
fx, fy, fz, F = diag.calculate_spin(psi_plus, psi_0, psi_minus, n)
F_plus = fx + 1j * fy
F_minus = fx - 1j * fy
R = Nx * dx / (2 * np.pi) # Radius of ring
dF_plus = angular_derivative(F_plus, Kx)
dF_minus = angular_derivative(F_minus, Kx)
integral = (R / (2j * abs(F_plus) ** 2)) * (F_minus * dF_plus - F_plus * dF_minus)
spin_winding = int(dx * sum(np.real(integral)) / (2 * np.pi * 2 * np.sqrt(Nx)))
"""
Distributions (Re)generation Script
This script generates likelihood and cost distributions based on threat
intelligence data stored in a connected Neo4j graph database. It attempts to
do so for every possible permutation of (size, industry) values.
These are then consumed by `montecarlo.py`, which runs a Monte Carlo
simulation based on these figures.
Acknowledgements: Dr <NAME> & Dr <NAME>
"""
import os
import sys
import argparse
import warnings
import logging as log
from typing import Tuple
import itertools
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from matplotlib import pyplot as plt
from scipy.stats import lognorm
from graph import GraphInterface as gi
# Used for logging, equivalent to `logging.WARNING` + 1.
SUCCESS = 31
# The arbitrary maximum number of incidents that an organisation can experience
# in a year.
MAX_ANNUAL_INCIDENTS = 8000
# Quantifies the quantitative boundaries for human-readable incident frequencies,
# which many sources (e.g., the CSBS 2020) use to present their results.
#
# 'None' = 0
# 'Annually' = 1
# 'Less than monthly' = 2–7
# 'Monthly' = 8–17
# 'Weekly' = 18–79
# 'Daily' = 80–399
# 'More than daily' = 400–8000
BOUNDARIES = {
"None": 0,
"Once per year": 1,
"Less than once a month": 2,
"Once a month": 8,
"Once a week": 18,
"Once a day": 80,
"Several times a day": 400,
"MAX": MAX_ANNUAL_INCIDENTS,
}
OUTPUT_DIR = None
IMAGES = None
# pylint: disable=invalid-name,anomalous-backslash-in-string
def _generate_new_incident_frequency_distribution(pairing: Tuple = (None, None)) -> int:
"""
Generates a new incident frequency distribution.
Notes
-----
(Re)generates the incident frequency distribution for a
:math:`\left(\text{size}, \text{industry}\right)` pairing from the data in
a Neo4j graph database.
Currently this only produces log-normal distributions. Additional types of
distribution can be implemented by overloading this method (by importing the
`multipledispatch` package) and returning the values required for defining
that distribution (e.g., :math:`\mu` and :math:`\sigma` instead of :math:`a`
and :math:`b`).
"""
# pylint: enable=anomalous-backslash-in-string
log.info("Generating new incident frequency distribution for '%s'...", str(pairing))
# Attempts to get the incident probabilities for the pairing from the graph
# database
incident_frequency_probabilities = gi.get_incident_frequency_probabilities(
list(BOUNDARIES.values())[:-1], pairing
)
if incident_frequency_probabilities is None:
log.info(
"No incident frequency distribution generated for '%s'.",
str(pairing),
)
return 0
log.debug(
"Returned values are: incident frequency probabilities = %s",
str(incident_frequency_probabilities),
)
# If values are found, generate a distribution
Fs = np.cumsum(incident_frequency_probabilities)
xs = np.log(list(BOUNDARIES.values())[1:])
ys = np.log(1 - Fs)
data = pd.DataFrame(xs, ys)
# pylint: disable=line-too-long
# See <https://www.statsmodels.org/stable/_modules/statsmodels/stats/stattools.html#omni_normtest> for explanation
# pylint: enable=line-too-long
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fit = smf.ols(formula="ys ~ xs", data=data).fit()
log.debug(fit.summary())
# Get the parameters for the generated distribution and store them in the
# graph database.
alogb = fit.params[0]
a = -fit.params[1]
b = np.exp(alogb / a)
gi.create_incident_frequency_distribution_node(pairing, a, b)
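# (For reference: the regression above fits log(1 - F(x)) = a*log(b) - a*log(x), i.e. a
# tail of the form 1 - F(x) = (b / x)**a, so `a` is the negated slope and `b` is recovered
# from the intercept as exp(intercept / a).)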
log.log(
SUCCESS,
"New incident frequency distribution successfully generated for '%s'.",
str(pairing),
)
return 1
# pylint: enable=invalid-name
# pylint: disable=anomalous-backslash-in-string
def _generate_new_incident_costs_distribution(pairing: Tuple = (None, None)) -> int:
"""
(Re)generates the incident cost distribution for a
:math:`\left(\text{size}, \text{industry}\right)` pairing from the data in
a Neo4j graph database.
Currently this only produces log-normal distributions. Additional types of
distribution can be implemented by overloading this method (by importing the
`multipledispatch` package) and returning the values required for defining
that distribution (e.g., :math:`\mu` and :math:`\sigma` instead of :math:`a`
and :math:`b`).
"""
# pylint: enable=anomalous-backslash-in-string
# Plots the distribution for the average cost of incident(s) over 12 months
log.info("Generating new incident cost distribution for '%s'...", str(pairing))
incident_mean_cost, incident_median_cost = gi.get_incident_cost_averages(pairing)
if incident_mean_cost is None or incident_median_cost is None:
log.info(
"No incident costs distribution generated for '%s'.",
str(pairing),
)
return 0
log.debug(
"Returned values are: mean = %s, median = %s",
str(incident_mean_cost),
str(incident_median_cost),
)
log_stddev = np.sqrt(
2
* (np.log(incident_mean_cost) - np.log(incident_median_cost))
)
import util
import random
import numpy as np
import pandas as pd
from sklearn.cross_validation import train_test_split
from sklearn.metrics import f1_score
from sklearn.preprocessing import StandardScaler
import itertools
from sklearn import svm, linear_model, cross_validation
from sklearn.svm import SVC
seed = 1
random.seed(seed)
np.random.seed(seed)
# Only using the small xaa split here
df_all = pd.read_csv('../data/Fold1/xaa.csv')
df_sample = df_all.ix[:,1:]
# Splitting dataset
X, Y = util.sep_feat_labels(df_sample)
X.ix[:,1:] = StandardScaler().fit_transform(X.ix[:,1:]) # rescaling features
x_train, x_dev, y_train, y_dev = train_test_split(X, Y, test_size=0.2, random_state=seed)
x_train_qid = x_train['qid'].copy()
x_train = x_train.ix[:,1:].copy()
x_dev_qid = x_dev['qid'].copy()
x_dev = x_dev.ix[:,1:].copy()
#===============================================================
# Code from: https://gist.github.com/agramfort/2071994
#===============================================================
def transform_pairwise(X, y):
"""Transforms data into pairs with balanced labels for ranking
Transforms a n-class ranking problem into a two-class classification
problem. Subclasses implementing particular strategies for choosing
pairs should override this method.
In this method, all pairs are choosen, except for those that have the
same target value. The output is an array of balanced classes, i.e.
there are the same number of -1 as +1
Parameters
----------
X : array, shape (n_samples, n_features)
The data
y : array, shape (n_samples,) or (n_samples, 2)
Target labels. If it's a 2D array, the second column represents
the grouping of samples, i.e., samples with different groups will
not be considered.
Returns
-------
X_trans : array, shape (k, n_feaures)
Data as pairs
y_trans : array, shape (k,)
Output class labels, where classes have values {-1, +1}
"""
X_new = []
y_new = []
y = np.asarray(y)
if y.ndim == 1:
y = np.c_[y, np.ones(y.shape[0])]
comb = itertools.combinations(range(X.shape[0]), 2)
for k, (i, j) in enumerate(comb):
if y[i, 0] == y[j, 0] or y[i, 1] != y[j, 1]:
# skip if same target or different group
continue
X_new.append(X[i] - X[j])
y_new.append(np.sign(y[i, 0] - y[j, 0]))
# output balanced classes
if y_new[-1] != (-1) ** k:
y_new[-1] = - y_new[-1]
X_new[-1] = - X_new[-1]
return np.asarray(X_new), np.asarray(y_new)
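# Illustrative usage with the split prepared above (a sketch, not part of the original script):
#   X_pair, y_pair = transform_pairwise(np.asarray(x_train), np.c_[np.asarray(y_train), np.asarray(x_train_qid)])
#   # y_pair is balanced in {-1, +1}, and pairs never mix samples from different qid groups.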
"""Corrector function for TESSCut TPFs"""
import lightkurve as lk
import numpy as np
from .backdrop import BackDrop
class TESSCutCorrector(lk.RegressionCorrector):
"""Remove TESS jitter and sky background noise using linear regression.
Will automatically generate a design matrix based on `tess_backdrop` stored files.
Parameters
----------
tpf : `lightkurve.TargetPixelFile`
The target pixel file for a target
aperture_mask : np.ndarray of booleans
Aperture mask to apply to TPF. If none, one will be
selected per `lightkurve` defaults.
"""
def __init__(self, tpf, aperture_mask=None):
"""
Parameters
----------
tpf : `lightkurve.TargetPixelFile`
The target pixel file for a target
aperture_mask : np.ndarray of booleans
Aperture mask to apply to TPF. If none, one will be
selected per `lightkurve` defaults.
"""
if aperture_mask is None:
aperture_mask = tpf.create_threshold_mask(3)
self.aperture_mask = aperture_mask
lc = tpf.to_lightcurve(aperture_mask=aperture_mask)
# Remove cadences that have NaN flux (cf. #874). We don't simply call
# `lc.remove_nans()` here because we need to mask both lc & tpf.
nan_mask = np.isnan(lc.flux)
lc = lc[~nan_mask]
self.b = BackDrop()
self.b.load(tpf.sector, tpf.camera, tpf.ccd)
self.tpf = self.b.correct_tpf(tpf)[~nan_mask]
self.lc = self.tpf.to_lightcurve(aperture_mask=aperture_mask)
super().__init__(lc=self.lc)
def __repr__(self):
if self.lc.label == "":
return "TESSCutCorrector (ID: {})".format(self.lc.targetid)
return "TESSCutCorrector (ID: {})".format(self.lc.label)
def correct(
self,
cadence_mask=None,
sigma=5,
niters=3,
propagate_errors=False,
spline_timescale=0.5,
spline_degree=3,
npca_components=10,
):
"""Returns a systematics-corrected light curve from a TESSCut TPF.
Parameters
----------
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
sigma : int (default 5)
Standard deviation at which to remove outliers from fitting
niters : int (default 5)
Number of iterations to fit and remove outliers
propagate_errors : bool (default False)
Whether to propagate the uncertainties from the regression. Default is False.
Setting to True will increase run time, but will sample from multivariate normal
distribution of weights.
spline_timescale : float, None
Time between knots in spline component. If None, will not use spline.
spline_degree : int
Polynomial degree of spline.
npca_components : int, default 10
Number of terms added to the design matrix for jitter correction.
Returns
-------
clc : `lightkurve.LightCurve`
Systematics-corrected `lightkurve.LightCurve`.
"""
bad = ~lk.utils.TessQualityFlags.create_quality_mask(
self.lc.quality,
self.tpf.quality & lk.utils.TessQualityFlags.DEFAULT_BITMASK,
)
med_flux = np.median(self.lc.flux[cadence_mask & ~bad].value)
if spline_timescale is not None:
# Spline DM
knots = np.linspace(
self.lc.time[0].value,
self.lc.time[-1].value,
int(
(self.lc.time[-1].value - self.lc.time[0].value) / spline_timescale
),
)[1:-1]
dm_spline = lk.designmatrix.create_sparse_spline_matrix(
self.lc.time.value, knots=knots, degree=spline_degree
)
dm_spline.prior_mu = np.zeros(dm_spline.shape[1]) * med_flux
dm_spline.prior_sigma = np.ones(dm_spline.shape[1]) * med_flux * 3
dm_ones = lk.DesignMatrix(
np.ones(len(self.lc.flux))[:, None],
name="ones",
prior_mu=[med_flux],
prior_sigma=[0.1 * med_flux],
)
# Scattered Light DM
bkg = self.b.build_correction(
np.arange(self.tpf.shape[2]) + self.tpf.column,
np.arange(self.tpf.shape[1]) + self.tpf.row,
)
bkg = bkg[:, self.aperture_mask].sum(axis=1)
bkg -= np.median(bkg)
dm_bkg = lk.DesignMatrix(
np.vstack([bkg ** idx for idx in np.arange(1, 4)]),
"""
This file contains specific functions for computing losses of FCOS
file
"""
import logging
import torch
from torch.nn import functional as F
from torch import nn
import os
from ..utils import concat_box_prediction_layers
from fcos_core.layers import IOULoss
from fcos_core.layers import SigmoidFocalLoss
from fcos_core.layers import sigmoid_focal_loss_bce
from fcos_core.modeling.matcher import Matcher
from fcos_core.modeling.utils import cat
from fcos_core.structures.boxlist_ops import boxlist_iou
from fcos_core.structures.boxlist_ops import cat_boxlist
INF = 100000000
def get_num_gpus():
return int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
def reduce_sum(tensor):
if get_num_gpus() <= 1:
return tensor
import torch.distributed as dist
tensor = tensor.clone()
dist.all_reduce(tensor, op=dist.reduce_op.SUM)
return tensor
class FCOSLossComputation(object):
"""
This class computes the FCOS losses.
"""
def __init__(self, cfg):
self.cls_loss_func = SigmoidFocalLoss(
cfg.MODEL.FCOS.LOSS_GAMMA,
cfg.MODEL.FCOS.LOSS_ALPHA
)
self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
self.center_sampling_radius = cfg.MODEL.FCOS.CENTER_SAMPLING_RADIUS
self.iou_loss_type = cfg.MODEL.FCOS.IOU_LOSS_TYPE
self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS
# we make use of IOU Loss for bounding boxes regression,
# but we found that L1 in log scale can yield a similar performance
self.box_reg_loss_func = IOULoss(self.iou_loss_type)
self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="sum")
self.COUNT = [0, 0, 0, 0, 0]
def gmm_clustter_2(self, cls_loss):
from sklearn.mixture import GaussianMixture
import numpy as np
# mean = torch.mean(cls_loss)
# sigma = torch.std(cls_loss)
min_loss = torch.min(cls_loss).cpu().detach().numpy()
max_loss = torch.max(cls_loss).cpu().detach().numpy()
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
precisions_init = np.array([0.1, 0.1]).reshape(2, 1, 1)
cls_loss = cls_loss.view(-1, 1).cpu().detach().numpy()
gm = GaussianMixture(n_components=2, weights_init=[0.5, 0.5],
means_init=means_init, precisions_init= precisions_init)
gm.fit(cls_loss)
results = gm.predict(cls_loss)
assignments = results == 0
if len(np.nonzero(assignments)[0]) > 0:
scores = gm.score_samples(cls_loss)
score_fgs = scores[assignments]
fgs_inds = np.nonzero(assignments)[0]
fgs_thr_ind = np.argmax(score_fgs)
assignments_ = cls_loss.reshape(-1) <= cls_loss[fgs_inds[fgs_thr_ind]]
assignments = assignments & assignments_
return torch.from_numpy(assignments)
def gmm_clustter(self, cls_loss):
from sklearn.mixture import GaussianMixture
import numpy as np
topk = 12
topk = min(topk, torch.numel(cls_loss))
cls_loss = cls_loss.cpu().detach().numpy().flatten()
lenth = cls_loss.shape[0]
assign_topk = np.argpartition(cls_loss, topk - 1)[0:topk]
cls_loss = cls_loss[assign_topk]
min_loss = np.min(cls_loss)
max_loss = np.max(cls_loss)
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
precisions_init = np.array([0.1, 0.1]).reshape(2, 1, 1)
cls_loss = cls_loss.reshape((-1, 1))
gm = GaussianMixture(n_components=2, weights_init=[0.5, 0.5],
means_init=means_init, precisions_init= precisions_init)
gm.fit(cls_loss)
results = gm.predict(cls_loss)
assign_temp = results == 0
assignments = np.zeros(lenth, dtype=np.bool)
assignments[assign_topk[assign_temp]] = True
# if len(np.nonzero(assignments)[0]) > 0:
# scores = gm.score_samples(cls_loss)
# score_fgs = scores[assignments]
# fgs_inds = np.nonzero(assignments)[0]
# fgs_thr_ind = np.argmax(score_fgs)
# assignments_ = cls_loss.reshape(-1) < cls_loss[fgs_inds[fgs_thr_ind]]
# assignments = assignments & assignments_
return torch.from_numpy(assignments)
def topk_clustter(self, cls_loss, k = 9):
import numpy as np
# mean = torch.mean(cls_loss)
# sigma = torch.std(cls_loss)
min_loss = torch.min(cls_loss).cpu().detach().numpy()
max_loss = torch.max(cls_loss).cpu().detach().numpy()
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
precisions_init = np.array([0.1, 0.1]).reshape(2, 1, 1)
cls_loss = cls_loss.flatten()
k = min(k, len(cls_loss))
cls_loss = 0 - cls_loss
_, assignments = torch.topk(cls_loss, k)
return assignments
def avg_clustter(self, cls_loss):
mean = torch.mean(cls_loss)
sigma = torch.std(cls_loss)
assignments = cls_loss <= mean
return assignments
def dbscan_clustter(self, loss):
from sklearn.cluster import DBSCAN
import numpy as np
def get_ious(self, pred, target):
pred_left = pred[:, 0]
pred_top = pred[:, 1]
pred_right = pred[:, 2]
pred_bottom = pred[:, 3]
target_left = target[:, 0]
target_top = target[:, 1]
target_right = target[:, 2]
target_bottom = target[:, 3]
target_area = (target_left + target_right) * \
(target_top + target_bottom)
pred_area = (pred_left + pred_right) * \
(pred_top + pred_bottom)
w_intersect = torch.min(pred_left, target_left) + torch.min(pred_right, target_right)
g_w_intersect = torch.max(pred_left, target_left) + torch.max(
pred_right, target_right)
h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top)
g_h_intersect = torch.max(pred_bottom, target_bottom) + torch.max(pred_top, target_top)
ac_uion = g_w_intersect * g_h_intersect + 1e-7
area_intersect = w_intersect * h_intersect
area_union = target_area + pred_area - area_intersect
ious = (area_intersect + 1.0) / (area_union + 1.0)
gious = ious - (ac_uion - area_union) / ac_uion
return ious, gious
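# (gious above follows the generalized IoU definition: IoU minus the fraction of the
#  smallest enclosing box C not covered by the union, (|C| - |A U B|) / |C|.)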
def gmm_clustter_2(self, cls_loss):
from sklearn.mixture import GaussianMixture
import numpy as np
# mean = torch.mean(cls_loss)
# sigma = torch.std(cls_loss)
min_loss = torch.min(cls_loss).cpu().detach().numpy()
max_loss = torch.max(cls_loss).cpu().detach().numpy()
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
precisions_init = np.array([0.1, 0.1]).reshape(2, 1, 1)
# coding: utf-8
# # Building your Recurrent Neural Network - Step by Step
#
# Welcome to Course 5's first assignment! In this assignment, you will implement your first Recurrent Neural Network in numpy.
#
# Recurrent Neural Networks (RNN) are very effective for Natural Language Processing and other sequence tasks because they have "memory". They can read inputs $x^{\langle t \rangle}$ (such as words) one at a time, and remember some information/context through the hidden layer activations that get passed from one time-step to the next. This allows a uni-directional RNN to take information from the past to process later inputs. A bidirection RNN can take context from both the past and the future.
#
# **Notation**:
# - Superscript $[l]$ denotes an object associated with the $l^{th}$ layer.
# - Example: $a^{[4]}$ is the $4^{th}$ layer activation. $W^{[5]}$ and $b^{[5]}$ are the $5^{th}$ layer parameters.
#
# - Superscript $(i)$ denotes an object associated with the $i^{th}$ example.
# - Example: $x^{(i)}$ is the $i^{th}$ training example input.
#
# - Superscript $\langle t \rangle$ denotes an object at the $t^{th}$ time-step.
# - Example: $x^{\langle t \rangle}$ is the input x at the $t^{th}$ time-step. $x^{(i)\langle t \rangle}$ is the input at the $t^{th}$ timestep of example $i$.
#
# - Lowerscript $i$ denotes the $i^{th}$ entry of a vector.
# - Example: $a^{[l]}_i$ denotes the $i^{th}$ entry of the activations in layer $l$.
#
# We assume that you are already familiar with `numpy` and/or have completed the previous courses of the specialization. Let's get started!
# Let's first import all the packages that you will need during this assignment.
# In[3]:
import numpy as np
from rnn_utils import *
# ## 1 - Forward propagation for the basic Recurrent Neural Network
#
# Later this week, you will generate music using an RNN. The basic RNN that you will implement has the structure below. In this example, $T_x = T_y$.
# <img src="images/RNN.png" style="width:500;height:300px;">
# <caption><center> **Figure 1**: Basic RNN model </center></caption>
# Here's how you can implement an RNN:
#
# **Steps**:
# 1. Implement the calculations needed for one time-step of the RNN.
# 2. Implement a loop over $T_x$ time-steps in order to process all the inputs, one at a time.
#
# Let's go!
#
# ## 1.1 - RNN cell
#
# A Recurrent neural network can be seen as the repetition of a single cell. You are first going to implement the computations for a single time-step. The following figure describes the operations for a single time-step of an RNN cell.
#
# <img src="images/rnn_step_forward.png" style="width:700px;height:300px;">
# <caption><center> **Figure 2**: Basic RNN cell. Takes as input $x^{\langle t \rangle}$ (current input) and $a^{\langle t - 1\rangle}$ (previous hidden state containing information from the past), and outputs $a^{\langle t \rangle}$ which is given to the next RNN cell and also used to predict $y^{\langle t \rangle}$ </center></caption>
#
# **Exercise**: Implement the RNN-cell described in Figure (2).
#
# **Instructions**:
# 1. Compute the hidden state with tanh activation: $a^{\langle t \rangle} = \tanh(W_{aa} a^{\langle t-1 \rangle} + W_{ax} x^{\langle t \rangle} + b_a)$.
# 2. Using your new hidden state $a^{\langle t \rangle}$, compute the prediction $\hat{y}^{\langle t \rangle} = softmax(W_{ya} a^{\langle t \rangle} + b_y)$. We provided you a function: `softmax`.
# 3. Store $(a^{\langle t \rangle}, a^{\langle t-1 \rangle}, x^{\langle t \rangle}, parameters)$ in cache
# 4. Return $a^{\langle t \rangle}$ , $y^{\langle t \rangle}$ and cache
#
# We will vectorize over $m$ examples. Thus, $x^{\langle t \rangle}$ will have dimension $(n_x,m)$, and $a^{\langle t \rangle}$ will have dimension $(n_a,m)$.
# In[4]:
# GRADED FUNCTION: rnn_cell_forward
def rnn_cell_forward(xt, a_prev, parameters):
"""
Implements a single forward step of the RNN-cell as described in Figure (2)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias, numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, a_prev, xt, parameters)
"""
# Retrieve parameters from "parameters"
Wax = parameters["Wax"]
Waa = parameters["Waa"]
Wya = parameters["Wya"]
ba = parameters["ba"]
by = parameters["by"]
### START CODE HERE ### (≈2 lines)
# compute next activation state using the formula given above
a_next = np.tanh(np.dot(Wax,xt) + np.dot(Waa,a_prev) + ba)
# compute output of the current cell using the formula given above
yt_pred = softmax(np.dot(Wya,a_next) + by)
### END CODE HERE ###
# store values you need for backward propagation in cache
cache = (a_next, a_prev, xt, parameters)
return a_next, yt_pred, cache
# In[5]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
Waa = np.random.randn(5,5)
Wax = np.random.randn(5,3)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a_next, yt_pred, cache = rnn_cell_forward(xt, a_prev, parameters)
print("a_next[4] = ", a_next[4])
print("a_next.shape = ", a_next.shape)
print("yt_pred[1] =", yt_pred[1])
print("yt_pred.shape = ", yt_pred.shape)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a_next[4]**:
# </td>
# <td>
# [ 0.59584544 0.18141802 0.61311866 0.99808218 0.85016201 0.99980978
# -0.18887155 0.99815551 0.6531151 0.82872037]
# </td>
# </tr>
# <tr>
# <td>
# **a_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **yt[1]**:
# </td>
# <td>
# [ 0.9888161 0.01682021 0.21140899 0.36817467 0.98988387 0.88945212
# 0.36920224 0.9966312 0.9982559 0.17746526]
# </td>
# </tr>
# <tr>
# <td>
# **yt.shape**:
# </td>
# <td>
# (2, 10)
# </td>
# </tr>
#
# </table>
# ## 1.2 - RNN forward pass
#
# You can see an RNN as the repetition of the cell you've just built. If your input sequence of data is carried over 10 time steps, then you will copy the RNN cell 10 times. Each cell takes as input the hidden state from the previous cell ($a^{\langle t-1 \rangle}$) and the current time-step's input data ($x^{\langle t \rangle}$). It outputs a hidden state ($a^{\langle t \rangle}$) and a prediction ($y^{\langle t \rangle}$) for this time-step.
#
#
# <img src="images/rnn.png" style="width:800px;height:300px;">
# <caption><center> **Figure 3**: Basic RNN. The input sequence $x = (x^{\langle 1 \rangle}, x^{\langle 2 \rangle}, ..., x^{\langle T_x \rangle})$ is carried over $T_x$ time steps. The network outputs $y = (y^{\langle 1 \rangle}, y^{\langle 2 \rangle}, ..., y^{\langle T_x \rangle})$. </center></caption>
#
#
#
# **Exercise**: Code the forward propagation of the RNN described in Figure (3).
#
# **Instructions**:
# 1. Create a vector of zeros ($a$) that will store all the hidden states computed by the RNN.
# 2. Initialize the "next" hidden state as $a_0$ (initial hidden state).
# 3. Start looping over each time step, your incremental index is $t$ :
# - Update the "next" hidden state and the cache by running `rnn_cell_forward`
# - Store the "next" hidden state in $a$ ($t^{th}$ position)
# - Store the prediction in y
# - Add the cache to the list of caches
# 4. Return $a$, $y$ and caches
# In[6]:
# GRADED FUNCTION: rnn_forward
def rnn_forward(x, a0, parameters):
"""
Implement the forward propagation of the recurrent neural network described in Figure (3).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Waa -- Weight matrix multiplying the hidden state, numpy array of shape (n_a, n_a)
Wax -- Weight matrix multiplying the input, numpy array of shape (n_a, n_x)
Wya -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
ba -- Bias numpy array of shape (n_a, 1)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y_pred -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of caches, x)
"""
# Initialize "caches" which will contain the list of all caches
caches = []
# Retrieve dimensions from shapes of x and parameters["Wya"]
n_x, m, T_x = x.shape
n_y, n_a = parameters["Wya"].shape
### START CODE HERE ###
# initialize "a" and "y" with zeros (≈2 lines)
a = np.zeros([n_a,m,T_x])
y_pred = np.zeros([n_y,m,T_x])
# Initialize a_next (≈1 line)
a_next = a0
# loop over all time-steps
for t in range(T_x):
# Update next hidden state, compute the prediction, get the cache (≈1 line)
a_next, yt_pred, cache = rnn_cell_forward(x[:,:,t],a_next,parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the prediction in y (≈1 line)
y_pred[:,:,t] = yt_pred
# Append "cache" to "caches" (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y_pred, caches
# In[7]:
np.random.seed(1)
x = np.random.randn(3,10,4)
a0 = np.random.randn(5,10)
Waa = np.random.randn(5,5)
Wax = np.random.randn(5,3)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Waa": Waa, "Wax": Wax, "Wya": Wya, "ba": ba, "by": by}
a, y_pred, caches = rnn_forward(x, a0, parameters)
print("a[4][1] = ", a[4][1])
print("a.shape = ", a.shape)
print("y_pred[1][3] =", y_pred[1][3])
print("y_pred.shape = ", y_pred.shape)
print("caches[1][1][3] =", caches[1][1][3])
print("len(caches) = ", len(caches))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a[4][1]**:
# </td>
# <td>
# [-0.99999375 0.77911235 -0.99861469 -0.99833267]
# </td>
# </tr>
# <tr>
# <td>
# **a.shape**:
# </td>
# <td>
# (5, 10, 4)
# </td>
# </tr>
# <tr>
# <td>
# **y[1][3]**:
# </td>
# <td>
# [ 0.79560373 0.86224861 0.11118257 0.81515947]
# </td>
# </tr>
# <tr>
# <td>
# **y.shape**:
# </td>
# <td>
# (2, 10, 4)
# </td>
# </tr>
# <tr>
# <td>
# **cache[1][1][3]**:
# </td>
# <td>
# [-1.1425182 -0.34934272 -0.20889423 0.58662319]
# </td>
# </tr>
# <tr>
# <td>
# **len(cache)**:
# </td>
# <td>
# 2
# </td>
# </tr>
#
# </table>
# Congratulations! You've successfully built the forward propagation of a recurrent neural network from scratch. This will work well enough for some applications, but it suffers from vanishing gradient problems. So it works best when each output $y^{\langle t \rangle}$ can be estimated using mainly "local" context (meaning information from inputs $x^{\langle t' \rangle}$ where $t'$ is not too far from $t$).
#
# In the next part, you will build a more complex LSTM model, which is better at addressing vanishing gradients. The LSTM will be better able to remember a piece of information and keep it saved for many timesteps.
# ## 2 - Long Short-Term Memory (LSTM) network
#
# This following figure shows the operations of an LSTM-cell.
#
# <img src="images/LSTM.png" style="width:500;height:400px;">
# <caption><center> **Figure 4**: LSTM-cell. This tracks and updates a "cell state" or memory variable $c^{\langle t \rangle}$ at every time-step, which can be different from $a^{\langle t \rangle}$. </center></caption>
#
# Similar to the RNN example above, you will start by implementing the LSTM cell for a single time-step. Then you can iteratively call it from inside a for-loop to have it process an input with $T_x$ time-steps.
#
# ### About the gates
#
# #### - Forget gate
#
# For the sake of this illustration, let's assume we are reading words in a piece of text, and want to use an LSTM to keep track of grammatical structures, such as whether the subject is singular or plural. If the subject changes from a singular word to a plural word, we need to find a way to get rid of our previously stored memory value of the singular/plural state. In an LSTM, the forget gate lets us do this:
#
# $$\Gamma_f^{\langle t \rangle} = \sigma(W_f[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_f)\tag{1} $$
#
# Here, $W_f$ are weights that govern the forget gate's behavior. We concatenate $[a^{\langle t-1 \rangle}, x^{\langle t \rangle}]$ and multiply by $W_f$. The equation above results in a vector $\Gamma_f^{\langle t \rangle}$ with values between 0 and 1. This forget gate vector will be multiplied element-wise by the previous cell state $c^{\langle t-1 \rangle}$. So if one of the values of $\Gamma_f^{\langle t \rangle}$ is 0 (or close to 0) then it means that the LSTM should remove that piece of information (e.g. the singular subject) in the corresponding component of $c^{\langle t-1 \rangle}$. If one of the values is 1, then it will keep the information.
#
# #### - Update gate
#
# Once we forget that the subject being discussed is singular, we need to find a way to update it to reflect that the new subject is now plural. Here is the formula for the update gate:
#
# $$\Gamma_u^{\langle t \rangle} = \sigma(W_u[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_u)\tag{2} $$
#
# Similar to the forget gate, here $\Gamma_u^{\langle t \rangle}$ is again a vector of values between 0 and 1. This will be multiplied element-wise with $\tilde{c}^{\langle t \rangle}$, in order to compute $c^{\langle t \rangle}$.
#
# #### - Updating the cell
#
# To update the new subject we need to create a new vector of numbers that we can add to our previous cell state. The equation we use is:
#
# $$ \tilde{c}^{\langle t \rangle} = \tanh(W_c[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_c)\tag{3} $$
#
# Finally, the new cell state is:
#
# $$ c^{\langle t \rangle} = \Gamma_f^{\langle t \rangle}* c^{\langle t-1 \rangle} + \Gamma_u^{\langle t \rangle} *\tilde{c}^{\langle t \rangle} \tag{4} $$
#
#
# #### - Output gate
#
# To decide which outputs we will use, we will use the following two formulas:
#
# $$ \Gamma_o^{\langle t \rangle}= \sigma(W_o[a^{\langle t-1 \rangle}, x^{\langle t \rangle}] + b_o)\tag{5}$$
# $$ a^{\langle t \rangle} = \Gamma_o^{\langle t \rangle}* \tanh(c^{\langle t \rangle})\tag{6} $$
#
# In equation 5 you decide what to output using a sigmoid function, and in equation 6 you multiply that by the $\tanh$ of the current cell state.
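#
# The short snippet below is an optional illustration added by the editor (it is not part of the
# graded assignment): it runs equations (1)-(6) once on small random tensors just to make the
# shapes concrete. The helper `_sigmoid` stands in for the `sigmoid()` function provided by the
# assignment's utilities.

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

_n_x, _n_a, _m = 3, 5, 10                                  # input size, hidden size, batch size
_xt = np.random.randn(_n_x, _m)                            # x^<t>
_a_prev = np.random.randn(_n_a, _m)                        # a^<t-1>
_c_prev = np.random.randn(_n_a, _m)                        # c^<t-1>
_concat = np.vstack([_a_prev, _xt])                        # [a^<t-1>, x^<t>], shape (n_a + n_x, m)
_Wf, _Wu, _Wc, _Wo = [np.random.randn(_n_a, _n_a + _n_x) for _ in range(4)]
_b = np.zeros((_n_a, 1))                                   # one shared zero bias, for brevity
_gamma_f = _sigmoid(np.dot(_Wf, _concat) + _b)             # equation (1), forget gate
_gamma_u = _sigmoid(np.dot(_Wu, _concat) + _b)             # equation (2), update gate
_c_tilde = np.tanh(np.dot(_Wc, _concat) + _b)              # equation (3), candidate value
_c_t = _gamma_f * _c_prev + _gamma_u * _c_tilde            # equation (4), new cell state
_gamma_o = _sigmoid(np.dot(_Wo, _concat) + _b)             # equation (5), output gate
_a_t = _gamma_o * np.tanh(_c_t)                            # equation (6), new hidden state
print(_a_t.shape, _c_t.shape)                              # both (5, 10), i.e. (n_a, m)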
# ### 2.1 - LSTM cell
#
# **Exercise**: Implement the LSTM cell described in Figure (4).
#
# **Instructions**:
# 1. Concatenate $a^{\langle t-1 \rangle}$ and $x^{\langle t \rangle}$ in a single matrix: $concat = \begin{bmatrix} a^{\langle t-1 \rangle} \\ x^{\langle t \rangle} \end{bmatrix}$
# 2. Compute all the formulas 1-6. You can use `sigmoid()` (provided) and `np.tanh()`.
# 3. Compute the prediction $y^{\langle t \rangle}$. You can use `softmax()` (provided).
# In[8]:
# GRADED FUNCTION: lstm_cell_forward
def lstm_cell_forward(xt, a_prev, c_prev, parameters):
"""
Implement a single forward step of the LSTM-cell as described in Figure (4)
Arguments:
xt -- your input data at timestep "t", numpy array of shape (n_x, m).
a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m)
c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a_next -- next hidden state, of shape (n_a, m)
c_next -- next memory state, of shape (n_a, m)
yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m)
cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters)
Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde),
c stands for the memory value
"""
# Retrieve parameters from "parameters"
Wf = parameters["Wf"]
bf = parameters["bf"]
Wi = parameters["Wi"]
bi = parameters["bi"]
Wc = parameters["Wc"]
bc = parameters["bc"]
Wo = parameters["Wo"]
bo = parameters["bo"]
Wy = parameters["Wy"]
by = parameters["by"]
# Retrieve dimensions from shapes of xt and Wy
n_x, m = xt.shape
n_y, n_a = Wy.shape
### START CODE HERE ###
# Concatenate a_prev and xt (≈3 lines)
concat = np.zeros([n_a+n_x,m])
concat[: n_a, :] = a_prev
concat[n_a :, :] = xt
# Compute values for ft, it, cct, c_next, ot, a_next using the formulas given figure (4) (≈6 lines)
ft = sigmoid(np.dot(Wf,concat) + bf)
it = sigmoid(np.dot(Wi,concat) + bi)
cct = np.tanh(np.dot(Wc,concat) + bc)
c_next = ft*c_prev + it*cct
ot = sigmoid(np.dot(Wo,concat) + bo)
a_next = ot*np.tanh(c_next)
# Compute prediction of the LSTM cell (≈1 line)
yt_pred = softmax(np.dot(Wy, a_next) + by)
### END CODE HERE ###
# store values needed for backward propagation in cache
cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters)
return a_next, c_next, yt_pred, cache
# In[9]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
c_prev = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a_next, c_next, yt, cache = lstm_cell_forward(xt, a_prev, c_prev, parameters)
print("a_next[4] = ", a_next[4])
print("a_next.shape = ", c_next.shape)
print("c_next[2] = ", c_next[2])
print("c_next.shape = ", c_next.shape)
print("yt[1] =", yt[1])
print("yt.shape = ", yt.shape)
print("cache[1][3] =", cache[1][3])
print("len(cache) = ", len(cache))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a_next[4]**:
# </td>
# <td>
# [-0.66408471 0.0036921 0.02088357 0.22834167 -0.85575339 0.00138482
# 0.76566531 0.34631421 -0.00215674 0.43827275]
# </td>
# </tr>
# <tr>
# <td>
# **a_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **c_next[2]**:
# </td>
# <td>
# [ 0.63267805 1.00570849 0.35504474 0.20690913 -1.64566718 0.11832942
# 0.76449811 -0.0981561 -0.74348425 -0.26810932]
# </td>
# </tr>
# <tr>
# <td>
# **c_next.shape**:
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **yt[1]**:
# </td>
# <td>
# [ 0.79913913 0.15986619 0.22412122 0.15606108 0.97057211 0.31146381
# 0.00943007 0.12666353 0.39380172 0.07828381]
# </td>
# </tr>
# <tr>
# <td>
# **yt.shape**:
# </td>
# <td>
# (2, 10)
# </td>
# </tr>
# <tr>
# <td>
# **cache[1][3]**:
# </td>
# <td>
# [-0.16263996 1.03729328 0.72938082 -0.54101719 0.02752074 -0.30821874
# 0.07651101 -1.03752894 1.41219977 -0.37647422]
# </td>
# </tr>
# <tr>
# <td>
# **len(cache)**:
# </td>
# <td>
# 10
# </td>
# </tr>
#
# </table>
# ### 2.2 - Forward pass for LSTM
#
# Now that you have implemented one step of an LSTM, you can iterate it over $T_x$ time-steps using a for-loop to process a sequence of inputs.
#
# <img src="images/LSTM_rnn.png" style="width:500;height:300px;">
# <caption><center> **Figure 4**: LSTM over multiple time-steps. </center></caption>
#
# **Exercise:** Implement `lstm_forward()` to run an LSTM over $T_x$ time-steps.
#
# **Note**: $c^{\langle 0 \rangle}$ is initialized with zeros.
# In[10]:
# GRADED FUNCTION: lstm_forward
def lstm_forward(x, a0, parameters):
"""
    Implement the forward propagation of the recurrent neural network using an LSTM-cell described in Figure (4).
Arguments:
x -- Input data for every time-step, of shape (n_x, m, T_x).
a0 -- Initial hidden state, of shape (n_a, m)
parameters -- python dictionary containing:
Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
bf -- Bias of the forget gate, numpy array of shape (n_a, 1)
Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
bi -- Bias of the update gate, numpy array of shape (n_a, 1)
Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x)
bc -- Bias of the first "tanh", numpy array of shape (n_a, 1)
Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
bo -- Bias of the output gate, numpy array of shape (n_a, 1)
Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a)
by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1)
Returns:
a -- Hidden states for every time-step, numpy array of shape (n_a, m, T_x)
y -- Predictions for every time-step, numpy array of shape (n_y, m, T_x)
caches -- tuple of values needed for the backward pass, contains (list of all the caches, x)
"""
# Initialize "caches", which will track the list of all the caches
caches = []
### START CODE HERE ###
# Retrieve dimensions from shapes of x and parameters['Wy'] (≈2 lines)
n_x, m, T_x = x.shape
n_y, n_a = parameters['Wy'].shape
# initialize "a", "c" and "y" with zeros (≈3 lines)
a = np.zeros([n_a, m, T_x])
c = np.zeros([n_a, m, T_x])
y = np.zeros([n_y, m, T_x])
# Initialize a_next and c_next (≈2 lines)
a_next = a0
c_next = np.zeros([n_a, m])
# loop over all time-steps
for t in range(T_x):
# Update next hidden state, next memory state, compute the prediction, get the cache (≈1 line)
a_next, c_next, yt, cache = lstm_cell_forward(x[:,:,t], a_next, c_next, parameters)
# Save the value of the new "next" hidden state in a (≈1 line)
a[:,:,t] = a_next
# Save the value of the prediction in y (≈1 line)
y[:,:,t] = yt
# Save the value of the next cell state (≈1 line)
c[:,:,t] = c_next
# Append the cache into caches (≈1 line)
caches.append(cache)
### END CODE HERE ###
# store values needed for backward propagation in cache
caches = (caches, x)
return a, y, c, caches
# In[11]:
np.random.seed(1)
x = np.random.randn(3,10,7)
a0 = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a, y, c, caches = lstm_forward(x, a0, parameters)
print("a[4][3][6] = ", a[4][3][6])
print("a.shape = ", a.shape)
print("y[1][4][3] =", y[1][4][3])
print("y.shape = ", y.shape)
print("caches[1][1[1]] =", caches[1][1][1])
print("c[1][2][1]", c[1][2][1])
print("len(caches) = ", len(caches))
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **a[4][3][6]** =
# </td>
# <td>
# 0.172117767533
# </td>
# </tr>
# <tr>
# <td>
# **a.shape** =
# </td>
# <td>
# (5, 10, 7)
# </td>
# </tr>
# <tr>
# <td>
# **y[1][4][3]** =
# </td>
# <td>
# 0.95087346185
# </td>
# </tr>
# <tr>
# <td>
# **y.shape** =
# </td>
# <td>
# (2, 10, 7)
# </td>
# </tr>
# <tr>
# <td>
# **caches[1][1][1]** =
# </td>
# <td>
# [ 0.82797464 0.23009474 0.76201118 -0.22232814 -0.20075807 0.18656139
# 0.41005165]
# </td>
#
# </tr>
# <tr>
# <td>
# **c[1][2][1]** =
# </td>
# <td>
# -0.855544916718
# </td>
# </tr>
#
# </tr>
# <tr>
# <td>
# **len(caches)** =
# </td>
# <td>
# 2
# </td>
# </tr>
#
# </table>
# Congratulations! You have now implemented the forward passes for the basic RNN and the LSTM. When using a deep learning framework, implementing the forward pass is sufficient to build systems that achieve great performance.
#
# The rest of this notebook is optional, and will not be graded.
# ## 3 - Backpropagation in recurrent neural networks (OPTIONAL / UNGRADED)
#
# In modern deep learning frameworks, you only have to implement the forward pass, and the framework takes care of the backward pass, so most deep learning engineers do not need to bother with the details of the backward pass. If however you are an expert in calculus and want to see the details of backprop in RNNs, you can work through this optional portion of the notebook.
#
# When you implemented a simple (fully connected) neural network in an earlier course, you used backpropagation to compute the derivatives of the cost that are used to update the parameters. Similarly, in recurrent neural networks you need to calculate the derivatives with respect to the cost in order to update the parameters. The backprop equations are quite complicated and we did not derive them in lecture. However, we will briefly present them below.
# ### 3.1 - Basic RNN backward pass
#
# We will start by computing the backward pass for the basic RNN-cell.
#
# <img src="images/rnn_cell_backprop.png" style="width:500;height:300px;"> <br>
# <caption><center> **Figure 5**: RNN-cell's backward pass. Just like in a fully-connected neural network, the derivative of the cost function $J$ backpropagates through the RNN by following the chain-rule from calculus. The chain-rule is also used to calculate $(\frac{\partial J}{\partial W_{ax}},\frac{\partial J}{\partial W_{aa}},\frac{\partial J}{\partial b})$ to update the parameters $(W_{ax}, W_{aa}, b_a)$. </center></caption>
# #### Deriving the one step backward functions:
#
# To compute the `rnn_cell_backward` you need to compute the following equations. It is a good exercise to derive them by hand.
#
# The derivative of $\tanh$ is $1-\tanh(x)^2$. You can find the complete proof [here](https://www.wyzant.com/resources/lessons/math/calculus/derivative_proofs/tanx). Note that: $ \text{sech}(x)^2 = 1 - \tanh(x)^2$
#
# Similarly for $\frac{ \partial a^{\langle t \rangle} } {\partial W_{ax}}, \frac{ \partial a^{\langle t \rangle} } {\partial W_{aa}}, \frac{ \partial a^{\langle t \rangle} } {\partial b}$, the derivative of $\tanh(u)$ is $(1-\tanh(u)^2)du$.
#
# The final two equations also follow the same rule and are derived using the $\tanh$ derivative. Note that the terms are arranged so that the dimensions match.
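#
# (Optional check added by the editor, not part of the original notebook: the identity
# $\frac{d}{du}\tanh(u) = 1 - \tanh(u)^2$ can be verified numerically with a centered finite
# difference.)

_u = np.linspace(-2.0, 2.0, 9)
_eps = 1e-6
_numeric = (np.tanh(_u + _eps) - np.tanh(_u - _eps)) / (2 * _eps)    # centered finite difference
_analytic = 1 - np.tanh(_u) ** 2                                     # closed-form derivative
print(np.max(np.abs(_numeric - _analytic)))                          # ~1e-10 or smaller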
# In[28]:
def rnn_cell_backward(da_next, cache):
"""
Implements the backward pass for the RNN-cell (single time-step).
Arguments:
da_next -- Gradient of loss with respect to next hidden state
cache -- python dictionary containing useful values (output of rnn_cell_forward())
Returns:
gradients -- python dictionary containing:
dx -- Gradients of input data, of shape (n_x, m)
da_prev -- Gradients of previous hidden state, of shape (n_a, m)
dWax -- Gradients of input-to-hidden weights, of shape (n_a, n_x)
dWaa -- Gradients of hidden-to-hidden weights, of shape (n_a, n_a)
dba -- Gradients of bias vector, of shape (n_a, 1)
"""
# Retrieve values from cache
(a_next, a_prev, xt, parameters) = cache
# Retrieve values from parameters
Wax = parameters["Wax"]
Waa = parameters["Waa"]
Wya = parameters["Wya"]
ba = parameters["ba"]
by = parameters["by"]
### START CODE HERE ###
# compute the gradient of tanh with respect to a_next (≈1 line)
dtanh = (1 - a_next**2)*da_next
# compute the gradient of the loss with respect to Wax (≈2 lines)
dxt = np.dot(Wax.T,dtanh)
dWax = np.dot(dtanh,xt.T)
# compute the gradient with respect to Waa (≈2 lines)
da_prev = np.dot(Waa.T,dtanh)
dWaa = np.dot(dtanh,a_prev.T)
# compute the gradient with respect to b (≈1 line)
dba = np.sum(dtanh, axis=1, keepdims=True)
### END CODE HERE ###
# Store the gradients in a python dictionary
gradients = {"dxt": dxt, "da_prev": da_prev, "dWax": dWax, "dWaa": dWaa, "dba": dba}
return gradients
# In[29]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
Wax = np.random.randn(5,3)
Waa = np.random.randn(5,5)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "ba": ba, "by": by}
a_next, yt, cache = rnn_cell_forward(xt, a_prev, parameters)
da_next = np.random.randn(5,10)
gradients = rnn_cell_backward(da_next, cache)
print("gradients[\"dxt\"][1][2] =", gradients["dxt"][1][2])
print("gradients[\"dxt\"].shape =", gradients["dxt"].shape)
print("gradients[\"da_prev\"][2][3] =", gradients["da_prev"][2][3])
print("gradients[\"da_prev\"].shape =", gradients["da_prev"].shape)
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWax\"].shape =", gradients["dWax"].shape)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWaa\"].shape =", gradients["dWaa"].shape)
print("gradients[\"dba\"][4] =", gradients["dba"][4])
print("gradients[\"dba\"].shape =", gradients["dba"].shape)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **gradients["dxt"][1][2]** =
# </td>
# <td>
# -0.460564103059
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dxt"].shape** =
# </td>
# <td>
# (3, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da_prev"][2][3]** =
# </td>
# <td>
# 0.0842968653807
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da_prev"].shape** =
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWax"][3][1]** =
# </td>
# <td>
# 0.393081873922
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWax"].shape** =
# </td>
# <td>
# (5, 3)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"][1][2]** =
# </td>
# <td>
# -0.28483955787
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"].shape** =
# </td>
# <td>
# (5, 5)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dba"][4]** =
# </td>
# <td>
# [ 0.80517166]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dba"].shape** =
# </td>
# <td>
# (5, 1)
# </td>
# </tr>
# </table>
# #### Backward pass through the RNN
#
# Computing the gradients of the cost with respect to $a^{\langle t \rangle}$ at every time-step $t$ is useful because it is what helps the gradient backpropagate to the previous RNN-cell. To do so, you need to iterate through all the time steps starting at the end, and at each step, you increment the overall $db_a$, $dW_{aa}$, $dW_{ax}$ and you store $dx$.
#
# **Instructions**:
#
# Implement the `rnn_backward` function. Initialize the return variables with zeros first, then loop through all the time steps, calling `rnn_cell_backward` at each time step and updating the other variables accordingly.
# In[42]:
def rnn_backward(da, cache):
"""
Implement the backward pass for a RNN over an entire sequence of input data.
Arguments:
da -- Upstream gradients of all hidden states, of shape (n_a, m, T_x)
    cache -- tuple containing information from the forward pass (rnn_forward)
Returns:
gradients -- python dictionary containing:
dx -- Gradient w.r.t. the input data, numpy-array of shape (n_x, m, T_x)
da0 -- Gradient w.r.t the initial hidden state, numpy-array of shape (n_a, m)
dWax -- Gradient w.r.t the input's weight matrix, numpy-array of shape (n_a, n_x)
dWaa -- Gradient w.r.t the hidden state's weight matrix, numpy-arrayof shape (n_a, n_a)
dba -- Gradient w.r.t the bias, of shape (n_a, 1)
"""
### START CODE HERE ###
# Retrieve values from the first cache (t=1) of caches (≈2 lines)
(caches, x) = cache
(a1, a0, x1, parameters) = caches[0]
# Retrieve dimensions from da's and x1's shapes (≈2 lines)
n_a, m, T_x = da.shape
n_x, m = x1.shape
# initialize the gradients with the right sizes (≈6 lines)
dx = np.zeros([n_x, m, T_x])
dWax = np.zeros([n_a, n_x])
dWaa = np.zeros([n_a, n_a])
dba = np.zeros([n_a, 1])
da0 = np.zeros([n_a, m])
da_prevt = np.zeros([n_a, m])
# Loop through all the time steps
for t in reversed(range(T_x)):
# Compute gradients at time step t. Choose wisely the "da_next" and the "cache" to use in the backward propagation step. (≈1 line)
gradients = rnn_cell_backward(da[:,:,t] + da_prevt, caches[t])
# Retrieve derivatives from gradients (≈ 1 line)
dxt, da_prevt, dWaxt, dWaat, dbat = gradients["dxt"], gradients["da_prev"], gradients["dWax"], gradients["dWaa"], gradients["dba"]
# Increment global derivatives w.r.t parameters by adding their derivative at time-step t (≈4 lines)
dx[:, :, t] = dxt
dWax += dWaxt
dWaa += dWaat
dba += dbat
# Set da0 to the gradient of a which has been backpropagated through all time-steps (≈1 line)
da0 = da_prevt
### END CODE HERE ###
# Store the gradients in a python dictionary
gradients = {"dx": dx, "da0": da0, "dWax": dWax, "dWaa": dWaa,"dba": dba}
return gradients
# In[43]:
np.random.seed(1)
x = np.random.randn(3,10,4)
a0 = np.random.randn(5,10)
Wax = np.random.randn(5,3)
Waa = np.random.randn(5,5)
Wya = np.random.randn(2,5)
ba = np.random.randn(5,1)
by = np.random.randn(2,1)
parameters = {"Wax": Wax, "Waa": Waa, "Wya": Wya, "ba": ba, "by": by}
a, y, caches = rnn_forward(x, a0, parameters)
da = np.random.randn(5, 10, 4)
gradients = rnn_backward(da, caches)
print("gradients[\"dx\"][1][2] =", gradients["dx"][1][2])
print("gradients[\"dx\"].shape =", gradients["dx"].shape)
print("gradients[\"da0\"][2][3] =", gradients["da0"][2][3])
print("gradients[\"da0\"].shape =", gradients["da0"].shape)
print("gradients[\"dWax\"][3][1] =", gradients["dWax"][3][1])
print("gradients[\"dWax\"].shape =", gradients["dWax"].shape)
print("gradients[\"dWaa\"][1][2] =", gradients["dWaa"][1][2])
print("gradients[\"dWaa\"].shape =", gradients["dWaa"].shape)
print("gradients[\"dba\"][4] =", gradients["dba"][4])
print("gradients[\"dba\"].shape =", gradients["dba"].shape)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **gradients["dx"][1][2]** =
# </td>
# <td>
# [-2.07101689 -0.59255627 0.02466855 0.01483317]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dx"].shape** =
# </td>
# <td>
# (3, 10, 4)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da0"][2][3]** =
# </td>
# <td>
# -0.314942375127
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da0"].shape** =
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWax"][3][1]** =
# </td>
# <td>
# 11.2641044965
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWax"].shape** =
# </td>
# <td>
# (5, 3)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"][1][2]** =
# </td>
# <td>
# 2.30333312658
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWaa"].shape** =
# </td>
# <td>
# (5, 5)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dba"][4]** =
# </td>
# <td>
# [-0.74747722]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dba"].shape** =
# </td>
# <td>
# (5, 1)
# </td>
# </tr>
# </table>
# ## 3.2 - LSTM backward pass
# ### 3.2.1 One Step backward
#
# The LSTM backward pass is slightly more complicated than the forward one. We have provided you with all the equations for the LSTM backward pass below. (If you enjoy calculus exercises feel free to try deriving these from scratch yourself.)
#
# ### 3.2.2 gate derivatives
#
# $$d \Gamma_o^{\langle t \rangle} = da_{next}*\tanh(c_{next}) * \Gamma_o^{\langle t \rangle}*(1-\Gamma_o^{\langle t \rangle})\tag{7}$$
#
# $$d\tilde c^{\langle t \rangle} = \left(dc_{next}*\Gamma_u^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * \Gamma_u^{\langle t \rangle} * da_{next}\right) * (1-(\tilde c^{\langle t \rangle})^2) \tag{8}$$
#
# $$d\Gamma_u^{\langle t \rangle} = \left(dc_{next}*\tilde c^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * \tilde c^{\langle t \rangle} * da_{next}\right)*\Gamma_u^{\langle t \rangle}*(1-\Gamma_u^{\langle t \rangle})\tag{9}$$
#
# $$d\Gamma_f^{\langle t \rangle} = \left(dc_{next}*c_{prev} + \Gamma_o^{\langle t \rangle} (1-\tanh(c_{next})^2) * c_{prev} * da_{next}\right)*\Gamma_f^{\langle t \rangle}*(1-\Gamma_f^{\langle t \rangle})\tag{10}$$
#
# ### 3.2.3 parameter derivatives
#
# $$ dW_f = d\Gamma_f^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{11} $$
# $$ dW_u = d\Gamma_u^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{12} $$
# $$ dW_c = d\tilde c^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{13} $$
# $$ dW_o = d\Gamma_o^{\langle t \rangle} * \begin{pmatrix} a_{prev} \\ x_t\end{pmatrix}^T \tag{14}$$
#
# To calculate $db_f, db_u, db_c, db_o$ you just need to sum across the horizontal axis (`axis=1`) on $d\Gamma_f^{\langle t \rangle}, d\Gamma_u^{\langle t \rangle}, d\tilde c^{\langle t \rangle}, d\Gamma_o^{\langle t \rangle}$ respectively. Note that you should use the `keepdims=True` option.
#
# Finally, you will compute the derivative with respect to the previous hidden state, previous memory state, and input.
#
# $$ da_{prev} = W_f^T*d\Gamma_f^{\langle t \rangle} + W_u^T * d\Gamma_u^{\langle t \rangle}+ W_c^T * d\tilde c^{\langle t \rangle} + W_o^T * d\Gamma_o^{\langle t \rangle} \tag{15}$$
# Here, the weights in equation 15 are the first $n_a$ columns of each concatenated weight matrix (i.e. $W_f = W_f[:,:n_a]$ etc.)
#
# $$ dc_{prev} = dc_{next}\Gamma_f^{\langle t \rangle} + \Gamma_o^{\langle t \rangle} * (1- \tanh(c_{next})^2)*\Gamma_f^{\langle t \rangle}*da_{next} \tag{16}$$
# $$ dx^{\langle t \rangle} = W_f^T*d\Gamma_f^{\langle t \rangle} + W_u^T * d\Gamma_u^{\langle t \rangle}+ W_c^T * d\tilde c_t + W_o^T * d\Gamma_o^{\langle t \rangle}\tag{17} $$
# where the weights in equation 17 are the columns from $n_a$ to the end (i.e. $W_f = W_f[:,n_a:]$ etc.)
#
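# (Optional illustration added by the editor, not part of the original notebook: the concatenated
# gate weight matrices have shape $(n_a, n_a + n_x)$, so the part acting on $a^{\langle t-1 \rangle}$
# is the first $n_a$ columns and the part acting on $x^{\langle t \rangle}$ is the remaining $n_x$
# columns, while `keepdims=True` preserves the $(n_a, 1)$ shape of the bias gradients.)

_n_a, _n_x, _m = 5, 3, 10
_W = np.random.randn(_n_a, _n_a + _n_x)
print(_W[:, :_n_a].shape, _W[:, _n_a:].shape)              # (5, 5) and (5, 3)
_dgamma = np.random.randn(_n_a, _m)
print(np.sum(_dgamma, axis=1, keepdims=True).shape)        # (5, 1)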
# **Exercise:** Implement `lstm_cell_backward` by implementing equations $7-17$ above. Good luck! :)
# In[96]:
def lstm_cell_backward(da_next, dc_next, cache):
"""
Implement the backward pass for the LSTM-cell (single time-step).
Arguments:
da_next -- Gradients of next hidden state, of shape (n_a, m)
dc_next -- Gradients of next cell state, of shape (n_a, m)
cache -- cache storing information from the forward pass
Returns:
gradients -- python dictionary containing:
dxt -- Gradient of input data at time-step t, of shape (n_x, m)
da_prev -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m)
dc_prev -- Gradient w.r.t. the previous memory state, of shape (n_a, m, T_x)
dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x)
dWo -- Gradient w.r.t. the weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x)
dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1)
dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1)
dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1)
dbo -- Gradient w.r.t. biases of the output gate, of shape (n_a, 1)
"""
# Retrieve information from "cache"
(a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) = cache
# Retrieve values from parameters
Wf = parameters['Wf']
Wo = parameters['Wo']
Wi = parameters['Wi']
Wc = parameters['Wc']
### START CODE HERE ###
# Retrieve dimensions from xt's and a_next's shape (≈2 lines)
n_x, m = xt.shape
n_a, m = a_next.shape
    # Compute gate-related derivatives; their values can be found by looking carefully at equations (7) to (10) (≈4 lines)
dot = da_next * np.tanh(c_next) * ot * (1 - ot)
dcct = (dc_next * it + ot * (1 - np.square(np.tanh(c_next))) * it * da_next) * (1 - np.square(cct))
dit = (dc_next * cct + ot * (1 - np.square(np.tanh(c_next))) * cct * da_next) * it * (1 - it)
dft = (dc_next * c_prev + ot *(1 - np.square(np.tanh(c_next))) * c_prev * da_next) * ft * (1 - ft)
concat = np.concatenate((a_prev, xt))
# Compute parameters related derivatives. Use equations (11)-(14) (≈8 lines)
dWf = np.dot(dft, concat.T)
dWi = np.dot(dit, concat.T)
dWc = np.dot(dcct, concat.T)
dWo = np.dot(dot, concat.T)
dbf = np.sum(dft, axis=1 ,keepdims = True)
dbi = np.sum(dit, axis=1, keepdims = True)
dbc = np.sum(dcct, axis=1, keepdims = True)
dbo = np.sum(dot, axis=1, keepdims = True)
# Compute derivatives w.r.t previous hidden state, previous memory state and input. Use equations (15)-(17). (≈3 lines)
da_prev = np.dot(Wf[:, :n_a].T, dft) + np.dot(Wi[:, :n_a].T, dit) + np.dot(Wc[:, :n_a].T, dcct) + np.dot(Wo[:, :n_a].T, dot)
dc_prev = dc_next * ft + ot * (1 - np.square(np.tanh(c_next))) * ft * da_next
dxt = np.dot(Wf[:, n_a:].T, dft) + np.dot(Wi[:, n_a:].T, dit) + np.dot(Wc[:, n_a:].T, dcct) + np.dot(Wo[:, n_a:].T, dot)
### END CODE HERE ###
# Save gradients in dictionary
gradients = {"dxt": dxt, "da_prev": da_prev, "dc_prev": dc_prev, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi,
"dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo}
return gradients
# In[97]:
np.random.seed(1)
xt = np.random.randn(3,10)
a_prev = np.random.randn(5,10)
c_prev = np.random.randn(5,10)
Wf = np.random.randn(5, 5+3)
bf = np.random.randn(5,1)
Wi = np.random.randn(5, 5+3)
bi = np.random.randn(5,1)
Wo = np.random.randn(5, 5+3)
bo = np.random.randn(5,1)
Wc = np.random.randn(5, 5+3)
bc = np.random.randn(5,1)
Wy = np.random.randn(2,5)
by = np.random.randn(2,1)
parameters = {"Wf": Wf, "Wi": Wi, "Wo": Wo, "Wc": Wc, "Wy": Wy, "bf": bf, "bi": bi, "bo": bo, "bc": bc, "by": by}
a_next, c_next, yt, cache = lstm_cell_forward(xt, a_prev, c_prev, parameters)
da_next = np.random.randn(5,10)
dc_next = np.random.randn(5,10)
gradients = lstm_cell_backward(da_next, dc_next, cache)
print("gradients[\"dxt\"][1][2] =", gradients["dxt"][1][2])
print("gradients[\"dxt\"].shape =", gradients["dxt"].shape)
print("gradients[\"da_prev\"][2][3] =", gradients["da_prev"][2][3])
print("gradients[\"da_prev\"].shape =", gradients["da_prev"].shape)
print("gradients[\"dc_prev\"][2][3] =", gradients["dc_prev"][2][3])
print("gradients[\"dc_prev\"].shape =", gradients["dc_prev"].shape)
print("gradients[\"dWf\"][3][1] =", gradients["dWf"][3][1])
print("gradients[\"dWf\"].shape =", gradients["dWf"].shape)
print("gradients[\"dWi\"][1][2] =", gradients["dWi"][1][2])
print("gradients[\"dWi\"].shape =", gradients["dWi"].shape)
print("gradients[\"dWc\"][3][1] =", gradients["dWc"][3][1])
print("gradients[\"dWc\"].shape =", gradients["dWc"].shape)
print("gradients[\"dWo\"][1][2] =", gradients["dWo"][1][2])
print("gradients[\"dWo\"].shape =", gradients["dWo"].shape)
print("gradients[\"dbf\"][4] =", gradients["dbf"][4])
print("gradients[\"dbf\"].shape =", gradients["dbf"].shape)
print("gradients[\"dbi\"][4] =", gradients["dbi"][4])
print("gradients[\"dbi\"].shape =", gradients["dbi"].shape)
print("gradients[\"dbc\"][4] =", gradients["dbc"][4])
print("gradients[\"dbc\"].shape =", gradients["dbc"].shape)
print("gradients[\"dbo\"][4] =", gradients["dbo"][4])
print("gradients[\"dbo\"].shape =", gradients["dbo"].shape)
# **Expected Output**:
#
# <table>
# <tr>
# <td>
# **gradients["dxt"][1][2]** =
# </td>
# <td>
# 3.23055911511
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dxt"].shape** =
# </td>
# <td>
# (3, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da_prev"][2][3]** =
# </td>
# <td>
# -0.0639621419711
# </td>
# </tr>
# <tr>
# <td>
# **gradients["da_prev"].shape** =
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dc_prev"][2][3]** =
# </td>
# <td>
# 0.797522038797
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dc_prev"].shape** =
# </td>
# <td>
# (5, 10)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWf"][3][1]** =
# </td>
# <td>
# -0.147954838164
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWf"].shape** =
# </td>
# <td>
# (5, 8)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWi"][1][2]** =
# </td>
# <td>
# 1.05749805523
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWi"].shape** =
# </td>
# <td>
# (5, 8)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWc"][3][1]** =
# </td>
# <td>
# 2.30456216369
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWc"].shape** =
# </td>
# <td>
# (5, 8)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWo"][1][2]** =
# </td>
# <td>
# 0.331311595289
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dWo"].shape** =
# </td>
# <td>
# (5, 8)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dbf"][4]** =
# </td>
# <td>
# [ 0.18864637]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dbf"].shape** =
# </td>
# <td>
# (5, 1)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dbi"][4]** =
# </td>
# <td>
# [-0.40142491]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dbi"].shape** =
# </td>
# <td>
# (5, 1)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dbc"][4]** =
# </td>
# <td>
# [ 0.25587763]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dbc"].shape** =
# </td>
# <td>
# (5, 1)
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dbo"][4]** =
# </td>
# <td>
# [ 0.13893342]
# </td>
# </tr>
# <tr>
# <td>
# **gradients["dbo"].shape** =
# </td>
# <td>
# (5, 1)
# </td>
# </tr>
# </table>
# ### 3.3 Backward pass through the LSTM RNN
#
# This part is very similar to the `rnn_backward` function you implemented above. You will first create variables of the same dimension as your return variables. You will then iterate over all the time steps starting from the end and call the one step function you implemented for LSTM at each iteration. You will then update the parameters by summing them individually. Finally return a dictionary with the new gradients.
#
# **Instructions**: Implement the `lstm_backward` function. Create a for loop starting from $T_x$ and going backward. For each step call `lstm_cell_backward` and update your old gradients by adding the new gradients to them. Note that `dxt` is not accumulated; it is simply stored.
# In[109]:
def lstm_backward(da, caches):
"""
Implement the backward pass for the RNN with LSTM-cell (over a whole sequence).
Arguments:
da -- Gradients w.r.t the hidden states, numpy-array of shape (n_a, m, T_x)
dc -- Gradients w.r.t the memory states, numpy-array of shape (n_a, m, T_x)
caches -- cache storing information from the forward pass (lstm_forward)
Returns:
gradients -- python dictionary containing:
dx -- Gradient of inputs, of shape (n_x, m, T_x)
da0 -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m)
dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x)
dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x)
dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x)
dWo -- Gradient w.r.t. the weight matrix of the save gate, numpy array of shape (n_a, n_a + n_x)
dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1)
dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1)
dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1)
dbo -- Gradient w.r.t. biases of the save gate, of shape (n_a, 1)
"""
# Retrieve values from the first cache (t=1) of caches.
(caches, x) = caches
(a1, c1, a0, c0, f1, i1, cc1, o1, x1, parameters) = caches[0]
### START CODE HERE ###
# Retrieve dimensions from da's and x1's shapes (≈2 lines)
n_a, m, T_x = da.shape
n_x, m = x1.shape
# initialize the gradients with the right sizes (≈12 lines)
dx = np.zeros((n_x, m, T_x))
da0 = np.zeros((n_a, m))
da_prevt = np.zeros(da0.shape)
dc_prevt = np.zeros(da0.shape)
dWf = np.zeros((n_a, n_a + n_x))
dWi = np.zeros(dWf.shape)
dWc = np.zeros(dWf.shape)
dWo = np.zeros(dWf.shape)
    dbf = np.zeros((n_a, 1))
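    # NOTE (editor): the original text is truncated at this point; the lines below are a
    # reconstruction sketched from the symmetric `rnn_backward` above and the outputs of
    # `lstm_cell_backward`, not the author's original code.
    dbi = np.zeros((n_a, 1))
    dbc = np.zeros((n_a, 1))
    dbo = np.zeros((n_a, 1))
    # loop back over the whole sequence
    for t in reversed(range(T_x)):
        # Compute all gradients using lstm_cell_backward
        gradients = lstm_cell_backward(da[:, :, t] + da_prevt, dc_prevt, caches[t])
        # Store dxt, carry the hidden/cell-state gradients backward, and accumulate the parameter gradients
        dx[:, :, t] = gradients["dxt"]
        da_prevt = gradients["da_prev"]
        dc_prevt = gradients["dc_prev"]
        dWf += gradients["dWf"]
        dWi += gradients["dWi"]
        dWc += gradients["dWc"]
        dWo += gradients["dWo"]
        dbf += gradients["dbf"]
        dbi += gradients["dbi"]
        dbc += gradients["dbc"]
        dbo += gradients["dbo"]
    # Set da0 to the gradient of a which has been backpropagated through all time-steps
    da0 = da_prevt
    ### END CODE HERE ###
    # Store the gradients in a python dictionary
    gradients = {"dx": dx, "da0": da0, "dWf": dWf, "dbf": dbf, "dWi": dWi, "dbi": dbi,
                 "dWc": dWc, "dbc": dbc, "dWo": dWo, "dbo": dbo}
    return gradients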
from scipy.linalg import cho_solve
from scipy.special import logsumexp
import numpy as np
def log_gaussian_pdf(x, mu=None, Sigma=None, is_cholesky=False, compute_grad=False):
if mu is None:
mu = np.zeros(len(x))
if Sigma is None:
Sigma = np.eye(len(mu))
if is_cholesky is False:
L = np.linalg.cholesky(Sigma)
else:
L = Sigma
assert len(x) == Sigma.shape[0]
assert len(x) == Sigma.shape[1]
assert len(x) == len(mu)
# solve y=K^(-1)x = L^(-T)L^(-1)x
x = np.array(x - mu)
y = cho_solve((L, True), x)
# y = solve_triangular(L, x.T, lower=True)
# y = solve_triangular(L.T, y, lower=False)
if not compute_grad:
log_determinant_part = -np.sum(np.log(np.diag(L)))
quadratic_part = -0.5 * x.dot(y)
const_part = -0.5 * len(L) * np.log(2 * np.pi)
return const_part + log_determinant_part + quadratic_part
else:
return -y
def log_gaussian_pdf_isotropic(x, sigma, mu=None, compute_grad=False):
if mu is not None:
x = x - mu
if compute_grad:
return -(x) / (sigma ** 2)
else:
D = len(x)
const_part = -0.5 * D * np.log(2 * np.pi)
quadratic_part = -np.dot(x, x) / (2 * (sigma ** 2))
log_determinant_part = -D * np.log(sigma)
return const_part + log_determinant_part + quadratic_part
def sample_gaussian(N, mu=np.zeros(2), Sigma=np.eye(2), is_cholesky=False):
mu = np.atleast_1d(mu)
D = len(mu)
assert len(mu.shape) == 1
assert len(Sigma.shape) == 2
assert D == Sigma.shape[0]
assert D == Sigma.shape[1]
if is_cholesky is False:
L = np.linalg.cholesky(Sigma)
else:
L = Sigma
return L.dot(np.random.randn(D, N)).T + mu
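# A minimal usage sketch added for illustration (the underscore-prefixed names and this demo block
# are not part of the original module): draw samples from a 2-D Gaussian, then evaluate the
# log-density and its gradient at one of the samples.
if __name__ == "__main__":
    _mu = np.array([1.0, -1.0])
    _Sigma = np.array([[2.0, 0.3], [0.3, 0.5]])
    _X = sample_gaussian(100, mu=_mu, Sigma=_Sigma)
    print(log_gaussian_pdf(_X[0], mu=_mu, Sigma=_Sigma))                      # scalar log-density
    print(log_gaussian_pdf(_X[0], mu=_mu, Sigma=_Sigma, compute_grad=True))   # gradient, shape (2,)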
def rings_sample(N, D, sigma=0.1, radia=np.array([1, 3])):
assert D >= 2
angles = np.random.rand(N) * 2 * np.pi
noise = np.random.randn(N) * sigma
weights = 2 * np.pi * radia
weights /= np.sum(weights)
radia_inds = np.random.choice(len(radia), N, p=weights)
radius_samples = radia[radia_inds] + noise
xs = (radius_samples) * np.sin(angles)
ys = (radius_samples) * np.cos(angles)
X = np.vstack((xs, ys)).T.reshape(N, 2)
result = np.zeros((N, D))
result[:, :2] = X
if D > 2:
result[:, 2:] = np.random.randn(N, D - 2) * sigma
return result
def rings_log_pdf_grad(X, sigma=0.1, radia=np.array([1, 3])):
weights = 2 * np.pi * radia
weights /= np.sum(weights)
    norms = np.linalg.norm(X[:, :2], axis=1)
import numpy as np
from vis_sample import vis_sample
import matplotlib.pylab as pl
from vis_sample.file_handling import *
from scipy import ndimage
from scipy import sparse
import time
import math
import sys
def matched_filter(filterfile=None, datafile=None, mu_RA=0., mu_DEC=0., src_distance=None, interpolate=True, weights='renormalize', norm_chans=None, window_func='Hanning', binfactor=2, outfile=None, mode='channel', restfreq=None, plot=False, verbose=False):
"""The matched_filter() method in VISIBLE allows you to apply an approximated matched filter to interferometric spectral line data and extract a signal.
The filter can be created from a FITS image or RADMC3D output image, and the weak line data can be a CASA MS or uvfits file.
The filter response can be output either to a .npy file or returned back to the user (for scripting)
Parameters
__________
filterfile : input filter image or a list of filter images, needs to be in a valid FITS format with units of DEG for the RA and DEC, a RADMC3D image.out file (ascii format), or a SkyImage object from vis_sample. Must have an accurate reference frequency
datafile - path to uvfits file or CASA measurement set containing the weak line. This should be as broad as possible (for a baseline), and NOT just a small window around the line
mu_RA - (optional, default = 0) right ascension offset from phase center in arcseconds (i.e. filter visibilities are sampled as if the image is centered at (mu_RA, mu_DEC)
mu_DEC - (optional, default = 0) declination offset from phase center in arcseconds (i.e. filter visibilities are sampled as if the image is centered at (mu_RA, mu_DEC)
src_distance - distance to source in parsecs - only required for RADMC3D input images
interpolate - (optional, default = True) whether the filter is interpolated to match the the local velocity spacing of the data. Should remain true unless you have a good reason otherwise.
weights - (optional, default = 'renormalize') options are 'renormalize', 'preserve', and 'statwt'. 'renormalize' will calculate the offset (if any) between the current weights and the scatter of the visibilities, and renormalize accordingly. If 'preserve' is selected, then the data weights are assumed to be correct as-is. 'statwt' will assume that the CASA task 'statwt' was applied to the data and no renormalization will be applied. 'renormalize' should not be used if strong lines are present in the data, and the application of statwt using channels without signal will be preferable.
norm_chans - (optional) specify signal free channels to normalize the output spectrum. Channels should be specified as a list of start/stop channel pairs (i.e. [[0,100][130,400],[450,600]]). This option should only be used if the selected 'weights' option cannot normalize the spectrum properly. Note that the channel indices are for the 'n_chan - n_kernel + 1' sized impulse response spectrum
window_func - (optional, default = 'Hanning') the window function used in processing the time domain data, which introduces a channel correlation. A Hanning filter is used for ALMA. Can be set to 'none' for synthetic data, other options (Welch, Hamming, etc.) will be added in the future.
binfactor - (optional, default = 2) the degree to which data was averaged/binned after the window function was applied. The default for ALMA observations after Cycle 3 is a factor of 2 (set in the OT). Valid factors are 1, 2, 3, and 4. Factors over 4 are treated as having no channel correlation.
outfile - (optional) name of output file for filter response, needs to have a .npy extension. If n filter images are provided then n outfiles must be specified.
mode - (optional, default = 'channel') output format of the x-axis of the impulse response spectrum. Options are 'channel', 'frequency', and 'velocity'.
restfreq - (optional) rest frequency for 'velocity' output mode, input as a float in MHz. If a rest frequency is not specified then the center frequency of the data will be used.
plot - (optional) plot the real portion of the filter response spectrum against the x-axis chosen by the 'mode' parameter. The output will still be either returned or saved to 'outfile'.
    verbose - (boolean) flag to print all progress output and timing
Usage:
__________
>> from VISIBLE import matched_filter # import the matched_filter command
>> matched_filter(filterfile="my_filter.fits", datafile="observations.ms", outfile="spectrum.npy") # filter observations.ms using the filter image from my_filter.fits and output spectrum to spectrum.npy
>> output = matched_filter(filterfile="my_filter.fits", datafile="observations.ms") # filter observations.ms using the filter image from my_filter.fits, result stored in variable 'output', where output looks likes [channels, xc_spectrum].
>> spectrum = matched_filter(filterfile="my_filter.fits", datafile="observations.ms.cvel", mode="frequency") # same as above, output with x axis in units of frequency. Input ms should be run through cvel prior to filtering
>> spectrum = matched_filter(filterfile="my_filter.fits", datafile="observations.ms.cvel", mode="velocity") # same as above, output with x axis in units of lsrk velocity. Input ms should be run through cvel prior to filtering
"""
# Error/warning cases #
if not filterfile:
print("ERROR: Please supply an input filter image or list of filter images")
return
if not datafile:
print("ERROR: Please supply an input MS or uvfits file to filter")
return
if mode=='velocity':
print("WARNING: ALMA does not Doppler track, make sure that the datafile has been run through cvel or velocities will not be correct")
if mode=='frequency':
print("WARNING: ALMA does not Doppler track, make sure that the datafile has been run through cvel or frequencies will not be correct")
if (window_func != "Hanning") and (window_func != "none"):
print('ERROR: Please specify a valid window function. Options are "Hanning" or "none".')
return
if not (type(binfactor) is int):
print('ERROR: Please specify a valid binning factor. Value should be a positive integer and values greater than 4 will result in data being treated as having no channel correlation.')
return
elif binfactor < 1:
print('ERROR: Please specify a valid binning factor. Value should be a positive integer and values greater than 4 will result in data being treated as having no channel correlation.')
return
if outfile:
if not ((type(outfile) is str) or (type(outfile) is list)):
print("ERROR: Please supply a valid outfile path or list of paths (matching the number of filter images).")
return
# parse whether we have a bank of filters or single filter and check that number of outfiles matches
if type(filterfile) is list:
multifilter = True
nfilter = len(filterfile)
if outfile:
if len(outfile) != len(filterfile):
print("ERROR: Number of filter images must match the number of outfile paths.")
return
else:
multifilter = False
#################################
# data visibility retrieval #
#################################
# read visibilities in from the data file
if verbose:
print("Reading data file: "+datafile)
t0 = time.time()
try:
data = import_data_uvfits(datafile)
except IOError:
try:
data = import_data_ms(datafile)
except RuntimeError:
print("Not a valid data file. Please check that the file is a uvfits file or measurement set")
sys.exit(1)
nvis = data.VV.shape[0]
if len(data.wgts.shape) > 2:
data.wgts = np.squeeze(data.wgts)
wgt_dims = len(data.wgts.shape)
if wgt_dims == 2:
print("Dataset has a weight spectrum, compressing channelized weights via averaging to a single weight per visibility.")
data.wgts = np.mean(data.wgts, axis=1)
if weights == 'statwt':
data.wgts *= 0.5
elif weights == 'preserve':
print("Assuming data weights are correct as-is. If resulting spectrum is not properly normalized, consider using 'renormalize' or applying statwt to the data.")
else:
# using weight value as a temporary sketchy replacement for finding flagged visibilities
wgt_mean = np.mean(data.wgts[data.wgts > 0.00001])
data_std = np.std(data.VV[data.wgts > 0.00001])
data.wgts *= (1/data_std**2)/wgt_mean
# check if weights look correct
wgt_mean = np.mean(data.wgts[data.wgts > 0.00001])
data_std = np.std(data.VV[data.wgts > 0.00001])
weight_offset = np.abs(wgt_mean - 1/data_std**2)/wgt_mean*100
if weight_offset > 25.:
print("WARNING: data weights are more than 25% offset that expected from the total data variance. This may be due to very strong lines in the data or improperly initialized data weights. If resulting spectrum is not properly normalized, consider using 'renormalize' or applying statwt to the data.")
# check to see if binfactor is 1. if so, bin by a factor of 2 as covariance matrix of unbinned data is ill-conditioned
if binfactor == 1 and window_func == "Hanning":
print("WARNING: unbinned Hanning smoothed data has an ill-conditioned covariance matrix. Binning data by a factor of 2 and adjusting weights to keep numerically stable. Note that channel numbers in the output filter response will correspond to the binned data. Frequencies or velocities (if selected as output mode) will be properly calculated for the binned data.")
# force the data to have an even number of channels
if data.VV.shape[1] & 0x1:
data.VV = data.VV[:,:-1]
data.freqs = data.freqs[:-1]
        data.VV = data.VV.reshape(nvis, data.VV.shape[1]//2, 2).mean(axis=2)
data.freqs = np.ndarray.tolist(np.array(data.freqs).reshape(data.VV.shape[1], 2).mean(axis=1))
data.wgts *= 5./3.
if verbose:
t1 = time.time()
print("Read data file: "+datafile)
print("Data read time = " + str(t1-t0))
##########################################
##########################################
####### Single filter image case #######
##########################################
##########################################
if multifilter == False:
#############################
# Read the filter image #
#############################
# now that we have the data, let's import the filter file
if verbose:
print("Reading filter file: "+filterfile)
t0 = time.time()
if isinstance(filterfile, SkyImage):
filter_img = filterfile
elif "image.out" in filterfile:
if src_distance is None:
print("A source distance in pc needs to be provided in order to process a RADMC3D image file")
return
else: filter_img = import_model_radmc(src_distance, filterfile)
elif "fits" in filterfile:
filter_img = import_model_fits(filterfile)
else:
print("Not a valid filter image option. Please provide a FITS file, a RADMC3D image file, or a SkyImage object).")
return
# the number of filter channels needs to be smaller than the data channels
if (len(filter_img.freqs) >= len(data.freqs)):
print("Number of channels in filter exceeds number of data channels. Filtering cannot continue.")
return
elif (len(filter_img.freqs) >= len(data.freqs)*0.5):
print("WARNING: Number of channels in data file seems small compared to width of filter. Make sure there is adequate baseline in the data file.")
if verbose:
t1 = time.time()
print("Read filter image: " + filterfile)
print("Filter read time = " + str(t1-t0))
##############################
# Interpolate the filter #
##############################
# if interpolation enabled, then make filter match data resolution (in velocity space)
if interpolate:
if verbose:
print("Interpolating filter")
t0 = time.time()
# determine the reference frequencies and freq spacings
filter_rfreq = np.mean(filter_img.freqs)
filter_delfreq = filter_img.freqs[1] - filter_img.freqs[0]
if data.freqs.shape[1] > 1:
print("WARNING: Detected multiple spws in the data. Proceeding with assumption that all data share same frequency range. Do not trust results unless this is confirmed")
data.freqs = data.freqs[:,0]
data_rfreq = np.mean(data.freqs)
data_delfreq = data.freqs[1] - data.freqs[0]
if data_delfreq < 0:
if filter_delfreq > 0:
filter_img.data = filter_img.data[:,:,::-1]
filter_delfreq = -filter_delfreq
else:
if filter_delfreq < 0:
filter_img.data = filter_img.data[:,:,::-1]
filter_delfreq = -filter_delfreq
filter_vwidth = filter_delfreq/filter_rfreq*c_kms
data_vwidth = data_delfreq/data_rfreq*c_kms
nchan_filter = len(filter_img.freqs)
nchan_data = len(data.freqs)
chan_grid = np.arange(nchan_filter)
interp_chans = (np.arange(nchan_data)*data_vwidth/filter_vwidth)[(np.arange(nchan_data)*data_vwidth/filter_vwidth) <= np.max(chan_grid)]
interp_grid_x, interp_grid_y, interp_grid_chan = np.meshgrid(np.arange(filter_img.data.shape[0]), np.arange(filter_img.data.shape[1]), interp_chans)
interp_grid_x = np.ravel(interp_grid_x)
interp_grid_y = np.ravel(interp_grid_y)
interp_grid_chan = np.ravel(interp_grid_chan)
interp_data = ndimage.map_coordinates(filter_img.data, [interp_grid_y, interp_grid_x, interp_grid_chan], order=1)
interp_data = interp_data.reshape((filter_img.data.shape[0], filter_img.data.shape[1], interp_chans.shape[0]))
filter_img.data = interp_data
filter_img.freqs = ndimage.map_coordinates(filter_img.freqs, [interp_chans], order=1)
if verbose:
t1 = time.time()
print("Filter interpolated from " + str(nchan_filter) + " channels to " + str(len(filter_img.freqs)) + " channels")
print("Filter interpolation time = " + str(t1-t0))
#########################################
# Calculate the filter visibilities #
#########################################
if verbose:
print("Generating kernel")
t0 = time.time()
nchan_kernel = len(filter_img.freqs)
kernel = np.empty(nchan_kernel*nvis, dtype='complex128').reshape(nvis, nchan_kernel)
kernel[:,:] = vis_sample(imagefile=filter_img, uu=data.uu, vv=data.vv, mu_RA=mu_RA, mu_DEC=mu_DEC, mod_interp=False)
# calculate the noise covariance matrix and its inverse
if window_func == "none":
R_inv = np.identity(nchan_kernel)
else:
# now we assuming window_func is "Hanning"
if binfactor > 4:
# treat binning factors larger than 4 as having no channel correlation (valid for Hanning window function)
R_inv = np.identity(nchan_kernel)
elif (binfactor == 1) or (binfactor == 2):
diagonals = [3./10.*np.ones(1000-1), np.ones(1000), 3./10.*np.ones(1000-1)]
R = sparse.diags(diagonals, [-1, 0, 1], format='csc').toarray()
R_inv = np.linalg.inv(R)[500-int(nchan_kernel/2.) : 500+int(math.ceil(nchan_kernel/2.)), 500-int(nchan_kernel/2.) : 500+int(math.ceil(nchan_kernel/2.))]
elif binfactor == 3:
diagonals = [1./6.*np.ones(1000-1), np.ones(1000), 1./6.*np.ones(1000-1)]
R = sparse.diags(diagonals, [-1, 0, 1], format='csc').toarray()
R_inv = np.linalg.inv(R)[500-int(nchan_kernel/2.) : 500+int(math.ceil(nchan_kernel/2.)), 500-int(nchan_kernel/2.) : 500+int(math.ceil(nchan_kernel/2.))]
elif binfactor == 4:
diagonals = [3./26.*np.ones(1000-1), np.ones(1000), 3./26.*np.ones(1000-1)]
R = sparse.diags(diagonals, [-1, 0, 1], format='csc').toarray()
R_inv = np.linalg.inv(R)[500-int(nchan_kernel/2.) : 500+int(math.ceil(nchan_kernel/2.)), 500-int(nchan_kernel/2.) : 500+int(math.ceil(nchan_kernel/2.))]
if verbose:
t1 = time.time()
print("Kernel generated")
print("Kernel generation time = " + str(t1-t0))
###############################
# Do the actual filtering #
###############################
if verbose:
print("Starting kernel convolution")
t0 = time.time()
xc = np.zeros((data.VV.shape[1] - nchan_kernel + 1), dtype='complex128')
kernel_noise_power = 0.
for v in np.arange(nvis):
# sketchy temporary check for flagged visibilities
if (not np.isnan(data.wgts[v])) and (data.wgts[v] > 0.00001):
xc += np.correlate(data.VV[v], np.matmul(data.wgts[v]*R_inv, kernel[v]))
kernel_noise_power += np.dot(kernel[v],np.matmul(data.wgts[v]*R_inv, kernel[v].conj()))
# normalize the output such that real and imag noise powers are both 1 (hence factor of sqrt(2))
        xc = xc/np.sqrt(kernel_noise_power)*np.sqrt(2)
# wujian@2018
"""
SI-SNR(scale-invariant SNR/SDR) measure of speech separation
"""
import numpy as np
from itertools import permutations
import pdb
def si_snr(x, s, remove_dc=True):
"""
Compute SI-SNR
Arguments:
x: vector, enhanced/separated signal
s: vector, reference signal(ground truth)
"""
def vec_l2norm(x):
        return np.linalg.norm(x, 2)
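    # NOTE (editor): the source is truncated here; the remainder below is a sketch of the
    # standard scale-invariant SNR computation (zero-mean the signals, project x onto s,
    # then take 20*log10 of the energy ratio), not necessarily the author's original code.
    if remove_dc:
        x_zm = x - np.mean(x)
        s_zm = s - np.mean(s)
        t = np.inner(x_zm, s_zm) * s_zm / vec_l2norm(s_zm) ** 2
        n = x_zm - t
    else:
        t = np.inner(x, s) * s / vec_l2norm(s) ** 2
        n = x - t
    return 20 * np.log10(vec_l2norm(t) / vec_l2norm(n))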
# coding: utf-8
import numpy as np
import random
import tensorflow as tf
import logging
import imageio
import read_data
from keras.utils import to_categorical
# from data_generator import DataGenerator
from place_pick_mil import MIL
# from evaluation.eval_reach import evaluate_vision_reach
# from evaluation.eval_push import evaluate_push
from tensorflow.python.platform import flags
import os
from functools import reduce
from operator import mul
def get_num_params():
nums=0
for variable in tf.trainable_variables():
shape= variable.get_shape()
nums+=reduce(mul, [dim.value for dim in shape], 1)
return nums
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
FLAGS = flags.FLAGS
LOGGER = logging.getLogger(__name__)
## Dataset/method options
flags.DEFINE_string('experiment', 'pick_place', 'sim_vision_reach or sim_push')
flags.DEFINE_string('data_path', './pick_dataset_origin/human_robot_dataset/',
'path to the directory where demo files that containing robot states and actions are stored')
flags.DEFINE_string('demo_gif_dir', 'data', 'path to the videos of demonstrations')
flags.DEFINE_string('gif_prefix', 'object', 'prefix of the video directory for each task, e.g. object_0 for task 0')
flags.DEFINE_integer('im_width', 264,
'width of the images in the demo videos, 125 for sim_push, and 80 for sim_vision_reach')
flags.DEFINE_integer('im_height', 196,
'height of the images in the demo videos, 125 for sim_push, and 64 for sim_vision_reach')
flags.DEFINE_integer('num_channels', 3, 'number of channels of the images in the demo videos')
flags.DEFINE_integer('T', 3, 'time horizon of the demo videos, 50 for reach, 100 for push')
flags.DEFINE_bool('hsv', False, 'convert the image to HSV format')
flags.DEFINE_bool('use_noisy_demos', False, 'use noisy demonstrations or not (for domain shift)')
flags.DEFINE_string('noisy_demo_gif_dir', None, 'path to the videos of noisy demonstrations')
flags.DEFINE_string('noisy_demo_file', None,
'path to the directory where noisy demo files that containing robot states and actions are stored')
flags.DEFINE_bool('no_action', True, 'do not include actions in the demonstrations for inner update')
flags.DEFINE_bool('no_state', False, 'do not include states in the demonstrations during training')
flags.DEFINE_bool('no_final_eept', False, 'do not include final ee pos in the demonstrations for inner update')
flags.DEFINE_bool('zero_state', True,
'zero-out states (meta-learn state) in the demonstrations for inner update (used in the paper with video-only demos)')
flags.DEFINE_bool('two_arms', False, 'use two-arm structure when state is zeroed-out')
flags.DEFINE_integer('training_set_size', -1, 'size of the training set, 1500 for sim_reach, 693 for sim push, and \
-1 for all data except those in validation set')
flags.DEFINE_integer('val_set_size', 150, 'size of the validation set, 150 for sim_reach and 76 for sim push')
## Training options
flags.DEFINE_integer('metatrain_iterations', 30000, 'number of metatraining iterations.') # 30k for pushing, 50k for reaching and placing
flags.DEFINE_integer('meta_batch_size', 16, 'number of tasks sampled per meta-update') # 15 for reaching, 15 for pushing, 12 for placing
flags.DEFINE_integer('meta_test_batch_size', 1, 'number of tasks sampled per meta-update') # 15 for reaching, 15 for pushing, 12 for placing
flags.DEFINE_float('meta_lr', 1e-4, 'the base learning rate of the generator')
flags.DEFINE_integer('update_batch_size', 1,
'number of examples used for inner gradient update (K for K-shot learning).')
flags.DEFINE_float('train_update_lr', 1e-4,
'step size alpha for inner gradient update.') # 0.001 for reaching, 0.01 for pushing and placing
flags.DEFINE_integer('num_updates', 1, 'number of inner gradient updates during training.') # 5 for placing
flags.DEFINE_bool('clip', True, 'use gradient clipping for fast gradient')
flags.DEFINE_float('clip_max', 100.0, 'maximum clipping value for fast gradient')
flags.DEFINE_float('clip_min', -100.0, 'minimum clipping value for fast gradient')
# flags.DEFINE_float('clip_max', 20.0, 'maximum clipping value for fast gradient')
# flags.DEFINE_float('clip_min', -20.0, 'minimum clipping value for fast gradient')
flags.DEFINE_bool('fc_bt', True, 'use bias transformation for the first fc layer')
flags.DEFINE_bool('all_fc_bt', False, 'use bias transformation for all fc layers')
flags.DEFINE_bool('conv_bt', False, 'use bias transformation for the first conv layer, N/A for using pretraining')
flags.DEFINE_integer('bt_dim', 10, 'the dimension of bias transformation for FC layers')
flags.DEFINE_string('pretrain_weight_path', 'N/A', 'path to pretrained weights')
flags.DEFINE_bool('train_pretrain_conv1', False, 'whether to finetune the pretrained weights')
flags.DEFINE_bool('two_head', True, 'use two-head architecture')
flags.DEFINE_bool('learn_final_eept', False, 'learn an auxiliary loss for predicting final end-effector pose')
flags.DEFINE_bool('learn_final_eept_whole_traj', False, 'learn an auxiliary loss for predicting final end-effector pose \
by passing the whole trajectory of eepts (used for video-only models)')
flags.DEFINE_bool('stopgrad_final_eept', True,
'stop the gradient when concatenate the predicted final eept with the feature points')
flags.DEFINE_integer('final_eept_min', 6, 'first index of the final eept in the action array')
flags.DEFINE_integer('final_eept_max', 8, 'last index of the final eept in the action array')
flags.DEFINE_float('final_eept_loss_eps', 0.1, 'the coefficient of the auxiliary loss')
flags.DEFINE_float('act_loss_eps', 1.0, 'the coefficient of the action loss')
flags.DEFINE_float('loss_multiplier', 100.0,
'the constant multiplied with the loss value, 100 for reach and 50 for push')
flags.DEFINE_bool('use_l1_l2_loss', False, 'use a loss with combination of l1 and l2')
flags.DEFINE_float('l2_eps', 0.01, 'coefficient of l2 loss')
flags.DEFINE_bool('shuffle_val', False, 'whether to choose the validation set via shuffling or not')
## Model options
flags.DEFINE_integer('random_seed', 0, 'random seed for training')
flags.DEFINE_bool('fp', True, 'use spatial soft-argmax or not')
flags.DEFINE_string('norm', 'layer_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_bool('dropout', False, 'use dropout for fc layers or not')
flags.DEFINE_float('keep_prob', 0.5, 'keep probability for dropout')
flags.DEFINE_integer('num_filters', 64,
'number of filters for conv nets -- 64 for placing, 16 for pushing, 40 for reaching.')
flags.DEFINE_integer('filter_size', 3, 'filter size for conv nets -- 3 for placing, 5 for pushing, 3 for reaching.')
flags.DEFINE_integer('num_conv_layers', 5, 'number of conv layers -- 5 for placing, 4 for pushing, 3 for reaching.')
flags.DEFINE_integer('num_strides', 3,
'number of conv layers with strided filters -- 3 for placing, 4 for pushing, 3 for reaching.')
flags.DEFINE_bool('conv', True, 'whether or not to use a convolutional network, only applicable in some cases')
flags.DEFINE_integer('num_fc_layers', 3, 'number of fully-connected layers')
flags.DEFINE_integer('layer_size', 200, 'hidden dimension of fully-connected layers')
flags.DEFINE_bool('temporal_conv_2_head', True,
'whether or not to use temporal convolutions for the two-head architecture in video-only setting.')
flags.DEFINE_bool('temporal_conv_2_head_ee', False, 'whether or not to use temporal convolutions for the two-head architecture in video-only setting \
for predicting the ee pose.')
flags.DEFINE_integer('temporal_filter_size', 10, 'filter size for temporal convolution')
flags.DEFINE_integer('temporal_num_filters', 32, 'number of filters for temporal convolution')
flags.DEFINE_integer('temporal_num_filters_ee', 32, 'number of filters for temporal convolution for ee pose prediction')
flags.DEFINE_integer('temporal_num_layers', 3, 'number of layers for temporal convolution for ee pose prediction')
flags.DEFINE_integer('temporal_num_layers_ee', 3, 'number of layers for temporal convolution for ee pose prediction')
flags.DEFINE_string('init', 'xavier', 'initializer for conv weights. Choose among random, xavier, and he')
flags.DEFINE_bool('max_pool', False, 'Whether or not to use max pooling rather than strided convolutions')
flags.DEFINE_bool('stop_grad', False, 'if True, do not use second derivatives in meta-optimization (for axis_angle)')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('save_dir', './atmaml_pick_logs', 'directory for summaries and checkpoints.')
# flags.DEFINE_string('save_dir', './amaml_human_pick_logs', 'directory for summaries and checkpoints.')
# flags.DEFINE_bool('resume', True, 'resume training if there is a model available')
flags.DEFINE_bool('resume', False, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('restore_iter', -1, 'iteration to load model (-1 for latest model)')
# flags.DEFINE_integer('restore_iter', 20000, 'iteration to load model (-1 for latest model)')
flags.DEFINE_integer('train_update_batch_size', -1, 'number of examples used for gradient update during training \
(use if you want to test with a different number).')
flags.DEFINE_integer('test_update_batch_size', 1, 'number of demos used during test time')
flags.DEFINE_float('gpu_memory_fraction', 0.8, 'fraction of memory used in gpu')
flags.DEFINE_bool('record_gifs', True, 'record gifs during evaluation')
flags.DEFINE_integer('color_num', 4, '')
flags.DEFINE_integer('object_num', 4, '')
flags.DEFINE_integer('train_task_num', 6, '')
flags.DEFINE_integer('task_num', 8, '')
flags.DEFINE_integer('demo_num', 5, '')
flags.DEFINE_integer('index_range', 20, '')
flags.DEFINE_integer('index_train_range', 20, '')
# flags.DEFINE_string('demo_type', 'robot', 'robot or human')
flags.DEFINE_string('demo_type', 'human', 'robot or human')
flags.DEFINE_string('extra_type', 'robot', 'robot or human') #opposite to demo_type
flags.DEFINE_string('compare_type', 'robot', 'robot or human')
flags.DEFINE_string('target_type', 'robot', '')
# flags.DEFINE_float('weight_xy', 0.999, '')
# flags.DEFINE_float('weight_z', 0.001, '')
# flags.DEFINE_float('weight_rxyz', 0.001, '')
flags.DEFINE_integer('clip_action_size', 2, 'dimension of the clipped action')
flags.DEFINE_integer('action_size', 2, 'dimension of the action')
flags.DEFINE_integer('state_size', 6, 'dimension of the state')
flags.DEFINE_integer('output_data', 6, '')
# flags.DEFINE_float('margin', 1.0, 'margin of loss')
# flags.DEFINE_float('pre_margin', 1.0, 'margin of loss')
flags.DEFINE_float('pre_margin_clip', 1.0, '')
flags.DEFINE_float('pre_margin', 2.0, 'pre_margin of loss')
flags.DEFINE_float('margin', 2.0, 'margin of loss')
flags.DEFINE_float('pre_margin_coefficient', 10.0, 'coefficient of the pre-update contrastive loss')
flags.DEFINE_float('margin_coefficient', 10.0, 'coefficient of the post-update contrastive loss')
flags.DEFINE_float('pre_tar_coefficient', 0.0, '')
flags.DEFINE_float('post_tar_coefficient', 0.0, '')
flags.DEFINE_bool('norm_pre_contrastive', True, 'norm for pre_task contrastive ')
flags.DEFINE_bool('norm_post_contrastive', True, 'norm for post_task contrastive ')
flags.DEFINE_bool('all_frame_contrastive', True, '')
flags.DEFINE_bool('cos_contrastive_loss', True, 'True for cos loss, False for kl loss ')
flags.DEFINE_bool('pre_contrastive', False, 'task contrastive for pre-update ')
flags.DEFINE_bool('post_contrastive', False, 'task contrastive for post-update ')
# flags.DEFINE_bool('amaml', False, 'true for amaml')
flags.DEFINE_bool('amaml', True, 'true for amaml')
flags.DEFINE_bool('amaml_extra_domain', True, 'true for extra_domain')
flags.DEFINE_bool('cross_domain', True, 'use human and robot ')
flags.DEFINE_bool('cross_adapt_domain', True, 'adapt human/robot demos')
flags.DEFINE_bool('zero_robot_domain', True, 'adapt human/robot demos')
flags.DEFINE_bool('random_adapt_domain', True, 'randomly adapt human/robot demos')
flags.DEFINE_bool('random_single_domain', True, 'randomly adapt human/robot demos')
flags.DEFINE_integer('random_single_domain_margin', 50, 'random single domain margin')
flags.DEFINE_integer('embed_size', 0, 'size of embedding')
flags.DEFINE_bool('tar_mil', False, 'with target loss')
flags.DEFINE_bool('record_loss', False, 'true for amaml')
flags.DEFINE_bool('contrastive_fc', False, '')
# flags.DEFINE_bool('amaml', False, 'true for amaml')
def generate_data(if_train=True):
if if_train:
batch_size = FLAGS.meta_batch_size
else:
batch_size = FLAGS.meta_test_batch_size
color_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.color_num
print('color_list', color_list)
object_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.object_num
print('object_list', object_list)
if if_train:
task_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.train_task_num
else:
task_list = np.random.randint(FLAGS.train_task_num, FLAGS.task_num, size=batch_size)
print('task_list', task_list)
demo_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.demo_num
print('demo_list', demo_list)
target_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.demo_num
print('target_list', target_list)
obsas = []
obsbs = []
stateas = []
statebs = []
actionas = []
actionbs = []
color_num = ['color_blue', 'color_green', 'color_orange', 'color_yellow']
# color_num = ['color_blue', 'color_green', 'color_orange']
object_num = ['object_type_animal', 'object_type_car', 'object_type_dinosaur', 'object_type_tool']
for element in range(0, batch_size):
demo_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
FLAGS.data_path, color_num[color_list[element]], object_num[object_list[element]], FLAGS.demo_type,
task_list[element], demo_list[element])
target_path = '%s/%s/%s/%s/task_%d/demo_%d' % (
FLAGS.data_path, color_num[color_list[element]], object_num[object_list[element]], FLAGS.target_type,
task_list[element], target_list[element])
print('demo_path', demo_path)
print('target_path', target_path)
index = np.random.randint(0, FLAGS.index_train_range)
# if if_train:
# index = np.random.randint(0, FLAGS.index_train_range)
# else:
# index = np.random.randint(FLAGS.index_train_range, FLAGS.index_range)
if FLAGS.demo_type == 'robot':
obsa, statea, actiona = read_data.Read_Robot_Data2(demo_path, FLAGS.T, index)
elif FLAGS.demo_type == 'human':
obsa, statea, actiona = read_data.Read_Human_Data2(demo_path, FLAGS.T, index)
obsb, stateb, actionb = read_data.Read_Robot_Data2(target_path, FLAGS.T, index)
obsas.append(obsa)
obsbs.append(obsb)
stateas.append(statea)
statebs.append(stateb)
actionas.append(actiona)
actionbs.append(actionb)
obsas = np.reshape(obsas, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels])
obsbs = np.reshape(obsbs, [batch_size, FLAGS.T, FLAGS.im_width * FLAGS.im_height * FLAGS.num_channels])
actionas = np.reshape(actionas, [batch_size, FLAGS.T, FLAGS.output_data])
actionbs = np.reshape(actionbs, [batch_size, FLAGS.T, FLAGS.output_data])
stateas = np.zeros([batch_size, FLAGS.T, FLAGS.output_data])
statebs = np.zeros([batch_size, FLAGS.T, FLAGS.output_data])
return obsas, obsbs, actionas, actionbs, stateas, statebs
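# Illustrative usage (added note, names taken from the flags above): one call draws a
# single meta-batch, e.g.
#   obsas, obsbs, actionas, actionbs, stateas, statebs = generate_data(if_train=True)
#   # obsas.shape == (FLAGS.meta_batch_size, FLAGS.T, FLAGS.im_width*FLAGS.im_height*FLAGS.num_channels)
#   # actionas.shape == (FLAGS.meta_batch_size, FLAGS.T, FLAGS.output_data)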
def generate_atmaml_data(if_train=True):
if if_train:
batch_size = FLAGS.meta_batch_size
else:
batch_size = FLAGS.meta_test_batch_size
color_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.color_num
# print('color_list', color_list)
compare_color_list = (color_list + np.random.randint(1, FLAGS.color_num-1)) % FLAGS.color_num
# print('compare_color_list', compare_color_list)
object_list = (np.random.randint(0, 100, size=batch_size) + 1) % FLAGS.object_num
from pyglet import app
from pyglet.gl import *
from pyglet.window import key, mouse, Window
import numpy as np
from scipy.spatial import cKDTree
from ctypes import *
import math
from map_meta_tools import load_map_meta
from map_geom_tools import load_map_geom
from map_plot_tools import load_map_plot
view_width = 640
view_height = 480
config = Config(sample_buffers=1, samples=8, double_buffer=True)
window = Window(config=config, width=view_width, height=view_height, resizable=True, caption='<none>')
VERTEX_SHADER_SOURCE = b'''
#version 330
layout(location = 0) in vec2 a_position;
layout(location = 1) in int a_region;
out vec4 v_color;
uniform mat3 u_map_to_clip;
layout(std140) uniform u_region_color_block {
vec4 u_region_color[3026];
};
void main()
{
v_color = u_region_color[a_region];
vec2 v_position = (u_map_to_clip * vec3(a_position, 1.0)).xy;
gl_Position = vec4(v_position, 0.0, 1.0);
}
'''
FRAGMENT_SHADER_SOURCE = b'''
#version 330
in vec4 v_color;
out vec4 f_color;
void main()
{
f_color = v_color;
}
'''
map_meta = load_map_meta()
map_geom = load_map_geom()
map_plot = load_map_plot()
vertex_array = map_geom['vertex_data']
element_array = map_geom['element_data']
color_array_dict = {
key.A: map_plot['age_data'],
key.W: map_plot['water_data'],
key.F: map_plot['forest_data'],
key.C: map_plot['cluster_data']
}
def build_shader(shader_info):
shader = glCreateShader(shader_info['type'])
glShaderSource(shader, 1,
pointer(cast(c_char_p(shader_info['source']), POINTER(GLchar))),
pointer(GLint(len(shader_info['source']))))
glCompileShader(shader)
return shader
def build_shader_program(shader_info_list):
shader_list = []
for shader_info in shader_info_list:
shader_list.append(build_shader(shader_info))
shader_program = glCreateProgram()
for shader in shader_list:
glAttachShader(shader_program, shader)
glLinkProgram(shader_program)
for shader in shader_list:
glDetachShader(shader_program, shader)
return shader_program
shader_program = build_shader_program([
{'type': GL_VERTEX_SHADER, 'source': VERTEX_SHADER_SOURCE},
{'type': GL_FRAGMENT_SHADER, 'source': FRAGMENT_SHADER_SOURCE}
])
uniform_map_to_clip = glGetUniformLocation(shader_program, c_char_p(b'u_map_to_clip'))
def update_buffer_content(buffer_type, buffer, buffer_data, buffer_usage):
glBindBuffer(buffer_type, buffer)
glBufferData(buffer_type, buffer_data.nbytes, buffer_data.ctypes.data_as(POINTER(GLvoid)), buffer_usage)
glBindBuffer(buffer_type, 0)
def build_buffer(buffer_type, buffer_data, buffer_usage):
buffer = GLuint()
glGenBuffers(1, pointer(buffer))
update_buffer_content(buffer_type, buffer, buffer_data, buffer_usage)
buffer_element_size = buffer_data.nbytes // buffer_data.shape[0]
buffer_element_count = buffer_data.nbytes // buffer_element_size
return buffer, buffer_element_size, buffer_element_count
vertex_buffer, vertex_size, _ = build_buffer(GL_ARRAY_BUFFER, vertex_array, GL_STATIC_DRAW)
element_buffer, element_size, element_count = build_buffer(GL_ELEMENT_ARRAY_BUFFER, element_array, GL_STATIC_DRAW)
region_color_buffer, _, _ = build_buffer(GL_UNIFORM_BUFFER, color_array_dict[key.F], GL_DYNAMIC_DRAW)
def on_launch():
uniform_region_color_block = glGetUniformBlockIndex(shader_program, c_char_p(b'u_region_color_block'))
glUniformBlockBinding(shader_program, uniform_region_color_block, 0)
glBindBufferBase(GL_UNIFORM_BUFFER, 0, region_color_buffer)
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer)
glEnableVertexAttribArray(0)
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, vertex_size, 0)
glEnableVertexAttribArray(1)
glVertexAttribIPointer(1, 1, GL_UNSIGNED_SHORT, vertex_size, 2 * sizeof(GLfloat))
glBindBuffer(GL_ARRAY_BUFFER, 0)
log_zoom = -8.0
mat_view_to_clip = None
mat_map_to_view = None
map_origin_x = (83748.4296875 + 732907.75) / 2
map_origin_y = (6629044.0 + 7776450.0) / 2
map_offset_x = 0.0
map_offset_y = 0.0
def update_view():
global mat_view_to_clip, mat_map_to_view
zoom = math.exp(log_zoom)
mid_x = map_origin_x + map_offset_x
mid_y = map_origin_y + map_offset_y
mat_view_to_clip = np.array([[2 / window.width, 0, -1],
[0, 2 / window.height, -1],
[0, 0, 1]], dtype='=f4')
mat_map_to_view = np.array([[zoom, 0, window.width / 2 - zoom * mid_x],
[0, zoom, window.height / 2 - zoom * mid_y],
[0, 0, 1]], dtype='=f4')
centroid_tree = cKDTree(map_geom['element_middle'])
centroid_tree_radius = np.max(map_geom['element_extent'])
from math import sqrt, pi
from copy import deepcopy
import pytest
import numpy as np
import networkx as nx
from networkx.algorithms import isomorphism as iso
from clovek_ne_jezi_se.utils import (
make_even_points_on_circle, make_dict_from_lists,
GraphQueryParams,
is_label_isomorphic,
get_filtered_subgraph_view,
get_filtered_node_names,
get_node_attribute_mapped_list
)
@pytest.mark.parametrize(
'center,radius,n_points,start_radians,clockwise,expected',
[
(
(0, 0), 1, 3, 0, True,
np.array([
np.array([1., 0.]),
np.array([-1. / 2, -sqrt(3) / 2]),
np.array([-1. / 2, sqrt(3) / 2])
])
),
(
(0, 0), 1, 4, 0, True,
np.array([
np.array([1., 0.])
"""
utils
auxiliary module
"""
import numpy as np
import numpy.linalg as la
import scipy.interpolate as ip
import logging
logger = logging.getLogger(__name__)
def K(d):
"""space phasor transformation matrix
(Inverse Park Transformation) T-1 * dq
arguments:
d: rotation angle
returns transformation matrix
"""
return np.array((
(-np.cos(d), np.sin(d)),
(-np.cos(d-2*np.pi/3)
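# Added note (the matrix above is truncated): a common inverse Park/space-phasor matrix
# continues this pattern with the remaining phases, i.e. rows built from d, d - 2*pi/3 and
# d + 2*pi/3, so that phase quantities follow from something like K(d) @ np.array([ud, uq]).
# Sign conventions differ between references, so treat this as an illustration only.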
"""
Cross-match cluster catalogs made from individual maps in order to check calibration.
"""
import os
import sys
import glob
import numpy as np
import astropy.table as atpy
from nemo import catalogs
from nemo import plotSettings
from collections import OrderedDict as odict
import pylab as plt
import IPython
#------------------------------------------------------------------------------------------------------------
# Main
# Pick one
#relativeTo='median'
#relativeTo='absolute'
#relativeTo='S16'
relativeTo='S18'
# This only makes sense if relativeTo = 'median', 'S16, or 'S18'
#mode='residualDiff'
mode='ratio'
# Applied to reference catalog only (since now doing forced photometry)
refSNRCut=10.0
print("relativeTo = %s; refSNRCut = %.1f" % (relativeTo, refSNRCut))
plotsDir="plots_%s_%s_%.1f" % (mode, relativeTo, refSNRCut)
os.makedirs(plotsDir, exist_ok = True)
# For testing: multiply ycRef by this
calFactor=1.00
if calFactor != 1.0:
print("WARNING: calFactor set to %.2f - multiplying ycRef by this factor." % (calFactor))
# Reference catalog - used for cross-match positions across other catalogs
refTab=atpy.Table().read("../MFMF_S18_auto/MFMF_S18_auto_M500.fits")
if relativeTo == 'S16':
refTab=atpy.Table().read("../MFMF_S16_auto/MFMF_S16_auto_M500.fits")
keepCols=['name', 'RADeg', 'decDeg', 'fixed_SNR', 'fixed_y_c', 'fixed_err_y_c']
removeCols=[]
for k in refTab.keys():
if k not in keepCols:
removeCols.append(k)
refTab.remove_columns(removeCols)
refTab=refTab[refTab['fixed_SNR'] > refSNRCut]
# Dictionaries in which to store all cross-matched y_c values
# Each entry will be a list
ycRef={}
ycErrRef={}
yc={}
ycErr={}
labels={}
# Cross match against nemo output
print("Collecting fixed_y_c measurements for each cluster across all maps")
xMatchFiles=glob.glob("outputCatalogs/*.fits")
xMatchFiles.sort()
for f in xMatchFiles:
label=os.path.split(f)[-1].split("_optimal")[0]
print(" %s" % (label))
tab=atpy.Table().read(f)
#tab=tab[tab['fixed_SNR'] > SNRCut]
try:
refMatched, tabMatched, sep=catalogs.crossMatch(refTab, tab, radiusArcmin = 1.4)
except:
raise Exception("Matching probably failed because SNRCut is too low so there were no matches")
for ref, match in zip(refMatched, tabMatched):
name=ref['name']
if name not in yc.keys():
yc[name]=[]
ycErr[name]=[]
labels[name]=[]
ycRef[name]=ref['fixed_y_c']*calFactor
ycErrRef[name]=ref['fixed_err_y_c']
yc[name].append(match['fixed_y_c'])
ycErr[name].append(match['fixed_err_y_c'])
labels[name].append(label)
nameList=list(yc.keys()); nameList.sort()
for name in nameList:
yc[name]=np.array(yc[name])
ycErr[name]=np.array(ycErr[name])
# Check scaled residuals relative to median
# Make a plot for each cluster with a fair number of data points
# Gather a set so we know the typical offset for each map
# We can also make a map of typical offset for each cluster
minPoints=10
resByMap={}
nameList=list(yc.keys()); nameList.sort()
print("Making plots of fixed_y_c for each cluster")
for name in nameList:
# Plot
if len(yc[name]) < minPoints:
continue
if relativeTo == 'absolute':
res=yc[name]
resSigma=ycErr[name]
ylim=None
ylabel="$\\tilde{y_0}\, (10^{-4})$"
else:
if mode == 'residualDiff':
if relativeTo == 'median':
res=(yc[name]-np.median(yc[name]))/ycErr[name]
else:
# Relative to S16 or S18 act+planck co-add
res=(np.array(yc[name])-ycRef[name])/np.sqrt(np.array(ycErr[name])**2+ycErrRef[name]**2)
resSigma=[1]*len(res)
ylabel="$\Delta \\tilde{y_0} (\sigma)$"
ylim=(-4, 4)
elif mode == 'ratio':
if relativeTo == 'median':
res=yc[name]/np.median(yc[name])
resSigma=(ycErr[name]/yc[name])*res
else:
res=np.array(yc[name])/np.array(ycRef[name])
import sys
sys.path.append('../')
import constants as cnst
import os
import torch
import tqdm
import numpy as np
import constants
SHAPE = [0, 1, 2]
EXP = [50, 51, 52]
POSE = [150, 151, 152, 153, 154, 155]
def centre_using_nearest(flame_seq, flame_dataset, one_translation_for_whole_seq=True):
shape_weight = 0
pose_weight = 0.7
if one_translation_for_whole_seq:
dist = np.linalg.norm(flame_dataset[:, 150:156] - flame_seq[0, 150:156], axis=-1)
min_arg = np.argmin(dist)
flame_seq[:, 156:] = flame_dataset[min_arg, 156:]
else:
for i in range(len(flame_seq)):
shape_dist = np.linalg.norm(flame_dataset[:, SHAPE] - flame_seq[i, SHAPE], axis=-1)
pose_dist = np.linalg.norm(flame_dataset[:, POSE] - flame_seq[i, POSE], axis=-1)
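# Added sketch (the per-frame branch above is truncated): a plausible continuation combines
# the two distances with the weights defined at the top of the function and copies the
# nearest entry's translation, e.g.
#   dist = shape_weight * shape_dist + pose_weight * pose_dist
#   min_arg = np.argmin(dist)
#   flame_seq[i, 156:] = flame_dataset[min_arg, 156:]
# The exact weighting is an assumption, not taken from the original source.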
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import numpy as np
import pandas as pd
import pytest
from sklearn.datasets import make_classification, make_regression
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.svm import SVC
from .... import tensor as mt, dataframe as md, execute
from ....core import enter_mode
from .._bagging import (
_extract_bagging_io,
BaggingSample,
BaggingSampleReindex,
BaggingClassifier,
BaggingRegressor,
)
def _get_tileable_chunk_data(sync_session, tileable):
@enter_mode(build=True)
async def _async_fetch():
tuples = []
async_session = sync_session._session
meta_api = async_session._meta_api
t, indexes = async_session._get_to_fetch_tileable(tileable)
delays = [
meta_api.get_chunk_meta.delay(chunk.key, fields=["bands"])
for chunk in t.chunks
]
band_infos = await meta_api.get_chunk_meta.batch(*delays)
for chunk, band_info in zip(t.chunks, band_infos):
band = band_info["bands"][0]
storage_api = await async_session._get_storage_api(band)
data = await storage_api.get(chunk.key)
tuples.append((t, chunk, data))
return tuples
future = asyncio.run_coroutine_threadsafe(
_async_fetch(), sync_session._isolation.loop
)
return future.result(120 if "CI" in os.environ else None)
@pytest.mark.parametrize(
"use_dataframe, max_samples, max_features, with_labels, with_weights",
[
(False, 10, 1.0, False, False),
(False, 10, 0.5, True, True),
(True, 10, 1.0, False, False),
(True, 10, 0.5, True, True),
],
)
def test_bagging_sample_execution(
setup, use_dataframe, max_samples, max_features, with_labels, with_weights
):
rs = np.random.RandomState(0)
raw_data = rs.randint(100, size=(100, 50))
if not use_dataframe:
t = mt.tensor(raw_data, chunk_size=20)
else:
raw_data = pd.DataFrame(raw_data)
t = md.DataFrame(raw_data, chunk_size=20)
raw_labels = rs.choice([0, 1, 2], size=100)
raw_weights = rs.random(100)
labels = mt.tensor(raw_labels, chunk_size=20) if with_labels else None
weights = mt.tensor(raw_weights, chunk_size=20) if with_weights else None
sample_op = BaggingSample(
n_estimators=10,
max_samples=max_samples,
max_features=max_features,
random_state=rs,
)
result_tuple = execute(*sample_op(t, labels, weights))
t_sampled, t_labels, t_weights, t_feature_indices = _extract_bagging_io(
result_tuple, sample_op, output=True
)
label_chunks, weights_chunks, feature_idx_chunks = dict(), dict(), dict()
for t, chunks_dict in zip((t_labels, t_weights), (label_chunks, weights_chunks)):
if t is None:
continue
for tiled, chunk, chunk_data in _get_tileable_chunk_data(setup, t):
assert len(tiled.chunks) == 5
chunks_dict[chunk.index] = chunk_data
for d in chunk_data:
assert d.shape == (10,)
if t_feature_indices is not None:
for tiled, chunk, chunk_data in _get_tileable_chunk_data(
setup, t_feature_indices
):
assert len(tiled.chunks) == 5
feature_idx_chunks[chunk.index] = chunk_data
assert chunk_data.shape == (2, int(max_features * raw_data.shape[1]))
for tiled, chunk, chunk_data in _get_tileable_chunk_data(setup, t_sampled):
assert len(tiled.chunks) == 5
assert len(chunk_data) == 2
for est_id, d in enumerate(chunk_data):
assert d.shape == (10, int(max_features * raw_data.shape[1]))
if use_dataframe:
raw_sliced = raw_data.loc[d.index]
if label_chunks:
label_chunk = label_chunks[(chunk.index[0],)][est_id]
np.testing.assert_array_equal(raw_labels[d.index], label_chunk)
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
from copy import copy
# output the index of when v has a continuous string of i
# get_runs([0,0,1,1,1,0,0],1) gives [2],[5],[3]
def get_runs(v, i):
bounded = np.hstack(([0], (v==i).astype(int), [0]))
difs = np.diff(bounded)
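# Added sketch (the function body above is truncated): the standard run-length pattern that
# matches the example in the comment above would continue as
#   run_starts = np.where(difs > 0)[0]
#   run_ends = np.where(difs < 0)[0]
#   return run_starts, run_ends, run_ends - run_starts
# so get_runs([0,0,1,1,1,0,0], 1) gives [2], [5], [3].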
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib as mpl
mpl.rcParams['font.sans-serif'] = [
'Roboto Condensed', 'Roboto Condensed Regular'
]
import seaborn as sns
import math
import rdkit
import itertools
from rdkit import Chem, DataStructs
from rdkit.Chem import rdDepictor
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem import AllChem, Draw, Descriptors, QED
import json
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.optimize import leastsq
from scipy import interpolate
import cairosvg as cs
def get_valid_actions(state, atom_types, allow_removal, allow_no_modification,
allowed_ring_sizes, allow_bonds_between_rings):
"""Computes the set of valid actions for a given state.
Args:
state: String SMILES; the current state. If None or the empty string, we
assume an "empty" state with no atoms or bonds.
atom_types: Set of string atom types, e.g. {'C', 'O'}.
allow_removal: Boolean whether to allow actions that remove atoms and bonds.
allow_no_modification: Boolean whether to include a "no-op" action.
allowed_ring_sizes: Set of integer allowed ring sizes; used to remove some
actions that would create rings with disallowed sizes.
allow_bonds_between_rings: Boolean whether to allow actions that add bonds
between atoms that are both in rings.
Returns:
Set of RDKit Mol containing the valid actions (technically, the set of
all states that are acceptable from the given state).
Raises:
ValueError: If state does not represent a valid molecule.
"""
if not state:
# Available actions are adding a node of each type.
return copy.deepcopy(atom_types)
mol = Chem.MolFromSmiles(state)
if mol is None:
raise ValueError('Received invalid state: %s' % state)
# atom_valences = dict(
# #zip(sorted(atom_types), molecules.atom_valences(sorted(atom_types))))
# zip(sorted(atom_types), molecules_py.atom_valences(sorted(atom_types))))
atom_valences = {'C': 4, 'H': 1, 'O': 2, 'N': 3}
atoms_with_free_valence = {
i: [
atom.GetIdx()
for atom in mol.GetAtoms()
# Only atoms that allow us to replace at least one H with a new bond
# are enumerated here.
if atom.GetNumImplicitHs() >= i
] for i in range(1, max(atom_valences.values()))
}
valid_actions = set()
valid_actions.update(
_atom_addition(
mol,
atom_types=atom_types,
atom_valences=atom_valences,
atoms_with_free_valence=atoms_with_free_valence))
valid_actions.update(
_bond_addition(
mol,
atoms_with_free_valence=atoms_with_free_valence,
allowed_ring_sizes=allowed_ring_sizes,
allow_bonds_between_rings=allow_bonds_between_rings))
if allow_removal:
valid_actions.update(_bond_removal(mol))
if allow_no_modification:
#valid_actions.add(Chem.MolToSmiles(mol))
valid_actions.add(Chem.Mol(mol))
return valid_actions
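# Illustrative call (added note, argument values are examples only):
#   actions = get_valid_actions('CCO', atom_types={'C', 'O', 'N'}, allow_removal=True,
#                               allow_no_modification=True, allowed_ring_sizes={5, 6},
#                               allow_bonds_between_rings=False)
#   # returns a set of RDKit Mol objects reachable from the state in one edit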
def _atom_addition(state, atom_types, atom_valences, atoms_with_free_valence):
"""Computes valid actions that involve adding atoms to the graph.
Actions:
* Add atom (with a bond connecting it to the existing graph)
Each added atom is connected to the graph by a bond. There is a separate
action for connecting to (a) each existing atom with (b) each valence-allowed
bond type. Note that the connecting bond is only of type single, double, or
triple (no aromatic bonds are added).
For example, if an existing carbon atom has two empty valence positions and
the available atom types are {'C', 'O'}, this section will produce new states
where the existing carbon is connected to (1) another carbon by a double bond,
(2) another carbon by a single bond, (3) an oxygen by a double bond, and
(4) an oxygen by a single bond.
Args:
state: RDKit Mol.
atom_types: Set of string atoms.
atom_valences: Dict mapping string atom types to integer valences.
atoms_with_free_valence: Dict mapping integer minimum available valence
values to lists of integer atom indices. For instance, all atom indices in
atoms_with_free_valence[2] have at least two available valence positions.
Returns:
Set of RDKit Mol; the available actions.
"""
bond_order = {
1: Chem.BondType.SINGLE,
2: Chem.BondType.DOUBLE,
3: Chem.BondType.TRIPLE,
}
atom_addition = set()
for i in range(1, max(atom_valences.values())):
if i not in bond_order:
continue # Skip valences that are too high.
for atom in atoms_with_free_valence[i]:
for element in atom_types:
if atom_valences[element] >= i:
new_state = Chem.RWMol(state)
idx = new_state.AddAtom(Chem.Atom(element))
new_state.AddBond(atom, idx, bond_order[i])
sanitization_result = Chem.SanitizeMol(new_state, catchErrors=True)
if sanitization_result:
continue # Skip the molecule when sanitization fails.
#atom_addition.add(Chem.MolToSmiles(new_state))
atom_addition.add(new_state)
return atom_addition
def _bond_addition(state, atoms_with_free_valence, allowed_ring_sizes,
allow_bonds_between_rings):
"""Computes valid actions that involve adding bonds to the graph.
Actions (where allowed):
* None->{single,double,triple}
* single->{double,triple}
* double->{triple}
Note that aromatic bonds are not modified.
Args:
state: RDKit Mol.
atoms_with_free_valence: Dict mapping integer minimum available valence
values to lists of integer atom indices. For instance, all atom indices in
atoms_with_free_valence[2] have at least two available valence positions.
allowed_ring_sizes: Set of integer allowed ring sizes; used to remove some
actions that would create rings with disallowed sizes.
allow_bonds_between_rings: Boolean whether to allow actions that add bonds
between atoms that are both in rings.
Returns:
Set of RDKit Mol; the available actions.
"""
bond_orders = [
None,
Chem.BondType.SINGLE,
Chem.BondType.DOUBLE,
Chem.BondType.TRIPLE,
]
bond_addition = set()
for valence, atoms in atoms_with_free_valence.items():
if valence > 3:
continue # Skip valences that are too high.
for atom1, atom2 in itertools.combinations(atoms, 2):
# Get the bond from a copy of the molecule so that SetBondType() doesn't
# modify the original state.
bond = Chem.Mol(state).GetBondBetweenAtoms(atom1, atom2)
new_state = Chem.RWMol(state)
# Kekulize the new state to avoid sanitization errors; note that bonds
# that are aromatic in the original state are not modified (this is
# enforced by getting the bond from the original state with
# GetBondBetweenAtoms()).
Chem.Kekulize(new_state, clearAromaticFlags=True)
if bond is not None:
if bond.GetBondType() not in bond_orders:
continue # Skip aromatic bonds.
idx = bond.GetIdx()
# Compute the new bond order as an offset from the current bond order.
bond_order = bond_orders.index(bond.GetBondType())
bond_order += valence
if bond_order < len(bond_orders):
idx = bond.GetIdx()
bond.SetBondType(bond_orders[bond_order])
new_state.ReplaceBond(idx, bond)
else:
continue
# If do not allow new bonds between atoms already in rings.
elif (not allow_bonds_between_rings and
(state.GetAtomWithIdx(atom1).IsInRing() and
state.GetAtomWithIdx(atom2).IsInRing())):
continue
# If the distance between the current two atoms is not in the
# allowed ring sizes
elif (allowed_ring_sizes is not None and
len(Chem.rdmolops.GetShortestPath(
state, atom1, atom2)) not in allowed_ring_sizes):
continue
else:
new_state.AddBond(atom1, atom2, bond_orders[valence])
sanitization_result = Chem.SanitizeMol(new_state, catchErrors=True)
if sanitization_result:
continue # Skip the molecule when sanitization fails.
#bond_addition.add(Chem.MolToSmiles(new_state))
bond_addition.add(new_state)
return bond_addition
def _bond_removal(state):
"""Computes valid actions that involve removing bonds from the graph.
Actions (where allowed):
* triple->{double,single,None}
* double->{single,None}
* single->{None}
Bonds are only removed (single->None) if the resulting graph has zero or one
disconnected atom(s); the creation of multi-atom disconnected fragments is not
allowed. Note that aromatic bonds are not modified.
Args:
state: RDKit Mol.
Returns:
Set of RDKit Mol; the available actions.
"""
bond_orders = [
None,
Chem.BondType.SINGLE,
Chem.BondType.DOUBLE,
Chem.BondType.TRIPLE,
]
bond_removal = set()
for valence in [1, 2, 3]:
for bond in state.GetBonds():
# Get the bond from a copy of the molecule so that SetBondType() doesn't
# modify the original state.
bond = Chem.Mol(state).GetBondBetweenAtoms(bond.GetBeginAtomIdx(),
bond.GetEndAtomIdx())
if bond.GetBondType() not in bond_orders:
continue # Skip aromatic bonds.
new_state = Chem.RWMol(state)
# Kekulize the new state to avoid sanitization errors; note that bonds
# that are aromatic in the original state are not modified (this is
# enforced by getting the bond from the original state with
# GetBondBetweenAtoms()).
Chem.Kekulize(new_state, clearAromaticFlags=True)
# Compute the new bond order as an offset from the current bond order.
bond_order = bond_orders.index(bond.GetBondType())
bond_order -= valence
if bond_order > 0: # Downgrade this bond.
idx = bond.GetIdx()
bond.SetBondType(bond_orders[bond_order])
new_state.ReplaceBond(idx, bond)
sanitization_result = Chem.SanitizeMol(new_state, catchErrors=True)
if sanitization_result:
continue # Skip the molecule when sanitization fails.
#bond_removal.add(Chem.MolToSmiles(new_state))
bond_removal.add(new_state)
elif bond_order == 0: # Remove this bond entirely.
atom1 = bond.GetBeginAtom().GetIdx()
atom2 = bond.GetEndAtom().GetIdx()
new_state.RemoveBond(atom1, atom2)
sanitization_result = Chem.SanitizeMol(new_state, catchErrors=True)
if sanitization_result:
continue # Skip the molecule when sanitization fails.
smiles = Chem.MolToSmiles(new_state)
parts = sorted(smiles.split('.'), key=len)
# We define the valid bond removing action set as the actions
# that remove an existing bond, generating only one independent
# molecule, or a molecule and an atom.
if len(parts) == 1 or len(parts[0]) == 1:
#bond_removal.add(parts[-1])
bond_removal.add(Chem.MolFromSmiles(parts[-1]))
return bond_removal
def highlights_diff(original_mol, next_mol):
highlight_atoms = []
original_num_atoms = len(original_mol.GetAtoms())
next_num_atoms = len(next_mol.GetAtoms())
for i in range(min(original_num_atoms, next_num_atoms)):
if original_mol.GetAtoms()[i].GetSymbol() != next_mol.GetAtoms(
)[i].GetSymbol():
highlight_atoms.append(next_mol.GetAtoms()[i].GetIdx())
if next_num_atoms > original_num_atoms:
highlight_atoms.extend(range(original_num_atoms, next_num_atoms))
highlight_bonds = []
original_num_bonds = len(original_mol.GetBonds())
next_num_bonds = len(next_mol.GetBonds())
for i in range(min(original_num_bonds, next_num_bonds)):
if original_mol.GetBonds()[i].GetBondType() != next_mol.GetBonds(
)[i].GetBondType():
highlight_bonds.append(next_mol.GetBonds()[i].GetIdx())
if next_num_bonds > original_num_bonds:
highlight_bonds.extend(range(original_num_bonds, next_num_bonds))
return highlight_atoms, highlight_bonds
def tidy_smiles(smiles):
new_smiles = {
'weight_0': list(set(smiles['weight_0'][-30:])),
'weight_1': list(set(smiles['weight_1'][-30:])),
'weight_2': list(set(smiles['weight_2'][-150:])),
'weight_3': list(set(smiles['weight_3'][-150:])),
'weight_4': list(set(smiles['weight_4'][-150:])),
'weight_5': list(set(smiles['weight_5'][-150:]))
}
return new_smiles
def get_properties(smiles, target_molecule='C1CCC2CCCCC2C1'):
target_mol_fp = AllChem.GetMorganFingerprintAsBitVect(
Chem.MolFromSmiles(target_molecule), radius=2, nBits=2048)
mol = Chem.MolFromSmiles(smiles)
if mol is None:
return 0.0, 0.0
fingerprint_structure = AllChem.GetMorganFingerprintAsBitVect(
mol, radius=2, nBits=2048)
sim = DataStructs.TanimotoSimilarity(target_mol_fp, fingerprint_structure)
qed = QED.qed(mol)
return sim, qed
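# Illustrative call (added note): for any SMILES string, e.g.
#   sim, qed = get_properties('CCO', target_molecule='C1CCC2CCCCC2C1')
# sim is the Tanimoto similarity of Morgan fingerprints and qed is the RDKit QED score;
# invalid SMILES return (0.0, 0.0).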
def plot_multi_obj_opt(smiles, target_mol, idx=0):
with open('all_molecules_with_id.json') as f:
molid = json.load(f)
colors = iter(cm.rainbow(np.linspace(0, 1, 6)))
plt.figure()
for i in range(6):
ssl = smiles['weight_%i' % i]
sim, qed = zip(
*[get_properties(ss, target_molecule=target_mol) for ss in ssl])
plt.scatter(sim, qed, label='w=%.1f' % (i * 0.2), color=next(colors))
target_sim, target_qed = get_properties(target_mol, target_mol)
plt.axvline(x=target_sim, ls='dashed', color='grey')
plt.axhline(y=target_qed, ls='dashed', color='grey')
leg = plt.legend()
leg.get_frame().set_alpha(0.95)
plt.ylim((-0.2, 1))
plt.xlabel('Similarity')
plt.ylabel('QED')
plt.title(molid[target_mol])
plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.88)
plt.savefig('batch/mult_obj_gen_{}.pdf'.format(idx))
#plt.show()
def plot_multi_obj_gen_drug20():
with open('multi_obj_opt_drug20.json') as f:
data = json.load(f)
for idx, result in enumerate(data):
    plot_multi_obj_opt(result['smiles'], result['target_mol'], idx)
def plot_qed_improvements():
with open('qed_imp_2.json') as f:
improvements = json.load(f)
def double_gaussian(x, params):
(c1, mu1, sigma1, c2, mu2, sigma2) = params
res = c1 * np.exp( - (x - mu1)**2.0 / (2.0 * sigma1**2.0) ) \
+ c2 * np.exp( - (x - mu2)**2.0 / (2.0 * sigma2**2.0) )
return res
def double_gaussian_fit(params, y):
fit = double_gaussian(x, params)
return (fit - y)
colors = list(iter(cm.rainbow(np.linspace(0, 1, 6))))
colors = ['#eae471', '#c1e092', '#83b49d', '#448fad', '#3e60c3', '#5a26a6']
plt.figure()
start = -0.4
end = 0.6
for i in range(6):
imp = np.array(improvements['weight_%i' % i])
y, binEdges = np.histogram(imp, bins=40, range=(start, end))
y = y.astype(np.float64)
y /= y.sum()
x = 0.5 * (binEdges[1:] + binEdges[:-1])
if i == 0:
fit = leastsq(lambda x: double_gaussian_fit(x, y),
[1, 0, 0.02, 1, 0.3, 0.1])
elif i == 1:
fit = leastsq(lambda x: double_gaussian_fit(x, y),
[1, 0, 0.02, 1, 0.1, 0.1])
else:
fit = leastsq(lambda x: double_gaussian_fit(x, y),
[1, 0, 0.02, 1, 0.1, 0.05])
xx = np.linspace(start, end, 300)
yy = double_gaussian(xx, fit[0])
plt.plot(x, y, 'o', color=colors[i], alpha=0.3)
plt.plot(
xx,
yy,
color=colors[i],
label='w=%.1f' % (i * 0.2),
)
plt.xlim(start, end)
# plt.ylim(-0.02, 0.2)
plt.legend()
plt.xlabel('Improvements on QED')
plt.ylabel('Normalized count')
plt.subplots_adjust(left=0.16, bottom=0.16, right=0.92, top=0.92)
plt.savefig('qed_improvements.pdf')
plt.show()
def plot_qed_relative_improvements():
with open('qed_rel_imp_2.json') as f:
improvements = json.load(f)
def double_gaussian(x, params):
(c1, mu1, sigma1, c2, mu2, sigma2) = params
res = c1 * np.exp( - (x - mu1)**2.0 / (2.0 * sigma1**2.0) ) \
+ c2 * np.exp( - (x - mu2)**2.0 / (2.0 * sigma2**2.0) )
return res
def double_gaussian_fit(params, y):
fit = double_gaussian(x, params)
return (fit - y)
colors = list(iter(cm.rainbow(np.linspace(0, 1, 6))))
colors = ['#eae471', '#c1e092', '#83b49d', '#448fad', '#3e60c3', '#5a26a6']
plt.figure()
start = -1
end = 1
for i in range(6):
imp = np.array(improvements['weight_%i' % i])
import numpy as np
from pyshtools.shclasses import SHCoeffs
from pyshtools.expand import spharm,SHExpandDH
from pyshtools.spectralanalysis import Curve2Mask
from pyshtools.shio import read_icgem_gfc
from sphericalpolygon import create_polygon
from ..gg.static_models import static_download
from ..gg.lovenums import lovenums
from ..gg.utils import month2int,crop_region
from ..gg.lsq import lsqm,ilsqm,wlsqm,iwlsqm
from ..gg.filter import filter_ddk,filter_gaussian,filter_gaussian_inverse
from ..gg.leakage import spectral_domain
from .class_Grid import Grid
from .class_Series import Series
class GSM(object):
'''
class GSM
- attributes:
- info -> All information about the object
- degree_order
- max_degree
- max_order
- normalization
- permanent_tide
- earth_gravity_param
- mean_equator_radius
- background_gravity
- title
- summary
- institution
- processing_level
- product_version
- time_coverage_start
- time_coverage_end
- total_month -> Months over a time interval regardless of the existence of the solutions
- total_month_counts
- solution_month
- solution_counts
- missing_month
- missing_month_counts
- missing_solution_flag -> If True, the monthly solution is missing, otherwise, the monthly solution exists
- unused_days -> Unused days for monthly solutions
- date_issued
- equi_material -> Equivalent material used to represent mass per unit area
- filter -> filter applied to monthly solutions
- shc
- shc_std
- methods:
- deaverage
- debackground
- replace_slr_c20
- filter_ddk
- filter_gaussian
- sma
- gsm
- rate
- grid
- study_area
- leakage_correction
'''
def __init__(self,info,shc,shc_std):
self.info = info
self.degree_order = info['degree_order']
self.max_degree = info['max_degree']
self.max_order = info['max_order']
self.normalization = info['normalization']
self.permanent_tide = info['permanent_tide']
self.earth_gravity_param = info['earth_gravity_param']
self.mean_equator_radius = info['mean_equator_radius']
self.background_gravity = info['background_gravity']
self.title = info['title']
self.summary = info['summary']
self.institution = info['institution']
self.processing_level = info['processing_level']
self.product_version = info['product_version']
self.time_coverage_start = info['time_coverage_start']
self.time_coverage_end = info['time_coverage_end']
self.total_month = info['total_month']
self.total_month_counts = info['total_month_counts']
self.solution_month = info['solution_month']
self.solution_counts = info['solution_counts']
self.missing_month = info['missing_month']
self.missing_month_counts = info['missing_month_counts']
self.missing_solution_flag = info['missing_solution_flag']
self.unused_days = info['unused_days']
self.date_issued = info['date_issued']
self.equi_material = info['equi_material']
self.filter = info['filter']
self.shc = shc
self.shc_std = shc_std
def __repr__(self):
return 'title = {:s}\nmax_degree = {:d}\nmax_order = {:d}\ndegree_order = {:d}\nnormalization = {:s}\ninstitution = {:s}\nprocessing_level = {:s}\nproduct_version = {:s}\ntime_coverage_start = {:s}\ntime_coverage_end = {:s}\nsolution_counts = {:d}\ntotal_month_counts = {:d}\nmissing_month_counts = {:d}'.format\
(self.title,self.max_degree,self.max_order,self.degree_order,self.normalization,self.institution,self.processing_level,self.product_version,self.time_coverage_start,self.time_coverage_end,self.solution_counts,self.total_month_counts,self.missing_month_counts)
def deaverage(self):
'''
Deaverage the GSM solutions of the GRACE and GRACE-FO RL06 products
Usage:
xxx_gsm_d = xxx_gsm.deaverage()
Outputs:
xxx_gsm_d -> instance of SHC class
Examples:
>>> csr_gsm_d = csr_gsm.deaverage()
>>> print(csr_gsm_d)
'''
info = self.info.copy()
shc_deaverage = self.shc - np.average(self.shc,axis=0)
info['title'] = 'Deaveraged ' + info['title']
info['background_gravity'] = 'Average of monthly solutions'
return GSM(info,shc_deaverage,self.shc_std)
def debackground(self):
'''
Debackground the GSM solutions of the GRACE and GRACE-FO RL06 products
Usage:
xxx_gsm_d = xxx_gsm.debackground()
Outputs:
xxx_gsm_d -> instance of GSM class
Examples:
>>> csr_gsm_d = csr_gsm.debackground()
>>> print(csr_gsm_d)
'''
info = self.info.copy()
degree_order = self.degree_order
background_gravity = self.background_gravity
gravity_file = static_download(background_gravity)
if background_gravity == 'GGM05C':
cilm,gm,r0,errors = read_icgem_gfc(gravity_file ,lmax=degree_order,errors='calibrated')
elif background_gravity == 'EIGEN-6C4':
cilm,gm,r0,errors = read_icgem_gfc(gravity_file,lmax=degree_order,errors='formal')
else:
raise Exception('Currently, available background gravity models are GGM05C and EIGEN-6C4')
shc_debackground = self.shc - cilm
info['title'] = 'Debackgrounded ' + info['title']
return GSM(info,shc_debackground,self.shc_std)
def replace_slr_c20(self,slr_c20):
'''
Replace the C20 values from the GSM files of the GRACE and GRACE-FO RL06 products with the 2nd degree terms from SLR measurements.
Usage:
xxx_gsm_r = xxx_gsm.replace_slr_c20(slr_c20)
Inputs:
slr_c20 -> instance of SLR_C20 class
Outputs:
xxx_gsm_r -> instance of GSM class
Examples:
>>> csr_gsm_r = csr_gsm.replace_slr_c20(slr_c20)
>>> print(csr_gsm_r)
'''
shc,shc_std = self.shc.copy(),self.shc_std.copy()
shc[:,0,2,0] = slr_c20.c20
shc_std[:,0,2,0] = slr_c20.c20_std
info = self.info.copy()
info['title'] = info['title'] + ' with C20 replaced by the SLR measurements'
info['summary'] = info['summary'] + ' Note that the 2nd-degree terms have been replaced with the values from SLR C20.'
return GSM(info,shc,shc_std)
def filter_ddk(self,filter_type = 'DDK5'):
'''
Filter the deaveraged GSM SHC with the DDK filter, where DDK1, DDK2, ..., DDK8 are available.
Usage:
xxx_gsm_fddk = xxx_gsm.filter_ddk()
Parameters:
filter_type: [optional, str, default = 'DDK5'] type of DDK filter. Available options are 'DDK1', 'DDK2', ..., 'DDK8'
Outputs:
xxx_gsm_fddk: instance of GSM class
Examples:
>>> slr_c20 = read_slr_c20(end_date='2017-06')
>>> slr_c20_deaverage = slr_c20.deaverage()
>>> csr_gsm = read_gsm('CSR',96,lmax=179,end_date='2017-06')
>>> csr_gsm_d = csr_gsm.deaverage()
>>> csr_gsm_r = csr_gsm_d.replace_slr_c20(slr_c20_deaverage)
>>> csr_gsm_fddk = csr_gsm_r.filt_ddk('DDK5')
>>> print(csr_gsm_fddk.title)
>>> print(csr_gsm_fddk.summary)
DDK5 filtered Deaveraged GRACE Geopotential Coefficients CSR RL06 with C20 replaced by the SLR measurements
Spherical harmonic coefficients representing an estimate of the mean gravity field of Earth during the specified timespan derived from GRACE mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. The 0th and 1st degree terms are excluded from CSR level-2. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.
'''
filter_shc,filter_shc_std = filter_ddk(filter_type,self.shc,self.shc_std)
info = self.info.copy()
info['title'] = filter_type + ' filtered ' + info['title']
info['filter'] = filter_type
if 'with C20 replaced by the SLR measurements' in info['title']:
info['summary'] = info['summary'] + ' Also note that C20 from SLR also experienced the ' + filter_type + ' filtering.'
return GSM(info,filter_shc,filter_shc_std)
def filter_gaussian(self,r):
'''
Filtering the deaveraged GSM SHC with the Gaussian filter.
Usage: xxx_gsm_gau = xxx_gsm.filter_gaussian(r)
Inputs:
r -> [float] Gaussian filter radius in km
Outputs:
xxx_gsm_gau: instance of GSM class
Examples:
>>> slr_c20 = read_slr_c20(end_date='2017-06')
>>> slr_c20_deaverage = slr_c20.deaverage()
>>> csr_gsm = read_GSM('CSR',96,lmax=179,end_date='2017-06')
>>> csr_gsm_d = csr_gsm.deaverage()
>>> csr_gsm_r = csr_gsm_d.replace_slr_c20(slr_c20_deaverage)
>>> csr_gsm_fgau = csr_gsm_r.filt_gaussian(200)
>>> print(csr_gsm_fgau.title)
Gaussian filtered Deaveraged GRACE Geopotential Coefficients CSR RL06 with C20 replaced by the SLR measurements
>>> print(csr_gsm_fgau.summary)
Spherical harmonic coefficients representing an estimate of the mean gravity field of Earth during the specified timespan derived from GRACE mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. The 0th and 1st degree terms are excluded from CSR level-2. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the Gaussian filtering.
'''
filter_shc,filter_shc_std = filter_gaussian(r,self.shc,self.shc_std)
info = self.info.copy()
info['title'] = 'Gaussian filtered ' + info['title']
info['filter'] = 'Gaussian filter with radius of '+str(r) + ' km'
if 'with C20 replaced by the SLR measurements' in info['title']:
info['summary'] = info['summary'] + ' Also note that C20 from SLR also experienced the Gaussian filtering.'
return GSM(info,filter_shc,filter_shc_std)
def sma(self, equi_material = None):
'''
Convert Stokes coefficients (or rates) for GSM to those for Surface Mass Anomaly in Equivalent Water (or Ice, Sand) Thickness(EWT) with unit of [mm w.e.](or [mm i.e.],[mm s.e.]) or [mm w.e./yr](or [mm i.e./yr],[mm s.e./yr])
Usage:
xxx_sma = xxx_gsm.sma()
Parameters:
equi_material -> [optional, str, default = None] Equivalent material for Surface Mass Anomaly. Currently, only Water, Ice, and Sand are available.
Outputs:
xxx_sma: instance of SMA class
Examples:
>>> csr_gsm = read_gsm('CSR',96)
>>> gfz_gsm = read_gsm('GFZ',96)
>>> jpl_gsm = read_gsm('JPL',96)
>>> slr_c20 = read_slr_c20()
>>> comb_gsm = GSM_average([csr_gsm.deaverage(),gfz_gsm.deaverage(),jpl_gsm.deaverage()])
>>> comb_gsm_r = comb_gsm.replace_slr_c20(slr_c20.deaverage())
>>> comb_gsm_ddk5 = comb_gsm_r.filt_DDK('DDK5')
>>> sma = comb_gsm_ddk5.sma('Sand')
>>> print(sma.title)
Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent Sand Thickness(EWT) derived from the DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements
>>> print(sma.summary)
Spherical harmonic coefficients representing an estimate of the Surface Mass Anomaly(SMA) expressed in terms of Equivalent Sand[1442kg/m3] Thickness(EWT) with unit of [mm s.e.] during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.
>>> print(sma.material)
Sand
'''
if equi_material is None:
equi_material = self.equi_material
if equi_material == 'Water':
rho = 1000
elif equi_material == 'Ice':
rho = 917
elif equi_material == 'Sand':
rho = 1442
else:
raise Exception('Currently, the equivalent material for SMA can only be Water, Ice, or Sand.')
# Calculate the average density of the Earth
G = 6.67430e-11
GM = float(self.earth_gravity_param.partition('m3/s2')[0])
a = float(self.mean_equator_radius.partition('m')[0])
rho_ave = 3*GM/(4*G*np.pi*a**3)
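# Added note: rho_ave is the mean density of the Earth (~5.5e3 kg/m3) recovered from GM and
# the equatorial radius; the degree-dependent factor in the loop below is the standard
# Stokes-coefficient to equivalent-surface-mass conversion (cf. Wahr et al., 1998).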
sma_shc = np.zeros_like(self.shc)
sma_shc_std = np.zeros_like(self.shc_std)
for l in range(self.degree_order+1):
k_l =lovenums(l)
factor = a*rho_ave/(3*rho)*(2*l+1)/(1+k_l)
#factor = a*h_l[l]/(1+k_l) # for vertical displacement
sma_shc[:,:,l,:] = factor*self.shc[:,:,l,:]*1e3 # in mm
sma_shc_std[:,:,l,:] = factor*self.shc_std[:,:,l,:]*1e3
info = self.info.copy()
if 'change rate' in info['title']:
info['title'] = 'Stokes coefficients for annual change rate of Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ' + info['title']
info['summary'] = info['summary'].replace('mean gravity field of Earth','Surface Mass Anomaly(SMA) expressed in terms of Equivalent ' + equi_material + '['+ str(rho)+ 'kg/m3]' + ' Thickness(EWT) with unit of [mm '+equi_material[0].lower()+'.e./yr]')
else:
info['title'] = 'Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ' + info['title']
info['summary'] = info['summary'].replace('mean gravity field of Earth','Surface Mass Anomaly(SMA) expressed in terms of Equivalent ' + equi_material + '['+ str(rho)+ 'kg/m3]' + ' Thickness(EWT) with unit of [mm '+equi_material[0].lower()+'.e.]')
info['equi_material'] = equi_material
return GSM(info,sma_shc,sma_shc_std)
def gsm(self):
'''
Convert Stokes coefficients for Surface Mass Anomaly in Equivalent Water (or Ice, Sand) Thickness(EWT) with unit of [mm w.e.] to those for GSM
Usage: xxx_gsm = xxx_sma.gsm()
Parameters:
-----------
None
Returns:
-----------
xxx_gsm: instance of GSM class
Examples:
-----------
>>> csr_gsm = read_GSM('CSR',96)
>>> gfz_gsm = read_GSM('GFZ',96)
>>> jpl_gsm = read_GSM('JPL',96)
>>> slr_c20 = read_SLR_C20()
>>> comb_gsm = GSM_average([csr_gsm.deaverage(),gfz_gsm.deaverage(),jpl_gsm.deaverage()])
>>> comb_gsm_r = comb_gsm.replace_slr_c20(slr_c20.deaverage())
>>> comb_gsm_ddk5 = comb_gsm_r.filt_DDK('DDK5')
>>> sma = comb_gsm_ddk5.sma('Sand')
>>> gsm = sma.gsm()
>>> print(sma.title)
Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent Sand Thickness(EWT) derived from the DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements
>>> print(sma.summary)
Spherical harmonic coefficients representing an estimate of the Surface Mass Anomaly(SMA) expressed in terms of Equivalent Sand[1442kg/m3] Thickness(EWT) with unit of [mm w.e.] during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.
>>> print(sma.material)
Sand
>>> print(gsm.title)
DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements
>>> print(gsm.summary)
Spherical harmonic coefficients representing an estimate of the mean gravity field of Earth during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.
>>> print(comb_gsm_ddk5.SHC[100,0,30,20])
1.8369657302246403e-12
>>> print(sma.SHC[100,0,30,20])
0.9456468755977168
>>> print(gsm.SHC[100,0,30,20])
1.8369657302246403e-12
'''
equi_material = self.equi_material
if equi_material == 'Water':
rho = 1000
elif equi_material == 'Ice':
rho = 917
elif equi_material == 'Sand':
rho = 1442
else:
raise Exception('Currently, the equivalent material can only be Water, Ice, or Sand.')
# Calculate the average density of the Earth
G = 6.67430e-11
GM = float(self.earth_gravity_param.partition('m3/s2')[0])
a = float(self.mean_equator_radius.partition('m')[0])
rho_ave = 3*GM/(4*G*np.pi*a**3)
gsm_shc = np.zeros_like(self.shc)
gsm_shc_std = np.zeros_like(self.shc_std)
for l in range(self.degree_order+1):
k_l =lovenums(l)
factor = 3*rho/(a*rho_ave)*(1+k_l)/(2*l+1)
gsm_shc[:,:,l,:] = factor*self.shc[:,:,l,:]/1e3
gsm_shc_std[:,:,l,:] = factor*self.shc_std[:,:,l,:]/1e3
info = self.info.copy()
if 'change rate' in info['title']:
info['title'] = info['title'].replace('Stokes coefficients for annual change rate of Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ','')
info['title'] = info['title'].replace('Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ','')
info['summary'] = info['summary'].replace('Surface Mass Anomaly(SMA) expressed in terms of Equivalent ' + equi_material + '['+ str(rho)+ 'kg/m3]' + ' Thickness(EWT) with unit of [mm '+equi_material[0].lower()+'.e./yr]','mean gravity field of Earth')
else:
info['title'] = info['title'].replace('Stokes coefficients for Surface Mass Anomaly(SMA) in Equivalent ' + equi_material + ' Thickness(EWT) derived from the ','')
info['summary'] = info['summary'].replace('Surface Mass Anomaly(SMA) expressed in terms of Equivalent ' + equi_material + '['+ str(rho)+ 'kg/m3]' + ' Thickness(EWT) with unit of [mm '+equi_material[0].lower()+'.e.]','mean gravity field of Earth')
return GSM(info,gsm_shc,gsm_shc_std)
def rate(self,mode='ILSQM'):
'''
Estimate the annual change rate of Geopotential coefficients or Stokes coefficients for Surface Mass Anomaly in Equivalent Water(or Ice, Sand) Thickness(EWT) using linear fitting.
There are four linear fitting methods: 'LSQM', 'ILSQM', 'WLSQM', and 'IWLSQM'. ILSQM is the default and recommended.
Usage: xxx_sma_rate = xxx_sma.rate() or xxx_gsm_rate = xxx_gsm.rate('IWLSQM')
Parameters:
-----------
mode [str] [optional, default: ILSQM] alternatively, 'LSQM', 'ILSQM', 'WLSQM', and 'IWLSQM' are available, where
'LSQM' -- Least Square Method
'ILSQM' -- Iterative Least Square Method
'WLSQM' -- Weighted Least Square Method
'IWLSQM' -- Iterative Weighted Least Square Method
Returns:
-----------
xxx_gsm: instance of GSM class
Examples:
-----------
>>> csr_gsm = read_GSM('CSR',96)
>>> gfz_gsm = read_GSM('GFZ',96)
>>> jpl_gsm = read_GSM('JPL',96)
>>> slr_c20 = read_SLR_C20()
>>> comb_gsm = GSM_average([csr_gsm.deaverage(),gfz_gsm.deaverage(),jpl_gsm.deaverage()])
>>> comb_gsm_r = comb_gsm.replace_slr_c20(slr_c20.deaverage())
>>> comb_gsm_ddk5 = comb_gsm_r.filt_DDK('DDK5')
>>> comb_gsm_ddk5_rate = comb_gsm_ddk5.rate()
>>> print(comb_gsm_ddk5_rate.title)
'Annual change rate of DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements'
>>> print(comb_gsm_ddk5_rate.summary)
'Spherical harmonic coefficients representing an estimate of annual change rate of the mean gravity field of Earth during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.'
>>> sma_rate = comb_gsm_ddk5_rate.sma()
>>> print(sma_rate.title)
'Stokes coefficients for annual change rate of Surface Mass Anomaly(SMA) in Equivalent Water Thickness(EWT) derived from the Annual change rate of DDK5 filtered Combined Deaveraged GRACE & GRACE-FO Geopotential Coefficients CSR RL06, GFZ RL06, JPL RL06 with C20 replaced by the SLR measurements'
>>> print(sma_rate.summary)
'Spherical harmonic coefficients representing an estimate of annual change rate of the Surface Mass Anomaly(SMA) expressed in terms of Equivalent Water[1000kg/m3] Thickness(EWT) with unit of [mm w.e./yr] during the specified timespan derived from GRACE & GRACE-FO mission measurements. These coefficients represent the full magnitude of land hydrology, ice, and solid Earth processes. Further, they represent atmospheric and oceanic processes not captured in the accompanying GAC product. Note that the 2nd degree terms have been replaced by the C20 values from SLR. Also note that C20 values from SLR also experienced the DDK5 filtering.'
'''
shc,shc_std = self.shc,self.shc_std
month = month2int(self.solution_month)
shc_rate,shc_rate_std = [np.zeros_like(shc[0]) for k in range(2)]
degree = order = self.degree_order
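# A linear trend is fitted independently to each coefficient time series; degrees 0 and 1 are skipped below (see the note about the motion of the mass center).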
for i in range(2):
for l in range(2,degree+1): # start with 1 if consider the motion of mass center
for m in range(order+1):
if (i==1 and m==0) or m > l: continue
if mode == 'LSQM':
shc_rate[i,l,m],shc_rate_std[i,l,m],_,_ = lsqm(month,shc[:,i,l,m])
elif mode == 'WLSQM':
shc_rate[i,l,m],shc_rate_std[i,l,m],_,_ = wlsqm(month,shc[:,i,l,m],shc_std[:,i,l,m])
elif mode == 'ILSQM':
shc_rate[i,l,m],shc_rate_std[i,l,m],normal,_,_ = ilsqm(month,shc[:,i,l,m])
elif mode == 'IWLSQM':
shc_rate[i,l,m],shc_rate_std[i,l,m],normal,_,_ = iwlsqm(month,shc[:,i,l,m],shc_std[:,i,l,m])
shc_rate[i,l,m],shc_rate_std[i,l,m],normal,_,_ = iwlsqm(month,shc[:,i,l,m],shc_std[:,i,l,m])
else:
raise Exception('Currently, the least square method can only be LSQM, WLSQM, ILSQM, and IWLSQM.')
info = self.info.copy()
info['title'] = 'Annual change rate of ' + info['title']
info['summary'] = info['summary'].replace('an estimate of','an estimate of annual change rate of')
for em in ['w','i','s']:
info['summary'] = info['summary'].replace('[mm '+em+'.e.]','[mm '+em+'.e./yr]')
return GSM(info,np.array([shc_rate]),np.array([shc_rate_std]))
def grid(self,region=None):
'''
Expand spherical harmonic coefficients into a regional grid
Usage:
xxx_sma_grid = xxx_sma.grid(region)
Parameters:
region: [float array] bounding box of the area as [lon_min, lon_max, lat_min, lat_max] in degrees, for example, [96.0,120.0,21.0,39.0];
Returns:
an instance of GRID class
'''
# Create an empty list to contain the grid of EWT from GRACE for each month
grids = []
shcs,shcs_std = self.shc,self.shc_std
for cilm in shcs:
coeffs_class = SHCoeffs.from_array(cilm)
grids_class = coeffs_class.expand()
grids.append(grids_class.data)
grids = np.array(grids)
grids_std = np.zeros_like(grids)
lons = grids_class.lons()
lats = grids_class.lats()
if region is not None:
if 'rate' not in self.title:
print('The calculation will take a few minutes, please be patient.')
lons_region,lats_region,grids_region,grids_std_region,lons_flag,lats_flag = crop_region(lons,lats,grids,grids_std,region)
# Convert SHCs_std to grids_std
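# Assuming uncorrelated coefficient errors, the standard deviation at each grid point is sqrt(sum_lm (Y_lm(theta,phi)*sigma_lm)**2).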
lmax = self.degree_order
k = 0
for shc_std in shcs_std:
i = 0
for theta in lats_region:
j = 0
for phi in lons_region:
ylm = spharm(lmax, 90-theta, phi)
grids_std_region[k,i,j] = np.sqrt(np.sum((ylm*shc_std)**2))
j+=1
i+=1
k+=1
else:
region = 'globe'
grids_region = grids
grids_std_region = np.zeros_like(grids_region)
lons_region,lats_region = lons,lats
lons_flag = np.ones(len(lons_region),dtype=bool)
lats_flag = np.ones(len(lats_region),dtype=bool)
# Note: Since it takes a lot of time to calculate the uncertainties of the global grid data, the uncertainties are all set to zero.
info = self.info.copy()
info['title'] = 'Grids expanded from ' + info['title']
info['summary'] = info['summary'].replace('Spherical harmonic coefficients','Grids')
info['summary'] = info['summary'].replace('coefficients','grids')
info['region'] = region
return Grid(info,grids_region,grids_std_region,lons_region,lats_region,lons_flag,lats_flag)
def study_area(self,points):
a = float(self.mean_equator_radius.partition('m')[0])/1e3 # km
if self.equi_material == 'Water':
rho = 1000
elif self.equi_material == 'Ice':
rho = 917
elif self.equi_material == 'Sand':
rho = 1442
qs,qs_std = [],[]
north_pole = create_polygon(points).contains_points([90,0]) # Determine if the North Pole is inside the study area
mask_grid = Curve2Mask(2*(self.degree_order+1),points,north_pole,sampling=2)
mask_shc = SHExpandDH(mask_grid,sampling=2)
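# With the basin mask expanded in (4-pi normalized) spherical harmonics, the surface integral over the basin reduces to a sum of coefficient products scaled by 4*pi*a**2 (orthogonality of the basis functions).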
area = mask_shc[0,0,0]*4*np.pi*a**2 # km^2
for shc in self.shc:
q = np.sum(shc*mask_shc)*4*np.pi*a**2*rho/1e9 # Gt
qs.append(q)
for shc_std in self.shc_std:
q_std = np.sqrt(np.sum((shc_std*mask_shc)**2))*4*np.pi*a**2*rho/1e9 # Gt
qs_std.append(q_std)
qs,qs_std = np.array(qs),np.array(qs_std)
"""
Signals and Systems Function Module
Copyright (c) March 2017, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
Notes
-----
The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to anyone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time.
The formatted docstrings for the library follow. Click index in the upper right to get an
alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options.
Examples
--------
>>> import sk_dsp_comm.sigsys as ssd
>>> # Commands then need to be prefixed with ssd., i.e.,
>>> ssd.tri(t,tau)
>>> # A full import of the module, to avoid the need to prefix with ssd, is:
>>> from sk_dsp_comm.sigsys import *
Function Catalog
----------------
"""
from matplotlib import pylab
import numpy as np
from numpy import fft
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
from logging import getLogger
log = getLogger(__name__)
import warnings
def cic(m, k):
"""
A functional form implementation of a cascade of integrator comb (CIC) filters.
Parameters
----------
m : Effective number of taps per section (typically the decimation factor).
k : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth).
Returns
-------
b : FIR filter coefficients for a simple direct form implementation using the filter() function.
Notes
-----
Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter
requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring
real coefficient multiplies via filter().
<NAME> July 2013
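Examples
--------
A minimal usage sketch; the returned taps are normalized to unity gain at DC:
>>> from scipy import signal
>>> b = cic(10, 3)
>>> # the CIC-equivalent FIR can then be applied to a signal x via signal.lfilter(b, 1, x)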
"""
if k == 1:
b = np.ones(m)
else:
h = np.ones(m)
b = h
for i in range(1, k):
b = signal.convolve(b, h) # cascade by convolving impulse responses
# Make filter have unity gain at DC
return b / np.sum(b)
def ten_band_eq_filt(x,GdB,Q=3.5):
"""
Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB.
The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and
stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate
is assumed to be 44.1 kHz.
Parameters
----------
x : ndarray of the input signal samples
GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB]
Q : Quality factor vector for each of the NB peaking filters
Returns
-------
y : ndarray of output signal samples
Examples
--------
>>> # Test with white noise
>>> x = randn(100000)
>>> y = ten_band_eq_filt(x,GdB)
>>> psd(y,2**10,44.1)
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**np.arange(NB)
B = np.zeros((NB,3))
A = np.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
[b,a] = peaking(GdB[k],Fc[k],Q)
B[k,:] = b
A[k,:] = a
# Pass signal x through the cascade of ten filters
y = np.zeros(len(x))
for k in range(NB):
if k == 0:
y = signal.lfilter(B[k,:],A[k,:],x)
else:
y = signal.lfilter(B[k,:],A[k,:],y)
return y
def ten_band_eq_resp(GdB,Q=3.5):
"""
Create a frequency response magnitude plot in dB of a ten band equalizer
using a semilogplot (semilogx()) type plot
Parameters
----------
GdB : Gain vector for 10 peaking filters [G0,...,G9]
Q : Quality factor for each peaking filter (default 3.5)
Returns
-------
Nothing : two plots are created
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])
>>> plt.show()
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**np.arange(NB)
B = np.zeros((NB,3))
A = np.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
b,a = peaking(GdB[k],Fc[k],Q,fs)
B[k,:] = b
A[k,:] = a
# Create the cascade frequency response
F = np.logspace(1,np.log10(20e3),1000)
H = np.ones(len(F),dtype=complex)
for k in range(NB):
w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs)
H *= Htemp
plt.figure(figsize=(6,4))
plt.subplot(211)
plt.semilogx(F,20*np.log10(abs(H)))
plt.axis([10, fs/2, -12, 12])
plt.grid()
plt.title('Ten-Band Equalizer Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.subplot(212)
plt.stem(np.arange(NB),GdB,'b','bs')
#plt.bar(np.arange(NB)-.1,GdB,0.2)
plt.axis([0, NB-1, -12, 12])
plt.xlabel('Equalizer Band Number')
plt.ylabel('Gain Set (dB)')
plt.grid()
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
A second-order peaking filter having GdB gain at fc and approximately
0 dB otherwise.
The filter coefficients returned correspond to a biquadratic system function
containing five parameters.
Parameters
----------
GdB : Peaking filter gain in dB at the center frequency fc
fc : Center frequency in Hz
Q : Filter Q which is inversely proportional to bandwidth
fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import peaking
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> f = np.logspace(1,5,400)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
>>> b,a = peaking(-5.0,500,4)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*np.array([1, b1, b2])
a = np.array([1, a1, a2])
return b,a
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index array n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndarray covering at least -2 to +5.
Returns
-------
x : ndarray of signal samples in x
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(-5,8)
>>> x = ss.ex6_2(n)
>>> plt.stem(n,x) # creates a stem plot of x vs n
"""
x = np.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x
def position_cd(Ka, out_type ='fb_exact'):
"""
CD sled position control case study of Chapter 18.
The function returns the closed-loop and open-loop
system function for a CD/DVD sled position control
system. The loop amplifier gain is the only variable
that may be changed. The returned system function can
however be changed.
Parameters
----------
Ka : loop amplifier gain, start with 50.
out_type : 'open_loop' for open loop system function
out_type : 'fb_approx' for closed-loop approximation
out_type : 'fb_exact' for closed-loop exact
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
With the exception of the loop amplifier gain, all
other parameters are hard-coded from Case Study example.
Examples
--------
>>> b,a = position_cd(Ka,'fb_approx')
>>> b,a = position_cd(Ka,'fb_exact')
"""
rs = 10/(2*np.pi)
# Load b and a ndarrays with the coefficients
if out_type.lower() == 'open_loop':
b = np.array([Ka*4000*rs])
a = np.array([1,1275,31250,0])
elif out_type.lower() == 'fb_approx':
b = np.array([3.2*Ka*rs])
a = np.array([1, 25, 3.2*Ka*rs])
elif out_type.lower() == 'fb_exact':
b = np.array([4000*Ka*rs])
a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs])
else:
raise ValueError('out_type must be: open_loop, fb_approx, or fb_exact')
return b, a
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'):
"""
Cruise control with PI controller and hill disturbance.
This function returns various system function configurations
for the cruise control Case Study example found in
the supplementary article. The plant model is obtained by
linearizing the equations of motion and the controller contains a
proportional and integral gain term set via the closed-loop parameters
natural frequency wn (rad/s) and damping zeta.
Parameters
----------
wn : closed-loop natural frequency in rad/s, nominally 0.1
zeta : closed-loop damping factor, nominally 1.0
T : vehicle time constant, nominally 10 s
vcruise : cruise velocity set point, nominally 75 mph
vmax : maximum vehicle velocity, nominally 120 mph
tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
'H' : closed-loop system function V(s)/R(s)
'HE' : closed-loop system function E(s)/R(s)
'HVW' : closed-loop system function V(s)/W(s)
'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Examples
--------
>>> # return the closed-loop system function output/input velocity
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H')
>>> # return the closed-loop system function loop error/hill disturbance
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
"""
tau = T/2.*vmax/vcruise
g = 9.8
g *= 3*60**2/5280. # m/s to mph conversion
Kp = T*(2*zeta*wn-1/tau)/vmax
Ki = T*wn**2./vmax
K = Kp*vmax/T
wn = np.sqrt(K/(Kp/Ki))
zeta = (K + 1/tau)/(2*wn)
log.info('wn = %s' % (wn))
log.info('zeta = %s' % (zeta))
a = np.array([1, 2*zeta*wn, wn**2])
if tf_mode == 'H':
b = np.array([K, wn**2])
elif tf_mode == 'HE':
b = np.array([1, 2*zeta*wn-K, 0.])
elif tf_mode == 'HVW':
b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
b *= Kp
elif tf_mode == 'HED':
b = np.array([g, 0])
else:
raise ValueError('tf_mode must be: H, HE, HVW, or HED')
return b, a
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]):
"""
Create an s-plane pole-zero plot.
As input the function uses the numerator and denominator
s-domain system function coefficient ndarrays b and a respectively.
Assumed to be stored in descending powers of s.
Parameters
----------
b : numerator coefficient ndarray.
a : denominator coefficient ndarray.
auto_scale : bool, when True (default) the plot limits are set automatically
size : [xmin,xmax,ymin,ymax] plot scaling used when auto_scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> splane(b,a)
>>> # Here the plot is generated using manual scaling
>>> splane(b,a,False,[-10,1,-10,10])
"""
if (isinstance(a,int) or isinstance(a,float)):
a = [a]
if (isinstance(b,int) or isinstance(b,float)):
b = [b]
M = len(b) - 1
N = len(a) - 1
plt.figure(figsize=(5,5))
#plt.axis('equal')
N_roots = np.array([0.0])
if M > 0:
N_roots = np.roots(b)
D_roots = np.array([0.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5
size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5
size[1] = max(size[1],0.5)
size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5
size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5
plt.plot([size[0],size[1]],[0,0],'k--')
plt.plot([0,0],[size[2],size[3]],'r--')
# Plot labels if multiplicity greater than 1
x_scale = size[1]-size[0]
y_scale = size[3]-size[2]
x_off = 0.03
y_off = 0.01
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis(np.array(size))
return M,N
def os_filter(x, h, N, mode=0):
"""
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> from numpy import arange, cos, pi, ones
>>> n = arange(0,100)
>>> x = cos(2*pi*0.05*n)
>>> b = ones(10)
>>> y = os_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = os_filter(x,h,N,1)
"""
P = len(h)
# zero pad start of x so first frame can recover first true samples of x
x = np.hstack((np.zeros(P-1),x))
L = N - P + 1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad end of x to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(int(Nframe*N))
# create an instrumentation matrix to observe the overlap and save behavior
y_mat = np.zeros((Nframe,int(Nframe*N)))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:k*L+N]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk)) # imag part should be zero
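# Overlap-save: the first P-1 samples of each block are corrupted by circular wrap-around, so only samples P-1 onward are kept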
y[k*L+P-1:k*L+N] = yk[P-1:]
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[P-1:Nx], y_mat[:,P-1:Nx]
else:
return y[P-1:Nx]
def oa_filter(x, h, N, mode=0):
"""
Overlap and add transform domain FIR filtering.
This function implements the classical overlap and add method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import oa_filter
>>> n = np.arange(0,100)
>>> x = np.cos(2*np.pi*0.05*n)
>>> b = np.ones(10)
>>> y = oa_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = oa_filter(x,h,N,1)
"""
P = len(h)
L = int(N) - P + 1 # need N >= L + P -1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and add behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:(k+1)*L]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk))
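# Overlap-add: each N-point block extends P-1 samples past the next block's start; the overlapping tails are summed into y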
y[k*L:k*L+N] += yk
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[0:Nx], y_mat[:,0:Nx]
else:
return y[0:Nx]
def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)):
"""
Lowpass sampling theorem plotting function.
Display the spectrum of a sampled signal after setting the bandwidth,
sampling frequency, maximum display frequency, and spectral shape.
Parameters
----------
fb : spectrum lowpass bandwidth in Hz
fs : sampling frequency in Hz
fmax : plot over [-fmax,fmax]
shape : 'tri' or 'line'
N : number of translates, N positive and N negative
fsize : the size of the figure window, default (6,4)
Returns
-------
Nothing : A plot window opens containing the spectrum plot
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.sigsys import lp_samp
No aliasing since the bandwidth 10 Hz is less than fs/2 = 12.5 Hz.
>>> lp_samp(10,25,50,10)
>>> plt.show()
Now aliasing occurs since the bandwidth 15 Hz exceeds fs/2 = 12.5 Hz.
>>> lp_samp(15,25,50,10)
"""
plt.figure(figsize=fsize)
# define the plot interval
f = np.arange(-fmax,fmax+fmax/200.,fmax/200.)
A = 1.0
line_ampl = A/2.*np.array([0, 1])
# plot the lowpass spectrum in black
shapes = ['tri', 'line']
if shape.lower() not in shapes:
raise ValueError('shape must be tri or line')
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f,fb))
# overlay positive and negative frequency translates
for n in range(N):
plt.plot(f, lp_tri(f - (n + 1) * fs, fb), '--r')
plt.plot(f, lp_tri(f + (n + 1) * fs, fb), '--g')
elif shape.lower() == 'line':
plt.plot([fb, fb],line_ampl,'b', linewidth=2)
plt.plot([-fb, -fb],line_ampl,'b', linewidth=2)
# overlay positive and negative frequency translates
for n in range(N):
plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.ylabel('Spectrum Magnitude')
plt.xlabel('Frequency in Hz')
plt.axis([-fmax,fmax,0,1])
plt.grid()
def lp_tri(f, fb):
"""
Triangle spectral shape function used by :func:`lp_samp`.
Parameters
----------
f : ndarray containing frequency samples
fb : the bandwidth as a float constant
Returns
-------
x : ndarray of spectrum samples for a single triangle shape
Notes
-----
This is a support function for the lowpass spectrum plotting function
:func:`lp_samp`.
Examples
--------
>>> x = lp_tri(f, fb)
"""
x = np.zeros(len(f))
for k in range(len(f)):
if abs(f[k]) <= fb:
x[k] = 1 - abs(f[k])/float(fb)
return x
def sinusoid_awgn(x, SNRdB):
"""
Add white Gaussian noise to a single real sinusoid.
Input a single sinusoid to this function and it returns a noisy
sinusoid at a specific SNR value in dB. Sinusoid power is calculated
using np.var.
Parameters
----------
x : Input signal as ndarray consisting of a single sinusoid
SNRdB : SNR in dB for output sinusoid
Returns
-------
y : Noisy sinusoid return vector
Examples
--------
>>> # set the SNR to 10 dB
>>> n = arange(0,10000)
>>> x = cos(2*pi*0.04*n)
>>> y = sinusoid_awgn(x,10.0)
"""
# Estimate signal power
x_pwr = np.var(x)
# Create noise vector
noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x));
return x + noise
def simple_quant(x, b_tot, x_max, limit):
"""
A simple rounding quantizer for bipolar signals having Btot = B + 1 bits.
This function models a quantizer that employs b_tot bits and has one of
three selectable limiting types: saturation, overflow, and none.
The quantizer is bipolar and implements rounding.
Parameters
----------
x : input signal ndarray to be quantized
b_tot : total number of bits in the quantizer, e.g. 16
x_max : quantizer full-scale dynamic range is [-x_max, x_max]
limit : limiting type, one of 'sat', 'over', or 'none'
Returns
-------
xq : quantized output ndarray
Notes
-----
The quantization error can be formed as e = xq - x
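For example, with b_tot = 16 and x_max = 1 the quantizer step size is 2*x_max/2**b_tot = 2**(-15), or about 3.05e-5.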
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from matplotlib.mlab import psd
>>> import numpy as np
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(0,10000)
>>> x = np.cos(2*np.pi*0.211*n)
>>> y = ss.sinusoid_awgn(x,90)
>>> Px, f = psd(y,2**10,Fs=1)
>>> plt.plot(f, 10*np.log10(Px))
>>> plt.ylim([-80, 25])
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel(r'Normalized Frequency $\omega/2\pi$')
>>> plt.show()
>>> yq = ss.simple_quant(y,12,1,'sat')
>>> Px, f = psd(yq,2**10,Fs=1)
>>> plt.plot(f, 10*np.log10(Px))
>>> plt.ylim([-80, 25])
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel(r'Normalized Frequency $\omega/2\pi$')
>>> plt.show()
"""
B = b_tot - 1
x = x / x_max
if limit.lower() == 'over':
xq = (np.mod(np.round(x*2**B) + 2 ** B, 2 ** b_tot) - 2 ** B) / 2 ** B
elif limit.lower() == 'sat':
xq = np.round(x*2**B)+2**B
s1 = np.nonzero(np.ravel(xq >= 2 ** b_tot - 1))[0]
s2 = np.nonzero(np.ravel(xq < 0))[0]
xq[s1] = (2 ** b_tot - 1) * np.ones(len(s1))
xq[s2] = np.zeros(len(s2))
xq = (xq - 2**B)/2**B
elif limit.lower() == 'none':
xq = np.round(x*2**B)/2**B
else:
raise ValueError('limit must be the string over, sat, or none')
return xq * x_max
def prin_alias(f_in,fs):
"""
Calculate the principal alias frequencies.
Given an array of input frequencies the function returns an
array of principal alias frequencies.
Parameters
----------
f_in : ndarray of input frequencies
fs : sampling frequency
Returns
-------
f_out : ndarray of principal alias frequencies
Examples
--------
>>> # Linear frequency sweep from 0 to 50 Hz
>>> f_in = arange(0,50,0.1)
>>> # Calculate principal alias with fs = 10 Hz
>>> f_out = prin_alias(f_in,10)
"""
return abs(np.rint(f_in/fs)*fs - f_in)
"""
Principal alias via recursion
f_out = np.copy(f_in)
for k in range(len(f_out)):
while f_out[k] > fs/2.:
f_out[k] = abs(f_out[k] - fs)
return f_out
"""
def cascade_filters(b1,a1,b2,a2):
"""
Cascade two IIR digital filters into a single (b,a) coefficient set.
To cascade two digital filters (system functions) given their numerator
and denominator coefficients you simply convolve the coefficient arrays.
Parameters
----------
b1 : ndarray of numerator coefficients for filter 1
a1 : ndarray of denominator coefficients for filter 1
b2 : ndarray of numerator coefficients for filter 2
a2 : ndarray of denominator coefficients for filter 2
Returns
-------
b : ndarray of numerator coefficients for the cascade
a : ndarray of denominator coefficients for the cascade
Examples
--------
>>> from scipy import signal
>>> b1,a1 = signal.butter(3, 0.1)
>>> b2,a2 = signal.butter(3, 0.15)
>>> b,a = cascade_filters(b1,a1,b2,a2)
"""
return signal.convolve(b1,b2), signal.convolve(a1,a2)
def soi_snoi_gen(s,SIR_dB,N,fi,fs = 8000):
"""
Add an interfering sinusoidal tone to the input signal at a given SIR_dB.
The input is the signal of interest (SOI) and a number of sinusoid signals
not of interest (SNOI) are added to the SOI at a prescribed signal-to-
interference SIR level in dB.
Parameters
----------
s : ndarray of signal of SOI
SIR_dB : interference level in dB
N : Trim input signal s to length N + 1 samples
fi : ndarray of interference frequencies in Hz
fs : sampling rate in Hz, default is 8000 Hz
Returns
-------
r : ndarray of combined signal plus interference of length N+1 samples
Examples
--------
>>> # load a speech ndarray and trim to 5*8000 + 1 samples
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
"""
n = np.arange(0,N+1)
K = len(fi)
si = np.zeros(N+1)
for k in range(K):
si += np.cos(2*np.pi*fi[k]/fs*n);
s = s[:N+1]
Ps = np.var(s)
Psi = np.var(si)
r = s + np.sqrt(Ps/Psi*10**(-SIR_dB/10))*si
return r
def lms_ic(r,M,mu,delta=1):
"""
Least mean square (LMS) interference canceller adaptive filter.
A complete LMS adaptive filter simulation function for the case of
interference cancellation. Used in the digital filtering case study.
Parameters
----------
r : ndarray of the noisy (with interference) input signal
M : FIR filter length (order M-1)
mu : LMS step-size
delta : decorrelation delay between the filter input and the reference signal (default 1)
Returns
-------
n : ndarray Index vector
r : ndarray noisy (with interference) input signal
r_hat : ndarray filtered output (NB_hat[n])
e : ndarray error sequence (WB_hat[n])
ao : ndarray final value of weight vector
F : ndarray frequency response axis vector
Ao : ndarray frequency response of filter
Examples
----------
>>> # import a speech signal
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> # add interference at 1kHz and 1.5 kHz and
>>> # truncate to 5 seconds
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
>>> # simulate with a 64 tap FIR and mu = 0.005
>>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005)
"""
N = len(r)-1;
# Form the reference signal y via delay delta
y = signal.lfilter(np.hstack((np.zeros(delta), np.array([1]))),1,r)
# Initialize output vector x_hat to zero
r_hat = np.zeros(N+1)
# Initialize error vector e to zero
e = np.zeros(N+1)
# Initialize weight vector to zero
ao = np.zeros(M+1)
# Initialize filter memory to zero
z = np.zeros(M)
# Initialize a vector for holding ym of length M+1
ym = np.zeros(M+1)
for k in range(N+1):
# Filter one sample at a time
r_hat[k],z = signal.lfilter(ao,np.array([1]),np.array([y[k]]),zi=z)
# Form the error sequence
e[k] = r[k] - r_hat[k]
# Update the weight vector
ao = ao + 2*mu*e[k]*ym
# Update vector used for correlation with e(k)
ym = np.hstack((np.array([y[k]]), ym[:-1]))
# Create filter frequency response
F, Ao = signal.freqz(ao,1,1024)
F/= (2*np.pi)
Ao = 20*np.log10(abs(Ao))
return np.arange(0,N+1), r, r_hat, e, ao, F, Ao
def fir_iir_notch(fi,fs,r=0.95):
"""
Design a second-order FIR or IIR notch filter.
A second-order FIR notch filter is created by placing conjugate
zeros on the unit circle at angle corresponidng to the notch center
frequency. The IIR notch variation places a pair of conjugate poles
at the same angle, but with radius r < 1 (typically 0.9 to 0.95).
Parameters
----------
fi : notch frequency in Hz relative to fs
fs : the sampling frequency in Hz, e.g. 8000
r : pole radius for IIR version, default = 0.95
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
If the pole radius is 0 then an FIR version is created, that is
there are no poles except at z = 0.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> b_FIR, a_FIR = ss.fir_iir_notch(1000,8000,0)
>>> ss.zplane(b_FIR, a_FIR)
>>> plt.show()
>>> b_IIR, a_IIR = ss.fir_iir_notch(1000,8000)
>>> ss.zplane(b_IIR, a_IIR)
"""
w0 = 2*np.pi*fi/float(fs)
if r >= 1:
raise ValueError('Poles on or outside unit circle.')
elif r == 0:
a = np.array([1.0])
else:
a = np.array([1, -2*r*np.cos(w0), r**2])
b = np.array([1, -2*np.cos(w0), 1])
return b, a
def simple_sa(x, NS, NFFT, fs, NAVG=1, window='boxcar'):
"""
Spectral estimation using windowing and averaging.
This function implements averaged periodogram spectral estimation
similar to matplotlib's psd() function, but is more
specialized for the windowing case study of Chapter 16.
Parameters
----------
x : ndarray containing the input signal
NS : The subrecord length less zero padding, e.g. NS < NFFT
NFFT : FFT length, e.g., 1024 = 2**10
fs : sampling rate in Hz
NAVG : the number of averages, e.g., 1 for deterministic signals
window : hardcoded window 'boxcar' (default) or 'hanning'
Returns
-------
f : ndarray frequency axis in Hz on [0, fs/2]
Sx : ndarray the power spectrum estimate
Notes
-----
The function also prints the maximum number of averages K possible
for the input data record.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(0,2048)
>>> x = np.cos(2*np.pi*1000/10000*n) + 0.01*np.cos(2*np.pi*3000/10000*n)
>>> f, Sx = ss.simple_sa(x,128,512,10000)
>>> plt.plot(f, 10*np.log10(Sx))
>>> plt.ylim([-80, 0])
>>> plt.xlabel("Frequency (Hz)")
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.show()
With a hanning window.
>>> f, Sx = ss.simple_sa(x,256,1024,10000,window='hanning')
>>> plt.plot(f, 10*np.log10(Sx))
>>> plt.xlabel("Frequency (Hz)")
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.ylim([-80, 0])
"""
Nx = len(x)
K = int(Nx/NS)
log.info('K = %d', K)
if NAVG > K:
warnings.warn('NAVG exceeds number of available subrecords')
return 0,0
if window.lower() == 'boxcar' or window.lower() == 'rectangle':
w = signal.windows.boxcar(NS)
elif window.lower() == 'hanning':
w = signal.windows.hann(NS)
xsw = np.zeros((K,NS)) + 1j*np.zeros((K,NS))
for k in range(NAVG):
xsw[k,] = w*x[k*NS:(k+1)*NS]
Sx = np.zeros(NFFT)
for k in range(NAVG):
X = fft.fft(xsw[k,],NFFT)
Sx += abs(X)**2
Sx /= float(NAVG)
Sx /= float(NFFT**2)
NFFTby2 = int(NFFT/2)
if x.dtype != 'complex128':
n = np.arange(NFFTby2)
f = fs*n/float(NFFT)
Sx = Sx[0:NFFTby2]
else:
n = np.arange(NFFTby2)
f = fs*np.hstack((np.arange(-NFFTby2,0),np.arange(NFFTby2)))/float(NFFT)
Sx = np.hstack((Sx[NFFTby2:],Sx[0:NFFTby2]))
return f, Sx
def line_spectra(fk,Xk,mode,sides=2,linetype='b',lwidth=2,floor_dB=-100,fsize=(6,4)):
"""
Plot the Fourier series line spectral given the coefficients.
This function plots two-sided and one-sided line spectra of a periodic
signal given the complex exponential Fourier series coefficients and
the corresponding harmonic frequencies.
Parameters
----------
fk : vector of real sinusoid frequencies
Xk : magnitude and phase at each positive frequency in fk
mode : 'mag' => magnitude plot, 'magdB' => magnitude in dB plot,
'magdBn' => magnitude in dB normalized, 'phase' => a phase plot in radians
sides : 2; 2-sided or 1-sided
linetype : line type per Matplotlib definitions, e.g., 'b';
lwidth : 2; linewidth in points
floor_dB : dB floor used for the dB-scaled plots, default -100
fsize : optional figure size in inches, default = (6,4) inches
Returns
-------
Nothing : A plot window opens containing the line spectrum plot
Notes
-----
Since real signals are assumed, the frequencies in fk are zero and/or positive
numbers. The supplied Fourier coefficients correspond to these nonnegative frequencies.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import line_spectra
>>> n = np.arange(0,25)
>>> # a pulse train with 10 Hz fundamental and 20% duty cycle
>>> fk = n*10
>>> Xk = np.sinc(n*10*.02)*np.exp(-1j*2*np.pi*n*10*.01) # 1j = sqrt(-1)
>>> line_spectra(fk,Xk,'mag')
>>> plt.show()
>>> line_spectra(fk,Xk,'phase')
"""
plt.figure(figsize=fsize)
# Eliminate zero valued coefficients
idx = np.nonzero(Xk)[0]
Xk = Xk[idx]
fk = fk[idx]
if mode == 'mag':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, 2.*np.abs(Xk[k])],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), 0, 1.05*max(abs(Xk))])
elif sides == 1:
plt.axis([0, 1.2*max(fk), 0, 1.05*2*max(abs(Xk))])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdB':
Xk_dB = 20*np.log10(np.abs(Xk))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdBn':
Xk_dB = 20*np.log10(np.abs(Xk)/max(np.abs(Xk)))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Normalized Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'phase':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, -np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
if sides == 2:
plt.plot([-1.2*max(fk), 1.2*max(fk)], [0, 0],'k')
plt.axis([-1.2*max(fk), 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
elif sides == 1:
plt.plot([0, 1.2*max(fk)], [0, 0],'k')
plt.axis([0, 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Phase (rad)')
plt.xlabel('Frequency (Hz)')
else:
warnings.warn('Invalid mode type')
def fs_coeff(xp,N,f0,one_side=True):
"""
Numerically approximate the Fourier series coefficients given periodic x(t).
The input is assumed to represent one period of the waveform
x(t) that has been uniformly sampled. The number of samples supplied
to represent one period of the waveform sets the sampling rate.
Parameters
----------
xp : ndarray of one period of the waveform x(t)
N : maximum Fourier series coefficient, [0,...,N]
f0 : fundamental frequency used to form fk.
Returns
-------
Xk : ndarray of the coefficients over indices [0,1,...,N]
fk : ndarray of the harmonic frequencies [0, f0,2f0,...,Nf0]
Notes
-----
len(xp) >= 2*N+1 as len(xp) is the fft length.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> import sk_dsp_comm.sigsys as ss
>>> t = arange(0,1,1/1024.)
>>> # a 20% duty cycle pulse starting at t = 0
>>> x_rect = ss.rect(t-.1,0.2)
>>> Xk, fk = ss.fs_coeff(x_rect,25,10)
>>> # plot the spectral lines
>>> ss.line_spectra(fk,Xk,'mag')
>>> plt.show()
"""
Nint = len(xp)
if Nint < 2*N+1:
raise ValueError('Number of samples in xp insufficient for requested N.')
Xp = fft.fft(xp,Nint)/float(Nint)
# To interface with the line_spectra function use one_side mode
if one_side:
Xk = Xp[0:N+1]
fk = f0*np.arange(0,N+1)
else:
Xk = np.hstack((Xp[-N:],Xp[0:N+1]))
fk = f0*np.arange(-N,N+1)
return Xk, fk
def fs_approx(Xk,fk,t):
"""
Synthesize periodic signal x(t) using Fourier series coefficients at harmonic frequencies
Assume the signal is real so coefficients Xk are supplied for nonnegative
indices. The negative index coefficients are assumed to be complex
conjugates.
Parameters
----------
Xk : ndarray of complex Fourier series coefficients
fk : ndarray of harmonic frequencies in Hz
t : ndarray time axis corresponding to output signal array x_approx
Returns
-------
x_approx : ndarray of periodic waveform approximation over time span t
Examples
--------
>>> t = arange(0,2,.002)
>>> # a 20% duty cycle pulse train
>>> n = arange(0,20,1) # 0 to 19th harmonic
>>> fk = 1*n # period = 1 s
>>> x_approx = fs_approx(Xk,fk,t)
>>> plot(t,x_approx)
"""
x_approx = np.zeros(len(t))
for k,Xkk in enumerate(Xk):
if fk[k] == 0:
x_approx += Xkk.real*np.ones(len(t))
else:
x_approx += 2*np.abs(Xkk)*np.cos(2*np.pi*fk[k]*t+np.angle(Xkk))
return x_approx
def ft_approx(x,t,Nfft):
'''
Approximate the Fourier transform of a finite duration signal using scipy.signal.freqz()
Parameters
----------
x : input signal array
t : time array used to create x(t)
Nfft : the number of frequency domain points used to
approximate X(f) on the interval [-fs/2, fs/2], where
fs = 1/Dt, Dt being the time spacing in array t
Returns
-------
f : frequency axis array in Hz
X : the Fourier transform approximation (complex)
Notes
-----
The Fourier transform is approximated by evaluating the DTFT of the samples
x(n*Dt) with scipy.signal.freqz() and scaling the result by Dt. Phase factors
are then applied to reference the spectrum to the actual start time of the
supplied time axis t, since freqz() implicitly assumes the first sample lies
at time zero.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> plt.figure(figsize=(6,5))
>>> plt.plot(t,x0)
>>> plt.grid()
>>> plt.ylim([-0.1,1.1])
>>> plt.xlim([-2,2])
>>> plt.title(r'Exact Waveform')
>>> plt.xlabel(r'Time (s)')
>>> plt.ylabel(r'$x_0(t)$')
>>> plt.show()
>>> # FT Exact Plot
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> fe = np.arange(-10,10,.01)
>>> X0e = tau*np.sinc(fe*tau)
>>> plt.plot(fe,abs(X0e))
>>> #plot(f,angle(X0))
>>> plt.grid()
>>> plt.xlim([-10,10])
>>> plt.title(r'Exact (Theory) Spectrum Magnitude')
>>> plt.xlabel(r'Frequency (Hz)')
>>> plt.ylabel(r'$|X_0e(f)|$')
>>> plt.show()
>>> # FT Approximation Plot
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> f,X0 = ss.ft_approx(x0,t,4096)
>>> plt.plot(f,abs(X0))
>>> #plt.plot(f,angle(X0))
>>> plt.grid()
>>> plt.xlim([-10,10])
>>> plt.title(r'Approximation Spectrum Magnitude')
>>> plt.xlabel(r'Frequency (Hz)')
>>> plt.ylabel(r'$|X_0(f)|$');
>>> plt.tight_layout()
>>> plt.show()
'''
fs = 1/(t[1] - t[0])
t0 = (t[-1]+t[0])/2 # time delay at center
N0 = len(t)/2 # FFT center in samples
f = np.arange(-1./2,1./2,1./Nfft)
w, X = signal.freqz(x,1,2*np.pi*f)
X /= fs # account for dt = 1/fs in integral
X *= np.exp(-1j*2*np.pi*f*fs*t0)# time interval correction
X *= np.exp(1j*2*np.pi*f*N0)# FFT time interval is [0,Nfft-1]
F = f*fs
return F, X
def conv_sum(x1,nx1,x2,nx2,extent=('f','f')):
"""
Discrete convolution of x1 and x2 with proper tracking of the output time axis.
Convolve two discrete-time signals using the SciPy function :func:`scipy.signal.convolve`.
The time (sequence) axes are managed from input to output: y[n] = x1[n]*x2[n].
Parameters
----------
x1 : ndarray of signal x1 corresponding to nx1
nx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to nx2
nx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ny : ndarray of the corresponding sequence index n
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The default
extents of ('f','f') are used for signals that are active (have support)
on or within n1 and n2 respectively. A right-sided signal such as
a^n*u[n] is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> nx = np.arange(-5,10)
>>> x = ss.drect(nx,4)
>>> y,ny = ss.conv_sum(x,nx,x,nx)
>>> plt.stem(ny,y)
>>> plt.show()
Consider a pulse convolved with an exponential. ('r' type extent)
>>> h = 0.5**nx*ss.dstep(nx)
>>> y,ny = ss.conv_sum(x,nx,h,nx,('f','r')) # note extents set
>>> plt.stem(ny,y) # expect a pulse charge and discharge sequence
"""
nnx1 = np.arange(0,len(nx1))
nnx2 = np.arange(0,len(nx2))
n1 = nnx1[0]
n2 = nnx1[-1]
n3 = nnx2[0]
n4 = nnx2[-1]
# Start by finding the valid output support or extent interval to ensure that
# for non-finite extent signals ambiguous results are not returned.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n4+1-1)
ny = np.arange(0,len(x1)+len(x2)-1) + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
nny = np.arange(n1+n3,n1+1+n4+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n3+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
nny = np.arange(n2+n3,n2+1+n4+1-1)
ny = nny + nx1[-1]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
nny = np.arange(n1+n4,n2+1+n4+1-1)
ny = nny + nx1[0]+nx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
nny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
nny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ny = nny + max(nx1[0]+nx2[-1],nx1[-1]+nx2[0])
else:
raise ValueError('Invalid x1 x2 extents specified or valid extent not found!')
# Finally convolve the sequences
y = signal.convolve(x1, x2)
log.info('Output support: (%+d, %+d)' % (ny[0],ny[-1]))
return y[nny], ny
def conv_integral(x1,tx1,x2,tx2,extent=('f','f')):
"""
Continuous-time convolution of x1 and x2 with proper tracking of the output time axis.
Approximate the convolution integral for two continuous-time signals using the SciPy function scipy.signal.convolve(). The time axes are managed from input to output: y(t) = x1(t)*x2(t).
Parameters
----------
x1 : ndarray of signal x1 corresponding to tx1
tx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to tx2
tx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ty : ndarray of the corresponding time axis for y
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The time steps used in
x1(t) and x2(t) must match. The default extents of ('f','f') are used for signals
that are active (have support) on or within t1 and t2 respectively. A right-sided
signal such as exp(-a*t)*u(t) is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> tx = np.arange(-5,10,.01)
>>> x = ss.rect(tx-2,4) # pulse starts at t = 0
>>> y,ty = ss.conv_integral(x,tx,x,tx)
>>> plt.plot(ty,y) # expect a triangle on [0,8]
>>> plt.show()
Now, consider a pulse convolved with an exponential.
>>> h = 4*np.exp(-4*tx)*ss.step(tx)
>>> y,ty = ss.conv_integral(x,tx,h,tx,extent=('f','r')) # note extents set
>>> plt.plot(ty,y) # expect a pulse charge and discharge waveform
"""
dt = tx1[1] - tx1[0]
nx1 = np.arange(0,len(tx1))
nx2 = np.arange(0,len(tx2))
n1 = nx1[0]
n2 = nx1[-1]
n3 = nx2[0]
n4 = nx2[-1]
# Start by finding the valid output support or extent interval to ensure that
# for non-finite extent signals ambiguous results are not returned.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n4+1-1)
ty = np.arange(0,len(x1)+len(x2)-1)*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
ny = np.arange(n1+n3,n1+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n3+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
ny = np.arange(n2+n3,n2+1+n4+1-1)
ty = ny*dt + tx1[-1]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
ny = np.arange(n1+n4,n2+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
ny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
ny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ty = ny*dt + max(tx1[0]+tx2[-1],tx1[-1]+tx2[0])
else:
raise ValueError('Invalid x1 x2 extents specified or valid extent not found!')
# Finally convolve the sampled sequences and scale by dt
y = signal.convolve(x1, x2)*dt
log.info('Output support: (%+2.2f, %+2.2f)' % (ty[0],ty[-1]))
return y[ny], ty
def delta_eps(t,eps):
"""
Rectangular pulse approximation to impulse function.
Parameters
----------
t : ndarray of time axis
eps : pulse width
Returns
-------
d : ndarray containing the impulse approximation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import delta_eps
>>> t = arange(-2,2,.001)
>>> d = delta_eps(t,.1)
>>> plt.plot(t,d)
>>> plt.show()
"""
d = np.zeros(len(t))
for k,tt in enumerate(t):
if abs(tt) <= eps/2.:
d[k] = 1/float(eps)
return d
def step(t):
"""
Approximation to step function signal u(t).
In this numerical version of u(t) the step turns on at t = 0.
Parameters
----------
t : ndarray of the time axis
Returns
-------
x : ndarray of the step function signal u(t)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import step
>>> t = arange(-1,5,.01)
>>> x = step(t)
>>> plt.plot(t,x)
>>> plt.ylim([-0.01, 1.01])
>>> plt.show()
To turn on at t = 1, shift t.
>>> x = step(t - 1.0)
>>> plt.ylim([-0.01, 1.01])
>>> plt.plot(t,x)
"""
x = np.zeros(len(t))
for k,tt in enumerate(t):
if tt >= 0:
x[k] = 1.0
return x
def rect(t,tau):
"""
Approximation to the rectangle pulse Pi(t/tau).
In this numerical version of Pi(t/tau) the pulse is active
over -tau/2 <= t <= tau/2.
Parameters
----------
t : ndarray of the time axis
tau : the pulse width
Returns
-------
x : ndarray of the signal Pi(t/tau)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import rect
>>> t = arange(-1,5,.01)
>>> x = rect(t,1.0)
>>> plt.plot(t,x)
>>> plt.ylim([0, 1.01])
>>> plt.show()
To turn on the pulse at t = 1 shift t.
>>> x = rect(t - 1.0,1.0)
>>> plt.plot(t,x)
>>> plt.ylim([0, 1.01])
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/2.:
x[k] = 0
else:
x[k] = 1
return x
def tri(t,tau):
"""
Approximation to the triangle pulse Lambda(t/tau).
In this numerical version of Lambda(t/tau) the pulse is active
over -tau <= t <= tau.
Parameters
----------
t : ndarray of the time axis
tau : one half the triangle base width
Returns
-------
x : ndarray of the signal Lambda(t/tau)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import tri
>>> t = arange(-1,5,.01)
>>> x = tri(t,1.0)
>>> plt.plot(t,x)
>>> plt.show()
To turn on at t = 1, shift t.
>>> x = tri(t - 1.0,1.0)
>>> plt.plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau:
x[k] = 0
else:
x[k] = 1 - np.abs(tk)/tau
return x
def dimpulse(n):
"""
Discrete impulse function delta[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal delta[n]
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import dimpulse
>>> n = arange(-5,5)
>>> x = dimpulse(n)
>>> plt.stem(n,x)
>>> plt.show()
Shift the delta left by 2.
>>> x = dimpulse(n+2)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn == 0:
x[k] = 1.0
return x
def dstep(n):
"""
Discrete step function u[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal u[n]
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import dstep
>>> n = arange(-5,5)
>>> x = dstep(n)
>>> plt.stem(n,x)
>>> plt.show()
Shift the step left by 2.
>>> x = dstep(n+2)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0:
x[k] = 1.0
return x
def drect(n,N):
"""
Discrete rectangle function of duration N samples.
The signal is active on the interval 0 <= n <= N-1. Also known
as the rectangular window function, which is available in
scipy.signal.
Parameters
----------
n : ndarray of the time axis
N : the pulse duration
Returns
-------
x : ndarray of the signal
Notes
-----
The discrete rectangle turns on at n = 0, stays on through n = N-1, and
has a duration of exactly N samples.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import drect
>>> n = arange(-5,5)
>>> x = drect(n, N=3)
>>> plt.stem(n,x)
>>> plt.show()
Shift the rectangle left by 2.
>>> x = drect(n+2, N=3)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0 and nn < N:
x[k] = 1.0
return x
def rc_imp(Ns,alpha,M=6):
"""
A truncated raised cosine pulse used in digital communications.
The pulse shaping factor :math:`0< \\alpha < 1` is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : one-sided pulse truncation factor in symbol periods (default 6)
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform.
Examples
--------
Ten samples per symbol and alpha = 0.35.
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import rc_imp
>>> b = rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> plt.stem(n,b)
>>> plt.show()
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
a = alpha
Ns *= 1.0
for i in range(len(n)):
if (1 - 4*(a*n[i]/Ns)**2) == 0:
b[i] = np.pi/4*np.sinc(1/(2.*a))
else:
b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(1 - 4*(a*n[i]/Ns)**2)
return b
def sqrt_rc_imp(Ns,alpha,M=6):
"""
A truncated square root raised cosine pulse used in digital communications.
The pulse shaping factor 0 < alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : one-sided pulse truncation factor in symbol periods (default 6)
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform. When the
square root raised cosine (SRC) pulse is used to generate the Tx signal and
is also used at the receiver as a matched filter (receiver FIR filter), the
received signal is raised cosine shaped, giving zero intersymbol
interference and optimum suppression of additive white noise if present
at the receiver input.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import sqrt_rc_imp
>>> b = sqrt_rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> plt.stem(n,b)
>>> plt.show()
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
Ns *= 1.0
a = alpha
for i in range(len(n)):
if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(float).eps/2:
b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a)))
else:
b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2))
b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a))
return b
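# A hedged check of the matched-filter property described in the sqrt_rc_imp notes
# (illustrative sketch, not part of the original module): convolving the square root
# raised cosine pulse with itself should closely approximate the raised cosine pulse
# from rc_imp, up to truncation error.
# >>> b_srrc = sqrt_rc_imp(10, 0.35)
# >>> b_rc = rc_imp(10, 0.35)
# >>> b_matched = np.convolve(b_srrc, b_srrc) # Tx pulse followed by Rx matched filter
# >>> center = len(b_matched)//2
# >>> half = len(b_rc)//2
# >>> err = np.max(np.abs(b_matched[center-half:center+half+1]/b_matched[center] - b_rc/b_rc[half]))
# >>> err # expect a small value that shrinks as the truncation factor M grows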
def pn_gen(n_bits, m=5):
"""
Maximal length sequence signal generator.
Generates a sequence of 0/1 bits of n_bits duration. The bits themselves
are obtained from an m-sequence generated using m shift registers. Available
m-sequences (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
n_bits : the number of bits to generate
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
PN : ndarray of the generator output over N_bits
Notes
-----
The sequence is periodic having period 2**m - 1 (2^m - 1).
Examples
--------
>>> # A 15 bit period signal over 50 bits
>>> PN = pn_gen(50,4)
"""
c = m_seq(m)
Q = len(c)
max_periods = int(np.ceil(n_bits / float(Q)))
PN = np.zeros(max_periods*Q)
for k in range(max_periods):
PN[k*Q:(k+1)*Q] = c
PN = np.resize(PN, (1, n_bits))
return PN.flatten()
def m_seq(m):
"""
Generate an m-sequence ndarray using an all-ones initialization.
Available m-sequence (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
c : ndarray of one period of the m-sequence
Notes
-----
The sequence period is 2**m - 1 (2^m - 1).
Examples
--------
>>> c = m_seq(5)
"""
if m == 2:
taps = np.array([1, 1, 1])
elif m == 3:
taps = np.array([1, 0, 1, 1])
elif m == 4:
taps = np.array([1, 0, 0, 1, 1])
elif m == 5:
taps = np.array([1, 0, 0, 1, 0, 1])
elif m == 6:
taps = np.array([1, 0, 0, 0, 0, 1, 1])
elif m == 7:
taps = np.array([1, 0, 0, 0, 1, 0, 0, 1])
import torch
import torch_geometric.transforms as transforms
from torch_geometric.data import Data as OldData
from torch_geometric.data import InMemoryDataset
import os
import math
import numpy as np
from numpy.linalg import inv
from scipy.spatial.transform import Rotation as R
from utils.urdf2graph import yumi2graph, hand2graph
import h5py
class Data(OldData):
def __inc__(self, key, value):
if key == 'edge_index':
return self.num_nodes
elif key == 'l_hand_edge_index':
return self.l_hand_num_nodes
elif key == 'r_hand_edge_index':
return self.r_hand_num_nodes
else:
return 0
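# Hedged illustration of the __inc__ override above (the sample graph is made up):
# when torch_geometric batches Data objects, every attribute listed in __inc__ is
# shifted by the returned increment, so l_hand_edge_index / r_hand_edge_index keep
# pointing at the correct hand nodes after several graphs are merged into one batch.
# >>> from torch_geometric.data import Batch
# >>> d = Data(x=torch.zeros(6, 3), edge_index=torch.LongTensor([[0, 1], [1, 2]]))
# >>> batch = Batch.from_data_list([d, d])
# >>> batch.edge_index # the second copy's edges are offset by 6 (= num_nodes)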
"""
Normalize by a constant coefficient
"""
class Normalize(object):
def __call__(self, data, coeff=100.0):
if hasattr(data, 'x'):
data.x = data.x/coeff
if hasattr(data, 'l_hand_x'):
data.l_hand_x = data.l_hand_x/coeff
if hasattr(data, 'r_hand_x'):
data.r_hand_x = data.r_hand_x/coeff
return data
def __repr__(self):
return '{}()'.format(self.__class__.__name__)
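# Hedged usage sketch (paths are illustrative): the transform is typically passed
# when constructing one of the datasets defined below, e.g.
# >>> yumi_data = YumiDataset(root='./data/target/yumi', transform=Normalize())
# >>> sign_data = SignDataset(root='./data/source/sign', transform=Normalize())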
"""
Target Dataset for Yumi Manipulator
"""
class YumiDataset(InMemoryDataset):
yumi_cfg = {
'joints_name': [
'yumi_joint_1_l',
'yumi_joint_2_l',
'yumi_joint_7_l',
'yumi_joint_3_l',
'yumi_joint_4_l',
'yumi_joint_5_l',
'yumi_joint_6_l',
'yumi_joint_1_r',
'yumi_joint_2_r',
'yumi_joint_7_r',
'yumi_joint_3_r',
'yumi_joint_4_r',
'yumi_joint_5_r',
'yumi_joint_6_r',
],
'edges': [
['yumi_joint_1_l', 'yumi_joint_2_l'],
['yumi_joint_2_l', 'yumi_joint_7_l'],
['yumi_joint_7_l', 'yumi_joint_3_l'],
['yumi_joint_3_l', 'yumi_joint_4_l'],
['yumi_joint_4_l', 'yumi_joint_5_l'],
['yumi_joint_5_l', 'yumi_joint_6_l'],
['yumi_joint_1_r', 'yumi_joint_2_r'],
['yumi_joint_2_r', 'yumi_joint_7_r'],
['yumi_joint_7_r', 'yumi_joint_3_r'],
['yumi_joint_3_r', 'yumi_joint_4_r'],
['yumi_joint_4_r', 'yumi_joint_5_r'],
['yumi_joint_5_r', 'yumi_joint_6_r'],
],
'root_name': [
'yumi_joint_1_l',
'yumi_joint_1_r',
],
'end_effectors': [
'yumi_joint_6_l',
'yumi_joint_6_r',
],
'shoulders': [
'yumi_joint_2_l',
'yumi_joint_2_r',
],
'elbows': [
'yumi_joint_3_l',
'yumi_joint_3_r',
],
}
def __init__(self, root, transform=None, pre_transform=None):
super(YumiDataset, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
self._raw_file_names = [os.path.join(self.root, file) for file in os.listdir(self.root) if file.endswith('.urdf')]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data_list.append(yumi2graph(file, self.yumi_cfg))
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
Map glove data to inspire hand data
"""
def linear_map(x_, min_, max_, min_hat, max_hat):
x_hat = 1.0 * (x_ - min_) / (max_ - min_) * (max_hat - min_hat) + min_hat
return x_hat
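# Worked example of the linear scaling above (values illustrative): a glove reading
# of 45 degrees on a joint with glove range [0, 90] and hand range [0.0, -1.6] rad
# gives linear_map(45, 0, 90, 0.0, -1.6) == -0.8, i.e. halfway through the hand range.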
def map_glove_to_inspire_hand(glove_angles):
### This function linearly maps the Wiseglove angle measurement to Inspire hand's joint angles.
## preparation, specify the range for linear scaling
hand_start = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0, 0.0]) # already in radians
hand_final = np.array([-1.6, -1.6, -1.6, -1.6, -1.6, -1.6, -1.6, -1.6, -0.75, 0.0, -0.2, -0.15])
glove_start = np.array([0, 0, 53, 0, 0, 22, 0, 0, 22, 0, 0, 35, 0, 0])# * pi / 180.0 # degrees to radians
glove_final = np.array([45, 100, 0, 90, 120, 0, 90, 120, 0, 90, 120, 0, 90, 120])# * pi / 180.0
length = glove_angles.shape[0]
hand_angles = np.zeros((length, 12)) # 12 joints
## Iterate to map angles
for i in range(length):
# four fingers' extension/flexion (abduction/adduction are dumped)
hand_angles[i, 0] = linear_map(glove_angles[i, 3], glove_start[3], glove_final[3], hand_start[0], hand_final[0]) # Link1 (joint name)
hand_angles[i, 1] = linear_map(glove_angles[i, 4], glove_start[4], glove_final[4], hand_start[1], hand_final[1]) # Link11
hand_angles[i, 2] = linear_map(glove_angles[i, 6], glove_start[6], glove_final[6], hand_start[2], hand_final[2]) # Link2
hand_angles[i, 3] = linear_map(glove_angles[i, 7], glove_start[7], glove_final[7], hand_start[3], hand_final[3]) # Link22
hand_angles[i, 4] = linear_map(glove_angles[i, 9], glove_start[9], glove_final[9], hand_start[4], hand_final[4]) # Link3
hand_angles[i, 5] = linear_map(glove_angles[i, 10], glove_start[10], glove_final[10], hand_start[5], hand_final[5]) # Link33
hand_angles[i, 6] = linear_map(glove_angles[i, 12], glove_start[12], glove_final[12], hand_start[6], hand_final[6]) # Link4
hand_angles[i, 7] = linear_map(glove_angles[i, 13], glove_start[13], glove_final[13], hand_start[7], hand_final[7]) # Link44
# thumb
hand_angles[i, 8] = (hand_start[8] + hand_final[8]) / 2.0 # Link5 (rotation about z axis), fixed!
hand_angles[i, 9] = linear_map(glove_angles[i, 2], glove_start[2], glove_final[2], hand_start[9], hand_final[9]) # Link 51
hand_angles[i, 10] = linear_map(glove_angles[i, 0], glove_start[0], glove_final[0], hand_start[10], hand_final[10]) # Link 52
hand_angles[i, 11] = linear_map(glove_angles[i, 1], glove_start[1], glove_final[1], hand_start[11], hand_final[11]) # Link 53
return hand_angles
"""
Parse H5 File
"""
def parse_h5(filename, selected_key=None):
data_list = []
h5_file = h5py.File(filename, 'r')
# print(filename, h5_file.keys(), len(h5_file.keys()))
if selected_key is None:
keys = h5_file.keys()
else:
keys = [selected_key]
for key in keys:
if '语句' in key and selected_key is None: # '语句' means 'sentence'; skip these recordings unless explicitly requested
print('Skipping ' + key)
continue
# glove data
l_glove_angle = h5_file[key + '/l_glove_angle'][:]
r_glove_angle = h5_file[key + '/r_glove_angle'][:]
l_hand_angle = map_glove_to_inspire_hand(l_glove_angle)
r_hand_angle = map_glove_to_inspire_hand(r_glove_angle)
# position data
l_shoulder_pos = h5_file[key + '/l_up_pos'][:]
r_shoulder_pos = h5_file[key + '/r_up_pos'][:]
l_elbow_pos = h5_file[key + '/l_fr_pos'][:]
r_elbow_pos = h5_file[key + '/r_fr_pos'][:]
l_wrist_pos = h5_file[key + '/l_hd_pos'][:]
r_wrist_pos = h5_file[key + '/r_hd_pos'][:]
# quaternion data
l_shoulder_quat = R.from_quat(h5_file[key + '/l_up_quat'][:])
r_shoulder_quat = R.from_quat(h5_file[key + '/r_up_quat'][:])
l_elbow_quat = R.from_quat(h5_file[key + '/l_fr_quat'][:])
r_elbow_quat = R.from_quat(h5_file[key + '/r_fr_quat'][:])
l_wrist_quat = R.from_quat(h5_file[key + '/l_hd_quat'][:])
r_wrist_quat = R.from_quat(h5_file[key + '/r_hd_quat'][:])
# rotation matrix data
l_shoulder_matrix = l_shoulder_quat.as_matrix()
r_shoulder_matrix = r_shoulder_quat.as_matrix()
l_elbow_matrix = l_elbow_quat.as_matrix()
r_elbow_matrix = r_elbow_quat.as_matrix()
l_wrist_matrix = l_wrist_quat.as_matrix()
r_wrist_matrix = r_wrist_quat.as_matrix()
# transform to local coordinates
# l_wrist_matrix = l_wrist_matrix * inv(l_elbow_matrix)
# r_wrist_matrix = r_wrist_matrix * inv(r_elbow_matrix)
# l_elbow_matrix = l_elbow_matrix * inv(l_shoulder_matrix)
# r_elbow_matrix = r_elbow_matrix * inv(r_shoulder_matrix)
# l_shoulder_matrix = l_shoulder_matrix * inv(l_shoulder_matrix)
# r_shoulder_matrix = r_shoulder_matrix * inv(r_shoulder_matrix)
# euler data
l_shoulder_euler = R.from_matrix(l_shoulder_matrix).as_euler('zyx', degrees=True)
r_shoulder_euler = R.from_matrix(r_shoulder_matrix).as_euler('zyx', degrees=True)
l_elbow_euler = R.from_matrix(l_elbow_matrix).as_euler('zyx', degrees=True)
r_elbow_euler = R.from_matrix(r_elbow_matrix).as_euler('zyx', degrees=True)
l_wrist_euler = R.from_matrix(l_wrist_matrix).as_euler('zyx', degrees=True)
r_wrist_euler = R.from_matrix(r_wrist_matrix).as_euler('zyx', degrees=True)
total_frames = l_shoulder_pos.shape[0]
for t in range(total_frames):
data = parse_arm(l_shoulder_euler[t], l_elbow_euler[t], l_wrist_euler[t], r_shoulder_euler[t], r_elbow_euler[t], r_wrist_euler[t],
l_shoulder_pos[t], l_elbow_pos[t], l_wrist_pos[t], r_shoulder_pos[t], r_elbow_pos[t], r_wrist_pos[t],
l_shoulder_quat[t], l_elbow_quat[t], l_wrist_quat[t], r_shoulder_quat[t], r_elbow_quat[t], r_wrist_quat[t])
data_list.append(data)
return data_list, l_hand_angle, r_hand_angle
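# Hedged usage sketch (file name is illustrative): each h5 group is expected to hold
# arm position/quaternion tracks plus glove angles, yielding one graph per frame.
# >>> graphs, l_hand, r_hand = parse_h5('mocap/sign_language.h5')
# >>> len(graphs) # number of captured frames
# >>> l_hand.shape # (frames, 12) mapped inspire-hand joint angles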
def parse_arm(l_shoulder_euler, l_elbow_euler, l_wrist_euler, r_shoulder_euler, r_elbow_euler, r_wrist_euler,
l_shoulder_pos, l_elbow_pos, l_wrist_pos, r_shoulder_pos, r_elbow_pos, r_wrist_pos,
l_shoulder_quat, l_elbow_quat, l_wrist_quat, r_shoulder_quat, r_elbow_quat, r_wrist_quat):
# x
x = torch.stack([torch.from_numpy(l_shoulder_euler),
torch.from_numpy(l_elbow_euler),
torch.from_numpy(l_wrist_euler),
torch.from_numpy(r_shoulder_euler),
torch.from_numpy(r_elbow_euler),
torch.from_numpy(r_wrist_euler)], dim=0).float()
# number of nodes
num_nodes = 6
# edge index
edge_index = torch.LongTensor([[0, 1, 3, 4],
[1, 2, 4, 5]])
# position
pos = torch.stack([torch.from_numpy(l_shoulder_pos),
torch.from_numpy(l_elbow_pos),
torch.from_numpy(l_wrist_pos),
torch.from_numpy(r_shoulder_pos),
torch.from_numpy(r_elbow_pos),
torch.from_numpy(r_wrist_pos)], dim=0).float()
# edge attributes
edge_attr = []
for edge in edge_index.permute(1, 0):
parent = edge[0]
child = edge[1]
edge_attr.append(pos[child] - pos[parent])
edge_attr = torch.stack(edge_attr, dim=0)
# skeleton type & topology type
skeleton_type = 0
topology_type = 0
# end effector mask
ee_mask = torch.zeros(num_nodes, 1).bool()
ee_mask[2] = ee_mask[5] = True
# shoulder mask
sh_mask = torch.zeros(num_nodes, 1).bool()
sh_mask[0] = sh_mask[3] = True
# elbow mask
el_mask = torch.zeros(num_nodes, 1).bool()
el_mask[1] = el_mask[4] = True
# parent
parent = torch.LongTensor([-1, 0, 1, -1, 3, 4])
# offset
offset = torch.zeros(num_nodes, 3)
for node_idx in range(num_nodes):
if parent[node_idx] != -1:
offset[node_idx] = pos[node_idx] - pos[parent[node_idx]]
else:
offset[node_idx] = pos[node_idx]
# distance to root
root_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while current_idx != -1:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
root_dist[node_idx] = dist
# distance to shoulder
shoulder_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while current_idx != -1 and current_idx != 0 and current_idx != 3:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
shoulder_dist[node_idx] = dist
# distance to elbow
elbow_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while current_idx != -1 and current_idx != 1 and current_idx != 4:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
elbow_dist[node_idx] = dist
# quaternion
q = torch.stack([torch.from_numpy(l_shoulder_quat.as_quat()),
torch.from_numpy(l_elbow_quat.as_quat()),
torch.from_numpy(l_wrist_quat.as_quat()),
torch.from_numpy(r_shoulder_quat.as_quat()),
torch.from_numpy(r_elbow_quat.as_quat()),
torch.from_numpy(r_wrist_quat.as_quat())], dim=0).float()
data = Data(x=torch.cat([x,pos], dim=-1),
edge_index=edge_index,
edge_attr=edge_attr,
pos=pos,
q=q,
skeleton_type=skeleton_type,
topology_type=topology_type,
ee_mask=ee_mask,
sh_mask=sh_mask,
el_mask=el_mask,
root_dist=root_dist,
shoulder_dist=shoulder_dist,
elbow_dist=elbow_dist,
num_nodes=num_nodes,
parent=parent,
offset=offset)
# print(data)
return data
"""
Source Dataset for Sign Language
"""
class SignDataset(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(SignDataset, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
data_path = os.path.join(self.root, 'h5')
self._raw_file_names = [os.path.join(data_path, file) for file in os.listdir(data_path)]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data, _, _ = parse_h5(file)
data_list.extend(data)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
parse h5 with hand
"""
def parse_h5_hand(filename, selected_key=None):
data_list = []
h5_file = h5py.File(filename, 'r')
if selected_key is None:
keys = h5_file.keys()
else:
keys = [selected_key]
for key in keys:
if '语句' in key and selected_key is None: # '语句' means 'sentence'; skip these recordings unless explicitly requested
print('Skipping ' + key)
continue
# glove data
l_glove_pos = h5_file[key + '/l_glove_pos'][:]
r_glove_pos = h5_file[key + '/r_glove_pos'][:]
# insert zero for root
total_frames = l_glove_pos.shape[0]
l_glove_pos = np.concatenate([np.zeros((total_frames, 1, 3)), l_glove_pos], axis=1)
r_glove_pos = np.concatenate([np.zeros((total_frames, 1, 3)), r_glove_pos], axis=1)
# print(l_glove_pos.shape, r_glove_pos.shape)
# switch dimensions
l_glove_pos = np.stack([-l_glove_pos[..., 2], -l_glove_pos[..., 1], -l_glove_pos[..., 0]], axis=-1)
r_glove_pos = np.stack([-r_glove_pos[..., 2], -r_glove_pos[..., 1], -r_glove_pos[..., 0]], axis=-1)
for t in range(total_frames):
data = parse_glove_pos(l_glove_pos[t])
data.l_hand_x = data.x
data.l_hand_edge_index = data.edge_index
data.l_hand_edge_attr = data.edge_attr
data.l_hand_pos = data.pos
data.l_hand_ee_mask = data.ee_mask
data.l_hand_el_mask = data.el_mask
data.l_hand_root_dist = data.root_dist
data.l_hand_elbow_dist = data.elbow_dist
data.l_hand_num_nodes = data.num_nodes
data.l_hand_parent = data.parent
data.l_hand_offset = data.offset
r_hand_data = parse_glove_pos(r_glove_pos[t])
data.r_hand_x = r_hand_data.x
data.r_hand_edge_index = r_hand_data.edge_index
data.r_hand_edge_attr = r_hand_data.edge_attr
data.r_hand_pos = r_hand_data.pos
data.r_hand_ee_mask = r_hand_data.ee_mask
data.r_hand_el_mask = r_hand_data.el_mask
data.r_hand_root_dist = r_hand_data.root_dist
data.r_hand_elbow_dist = r_hand_data.elbow_dist
data.r_hand_num_nodes = r_hand_data.num_nodes
data.r_hand_parent = r_hand_data.parent
data.r_hand_offset = r_hand_data.offset
data_list.append(data)
return data_list
def parse_glove_pos(glove_pos):
# x
x = torch.from_numpy(glove_pos).float()
# number of nodes
num_nodes = 17
# edge index
edge_index = torch.LongTensor([[0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11, 0, 13, 14, 15],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]])
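# node 0 is the inserted root joint (see the zero row prepended in parse_h5_hand);
# the chains 0-1-2-3, 0-4-5-6, 0-7-8-9 and 0-10-11-12 correspond to the four fingers
# and 0-13-14-15-16 to the thumb, whose fingertips are flagged in ee_mask below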
# position
pos = torch.from_numpy(glove_pos).float()
# edge attributes
edge_attr = []
for edge in edge_index.permute(1, 0):
parent = edge[0]
child = edge[1]
edge_attr.append(pos[child] - pos[parent])
edge_attr = torch.stack(edge_attr, dim=0)
# skeleton type & topology type
skeleton_type = 0
topology_type = 0
# end effector mask
ee_mask = torch.zeros(num_nodes, 1).bool()
ee_mask[3] = ee_mask[6] = ee_mask[9] = ee_mask[12] = ee_mask[16] = True
# elbow mask
el_mask = torch.zeros(num_nodes, 1).bool()
el_mask[1] = el_mask[4] = el_mask[7] = el_mask[10] = el_mask[13] = True
# parent
parent = torch.LongTensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11, 0, 13, 14, 15])
# offset
offset = torch.zeros(num_nodes, 3)
for node_idx in range(num_nodes):
if parent[node_idx] != -1:
offset[node_idx] = pos[node_idx] - pos[parent[node_idx]]
# distance to root
root_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while parent[current_idx] != -1:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
root_dist[node_idx] = dist
# distance to elbow
elbow_dist = torch.zeros(num_nodes, 1)
for node_idx in range(num_nodes):
dist = 0
current_idx = node_idx
while current_idx != -1 and not el_mask[current_idx]:
origin = offset[current_idx]
offsets_mod = math.sqrt(origin[0]**2+origin[1]**2+origin[2]**2)
dist += offsets_mod
current_idx = parent[current_idx]
elbow_dist[node_idx] = dist
data = Data(x=x,
edge_index=edge_index,
edge_attr=edge_attr,
pos=pos,
skeleton_type=skeleton_type,
topology_type=topology_type,
ee_mask=ee_mask,
el_mask=el_mask,
root_dist=root_dist,
elbow_dist=elbow_dist,
num_nodes=num_nodes,
parent=parent,
offset=offset)
# print(data)
return data
"""
Source Dataset for Sign Language with Hand
"""
class SignWithHand(InMemoryDataset):
def __init__(self, root, transform=None, pre_transform=None):
super(SignWithHand, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
data_path = os.path.join(self.root, 'h5')
self._raw_file_names = [os.path.join(data_path, file) for file in os.listdir(data_path)]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data = parse_h5_hand(file)
data_list.extend(data)
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
Target Dataset for Inspire Hand
"""
class InspireHand(InMemoryDataset):
hand_cfg = {
'joints_name': [
'yumi_link_7_r_joint',
'Link1',
'Link11',
'Link1111',
'Link2',
'Link22',
'Link2222',
'Link3',
'Link33',
'Link3333',
'Link4',
'Link44',
'Link4444',
'Link5',
'Link51',
'Link52',
'Link53',
'Link5555',
],
'edges': [
['yumi_link_7_r_joint', 'Link1'],
['Link1', 'Link11'],
['Link11', 'Link1111'],
['yumi_link_7_r_joint', 'Link2'],
['Link2', 'Link22'],
['Link22', 'Link2222'],
['yumi_link_7_r_joint', 'Link3'],
['Link3', 'Link33'],
['Link33', 'Link3333'],
['yumi_link_7_r_joint', 'Link4'],
['Link4', 'Link44'],
['Link44', 'Link4444'],
['yumi_link_7_r_joint', 'Link5'],
['Link5', 'Link51'],
['Link51', 'Link52'],
['Link52', 'Link53'],
['Link53', 'Link5555'],
],
'root_name': 'yumi_link_7_r_joint',
'end_effectors': [
'Link1111',
'Link2222',
'Link3333',
'Link4444',
'Link5555',
],
'elbows': [
'Link1',
'Link2',
'Link3',
'Link4',
'Link5',
],
}
def __init__(self, root, transform=None, pre_transform=None):
super(InspireHand, self).__init__(root, transform, pre_transform)
self.data, self.slices = torch.load(self.processed_paths[0])
@property
def raw_file_names(self):
self._raw_file_names = [os.path.join(self.root, file) for file in os.listdir(self.root) if file.endswith('.urdf')]
return self._raw_file_names
@property
def processed_file_names(self):
return ['data.pt']
def process(self):
data_list = []
for file in self.raw_file_names:
data_list.append(hand2graph(file, self.hand_cfg))
if self.pre_filter is not None:
data_list = [data for data in data_list if self.pre_filter(data)]
if self.pre_transform is not None:
data_list = [self.pre_transform(data) for data in data_list]
data, slices = self.collate(data_list)
torch.save((data, slices), self.processed_paths[0])
"""
parse h5 with all data
"""
def parse_all(filename, selected_key=None):
data_list = []
h5_file = h5py.File(filename, 'r')
if selected_key is None:
keys = h5_file.keys()
else:
keys = [selected_key]
for key in keys:
if '语句' in key and selected_key is None: # '语句' means 'sentence'; skip these recordings unless explicitly requested
print('Skipping ' + key)
continue
# position data
l_shoulder_pos = h5_file[key + '/l_up_pos'][:]
r_shoulder_pos = h5_file[key + '/r_up_pos'][:]
l_elbow_pos = h5_file[key + '/l_fr_pos'][:]
r_elbow_pos = h5_file[key + '/r_fr_pos'][:]
l_wrist_pos = h5_file[key + '/l_hd_pos'][:]
r_wrist_pos = h5_file[key + '/r_hd_pos'][:]
# quaternion data
l_shoulder_quat = R.from_quat(h5_file[key + '/l_up_quat'][:])
r_shoulder_quat = R.from_quat(h5_file[key + '/r_up_quat'][:])
l_elbow_quat = R.from_quat(h5_file[key + '/l_fr_quat'][:])
r_elbow_quat = R.from_quat(h5_file[key + '/r_fr_quat'][:])
l_wrist_quat = R.from_quat(h5_file[key + '/l_hd_quat'][:])
r_wrist_quat = R.from_quat(h5_file[key + '/r_hd_quat'][:])
# rotation matrix data
l_shoulder_matrix = l_shoulder_quat.as_matrix()
r_shoulder_matrix = r_shoulder_quat.as_matrix()
l_elbow_matrix = l_elbow_quat.as_matrix()
r_elbow_matrix = r_elbow_quat.as_matrix()
l_wrist_matrix = l_wrist_quat.as_matrix()
r_wrist_matrix = r_wrist_quat.as_matrix()
# transform to local coordinates
# l_wrist_matrix = l_wrist_matrix * inv(l_elbow_matrix)
# r_wrist_matrix = r_wrist_matrix * inv(r_elbow_matrix)
# l_elbow_matrix = l_elbow_matrix * inv(l_shoulder_matrix)
# r_elbow_matrix = r_elbow_matrix * inv(r_shoulder_matrix)
# l_shoulder_matrix = l_shoulder_matrix * inv(l_shoulder_matrix)
# r_shoulder_matrix = r_shoulder_matrix * inv(r_shoulder_matrix)
# euler data
l_shoulder_euler = R.from_matrix(l_shoulder_matrix).as_euler('zyx', degrees=True)
r_shoulder_euler = R.from_matrix(r_shoulder_matrix).as_euler('zyx', degrees=True)
l_elbow_euler = R.from_matrix(l_elbow_matrix).as_euler('zyx', degrees=True)
r_elbow_euler = R.from_matrix(r_elbow_matrix).as_euler('zyx', degrees=True)
l_wrist_euler = R.from_matrix(l_wrist_matrix).as_euler('zyx', degrees=True)
r_wrist_euler = R.from_matrix(r_wrist_matrix).as_euler('zyx', degrees=True)
# glove data
l_glove_pos = h5_file[key + '/l_glove_pos'][:]
r_glove_pos = h5_file[key + '/r_glove_pos'][:]
# insert zero for root
total_frames = l_glove_pos.shape[0]
l_glove_pos = np.concatenate([np.zeros((total_frames, 1, 3)), l_glove_pos], axis=1)
r_glove_pos = np.concatenate([np.zeros((total_frames, 1, 3)), r_glove_pos], axis=1)
import pickle
import re
import string
# Others
import nltk
import numpy as np
from keras.layers import Dense, LSTM, Conv1D, MaxPooling1D, Dropout
from keras.layers.embeddings import Embedding
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
# Keras
from keras.preprocessing.text import Tokenizer
from nltk.corpus import stopwords
## Plotly
# py.init_notebook_mode(connected=True)
from lab4.utils import clean_text
with open('raw.pickle', 'rb') as f:
u = pickle._Unpickler(f)
u.encoding = 'latin1'
p = u.load()
print(p.keys())
embeddings_index = dict()
f = open('glove.6B.300d.txt')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
labels = np.array([row['label'] for row in p['info']])
texts = p['texts']
texts = [clean_text(text) for text in texts]
unique_words = len(set(" ".join(texts).split()))
longest_sentence = max([len(text.split()) for text in texts])
vocabulary_size = 20000
tokenizer = Tokenizer(num_words=vocabulary_size)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
data = pad_sequences(sequences, maxlen=80)
embedding_matrix = np.zeros((vocabulary_size, 300))
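# Hedged continuation sketch (the original file appears to be truncated here): the
# zero matrix above is typically filled with the GloVe vectors loaded earlier,
# leaving out-of-vocabulary words as zero rows, before being used in an Embedding
# layer. The loop below follows that standard pattern and is an assumption, not the
# original author's code.
# for word, index in tokenizer.word_index.items():
#     if index < vocabulary_size:
#         vector = embeddings_index.get(word)
#         if vector is not None:
#             embedding_matrix[index] = vector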
#==============================================================================
# WELCOME
#==============================================================================
# Welcome to RainyDay, a framework for coupling remote sensing precipitation
# fields with Stochastic Storm Transposition for assessment of rainfall-driven hazards.
# Copyright (C) 2017 <NAME> (<EMAIL>)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.#
#==============================================================================
# THIS DOCUMENT CONTAINS VARIOUS FUNCTIONS NEEDED TO RUN RainyDay
#==============================================================================
import os
import sys
import numpy as np
import scipy as sp
import scipy.ndimage # makes sp.ndimage available for mysmoother
import scipy.special # makes sp.special available for the erfinv calls in SSTalt
import scipy.interpolate # makes sp.interpolate available for the rotation interpolator
import glob
import math
from datetime import datetime, date, time, timedelta
import time
from copy import deepcopy
from mpl_toolkits.basemap import Basemap, addcyclic
from matplotlib.patches import Polygon
from scipy import stats
from netCDF4 import Dataset, num2date, date2num
#import gdal
import rasterio
import pandas as pd
from numba import prange,jit
import shapely
import geopandas as gp
from scipy.stats import norm
from scipy.stats import lognorm
# plotting stuff, really only needed for diagnostic plots
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import LogNorm
import subprocess
try:
os.environ.pop('PYTHONIOENCODING')
except KeyError:
pass
import warnings
warnings.filterwarnings("ignore")
from numba.types import int32,int64,float32,uint32
import linecache
GEOG="+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"
# =============================================================================
# Smoother that is compatible with nan values. Adapted from https://stackoverflow.com/questions/18697532/gaussian-filtering-a-image-with-nan-in-python
# =============================================================================
def mysmoother(inarray,sigma=[3,3]):
if len(sigma)!=len(inarray.shape):
sys.exit("there seems to be a mismatch between the sigma dimension and the dimension of the array you are trying to smooth")
V=inarray.copy()
V[np.isnan(inarray)]=0.
VV=sp.ndimage.gaussian_filter(V,sigma=sigma)
W=0.*inarray.copy()+1.
W[np.isnan(inarray)]=0.
WW=sp.ndimage.gaussian_filter(W,sigma=sigma)
outarray=VV/WW
outarray[np.isnan(inarray)]=np.nan
return outarray
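# Hedged usage sketch for the NaN-aware smoother above (data are illustrative):
# >>> field = np.random.rand(50, 50).astype('float32')
# >>> field[10:12, 20:25] = np.nan # e.g. missing radar pixels
# >>> smooth = mysmoother(field, sigma=[3, 3])
# >>> bool(np.isnan(smooth[11, 22])) # NaNs stay NaN and do not contaminate neighbors
# True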
def my_kde_bandwidth(obj, fac=1): # the choice of the scaling factor fac is subjective
# We use Scott's Rule, multiplied by a constant factor
return np.power(obj.n, -1./(obj.d+4)) * fac
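# Worked example of Scott's rule as implemented above: with obj.n = 1000 samples of
# obj.d = 2 dimensional storm-center locations and fac = 1, the bandwidth factor is
# 1000**(-1/(2+4)) = 1000**(-1/6), roughly 0.316.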
def convert_3D_2D(geometry):
'''
Takes a GeoSeries of 3D Multi/Polygons (has_z) and returns a list of 2D Multi/Polygons
'''
new_geo = []
for p in geometry:
if p.has_z:
if p.geom_type == 'Polygon':
lines = [xy[:2] for xy in list(p.exterior.coords)]
new_p = shapely.geometry.Polygon(lines)
new_geo.append(new_p)
elif p.geom_type == 'MultiPolygon':
new_multi_p = []
for ap in p:
lines = [xy[:2] for xy in list(ap.exterior.coords)]
new_p = shapely.geometry.Polygon(lines)
new_multi_p.append(new_p)
new_geo.append(shapely.geometry.MultiPolygon(new_multi_p))
return new_geo
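# Hedged usage sketch (the file name is illustrative): flatten 3D shapefile
# geometries before masking/intersection steps that expect 2D polygons.
# >>> wshed = gp.read_file('basin_3d.shp')
# >>> wshed['geometry'] = convert_3D_2D(wshed.geometry)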
#==============================================================================
# LOOP TO DO SPATIAL SEARCHING FOR MAXIMUM RAINFALL LOCATION AT EACH TIME STEP
# THIS IS THE CORE OF THE STORM CATALOG CREATION TECHNIQUE
#==============================================================================
#def catalogweave(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum):
# rainsum[:]=0.
# code= """
# #include <stdio.h>
# int i,j,x,y;
# for (x=0;x<xlen;x++) {
# for (y=0;y<ylen;y++) {
# for (j=0;j<maskheight;j++) {
# for (i=0;i<maskwidth;i++) {
# rainsum(y,x)=rainsum(y,x)+temparray(y+j,x+i)*trimmask(j,i);
# }
# }
# }
# }
# """
# vars=['temparray','trimmask','xlen','ylen','maskheight','maskwidth','rainsum']
# sp.weave.inline(code,vars,type_converters=converters.blitz,compiler='gcc')
# rmax=np.nanmax(rainsum)
# wheremax=np.where(rainsum==rmax)
# return rmax, wheremax[0][0], wheremax[1][0]
#
def catalogAlt(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum,domainmask):
rainsum[:]=0.
for i in range(0,(ylen)*(xlen)):
y=i//xlen
x=i-y*xlen
#print x,
rainsum[y,x]=np.nansum(np.multiply(temparray[(y):(y+maskheight),(x):(x+maskwidth)],trimmask))
#wheremax=np.argmax(rainsum)
rmax=np.nanmax(rainsum)
wheremax=np.where(rainsum==rmax)
return rmax, wheremax[0][0], wheremax[1][0]
def catalogAlt_irregular(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum,domainmask):
rainsum[:]=0.
for i in range(0,(ylen)*(xlen)):
y=i//xlen
x=i-y*xlen
#print x,y
if np.any(np.equal(domainmask[y+maskheight//2,x:x+maskwidth],1.)) and np.any(np.equal(domainmask[y:y+maskheight,x+maskwidth//2],1.)): # integer division: float indices raise TypeError under Python 3
rainsum[y,x]=np.nansum(np.multiply(temparray[(y):(y+maskheight),(x):(x+maskwidth)],trimmask))
else:
rainsum[y,x]=0.
#wheremax=np.argmax(rainsum)
rmax=np.nanmax(rainsum)
wheremax=np.where(rainsum==rmax)
return rmax, wheremax[0][0], wheremax[1][0]
@jit(nopython=True,fastmath=True)
def catalogNumba_irregular(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum,domainmask):
rainsum[:]=0.
halfheight=int32(np.ceil(maskheight/2))
halfwidth=int32(np.ceil(maskwidth/2))
for i in range(0,ylen*xlen):
y=i//xlen
x=i-y*xlen
#print x,y
if np.any(np.equal(domainmask[y+halfheight,x:x+maskwidth],1.)) and np.any(np.equal(domainmask[y:y+maskheight,x+halfwidth],1.)):
rainsum[y,x]=np.nansum(np.multiply(temparray[y:(y+maskheight),x:(x+maskwidth)],trimmask))
else:
rainsum[y,x]=0.
#wheremax=np.argmax(rainsum)
rmax=np.nanmax(rainsum)
wheremax=np.where(np.equal(rainsum,rmax))
return rmax, wheremax[0][0], wheremax[1][0]
@jit(nopython=True)
def catalogNumba(temparray,trimmask,xlen,ylen,maskheight,maskwidth,rainsum):
rainsum[:]=0.
for i in range(0,(ylen)*(xlen)):
y=i//xlen
x=i-y*xlen
#print x,y
rainsum[y,x]=np.nansum(np.multiply(temparray[(y):(y+maskheight),(x):(x+maskwidth)],trimmask))
#wheremax=np.argmax(rainsum)
rmax=np.nanmax(rainsum)
wheremax=np.where(np.equal(rainsum,rmax))
return rmax, wheremax[0][0], wheremax[1][0]
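# Hedged sketch of how the exhaustive search above is typically driven during storm
# catalog creation (variable names here are illustrative, not RainyDay's driver code):
# >>> maskheight, maskwidth = trimmask.shape
# >>> ylen = rainfield.shape[0] - maskheight + 1
# >>> xlen = rainfield.shape[1] - maskwidth + 1
# >>> rainsum = np.zeros((ylen, xlen), dtype='float32')
# >>> rmax, ybest, xbest = catalogNumba(rainfield, trimmask, xlen, ylen, maskheight, maskwidth, rainsum)
# rmax is the largest mask-weighted rainfall total and (ybest, xbest) is the
# upper-left corner of the transposition position that produced it.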
@jit(nopython=True)
def DistributionBuilder(intenserain,tempmax,xlen,ylen,checksep):
for y in np.arange(0,ylen):
for x in np.arange(0,xlen):
if np.any(checksep[:,y,x]):
#fixind=np.where(checksep[:,y,x]==True)
for i in np.arange(0,checksep.shape[0]):
if checksep[i,y,x]==True:
fixind=i
break
if tempmax[y,x]>intenserain[fixind,y,x]:
intenserain[fixind,y,x]=tempmax[y,x]
checksep[:,y,x]=False
checksep[fixind,y,x]=True
else:
checksep[fixind,y,x]=False
elif tempmax[y,x]>np.min(intenserain[:,y,x]):
fixind=np.argmin(intenserain[:,y,x])
intenserain[fixind,y,x]=tempmax[y,x]
checksep[fixind,y,x]=True
return intenserain,checksep
# slightly faster numpy-based version of above
def DistributionBuilderFast(intenserain,tempmax,xlen,ylen,checksep):
minrain=np.min(intenserain,axis=0)
if np.any(checksep):
flatsep=np.any(checksep,axis=0)
minsep=np.argmax(checksep[:,flatsep],axis=0)
islarger=np.greater(tempmax[flatsep],intenserain[minsep,flatsep])
if np.any(islarger):
ys,xs=np.where(flatsep) # chained fancy indexing (a[i,mask][j]=...) assigns into a copy, so index explicitly
intenserain[minsep[islarger],ys[islarger],xs[islarger]]=tempmax[flatsep][islarger]
checksep[:]=False
checksep[minsep,flatsep]=True
else:
checksep[minsep,flatsep]=False
elif np.any(np.greater(tempmax,minrain)):
#else:
fixind=np.greater(tempmax,minrain)
minrainind=np.argmin(intenserain,axis=0)
intenserain[minrainind[fixind],fixind]=tempmax[fixind]
checksep[minrainind[fixind],fixind]=True
return intenserain,checksep
#def SSTalt(passrain,sstx,ssty,trimmask,maskheight,maskwidth,intense_data=False):
# rainsum=np.zeros((len(sstx)),dtype='float32')
# nreals=len(rainsum)
#
# for i in range(0,nreals):
# rainsum[i]=np.nansum(np.multiply(passrain[(ssty[i]) : (ssty[i]+maskheight) , (sstx[i]) : (sstx[i]+maskwidth)],trimmask))
# return rainsum
@jit(fastmath=True)
def SSTalt(passrain,sstx,ssty,trimmask,maskheight,maskwidth,intensemean=None,intensestd=None,intensecorr=None,homemean=None,homestd=None,durcheck=False):
maxmultiplier=1.5
rainsum=np.zeros((len(sstx)),dtype='float32')
whichstep=np.zeros((len(sstx)),dtype='int32')
nreals=len(rainsum)
nsteps=passrain.shape[0]
multiout=np.empty_like(rainsum)
if (intensemean is not None) and (homemean is not None):
domean=True
else:
domean=False
if (intensestd is not None) and (intensecorr is not None) and (homestd is not None):
#rquant=np.random.random_integers(5,high=95,size=nreals)/100.
rquant=np.random.random_sample(size=nreals)
doall=True
else:
doall=False
rquant=np.nan
if durcheck==False:
exprain=np.expand_dims(passrain,0)
else:
exprain=passrain
for k in range(0,nreals):
y=int(ssty[k])
x=int(sstx[k])
if np.all(np.less(exprain[:,y:y+maskheight,x:x+maskwidth],0.5)):
rainsum[k]=0.
multiout[k]=-999.
else:
if domean:
#sys.exit('need to fix short duration part')
muR=homemean-intensemean[y,x]
if doall:
stdR=np.sqrt(np.power(homestd,2)+np.power(intensestd[y,x],2)-2.*intensecorr[y,x]*homestd*intensestd[y,x])
# multiplier=sp.stats.lognorm.ppf(rquant[k],stdR,loc=0,scale=np.exp(muR))
#multiplier=10.
#while multiplier>maxmultiplier: # who knows what the right number is to use here...
inverrf=sp.special.erfinv(2.*rquant-1.)
multiplier=np.exp(muR+np.sqrt(2.*np.power(stdR,2))*inverrf[k])
#multiplier=np.random.lognormal(muR,stdR)
if multiplier>maxmultiplier:
multiplier=1.
else:
multiplier=np.exp(muR)
if multiplier>maxmultiplier:
multiplier=1.
else:
multiplier=1.
# print("still going!")
if multiplier>maxmultiplier:
sys.exit("Something seems to be going horribly wrong in the multiplier scheme!")
else:
multiout[k]=multiplier
if durcheck==True:
storesum=0.
storestep=0
for kk in range(0,nsteps):
#tempsum=numba_multimask_calc(passrain[kk,:],rsum,train,trimmask,ssty[k],maskheight,sstx[k],maskwidth)*multiplier
tempsum=numba_multimask_calc(passrain[kk,:],trimmask,y,x,maskheight,maskwidth)*multiplier
if tempsum>storesum:
storesum=tempsum
storestep=kk
rainsum[k]=storesum
whichstep[k]=storestep
else:
rainsum[k]=numba_multimask_calc(passrain,trimmask,y,x,maskheight,maskwidth)*multiplier
if domean:
return rainsum,multiout,whichstep
else:
return rainsum,whichstep
#@jit(nopython=True,fastmath=True,parallel=True)
@jit(nopython=True,fastmath=True)
def numba_multimask_calc(passrain,trimmask,ssty,sstx,maskheight,maskwidth):
train=np.multiply(passrain[ssty : ssty+maskheight , sstx : sstx+maskwidth],trimmask)
rainsum=np.sum(train)
return rainsum
@jit(fastmath=True)
def SSTalt_singlecell(passrain,sstx,ssty,trimmask,maskheight,maskwidth,intensemean=None,intensestd=None,intensecorr=None,homemean=None,homestd=None,durcheck=False):
rainsum=np.zeros((len(sstx)),dtype='float32')
whichstep=np.zeros((len(sstx)),dtype='int32')
nreals=len(rainsum)
nsteps=passrain.shape[0]
multiout=np.empty_like(rainsum)
# do we do deterministic or dimensionless rescaling?
if (intensemean is not None) and (homemean is not None):
domean=True
else:
domean=False
# do we do stochastic rescaling?
if (intensestd is not None) and (intensecorr is not None) and (homestd is not None):
rquant=np.random.random_sample(size=nreals)
inverrf=sp.special.erfinv(2.*rquant-1.)
doall=True
else:
doall=False
#rquant=np.nan
if durcheck==False:
passrain=np.expand_dims(passrain,0)
# deterministic or dimensionless:
if domean and doall==False:
rain,multi,step=killerloop_singlecell(passrain,rainsum,whichstep,nreals,ssty,sstx,nsteps,durcheck=durcheck,intensemean=intensemean,homemean=homemean,multiout=multiout)
return rain,multi,step
# stochastic:
elif doall:
rain,multi,step=killerloop_singlecell(passrain,rainsum,whichstep,nreals,ssty,sstx,nsteps,durcheck=durcheck,intensemean=intensemean,intensestd=intensestd,intensecorr=intensecorr,homemean=homemean,homestd=homestd,multiout=multiout,inverrf=inverrf)
return rain,multi,step
# no rescaling:
else:
rain,_,step=killerloop_singlecell(passrain,rainsum,whichstep,nreals,ssty,sstx,nsteps,durcheck=durcheck,multiout=multiout)
return rain,step
#@jit(nopython=True,fastmath=True,parallel=True)
@jit(nopython=True,fastmath=True)
def killerloop_singlecell(passrain,rainsum,whichstep,nreals,ssty,sstx,nsteps,durcheck=False,intensemean=None,homemean=None,homestd=None,multiout=None,rquant=None,intensestd=None,intensecorr=None,inverrf=None):
maxmultiplier=1.5 # who knows what the right number is to use here...
for k in prange(nreals):
y=int(ssty[k])
x=int(sstx[k])
# deterministic or dimensionless:
if (intensemean is not None) and (homemean is not None) and (homestd is None):
if np.less(homemean,0.001) or np.less(intensemean[y,x],0.001):
multiplier=1. # or maybe this should be zero
else:
multiplier=np.exp(homemean-intensemean[y,x])
if multiplier>maxmultiplier:
multiplier=1. # or maybe this should be zero
# stochastic:
elif (intensemean is not None) and (homemean is not None) and (homestd is not None):
if np.less(homemean,0.001) or np.less(intensemean[y,x],0.001):
multiplier=1. # or maybe this should be zero
else:
muR=homemean-intensemean[y,x]
stdR=np.sqrt(np.power(homestd,2)+np.power(intensestd[y,x],2)-2*intensecorr[y,x]*homestd*intensestd[y,x])
multiplier=np.exp(muR+np.sqrt(2.*np.power(stdR,2))*inverrf[k])
if multiplier>maxmultiplier:
multiplier=1. # or maybe this should be zero
# no rescaling:
else:
multiplier=1.
if durcheck==False:
rainsum[k]=np.nansum(passrain[:,y, x])*multiplier # apply the rescaling multiplier here as well, matching SSTalt
else:
storesum=0.
storestep=0
for kk in range(nsteps):
tempsum=passrain[kk,y,x]
if tempsum>storesum:
storesum=tempsum
storestep=kk
rainsum[k]=storesum*multiplier
multiout[k]=multiplier
whichstep[k]=storestep
return rainsum,multiout,whichstep
#@jit(nopython=True,fastmath=True,parallel=True)
#def killerloop(passrain,rainsum,nreals,ssty,sstx,maskheight,maskwidth,trimmask,nsteps,durcheck):
# for k in prange(nreals):
# spanx=int64(sstx[k]+maskwidth)
# spany=int64(ssty[k]+maskheight)
# if np.all(np.less(passrain[:,ssty[k]:spany,sstx[k]:spanx],0.5)):
# rainsum[k]=0.
# else:
# if durcheck==False:
# rainsum[k]=np.nansum(np.multiply(passrain[ssty[k] : spany , sstx[k] : spanx],trimmask))
# else:
# storesum=float32(0.)
# for kk in range(nsteps):
# tempsum=np.nansum(np.multiply(passrain[kk,ssty[k]:spany,sstx[k]:spanx],trimmask))
# if tempsum>storesum:
# storesum=tempsum
# rainsum[k]=storesum
# return rainsum
#whichstep[k]=storestep
#return rainsum,whichstep
# this function below never worked for some unknown Numba problem-error messages indicated that it wasn't my fault!!! Some problem in tempsum
#@jit(nopython=True,fastmath=True,parallel=True)
#def killerloop(passrain,rainsum,nreals,ssty,sstx,maskheight,maskwidth,masktile,nsteps,durcheck):
# for k in prange(nreals):
# spanx=sstx[k]+maskwidth
# spany=ssty[k]+maskheight
# if np.all(np.less(passrain[:,ssty[k]:spany,sstx[k]:spanx],0.5)):
# rainsum[k]=0.
# else:
# if durcheck==False:
# #tempstep=np.multiply(passrain[:,ssty[k] : spany , sstx[k] : spanx],trimmask)
# #xnum=int64(sstx[k])
# #ynum=int64(ssty[k])
# #rainsum[k]=np.nansum(passrain[:,ssty[k], sstx[k]])
# rainsum[k]=np.nansum(np.multiply(passrain[:,ssty[k] : spany , sstx[k] : spanx],masktile))
# else:
# storesum=float32(0.)
# for kk in range(nsteps):
# #tempsum=0.
# #tempsum=np.multiply(passrain[kk,ssty[k]:spany,sstx[k]:spanx],masktile[0,:,:])
# tempsum=np.nansum(np.multiply(passrain[kk,ssty[k]:spany,sstx[k]:spanx],masktile[0,:,:]))
# return rainsum
#==============================================================================
# THIS VARIANT IS SIMPLER AND UNLIKE SSTWRITE, IT ACTUALLY WORKS RELIABLY!
#==============================================================================
#def SSTwriteAlt(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth):
# nyrs=np.int(rlzx.shape[0])
# raindur=np.int(catrain.shape[1])
# outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
# unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True)
# #ctr=0
# for i in range(0,len(unqstm)):
# unqwhere=np.where(unqstm[i]==rlzstm)[0]
# for j in unqwhere:
# #ctr=ctr+1
# #print ctr
# outrain[j,:]=np.multiply(catrain[unqstm[i],:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],trimmask)
# return outrain
#==============================================================================
# THIS VARIANT IS SAME AS ABOVE, BUT HAS A MORE INTERESTING RAINFALL PREPENDING PROCEDURE
#==============================================================================
#def SSTwriteAltPreCat(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth,precat,ptime):
# catyears=ptime.astype('datetime64[Y]').astype(int)+1970
# ptime=ptime.astype('datetime64[M]').astype(int)-(catyears-1970)*12+1
# nyrs=np.int(rlzx.shape[0])
# raindur=np.int(catrain.shape[1]+precat.shape[1])
# outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
# unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True)
#
# for i in range(0,len(unqstm)):
# unqwhere=np.where(unqstm[i]==rlzstm)[0]
# unqmonth=ptime[unqstm[i]]
# pretimeind=np.where(np.logical_and(ptime>unqmonth-2,ptime<unqmonth+2))[0]
# for j in unqwhere:
# temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],axis=0),catrain[unqstm[i],:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
# outrain[j,:]=np.multiply(temprain,trimmask)
# return outrain
#
#==============================================================================
# SAME AS ABOVE, BUT HANDLES STORM ROTATION
#==============================================================================
#def SSTwriteAltPreCatRotation(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth,precat,ptime,delarray,rlzanglebin,rainprop):
##def SSTwriteAltPreCatRotation(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth,precat,ptime,delarray,rlzanglebin):
# catyears=ptime.astype('datetime64[Y]').astype(int)+1970
# ptime=ptime.astype('datetime64[M]').astype(int)-(catyears-1970)*12+1
# nyrs=np.int(rlzx.shape[0])
# raindur=np.int(catrain.shape[1]+precat.shape[1])
# outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
# unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True) # unqstm is the storm number
#
# for i in range(0,len(unqstm)):
# unqwhere=np.where(unqstm[i]==rlzstm)[0]
# unqmonth=ptime[unqstm[i]]
# pretimeind=np.where(np.logical_and(ptime>unqmonth-2,ptime<unqmonth+2))[0]
# for j in unqwhere:
# inrain=catrain[unqstm[i],:].copy()
#
# xctr=rlzx[j]+maskwidth/2.
# yctr=rlzy[j]+maskheight/2.
# xlinsp=np.linspace(-xctr,rainprop.subdimensions[1]-xctr,rainprop.subdimensions[1])
# ylinsp=np.linspace(-yctr,rainprop.subdimensions[0]-yctr,rainprop.subdimensions[0])
#
# ingridx,ingridy=np.meshgrid(xlinsp,ylinsp)
# ingridx=ingridx.flatten()
# ingridy=ingridy.flatten()
# outgrid=np.column_stack((ingridx,ingridy))
#
# for k in range(0,inrain.shape[0]):
# interp=sp.interpolate.LinearNDInterpolator(delarray[unqstm[i]][rlzanglebin[j]-1],inrain[k,:].flatten(),fill_value=0.)
# inrain[k,:]=np.reshape(interp(outgrid),rainprop.subdimensions)
# #inrain[k,:]=temprain
#
# temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
#
# outrain[j,:]=np.multiply(temprain,trimmask)
# return outrain
@jit(fastmath=True)
def SSTspin_write_v2(catrain,rlzx,rlzy,rlzstm,trimmask,maskheight,maskwidth,precat,ptime,rainprop,rlzanglebin=None,delarray=None,spin=False,flexspin=True,samptype='uniform',cumkernel=None,rotation=False,domaintype='rectangular'):
catyears=ptime.astype('datetime64[Y]').astype(int)+1970
ptime=ptime.astype('datetime64[M]').astype(int)-(catyears-1970)*12+1
nyrs=int(rlzx.shape[0]) # np.int was removed from NumPy; the builtin int is equivalent here
raindur=int(catrain.shape[1]+precat.shape[1])
outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True) # unqstm is the storm number
for i in range(0,len(unqstm)):
unqwhere=np.where(unqstm[i]==rlzstm)[0]
unqmonth=ptime[unqstm[i]]
pretimeind=np.where(np.logical_and(ptime>unqmonth-1,ptime<unqmonth+1))[0]
# flexspin allows you to use spinup rainfall from anywhere in transposition domain, rather than just storm locations, but it doesn't seem to be very useful based on initial testing
if spin==True and flexspin==True:
if samptype=='kernel' or domaintype=='irregular':
rndloc=np.random.random_sample(len(unqwhere))
shiftprex,shiftprey=pykernel(rndloc,cumkernel) # pykernel matches this two-argument call; numbakernel needs preallocated outputs
else:
shiftprex=np.random.randint(0,int(rainprop.subdimensions[1])-maskwidth,len(unqwhere)) # randint's exclusive upper bound matches the old inclusive random_integers bound
shiftprey=np.random.randint(0,int(rainprop.subdimensions[0])-maskheight,len(unqwhere))
ctr=0
for j in unqwhere:
inrain=catrain[unqstm[i],:].copy()
# this doesn't rotate the prepended rainfall
if rotation==True:
xctr=rlzx[j]+maskwidth/2.
yctr=rlzy[j]+maskheight/2.
xlinsp=np.linspace(-xctr,rainprop.subdimensions[1]-xctr,rainprop.subdimensions[1])
ylinsp=np.linspace(-yctr,rainprop.subdimensions[0]-yctr,rainprop.subdimensions[0])
ingridx,ingridy=np.meshgrid(xlinsp,ylinsp)
ingridx=ingridx.flatten()
ingridy=ingridy.flatten()
outgrid=np.column_stack((ingridx,ingridy))
for k in range(0,inrain.shape[0]):
interp=sp.interpolate.LinearNDInterpolator(delarray[unqstm[i]][rlzanglebin[j]-1],inrain[k,:].flatten(),fill_value=0.)
inrain[k,:]=np.reshape(interp(outgrid),rainprop.subdimensions)
if spin==True and flexspin==True:
temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(shiftprey[ctr]) : (shiftprey[ctr]+maskheight) , (shiftprex[ctr]) : (shiftprex[ctr]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
elif spin==True and flexspin==False:
temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
elif spin==False:
temprain=inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]
else:
sys.exit("what else is there?")
ctr=ctr+1
outrain[j,:]=np.multiply(temprain,trimmask)
return outrain
##==============================================================================
## SAME AS ABOVE, BUT A BIT MORE DYNAMIC IN TERMS OF SPINUP
##==============================================================================
#def SSTspin_write_v2(catrain,rlzx,rlzy,rlzstm,trimmask,xmin,xmax,ymin,ymax,maskheight,maskwidth,precat,ptime,rainprop,rlzanglebin=None,delarray=None,spin=False,flexspin=True,samptype='uniform',cumkernel=None,rotation=False,domaintype='rectangular',intense_data=False):
# catyears=ptime.astype('datetime64[Y]').astype(int)+1970
# ptime=ptime.astype('datetime64[M]').astype(int)-(catyears-1970)*12+1
# nyrs=np.int(rlzx.shape[0])
# raindur=np.int(catrain.shape[1]+precat.shape[1])
# outrain=np.zeros((nyrs,raindur,maskheight,maskwidth),dtype='float32')
# unqstm,unqind,unqcnts=np.unique(rlzstm,return_inverse=True,return_counts=True) # unqstm is the storm number
#
# if intense_data!=False:
# sys.exit("Scenario writing for intensity-based resampling not tested!")
# intquant=intense_data[0]
# fullmu=intense_data[1]
# fullstd=intense_data[2]
# muorig=intense_data[3]
# stdorig=intense_data[4]
#
# for i in range(0,len(unqstm)):
# unqwhere=np.where(unqstm[i]==rlzstm)[0]
# unqmonth=ptime[unqstm[i]]
# pretimeind=np.where(np.logical_and(ptime>unqmonth-1,ptime<unqmonth+1))[0]
#
# if transpotype=='intensity':
# origmu=np.multiply(murain[caty[i]:caty[i]+maskheight,catx[i]:catx[i]+maskwidth],trimmask)
# origstd=np.multiply(stdrain[caty[i]:caty[i]+maskheight,catx[i]:catx[i]+maskwidth],trimmask)
# #intense_dat=[intquant[],murain,stdrain,origmu,origstd]
#
# # flexspin allows you to use spinup rainfall from anywhere in transposition domain, rather than just storm locations, but it doesn't seem to be very useful based on initial testing
# if spin==True and flexspin==True:
# if samptype=='kernel' or domaintype=='irregular':
# rndloc=np.random.random_sample(len(unqwhere))
# shiftprex,shiftprey=numbakernel(rndloc,cumkernel)
# else:
# shiftprex=np.random.random_integers(0,np.int(rainprop.subdimensions[1])-maskwidth-1,len(unqwhere))
# shiftprey=np.random.random_integers(0,np.int(rainprop.subdimensions[0])-maskheight-1,len(unqwhere))
#
# ctr=0
# for j in unqwhere:
# inrain=catrain[unqstm[i],:].copy()
#
# if intense_data!=False:
# transmu=np.multiply(fullmu[(rlzy[i]) : (rlzy[i]+maskheight) , (rlzx[i]) : (rlzx[i]+maskwidth)],trimmask)
# transtd=np.multiply(fullstd[(rlzy[i]) : (rlzy[i]+maskheight) , (rlzx[i]) : (rlzx[i]+maskwidth)],trimmask)
# mu_multi=transmu/muorig
# std_multi=np.abs(transtd-stdorig)/stdorig
# multipliermask=norm.ppf(intquant[i],loc=mu_multi,scale=std_multi)
# multipliermask[multipliermask<0.]=0.
# multipliermask[np.isnan(multipliermask)]=0.
#
# # this doesn't rotate the prepended rainfall
# if rotation==True:
# xctr=rlzx[j]+maskwidth/2.
# yctr=rlzy[j]+maskheight/2.
# xlinsp=np.linspace(-xctr,rainprop.subdimensions[1]-xctr,rainprop.subdimensions[1])
# ylinsp=np.linspace(-yctr,rainprop.subdimensions[0]-yctr,rainprop.subdimensions[0])
#
# ingridx,ingridy=np.meshgrid(xlinsp,ylinsp)
# ingridx=ingridx.flatten()
# ingridy=ingridy.flatten()
# outgrid=np.column_stack((ingridx,ingridy))
#
# for k in range(0,inrain.shape[0]):
# interp=sp.interpolate.LinearNDInterpolator(delarray[unqstm[i]][rlzanglebin[j]-1],inrain[k,:].flatten(),fill_value=0.)
# inrain[k,:]=np.reshape(interp(outgrid),rainprop.subdimensions)
#
# if spin==True and flexspin==True:
# temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(shiftprey[ctr]) : (shiftprey[ctr]+maskheight) , (shiftprex[ctr]) : (shiftprex[ctr]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
# elif spin==True and flexspin==False:
# temprain=np.concatenate((np.squeeze(precat[np.random.choice(pretimeind, 1),:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)],axis=0),inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]),axis=0)
# elif spin==False:
# temprain=inrain[:,(rlzy[j]) : (rlzy[j]+maskheight) , (rlzx[j]) : (rlzx[j]+maskwidth)]
# else:
# sys.exit("what else is there?")
# ctr=ctr+1
# if intense_data!=False:
# outrain[j,:]=np.multiply(temprain,multipliermask)
# else:
# outrain[j,:]=np.multiply(temprain,trimmask)
# return outrain
#==============================================================================
# LOOP FOR KERNEL BASED STORM TRANSPOSITION
# THIS FINDS THE TRANSPOSITION LOCATION FOR EACH REALIZATION IF YOU ARE USING THE KERNEL-BASED RESAMPLER
# IF I CONFIGURE THE SCRIPT SO THE USER CAN PROVIDE A CUSTOM RESAMPLING SCHEME, THIS WOULD PROBABLY WORK FOR THAT AS WELL
#==============================================================================
#def weavekernel(rndloc,cumkernel):
# nlocs=len(rndloc)
# nrows=cumkernel.shape[0]
# ncols=cumkernel.shape[1]
# tempx=np.empty((len(rndloc)),dtype="int32")
# tempy=np.empty((len(rndloc)),dtype="int32")
# code= """
# #include <stdio.h>
# int i,x,y,brklp;
# double prevprob;
# for (i=0;i<nlocs;i++) {
# prevprob=0.0;
# brklp=0;
# for (y=0; y<nrows; y++) {
# for (x=0; x<ncols; x++) {
# if ( (rndloc(i)<=cumkernel(y,x)) && (rndloc(i)>prevprob) ) {
# tempx(i)=x;
# tempy(i)=y;
# prevprob=cumkernel(y,x);
# brklp=1;
# break;
# }
# }
# if (brklp==1) {
# break;
# }
# }
# }
# """
# vars=['rndloc','cumkernel','nlocs','nrows','ncols','tempx','tempy']
# sp.weave.inline(code,vars,type_converters=converters.blitz,compiler='gcc')
# return tempx,tempy
def pykernel(rndloc,cumkernel):
nlocs=len(rndloc)
ncols=cumkernel.shape[1]
tempx=np.empty((len(rndloc)),dtype="int32")
tempy=np.empty((len(rndloc)),dtype="int32")
flatkern=np.append(0.,cumkernel.flatten())
for i in range(0,nlocs):
x=rndloc[i]-flatkern
x[np.less(x,0.)]=1000.
whereind = np.argmin(x)
y=whereind//ncols
x=whereind-y*ncols
tempx[i]=x
tempy[i]=y
return tempx,tempy
@jit
def numbakernel(rndloc,cumkernel,tempx,tempy,ncols):
nlocs=len(rndloc)
#ncols=xdim
flatkern=np.append(0.,cumkernel.flatten())
#x=np.zeros_like(rndloc,dtype='float64')
for i in np.arange(0,nlocs):
x=rndloc[i]-flatkern
x[np.less(x,0.)]=10.
whereind=np.argmin(x)
y=whereind//ncols
x=whereind-y*ncols
tempx[i]=x
tempy[i]=y
return tempx,tempy
@jit
def numbakernel_fast(rndloc,cumkernel,tempx,tempy,ncols):
nlocs=int32(len(rndloc))
ncols=int32(cumkernel.shape[1])
flatkern=np.append(0.,cumkernel.flatten())
return kernelloop(nlocs,rndloc,flatkern,ncols,tempx,tempy)
#@jit(nopython=True,fastmath=True,parallel=True)
@jit(nopython=True,fastmath=True)
def kernelloop(nlocs,rndloc,flatkern,ncols,tempx,tempy):
for i in prange(nlocs):
diff=rndloc[i]-flatkern
diff[np.less(diff,0.)]=10.
whereind=np.argmin(diff)
y=whereind//ncols
x=whereind-y*ncols
tempx[i]=x
tempy[i]=y
return tempx,tempy
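#==============================================================================
# ILLUSTRATIVE SKETCH (NOT PART OF THE ORIGINAL WORKFLOW): HOW THE KERNEL
# SAMPLERS ABOVE ARE TYPICALLY CALLED. THE 3x4 KERNEL AND THE DRAW COUNT BELOW
# ARE HYPOTHETICAL, CHOSEN ONLY TO SHOW THE EXPECTED SHAPES AND DTYPES.
#==============================================================================
#rndloc=np.random.random_sample(10)                        # one uniform draw per realization
#cumkernel=np.cumsum(np.full(12,1./12.)).reshape(3,4)      # cumulative transposition kernel over the flattened grid
#tempx=np.empty(len(rndloc),dtype='int32')                 # preallocated column indices
#tempy=np.empty(len(rndloc),dtype='int32')                 # preallocated row indices
#tempx,tempy=numbakernel_fast(rndloc,cumkernel,tempx,tempy,cumkernel.shape[1])
#px,py=pykernel(rndloc,cumkernel)                          # pure-python version; should give the same indices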
#==============================================================================
# FIND THE BOUNDARY INDICES AND COORDINATES FOR THE USER-DEFINED SUBAREA
# NOTE THAT subind ARE THE MATRIX INDICES OF THE SUBBOX, STARTING FROM UPPER LEFT CORNER OF DOMAIN AS (0,0)
# NOTE THAT subcoord ARE THE COORDINATES OF THE OUTSIDE BORDER OF THE SUBBOX
# THEREFORE THE DISTANCE FROM THE WESTERN (SOUTHERN) BOUNDARY TO THE EASTERN (NORTHERN) BOUNDARY IS NCOLS (NROWS) +1 TIMES THE EAST-WEST (NORTH-SOUTH) RESOLUTION
#==============================================================================
def findsubbox(inarea,rainprop):
outind=np.empty([4],dtype='int')
outextent=np.empty([4])
outdim=np.empty([2])
inbox=deepcopy(inarea)
rangex=np.arange(rainprop.bndbox[0],rainprop.bndbox[1]-rainprop.spatialres[0]/1000,rainprop.spatialres[0])
rangey=np.arange(rainprop.bndbox[3],rainprop.bndbox[2]+rainprop.spatialres[1]/1000,-rainprop.spatialres[1])
if rangex.shape[0]<rainprop.dimensions[1]:
rangex=np.append(rangex,rangex[-1])
if rangey.shape[0]<rainprop.dimensions[0]:
rangey=np.append(rangey,rangey[-1])
if rangex.shape[0]>rainprop.dimensions[1]:
rangex=rangex[0:-1]
if rangey.shape[0]>rainprop.dimensions[0]:
rangey=rangey[0:-1]
outextent=inbox
# "SNAP" output extent to grid
outind[0]=np.abs(rangex-outextent[0]).argmin()
outind[1]=np.abs(rangex-outextent[1]).argmin()-1
outind[2]=np.abs(rangey-outextent[2]).argmin()-1
outind[3]=np.abs(rangey-outextent[3]).argmin()
outextent[0]=rangex[outind[0]]
outextent[1]=rangex[outind[1]+1]
outextent[2]=rangey[outind[2]+1]
outextent[3]=rangey[outind[3]]
outdim[1]=np.shape(np.arange(outind[0],outind[1]+1))[0]
outdim[0]=np.shape(np.arange(outind[3],outind[2]+1))[0]
outdim=np.array(outdim,dtype='int32')
return outextent,outind,outdim
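#==============================================================================
# ILLUSTRATIVE SKETCH (HYPOTHETICAL NUMBERS): findsubbox TAKES A REQUESTED BOX
# AS [WEST,EAST,SOUTH,NORTH] AND "SNAPS" IT TO THE NEAREST GRID-CELL EDGES,
# RETURNING THE SNAPPED EXTENT, THE MATRIX INDICES (outind), AND THE NUMBER OF
# COLUMNS/ROWS (outdim) OF THE RESULTING SUBGRID.
#==============================================================================
#subextent,subind,subdim=findsubbox([-85.27,-84.11,33.08,34.92],rainprop)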
#==============================================================================
# THIS RETURNS A LOGICAL GRID THAT CAN THEN BE APPLIED TO THE GLOBAL GRID TO EXTRACT
# A USER-DEFINED SUBGRID
# THIS HELPS TO KEEP ARRAY SIZES SMALL
#==============================================================================
def creategrids(rainprop):
globrangex=np.arange(0,rainprop.dimensions[1],1)
globrangey=np.arange(0,rainprop.dimensions[0],1)
subrangex=np.arange(rainprop.subind[0],rainprop.subind[1]+1,1)
subrangey=np.arange(rainprop.subind[3],rainprop.subind[2]+1,1)
subindx=np.logical_and(globrangex>=subrangex[0],globrangex<=subrangex[-1])
subindy=np.logical_and(globrangey>=subrangey[0],globrangey<=subrangey[-1])
gx,gy=np.meshgrid(subindx,subindy)
outgrid=np.logical_and(gx==True,gy==True)
return outgrid,subindx,subindy
#==============================================================================
# FUNCTION TO CREATE A MASK ACCORDING TO A USER-DEFINED POLYGON SHAPEFILE AND PROJECTION
# THIS USES GDAL COMMANDS FROM THE OS TO RASTERIZE
#==============================================================================
def rastermaskGDAL(shpname,shpproj,rainprop,masktype,fullpath,gdalpath=False):
bndbox=np.array(rainprop.subind)
bndcoords=np.array(rainprop.subextent)
if rainprop.projection==GEOG:
xdim=np.shape(np.linspace(bndcoords[0],bndcoords[1],rainprop.subind[1]-rainprop.subind[0]+1))[0]
ydim=np.shape(np.linspace(bndcoords[2],bndcoords[3],rainprop.subind[2]-rainprop.subind[3]+1))[0]
else:
sys.exit("unrecognized projection!")
rastertemplate=np.zeros((ydim,xdim),dtype='float32')
if masktype=='simple':
print('creating simple mask (0s and 1s)')
#os.system('gdal_rasterize -at -burn 1.0 -te '+str(rainprop.subextent[0])+' '+str(rainprop.subextent[2])+' '+str(rainprop.subextent[1])+' '+str(rainprop.subextent[3])+' -tr '+str(rainprop.spatialres[0])+' '+str(rainprop.spatialres[1])+' -ts '+str(np.int(rainprop.subdimensions[1]))+' '+str(np.int(rainprop.subdimensions[0]))+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff');
if gdalpath!=False:
rasterizecmd=gdalpath+'/gdal_rasterize -at -burn 1.0 -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -tr '+"%.9f"%(rainprop.spatialres[0])+' '+"%.9f"%(rainprop.spatialres[1])+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1]))+' '+"%.9f"%(np.int(rainprop.subdimensions[0]))+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff'
else:
rasterizecmd='gdal_rasterize -at -burn 1.0 -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -tr '+"%.9f"%(rainprop.spatialres[0])+' '+"%.9f"%(rainprop.spatialres[1])+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1]))+' '+"%.9f"%(np.int(rainprop.subdimensions[0]))+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff'
os.system(rasterizecmd)
ds=rasterio.open(fullpath+'/temp.tiff')
rastertemplate=ds.read(1)
os.system('rm '+fullpath+'/temp.tiff')
elif masktype=="fraction":
print('creating fractional mask (range from 0.0-1.0)')
#os.system('gdal_rasterize -at -burn 1.0 -te '+str(rainprop.subextent[0])+' '+str(rainprop.subextent[2])+' '+str(rainprop.subextent[1])+' '+str(rainprop.subextent[3])+' -tr '+str(rainprop.spatialres[0]/10.)+' '+str(rainprop.spatialres[1]/10.)+' -ts '+str(np.int(rainprop.subdimensions[1])*10)+' '+str(np.int(rainprop.subdimensions[0])*10)+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff');
#os.system('gdalwarp -r average -te '+str(rainprop.subextent[0])+' '+str(rainprop.subextent[2])+' '+str(rainprop.subextent[1])+' '+str(rainprop.subextent[3])+' -ts '+str(np.int(rainprop.subdimensions[1]))+' '+str(np.int(rainprop.subdimensions[0]))+' -overwrite '+fullpath+'/temp.tiff '+fullpath+'/tempAGG.tiff');
if gdalpath!=False:
rasterizecmd=gdalpath+'/gdal_rasterize -at -burn 1.0 -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -tr '+"%.9f"%(rainprop.spatialres[0]/10.)+' '+"%.9f"%(rainprop.spatialres[1]/10.)+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1])*10)+' '+"%.9f"%(np.int(rainprop.subdimensions[0])*10)+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff'
else:
rasterizecmd='gdal_rasterize -at -burn 1.0 -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -tr '+"%.9f"%(rainprop.spatialres[0]/10.)+' '+"%.9f"%(rainprop.spatialres[1]/10.)+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1])*10)+' '+"%.9f"%(np.int(rainprop.subdimensions[0])*10)+' -ot Float32 '+shpname+' '+fullpath+'/temp.tiff'
os.system(rasterizecmd)
if gdalpath!=False:
warpcmd=gdalpath+'/gdalwarp -r average -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1]))+' '+"%.9f"%(np.int(rainprop.subdimensions[0]))+' -overwrite '+fullpath+'/temp.tiff '+fullpath+'/tempAGG.tiff'
else:
warpcmd='gdalwarp -r average -te '+"%.9f"%(rainprop.subextent[0])+' '+"%.9f"%(rainprop.subextent[2])+' '+"%.9f"%(rainprop.subextent[1])+' '+"%.9f"%(rainprop.subextent[3])+' -ts '+"%.9f"%(np.int(rainprop.subdimensions[1]))+' '+"%.9f"%(np.int(rainprop.subdimensions[0]))+' -overwrite '+fullpath+'/temp.tiff '+fullpath+'/tempAGG.tiff'
os.system(warpcmd)
ds=rasterio.open(fullpath+'/tempAGG.tiff')
rastertemplate=ds.read(1)
os.system('rm '+fullpath+'/temp.tiff')
os.system('rm '+fullpath+'/tempAGG.tiff')
else:
sys.exit("You entered an incorrect mask type, options are 'simple' or 'fraction'")
rastertemplate=np.array(rastertemplate[:])
return rastertemplate
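#==============================================================================
# ILLUSTRATIVE SKETCH (HYPOTHETICAL PATHS AND FILE NAMES): TYPICAL CALLS TO
# rastermaskGDAL. 'simple' BURNS A 0/1 MASK ON THE ANALYSIS GRID, WHILE
# 'fraction' RASTERIZES AT 10x RESOLUTION AND AVERAGES BACK DOWN SO EACH CELL
# HOLDS THE FRACTION OF ITS AREA INSIDE THE POLYGON.
#==============================================================================
#simplemask=rastermaskGDAL('watershed.shp',GEOG,rainprop,'simple','/tmp/rainyday')
#fracmask=rastermaskGDAL('watershed.shp',GEOG,rainprop,'fraction','/tmp/rainyday',gdalpath='/usr/local/bin')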
#==============================================================================
# WRITE SCENARIOS TO NETCDF ONE REALIZATION AT A TIME
#==============================================================================
def writerealization(rlz,nrealizations,writename,outrain,writemax,writestorm,writeperiod,writex,writey,writetimes,latrange,lonrange,whichorigstorm):
# SAVE outrain AS NETCDF FILE
dataset=Dataset(writename, 'w', format='NETCDF4')
# create dimensions
outlats=dataset.createDimension('outlat',len(latrange))
outlons=dataset.createDimension('outlon',len(lonrange))
time=dataset.createDimension('time',writetimes.shape[1])
nyears=dataset.createDimension('nyears',len(writeperiod))
# create variables
times=dataset.createVariable('time',np.float64, ('nyears','time'))
latitudes=dataset.createVariable('latitude',np.float32, ('outlat'))
longitudes=dataset.createVariable('longitude',np.float32, ('outlon'))
rainrate=dataset.createVariable('rainrate',np.float32,('nyears','time','outlat','outlon'),zlib=True,complevel=4,least_significant_digit=2)
basinrainfall=dataset.createVariable('basinrainfall',np.float32,('nyears'))
xlocation=dataset.createVariable('xlocation',np.int32,('nyears'))
ylocation=dataset.createVariable('ylocation',np.int32,('nyears'))
returnperiod=dataset.createVariable('returnperiod',np.float32,('nyears'))
stormnumber=dataset.createVariable('stormnumber',np.int32,('nyears'))
original_stormnumber=dataset.createVariable('original_stormnumber',np.int32,('nyears'))
#stormtimes=dataset.createVariable('stormtimes',np.float64,('nyears'))
# Global Attributes
dataset.description = 'SST Rainfall Scenarios Realization: '+str(rlz+1)+' of '+str(nrealizations)
dataset.history = 'Created ' + str(datetime.now())
dataset.source = 'Storm Catalog for (FILL IN THE BLANK)'
# Variable Attributes (time since 1970-01-01 00:00:00.0 in numpys)
latitudes.units = 'degrees north'
longitudes.units = 'degrees east'
rainrate.units = 'mm/h'
times.units = 'minutes since 1970-01-01 00:00.0'
times.calendar = 'gregorian'
#print dataset.description
#print dataset.history
# fill the netcdf file
latitudes[:]=latrange
longitudes[:]=lonrange
rainrate[:]=outrain
basinrainfall[:]=writemax
times[:]=writetimes
xlocation[:]=writex
ylocation[:]=writey
stormnumber[:]=writestorm
returnperiod[:]=writeperiod
original_stormnumber[:]=whichorigstorm
#stormtimes[:]=writetimes
dataset.close()
#==============================================================================
# WRITE The maximized storm
#==============================================================================
def writemaximized(writename,outrain,writemax,write_ts,writex,writey,writetimes,latrange,lonrange):
# SAVE outrain AS NETCDF FILE
dataset=Dataset(writename, 'w', format='NETCDF4')
# create dimensions
outlats=dataset.createDimension('outlat',len(latrange))
outlons=dataset.createDimension('outlon',len(lonrange))
time=dataset.createDimension('time',len(writetimes))
# create variables
times=dataset.createVariable('time',np.float64, ('time'))
latitudes=dataset.createVariable('latitude',np.float32, ('outlat'))
longitudes=dataset.createVariable('longitude',np.float32, ('outlon'))
rainrate=dataset.createVariable('rainrate',np.float32,('time','outlat','outlon'),zlib=True,complevel=4,least_significant_digit=2)
basinrainfall=dataset.createVariable('basinrainfall',np.float32)
xlocation=dataset.createVariable('xlocation',np.int32)
ylocation=dataset.createVariable('ylocation',np.int32)
#stormtimes=dataset.createVariable('stormtimes',np.float64,('nyears'))
# Global Attributes
dataset.description = 'SST Rainfall Maximum Storm'
dataset.history = 'Created ' + str(datetime.now())
dataset.source = 'Storm Catalog for (FILL IN THE BLANK)'
# Variable Attributes (time since 1970-01-01 00:00:00.0 in numpys)
latitudes.units = 'degrees north'
longitudes.units = 'degrees east'
rainrate.units = 'mm/h'
times.units = 'minutes since 1970-01-01 00:00.0'
times.calendar = 'gregorian'
#print dataset.description
#print dataset.history
# fill the netcdf file
latitudes[:]=latrange
longitudes[:]=lonrange
rainrate[:]=outrain
basinrainfall[:]=writemax
times[:]=writetimes
xlocation[:]=writex
ylocation[:]=writey
dataset.close()
#==============================================================================
# READ RAINFALL FILE FROM NETCDF
#==============================================================================
def readnetcdf(rfile,inbounds=False):
infile=Dataset(rfile,'r')
if np.any(inbounds!=False):
outrain=np.array(infile.variables['rainrate'][:,inbounds[3]:inbounds[2]+1,inbounds[0]:inbounds[1]+1])
outlatitude=np.array(infile.variables['latitude'][inbounds[3]:inbounds[2]+1])
outlongitude=np.array(infile.variables['longitude'][inbounds[0]:inbounds[1]+1])
else:
outrain=np.array(infile.variables['rainrate'][:])
outlatitude=np.array(infile.variables['latitude'][:])
        outlongitude=np.array(infile.variables['longitude'][:])
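#==============================================================================
# ILLUSTRATIVE SKETCH (HYPOTHETICAL FILE NAME; ASSUMES readnetcdf HANDS BACK
# THE THREE ARRAYS IT BUILDS): WHEN inbounds IS SUPPLIED, ONLY THE SUBGRID
# DEFINED BY MATRIX INDICES ORDERED LIKE rainprop.subind (SEE findsubbox) IS
# READ FROM DISK.
#==============================================================================
#rain,lat,lon=readnetcdf('StormCatalog_example.nc',inbounds=rainprop.subind)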
import glob
import os,sys
import cv2
import numpy as np
import math
# from . import openexr_io
from numpy import unique
# from scipy.stats import entropy as scipy_entropy
import matplotlib.pyplot as plt
class io():
def __init__(self, data_root):
self.data_root = data_root
self.img_channels = 3
self.datatype = 'Cycles'
self.dirname = ['CyclesPS_Dichromatic', 'CyclesPS_Metallic', 'CyclesPS_Dichromatic', 'CyclesPS_Metallic']
self.suffix= ['direct.tif', 'direct.tif','indirect.tif', 'indirect.tif']
if len(self.dirname) != len(self.suffix):
raise Exception("dirname and suffix have different length")
self.ext = '.obj'
self.objlists = []
for i in range(len(self.dirname)):
data_path = f'{data_root}/{self.dirname[i]}'
objlist = []
[objlist.append(p) for p in glob.glob(data_path + '/*%s' % self.ext, recursive=True) if os.path.isdir(p)]
objlist = sorted(objlist)
self.objlists.append(objlist)
def get_num_object(self):
return len(self.objlists[0])
def get_num_set(self):
return len(self.objlists)
def load(self, objid, objset, sizeImgBuffer=None, scale=1.0):
objlist = self.objlists[objset]
objname = objlist[objid].split('/')[-1]
imglist = []
[imglist.append(p) for p in glob.glob(objlist[objid] + '/*_%s' % self.suffix[objset], recursive=True) if os.path.isfile(p)]
imglist = sorted(imglist)
if len(imglist) == 0:
return False
if os.name == 'posix':
temp = imglist[0].split("/")
if os.name == 'nt':
temp = imglist[0].split("\\")
img_dir = "/".join(temp[:-1])
print(f'Loading {objname} / {self.dirname[objset]}, {self.suffix[objset]} (Cycles)')
if sizeImgBuffer is not None:
indexset = np.random.permutation(len(imglist))[:sizeImgBuffer]
else:
indexset = range(len(imglist))
I = []
for i, indexofimage in enumerate(indexset):
img_path = imglist[indexofimage]
img = cv2.resize(cv2.cvtColor(cv2.imread(img_path, flags = cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH), cv2.COLOR_BGR2RGB), dsize=None, fx=scale, fy=scale,interpolation=cv2.INTER_NEAREST)
if img.dtype == 'uint8':
bit_depth = 255.0
if img.dtype == 'uint16':
bit_depth = 65535.0
img = np.float32(img) / bit_depth
h = img.shape[0]
w=h
I.append(img)
nml_path = img_dir + '/gt_normal.tif'
if os.path.isfile(nml_path) and i == 0:
N = np.float32(cv2.resize(cv2.cvtColor(cv2.imread(nml_path, flags = cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH), cv2.COLOR_BGR2RGB), dsize=None, fx=scale, fy=scale,interpolation=cv2.INTER_NEAREST))/65535.0
N = 2 * N - 1
mask = np.abs(1 - np.sqrt(np.sum(N*N, axis=2))) < 1.0e-2
N /= np.sqrt(np.sum(N*N, axis=2).reshape(N.shape[0], N.shape[1], 1))
N = N * mask.reshape(N.shape[0], N.shape[1], 1)
N = np.reshape(N, (h * w, 3))
I = np.array(I)
        L = np.loadtxt(img_dir + '/light.txt', np.float32)
import numpy as np
import tensorflow as tf
from typing import List, Tuple
class InverseTimeDecay(tf.keras.optimizers.schedules.LearningRateSchedule):
def __init__(self, initial_learning_rate, decay_steps, decay_rate):
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.decay_rate = decay_rate
self.step_recorder = 0
self.lr = 0
def __call__(self, step):
self.step_recorder = step
self.lr = self.initial_learning_rate / (
1 + self.decay_rate * step / self.decay_steps
)
return self.lr
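# Illustrative sketch (not from the original training script): the schedule above can be
# handed straight to a Keras optimizer, which will call it with the current step.
# The hyperparameter values below are hypothetical.
#
# lr_schedule = InverseTimeDecay(initial_learning_rate=1e-3, decay_steps=1000, decay_rate=0.5)
# optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)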
def get_batch_tensor(X: np.ndarray, idx: np.ndarray):
"""
Convert numpy tensor of shape (N, k) to tensor of shape (N_batch, k) and dtype tf.float32,
where N = len(X), N_batch = len(idx), and k is the number of input features
"""
assert len(X.shape) == 2, "Input array is not a PINN input"
return tf.convert_to_tensor(X[idx], dtype=tf.float32)
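# Illustrative sketch (hypothetical shapes): drawing a random minibatch of collocation points.
#
# X = np.random.rand(10000, 3).astype(np.float32)          # (t, x, y) PINN inputs
# idx = np.random.choice(len(X), size=256, replace=False)  # minibatch indices
# X_batch = get_batch_tensor(X, idx)                       # tf.float32 tensor of shape (256, 3)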
def boundary_condition(
outer_bc_geometry: List[float],
inner_bc_geometry: List[float],
bc_num: List[int],
T_end: float,
):
"""
Generate BC points for outer and inner boundaries
"""
x_l, x_r, y_d, y_u = outer_bc_geometry
xc_l, xc_r, yc_d, yc_u = inner_bc_geometry
N_x, N_y, N_t, N_bc = bc_num
N_bc = N_bc // 4 + 1
# generate bc for outer boundary
left_points = np.stack((np.ones(N_y) * x_l, np.linspace(y_d, y_u, N_y)), 1)
right_points = np.stack((np.ones(N_y) * x_r, np.linspace(y_d, y_u, N_y)), 1)
t_lr = np.repeat(np.linspace(0, T_end, N_t), N_y).reshape(-1, 1)
X_left = np.hstack((t_lr, np.vstack([left_points for _ in range(N_t)])))
X_right = np.hstack((t_lr, np.vstack([right_points for _ in range(N_t)])))
X_lr = np.concatenate((X_left, X_right), 1)
lr_idx = np.random.choice(len(X_lr), size=N_bc, replace=False)
X_lr = X_lr[lr_idx]
down_points = np.stack((np.linspace(x_l, x_r, N_x), np.ones(N_x) * y_d), 1)
up_points = np.stack((np.linspace(x_l, x_r, N_x), np.ones(N_x) * y_u), 1)
t_du = np.repeat(np.linspace(0, T_end, N_t), N_x).reshape(-1, 1)
X_down = np.hstack((t_du, np.vstack([down_points for _ in range(N_t)])))
X_up = np.hstack((t_du, np.vstack([up_points for _ in range(N_t)])))
    X_du = np.concatenate((X_down, X_up), 1)
import itertools
import math
import traceback
from copy import copy
from typing import Callable, List, Tuple, Union
import numpy
from joblib import Parallel, delayed
from numpy.linalg import LinAlgError, norm
from scipy.interpolate import RBFInterpolator
from scipy.optimize import minimize
from aydin.util.log.log import lprint, lsection
class Optimizer:
def __init__(self):
pass
def optimize(
self,
function: Callable,
bounds: List[Union[Tuple[int, ...], Tuple[float, ...]]],
init_strategies: str = 'corners+centers+random',
exploration_rate: float = 0.4,
patience: int = 64,
max_num_evaluations: int = 128,
num_interpolated_evaluations: int = 128,
workers: int = -1,
):
"""
Optimizes (maximizes) a given function by alternating between optimisation
        of a proxy function obtained through interpolation, and exploration of the
least sampled regions of the optimisation domain.
Parameters
----------
function: Callable
Function to optimize.
bounds: List[Union[Tuple[int, ...], Tuple[float, ...]]]
Bounds for function parameters
init_strategies: str
Initialisation strategies. Can contain: 'corners', 'centers', and 'random'
exploration_rate: float
Rate at which to explore
max_num_evaluations: int
Maximum number of evaluations of the /a priori/ costly given function.
num_interpolated_evaluations: int
            Max number of evaluations of the interpolated function.
workers: int
Number of workers, if -1 the maximum is used.
Returns
-------
optimal_point, optimal_value
optimal_point: Optimal value for parameters
optimal_value: Corresponding function value
"""
# First we figure out the dimensionality of the problem:
n = len(bounds)
# Save Function:
self.function = function
# Save bounds:
self.bounds = bounds
# Second, we allocate the array that stores the evaluations:
self.x = []
self.y = []
# We keep track here of the best evaluation:
self.best_point = None
self.best_value = -math.inf
# First we initialise with some points on the corners:
if 'corners' in init_strategies:
with lsection("Evaluating function at corners"):
if 'centers' in init_strategies:
init_grid = tuple((u, 0.5 * (u + v), v) for u, v in bounds)
else:
init_grid = copy(bounds)
point_list = list(itertools.product(*init_grid))
self._add_points(point_list, workers=workers, display_points=True)
# First we initialise with some random points:
if 'random' in init_strategies:
with lsection("Evaluating function at random points"):
point_list = list(self._random_sample() for _ in range(min(4, 2 * n)))
self._add_points(point_list, workers=workers)
        # For how long have we gone without an improvement?
self.since_last_best = 0
# This is the main loop that evaluates the function:
with lsection(
f"Optimizing function with at most {max_num_evaluations} function evaluations within: {bounds}"
):
for i in range(max_num_evaluations):
# lprint(f"Evaluation #{i}")
# lprint(f"x={x}")
# Given the existing points, we can build the interpolating function:
try:
self.interpolator = RBFInterpolator(
y=numpy.stack(self.x),
d=numpy.stack(self.y),
neighbors=8 if len(self.x) < 8 else 4 * n,
smoothing=abs(numpy.random.normal(0, 1)) ** 0.5,
)
# From time to time we just pick points far from all other points:
do_explore = numpy.random.random() < exploration_rate
# using the interpolator we can quickly search for the best value:
new_point = self._delegated_optimizer(
do_explore=do_explore,
num_evaluations=num_interpolated_evaluations,
)
# We add that point to the list of points:
has_new_best = self._add_points([new_point])
lprint(
f"{i}{'!' if has_new_best else' '}: {' Exploring' if do_explore else 'Optimizing'}, Best point: {self.best_point}, best value: {self.best_value}, new point: {new_point})"
)
# Are we running out of patience?
if self.since_last_best > patience:
# If yes we stop searching:
lprint(
f"Run out of patience: {self.since_last_best} > {patience} !"
)
break
except LinAlgError:
lprint("Error while optimizing, let's stop training now!")
lprint(f"x={self.x}")
traceback.print_exc()
break
lprint(f"Best point: {self.best_point}, best value: {self.best_value}")
return self.best_point, self.best_value
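    # Illustrative sketch (not part of the library's documented API surface): a typical call
    # on a toy two-parameter objective; the bounds and budget below are hypothetical.
    #
    #   opt = Optimizer()
    #   best_point, best_value = opt.optimize(
    #       lambda a, b: -((a - 0.3) ** 2 + (b + 0.2) ** 2),
    #       bounds=[(-1.0, 1.0), (-1.0, 1.0)],
    #       max_num_evaluations=32,
    #   )
    #
    # The function is maximised, so the best point should land near (0.3, -0.2).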
def _add_points(self, point_list: List, workers=-1, display_points=False):
# Normalise points:
point_list = list(
numpy.array(point, dtype=numpy.float32) for point in point_list
)
def _function(*_point):
_value = self.function(*_point)
if display_points:
lprint(f"New point: {_point} -> {_value}")
return _value
# Evaluate function in parallel:
values = Parallel(n_jobs=workers, backend='threading')(
delayed(_function)(*point) for point in point_list
)
# to return:
has_new_best = False
# Going through the list of points:
for new_value, new_point in zip(values, point_list):
# Replace NaNs or other weird floats with something better:
new_value = numpy.nan_to_num(new_value, neginf=-1e6, posinf=-1e6, nan=-1e6)
# And add this new point to the list:
self.x.append(new_point)
self.y.append(new_value)
# We keep track of the last best evaluation:
if new_value > self.best_value:
has_new_best = True
self.since_last_best = 0
self.best_value = new_value
self.best_point = new_point
else:
self.since_last_best += 1
return has_new_best
def _delegated_optimizer(
self, do_explore: bool, num_evaluations: int = 128, workers: int = -1
):
# If we ran out of evaluations (recursive call!), then let's return immediately with None:
if num_evaluations <= 0:
return None
# First we figure out the dimensionality of the problem:
n = len(self.bounds)
# This is the function to optimize:
def function(point):
value = 0
fallback_exploration = False
if not do_explore:
# We compute interpolated value:
try:
# interpolation value:
value += self.interpolator(point.reshape(n, -1).T)
except Exception as e:
lprint(f"Exception: {e}")
# If there is an issue with interpolation, we fallback on exploration:
fallback_exploration = True
if do_explore or fallback_exploration:
# point coordinates translated for usage with the kd-tree:
                point_for_tree = numpy.array(point)
"""
Code to process a dataframe into X_test for ML deployment
"""
# import packages
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split, KFold
from nltk.stem.snowball import SnowballStemmer
from scipy.stats import randint
from io import StringIO
from sklearn.feature_selection import chi2
# from IPython.display import display
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.metrics import confusion_matrix
from sklearn import metrics
from nltk.corpus import stopwords
#
# if nltk does not work, then run the 3 lines below:
### import nltk
### nltk.download("punkt")
### nltk.download("stopwords")
def predict_faculty(df_in, model, tfidf):
"""
    :param df_in: dataframe of publication records with the text columns used below
    :param model: fitted classifier whose predict() is applied to the assembled features
    :param tfidf: fitted TF-IDF vectorizer used to transform the stacked 'text_info' field
    :return: array of predicted faculty labels, one per row of df_in
"""
X = get_X_and_y(df_in)
features_tfidf = tfidf.transform(X['text_info']).toarray() # don't push all feats in, just the text stack
features = pd.concat([pd.DataFrame(features_tfidf), X.reset_index().drop(columns={'text_info',
'text_info_read',
'text_info_export',
'index'})], axis=1)
y_pred = model.predict(features)
return y_pred
# Here are copies of common functions for the sole purpose of easing imports for cross-platform use
#
# let's prepare the preprocessing
def remove_punctuation(text):
'''a function for removing punctuation'''
import string
# replacing the punctuations with no space,
# which in effect deletes the punctuation marks
translator = str.maketrans('', '', string.punctuation)
# return the text stripped of punctuation marks
return text.translate(translator)
# print(remove_punctuation("""123, hi, ./;[][90-] \][0-*( )] hi man how are you""" )) # powerful and fast
def remove_numbers(text):
import string
translator = str.maketrans('', '', '0123456789')
return text.translate(translator)
# print(remove_numbers('8u1981723 asdh 288 hi hi 2 hi ')) # nice
def remove_stopwords_and_lower(text):
'''a function for removing the stopword'''
# extracting the stopwords from nltk library
sw = stopwords.words('english')
# displaying the stopwords
np.array(sw)
# we know more stuff no one needs at all like 'department' but let's keep them for now
# removing the stop words and lowercasing the selected words
text = [word.lower() for word in text.split() if word.lower() not in sw]
# joining the list of words with space separator
return " ".join(text)
def comma_space_fix(text):
return (text
.replace(": ", ":")
.replace(":", ": ")
.replace("! ", "!")
.replace("!", "! ")
.replace("? ", "?")
.replace("?", "? ")
.replace(", ", ",")
.replace(",", ", ")
.replace(". ", ".")
.replace(".", ". ")
.replace("; ", ";")
.replace(";", "; ")) # this makes both ",x" and ", x" ", x"
# hey! notice that you are cutting off the org info after the first comma but lumping it all together now
# for multi-affil, this may not be what you want as it loses ordering
# however it is OK for now
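# illustrative example (hypothetical string): normalizes delimiter spacing so later splits behave
# print(comma_space_fix('dept. of physics,vu;amsterdam')) # -> 'dept. of physics, vu; amsterdam'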
def remove_common_words_and_lower(text):
# we need to remove vu/dept/amsterdam because it messes up the bigrams
remove_words_org = ['vu', 'amsterdam', 'vrije', 'universiteit', 'free', 'university', 'department', 'of', 'the',
'in',
'and', 'a', '@', 'center', 'centre', 'instituut', 'institute', '&', 'for', '(', ')', 'insitute',
'research']
#
# removing institute is perhaps not the best option, try stuff out : )
# removing the stop words and lowercasing the selected words
text = [word.lower() for word in text.split() if word.lower() not in remove_words_org]
# joining the list of words with space separator
return " ".join(text)
# fill empty nans with empty strings,
# this difference avoids errors in type assertions
def fill_empty(row):
if pd.notnull(row):
return row
else:
return ''
# define encoding/enumeration
def encode_fac(row):
if row == 'Faculty of Science':
id = 0
elif row == 'Faculty of Behavioural and Movement Sciences':
id = 1
elif row == 'medical center':
id = 2
elif row == 'Faculty of Social Sciences':
id = 3
elif row == 'School of Business and Economics':
id = 4
elif row == 'Faculty of Law':
id = 5
elif row == 'Faculty of Humanities':
id = 6
elif row == 'Faculty of Religion and Theology':
id = 7
elif row == 'ACTA':
id = 8
else: # rest
id = 9
return id
def get_X_and_y(df):
def add_space(row):
return row + ' '
df['text_info_1'] = (df
.first_VU_author_raw_organization_info
.apply(fill_empty)
.apply(comma_space_fix)
.apply(remove_punctuation)
.apply(remove_numbers)
.apply(remove_stopwords_and_lower)
.apply(remove_common_words_and_lower)
.apply(add_space))
df['text_info_2'] = (df
.title
.apply(fill_empty)
.apply(comma_space_fix)
.apply(remove_punctuation)
.apply(remove_numbers)
.apply(remove_stopwords_and_lower)
.apply(remove_common_words_and_lower)
.apply(add_space))
df['text_info_3'] = (df
.journal_name
.apply(fill_empty)
.apply(comma_space_fix)
.apply(remove_punctuation)
.apply(remove_numbers)
.apply(remove_stopwords_and_lower)
.apply(remove_common_words_and_lower)
.apply(add_space))
df['text_info_4'] = (df
.abstract_text_clean
.apply(fill_empty)
.apply(comma_space_fix)
.apply(remove_punctuation)
.apply(remove_numbers)
.apply(remove_stopwords_and_lower)
.apply(remove_common_words_and_lower)
.apply(add_space))
# define the features matrix
# notice that for this setting we do not add extra cols
# for example, we could add #authors as a column
# and let the machine learning decide how/if to use that
abstract_down_weight = 3 # hinges on space_fix
#
df['text_info'] = (3 * df['text_info_1']
+ ' '
+ 3 * df['text_info_2'] # title
+ ' '
+ 3 * df['text_info_3'] # journal_name
+ ' '
+ df['text_info_4']) # abstract
df['text_info_read'] = (df['text_info_1']
+ ' || '
+ df['text_info_2']
+ ' || '
+ df['text_info_3']
+ ' || '
+ df['text_info_4']
)
df['text_info_export'] = (
' #ORGVU1 ' +
df['text_info_1']
+ ' #TITLE '
+ df['text_info_2']
+ ' #JNAME '
+ df['text_info_3']
+ ' #ABS '
+ df['text_info_4']
)
    for id in np.arange(0, 10):
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pixell` package."""
import unittest
from pixell import sharp
from pixell import enmap
from pixell import curvedsky
from pixell import lensing
from pixell import interpol
from pixell import array_ops
from pixell import enplot
from pixell import powspec
from pixell import reproject
from pixell import pointsrcs
from pixell import wcsutils
from pixell import utils as u
from pixell import colors
from pixell import fft
from pixell import tilemap
from pixell import utils
import numpy as np
import pickle
import os,sys
try: # when invoked directly...
import pixel_tests as ptests
except ImportError: # when imported through py.test
from . import pixel_tests as ptests
TEST_DIR = ptests.TEST_DIR
DATA_PREFIX = ptests.DATA_PREFIX
lens_version = '091819'
def get_offset_result(res=1.,dtype=np.float64,seed=1):
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(res))
shape = (3,) + shape
obs_pos = enmap.posmap(shape, wcs)
np.random.seed(seed)
grad = enmap.enmap(np.random.random(shape),wcs)*1e-3
raw_pos = enmap.samewcs(lensing.offset_by_grad(obs_pos, grad, pol=shape[-3]>1, geodesic=True), obs_pos)
return obs_pos,grad,raw_pos
def get_lens_result(res=1.,lmax=400,dtype=np.float64,seed=1):
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(res))
shape = (3,) + shape
# ells = np.arange(lmax)
ps_cmb,ps_lens = powspec.read_camb_scalar(DATA_PREFIX+"test_scalCls.dat")
ps_lensinput = np.zeros((4,4,ps_cmb.shape[-1]))
ps_lensinput[0,0] = ps_lens
ps_lensinput[1:,1:] = ps_cmb
lensed = lensing.rand_map(shape, wcs, ps_lensinput, lmax=lmax, maplmax=None, dtype=dtype, seed=seed, phi_seed=None, oversample=2.0, spin=[0,2], output="lu", geodesic=True, verbose=False, delta_theta=None)
return lensed
class PixelTests(unittest.TestCase):
def test_almxfl(self):
import healpy as hp
for lmax in [100,400,500,1000]:
ainfo = sharp.alm_info(lmax)
alms = hp.synalm(np.ones(lmax+1),lmax = lmax, new=True)
filtering = np.ones(lmax+1)
alms0 = ainfo.lmul(alms.copy(),filtering)
assert np.all(np.isclose(alms0,alms))
for lmax in [100,400,500,1000]:
ainfo = sharp.alm_info(lmax)
alms = hp.synalm(np.ones(lmax+1),lmax = lmax, new=True)
alms0 = curvedsky.almxfl(alms.copy(),lambda x: np.ones(x.shape))
assert np.all(np.isclose(alms0,alms))
def test_rand_alm(self):
def nalm(lmax):
return (lmax + 1) * (lmax + 2) / 2
lmaxes = [50, 100, 150, 300]
mypower = np.ones(50)
for lmax in lmaxes:
palm = curvedsky.rand_alm(mypower, lmax = lmax)
halm = curvedsky.rand_alm_healpy( mypower, lmax = lmax)
print("nalm(%i) = %i, curvedsky.rand_alm gives %s, curvedsky.rand_alm_healpy gives %s "\
% (lmax, \
nalm(lmax),\
palm.shape, \
halm.shape) )
assert np.all(np.isclose(np.asarray(palm.shape),np.asarray(halm.shape)))
def test_offset(self):
obs_pos,grad,raw_pos = get_offset_result(1.)
obs_pos0 = enmap.read_map(DATA_PREFIX+"MM_offset_obs_pos_%s.fits" % lens_version)
grad0 = enmap.read_map(DATA_PREFIX+"MM_offset_grad_%s.fits" % lens_version)
raw_pos0 = enmap.read_map(DATA_PREFIX+"MM_offset_raw_pos_%s.fits" % lens_version)
assert np.all(np.isclose(obs_pos,obs_pos0))
assert np.all(np.isclose(raw_pos,raw_pos0))
assert np.all(np.isclose(grad,grad0))
assert wcsutils.equal(grad.wcs,grad0.wcs)
assert wcsutils.equal(obs_pos.wcs,obs_pos0.wcs)
assert wcsutils.equal(raw_pos.wcs,raw_pos0.wcs)
def test_lensing(self):
lensed,unlensed = get_lens_result(1.,400,np.float64)
lensed0 = enmap.read_map(DATA_PREFIX+"MM_lensed_%s.fits" % lens_version)
unlensed0 = enmap.read_map(DATA_PREFIX+"MM_unlensed_%s.fits" % lens_version)
y,x = lensed0.posmap()
assert np.all(np.isclose(lensed,lensed0))
assert np.all(np.isclose(unlensed,unlensed0))
assert wcsutils.equal(lensed.wcs,lensed0.wcs)
assert wcsutils.equal(unlensed.wcs,unlensed0.wcs)
assert wcsutils.equal(unlensed.wcs,lensed.wcs)
def test_enplot(self):
print("Testing enplot...")
shape,wcs = enmap.geometry(pos=(0,0),shape=(3,100,100),res=0.01)
a = enmap.ones(shape,wcs)
p = enplot.get_plots(a)
def test_fft(self):
# Tests that ifft(ifft(imap))==imap, i.e. default normalizations are consistent
shape,wcs = enmap.geometry(pos=(0,0),shape=(3,100,100),res=0.01)
imap = enmap.enmap(np.random.random(shape),wcs)
assert np.all(np.isclose(imap,enmap.ifft(enmap.fft(imap,normalize='phy'),normalize='phy').real))
assert np.all(np.isclose(imap,enmap.ifft(enmap.fft(imap)).real))
def test_fft_input_shape(self):
# Tests fft for various shapes and choices of axes.
# 1D FFT over last axis for 3d array.
signal = np.ones((1, 2, 5))
signal[0,1,:] = 10.
out_exp = np.zeros((1, 2, 5), dtype=np.complex128)
out_exp[0,0,0] = 5
out_exp[0,1,0] = 50
out = fft.fft(signal)
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(out.flags['C_CONTIGUOUS'])
# 1D FFT over middle axis for 3d array.
signal = np.ones((1, 5, 2))
signal[0,:,1] = 10.
out_exp = np.zeros((1, 5, 2), dtype=np.complex128)
out_exp[0,0,0] = 5
out_exp[0,0,1] = 50
out = fft.fft(signal, axes=[-2])
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(out.flags['C_CONTIGUOUS'])
# 2D FFT over last 2 axes of 4d array.
signal = np.ones((1, 2, 5, 10))
signal[0,1,:] = 10.
out_exp = np.zeros((1, 2, 5, 10), dtype=np.complex128)
out_exp[0,0,0,0] = 50
out_exp[0,1,0,0] = 500
out = fft.fft(signal, axes=[-2, -1])
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(out.flags['C_CONTIGUOUS'])
# 2D FFT over last 2 axes of 4d non-contiguous array.
signal = np.ones((1, 2, 5, 10), dtype=np.complex128)
signal[0,1,:] = 10
ft = np.zeros((5, 10, 1, 2), dtype=np.complex128).transpose(2, 3, 0, 1)
out_exp = np.zeros_like(ft)
out_exp[0,0,0,0] = 50
out_exp[0,1,0,0] = 500
out = fft.fft(signal, ft=ft, axes=[-2, -1])
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(np.shares_memory(ft, out))
self.assertFalse(out.flags['C_CONTIGUOUS'])
# 2D FFT over middle 2 axes of 4d array.
signal = np.ones((1, 5, 10, 2))
signal[0,:,:,1] = 10.
out_exp = np.zeros((1, 5, 10, 2), dtype=np.complex128)
out_exp[0,0,0,0] = 50
out_exp[0,0,0,1] = 500
out = fft.fft(signal, axes=[-3, -2])
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(out.flags['C_CONTIGUOUS'])
def test_ifft_input_shape(self):
# Tests ifft for various shapes and choices of axes.
# 1D IFFT over last axis for 3d array.
fsignal = np.ones((1, 2, 5), dtype=np.complex128)
fsignal[0,1,:] = 10.
out_exp = np.zeros((1, 2, 5))
out_exp[0,0,0] = 5
out_exp[0,1,0] = 50
out = fft.ifft(fsignal)
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(out.flags['C_CONTIGUOUS'])
# 1D IFFT over middle axis for 3d array.
fsignal = np.ones((1, 5, 2), dtype=np.complex128)
fsignal[0,:,1] = 10.
out_exp = np.zeros((1, 5, 2))
out_exp[0,0,0] = 5
out_exp[0,0,1] = 50
out = fft.ifft(fsignal, axes=[-2])
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(out.flags['C_CONTIGUOUS'])
# 2D IFFT over last 2 axes of 4d array.
fsignal = np.ones((1, 2, 5, 10), dtype=np.complex128)
fsignal[0,1,:] = 10.
out_exp = np.zeros((1, 2, 5, 10))
out_exp[0,0,0,0] = 50
out_exp[0,1,0,0] = 500
out = fft.ifft(fsignal, axes=[-2, -1])
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(out.flags['C_CONTIGUOUS'])
# 2D IFFT over last 2 axes of 4d non-contiguous array.
fsignal = np.ones((1, 2, 5, 10), dtype=np.complex128)
fsignal[0,1,:] = 10.
tod = np.zeros((5, 10, 1, 2), dtype=np.complex128).transpose(2, 3, 0, 1)
out_exp = np.zeros_like(tod)
out_exp[0,0,0,0] = 50
out_exp[0,1,0,0] = 500
out = fft.ifft(fsignal, tod=tod, axes=[-2, -1])
self.assertTrue(np.shares_memory(tod, out))
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertFalse(out.flags['C_CONTIGUOUS'])
# 2D IFFT over middle 2 axes of 4d array.
fsignal = np.ones((1, 5, 10, 2), dtype=np.complex128)
fsignal[0,:,:,1] = 10.
out_exp = np.zeros((1, 5, 10, 2))
out_exp[0,0,0,0] = 50
out_exp[0,0,0,1] = 500
out = fft.ifft(fsignal, axes=[-3, -2])
np.testing.assert_allclose(out, out_exp, atol=1e-12)
self.assertTrue(out.flags['C_CONTIGUOUS'])
def test_extract(self):
# Tests that extraction is sensible
shape,wcs = enmap.geometry(pos=(0,0),shape=(500,500),res=0.01)
imap = enmap.enmap(np.random.random(shape),wcs)
smap = imap[200:300,200:300]
sshape,swcs = smap.shape,smap.wcs
smap2 = enmap.extract(imap,sshape,swcs)
pixbox = enmap.pixbox_of(imap.wcs,sshape,swcs)
# Do write and read test
filename = "temporary_extract_map.fits" # NOT THREAD SAFE
enmap.write_map(filename,imap)
smap3 = enmap.read_map(filename,pixbox=pixbox)
os.remove(filename)
assert np.all(np.isclose(smap,smap2))
assert np.all(np.isclose(smap,smap3))
assert wcsutils.equal(smap.wcs,smap2.wcs)
assert wcsutils.equal(smap.wcs,smap3.wcs)
def test_fullsky_geometry(self):
# Tests whether number of pixels and area of a full-sky 0.5 arcminute resolution map are correct
print("Testing full sky geometry...")
test_res_arcmin = 0.5
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(test_res_arcmin/60.),proj='car')
assert shape[0]==21601 and shape[1]==43200
assert abs(enmap.area(shape,wcs) - 4*np.pi) < 1e-6
def test_pixels(self):
"""Runs reference pixel and mean-square comparisons on extracts from randomly generated
maps"""
print("Testing reference pixels...")
results,rname = ptests.get_extraction_test_results(TEST_DIR+"/tests.yml")
cresults = pickle.load(open(DATA_PREFIX+"%s.pkl" % rname,'rb'))
assert sorted(results.keys())==sorted(cresults.keys())
for g in results.keys():
assert sorted(results[g].keys())==sorted(cresults[g].keys())
for s in results[g].keys():
assert sorted(results[g][s].keys())==sorted(cresults[g][s].keys())
for e in results[g][s].keys():
assert np.all(np.isclose(results[g][s][e],cresults[g][s][e]))
def test_sim_slice(self):
ps = powspec.read_spectrum(DATA_PREFIX+"test_scalCls.dat")[:1,:1]
test_res_arcmin = 10.0
lmax = 2000
fact = 2.
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(test_res_arcmin/60.),proj='car')
omap = curvedsky.rand_map(shape, wcs, ps,lmax=lmax)
ofunc = lambda ishape,iwcs: fact*enmap.extract(omap,ishape,iwcs)
nmap = reproject.populate(shape,wcs,ofunc,maxpixy = 400,maxpixx = 400)
assert np.all(np.isclose(nmap/omap,2.))
def test_b_sign(self):
"""
We generate a random IQU map with geometry such that cdelt[0]<0
We transform this to TEB with map2harm and map2alm followed by
scalar harm2map and alm2map and use these as reference T,E,B maps.
We flip the original map along the RA direction.
We transform this to TEB with map2harm and map2alm followed by
scalar harm2map and alm2map and use these as comparison T,E,B maps.
We compare these maps.
"""
ells,cltt,clee,clbb,clte = np.loadtxt(DATA_PREFIX+"cosmo2017_10K_acc3_lensedCls.dat",unpack=True)
ps_cmb = np.zeros((3,3,ells.size))
ps_cmb[0,0] = cltt
ps_cmb[1,1] = clee
ps_cmb[2,2] = clbb
ps_cmb[1,0] = clte
ps_cmb[0,1] = clte
np.random.seed(100)
# Curved-sky is fine
lmax = 1000
alm = curvedsky.rand_alm_healpy(ps_cmb,lmax=lmax)
shape,iwcs = enmap.fullsky_geometry(res=np.deg2rad(10./60.))
wcs = enmap.empty(shape,iwcs)[...,::-1].wcs
shape = (3,) + shape
imap = curvedsky.alm2map(alm,enmap.empty(shape,wcs))
oalm = curvedsky.map2alm(imap.copy(),lmax=lmax)
rmap = curvedsky.alm2map(oalm,enmap.empty(shape,wcs),spin=0)
imap2 = imap.copy()[...,::-1]
oalm = curvedsky.map2alm(imap2.copy(),lmax=lmax)
rmap2 = curvedsky.alm2map(oalm,enmap.empty(shape,wcs),spin=0)
assert np.all(np.isclose(rmap[0],rmap2[0]))
assert np.all(np.isclose(rmap[1],rmap2[1]))
assert np.all(np.isclose(rmap[2],rmap2[2]))
# Flat-sky
px = 2.0
N = 300
shape,iwcs = enmap.geometry(pos=(0,0),res=np.deg2rad(px/60.),shape=(300,300))
shape = (3,) + shape
a = enmap.zeros(shape,iwcs)
a = a[...,::-1]
wcs = a.wcs
seed = 100
imap = enmap.rand_map(shape,wcs,ps_cmb,seed=seed)
kmap = enmap.map2harm(imap.copy())
rmap = enmap.harm2map(kmap,spin=0) # reference map
imap = imap[...,::-1]
kmap = enmap.map2harm(imap.copy())
rmap2 = enmap.harm2map(kmap,spin=0)[...,::-1] # comparison map
assert np.all(np.isclose(rmap[0],rmap2[0]))
assert np.all(np.isclose(rmap[1],rmap2[1],atol=1e0))
assert np.all(np.isclose(rmap[2],rmap2[2],atol=1e0))
def test_plain_wcs(self):
# Test area and box for a small Cartesian geometry
shape,wcs = enmap.geometry(res=np.deg2rad(1./60.),shape=(600,600),pos=(0,0),proj='plain')
box = np.rad2deg(enmap.box(shape,wcs))
area = np.rad2deg(np.rad2deg(enmap.area(shape,wcs)))
assert np.all(np.isclose(box,np.array([[-5,-5],[5,5]])))
assert np.isclose(area,100.)
# and for an artifical Cartesian geometry with area>4pi
shape,wcs = enmap.geometry(res=np.deg2rad(10),shape=(100,100),pos=(0,0),proj='plain')
box = np.rad2deg(enmap.box(shape,wcs))
area = np.rad2deg(np.rad2deg(enmap.area(shape,wcs)))
assert np.all(np.isclose(box,np.array([[-500,-500],[500,500]])))
assert np.isclose(area,1000000)
def test_pospix(self):
# Posmap separable and non-separable on CAR
for res in [6,12,24]:
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(res/60.),proj='car')
posmap1 = enmap.posmap(shape,wcs)
posmap2 = enmap.posmap(shape,wcs,separable=True)
assert np.all(np.isclose(posmap1,posmap2))
# Pixmap plain
pres = 0.5
shape,wcs = enmap.geometry(pos=(0,0),shape=(30,30),res=pres*u.degree,proj='plain')
yp,xp = enmap.pixshapemap(shape,wcs)
assert np.all(np.isclose(yp,pres*u.degree))
assert np.all(np.isclose(xp,pres*u.degree))
yp,xp = enmap.pixshape(shape,wcs)
parea = enmap.pixsize(shape,wcs)
assert np.isclose(parea,(pres*u.degree)**2)
assert np.isclose(yp,pres*u.degree)
assert np.isclose(xp,pres*u.degree)
pmap = enmap.pixsizemap(shape,wcs)
assert np.all(np.isclose(pmap,(pres*u.degree)**2))
# Pixmap CAR
pres = 0.1
dec_cut = 89.5 # pixsizemap is not accurate near the poles currently
shape,wcs = enmap.band_geometry(dec_cut=dec_cut*u.degree,res=pres*u.degree,proj='car')
# Current slow and general but inaccurate near the poles implementation
pmap = enmap.pixsizemap(shape,wcs)
# Fast CAR-specific pixsizemap implementation
dra, ddec = wcs.wcs.cdelt*u.degree
dec = enmap.posmap([shape[-2],1],wcs)[0,:,0]
area = np.abs(dra*(np.sin(np.minimum(np.pi/2.,dec+ddec/2))-np.sin(np.maximum(-np.pi/2.,dec-ddec/2))))
Nx = shape[-1]
pmap2 = enmap.ndmap(area[...,None].repeat(Nx,axis=-1),wcs)
assert np.all(np.isclose(pmap,pmap2))
def test_project_nn(self):
shape,wcs = enmap.fullsky_geometry(res=np.deg2rad(12/60.),proj='car')
shape2,wcs2 = enmap.fullsky_geometry(res=np.deg2rad(6/60.),proj='car')
shape3,wcs3 = enmap.fullsky_geometry(res=np.deg2rad(24/60.),proj='car')
imap = enmap.ones(shape,wcs)
omap2 = enmap.project(imap,shape2,wcs2,order=0,mode='wrap')
omap3 = enmap.project(imap,shape3,wcs3,order=0,mode='wrap')
assert np.all(np.isclose(omap2,1))
assert np.all(np.isclose(omap3,1))
def test_wcsunequal(self):
shape1,wcs1 = enmap.geometry(pos=(0,0),shape=(100,100),res=1*u.arcmin,proj='car')
shape1,wcs2 = enmap.geometry(pos=(0,0),shape=(100,100),res=1*u.arcmin,proj='cea')
shape1,wcs3 = enmap.geometry(pos=(10,10),shape=(100,100),res=1*u.arcmin,proj='car')
shape1,wcs4 = enmap.geometry(pos=(0,0),shape=(100,100),res=2*u.arcmin,proj='car')
assert not(wcsutils.equal(wcs1,wcs2))
assert not(wcsutils.equal(wcs1,wcs3))
assert not(wcsutils.equal(wcs1,wcs4))
def test_scale(self):
# Test (with a plain geometry) that scale_geometry
# will result in geometries with the same bounding box
# but different area pixel
pres = 0.5
ufact = 2
dfact = 0.5
shape,wcs = enmap.geometry(pos=(0,0),shape=(30,30),res=pres*u.arcmin,proj='plain')
ushape,uwcs = enmap.scale_geometry(shape,wcs,ufact)
dshape,dwcs = enmap.scale_geometry(shape,wcs,dfact)
box = enmap.box(shape,wcs)
ubox = enmap.box(ushape,uwcs)
dbox = enmap.box(dshape,dwcs)
parea = enmap.pixsize(shape,wcs)
uparea = enmap.pixsize(ushape,uwcs)
dparea = enmap.pixsize(dshape,dwcs)
assert np.all(np.isclose(box,ubox))
assert np.all(np.isclose(box,dbox))
assert np.isclose(parea/(ufact**2),uparea)
assert np.isclose(parea/(dfact**2),dparea)
def test_prepare_alm_mmax(self):
# Check if mmax is correctly handled by prepare_alm.
# Create lmax=mmax=3 alm array and corresponding alm_info.
lmax = 3
nalm = 10 # Triangular alm array of lmax=3 has 10 elements.
alm_in = np.arange(nalm, dtype=np.complex128)
ainfo_in = sharp.alm_info(
lmax=3, mmax=3, nalm=nalm, stride=1, layout="triangular")
# Case 1: provide only alm.
alm_out, ainfo_out = curvedsky.prepare_alm(alm=alm_in, ainfo=None)
np.testing.assert_array_almost_equal(alm_out, alm_in)
self.assertIs(ainfo_out.lmax, ainfo_in.lmax)
self.assertIs(ainfo_out.mmax, ainfo_in.mmax)
self.assertIs(ainfo_out.nelem, ainfo_in.nelem)
# Case 2: provide only alm_info.
alm_out, ainfo_out = curvedsky.prepare_alm(alm=None, ainfo=ainfo_in)
# Expect zero array.
np.testing.assert_array_almost_equal(alm_out, alm_in * 0)
self.assertIs(ainfo_out.lmax, ainfo_in.lmax)
self.assertIs(ainfo_out.mmax, ainfo_in.mmax)
self.assertIs(ainfo_out.nelem, ainfo_in.nelem)
# Case 3: provide alm and alm_info
alm_out, ainfo_out = curvedsky.prepare_alm(alm=alm_in, ainfo=ainfo_in)
np.testing.assert_array_almost_equal(alm_out, alm_in)
self.assertIs(ainfo_out.lmax, ainfo_in.lmax)
self.assertIs(ainfo_out.mmax, ainfo_in.mmax)
self.assertIs(ainfo_out.nelem, ainfo_in.nelem)
# Case 4: provide only alm with lmax=3 and mmax=1.
# This should currently fail.
nalm = 7
alm_in = np.arange(7, dtype=np.complex128)
self.assertRaises(AssertionError, curvedsky.prepare_alm,
**dict(alm=alm_in, ainfo=None, lmax=lmax))
# Case 5: provide only alm_info with lmax=3 and mmax=1.
nalm = 7
ainfo_in = sharp.alm_info(
lmax=3, mmax=1, nalm=nalm, stride=1, layout="triangular")
alm_exp = np.zeros(7, dtype=np.complex128)
alm_out, ainfo_out = curvedsky.prepare_alm(alm=None, ainfo=ainfo_in)
np.testing.assert_array_almost_equal(alm_out, alm_exp)
self.assertIs(ainfo_out.lmax, ainfo_in.lmax)
self.assertIs(ainfo_out.mmax, ainfo_in.mmax)
self.assertIs(ainfo_out.nelem, ainfo_in.nelem)
# Case 6: provide both alm and alm_info with lmax=3 and mmax=1.
# This should be allowed.
nalm = 7
ainfo_in = sharp.alm_info(
lmax=3, mmax=1, nalm=nalm, stride=1, layout="triangular")
alm_in = np.arange(7, dtype=np.complex128)
alm_out, ainfo_out = curvedsky.prepare_alm(alm=alm_in, ainfo=ainfo_in)
        np.testing.assert_array_almost_equal(alm_out, alm_in)
import pytry
import os
import random
import nengo
import nengo_extras
import numpy as np
import nengo_dl
import tensorflow as tf
import davis_tracking
class TrackingTrial(pytry.PlotTrial):
def params(self):
self.param('number of data sets to use', n_data=-1)
self.param('data directory', dataset_dir=r'../dataset')
self.param('dt', dt=0.1)
self.param('dt_test', dt_test=0.001)
self.param('decay time (input synapse)', decay_time=0.01)
self.param('test set (odd|one|train)', test_set='one')
self.param('augment training set with flips', augment=False)
        self.param('minibatch size', minibatch_size=200)
self.param('learning rate', learning_rate=1e-3)
self.param('number of epochs', n_epochs=5)
self.param('saturation', saturation=5)
self.param('separate positive and negative channels', separate_channels=True)
self.param('number of features in layer 1', n_features_1=10)
self.param('number of features in layer 2', n_features_2=10)
self.param('split spatial configuration', split_spatial=False)
self.param('spatial stride', spatial_stride=6)
self.param('spatial kernel size', spatial_size=12)
self.param('number of parallel ensembles', n_parallel=1)
self.param('merge pixels (to make a smaller image)', merge=5)
self.param('normalize inputs', normalize=False)
self.param('save parameters', save_params=True)
self.param('load parameters from a file', load_params_from='')
self.param('use nengo (instead of nengo_dl)', use_nengo=False)
self.param('input data (events|frames|both)', input_data='events')
def evaluate(self, p, plt):
files = []
sets = []
for f in os.listdir(p.dataset_dir):
if f.endswith('events'):
files.append(os.path.join(p.dataset_dir, f))
if p.test_set == 'one':
test_file = random.sample(files, 1)[0]
files.remove(test_file)
if p.n_data != -1:
files = random.sample(files, p.n_data)
if len(p.load_params_from) > 0:
params = np.load(p.load_params_from, allow_pickle=True)
else:
params = None
strip_edges = 3 # the number of edge pixels to remove due to convolution
inputs = []
targets = []
targets_raw = []
for f in files:
times, imgs, targs = davis_tracking.load_data(f, dt=p.dt, decay_time=p.decay_time,
separate_channels=p.separate_channels,
saturation=p.saturation, merge=p.merge)
if p.input_data in ['frames', 'both']:
times_frames, frames_raw = davis_tracking.load_frames(f.replace('.events', '.frame'), merge=p.merge)
frames = []
for t in times:
index = np.searchsorted(times_frames, t)
frames.append(frames_raw[index-1]*2-1)
if p.input_data == 'both':
imgs = np.hstack([imgs, frames])
else:
imgs = np.array(frames)
inputs.append(imgs)
targets_raw.append(targs[:, :2])
targets.append(davis_tracking.make_heatmap(targs, merge=p.merge, strip_edges=strip_edges).reshape(len(targs),-1))
inputs_all = np.vstack(inputs)
targets_all = np.vstack(targets)
targets_all_raw = np.vstack(targets_raw)
if p.test_set == 'odd':
inputs_train = inputs_all[::2]
inputs_test = inputs_all[1::2]
targets_train = targets_all[::2]
targets_test = targets_all[1::2]
targets_test_raw = targets_all_raw[1::2]
dt_test = p.dt*2
elif p.test_set == 'one':
times, imgs, targs = davis_tracking.load_data(test_file, dt=p.dt_test, decay_time=p.decay_time,
separate_channels=p.separate_channels,
saturation=p.saturation, merge=p.merge)
if p.input_data in ['frames', 'both']:
times_frames, frames_raw = davis_tracking.load_frames(test_file.replace('.events', '.frame'), merge=p.merge)
frames = []
for t in times:
index = np.searchsorted(times_frames, t)
frames.append(frames_raw[index-1]*2-1)
if p.input_data == 'both':
imgs = np.hstack([imgs, frames])
else:
imgs = np.array(frames)
inputs_test = imgs
targets_test_raw = targs[:, :2]
targets_test = davis_tracking.make_heatmap(targs, merge=p.merge, strip_edges=strip_edges).reshape(len(targs), -1)
inputs_train = inputs_all
targets_train = targets_all
dt_test = p.dt_test
elif p.test_set == 'train':
inputs_train = inputs_all
inputs_test = inputs_all
targets_train = targets_all
targets_test = targets_all
targets_test_raw = targets_all_raw
dt_test = p.dt
if p.input_data == 'events':
if p.separate_channels:
shape = (2, 180//p.merge, 240//p.merge)
else:
shape = (1, 180//p.merge, 240//p.merge)
elif p.input_data == 'frames':
shape = (1, 180//p.merge, 240//p.merge)
elif p.input_data == 'both':
if p.separate_channels:
shape = (3, 180//p.merge, 240//p.merge)
else:
shape = (2, 180//p.merge, 240//p.merge)
output_shape = shape[1]-strip_edges*2, shape[2]-strip_edges*2
dimensions = shape[0]*shape[1]*shape[2]
if p.normalize:
magnitude = np.linalg.norm(inputs_train.reshape(-1, dimensions), axis=1)
inputs_train = inputs_train*(1.0/magnitude[:,None,None])
magnitude = np.linalg.norm(inputs_test.reshape(-1, dimensions), axis=1)
inputs_test = inputs_test*(1.0/magnitude[:,None,None])
max_rate = 100
amp = 1 / max_rate
model = nengo.Network()
with model:
model.config[nengo.Ensemble].neuron_type = nengo.RectifiedLinear(amplitude=amp)
model.config[nengo.Ensemble].max_rates = nengo.dists.Choice([max_rate])
model.config[nengo.Ensemble].intercepts = nengo.dists.Choice([0])
model.config[nengo.Connection].synapse = None
inp = nengo.Node(
nengo.processes.PresentInput(inputs_test.reshape(-1, dimensions), dt_test),
size_out=dimensions,
)
out = nengo.Node(None, size_in=targets_train.shape[-1])
if not p.split_spatial:
# do a standard convnet
init = params[2]['transform'].init if params is not None else nengo.dists.Uniform(-1, 1)
conv1 = nengo.Convolution(p.n_features_1, shape, channels_last=False, strides=(1,1),
padding='valid',
kernel_size=(3,3),
init=init)
layer1 = nengo.Ensemble(conv1.output_shape.size, dimensions=1)
p_layer1 = nengo.Probe(layer1.neurons)
if params is not None:
layer1.gain = params[0]['gain']
layer1.bias = params[0]['bias']
nengo.Connection(inp, layer1.neurons, transform=conv1)
init = params[3]['transform'].init if params is not None else nengo.dists.Uniform(-1, 1)
conv2 = nengo.Convolution(p.n_features_2, conv1.output_shape, channels_last=False, strides=(1,1),
padding='valid',
kernel_size=(3,3),
init=init)
layer2 = nengo.Ensemble(conv2.output_shape.size, dimensions=1)
p_layer2 = nengo.Probe(layer2.neurons)
if params is not None:
layer2.gain = params[1]['gain']
layer2.bias = params[1]['bias']
nengo.Connection(layer1.neurons, layer2.neurons, transform=conv2)
init = params[4]['transform'].init if params is not None else nengo.dists.Uniform(-1, 1)
conv3 = nengo.Convolution(1, conv2.output_shape, channels_last=False, strides=(1,1),
padding='valid',
kernel_size=(3,3),
init=init)
nengo.Connection(layer2.neurons, out, transform=conv3)
else:
# do the weird spatially split convnet
convnet = davis_tracking.ConvNet(nengo.Network())
convnet.make_input_layer(
shape,
spatial_stride=(p.spatial_stride, p.spatial_stride),
spatial_size=(p.spatial_size,p.spatial_size))
nengo.Connection(inp, convnet.input)
init = params[2]['transform'].init if params is not None else nengo.dists.Uniform(-1, 1)
convnet.make_middle_layer(n_features=p.n_features_1, n_parallel=p.n_parallel, n_local=1,
kernel_stride=(1,1), kernel_size=(3,3), init=init)
init = params[3]['transform'].init if params is not None else nengo.dists.Uniform(-1, 1)
convnet.make_middle_layer(n_features=p.n_features_2, n_parallel=p.n_parallel, n_local=p.n_parallel,
kernel_stride=(1,1), kernel_size=(3,3), init=init)
init = params[4]['transform'].init if params is not None else nengo.dists.Uniform(-1, 1)
convnet.make_middle_layer(n_features=1, n_parallel=1, n_local=p.n_parallel,
kernel_stride=(1,1), kernel_size=(3,3), init=init, use_neurons=False)
convnet.make_merged_output(output_shape)
nengo.Connection(convnet.output, out)
if params is not None:
assert np.allclose(params[0]['gain'], 100, atol=1e-5)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import itertools
import inspect
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy import units as u
from astropy.units.quantity_helper.function_helpers import (
ARRAY_FUNCTION_ENABLED, SUBCLASS_SAFE_FUNCTIONS, UNSUPPORTED_FUNCTIONS,
FUNCTION_HELPERS, DISPATCHED_FUNCTIONS, IGNORED_FUNCTIONS)
from astropy.utils.compat import (
NUMPY_LT_1_14, NUMPY_LT_1_15, NUMPY_LT_1_16, NUMPY_LT_1_18)
NO_ARRAY_FUNCTION = not ARRAY_FUNCTION_ENABLED
# To get the functions that could be covered, we look for those that
# are wrapped. Of course, this does not give a full list pre-1.17.
all_wrapped_functions = {name: f for name, f in np.__dict__.items()
if callable(f) and hasattr(f, '__wrapped__') and
(NUMPY_LT_1_15 or f is not np.printoptions)}
all_wrapped = set(all_wrapped_functions.values())
class CoverageMeta(type):
"""Meta class that tracks which functions are covered by tests.
Assumes that a test is called 'test_<function_name>'.
"""
covered = set()
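# For example, a method named ``test_reshape`` marks ``np.reshape`` as covered:
# __new__ below strips the ``test_`` prefix and looks the remainder up in
# ``all_wrapped_functions``.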
def __new__(mcls, name, bases, members):
for k, v in members.items():
if inspect.isfunction(v) and k.startswith('test'):
f = k.replace('test_', '')
if f in all_wrapped_functions:
mcls.covered.add(all_wrapped_functions[f])
return super().__new__(mcls, name, bases, members)
class BasicTestSetup(metaclass=CoverageMeta):
"""Test setup for functions that should not change the unit.
Also provides a default Quantity with shape (3, 3) and units of m.
"""
def setup(self):
self.q = np.arange(9.).reshape(3, 3) / 4. * u.m
class InvariantUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
o = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs) * self.q.unit
assert o.shape == expected.shape
assert np.all(o == expected)
class NoUnitTestSetup(BasicTestSetup):
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
assert type(out) is type(expected)
if isinstance(expected, tuple):
assert all(np.all(o == x) for o, x in zip(out, expected))
else:
assert np.all(out == expected)
class TestShapeInformation(BasicTestSetup):
# alen is deprecated in Numpy 1.18
if NUMPY_LT_1_18:
def test_alen(self):
assert np.alen(self.q) == 3
def test_shape(self):
assert np.shape(self.q) == (3, 3)
def test_size(self):
assert np.size(self.q) == 9
def test_ndim(self):
assert np.ndim(self.q) == 2
class TestShapeManipulation(InvariantUnitTestSetup):
# Note: do not parametrize the below, since test names are used
# to check coverage.
def test_reshape(self):
self.check(np.reshape, (9, 1))
def test_ravel(self):
self.check(np.ravel)
def test_moveaxis(self):
self.check(np.moveaxis, 0, 1)
def test_rollaxis(self):
self.check(np.rollaxis, 0, 2)
def test_swapaxes(self):
self.check(np.swapaxes, 0, 1)
def test_transpose(self):
self.check(np.transpose)
def test_atleast_1d(self):
q = 1. * u.m
o, so = np.atleast_1d(q, self.q)
assert o.shape == (1,)
assert o == q
expected = np.atleast_1d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_2d(self):
q = 1. * u.m
o, so = np.atleast_2d(q, self.q)
assert o.shape == (1, 1)
assert o == q
expected = np.atleast_2d(self.q.value) * u.m
assert np.all(so == expected)
def test_atleast_3d(self):
q = 1. * u.m
o, so = np.atleast_3d(q, self.q)
assert o.shape == (1, 1, 1)
assert o == q
expected = np.atleast_3d(self.q.value) * u.m
assert np.all(so == expected)
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="expand_dims used asarray in numpy <1.16")
def test_expand_dims(self):
self.check(np.expand_dims, 1)
def test_squeeze(self):
o = np.squeeze(self.q[:, np.newaxis, :])
assert o.shape == (3, 3)
assert np.all(o == self.q)
@pytest.mark.xfail(NUMPY_LT_1_15,
reason="flip needs axis argument in numpy <1.15")
def test_flip(self):
self.check(np.flip)
def test_fliplr(self):
self.check(np.fliplr)
def test_flipud(self):
self.check(np.flipud)
def test_rot90(self):
self.check(np.rot90)
def test_broadcast_to(self):
# TODO: should we change the default for subok?
self.check(np.broadcast_to, (3, 3, 3), subok=True)
def test_broadcast_arrays(self):
# TODO: should we change the default for subok?
q2 = np.ones((3, 3, 3)) / u.s
o1, o2 = np.broadcast_arrays(self.q, q2, subok=True)
assert isinstance(o1, u.Quantity)
assert isinstance(o2, u.Quantity)
assert o1.shape == o2.shape == (3, 3, 3)
assert np.all(o1 == self.q)
assert np.all(o2 == q2)
class TestArgFunctions(NoUnitTestSetup):
def test_argmin(self):
self.check(np.argmin)
def test_argmax(self):
self.check(np.argmax)
def test_argsort(self):
self.check(np.argsort)
def test_lexsort(self):
self.check(np.lexsort)
def test_searchsorted(self):
q = self.q.ravel()
q2 = np.array([150., 350.]) * u.cm
out = np.searchsorted(q, q2)
expected = np.searchsorted(q.value, q2.to_value(q.unit))
assert np.all(out == expected)
def test_nonzero(self):
self.check(np.nonzero)
def test_argwhere(self):
self.check(np.argwhere)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_argpartition(self):
self.check(np.argpartition, 2)
def test_flatnonzero(self):
self.check(np.flatnonzero)
class TestAlongAxis(BasicTestSetup):
@pytest.mark.skipif(NUMPY_LT_1_15,
reason="take_along_axis added in numpy 1.15")
def test_take_along_axis(self):
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
out = np.take_along_axis(self.q, indices, axis=0)
expected = np.take_along_axis(self.q.value, indices,
axis=0) * self.q.unit
assert np.all(out == expected)
@pytest.mark.skipif(NUMPY_LT_1_15,
reason="put_along_axis added in numpy 1.15")
def test_put_along_axis(self):
q = self.q.copy()
indices = np.expand_dims(np.argmax(self.q, axis=0), axis=0)
np.put_along_axis(q, indices, axis=0, values=-100 * u.cm)
expected = q.value.copy()
np.put_along_axis(expected, indices, axis=0, values=-1)
expected = expected * q.unit
assert np.all(q == expected)
@pytest.mark.parametrize('axis', (0, 1))
def test_apply_along_axis(self, axis):
out = np.apply_along_axis(np.square, axis, self.q)
expected = np.apply_along_axis(np.square, axis,
self.q.value) * self.q.unit ** 2
assert_array_equal(out, expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
@pytest.mark.parametrize('axes', ((1,), (0,), (0, 1)))
def test_apply_over_axes(self, axes):
def function(x, axis):
return np.sum(np.square(x), axis)
out = np.apply_over_axes(function, self.q, axes)
expected = np.apply_over_axes(function, self.q.value, axes)
expected = expected * self.q.unit ** (2 * len(axes))
assert_array_equal(out, expected)
class TestIndicesFrom(NoUnitTestSetup):
def test_diag_indices_from(self):
self.check(np.diag_indices_from)
def test_triu_indices_from(self):
self.check(np.triu_indices_from)
def test_tril_indices_from(self):
self.check(np.tril_indices_from)
class TestRealImag(InvariantUnitTestSetup):
def setup(self):
self.q = (np.arange(9.).reshape(3, 3) + 1j) * u.m
def test_real(self):
self.check(np.real)
def test_imag(self):
self.check(np.imag)
class TestCopyAndCreation(InvariantUnitTestSetup):
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_copy(self):
self.check(np.copy)
# Also as kwarg
copy = np.copy(a=self.q)
assert_array_equal(copy, self.q)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_asfarray(self):
self.check(np.asfarray)
farray = np.asfarray(a=self.q)
assert_array_equal(farray, self.q)
def test_empty_like(self):
o = np.empty_like(self.q)
assert o.shape == (3, 3)
assert isinstance(o, u.Quantity)
assert o.unit == self.q.unit
o2 = np.empty_like(prototype=self.q)
assert o2.shape == (3, 3)
assert isinstance(o2, u.Quantity)
assert o2.unit == self.q.unit
o3 = np.empty_like(self.q, subok=False)
assert type(o3) is np.ndarray
def test_zeros_like(self):
self.check(np.zeros_like)
o2 = np.zeros_like(a=self.q)
assert_array_equal(o2, self.q * 0.)
def test_ones_like(self):
self.check(np.ones_like)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_full_like(self):
o = np.full_like(self.q, 0.5 * u.km)
expected = np.empty_like(self.q.value) * u.m
expected[...] = 0.5 * u.km
assert np.all(o == expected)
with pytest.raises(u.UnitsError):
np.full_like(self.q, 0.5 * u.s)
class TestAccessingParts(InvariantUnitTestSetup):
def test_diag(self):
self.check(np.diag)
def test_diagonal(self):
self.check(np.diagonal)
def test_diagflat(self):
self.check(np.diagflat)
def test_compress(self):
o = np.compress([True, False, True], self.q, axis=0)
expected = np.compress([True, False, True], self.q.value,
axis=0) * self.q.unit
assert np.all(o == expected)
def test_extract(self):
o = np.extract([True, False, True], self.q)
expected = np.extract([True, False, True],
self.q.value) * self.q.unit
assert np.all(o == expected)
def test_delete(self):
self.check(np.delete, slice(1, 2), 0)
self.check(np.delete, [0, 2], 1)
def test_trim_zeros(self):
q = self.q.ravel()
out = np.trim_zeros(q)
expected = np.trim_zeros(q.value) * u.m
assert np.all(out == expected)
def test_roll(self):
self.check(np.roll, 1)
self.check(np.roll, 1, axis=0)
def test_take(self):
self.check(np.take, [0, 1], axis=1)
self.check(np.take, 1)
class TestSettingParts(metaclass=CoverageMeta):
def test_put(self):
q = np.arange(3.) * u.m
np.put(q, [0, 2], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_putmask(self):
q = np.arange(3.) * u.m
mask = [True, False, True]
values = [50, 0, 150] * u.cm
np.putmask(q, mask, values)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
with pytest.raises(u.UnitsError):
np.putmask(q, mask, values.value)
with pytest.raises(u.UnitsError):
np.putmask(q.value, mask, values)
a = np.arange(3.)
values = [50, 0, 150] * u.percent
np.putmask(a, mask, values)
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_place(self):
q = np.arange(3.) * u.m
np.place(q, [True, False, True], [50, 150] * u.cm)
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.place(a, [True, False, True], [50, 150] * u.percent)
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_copyto(self):
q = np.arange(3.) * u.m
np.copyto(q, [50, 0, 150] * u.cm, where=[True, False, True])
assert q.unit == u.m
expected = [50, 100, 150] * u.cm
assert np.all(q == expected)
a = np.arange(3.)
np.copyto(a, [50, 0, 150] * u.percent, where=[True, False, True])
assert type(a) is np.ndarray
expected = np.array([0.5, 1., 1.5])
assert np.all(a == expected)
def test_fill_diagonal(self):
q = np.arange(9.).reshape(3, 3) * u.m
expected = q.value.copy()
np.fill_diagonal(expected, 0.25)
expected = expected * u.m
np.fill_diagonal(q, 25. * u.cm)
assert q.unit == u.m
assert np.all(q == expected)
class TestRepeat(InvariantUnitTestSetup):
def test_tile(self):
self.check(np.tile, 2)
def test_repeat(self):
self.check(np.repeat, 2)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_resize(self):
self.check(np.resize, (4, 4))
class TestConcatenate(metaclass=CoverageMeta):
def setup(self):
self.q1 = np.arange(6.).reshape(2, 3) * u.m
self.q2 = self.q1.to(u.cm)
def check(self, func, *args, **kwargs):
q_list = kwargs.pop('q_list', [self.q1, self.q2])
o = func(q_list, *args, **kwargs)
unit = q_list[0].unit
v_list = [q.to_value(unit) for q in q_list]
expected = func(v_list, *args, **kwargs) * unit
assert o.shape == expected.shape
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_concatenate(self):
self.check(np.concatenate)
self.check(np.concatenate, axis=1)
out = np.empty((4, 3)) * u.dimensionless_unscaled
result = np.concatenate([self.q1, self.q2], out=out)
assert out is result
assert out.unit == self.q1.unit
expected = np.concatenate(
[self.q1.value, self.q2.to_value(self.q1.unit)]) * self.q1.unit
assert np.all(result == expected)
with pytest.raises(TypeError):
np.concatenate([self.q1, object()])
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_stack(self):
self.check(np.stack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_column_stack(self):
self.check(np.column_stack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_hstack(self):
self.check(np.hstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_vstack(self):
self.check(np.vstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_dstack(self):
self.check(np.dstack)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_block(self):
self.check(np.block)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_append(self):
out = np.append(self.q1, self.q2, axis=0)
assert out.unit == self.q1.unit
expected = np.append(self.q1.value, self.q2.to_value(self.q1.unit),
axis=0) * self.q1.unit
assert np.all(out == expected)
a = np.arange(3.)
result = np.append(a, 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.append(a, 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_insert(self):
# Unit of inserted values is converted to that of the array.
q = np.arange(12.).reshape(6, 2) * u.m
out = np.insert(q, (3, 5), [50., 25.] * u.cm)
assert isinstance(out, u.Quantity)
assert out.unit == q.unit
expected = np.insert(q.value, (3, 5), [0.5, 0.25]) * u.m
assert np.all(out == expected)
a = np.arange(3.)
result = np.insert(a, (2,), 50. * u.percent)
assert isinstance(result, u.Quantity)
assert result.unit == u.dimensionless_unscaled
expected = np.insert(a, (2,), 0.5) * u.dimensionless_unscaled
assert np.all(result == expected)
with pytest.raises(TypeError):
np.insert(q, 3 * u.cm, 50. * u.cm)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_pad(self):
q = np.arange(1., 6.) * u.m
out = np.pad(q, (2, 3), 'constant', constant_values=(0., 150.*u.cm))
assert out.unit == q.unit
expected = np.pad(q.value, (2, 3), 'constant',
constant_values=(0., 1.5)) * q.unit
assert np.all(out == expected)
out2 = np.pad(q, (2, 3), 'constant', constant_values=150.*u.cm)
assert out2.unit == q.unit
expected2 = np.pad(q.value, (2, 3), 'constant',
constant_values=1.5) * q.unit
assert np.all(out2 == expected2)
out3 = np.pad(q, (2, 3), 'linear_ramp', end_values=(25.*u.cm, 0.))
assert out3.unit == q.unit
expected3 = np.pad(q.value, (2, 3), 'linear_ramp',
end_values=(0.25, 0.)) * q.unit
assert np.all(out3 == expected3)
class TestSplit(metaclass=CoverageMeta):
def setup(self):
self.q = np.arange(54.).reshape(3, 3, 6) * u.m
def check(self, func, *args, **kwargs):
out = func(self.q, *args, **kwargs)
expected = func(self.q.value, *args, **kwargs)
expected = [x * self.q.unit for x in expected]
assert len(out) == len(expected)
assert all(o.shape == x.shape for o, x in zip(out, expected))
assert all(np.all(o == x) for o, x in zip(out, expected))
def test_split(self):
self.check(np.split, [1])
def test_array_split(self):
self.check(np.array_split, 2)
def test_hsplit(self):
self.check(np.hsplit, [1, 4])
def test_vsplit(self):
self.check(np.vsplit, [1])
def test_dsplit(self):
self.check(np.dsplit, [1])
class TestUfuncReductions(InvariantUnitTestSetup):
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
def test_any(self):
with pytest.raises(NotImplementedError):
np.any(self.q)
def test_all(self):
with pytest.raises(NotImplementedError):
np.all(self.q)
def test_sometrue(self):
with pytest.raises(NotImplementedError):
np.sometrue(self.q)
def test_alltrue(self):
with pytest.raises(NotImplementedError):
np.alltrue(self.q)
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q)
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q)
class TestUfuncLike(InvariantUnitTestSetup):
def test_ptp(self):
self.check(np.ptp)
self.check(np.ptp, axis=0)
def test_round_(self):
self.check(np.round_)
def test_around(self):
self.check(np.around)
def test_fix(self):
self.check(np.fix)
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="angle used asarray in numpy <1.16")
def test_angle(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.angle(q)
expected = np.angle(q.value) * u.radian
assert np.all(out == expected)
def test_i0(self):
q = np.array([0., 10., 20.]) * u.percent
out = np.i0(q)
expected = np.i0(q.to_value(u.one)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.i0(self.q)
def test_clip(self):
qmin = 200 * u.cm
qmax = [270, 280, 290] * u.cm
out = np.clip(self.q, qmin, qmax)
expected = np.clip(self.q.value, qmin.to_value(self.q.unit),
qmax.to_value(self.q.unit)) * self.q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_sinc(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.sinc(q)
expected = np.sinc(q.to_value(u.radian)) * u.one
assert isinstance(out, u.Quantity)
assert np.all(out == expected)
with pytest.raises(u.UnitsError):
np.sinc(1.*u.one)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_where(self):
out = np.where([True, False, True], self.q, 1. * u.km)
expected = np.where([True, False, True], self.q.value,
1000.) * self.q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_choose(self):
# from np.choose docstring
a = np.array([0, 1]).reshape((2, 1, 1))
q1 = np.array([1, 2, 3]).reshape((1, 3, 1)) * u.cm
q2 = np.array([-1, -2, -3, -4, -5]).reshape((1, 1, 5)) * u.m
out = np.choose(a, (q1, q2))
# result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
expected = np.choose(a, (q1.value, q2.to_value(q1.unit))) * u.cm
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_select(self):
q = self.q
out = np.select([q < 0.55 * u.m, q > 1. * u.m],
[q, q.to(u.cm)], default=-1. * u.km)
expected = np.select([q.value < 0.55, q.value > 1],
[q.value, q.value], default=-1000) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_real_if_close(self):
q = np.array([1+0j, 0+1j, 1+1j, 0+0j]) * u.m
out = np.real_if_close(q)
expected = np.real_if_close(q.value) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_tril(self):
self.check(np.tril)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_triu(self):
self.check(np.triu)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_unwrap(self):
q = [0., 3690., -270., 690.] * u.deg
out = np.unwrap(q)
expected = (np.unwrap(q.to_value(u.rad)) * u.rad).to(q.unit)
assert out.unit == expected.unit
assert np.allclose(out, expected, atol=1*u.urad, rtol=0)
with pytest.raises(u.UnitsError):
np.unwrap([1., 2.]*u.m)
with pytest.raises(u.UnitsError):
np.unwrap(q, discont=1.*u.m)
def test_nan_to_num(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q)
expected = np.nan_to_num(q.value) * q.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_nan_to_num_complex(self):
q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
out = np.nan_to_num(q, nan=1.*u.km, posinf=2.*u.km, neginf=-2*u.km)
expected = [-2000., 2000., 1000., 3., 4.] * u.m
assert np.all(out == expected)
class TestUfuncLikeTests(metaclass=CoverageMeta):
def setup(self):
self.q = np.array([-np.inf, +np.inf, np.nan, 3., 4.]) * u.m
def check(self, func):
out = func(self.q)
expected = func(self.q.value)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
def test_isposinf(self):
self.check(np.isposinf)
def test_isneginf(self):
self.check(np.isneginf)
def test_isreal(self):
self.check(np.isreal)
assert not np.isreal([1. + 1j]*u.m)
def test_iscomplex(self):
self.check(np.iscomplex)
assert np.iscomplex([1. + 1j]*u.m)
def test_isclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 102., 199.]) * u.cm
atol = 1.5 * u.cm
rtol = 1. * u.percent
out = np.isclose(q1, q2, atol=atol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=atol.to_value(q1.unit))
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
out = np.isclose(q1, q2, atol=0, rtol=rtol)
expected = np.isclose(q1.value, q2.to_value(q1.unit),
atol=0, rtol=0.01)
assert type(out) is np.ndarray
assert out.dtype.kind == 'b'
assert np.all(out == expected)
@pytest.mark.xfail
def test_isclose_failure(self):
q_cm = self.q.to(u.cm)
# atol does not have units; TODO: should this work by default?
out = np.isclose(self.q, q_cm)
expected = np.isclose(self.q.value, q_cm.to_value(u.m))
assert np.all(out == expected)
class TestReductionLikeFunctions(InvariantUnitTestSetup):
def test_average(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
q2 = np.eye(3) / u.s
o = np.average(q1, weights=q2)
expected = np.average(q1.value, weights=q2.value) * u.m
assert np.all(o == expected)
def test_mean(self):
self.check(np.mean)
def test_std(self):
self.check(np.std)
def test_var(self):
o = np.var(self.q)
expected = np.var(self.q.value) * self.q.unit ** 2
assert np.all(o == expected)
def test_median(self):
self.check(np.median)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_quantile(self):
self.check(np.quantile, 0.5)
o = np.quantile(self.q, 50 * u.percent)
expected = np.quantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
# For ndarray input, we return a Quantity.
o2 = np.quantile(self.q.value, 50 * u.percent)
assert o2.unit == u.dimensionless_unscaled
assert np.all(o2 == expected.value)
o3 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, out=o3)
assert result is o3
assert np.all(o3 == expected)
o4 = 0 * o2
result = np.quantile(self.q, 50 * u.percent, None, o4)
assert result is o4
assert np.all(o4 == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_percentile(self):
self.check(np.percentile, 0.5)
o = np.percentile(self.q, 0.5 * u.one)
expected = np.percentile(self.q.value, 50) * u.m
assert np.all(o == expected)
def test_trace(self):
self.check(np.trace)
@pytest.mark.xfail(NO_ARRAY_FUNCTION and not NUMPY_LT_1_14,
reason=("Needs __array_function__ support "
"(or numpy < 1.14)"))
def test_count_nonzero(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.count_nonzero(q1)
assert type(o) is not u.Quantity
assert o == 8
o = np.count_nonzero(q1, axis=1)
# Returns integer Quantity with units of m
assert type(o) is np.ndarray
assert np.all(o == np.array([2, 3, 3]))
def test_allclose(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
atol = 2 * u.cm
rtol = 1. * u.percent
assert np.allclose(q1, q2, atol=atol)
assert np.allclose(q1, q2, atol=0., rtol=rtol)
def test_allclose_failures(self):
q1 = np.arange(3.) * u.m
q2 = np.array([0., 101., 199.]) * u.cm
with pytest.raises(u.UnitsError):
# Default atol breaks code; TODO: should this work?
assert np.allclose(q1, q2)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=2, rtol=0)
with pytest.raises(u.UnitsError):
np.allclose(q1, q2, atol=0, rtol=1. * u.s)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_array_equal(self):
q1 = np.arange(3.) * u.m
q2 = q1.to(u.cm)
assert np.array_equal(q1, q2)
q3 = q1.value * u.cm
assert not np.array_equal(q1, q3)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_array_equiv(self):
q1 = np.array([[0., 1., 2.]]*3) * u.m
q2 = q1[0].to(u.cm)
assert np.array_equiv(q1, q2)
q3 = q1[0].value * u.cm
assert not np.array_equiv(q1, q3)
class TestNanFunctions(InvariantUnitTestSetup):
def setup(self):
super().setup()
self.q[1, 1] = np.nan
def test_nanmax(self):
self.check(np.nanmax)
def test_nanmin(self):
self.check(np.nanmin)
def test_nanargmin(self):
out = np.nanargmin(self.q)
expected = np.nanargmin(self.q.value)
assert out == expected
def test_nanargmax(self):
out = np.nanargmax(self.q)
expected = np.nanargmax(self.q.value)
assert out == expected
def test_nanmean(self):
self.check(np.nanmean)
def test_nanmedian(self):
self.check(np.nanmedian)
def test_nansum(self):
self.check(np.nansum)
def test_nancumsum(self):
self.check(np.nancumsum)
def test_nanstd(self):
self.check(np.nanstd)
def test_nanvar(self):
out = np.nanvar(self.q)
expected = np.nanvar(self.q.value) * self.q.unit ** 2
assert np.all(out == expected)
def test_nanprod(self):
with pytest.raises(u.UnitsError):
np.nanprod(self.q)
def test_nancumprod(self):
with pytest.raises(u.UnitsError):
np.nancumprod(self.q)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_nanquantile(self):
self.check(np.nanquantile, 0.5)
o = np.nanquantile(self.q, 50 * u.percent)
expected = np.nanquantile(self.q.value, 0.5) * u.m
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_nanpercentile(self):
self.check(np.nanpercentile, 0.5)
o = np.nanpercentile(self.q, 0.5 * u.one)
expected = np.nanpercentile(self.q.value, 50) * u.m
assert np.all(o == expected)
class TestVariousProductFunctions(metaclass=CoverageMeta):
"""
Test functions that are similar to gufuncs
"""
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_cross(self):
q1 = np.arange(6.).reshape(2, 3) * u.m
q2 = np.array([4., 5., 6.]) / u.s
o = np.cross(q1, q2)
expected = np.cross(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_outer(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([1, 2]) / u.s
o = np.outer(q1, q2)
assert np.all(o == np.array([[1, 2], [2, 4], [3, 6]]) * u.m / u.s)
o2 = 0 * o
result = np.outer(q1, q2, out=o2)
assert result is o2
assert np.all(o2 == o)
with pytest.raises(TypeError):
np.outer(q1, q2, out=object())
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_inner(self):
q1 = np.array([1, 2, 3]) * u.m
q2 = np.array([4, 5, 6]) / u.s
o = np.inner(q1, q2)
assert o == 32 * u.m / u.s
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_dot(self):
q1 = np.array([1., 2., 3.]) * u.m
q2 = np.array([4., 5., 6.]) / u.s
o = np.dot(q1, q2)
assert o == 32. * u.m / u.s
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_vdot(self):
q1 = np.array([1j, 2j, 3j]) * u.m
q2 = np.array([4j, 5j, 6j]) / u.s
o = np.vdot(q1, q2)
assert o == (32. + 0j) * u.m / u.s
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_tensordot(self):
# From the docstring example
a = np.arange(60.).reshape(3, 4, 5) * u.m
b = np.arange(24.).reshape(4, 3, 2) / u.s
c = np.tensordot(a, b, axes=([1, 0], [0, 1]))
expected = np.tensordot(a.value, b.value,
axes=([1, 0], [0, 1])) * u.m / u.s
assert np.all(c == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_kron(self):
q1 = np.eye(2) * u.m
q2 = np.ones(2) / u.s
o = np.kron(q1, q2)
expected = np.kron(q1.value, q2.value) * u.m / u.s
assert np.all(o == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_einsum(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.einsum('...i', q1)
assert np.all(o == q1)
o = np.einsum('ii', q1)
expected = np.einsum('ii', q1.value) * u.m
assert np.all(o == expected)
q2 = np.eye(3) / u.s
o2 = np.einsum('ij,jk', q1, q2)
assert np.all(o2 == q1 / u.s)
o3 = 0 * o2
result = np.einsum('ij,jk', q1, q2, out=o3)
assert result is o3
assert np.all(o3 == o2)
def test_einsum_path(self):
q1 = np.arange(9.).reshape(3, 3) * u.m
o = np.einsum_path('...i', q1)
assert o[0] == ['einsum_path', (0,)]
o = np.einsum_path('ii', q1)
assert o[0] == ['einsum_path', (0,)]
q2 = np.eye(3) / u.s
o = np.einsum_path('ij,jk', q1, q2)
assert o[0] == ['einsum_path', (0, 1)]
class TestIntDiffFunctions(metaclass=CoverageMeta):
def test_trapz(self):
y = np.arange(9.) * u.m / u.s
out = np.trapz(y)
expected = np.trapz(y.value) * y.unit
assert np.all(out == expected)
dx = 10. * u.s
out = np.trapz(y, dx=dx)
expected = np.trapz(y.value, dx=dx.value) * y.unit * dx.unit
assert np.all(out == expected)
x = np.arange(9.) * u.s
out = np.trapz(y, x)
expected = np.trapz(y.value, x.value) * y.unit * x.unit
assert np.all(out == expected)
def test_diff(self):
# Simple diff works out of the box.
x = np.arange(10.) * u.m
out = np.diff(x)
expected = np.diff(x.value) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_diff_prepend_append(self):
x = np.arange(10.) * u.m
out = np.diff(x, prepend=-12.5*u.cm, append=1*u.km)
expected = np.diff(x.value, prepend=-0.125, append=1000.) * x.unit
assert np.all(out == expected)
x = np.arange(10.) * u.m
out = np.diff(x, prepend=-12.5*u.cm, append=1*u.km, n=2)
expected = np.diff(x.value, prepend=-0.125, append=1000.,
n=2) * x.unit
assert np.all(out == expected)
with pytest.raises(TypeError):
np.diff(x, prepend=object())
def test_gradient(self):
# Simple gradient works out of the box.
x = np.arange(10.) * u.m
out = np.gradient(x)
expected = np.gradient(x.value) * u.m
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_gradient_spacing(self):
# Gradient with explicit (Quantity) spacing.
x = np.arange(10.) * u.m
spacing = 10. * u.s
out = np.gradient(x, spacing)
expected = np.gradient(x.value, spacing.value) * (x.unit /
spacing.unit)
assert np.all(out == expected)
f = np.array([[1, 2, 6], [3, 4, 5]]) * u.m
dx = 2. * u.s
y = [1., 1.5, 3.5] * u.GHz
dfdx, dfdy = np.gradient(f, dx, y)
exp_dfdx, exp_dfdy = np.gradient(f.value, dx.value, y.value)
exp_dfdx = exp_dfdx * f.unit / dx.unit
exp_dfdy = exp_dfdy * f.unit / y.unit
assert np.all(dfdx == exp_dfdx)
assert np.all(dfdy == exp_dfdy)
dfdx2 = np.gradient(f, dx, axis=0)
assert np.all(dfdx2 == exp_dfdx)
dfdy2 = np.gradient(f, y, axis=(1,))
assert np.all(dfdy2 == exp_dfdy)
class TestSpaceFunctions(metaclass=CoverageMeta):
@pytest.mark.xfail(NUMPY_LT_1_16,
reason="No array-like start, stop in numpy <1.16")
def test_linspace(self):
# Note: linspace gets the unit of the end point, which is not entirely logical.
out = np.linspace(1000.*u.m, 10.*u.km, 5)
expected = np.linspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(6.).reshape(2, 3) * u.m
q2 = 10000. * u.cm
out = np.linspace(q1, q2, 5)
expected = np.linspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_logspace(self):
unit = u.m / u.s**2
out = np.logspace(10.*u.dex(unit), 20*u.dex(unit), 10)
expected = np.logspace(10., 20., 10) * unit
assert np.all(out == expected)
out = np.logspace(10.*u.STmag, 20*u.STmag, 10)
expected = np.logspace(10., 20., 10, base=10.**(-0.4)) * u.ST
assert u.allclose(out, expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_geomspace(self):
out = np.geomspace(1000.*u.m, 10.*u.km, 5)
expected = np.geomspace(1, 10, 5) * u.km
assert np.all(out == expected)
q1 = np.arange(1., 7.).reshape(2, 3) * u.m
q2 = 10000. * u.cm
out = np.geomspace(q1, q2, 5)
expected = np.geomspace(q1.to_value(q2.unit), q2.value, 5) * q2.unit
assert np.all(out == expected)
class TestInterpolationFunctions(metaclass=CoverageMeta):
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_interp(self):
x = np.array([1250., 2750.]) * u.m
xp = np.arange(5.) * u.km
yp = np.arange(5.) * u.day
out = np.interp(x, xp, yp)
expected = np.interp(x.to_value(xp.unit), xp.value, yp.value) * yp.unit
assert np.all(out == expected)
out = np.interp(x, xp, yp.value)
assert type(out) is np.ndarray
assert np.all(out == expected.value)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_piecewise(self):
x = np.linspace(-2.5, 2.5, 6) * u.m
out = np.piecewise(x, [x < 0, x >= 0], [-1*u.s, 1*u.day])
expected = np.piecewise(x.value, [x.value < 0, x.value >= 0],
[-1, 24*3600]) * u.s
assert out.unit == expected.unit
assert np.all(out == expected)
out2 = np.piecewise(x, [x < 1 * u.m, x >= 0],
[-1*u.s, 1*u.day, lambda x: 1*u.hour])
expected2 = np.piecewise(x.value, [x.value < 1, x.value >= 0],
[-1, 24*3600, 3600]) * u.s
assert out2.unit == expected2.unit
assert np.all(out2 == expected2)
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x, [x], [0.])
with pytest.raises(TypeError): # no Quantity in condlist.
np.piecewise(x.value, [x], [0.])
class TestBincountDigitize(metaclass=CoverageMeta):
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_bincount(self):
i = np.array([1, 1, 2, 3, 2, 4])
weights = np.arange(len(i)) * u.Jy
out = np.bincount(i, weights)
expected = np.bincount(i, weights.value) * weights.unit
assert_array_equal(out, expected)
with pytest.raises(TypeError):
np.bincount(weights)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_digitize(self):
x = np.array([1500., 2500., 4500.]) * u.m
bins = np.arange(10.) * u.km
out = np.digitize(x, bins)
expected = np.digitize(x.to_value(bins.unit), bins.value)
assert_array_equal(out, expected)
class TestHistogramFunctions(metaclass=CoverageMeta):
def setup(self):
self.x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
self.y = np.array([1.2, 2.2, 2.4, 3.0, 4.0]) * u.cm
self.weights = np.arange(len(self.x)) / u.s
def check(self, function, *args, value_args=None, value_kwargs=None,
expected_units=None, **kwargs):
"""Check quantities are treated correctly in the histogram function.
Test is done by applying ``function(*args, **kwargs)``, where
the arguments can be quantities, and comparing the result to
``function(*value_args, **value_kwargs)``, with the outputs
converted to quantities using the ``expected_units`` (where `None`
indicates the output is expected to be a regular array).
For ``**value_kwargs``, any regular ``kwargs`` are treated as
defaults, i.e., non-quantity arguments do not have to be repeated.
"""
if value_kwargs is None:
value_kwargs = kwargs
else:
for k, v in kwargs.items():
value_kwargs.setdefault(k, v)
# Get the result, using the Quantity override.
out = function(*args, **kwargs)
# Get the comparison, with non-Quantity arguments.
expected = function(*value_args, **value_kwargs)
# All histogram functions return a tuple of the actual histogram
# and the bin edges. First, check the actual histogram.
out_h = out[0]
expected_h = expected[0]
if expected_units[0] is not None:
expected_h = expected_h * expected_units[0]
assert_array_equal(out_h, expected_h)
# Check bin edges. Here, histogramdd returns an iterable of the
# bin edges as the second return argument, while histogram and
# histogram2d return the bin edges directly.
if function is np.histogramdd:
bin_slice = 1
else:
bin_slice = slice(1, None)
for o_bin, e_bin, e_unit in zip(out[bin_slice],
expected[bin_slice],
expected_units[bin_slice]):
if e_unit is not None:
e_bin = e_bin * e_unit
assert_array_equal(o_bin, e_bin)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_histogram(self):
x = self.x
weights = self.weights
# Plain histogram.
self.check(np.histogram, x,
value_args=(x.value,),
expected_units=(None, x.unit))
# With bins.
self.check(np.histogram, x, [125, 200] * u.cm,
value_args=(x.value, [1.25, 2.]),
expected_units=(None, x.unit))
# With density.
self.check(np.histogram, x, [125, 200] * u.cm, density=True,
value_args=(x.value, [1.25, 2.]),
expected_units=(1/x.unit, x.unit))
# With weights.
self.check(np.histogram, x, [125, 200] * u.cm, weights=weights,
value_args=(x.value, [1.25, 2.]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit))
# With weights and density.
self.check(np.histogram, x, [125, 200] * u.cm,
weights=weights, density=True,
value_args=(x.value, [1.25, 2.]),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit/x.unit, x.unit))
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram(x.value, [125, 200] * u.s)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_histogram_bin_edges(self):
x = np.array([1.1, 1.2, 1.3, 2.1, 5.1]) * u.m
out_b = np.histogram_bin_edges(x)
expected_b = np.histogram_bin_edges(x.value) * x.unit
assert np.all(out_b == expected_b)
# With bins
out2_b = np.histogram_bin_edges(x, [125, 200] * u.cm)
expected2_b = np.histogram_bin_edges(x.value, [1.25, 2.]) * x.unit
assert np.all(out2_b == expected2_b)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x, [125, 200])
with pytest.raises(u.UnitsError):
np.histogram_bin_edges(x.value, [125, 200] * u.s)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_histogram2d(self):
x, y = self.x, self.y
weights = self.weights
# Basic tests with X, Y.
self.check(np.histogram2d, x, y,
value_args=(x.value, y.value),
expected_units=(None, x.unit, y.unit))
# Check units with density.
self.check(np.histogram2d, x, y, density=True,
value_args=(x.value, y.value),
expected_units=(1/(x.unit*y.unit), x.unit, y.unit))
# Check units with weights.
self.check(np.histogram2d, x, y, weights=weights,
value_args=(x.value, y.value),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, x.unit, y.unit))
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.] * u.m
self.check(np.histogram2d, x, y, [5, inb_y],
value_args=(x.value, y.value,
[5, np.array([0, 2.5, 100.])]),
expected_units=(None, x.unit, y.unit))
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.] * u.percent
self.check(np.histogram2d, x.value, y.value, bins=[5, inb2_y],
value_args=(x.value, y.value),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.])]),
expected_units=(None, u.one, u.one))
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogram2d(x, y, 125 * u.s)
with pytest.raises(TypeError):
np.histogram2d(x.value, y.value, 125 * u.s)
# Bin units need to match units of x, y.
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, [125, 200] * u.s)
with pytest.raises(u.UnitsError):
np.histogram2d(x, y, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogram2d(x.value, y.value, [125, 200] * u.s)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_histogramdd(self):
# First, replicate the histogram2d tests, but using the
# histogramdd override, which normally takes the sample as a tuple
# with a given number of dimensions, and returns the histogram
# as well as a tuple of bin edges.
sample = self.x, self.y
sample_units = self.x.unit, self.y.unit
sample_values = (self.x.value, self.y.value)
weights = self.weights
# Basic tests with X, Y
self.check(np.histogramdd, sample,
value_args=(sample_values,),
expected_units=(None, sample_units))
# Check units with density.
self.check(np.histogramdd, sample, density=True,
value_args=(sample_values,),
expected_units=(1/(self.x.unit*self.y.unit),
sample_units))
# Check units with weights.
self.check(np.histogramdd, sample, weights=weights,
value_args=(sample_values,),
value_kwargs=dict(weights=weights.value),
expected_units=(weights.unit, sample_units))
# Check quantity bin sizes.
inb_y = [0, 0.025, 1.] * u.m
self.check(np.histogramdd, sample, [5, inb_y],
value_args=(sample_values, [5, np.array([0, 2.5, 100.])]),
expected_units=(None, sample_units))
# Check we dispatch on bin sizes (and check kwarg as well).
inb2_y = [0, 250, 10000.] * u.percent
self.check(np.histogramdd, sample_values, bins=[5, inb2_y],
value_args=(sample_values,),
value_kwargs=dict(bins=[5, np.array([0, 2.5, 100.])]),
expected_units=(None, (u.one, u.one)))
# For quantities, it is probably not that likely one would pass
# in the sample as an array, but check that it works anyway.
# This also gives a 3-D check.
xyz = np.random.normal(size=(10, 3)) * u.m
self.check(np.histogramdd, xyz,
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,)*3))
# Passing it in as a tuple should work just as well; note the
# *last* axis contains the sample dimension.
self.check(np.histogramdd, (xyz[:, 0], xyz[:, 1], xyz[:, 2]),
value_args=(xyz.value,),
expected_units=(None, (xyz.unit,)*3))
# Single-item bins should be integer, not Quantity.
with pytest.raises(TypeError):
np.histogramdd(sample, 125 * u.s)
# Sequence of single items should be integer.
with pytest.raises(TypeError):
np.histogramdd(sample, [125, 200] * u.s)
with pytest.raises(TypeError):
np.histogramdd(sample_values, [125, 200] * u.s)
# Units of bins should match.
with pytest.raises(u.UnitsError):
np.histogramdd(sample, ([125, 200], [125, 200]))
with pytest.raises(u.UnitsError):
np.histogramdd(sample_values, ([125, 200] * u.s, [125, 200]))
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_correlate(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.correlate(x1, x2)
expected = np.correlate(x1.value, x2.value) * u.m ** 2
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_convolve(self):
x1 = [1, 2, 3] * u.m
x2 = [0, 1, 0.5] * u.m
out = np.convolve(x1, x2)
expected = np.convolve(x1.value, x2.value) * u.m ** 2
assert np.all(out == expected)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_cov(self):
# Do not see how we can use cov with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.cov(x)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_corrcoef(self):
# Do not see how we can use corrcoef with Quantity
x = np.array([[0, 2], [1, 1], [2, 0]]).T * u.m
with pytest.raises(TypeError):
np.corrcoef(x)
class TestSortFunctions(InvariantUnitTestSetup):
def test_sort(self):
self.check(np.sort)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_sort_complex(self):
self.check(np.sort_complex)
def test_msort(self):
self.check(np.msort)
def test_partition(self):
self.check(np.partition, 2)
class TestStringFunctions(metaclass=CoverageMeta):
# For all these functions, we could change it to work on Quantity,
# but it would mean deviating from the docstring. Not clear whether
# that is worth it.
def setup(self):
self.q = np.arange(3.) * u.Jy
@pytest.mark.xfail
def test_array2string(self):
out = np.array2string(self.q)
expected = str(self.q)
assert out == expected
@pytest.mark.xfail
def test_array_repr(self):
out = np.array_repr(self.q)
expected = (np.array_repr(self.q.value)[:-1] +
', {!r})'.format(str(self.q.unit)))
assert out == expected
@pytest.mark.xfail
def test_array_str(self):
out = np.array_str(self.q)
expected = str(self.q)
assert out == expected
class TestBitAndIndexFunctions(metaclass=CoverageMeta):
# Index/bit functions generally fail for floats, so the usual
# float quantities are safe, but the integer ones are not.
def setup(self):
self.q = np.arange(3) * u.m
self.uint_q = u.Quantity(np.arange(3), 'm', dtype='u1')
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_packbits(self):
with pytest.raises(TypeError):
np.packbits(self.q)
with pytest.raises(TypeError):
np.packbits(self.uint_q)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_unpackbits(self):
with pytest.raises(TypeError):
np.unpackbits(self.q)
with pytest.raises(TypeError):
np.unpackbits(self.uint_q)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_unravel_index(self):
with pytest.raises(TypeError):
np.unravel_index(self.q, 3)
with pytest.raises(TypeError):
np.unravel_index(self.uint_q, 3)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_ravel_multi_index(self):
with pytest.raises(TypeError):
np.ravel_multi_index((self.q,), 3)
with pytest.raises(TypeError):
np.ravel_multi_index((self.uint_q,), 3)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_ix_(self):
with pytest.raises(TypeError):
np.ix_(self.q)
with pytest.raises(TypeError):
np.ix_(self.uint_q)
class TestDtypeFunctions(NoUnitTestSetup):
def test_common_type(self):
self.check(np.common_type)
def test_result_type(self):
self.check(np.result_type)
def test_can_cast(self):
self.check(np.can_cast, self.q.dtype)
self.check(np.can_cast, 'f4')
def test_min_scalar_type(self):
out = np.min_scalar_type(self.q[0])
expected = np.min_scalar_type(self.q.value[0])
assert out == expected
def test_iscomplexobj(self):
self.check(np.iscomplexobj)
def test_isrealobj(self):
self.check(np.isrealobj)
class TestMeshGrid(metaclass=CoverageMeta):
def test_meshgrid(self):
q1 = np.arange(3.) * u.m
q2 = np.arange(5.) * u.s
o1, o2 = np.meshgrid(q1, q2)
e1, e2 = np.meshgrid(q1.value, q2.value)
assert np.all(o1 == e1 * q1.unit)
assert np.all(o2 == e2 * q2.unit)
class TestMemoryFunctions(NoUnitTestSetup):
def test_shares_memory(self):
self.check(np.shares_memory, self.q.value)
def test_may_share_memory(self):
self.check(np.may_share_memory, self.q.value)
class TestSetOpsFunctions(metaclass=CoverageMeta):
def setup(self):
self.q = np.array([[0., 1., -1.],
[3., 5., 3.],
[0., 1., -1]]) * u.m
self.q2 = np.array([0., 100., 150., 200.]) * u.cm
def check(self, function, qs, *args, **kwargs):
unit = kwargs.pop('unit', self.q.unit)
out = function(*qs, *args, **kwargs)
qv = tuple(q.to_value(self.q.unit) for q in qs)
expected = function(*qv, *args, **kwargs)
if isinstance(expected, tuple):
if unit:
expected = (expected[0] * unit,) + expected[1:]
for o, e in zip(out, expected):
assert_array_equal(o, e)
else:
if unit:
expected = expected * unit
assert_array_equal(out, expected)
def check1(self, function, *args, **kwargs):
self.check(function, (self.q,), *args, **kwargs)
def check2(self, function, *args, **kwargs):
self.check(function, (self.q, self.q2), *args, **kwargs)
@pytest.mark.parametrize('kwargs', (
dict(return_index=True, return_inverse=True),
dict(return_counts=True),
dict(return_index=True, return_inverse=True, return_counts=True)))
def test_unique(self, kwargs):
self.check1(np.unique, **kwargs)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
@pytest.mark.parametrize('kwargs', (
dict(axis=0),
dict(axis=1),
dict(return_counts=True, return_inverse=False, axis=1)))
def test_unique_more_complex(self, kwargs):
self.check1(np.unique, **kwargs)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
@pytest.mark.parametrize('kwargs', (
dict(),
dict(return_indices=True)))
def test_intersect1d(self, kwargs):
self.check2(np.intersect1d, **kwargs)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_setxor1d(self):
self.check2(np.setxor1d)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_union1d(self):
self.check2(np.union1d)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_setdiff1d(self):
self.check2(np.setdiff1d)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_in1d(self):
self.check2(np.in1d, unit=None)
@pytest.mark.xfail(NO_ARRAY_FUNCTION,
reason="Needs __array_function__ support")
def test_isin(self):
self.check2(np.isin, unit=None)
def test_ediff1d(self):
# ediff1d always works, as it calls the Quantity method.
self.check1(np.ediff1d)
x = np.arange(10.) * u.m
out = np.ediff1d(x, to_begin=-12.5*u.cm, to_end=1*u.km)
expected = np.ediff1d(x.value, to_begin=-0.125, to_end=1000.) * x.unit
assert_array_equal(out, expected)
import matplotlib.pyplot as plt
from statistics import mean
import csv
import numpy as np
with open('./trajectory_x.csv') as f:
reader = csv.reader(f)
centers_x = [int(row[0]) for row in reader]
with open('./trajectory_y.csv') as f:
reader = csv.reader(f)
centers_y = [int(row[0]) for row in reader]
def dist(x, y):
return (x - y)**2
def get_min(m0, m1, m2, i, j):
if m0 < m1:
if m0 < m2:
return i - 1, j, m0
else:
return i - 1, j - 1, m2
else:
if m1 < m2:
return i, j - 1, m1
else:
return i - 1, j - 1, m2
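# get_min() above is the dynamic-programming step of DTW: given the accumulated
# costs of the three admissible predecessors of cell (i, j), namely (i-1, j),
# (i, j-1) and the diagonal (i-1, j-1), it returns the index pair of the
# cheapest predecessor together with its cost, which also serves as the
# backtracking pointer.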
def partial_dtw(x, y):
Tx = len(x)
Ty = len(y)
C = np.zeros((Tx, Ty))
B = np.zeros((Tx, Ty, 2), int)
from sys import argv
import numpy as np
import scipy as sp
from scipy.linalg import eig,svd,eigh
from scipy.sparse.linalg import eigs
from sklearn.neighbors import kneighbors_graph
from copy import deepcopy
from .utils import *
from pymanopt.manifolds import Grassmann
import nudged
from sklearn.metrics.pairwise import pairwise_distances
def findSingleLP(X,d,k,sigma,embMethod='lpp'):
D,N = X.shape
W = np.zeros((N,N))
B = np.zeros((N,N))
if embMethod == 'pca':
for i in range(N-1):
for j in range(i+1,N):
W[i,j] = 1.0/N
W = 0.5*(W + W.T)
B = np.eye(N)
L = B - W
M1 = X.dot(L).dot(X.T)
Mc = np.eye(M1.shape[0])
import numpy as np
import matplotlib.pyplot as plt
import csv
from scipy.optimize import minimize
from functools import partial
###############################################################################
#
# Jiles-Atherton Equation solving functions
#
###############################################################################
def coth(x):
# Hyperbolic cotangent (syntactic sugar)
return 1 / np.tanh(x)
def L(x):
# Langevin function
if x == 0:
return 0
else:
return coth(x) - 1 / x
def dLdx(x):
# Derivative of langevin function
if x == 0:
return 1 / 3
else:
return 1 - coth(x) ** 2 + 1 / x ** 2
def dMdH(M, H, Ms, a, alpha, k, c, delta):
# Derivative of magnetization
He = H + alpha * M
Man = Ms * L(He / a)
dM = Man - M
dMdH_num = dM / (delta * k - alpha * dM) + c * Ms / a * dLdx(He / a)
dMdH_den = (1 + c - c * alpha * Ms / a * dLdx(He / a))
return dMdH_num / dMdH_den
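# The expression above is the usual Jiles-Atherton constitutive relation,
#   dM/dH = [dM/(delta*k - alpha*dM) + c*(Ms/a)*L'(He/a)]
#           / [1 + c - c*alpha*(Ms/a)*L'(He/a)],
# with effective field He = H + alpha*M, anhysteretic magnetization
# Man = Ms*L(He/a), dM = Man - M, and delta = sign(dH) selecting the branch.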
def euler(dMdH, M0, H):
# Euler ODE integrator for J-A equation
M = [M0]
for i in range(len(H) - 1):
dH_i = H[i + 1] - H[i]
dMdH_i = dMdH(M[i], H[i + 1], delta=np.sign(dH_i))
M.append(M[i] + dMdH_i * dH_i)
return M
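# A minimal usage sketch (illustrative only; the Jiles-Atherton parameters below
# are placeholders, not values fitted by this script): fix the material
# constants with functools.partial and integrate along an increasing field.
def _example_initial_curve():
    dMdH_fixed = partial(dMdH, Ms=1.6e6, a=1100.0, alpha=1.6e-3, k=400.0, c=0.2)
    H = np.linspace(0.0, 5000.0, 500)
    return euler(dMdH_fixed, M0=0.0, H=H)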
def H_arr(Hlimit, curve_type):
# External field intensity input
if curve_type == 'initial':
H = np.linspace(0, Hlimit, 500, endpoint=True)
elif curve_type == 'loop':
H1 = np.linspace(Hlimit, -Hlimit, 1000, endpoint=False)
H2 = np.linspace(-Hlimit, Hlimit, 1000, endpoint=True)
H = np.append(H1, H2)
elif curve_type == 'full':
H1 = np.linspace(0, Hlimit, 500, endpoint=False)
H2 = np.linspace(Hlimit, -Hlimit, 1000, endpoint=False)
H3 = np.linspace(-Hlimit, Hlimit, 1000, endpoint=True)
H = np.append(H1, np.append(H2, H3))
################################################################################
# Copyright (C) 2013-2014 <NAME>
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for gaussian_markov_chain module.
"""
import numpy as np
from ..gaussian_markov_chain import GaussianMarkovChain
from ..gaussian_markov_chain import VaryingGaussianMarkovChain
from ..gaussian import Gaussian, GaussianMoments
from ..gaussian import GaussianARD
from ..gaussian import GaussianGamma
from ..wishart import Wishart, WishartMoments
from ..gamma import Gamma, GammaMoments
from bayespy.utils import random
from bayespy.utils import linalg
from bayespy.utils import misc
from bayespy.utils.misc import TestCase
def kalman_filter(y, U, A, V, mu0, Cov0, out=None):
"""
Perform Kalman filtering to obtain filtered mean and covariance.
The parameters of the process may vary in time, thus they are
given as iterators instead of fixed values.
Parameters
----------
y : (N,D) array
"Normalized" noisy observations of the states, that is, the
observations multiplied by the precision matrix U (and possibly
other transformation matrices).
U : (N,D,D) array or N-list of (D,D) arrays
Precision matrix (i.e., inverse covariance matrix) of the observation
noise for each time instance.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Filtered mean of the states.
Cov : array
Filtered covariance of the states.
See also
--------
rts_smoother
"""
mu = mu0
Cov = Cov0
# Allocate memory for the results
(N,D) = np.shape(y)
X = np.empty((N,D))
CovX = np.empty((N,D,D))
# Update step for t=0
M = np.dot(np.dot(Cov, U[0]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[0]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
X[0,:] = mu
CovX[0,:,:] = Cov
#for (yn, Un, An, Vn) in zip(y, U, A, V):
for n in range(len(y)-1): #(yn, Un, An, Vn) in zip(y, U, A, V):
# Prediction step
mu = np.dot(A[n], mu)
Cov = np.dot(np.dot(A[n], Cov), A[n].T) + V[n]
# Update step
M = np.dot(np.dot(Cov, U[n+1]), Cov) + Cov
L = linalg.chol(M)
mu = np.dot(Cov, linalg.chol_solve(L, np.dot(Cov,y[n+1]) + mu))
Cov = np.dot(Cov, linalg.chol_solve(L, Cov))
# Force symmetric covariance (for numeric inaccuracy)
Cov = 0.5*Cov + 0.5*Cov.T
# Store results
X[n+1,:] = mu
CovX[n+1,:,:] = Cov
return (X, CovX)
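# A minimal smoke-test sketch (illustrative only, not part of the original
# module): run the filter on a D-dimensional random walk with identity
# dynamics, unit innovation noise and unit observation precision. Note that
# kalman_filter() expects observations already multiplied by U; with U = I
# that is just y itself.
def _example_kalman_filter(N=10, D=2):
    y = np.random.randn(N, D)
    U = N * [np.identity(D)]
    A = (N - 1) * [np.identity(D)]
    V = (N - 1) * [np.identity(D)]
    return kalman_filter(y, U, A, V, np.zeros(D), np.identity(D))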
def rts_smoother(mu, Cov, A, V, removethis=None):
"""
Perform Rauch-Tung-Striebel smoothing to obtain the posterior.
The function returns the posterior mean and covariance of each
state. The parameters of the process may vary in time, thus they
are given as iterators instead of fixed values.
Parameters
----------
mu : (N,D) array
Mean of the states from Kalman filter.
Cov : (N,D,D) array
Covariance of the states from Kalman filter.
A : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Dynamic matrix for each time instance.
V : (N-1,D,D) array or (N-1)-list of (D,D) arrays
Covariance matrix of the innovation noise for each time instance.
Returns
-------
mu : array
Posterior mean of the states.
Cov : array
Posterior covariance of the states.
See also
--------
kalman_filter
"""
N = len(mu)
#n = N-1
# Start from the last time instance and smoothen backwards
x = mu[-1,:]
Covx = Cov[-1,:,:]
for n in reversed(range(N-1)):#(An, Vn) in zip(reversed(A), reversed(V)):
#n = n - 1
#if n <= 0:
# break
# The predicted value of n
x_p = np.dot(A[n], mu[n,:])
Cov_p = np.dot(np.dot(A[n], Cov[n,:,:]), A[n].T) + V[n]
# Temporary variable
S = np.linalg.solve(Cov_p, np.dot(A[n], Cov[n,:,:]))
# Smoothed value of n
x = mu[n,:] + np.dot(S.T, x-x_p)
Covx = Cov[n,:,:] + np.dot(np.dot(S.T, Covx-Cov_p), S)
# Force symmetric covariance (for numeric inaccuracy)
Covx = 0.5*Covx + 0.5*Covx.T
# Store results
mu[n,:] = x
Cov[n,:] = Covx
return (mu, Cov)
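# Example (illustrative): filter and then smooth observations Y of shape (N, D)
# with dynamics A and innovation covariance V; with U[n] = identity the
# "normalized" observations are simply Y itself.
#   U = N * (np.identity(D),)
#   (Xf, CovXf) = kalman_filter(Y, U, A, V, np.zeros(D), np.identity(D))
#   (Xs, CovXs) = rts_smoother(Xf, CovXf, A, V)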
class TestGaussianMarkovChain(TestCase):
def create_model(self, N, D):
# Construct the model
Mu = Gaussian(np.random.randn(D),
np.identity(D))
Lambda = Wishart(D,
random.covariance(D))
A = Gaussian(np.random.randn(D,D),
np.identity(D))
V = Gamma(D,
np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N)
Y = Gaussian(X, np.identity(D))
return (Y, X, Mu, Lambda, A, V)
def test_plates(self):
"""
Test that plates are handled correctly.
"""
def test_message_to_mu0(self):
pass
def test_message_to_Lambda0(self):
pass
def test_message_to_A(self):
pass
def test_message_to_v(self):
pass
def test_message_to_parents(self):
""" Check gradient passed to inputs parent node """
N = 3
D = 2
Mu = Gaussian(np.random.randn(D), random.covariance(D))
Lambda = Wishart(D, random.covariance(D))
A = Gaussian(np.random.randn(D,D), random.covariance(D))
V = Gamma(D, np.random.rand(D))
X = GaussianMarkovChain(Mu, Lambda, A, V, n=N+1)
Y = Gaussian(X, random.covariance(D))
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=1),
u[2]
]
)
Y.observe(np.random.randn(N+1, D))
self.assert_message_to_parent(X, Mu, eps=1e-8)
self.assert_message_to_parent(
X,
Lambda,
eps=1e-8,
postprocess=lambda u: [
u[0] + linalg.transpose(u[0], ndim=1),
u[1],
]
)
self.assert_message_to_parent(X, A)
self.assert_message_to_parent(X, V, eps=1e-10, atol=1e-5)
pass
def test_message_to_parents_with_inputs(self):
""" Check gradient passed to inputs parent node """
def check(Mu, Lambda, A, V, U):
X = GaussianMarkovChain(Mu, Lambda, A, V, inputs=U)
Y = Gaussian(X, random.covariance(D))
# Check moments
self.assert_moments(
X,
postprocess=lambda u: [
u[0],
u[1] + linalg.transpose(u[1], ndim=1),
u[2]
]
)
Y.observe(np.random.randn(N+1, D))
X.update()
# Check gradient messages to parents
self.assert_message_to_parent(X, Mu)
self.assert_message_to_parent(
X,
Lambda,
postprocess=lambda phi: [
phi[0] + linalg.transpose(phi[0], ndim=1),
phi[1]
]
)
self.assert_message_to_parent(
X,
A,
postprocess=lambda phi: [
phi[0],
phi[1] + linalg.transpose(phi[1], ndim=1),
]
)
self.assert_message_to_parent(X, V)
self.assert_message_to_parent(X, U)
N = 4
D = 2
K = 3
check(
Gaussian(
np.random.randn(D),
random.covariance(D)
),
Wishart(
D,
random.covariance(D)
),
Gaussian(
np.random.randn(D,D+K),
random.covariance(D+K)
),
Gamma(
D,
np.random.rand(D)
),
Gaussian(
np.random.randn(N,K),
random.covariance(K)
)
)
check(
Gaussian(
np.random.randn(D),
random.covariance(D)
),
Wishart(
D,
random.covariance(D)
),
GaussianGamma(
np.random.randn(D,D+K),
random.covariance(D+K),
D,
np.random.rand(D),
ndim=1
),
Gamma(
D,
np.random.rand(D)
),
Gaussian(
np.random.randn(N,K),
random.covariance(K)
)
)
pass
def test_message_to_child(self):
"""
Test the updating of GaussianMarkovChain.
Check that the moments and the lower bound contribution are computed
correctly.
"""
# TODO: Add plates and missing values!
# Dimensionalities
D = 3
N = 5
(Y, X, Mu, Lambda, A, V) = self.create_model(N, D)
# Inference with arbitrary observations
y = np.random.randn(N,D)
Y.observe(y)
X.update()
(x_vb, xnxn_vb, xpxn_vb) = X.get_moments()
# Get parameter moments
(mu0, mumu0) = Mu.get_moments()
(icov0, logdet0) = Lambda.get_moments()
(a, aa) = A.get_moments()
(icov_x, logdetx) = V.get_moments()
icov_x = np.diag(icov_x)
# Prior precision
Z = np.einsum('...kij,...kk->...ij', aa, icov_x)
U_diag = [icov0+Z] + (N-2)*[icov_x+Z] + [icov_x]
U_super = (N-1) * [-np.dot(a.T, icov_x)]
U = misc.block_banded(U_diag, U_super)
# Prior mean
mu_prior = np.zeros(D*N)
mu_prior[:D] = np.dot(icov0,mu0)
# Data
Cov = np.linalg.inv(U + np.identity(D*N))
mu = np.dot(Cov, mu_prior + y.flatten())
# Moments
xx = mu[:,np.newaxis]*mu[np.newaxis,:] + Cov
mu = np.reshape(mu, (N,D))
xx = np.reshape(xx, (N,D,N,D))
# Check results
self.assertAllClose(x_vb, mu,
msg="Incorrect mean")
for n in range(N):
self.assertAllClose(xnxn_vb[n,:,:], xx[n,:,n,:],
msg="Incorrect second moment")
for n in range(N-1):
self.assertAllClose(xpxn_vb[n,:,:], xx[n,:,n+1,:],
msg="Incorrect lagged second moment")
# Compute the entropy H(X)
ldet = linalg.logdet_cov(Cov)
H = random.gaussian_entropy(-ldet, N*D)
# Compute <log p(X|...)>
xx = np.reshape(xx, (N*D, N*D))
mu = np.reshape(mu, (N*D,))
ldet = -logdet0 - np.sum(np.ones((N-1,D))*logdetx)
P = random.gaussian_logpdf(np.einsum('...ij,...ij',
xx,
U),
np.einsum('...i,...i',
mu,
mu_prior),
np.einsum('...ij,...ij',
mumu0,
icov0),
-ldet,
N*D)
# The VB bound from the net
l = X.lower_bound_contribution()
self.assertAllClose(l, H+P)
# Compute the true bound <log p(X|...)> + H(X)
#
# Simple tests
#
def check(N, D, plates=None, mu=None, Lambda=None, A=None, V=None):
if mu is None:
mu = np.random.randn(D)
if Lambda is None:
Lambda = random.covariance(D)
if A is None:
A = np.random.randn(D,D)
if V is None:
V = np.random.rand(D)
X = GaussianMarkovChain(mu,
Lambda,
A,
V,
plates=plates,
n=N)
(u0, u1, u2) = X._message_to_child()
(mu, mumu) = Gaussian._ensure_moments(mu, GaussianMoments, ndim=1).get_moments()
(Lambda, _) = Wishart._ensure_moments(Lambda, WishartMoments, ndim=1).get_moments()
(a, aa) = Gaussian._ensure_moments(A, GaussianMoments, ndim=1).get_moments()
a = a * np.ones((N-1,D,D)) # explicit broadcasting for simplicity
aa = aa * np.ones((N-1,D,D,D)) # explicit broadcasting for simplicity
(v, _) = Gamma._ensure_moments(V, GammaMoments).get_moments()
v = v * np.ones((N-1,D))
plates_C = X.plates
plates_mu = X.plates
C = np.zeros(plates_C + (N,D,N,D))
plates_mu = np.shape(mu)[:-1]
m = np.zeros(plates_mu + (N,D))
m[...,0,:] = np.einsum('...ij,...j->...i', Lambda, mu)
C[...,0,:,0,:] = Lambda + np.einsum('...dij,...d->...ij',
aa[...,0,:,:,:],
v[...,0,:])
for n in range(1,N-1):
C[...,n,:,n,:] = (np.einsum('...dij,...d->...ij',
aa[...,n,:,:,:],
v[...,n,:])
+ v[...,n,:,None] * np.identity(D))
for n in range(N-1):
C[...,n,:,n+1,:] = -np.einsum('...di,...d->...id',
a[...,n,:,:],
v[...,n,:])
C[...,n+1,:,n,:] = -np.einsum('...di,...d->...di',
a[...,n,:,:],
v[...,n,:])
C[...,-1,:,-1,:] = v[...,-1,:,None]*np.identity(D)
C = np.reshape(C, plates_C+(N*D,N*D))
Cov = np.linalg.inv(C)
Cov = np.reshape(Cov, plates_C+(N,D,N,D))
m0 = np.einsum('...minj,...nj->...mi', Cov, m)
m1 = np.zeros(plates_C+(N,D,D))
m2 = np.zeros(plates_C+(N-1,D,D))
for n in range(N):
m1[...,n,:,:] = Cov[...,n,:,n,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n,:])
for n in range(N-1):
m2[...,n,:,:] = Cov[...,n,:,n+1,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n+1,:])
self.assertAllClose(m0, u0*np.ones(np.shape(m0)))
self.assertAllClose(m1, u1*np.ones(np.shape(m1)))
self.assertAllClose(m2, u2*np.ones(np.shape(m2)))
pass
check(4,1)
check(4,3)
#
# Test mu
#
# Simple
check(4,3,
mu=Gaussian(np.random.randn(3),
random.covariance(3)))
# Plates
check(4,3,
mu=Gaussian(np.random.randn(5,6,3),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=(5,)))
check(4,3,
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,
plates=(5,),
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=()))
check(4,3,
plates=(5,),
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(1,)))
#
# Test Lambda
#
# Simple
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3)))
# Plates
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,)))
check(4,3,
Lambda=Wishart(10+np.random.rand(1),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=()))
check(4,3,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(1,)))
#
# Test A
#
# Simple
check(4,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(3,)))
# Plates on time axis
check(5,3,
A=GaussianARD(np.random.randn(4,3,3),
np.random.rand(4,3,3),
shape=(3,),
plates=(4,3)))
# Plates on time axis with broadcasted moments
check(5,3,
A=GaussianARD(np.random.randn(1,3,3),
np.random.rand(1,3,3),
shape=(3,),
plates=(4,3)))
check(5,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(4,3)))
# Plates
check(4,3,
A=GaussianARD(np.random.randn(5,6,1,3,3),
np.random.rand(5,6,1,3,3),
shape=(3,),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(5,1,3)))
check(4,3,
A=GaussianARD(np.random.randn(1,1,3,3),
np.random.rand(1,1,3,3),
shape=(3,),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,
plates=(5,),
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(3,)))
check(4,3,
plates=(5,),
A=GaussianARD(np.random.randn(3,3),
np.random.rand(3,3),
shape=(3,),
plates=(1,1,3)))
#
# Test v
#
# Simple
check(4,3,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,
V=Gamma(np.random.rand(3),
np.random.rand(3),
plates=(3,)))
# Plates
check(4,3,
V=Gamma(np.random.rand(5,6,1,3),
np.random.rand(5,6,1,3),
plates=(5,6,1,3)))
# Plates with moments broadcasted over plates
check(4,3,
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(5,1,3)))
check(4,3,
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(5,1,3)))
# Plates broadcasting
check(4,3,
plates=(5,),
V=Gamma(np.random.rand(1,3),
np.random.rand(1,3),
plates=(1,3)))
check(4,3,
plates=(5,),
V=Gamma(np.random.rand(1,1,3),
np.random.rand(1,1,3),
plates=(1,1,3)))
#
# Check with input signals
#
mu = 2
Lambda = 3
A = 4
B = 5
v = 6
inputs = [[-2], [3]]
X = GaussianMarkovChain([mu], [[Lambda]], [[A,B]], [v], inputs=inputs)
V = (np.array([[v*A**2, -v*A, 0],
[-v*A, v*A**2, -v*A],
[0, -v*A, 0]]) +
np.array([[Lambda, 0, 0],
[0, v, 0],
[0, 0, v]]))
m = (np.array([Lambda*mu, 0, 0]) +
np.array([0, v*B*inputs[0][0], v*B*inputs[1][0]]) -
np.array([v*A*B*inputs[0][0], v*A*B*inputs[1][0], 0]))
Cov = np.linalg.inv(V)
mean = np.dot(Cov, m)
X.update()
u = X.get_moments()
self.assertAllClose(u[0], mean[:,None])
self.assertAllClose(u[1] - u[0][...,None,:]*u[0][...,:,None],
Cov[(0,1,2),(0,1,2),None,None])
self.assertAllClose(u[2] - u[0][...,:-1,:,None]*u[0][...,1:,None,:],
Cov[(0,1),(1,2),None,None])
pass
def test_smoothing(self):
"""
Test the posterior estimation of GaussianMarkovChain.
Create time-variant dynamics and compare the results of BayesPy VB
inference and standard Kalman filtering & smoothing.
This is not that useful anymore, because the moments are checked much
better in another test method.
"""
#
# Set up an artificial system
#
# Dimensions
N = 500
D = 2
# Dynamics (time varying)
A0 = np.array([[.9, -.4], [.4, .9]])
A1 = np.array([[.98, -.1], [.1, .98]])
l = np.linspace(0, 1, N-1).reshape((-1,1,1))
A = (1-l)*A0 + l*A1
# Innovation covariance matrix (time varying)
v = np.random.rand(D)
V = np.diag(v)
# Observation noise covariance matrix
C = np.identity(D)
#
# Simulate data
#
X = np.empty((N,D))
Y = np.empty((N,D))
x = np.array([0.5, -0.5])
X[0,:] = x
Y[0,:] = x + np.random.multivariate_normal(np.zeros(D), C)
for n in range(N-1):
x = np.dot(A[n,:,:],x) + np.random.multivariate_normal(np.zeros(D), V)
X[n+1,:] = x
Y[n+1,:] = x + np.random.multivariate_normal(np.zeros(D), C)
#
# BayesPy inference
#
# Construct VB model
Xh = GaussianMarkovChain(np.zeros(D), np.identity(D), A, 1/v, n=N)
Yh = Gaussian(Xh, np.identity(D), plates=(N,))
# Put data
Yh.observe(Y)
# Run inference
Xh.update()
# Store results
Xh_vb = Xh.u[0]
CovXh_vb = Xh.u[1] - Xh_vb[...,np.newaxis,:] * Xh_vb[...,:,np.newaxis]
#
# "The ground truth" using standard Kalman filter and RTS smoother
#
V = N*(V,)
UY = Y
U = N*(C,)
(Xh, CovXh) = kalman_filter(UY, U, A, V, np.zeros(D), np.identity(D))
(Xh, CovXh) = rts_smoother(Xh, CovXh, A, V)
#
# Check results
#
self.assertTrue(np.allclose(Xh_vb, Xh))
self.assertTrue(np.allclose(CovXh_vb, CovXh))
class TestVaryingGaussianMarkovChain(TestCase):
def test_plates_from_parents(self):
"""
Test that VaryingGaussianMarkovChain deduces plates correctly
"""
def check(plates_X,
plates_mu=(),
plates_Lambda=(),
plates_B=(),
plates_S=(),
plates_v=()):
D = 3
K = 2
N = 4
np.random.seed(42)
mu = Gaussian(np.random.randn(*(plates_mu+(D,))),
random.covariance(D))
Lambda = Wishart(D+np.ones(plates_Lambda),
random.covariance(D))
B = GaussianARD(np.random.randn(*(plates_B+(D,D,K))),
1+np.random.rand(*(plates_B+(D,D,K))),
shape=(D,K),
plates=plates_B+(D,))
S = GaussianARD(np.random.randn(*(plates_S+(N,K))),
1+np.random.rand(*(plates_S+(N,K))),
shape=(K,),
plates=plates_S+(N,))
v = Gamma(1+np.random.rand(*(plates_v+(1,D))),
1+np.random.rand(*(plates_v+(1,D))))
X = VaryingGaussianMarkovChain(mu, Lambda, B, S, v, name="X")
self.assertEqual(plates_X, X.plates,
msg="Incorrect plates deduced")
pass
check(())
check((2,3),
plates_mu=(2,3))
check((6,7),
plates_Lambda=(6,7))
check((2,3),
plates_B=(2,3))
check((2,3),
plates_S=(2,3))
check((2,3),
plates_v=(2,3))
pass
def test_message_to_child(self):
# A very simple check before the more complex ones:
# 1-D process, k=1, fixed constant parameters
m = 1.0
l = 4.0
b = 2.0
s = [3.0, 8.0]
v = 5.0
X = VaryingGaussianMarkovChain([m],
[[l]],
[[[b]]],
[[s[0]],[s[1]]],
[v])
(u0, u1, u2) = X._message_to_child()
C = np.array([[l+b**2*s[0]**2*v, -b*s[0]*v, 0],
[ -b*s[0]*v, v+b**2*s[1]**2*v, -b*s[1]*v],
[ 0, -b*s[1]*v, v]])
Cov = np.linalg.inv(C)
m0 = np.dot(Cov, [[l*m], [0], [0]])
m1 = np.diag(Cov)[:,None,None] + m0[:,:,None]**2
m2 = np.diag(Cov, k=1)[:,None,None] + m0[1:,:,None]*m0[:-1,:,None]
self.assertAllClose(m0, u0)
self.assertAllClose(m1, u1)
self.assertAllClose(m2, u2)
def check(N, D, K, plates=None, mu=None, Lambda=None, B=None, S=None, V=None):
if mu is None:
mu = np.random.randn(D)
if Lambda is None:
Lambda = random.covariance(D)
if B is None:
B = np.random.randn(D,D,K)
if S is None:
S = np.random.randn(N-1,K)
if V is None:
V = np.random.rand(D)
X = VaryingGaussianMarkovChain(mu,
Lambda,
B,
S,
V,
plates=plates,
n=N)
(u0, u1, u2) = X._message_to_child()
(mu, mumu) = X.parents[0].get_moments()
(Lambda, _) = X.parents[1].get_moments()
(b, bb) = X.parents[2].get_moments()
(s, ss) = X.parents[3].get_moments()
(v, _) = X.parents[4].get_moments()
v = v * np.ones((N-1,D))
#V = np.atleast_3d(v)[...,-1,:,None]*np.identity(D)
plates_C = X.plates
plates_mu = X.plates
C = np.zeros(plates_C + (N,D,N,D))
plates_mu = np.shape(mu)[:-1]
m = np.zeros(plates_mu + (N,D))
m[...,0,:] = np.einsum('...ij,...j->...i', Lambda, mu)
#m = np.reshape(m, plates_mu + (N*D,))
A = np.einsum('...dik,...nk->...ndi', b, s)
AA = np.einsum('...dikjl,...nkl->...ndij', bb, ss)
C[...,0,:,0,:] = Lambda + np.einsum('...dij,...d->...ij',
AA[...,0,:,:,:],
v[...,0,:])
for n in range(1,N-1):
C[...,n,:,n,:] = (np.einsum('...dij,...d->...ij',
AA[...,n,:,:,:],
v[...,n,:])
+ v[...,n,:,None] * np.identity(D))
for n in range(N-1):
C[...,n,:,n+1,:] = -np.einsum('...di,...d->...id',
A[...,n,:,:],
v[...,n,:])
C[...,n+1,:,n,:] = -np.einsum('...di,...d->...di',
A[...,n,:,:],
v[...,n,:])
C[...,-1,:,-1,:] = v[...,-1,:,None]*np.identity(D)
C = np.reshape(C, plates_C+(N*D,N*D))
Cov = np.linalg.inv(C)
Cov = np.reshape(Cov, plates_C+(N,D,N,D))
m0 = np.einsum('...minj,...nj->...mi', Cov, m)
m1 = np.zeros(plates_C+(N,D,D))
m2 = np.zeros(plates_C+(N-1,D,D))
for n in range(N):
m1[...,n,:,:] = Cov[...,n,:,n,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n,:])
for n in range(N-1):
m2[...,n,:,:] = Cov[...,n,:,n+1,:] + np.einsum('...i,...j->...ij',
m0[...,n,:],
m0[...,n+1,:])
self.assertAllClose(m0, u0*np.ones(np.shape(m0)))
self.assertAllClose(m1, u1*np.ones(np.shape(m1)))
self.assertAllClose(m2, u2*np.ones(np.shape(m2)))
pass
check(2,1,1)
check(2,3,1)
check(2,1,3)
check(4,3,2)
#
# Test mu
#
# Simple
check(4,3,2,
mu=Gaussian(np.random.randn(3),
random.covariance(3)))
# Plates
check(4,3,2,
mu=Gaussian(np.random.randn(5,6,3),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,2,
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=(5,)))
check(4,3,2,
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
mu=Gaussian(np.random.randn(3),
random.covariance(3),
plates=()))
check(4,3,2,
plates=(5,),
mu=Gaussian(np.random.randn(1,3),
random.covariance(3),
plates=(1,)))
#
# Test Lambda
#
# Simple
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3)))
# Plates
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,6)))
# Plates with moments broadcasted over plates
check(4,3,2,
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(5,)))
check(4,3,2,
Lambda=Wishart(10+np.random.rand(1),
random.covariance(3),
plates=(5,)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=()))
check(4,3,2,
plates=(5,),
Lambda=Wishart(10+np.random.rand(),
random.covariance(3),
plates=(1,)))
#
# Test B
#
# Simple
check(4,3,2,
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(3,)))
# Plates
check(4,3,2,
B=GaussianARD(np.random.randn(5,6,3,3,2),
np.random.rand(5,6,3,3,2),
shape=(3,2),
plates=(5,6,3)))
# Plates with moments broadcasted over plates
check(4,3,2,
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(5,3)))
check(4,3,2,
B=GaussianARD(np.random.randn(1,3,3,2),
np.random.rand(1,3,3,2),
shape=(3,2),
plates=(5,3)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(3,)))
check(4,3,2,
plates=(5,),
B=GaussianARD(np.random.randn(3,3,2),
np.random.rand(3,3,2),
shape=(3,2),
plates=(1,3)))
#
# Test S
#
# Simple
check(4,3,2,
S=GaussianARD(np.random.randn(4-1,2),
np.random.rand(4-1,2),
shape=(2,),
plates=(4-1,)))
# Plates
check(4,3,2,
S=GaussianARD(np.random.randn(5,6,4-1,2),
np.random.rand(5,6,4-1,2),
shape=(2,),
plates=(5,6,4-1,)))
# Plates with moments broadcasted over plates
check(4,3,2,
S=GaussianARD(np.random.randn(4-1,2),
np.random.rand(4-1,2),
shape=(2,),
plates=(5,4-1,)))
check(4,3,2,
S=GaussianARD(np.random.randn(1,4-1,2),
np.random.rand(1,4-1,2),
shape=(2,),
plates=(5,4-1,)))
# Plates broadcasting
check(4,3,2,
plates=(5,),
              S=GaussianARD(np.random.randn(4-1,2),
                            np.random.rand(4-1,2),
                            shape=(2,),
                            plates=(4-1,)))
import math
from typing import Callable
import matplotlib.pylab as plt
import numpy as np
import scipy.optimize
import torch
from scipy.special import roots_legendre
from torch import nn
# Functions fixed_quad and _cached_roots_legendre are adapted from scipy but adapted to pytorch, and the case of
# integration from 0->1. Copyright notice applies to just the modified _cached_roots_legendre and fixed_quad functions
# Copyright © 2001, 2002 Enthought, Inc.
# All rights reserved.
# Copyright © 2003-2013 SciPy Developers.
# All rights reserved.
def _cached_roots_legendre(n: int) -> (torch.tensor, torch.tensor):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
x, w = torch.tensor(roots_legendre(n), dtype=torch.float64)
_cached_roots_legendre.cache[n] = (
torch.tensor(0.5, dtype=torch.float64) * (
x + torch.tensor(1, dtype=torch.float64)),
torch.tensor(0.5, dtype=torch.float64) * w)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func: Callable[[torch.tensor], torch.tensor], n: int = 5,
dtype: torch.dtype = torch.float32,
device: torch.device = None) -> torch.tensor:
y, w = _cached_roots_legendre(n)
return torch.sum(w.to(dtype=dtype, device=device) * func(
y.to(dtype=dtype, device=device)), axis=-1)
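# Example (illustrative): the nodes and weights above are already mapped from
# [-1, 1] to [0, 1], so fixed_quad integrates directly over the unit interval, e.g.
#   fixed_quad(lambda x: x ** 2, n=5)   # -> tensor(0.3333...), i.e. 1/3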
class SpheroidalPotential(nn.Module):
def __init__(self, rho_func: Callable[[torch.tensor], torch.tensor],
q: torch.tensor = torch.tensor([1.0])):
"""
Parameters
----------
rho_func : Function which returns density when supplied an ellipsoidal radius m
q : flattening of density i.e. rho(m) = rho(sqrt(x**2 + y**2 + z**2/q**2))
"""
super(SpheroidalPotential, self).__init__()
self.rho = rho_func
self.register_buffer('q', torch.as_tensor(q))
self.register_buffer('r_max', torch.as_tensor(q))
self.register_buffer('z_max', torch.as_tensor(q))
self.register_buffer('grid', torch.zeros((512, 1024)))
def extra_repr(self):
return f'q={self.q}, r_max={self.r_max}, z_max={self.z_max}, grid={self.grid.shape}'
def _f_compute(self, r_cyl: torch.tensor, z: torch.tensor,
rel_tol: float = 1e-6, direction: str = 'r_cyl', *args,
**kwargs) -> torch.tensor:
r_cyl, z = map(torch.as_tensor, (r_cyl, z))
assert (r_cyl.dtype == z.dtype) and (
r_cyl.device == z.device), "r_cyl and z should be same type on same device"
if direction == 'r_cyl':
            # Change variables of the integral from BT's tau over 0->inf to x over 0->1,
            # via tau = (1/x - 1)**3.
            # Tests suggested 3rd power generally provided better convergence than 1,2,4...
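            # With tau = ((1 - x)/x)**3 the substitution maps x in (0, 1) to tau in (0, inf)
            # and |dtau/dx| = 3*(1 - x)**2/x**4 = 3*tau/(x*(1 - x)), which is the extra
            # factor appearing in the integrands below.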
def integrand(x):
tau = (1 / x - 1) ** 3
r_cyl_mat, z_mat, x, tau = torch.broadcast_tensors(r_cyl.unsqueeze(-1), z.unsqueeze(-1), x, tau)
m = torch.sqrt(r_cyl_mat ** 2 / (tau + 1) + z_mat ** 2 / (tau + self.q ** 2))
return self.rho(m, *args, **kwargs) / (tau + 1) ** 2 / torch.sqrt(tau + self.q ** 2) * 3 * tau / x / (1 - x)
integral = r_cyl * self._fixedquad(integrand, rel_tol=rel_tol,
dtype=z.dtype, device=z.device)
elif direction == 'z':
def integrand(x):
tau = (1 / x - 1) ** 3
r_cyl_mat, z_mat, x, tau = torch.broadcast_tensors(r_cyl.unsqueeze(-1), z.unsqueeze(-1), x, tau)
m = torch.sqrt(r_cyl_mat ** 2 / (tau + 1) + z_mat ** 2 / (tau + self.q ** 2))
return self.rho(m, *args, **kwargs) / (tau + 1) / (tau + self.q ** 2) ** 1.5 * 3 * tau / x / (1 - x)
integral = z * self._fixedquad(integrand, rel_tol=rel_tol,
dtype=z.dtype, device=z.device)
else:
raise ValueError("Direction should be ('r_cyl'|'z')")
return -2 * math.pi * self.q * integral
def f_r_cyl(self, r_cyl: torch.tensor, z: torch.tensor, *args,
**kwargs) -> torch.tensor:
"""Return the force in cylindrical R direction at (r_cyl, z)"""
return self._f_compute(r_cyl, z, direction='r_cyl', *args, **kwargs)
def f_z(self, r_cyl: torch.tensor, z: torch.tensor, *args,
**kwargs) -> torch.tensor:
"""Return the force in the z-direction at (r_cyl, z)"""
return self._f_compute(r_cyl, z, direction='z', *args, **kwargs)
def f_r(self, r_cyl: torch.tensor, z: torch.tensor, *args,
**kwargs) -> torch.tensor:
"""Return the radial force at (r_cyl, z)"""
r_cyl, z = map(torch.as_tensor, (r_cyl, z))
r = torch.sqrt(r_cyl ** 2 + z ** 2)
return (r_cyl * self.f_r_cyl(r_cyl, z, *args, **kwargs) +
z * self.f_z(r_cyl, z, *args, **kwargs)) / r
def f_theta(self, r_cyl: torch.tensor, z: torch.tensor, *args,
**kwargs) -> torch.tensor:
"""Return the force in the theta direction at (r_cyl, z)"""
r_cyl, z = map(torch.as_tensor, (r_cyl, z))
r = torch.sqrt(r_cyl ** 2 + z ** 2)
return (z * self.f_r_cyl(r_cyl, z, *args, **kwargs) -
r_cyl * self.f_z(r_cyl, z, *args, **kwargs)) / r
def vc2(self, r_cyl: torch.tensor, z: torch.tensor, *args,
**kwargs) -> torch.tensor:
"""Return the squared circular velocity at (r_cyl, z)"""
r_cyl, z = map(torch.as_tensor, (r_cyl, z))
r = torch.sqrt(r_cyl ** 2 + z ** 2)
return -self.f_r(r_cyl, z, *args, **kwargs) * r
def pot_ellip(self, r_cyl: torch.tensor, z: torch.tensor, *args,
**kwargs) -> torch.tensor:
"""Return the elipticity of the potential"""
r_cyl, z = map(torch.as_tensor, (r_cyl, z))
return torch.sqrt(z * self.f_r_cyl(r_cyl, z, *args, **kwargs) /
(r_cyl * self.f_z(r_cyl, z, *args, **kwargs)))
@classmethod
def spherical_to_cylindrical(cls, r: torch.tensor, ang: torch.tensor) -> (
torch.tensor, torch.tensor):
z = r * torch.sin(math.pi / 180 * ang)
r_cyl = torch.sqrt(r ** 2 - z ** 2)
return z, r_cyl
@staticmethod
def _fixedquad(func, n=None, n_max=100, n_min=10, rel_tol=1e-6,
dtype=torch.float32, device=None) -> torch.tensor:
"""Integrate func from 0->1 using Gaussian quadrature of order n if set.
Else provide answer with estimated relative error less than rel_tol (up to a
maximum order of n_max"""
if n is None:
val = old_val = fixed_quad(func, n=n_min, dtype=dtype,
device=device)
for n in range(n_min + 5, n_max, 5):
val = fixed_quad(func, n=n, dtype=dtype, device=device)
rel_err = torch.max(torch.abs((val - old_val) / val))
if rel_err < rel_tol:
break
old_val = val
else:
val = fixed_quad(func, n=n, dtype=dtype, device=device)
return val
def grid_accelerations(self, r_max=10., z_max=10., r_bins=512, z_bins=1024):
"""Linear interpolate the gridded forces to the specified positions. This should be preceeded
by a call to grid_acc to (re)compute the accelerations on the grid."""
r = torch.linspace(0, r_max, r_bins, device=self.q.device)
z = torch.linspace(-z_max, z_max, z_bins, device=self.q.device)
self.r_max, self.z_max = torch.as_tensor(r_max), torch.as_tensor(z_max)
f_r_cyl = self.f_r_cyl(r, z.unsqueeze(-1))
f_z = self.f_z(r, z.unsqueeze(-1))
self.grid = torch.stack((f_r_cyl, f_z)).unsqueeze(
0) # .permute(0,1,3,2)
def get_accelerations_cyl(self, positions):
"""Linear interpolate the gridded forces to the specified positions. This should be preceeded
by a call to grid_acc to (re)compute the accelerations on the grid.
Returns forces in (r_cyl, z) directions."""
samples = torch.stack((2 * torch.sqrt(positions[..., 0] ** 2 + positions[..., 1] ** 2) / self.r_max - 1,
positions[..., 2] / self.z_max), dim=-1)
samples = samples.unsqueeze(0).unsqueeze(2)
return torch.nn.functional.grid_sample(self.grid, samples,
mode='bilinear',
align_corners=True).squeeze().t()
def get_accelerations(self, positions):
"""Linear interpolate the gridded forces to the specified positions. This should be preceeded
by a call to grid_acc to (re)compute the accelerations on the grid.
Returns forces in xyz directions."""
acc_cyl = self.get_accelerations_cyl(positions)
acc = torch.zeros_like(positions)
r_cyl = torch.sqrt(positions[..., 0] ** 2 + positions[..., 1] ** 2)
acc[..., 0] = acc_cyl[..., 0] * positions[..., 0] / r_cyl
acc[..., 1] = acc_cyl[..., 0] * positions[..., 1] / r_cyl
acc[..., 2] = acc_cyl[..., 1]
return acc
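# Example (illustrative sketch, not part of the original module; the density and all
# numbers below are made up):
#   rho = lambda m: (1.0 + m ** 2) ** -2.5          # Plummer-like profile
#   pot = SpheroidalPotential(rho, q=torch.tensor([0.8]))
#   v2 = pot.vc2(torch.tensor([1.0]), torch.tensor([0.0]))       # circular speed squared
#   pot.grid_accelerations(r_max=5.0, z_max=5.0)                  # tabulate forces on a grid
#   acc = pot.get_accelerations(torch.tensor([[1.0, 0.0, 0.5]]))  # interpolate at (x, y, z)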
def fit_q_slice_to_snapshot(snap):
"""Fit a flattening to a particle snapshot. We assume that the density varies like rho ~ m**gamma with
m**2 = Rcyl**2 + z**2/q**2 in this slice. This should therefore be a fairly narrow slice so this approximation
is reasonable (it will also fail for very flattened systems). Returns (q,qerr)"""
st = snap.z / snap.r
mintheta = np.sin(30. / 180 * np.pi)
masses = snap.masses.detach().cpu().numpy()
    mass, edges = np.histogram(st ** 2, np.linspace(mintheta ** 2, 1, 100), weights=masses)
#!/usr/bin/env python
from pathlib import Path
import numpy as np
import pytest
from cdflib import cdfread, cdfwrite
R = Path(__file__).parent
fnbasic = 'testing.cdf'
def cdf_create(fn: Path, spec: dict):
return cdfwrite.CDF(fn, cdf_spec=spec)
def cdf_read(fn: Path, validate: bool = False):
return cdfread.CDF(fn, validate=validate)
def test_cdf_creation(tmp_path):
fn = tmp_path / fnbasic
cdf_create(fn, {'rDim_sizes': [1]}).close()
reader = cdf_read(fn)
# Test CDF info
info = reader.cdf_info()
assert info['Majority'] == 'Row_major'
def test_checksum(tmp_path):
# Setup the test_file
fn = tmp_path / fnbasic
tfile = cdf_create(fn, {'Checksum': True})
var_spec = {}
var_spec['Variable'] = 'Variable1'
var_spec['Data_Type'] = 4
var_spec['Num_Elements'] = 1
var_spec['Rec_Vary'] = True
var_spec['Dim_Sizes'] = []
varatts = {}
varatts['Attribute1'] = 1
varatts['Attribute2'] = '500'
tfile.write_var(var_spec, var_attrs=varatts,
var_data=np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
tfile.close()
# %% Open the file to read
reader = cdf_read(fn, validate=True)
# Test CDF info
var = reader.varget("Variable1")
assert (var == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).all()
# test convenience info
assert (reader["Variable1"] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).all()
def test_checksum_compressed(tmp_path):
# Setup the test_file
fn = tmp_path / fnbasic
var_spec = {}
var_spec['Variable'] = 'Variable1'
var_spec['Data_Type'] = 2
var_spec['Num_Elements'] = 1
var_spec['Rec_Vary'] = True
var_spec['Dim_Sizes'] = []
varatts = {}
varatts['Attribute1'] = 1
varatts['Attribute2'] = '500'
v = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
tfile = cdf_create(fn, {'Compressed': 6, 'Checksum': True})
tfile.write_var(var_spec, var_attrs=varatts,
var_data=v)
tfile.close()
# %% Open the file to read
reader = cdf_read(fn, validate=True)
var = reader.varget("Variable1")
assert (var == v).all()
att = reader.attget("Attribute1", entry=0)
assert att['Data'] == [1]
att = reader.attget("Attribute2", entry=0)
assert att['Data'] == '500'
def test_file_compression(tmp_path):
# Setup the test_file
fn = tmp_path / fnbasic
var_spec = {}
var_spec['Variable'] = 'Variable1'
var_spec['Data_Type'] = 2
var_spec['Num_Elements'] = 1
var_spec['Rec_Vary'] = True
var_spec['Dim_Sizes'] = []
varatts = {}
varatts['Attribute1'] = 1
varatts['Attribute2'] = '500'
v = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
tfile = cdf_create(fn, {'Compressed': 6, 'Checksum': True})
tfile.write_var(var_spec, var_attrs=varatts,
var_data=v)
tfile.close()
# Open the file to read
reader = cdf_read(fn)
# Test CDF info
var = reader.varget("Variable1")
assert (var == v).all()
def test_globalattrs(tmp_path):
# Setup the test_file
fn = tmp_path / fnbasic
globalAttrs = {}
globalAttrs['Global1'] = {0: 'Global Value 1'}
globalAttrs['Global2'] = {0: 'Global Value 2'}
globalAttrs['Global3'] = {0: [12, 'cdf_int4']}
globalAttrs['Global4'] = {0: [12.34, 'cdf_double']}
globalAttrs['Global5'] = {0: [12.34, 21.43]}
GA6 = {}
GA6[0] = 'abcd'
GA6[1] = [12, 'cdf_int2']
GA6[2] = [12.5, 'cdf_float']
GA6[3] = [[0, 1, 2], 'cdf_int8']
globalAttrs['Global6'] = GA6
tfile = cdf_create(fn, {'Checksum': True})
tfile.write_globalattrs(globalAttrs)
tfile.close()
# %% Open the file to read
reader = cdf_read(fn)
# Test CDF info
attrib = reader.attinq('Global2')
assert attrib['num_gr_entry'] == 1
attrib = reader.attinq('Global6')
assert attrib['num_gr_entry'] == 4
entry = reader.attget('Global6', 3)
assert entry['Data_Type'] == 'CDF_INT8'
for x in [0, 1, 2]:
assert entry['Data'][x] == x
def test_create_zvariable(tmp_path):
# Setup the test_file
fn = tmp_path / fnbasic
vs = {}
vs['Variable'] = 'Variable1'
vs['Data_Type'] = 1
vs['Num_Elements'] = 1
vs['Rec_Vary'] = True
vs['Dim_Sizes'] = []
vs['Dim_Vary'] = True
tfile = cdf_create(fn, {'Checksum': True})
tfile.write_var(vs, var_data=np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
tfile.close()
# %% Open the file to read
reader = cdf_read(fn)
# Test CDF info
varinfo = reader.varinq("Variable1")
assert varinfo['Data_Type'] == 1
var = reader.varget("Variable1")
assert (var == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).all()
def test_create_rvariable(tmp_path):
# Setup the test_file
fn = tmp_path / fnbasic
vs = {}
vs['Variable'] = 'Variable1'
vs['Var_Type'] = 'rvariable'
vs['Data_Type'] = 12
vs['Num_Elements'] = 1
vs['Rec_Vary'] = True
vs['Dim_Sizes'] = []
vs['Dim_Vary'] = [True]
tfile = cdf_create(fn, {'rDim_sizes': [1]})
tfile.write_var(vs, var_data=np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))
tfile.close()
# Open the file to read
reader = cdf_read(fn)
# Test CDF info
varinfo = reader.varinq("Variable1")
assert varinfo['Data_Type'] == 12
var = reader.varget("Variable1")
for x in [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
assert var[x] == x
def test_create_zvariable_no_recvory(tmp_path):
# Setup the test_file
fn = tmp_path / fnbasic
var_spec = {}
var_spec['Variable'] = 'Variable1'
var_spec['Data_Type'] = 8
var_spec['Num_Elements'] = 1
var_spec['Rec_Vary'] = False
var_spec['Dim_Sizes'] = []
var_spec['Dim_Vary'] = True
tfile = cdf_create(fn, {'rDim_sizes': [1]})
    tfile.write_var(var_spec, var_data=np.array([2]))
import numpy as np
from tqdm import tqdm
# Policy
def epsilon_greedy(Q, state, epsilon):
if np.random.rand() < epsilon:
return np.random.randint(Q.shape[1])
else:
return np.argmax(Q[state])
# SARSA: Sutton & Barto (2018), p. 130
def sarsa(env, gamma=1.0, epsilon= 0.1, alpha=0.1, episodes=10000):
nS = len(env.state_space())
nA = len(env.action_space())
Q = np.zeros((nS, nA), dtype=np.float64)
for e in tqdm(range(episodes)):
state, reward, done = env.reset()
action = epsilon_greedy(Q, state, epsilon)
while not done:
next_state, reward, done = env.step(action)
            next_action = epsilon_greedy(Q, next_state, epsilon)
# Update Q values using the action taken in the next state
td_target = reward + gamma * Q[next_state][next_action] * (not done)
td_error = td_target - Q[state][action]
Q[state][action] += alpha * td_error
state, action = next_state, next_action
pi = np.argmax(Q, axis=1)
return (pi, Q)
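# Example (illustrative): any environment exposing the interface used above works,
# i.e. state_space()/action_space() returning iterables of states/actions,
# reset() -> (state, reward, done) and step(action) -> (next_state, reward, done).
#   pi, Q = sarsa(env, gamma=0.99, epsilon=0.1, alpha=0.1, episodes=5000)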
# SARSA with GLIE: Introduction to RL with David Silver (2015), Lecture 5
def sarsa_glie(env, gamma=1.0, alpha=0.1, episodes=10000):
nS = len(env.state_space())
nA = len(env.action_space())
Q = np.zeros((nS, nA), dtype=np.float64)
for e in tqdm(range(episodes)):
epsilon = 1 / (e + 1) # GLIE -> Greedy in the Limit with Infinite Exploration
state, reward, done = env.reset()
action = epsilon_greedy(Q, state, epsilon)
while not done:
next_state, reward, done = env.step(action)
            next_action = epsilon_greedy(Q, next_state, epsilon)
# Update Q values using the action taken in the next state
td_target = reward + gamma * Q[next_state][next_action] * (not done)
td_error = td_target - Q[state][action]
Q[state][action] += alpha * td_error
state, action = next_state, next_action
pi = np.argmax(Q, axis=1)
return (pi, Q)
# Expected SARSA: Sutton & Barto (2018), p. 133
def expected_sarsa(env, gamma=1.0, epsilon=0.1, alpha=0.1, episodes=10000):
nS = len(env.state_space())
nA = len(env.action_space())
Q = np.zeros((nS, nA), dtype=np.float64)
for e in tqdm(range(episodes)):
state, reward, done = env.reset()
while not done:
action = epsilon_greedy(Q, state, epsilon)
next_state, reward, done = env.step(action)
# Probability distribution for next action
prob = np.full(nA, epsilon / nA, dtype=np.float64)
prob[np.argmax(Q[next_state])] += 1 - epsilon
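            # Expected SARSA replaces the sampled Q(s', a') by its expectation under the
            # epsilon-greedy policy: E[Q(s', A')] = sum_a pi(a|s') * Q(s', a)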
# Update Q values using the expected value of the next state
td_target = reward + gamma * np.sum(prob * Q[next_state]) * (not done)
td_error = td_target - Q[state][action]
Q[state][action] += alpha * td_error
state = next_state
pi = np.argmax(Q, axis=1)
return (pi, Q)
# Q-Learning: Sutton & Barto (2018), p. 131
def q_learning(env, gamma=1.0, epsilon = 0.1, alpha=0.1, episodes=10000):
nS = len(env.state_space())
nA = len(env.action_space())
Q = np.zeros((nS, nA), dtype=np.float64)
for e in tqdm(range(episodes)):
state, reward, done = env.reset()
while not done:
action = epsilon_greedy(Q, state, epsilon)
next_state, reward, done = env.step(action)
# Update Q values using the action with the highest value in the next state
            td_target = reward + gamma * np.max(Q[next_state]) * (not done)
            td_error = td_target - Q[state][action]
            Q[state][action] += alpha * td_error
            state = next_state
    pi = np.argmax(Q, axis=1)
    return (pi, Q)
from collections.abc import Hashable
import logging
import time
from typing import Optional
import numba
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def add_transition_info(
ds: pd.DataFrame,
colname: Hashable,
threshold: Optional[float] = None,
extra_rows: int = 0,
transition_col: str = "process_num",
time_col: str = "secs",
):
"""Calculate splits and add "secs" and "process_num" column to dataframe."""
logger.info("Start splitting process")
now = time.time()
    # Set the first value to 0 (possibly the last one as well?)
    # A bit ugly, but necessary so that the start and end indexes stay in sync.
col = ds[colname]
    threshold = threshold if threshold else col.max() / 2
start_indexes = get_transition_indices(col.to_numpy(), threshold)
start_indexes = np.maximum(start_indexes - extra_rows, 0)
end_indexes = get_transition_indices(col.to_numpy(), threshold, True)
end_indexes = np.minimum(end_indexes + extra_rows, len(ds.index))
if len(start_indexes) > len(end_indexes):
start_indexes = start_indexes[:-1]
if len(end_indexes) > len(start_indexes):
        end_indexes = end_indexes[1:]
if not colname or len(start_indexes) == 0 or len(end_indexes) == 0:
start_indexes = [0]
end_indexes = [len(ds.index)] # perhaps -1 ?
logger.info("Found indices. Applying new columns to dataset....")
ds = ds.drop(time_col, errors="ignore", axis=1)
proc = np.full((len(ds.index),), np.nan)
for i, (start, end) in enumerate(zip(start_indexes, end_indexes), start=1):
proc[start:end] = i
categories = [i + 1 for i in range(len(start_indexes))]
ds[transition_col] = pd.Categorical(proc, categories=categories)
ds[time_col] = ds.groupby(transition_col).apply(calc_secs)[time_col]
logger.info(f"Splitting took {(time.time() - now):.2f} seconds")
return ds
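# Example (illustrative, hypothetical column name): label process runs in a signal
# sampled every 10 ms; rows outside a run (plus `extra_rows` of padding) keep NaN
# in the process_num column.
#   idx = pd.date_range("2021-01-01", periods=1000, freq="10ms")
#   power = np.r_[np.zeros(300), np.full(400, 5.0), np.zeros(300)]
#   df = add_transition_info(pd.DataFrame({"power": power}, index=idx),
#                            "power", threshold=2.5, extra_rows=10)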
def calc_secs(ds: pd.DataFrame):
idx = ds.index.astype(int).to_numpy() / 1_000_000_000
ds["secs"] = idx - idx[0] if len(ds.index) > 0 else np.nan
return ds
@numba.jit(nopython=True, parallel=True)
def get_transition_indices(
y: np.ndarray, threshold: float, falling_edge: bool = False
) -> np.ndarray:
"""Return indices where a transition occurs (default: detect rising edges)."""
# Find where y crosses a threshold in a specific direction.
lower = y < threshold
higher = y >= threshold
if falling_edge:
        return np.where(higher[:-1] & lower[1:])[0] + 1
    return np.where(lower[:-1] & higher[1:])[0] + 1
"""
Classes and functions that handle masks (i.e., exposure depth).
Classes
Mask
Functions
someFunction
"""
import os
import numpy as np
import healpy as hp
import scipy.signal
import scipy.special
import ugali.utils.binning
import ugali.utils.skymap
import ugali.observation.roi
from ugali.utils import healpix
from ugali.utils.logger import logger
from ugali.utils.healpix import ang2pix
from ugali.utils.config import Config
from ugali.utils.constants import MAGERR_PARAMS
############################################################
class Mask(object):
"""
Contains maps of completeness depth in magnitudes for multiple observing bands, and associated products.
"""
def __init__(self, config, roi):
self.config = Config(config)
self.roi = roi
filenames = self.config.getFilenames()
catalog_pixels = self.roi.getCatalogPixels()
self.mask_1 = MaskBand(filenames['mask_1'][catalog_pixels],self.roi)
self.mask_2 = MaskBand(filenames['mask_2'][catalog_pixels],self.roi)
self._fracRoiSparse()
self.minimum_solid_angle = self.config.params['mask']['minimum_solid_angle'] # deg^2
# FIXME: Need to parallelize CMD and MMD formulation
self._solidAngleCMD()
self._pruneCMD(self.minimum_solid_angle)
#self._solidAngleMMD()
#self._pruneMMD(self.minimum_solid_angle)
self._photometricErrors()
@property
def mask_roi_unique(self):
"""
Assemble a set of unique magnitude tuples for the ROI
"""
# There is no good inherent way in numpy to do this...
# http://stackoverflow.com/q/16970982/
# Also possible and simple:
#return np.unique(zip(self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse))
A = np.vstack([self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse]).T
B = A[np.lexsort(A.T[::-1])]
return B[np.concatenate(([True],np.any(B[1:]!=B[:-1],axis=1)))]
@property
def mask_roi_digi(self):
"""
Get the index of the unique magnitude tuple for each pixel in the ROI.
"""
# http://stackoverflow.com/q/24205045/#24206440
A = np.vstack([self.mask_1.mask_roi_sparse,self.mask_2.mask_roi_sparse]).T
B = self.mask_roi_unique
AA = np.ascontiguousarray(A)
BB = np.ascontiguousarray(B)
dt = np.dtype((np.void, AA.dtype.itemsize * AA.shape[1]))
a = AA.view(dt).ravel()
b = BB.view(dt).ravel()
idx = np.argsort(b)
indices = np.searchsorted(b[idx],a)
return idx[indices]
@property
def frac_annulus_sparse(self):
return self.frac_roi_sparse[self.roi.pixel_annulus_cut]
@property
def frac_interior_sparse(self):
return self.frac_roi_sparse[self.roi.pixel_interior_cut]
def _fracRoiSparse(self):
"""
Calculate an approximate pixel coverage fraction from the two masks.
        We have no way to know a priori how much the coverage of the
        two masks overlaps in a given pixel. For example, masks that each
have frac = 0.5 could have a combined frac = [0.0 to 0.5].
The limits will be:
max: min(frac1,frac2)
min: max((frac1+frac2)-1, 0.0)
Sometimes we are lucky and our fracdet is actually already
calculated for the two masks combined, so that the max
condition is satisfied. That is what we will assume...
"""
self.frac_roi_sparse = np.min([self.mask_1.frac_roi_sparse,self.mask_2.frac_roi_sparse],axis=0)
return self.frac_roi_sparse
def _solidAngleMMD(self):
"""
Compute solid angle within the mask annulus (deg^2) as a
function of mag_1 and mag_2
"""
# Take upper corner of the magnitude bin
mag_2,mag_1 = np.meshgrid(self.roi.bins_mag[1:],self.roi.bins_mag[1:])
# Havent tested since adding fracdet
unmasked_mag_1 = (self.mask_1.mask_annulus_sparse[:,np.newaxis]>mag_1[:,np.newaxis])
unmasked_mag_2 = (self.mask_2.mask_annulus_sparse[:,np.newaxis]>mag_2[:,np.newaxis])
n_unmasked_pixels = (unmasked_mag_1*unmasked_mag_2*self.frac_annulus_sparse).sum(axis=1)
self.solid_angle_mmd = self.roi.area_pixel * n_unmasked_pixels
if self.solid_angle_mmd.sum() == 0:
msg = "Mask annulus contains no solid angle."
logger.error(msg)
raise Exception(msg)
def _pruneMMD(self, minimum_solid_angle):
"""
Remove regions of magnitude-magnitude space where the unmasked solid angle is
statistically insufficient to estimate the background.
INPUTS:
solid_angle[1]: minimum solid angle (deg^2)
"""
logger.info('Pruning mask based on minimum solid angle of %.2f deg^2'%(minimum_solid_angle))
solid_angle_mmd = self.solid_angle_mmd*(self.solid_angle_mmd > minimum_solid_angle)
if solid_angle_mmd.sum() == 0:
msg = "Pruned mask contains no solid angle."
logger.error(msg)
raise ValueError(msg)
self.solid_angle_mmd = solid_angle_mmd
# Compute which magnitudes the clipping correspond to
index_mag_1, index_mag_2 = np.nonzero(self.solid_angle_mmd)
self.mag_1_clip = self.roi.bins_mag[1:][np.max(index_mag_1)]
self.mag_2_clip = self.roi.bins_mag[1:][np.max(index_mag_2)]
logger.info('Clipping mask 1 at %.2f mag'%(self.mag_1_clip) )
logger.info('Clipping mask 2 at %.2f mag'%(self.mag_2_clip) )
self.mask_1.mask_roi_sparse = np.clip(self.mask_1.mask_roi_sparse, 0., self.mag_1_clip)
self.mask_2.mask_roi_sparse = np.clip(self.mask_2.mask_roi_sparse, 0., self.mag_2_clip)
def _solidAngleCMD(self):
"""
Compute solid angle within the mask annulus (deg^2) as a
function of color and magnitude.
"""
self.solid_angle_cmd = np.zeros([len(self.roi.centers_mag),
len(self.roi.centers_color)])
for index_mag in np.arange(len(self.roi.centers_mag)):
for index_color in np.arange(len(self.roi.centers_color)):
# mag and color at bin center
mag = self.roi.centers_mag[index_mag]
color = self.roi.centers_color[index_color]
if self.config.params['catalog']['band_1_detection']:
# Evaluating at the center of the color-mag bin, be consistent!
#mag_1 = self.roi.centers_mag[index_mag]
#color = self.roi.centers_color[index_color]
#mag_2 = mag_1 - color
# Evaluating at corner of the color-mag bin, be consistent!
mag_1 = mag + (0.5 * self.roi.delta_mag)
mag_2 = mag - color + (0.5 * self.roi.delta_color)
else:
# Evaluating at the center of the color-mag bin, be consistent!
#mag_2 = self.roi.centers_mag[index_mag]
#color = self.roi.centers_color[index_color]
#mag_1 = mag_2 + color
# Evaluating at corner of the color-mag bin, be consistent!
mag_1 = mag + color + (0.5 * self.roi.delta_color)
mag_2 = mag + (0.5 * self.roi.delta_mag)
# ADW: Is there a problem here?
#self.solid_angle_cmd[index_mag, index_color] = self.roi.area_pixel * np.sum((self.mask_1.mask > mag_1) * (self.mask_2.mask > mag_2))
# ADW: I think we want to keep pixels that are >= mag
unmasked_mag_1 = (self.mask_1.mask_annulus_sparse >= mag_1)
unmasked_mag_2 = (self.mask_2.mask_annulus_sparse >= mag_2)
n_unmasked_pixels = np.sum(unmasked_mag_1*unmasked_mag_2*self.frac_annulus_sparse)
#n_unmasked_pixels = np.sum((self.mask_1.mask_annulus_sparse > mag_1) \
# * (self.mask_2.mask_annulus_sparse > mag_2))
self.solid_angle_cmd[index_mag, index_color] = self.roi.area_pixel * n_unmasked_pixels
if self.solid_angle_cmd.sum() == 0:
msg = "Mask annulus contains no solid angle."
logger.error(msg)
raise Exception(msg)
return self.solid_angle_cmd
def _solidAngleCMD(self):
"""
Compute solid angle within the mask annulus (deg^2) as a
function of color and magnitude.
Returns:
--------
solid_angle_cmd : 2d array
"""
self.solid_angle_cmd = np.zeros([len(self.roi.centers_mag),
len(self.roi.centers_color)])
idx_mag,idx_color=np.where(self.solid_angle_cmd == 0)
mag = self.roi.centers_mag[idx_mag]
color = self.roi.centers_color[idx_color]
if self.config.params['catalog']['band_1_detection']:
# Evaluating at corner of the color-mag bin, be consistent!
mag_1 = mag + (0.5 * self.roi.delta_mag)
mag_2 = mag - color + (0.5 * self.roi.delta_color)
else:
# Evaluating at corner of the color-mag bin, be consistent!
mag_1 = mag + color + (0.5 * self.roi.delta_color)
mag_2 = mag + (0.5 * self.roi.delta_mag)
n_unmasked_pixels = np.zeros_like(mag)
for i in np.arange(len(mag_1)):
unmasked_mag_1 = (self.mask_1.mask_annulus_sparse >= mag_1[i])
unmasked_mag_2 = (self.mask_2.mask_annulus_sparse >= mag_2[i])
n_unmasked_pixels[i] = np.sum(unmasked_mag_1 * unmasked_mag_2 *
self.frac_annulus_sparse)
self.solid_angle_cmd[idx_mag, idx_color] = self.roi.area_pixel * n_unmasked_pixels
if self.solid_angle_cmd.sum() == 0:
msg = "Mask annulus contains no solid angle."
logger.error(msg)
raise Exception(msg)
return self.solid_angle_cmd
def _pruneCMD(self, minimum_solid_angle):
"""
Remove regions of color-magnitude space where the unmasked solid angle is
statistically insufficient to estimate the background.
ADW: Why are we clipping at the bin center instead of edge?
INPUTS:
solid_angle[1]: minimum solid angle (deg^2)
"""
logger.info('Pruning mask based on minimum solid angle of %.2f deg^2'%(minimum_solid_angle))
self.solid_angle_cmd *= self.solid_angle_cmd > minimum_solid_angle
if self.solid_angle_cmd.sum() == 0:
msg = "Pruned mask contains no solid angle."
logger.error(msg)
raise Exception(msg)
# Compute which magnitudes the clipping correspond to
index_mag, index_color = np.nonzero(self.solid_angle_cmd)
mag = self.roi.centers_mag[index_mag]
color = self.roi.centers_color[index_color]
if self.config.params['catalog']['band_1_detection']:
mag_1 = mag
mag_2 = mag_1 - color
self.mag_1_clip = np.max(mag_1) + (0.5 * self.roi.delta_mag)
self.mag_2_clip = np.max(mag_2) + (0.5 * self.roi.delta_color)
else:
mag_2 = mag
mag_1 = color + mag_2
self.mag_1_clip = np.max(mag_1) + (0.5 * self.roi.delta_color)
self.mag_2_clip = np.max(mag_2) + (0.5 * self.roi.delta_mag)
logger.info('Clipping mask 1 at %.2f mag'%(self.mag_1_clip) )
logger.info('Clipping mask 2 at %.2f mag'%(self.mag_2_clip) )
self.mask_1.mask_roi_sparse = np.clip(self.mask_1.mask_roi_sparse, 0., self.mag_1_clip)
self.mask_2.mask_roi_sparse = np.clip(self.mask_2.mask_roi_sparse, 0., self.mag_2_clip)
def completeness(self, delta, method='step'):
"""
Return the completeness as a function of magnitude.
ADW: Eventually want a completeness mask to set overall efficiency.
"""
delta = np.asarray(delta)
if method == 'step':
func = lambda delta: (delta > 0).astype(float)
elif method == 'erf':
# Trust the SDSS EDR???
# 95% completeness:
def func(delta):
# Efficiency at bright end (assumed to be 100%)
e = 1.0
# EDR says full width is ~0.5 mag
width = 0.2
# This should be the halfway point in the curve
return (e/2.0)*(1/np.sqrt(2*width))*(np.sqrt(2*width)-scipy.special.erf(-delta))
elif method == 'flemming':
# Functional form taken from Fleming et al. AJ 109, 1044 (1995)
# http://adsabs.harvard.edu/abs/1995AJ....109.1044F
# f = 1/2 [1 - alpha(V - Vlim)/sqrt(1 + alpha^2 (V - Vlim)^2)]
# CAREFUL: This definition is for Vlim = 50% completeness
def func(delta):
alpha = 2.0
return 0.5 * (1 - (alpha * delta)/np.sqrt(1+alpha**2 * delta**2))
else:
raise Exception('...')
return func(delta)
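    # For example, with method='flemming' the completeness is 0.5 at delta = 0
    # (V = Vlim, by construction) and roughly 0.95 one magnitude brighter (delta = -1).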
def _photometricErrors(self, catalog=None, n_per_bin=100):
"""
Realistic photometric errors estimated from catalog objects and mask.
Extend below the magnitude threshold with a flat extrapolation.
"""
if catalog is None:
# Simple proxy for photometric errors
release = self.config['data']['release']
band_1 = self.config['catalog'].get('mag_1_band')
if not band_1: band_1 = self.config['isochrone']['mag_1_field']
band_2 = self.config['catalog'].get('mag_2_band')
if not band_2: band_2 = self.config['isochrone']['mag_2_field']
DELMIN = 0.0
pars_1 = MAGERR_PARAMS[release][band_1]
def photo_err_1(delta):
p = pars_1
return np.clip(np.exp(p[0]*delta+p[1])+p[2], 0, np.exp(p[0]*(DELMIN)+p[1])+p[2])
pars_2 = MAGERR_PARAMS[release][band_2]
def photo_err_2(delta):
p = pars_2
            return np.clip(np.exp(p[0]*delta+p[1])+p[2], 0, np.exp(p[0]*(DELMIN)+p[1])+p[2])
# -*- coding: utf-8 -*-
"""
Created on Jul 21 2017, Modified Nov 15 2019.
@authors: <NAME>
Compute prosody features based on pitch, loudness, duration, ratios, rhythm, and perturbations (apq/ppq)
OUTPUT OF THE FUNCTION "prosody_features":
"""
import os
path_base = os.path.dirname(os.path.abspath(__file__))
import numpy as np
import warnings
import sigproc as sg
import scipy as sp
#from scipy.stats import kurtosis, skew
from scipy.signal import gaussian
from scipy.io.wavfile import write
import praat.praat_functions as praatF
#import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error as mse
def prosody_features(sig,fs,f0=np.asarray([0]),winTime=0.04,stepTime=0.01):
if (np.sum(f0)==0)&(len(f0)==1):
f0 = f0_contour_pr(sig,fs,winTime,stepTime)#F0
#VAD
out_VAD = eVAD(sig,fs)
#Compute f0 features
feats_f0 = f0_features(sig,fs,f0,winTime,stepTime)
#Compute voiced features
feats_voiced,vcont = voiced_features(sig,fs,f0,stepTime)
#Compute VAD features (duration+energy content)
feats_VAD = VAD_features(sig,fs,out_VAD,winTime,stepTime)
#Compute unvoiced features
feats_unvoiced = unvoiced_features(sig,fs,vcont,out_VAD['Pause_labels'])
X = [feats_f0,feats_voiced,feats_unvoiced,feats_VAD]
#Create new dictionary with all features
X_pr = {}
for k in X:
for f in list(k.keys()):
X_pr[f] = k[f]
return X_pr
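# Example (illustrative): compute the prosody feature dictionary for a mono wav file.
# scipy.io.wavfile.read is only one possible way to load the signal.
#   from scipy.io.wavfile import read
#   fs, sig = read('speech.wav')
#   feats = prosody_features(sig, fs)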
def prosody_features_dynamic(sig,fs,f0=np.asarray([0]),winTime=0.04,stepTime=0.01):
if len(f0)==0:
f0 = f0_contour_pr(sig,fs,winTime,stepTime)#F0
#---------------------------------------
f0coef,voiced,_ = voiced_unvoiced(sig,fs,f0,stepTime)
# f0coef = np.vstack(f0coef)
#Voiced features
lvoiced = []
for v in voiced:
lvoiced.append(len(v)/fs)#Length of voiced segment
lvoiced = np.vstack(lvoiced)
#.........................................................
X = np.hstack([lvoiced,f0coef])
return X
#==========================================================================
def Hz2Semitone(F):
ST=39.87*np.log(F/50)
return ST
#==========================================================================
def f0_contour_pr(sig,fs,sizeframe=0.04,step=0.01,maxf0=500, post=False):
"""
This function is used to extract the F0 contour using praat
"""
sig = sig-np.mean(sig)
sig = sig/np.max(np.abs(sig))
temp_aud = (sig*2**15).astype(np.int16)
temp_path = path_base+'\\temp_sig.wav'#Creates temporal wav file
write(temp_path,int(fs),temp_aud)
temp_filename_f0=path_base+'/praat/tempF0.txt'
np.savetxt(temp_filename_f0,np.zeros((3,3)))
temp_filename_vuv=path_base+'/praat/tempvuv.txt'
np.savetxt(temp_filename_vuv,np.zeros((3,3)))
minf0 = int(3/sizeframe)
praatF.praat_vuv(temp_path, temp_filename_f0, temp_filename_vuv,
time_stepF0=step, minf0=minf0, maxf0=maxf0)
#Tomas: I modified this function. The size of the frame (in seconds) and sampling frequency are
#now input arguments. This was neccesary to compute the number of frames correctly.
f0,_ = praatF.decodeF0(temp_filename_f0,len(sig),float(fs),sizeframe,step)
if np.sum(f0)==0:
print('PITCH WAS NOT DETECTED')
os.remove(temp_filename_f0)
os.remove(temp_filename_vuv)
os.remove(temp_path)
#Post-processing of F0 to avoid outliers. Is very simple
if post==True:
print('F0 post-processing Activated')
uf0 = np.mean(f0[f0>0])
sf0 = np.std(f0[f0>0])
f0[f0>(uf0+(2.5*sf0))] = 0
f0[f0<(uf0-(2.5*sf0))] = 0
return f0
#==========================================================================
def voiced_unvoiced(sig,fs,f0,stepTime):
"""
Voiced unvoiced segmentation
sig: Speech signal
fs: Sampling frequency
f0: Pitch contour
stepTime: Step size (in seconds) used to computed the f0 contour.
"""
yp = f0.copy()
yp[yp!=0] = 1
ydf = np.diff(yp)
lim_end = np.where(ydf==-1)[0]+1
lim_ini = np.where(ydf==1)[0]+1
#Voiced segments
v_segm = []
f0_feats = []#Dynamic f0-based features
#Unvoiced
uv_segm = []
for idx in range(len(lim_ini)):
#------------------------------------
#Voiced segments
tini = int(lim_ini[idx]*stepTime*fs)
tend = int(lim_end[idx]*stepTime*fs)
if int(tend-tini)>int(0.04*fs):
# print(tini,tend)
v_segm.append(sig[tini:tend])
x = np.arange(0,len(f0[lim_ini[idx]:lim_end[idx]]))
#F0 based features
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.RankWarning)
f0c = np.polyfit(x,f0[lim_ini[idx]:lim_end[idx]],5)
# f0c = f0c.reshape(1,-1)#Dynamic reprsentation of f0.
p = np.poly1d(f0c)
f0_mse = mse(f0[lim_ini[idx]:lim_end[idx]],p(x))
# plt.plot(p(x),'k',label='Fitted')
# plt.plot(f0[lim_ini[idx]:lim_end[idx]],'r',label='Real')
# plt.legend()
if len(sig[tini:tend])>int(3*0.04*fs):
frames = sg.extract_windows(sig[tini:tend],int(0.04*fs),int(0.01*fs))
jitter = ppq(f0[lim_ini[idx]:lim_end[idx]],3)
ak = np.max(frames,axis=1)
shimmer = apq(ak,3)
else:
jitter = 0
shimmer = 0
f0temp = np.hstack([jitter,shimmer,len(sig[tini:tend])/fs,f0_mse,f0c])
f0_feats.append(f0temp)
#--------------------------------
#------------------------------------
#Unvoiced segments
tini = int(lim_end[idx]*stepTime*fs)
if (idx+1)<(len(lim_ini)-1):
tend = int(lim_ini[idx+1]*stepTime*fs)
if int(tend-tini)<int(0.27*fs):
uv_segm.append(sig[tini:tend])
#--------------------------------------------------------------------
f0_feats = np.vstack(f0_feats)
return f0_feats,v_segm,uv_segm
#==========================================================================
def voiced_seg(sig,fs,f0,stepTime):
"""
Voiced segments
sig: Speech signal
fs: Sampling frequency
f0: Pitch contour
stepTime: Step size (in seconds) used to computed the f0 contour.
"""
yp = f0.copy()
yp[yp!=0] = 1
#In case the starting point is F0 and not 0
if yp[0] == 1:
np.insert(yp, 0, 1)
if yp[-1:] == 1:
np.insert(yp, 0, len(yp)-1)
#---------------------
ydf = np.diff(yp)
lim_end = np.where(ydf==-1)[0]+1
lim_ini = np.where(ydf==1)[0]+1
#Voiced segments
v_segm = []
tm = []
vcont = np.zeros(len(sig))
for idx in range(len(lim_ini)):
#------------------------------------
#Voiced segments
tini = int(lim_ini[idx]*stepTime*fs)
tend = int(lim_end[idx]*stepTime*fs)
if int(tend-tini)>int(0.04*fs):
# print(tini,tend)
vcont[tini:tend] = 1
v_segm.append(sig[tini:tend])
tm.append(np.hstack([lim_ini[idx]*stepTime,lim_end[idx]*stepTime]))
vseg = {'Voiced_segments':v_segm,
'Voiced_times':tm,
'Voiced_labels':vcont}
return vseg
#----------------------------------------------------------------------------
def unvoiced_seg(sig,fs,vseg,sil):
uvcont = sil+vseg+1
uvcont[uvcont>1] = 0
uvcont[0] = 0
uvcont[-1:] = 0
yp = uvcont.copy()
ydf = np.diff(yp)
lim_end = np.where(ydf==-1)[0]+1
lim_ini = np.where(ydf==1)[0]+1
#Voiced segments
uv_seg = []
uv_dur = []
uv_tm = []
for idx in range(len(lim_ini)):
#------------------------------------
try:
tini = lim_ini[idx]/fs
tend = lim_end[idx]/fs
# uv_dur.append(tend-tini)
uv_seg.append(sig[lim_ini[idx]:lim_end[idx]])
uv_tm.append([tini,tend])
except:
print('Unvoiced segment not included')
uv_dur = np.asarray(uv_dur)
return uv_seg,uv_tm,uvcont
#----------------------------------------------------------------------------
def eVAD(sig,fs,win=0.015,step=0.01):
"""
Energy-based Voice Activity Detection
"""
#Normalize signal
    sig = sig-np.mean(sig)
import numpy as np
import pandas as pd
import pytest
from vispy.color import get_colormap
from napari.layers import Vectors
from napari.utils.colormaps.standardize_color import transform_color
# Set random seed for testing
np.random.seed(0)
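# The Vectors layer expects coordinate-like data of shape (N, 2, D): for each vector
# the first row is the start position and the second row the projection (direction).
# Image-like data of shape (N1, N2, 2) is expanded to (N1*N2, 2, 2) by the layer,
# as the tests below check.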
def test_random_vectors():
"""Test instantiating Vectors layer with random coordinate-like 2D data."""
shape = (10, 2, 2)
np.random.seed(0)
data = np.random.random(shape)
data[:, 0, :] = 20 * data[:, 0, :]
layer = Vectors(data)
assert np.all(layer.data == data)
assert layer.data.shape == shape
assert layer.ndim == shape[2]
assert layer._view_data.shape[2] == 2
def test_random_vectors_image():
"""Test instantiating Vectors layer with random image-like 2D data."""
shape = (20, 10, 2)
np.random.seed(0)
data = np.random.random(shape)
layer = Vectors(data)
assert layer.data.shape == (20 * 10, 2, 2)
assert layer.ndim == 2
assert layer._view_data.shape[2] == 2
def test_empty_vectors():
"""Test instantiating Vectors layer with empty coordinate-like 2D data."""
shape = (0, 2, 2)
data = np.empty(shape)
layer = Vectors(data)
assert np.all(layer.data == data)
assert layer.data.shape == shape
assert layer.ndim == shape[2]
assert layer._view_data.shape[2] == 2
def test_empty_vectors_with_properties():
"""Test instantiating Vectors layer with empty coordinate-like 2D data."""
shape = (0, 2, 2)
data = np.empty(shape)
    properties = {'angle': np.array([0.5], dtype=float)}
layer = Vectors(data, properties=properties)
    assert np.all(layer.data == data)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 2 16:52:27 2017
@author: abauville
"""
"""
Created on Fri Feb 24 15:21:26 2017
@author: abauville
"""
import numpy as np
from numpy import sin, cos, tan, arcsin, arccos, arctan, pi
import matplotlib.pyplot as plt
degree = 180.0/pi
# =============================================================================
#
# Functions for fault diagram plotting
#
# =============================================================================
def plotFaultArrow(x,y,theta, L=1, sense=0, spacing=0.1, color="r",angleHead=20.0 * pi/180.0,headL = .5,ax=plt,linewidth = 1):
# sense 0: sinistral, 1:dextral
x = x + sin(theta)*spacing
y = y - cos(theta)*spacing
segment = np.array((-1,1)) * L
segmentHead = np.array((0,2)) * L*headL
ax.plot(x+cos(theta)*segment, y + sin(theta)*segment,color=color,linewidth=linewidth)
if ((spacing>0) & (sense == 0)):
ax.plot(x+L*cos(theta) - cos(angleHead+theta)*(segmentHead), y+L*sin(theta) - sin(angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
elif ((spacing>0) & (sense == 1)):
ax.plot(x-L*cos(theta) + cos(-angleHead+theta)*(segmentHead), y-L*sin(theta) + sin(-angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
elif ((spacing<=0) & (sense == 0)):
ax.plot(x-L*cos(theta) + cos(angleHead+theta)*(segmentHead), y-L*sin(theta) + sin(angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
elif ((spacing<=0) & (sense == 1)):
ax.plot(x+L*cos(theta) - cos(-angleHead+theta)*(segmentHead), y+L*sin(theta) - sin(-angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
else:
raise ValueError("sense must be 0 or 1")
def plotArrow(x,y,theta, L=1, color="r", sense=0, angleHead=20.0 * pi/180.0,headL = .5,ax=plt,linewidth = 1):
# sense 0: sinistral, 1:dextral
x = x# + sin(theta)*spacing
y = y# - cos(theta)*spacing
segment = np.array((-1,1)) * L
segmentHead = np.array((0,2)) * L*headL
ax.plot(x+cos(theta)*segment, y + sin(theta)*segment,color=color,linewidth=linewidth)
if sense == 0:
ax.plot(x+L*cos(theta) - cos(angleHead+theta)*(segmentHead), y+L*sin(theta) - sin(angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
# elif ((spacing>0) & (sense == 1)):
ax.plot(x+L*cos(theta) - cos(-angleHead+theta)*(segmentHead), y+L*sin(theta) - sin(-angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
elif sense == 1:
# ax.plot(x-L*cos(theta) + cos(-angleHead+theta)*(segmentHead), y-L*sin(theta) + sin(-angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
ax.plot(x-L*cos(theta) + cos(angleHead+theta)*(segmentHead), y-L*sin(theta) + sin(angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
# elif ((spacing<=0) & (sense == 1)):
ax.plot(x-L*cos(theta) + cos(-angleHead+theta)*(segmentHead), y-L*sin(theta) + sin(-angleHead+theta)*(segmentHead),color=color,linewidth=linewidth)
else:
raise ValueError("sense must be 0 or 1")
def plotFaultDiagram(Tau,psi, L=1,colorFault="r",colorSigma1="b",Larrow=.15,PosArrow=.66,angleHeadArrow=20.0 * pi/180.0, spacing=.1,ax=plt,refAxes=0,faultLinewidth=1,arrowLinewidth=1,sigma1Linewidth=1,polar=0):
segment = np.array((-1,1)) * L
thetaA = psi+30*pi/180
thetaB = psi-30*pi/180
if (refAxes==0):
Tau = Tau
psiPos = psi*degree
else:
Tau = ( (Tau - refAxes.axis()[0])/(refAxes.axis()[1]-refAxes.axis()[0]) - ax.axis()[0] ) * (ax.axis()[1]-ax.axis()[0])
psiPos = ( (psi*degree - refAxes.axis()[2])/(refAxes.axis()[3]-refAxes.axis()[2]) - ax.axis()[2] ) * (ax.axis()[3]-ax.axis()[2])
if (polar==0):
# Sigma1 dir
ax.plot(Tau+cos(psi)*segment, psiPos + sin(psi)*segment,color=colorSigma1,linewidth=sigma1Linewidth)
# Faults
ax.plot(Tau+cos(thetaA)*segment, psiPos + sin(thetaA)*segment,color=[.8,.5,.2],linewidth=faultLinewidth)
ax.plot(Tau+cos(thetaB)*segment, psiPos + sin(thetaB)*segment,color=[.6,.3,.6],linewidth=faultLinewidth)
else:
# Sigma1 dir
ax.plot(Tau*cos(psi)+cos(psi)*segment, Tau*sin(psi) + sin(psi)*segment,color=colorSigma1,linewidth=sigma1Linewidth)
# Faults
ax.plot(Tau*cos(psi)+cos(thetaA)*segment, Tau*sin(psi) + sin(thetaA)*segment,color=[.8,.5,.2],linewidth=faultLinewidth)
ax.plot(Tau*cos(psi)+cos(thetaB)*segment, Tau*sin(psi) + sin(thetaB)*segment,color=[.6,.3,.6],linewidth=faultLinewidth)
# ax.plot(Tau+cos(thetaA)*segment, psiPos + sin(thetaA)*segment,color=colorFault,linewidth=faultLinewidth)
# ax.plot(Tau+cos(thetaB)*segment, psiPos + sin(thetaB)*segment,color=colorFault,linewidth=faultLinewidth)
# Arrows
# All arrows
# plotFaultArrow(Tau-cos(thetaA)*PosArrow*L,psiPos-sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing=spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
# plotFaultArrow(Tau-cos(thetaA)*PosArrow*L,psiPos-sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
# plotFaultArrow(Tau-cos(thetaB)*PosArrow*L,psiPos-sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
# plotFaultArrow(Tau-cos(thetaB)*PosArrow*L,psiPos-sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing= spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
#
# plotFaultArrow(Tau+cos(thetaA)*PosArrow*L,psiPos+sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing= spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
# plotFaultArrow(Tau+cos(thetaA)*PosArrow*L,psiPos+sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
# plotFaultArrow(Tau+cos(thetaB)*PosArrow*L,psiPos+sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
# plotFaultArrow(Tau+cos(thetaB)*PosArrow*L,psiPos+sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing= spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
#
#
# # Outer arrows only
# plotFaultArrow(Tau-cos(thetaA)*PosArrow*L,psiPos-sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing=spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
# plotFaultArrow(Tau-cos(thetaB)*PosArrow*L,psiPos-sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
#
# plotFaultArrow(Tau+cos(thetaA)*PosArrow*L,psiPos+sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
# plotFaultArrow(Tau+cos(thetaB)*PosArrow*L,psiPos+sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing= spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
#
if (polar==0):
# Inner arrows only
plotFaultArrow(Tau-cos(thetaA)*PosArrow*L,psiPos-sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
plotFaultArrow(Tau-cos(thetaB)*PosArrow*L,psiPos-sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing= spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
plotFaultArrow(Tau+cos(thetaA)*PosArrow*L,psiPos+sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing= spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
plotFaultArrow(Tau+cos(thetaB)*PosArrow*L,psiPos+sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
else:
# Inner arrows only
plotFaultArrow(Tau*cos(psi)-cos(thetaA)*PosArrow*L,Tau*sin(psi)-sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing=-spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
plotFaultArrow(Tau*cos(psi)-cos(thetaB)*PosArrow*L,Tau*sin(psi)-sin(thetaB)*PosArrow*L,thetaB,sense=0,spacing= spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
        plotFaultArrow(Tau*cos(psi)+cos(thetaA)*PosArrow*L,Tau*sin(psi)+sin(thetaA)*PosArrow*L,thetaA,sense=1,spacing= spacing,L=Larrow,ax=ax,linewidth=arrowLinewidth,angleHead=angleHeadArrow)
#!/usr/bin/env python
"""Tests for cogent3.util.unit_test, extension of the built-in PyUnit framework.
"""
from sys import exc_info
import numpy
from numpy import array, inf, log, zeros
# SUPPORT2425
# from __future__ import with_statement
from cogent3.util.unit_test import FakeRandom, TestCase, main
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2019, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "2019.12.6a"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
class FakeRandomTests(TestCase):
"""Tests FakeRandom class."""
def test_call_constant(self):
"""FakeRandom __call__ should return next item from list if constant"""
const = FakeRandom([1])
self.assertEqual(const(), 1)
self.assertRaises(IndexError, const)
def test_call_constant_wrap(self):
"""FakeRandom __call__ should wrap for one-item list if specified"""
const = FakeRandom([1], True)
for i in range(10):
self.assertEqual(const(), True)
def test_call_var(self):
"""FakeRandom __call__ should work with a multi-item list"""
f = FakeRandom([1, 2, 3])
self.assertEqual(f(), 1)
self.assertEqual(f(), 2)
self.assertEqual(f(), 3)
self.assertRaises(IndexError, f)
def test_call_var_wrap(self):
"""FakeRandom __call__ should work with a multi-item wrapped list"""
f = FakeRandom([1, 2, 3], True)
result = [f() for i in range(10)]
self.assertEqual(result, [1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
def test_cal_var_args(self):
"""FakeRandom __call__ should ignore extra args"""
f = FakeRandom([[1, 2, 3]], True)
for i in range(5):
result = f((5, 5)) # shape parameter ignored
self.assertEqual(result, [1, 2, 3])
class TestCaseTests(TestCase):
"""Tests for extension of the built-in unittest framework.
For each test, includes an example of success and failure.
"""
unequal_pairs = [
(1, 0),
([], ()),
(None, 0),
("", " "),
(1, "1"),
(0, "0"),
("", None),
(array([1, 2, 3]), array([1, 2, 4])),
(array([[1, 2], [3, 4]]), array([[1.0, 2.0], [3.0, 4.1]])),
(array([1]), array([1, 2])),
(zeros(0), array([1])),
(array([1, 1, 1]), array([1])),
(array([[1, 1], [1, 1]]), array([1, 1, 1, 1])),
(zeros(0), None),
(zeros(3), zeros(5)),
(zeros(0), ""),
]
equal_pairs = [
(1, 1),
(0, 0),
(5, 5),
(5, 5.0),
(0, 0.0),
("", ""),
(" ", " "),
("a", "a"),
(None, None),
([0, 1], [0.0, 1.0]),
(array([1, 2, 3]), array([1, 2, 3])),
(array([[1, 2], [3, 4]]), array([[1.0, 2.0], [3.0, 4.0]])),
        (zeros(0), zeros(0)),
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import Polygon as plg
import mmocr.utils as utils
def ignore_pred(pred_boxes, gt_ignored_index, gt_polys, precision_thr):
"""Ignore the predicted box if it hits any ignored ground truth.
Args:
pred_boxes (list[ndarray or list]): The predicted boxes of one image.
gt_ignored_index (list[int]): The ignored ground truth index list.
gt_polys (list[Polygon]): The polygon list of one image.
precision_thr (float): The precision threshold.
Returns:
pred_polys (list[Polygon]): The predicted polygon list.
pred_points (list[list]): The predicted box list represented
by point sequences.
pred_ignored_index (list[int]): The ignored text index list.
"""
assert isinstance(pred_boxes, list)
assert isinstance(gt_ignored_index, list)
assert isinstance(gt_polys, list)
assert 0 <= precision_thr <= 1
pred_polys = []
pred_points = []
pred_ignored_index = []
gt_ignored_num = len(gt_ignored_index)
# get detection polygons
for box_id, box in enumerate(pred_boxes):
poly = points2polygon(box)
pred_polys.append(poly)
pred_points.append(box)
if gt_ignored_num < 1:
continue
# ignore the current detection box
# if its overlap with any ignored gt > precision_thr
for ignored_box_id in gt_ignored_index:
ignored_box = gt_polys[ignored_box_id]
inter_area, _ = poly_intersection(poly, ignored_box)
area = poly.area()
precision = 0 if area == 0 else inter_area / area
if precision > precision_thr:
pred_ignored_index.append(box_id)
break
return pred_polys, pred_points, pred_ignored_index
def compute_hmean(accum_hit_recall, accum_hit_prec, gt_num, pred_num):
"""Compute hmean given hit number, ground truth number and prediction
number.
Args:
accum_hit_recall (int|float): Accumulated hits for computing recall.
accum_hit_prec (int|float): Accumulated hits for computing precision.
gt_num (int): Ground truth number.
pred_num (int): Prediction number.
Returns:
recall (float): The recall value.
precision (float): The precision value.
hmean (float): The hmean value.
"""
assert isinstance(accum_hit_recall, (float, int))
assert isinstance(accum_hit_prec, (float, int))
assert isinstance(gt_num, int)
assert isinstance(pred_num, int)
assert accum_hit_recall >= 0.0
assert accum_hit_prec >= 0.0
assert gt_num >= 0.0
assert pred_num >= 0.0
if gt_num == 0:
recall = 1.0
precision = 0.0 if pred_num > 0 else 1.0
else:
recall = float(accum_hit_recall) / gt_num
precision = 0.0 if pred_num == 0 else float(accum_hit_prec) / pred_num
denom = recall + precision
hmean = 0.0 if denom == 0 else (2.0 * precision * recall / denom)
return recall, precision, hmean
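# Example (sketch): 8 recall hits and 8 precision hits over 10 gt boxes and
# 12 predictions give recall=0.8, precision=8/12≈0.667 and hmean≈0.727.
#   recall, precision, hmean = compute_hmean(8, 8, 10, 12)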
def box2polygon(box):
"""Convert box to polygon.
Args:
box (ndarray or list): A ndarray or a list of shape (4)
that indicates 2 points.
Returns:
polygon (Polygon): A polygon object.
"""
if isinstance(box, list):
box = np.array(box)
assert isinstance(box, np.ndarray)
assert box.size == 4
boundary = np.array(
[box[0], box[1], box[2], box[1], box[2], box[3], box[0], box[3]])
point_mat = boundary.reshape([-1, 2])
return plg.Polygon(point_mat)
def points2polygon(points):
"""Convert k points to 1 polygon.
Args:
points (ndarray or list): A ndarray or a list of shape (2k)
that indicates k points.
Returns:
polygon (Polygon): A polygon object.
"""
if isinstance(points, list):
points = np.array(points)
assert isinstance(points, np.ndarray)
assert (points.size % 2 == 0) and (points.size >= 8)
point_mat = points.reshape([-1, 2])
return plg.Polygon(point_mat)
def poly_intersection(poly_det, poly_gt):
"""Calculate the intersection area between two polygon.
Args:
poly_det (Polygon): A polygon predicted by detector.
poly_gt (Polygon): A gt polygon.
Returns:
intersection_area (float): The intersection area between two polygons.
"""
assert isinstance(poly_det, plg.Polygon)
assert isinstance(poly_gt, plg.Polygon)
poly_inter = poly_det & poly_gt
if len(poly_inter) == 0:
return 0, poly_inter
return poly_inter.area(), poly_inter
def poly_union(poly_det, poly_gt):
"""Calculate the union area between two polygon.
Args:
poly_det (Polygon): A polygon predicted by detector.
poly_gt (Polygon): A gt polygon.
Returns:
union_area (float): The union area between two polygons.
"""
assert isinstance(poly_det, plg.Polygon)
assert isinstance(poly_gt, plg.Polygon)
area_det = poly_det.area()
area_gt = poly_gt.area()
area_inters, _ = poly_intersection(poly_det, poly_gt)
return area_det + area_gt - area_inters
def boundary_iou(src, target):
"""Calculate the IOU between two boundaries.
Args:
src (list): Source boundary.
target (list): Target boundary.
Returns:
iou (float): The iou between two boundaries.
"""
assert utils.valid_boundary(src, False)
assert utils.valid_boundary(target, False)
src_poly = points2polygon(src)
target_poly = points2polygon(target)
return poly_iou(src_poly, target_poly)
def poly_iou(poly_det, poly_gt):
"""Calculate the IOU between two polygons.
Args:
poly_det (Polygon): A polygon predicted by detector.
poly_gt (Polygon): A gt polygon.
Returns:
iou (float): The IOU between two polygons.
"""
assert isinstance(poly_det, plg.Polygon)
assert isinstance(poly_gt, plg.Polygon)
area_inters, _ = poly_intersection(poly_det, poly_gt)
return area_inters / poly_union(poly_det, poly_gt)
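# Example (sketch): two axis-aligned boxes [0,0,2,2] and [1,1,3,3] overlap in a
# 1x1 square, so IoU = 1 / (4 + 4 - 1) = 1/7 ≈ 0.143.
#   iou = poly_iou(box2polygon([0, 0, 2, 2]), box2polygon([1, 1, 3, 3]))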
def one2one_match_ic13(gt_id, det_id, recall_mat, precision_mat, recall_thr,
precision_thr):
"""One-to-One match gt and det with icdar2013 standards.
Args:
gt_id (int): The ground truth id index.
det_id (int): The detection result id index.
recall_mat (ndarray): `gt_num x det_num` matrix with element (i,j)
being the recall ratio of gt i to det j.
precision_mat (ndarray): `gt_num x det_num` matrix with element (i,j)
being the precision ratio of gt i to det j.
recall_thr (float): The recall threshold.
precision_thr (float): The precision threshold.
Returns:
True|False: Whether the gt and det are matched.
"""
assert isinstance(gt_id, int)
assert isinstance(det_id, int)
assert isinstance(recall_mat, np.ndarray)
assert isinstance(precision_mat, np.ndarray)
assert 0 <= recall_thr <= 1
assert 0 <= precision_thr <= 1
cont = 0
for i in range(recall_mat.shape[1]):
if recall_mat[gt_id,
i] > recall_thr and precision_mat[gt_id,
i] > precision_thr:
cont += 1
if cont != 1:
return False
cont = 0
for i in range(recall_mat.shape[0]):
if recall_mat[i, det_id] > recall_thr and precision_mat[
i, det_id] > precision_thr:
cont += 1
if cont != 1:
return False
if recall_mat[gt_id, det_id] > recall_thr and precision_mat[
gt_id, det_id] > precision_thr:
return True
return False
def one2many_match_ic13(gt_id, recall_mat, precision_mat, recall_thr,
precision_thr, gt_match_flag, det_match_flag,
det_ignored_index):
"""One-to-Many match gt and detections with icdar2013 standards.
Args:
gt_id (int): gt index.
recall_mat (ndarray): `gt_num x det_num` matrix with element (i,j)
being the recall ratio of gt i to det j.
precision_mat (ndarray): `gt_num x det_num` matrix with element (i,j)
being the precision ratio of gt i to det j.
recall_thr (float): The recall threshold.
precision_thr (float): The precision threshold.
gt_match_flag (ndarray): An array indicates each gt matched already.
det_match_flag (ndarray): An array indicates each box has been
matched already or not.
det_ignored_index (list): A list indicates each detection box can be
ignored or not.
Returns:
tuple (True|False, list): The first indicates the gt is matched or not;
the second is the matched detection ids.
"""
assert isinstance(gt_id, int)
assert isinstance(recall_mat, np.ndarray)
assert isinstance(precision_mat, np.ndarray)
assert 0 <= recall_thr <= 1
assert 0 <= precision_thr <= 1
assert isinstance(gt_match_flag, list)
assert isinstance(det_match_flag, list)
assert isinstance(det_ignored_index, list)
many_sum = 0.
det_ids = []
for det_id in range(recall_mat.shape[1]):
if gt_match_flag[gt_id] == 0 and det_match_flag[
det_id] == 0 and det_id not in det_ignored_index:
if precision_mat[gt_id, det_id] >= precision_thr:
many_sum += recall_mat[gt_id, det_id]
det_ids.append(det_id)
if many_sum >= recall_thr:
return True, det_ids
return False, []
def many2one_match_ic13(det_id, recall_mat, precision_mat, recall_thr,
precision_thr, gt_match_flag, det_match_flag,
gt_ignored_index):
"""Many-to-One match gt and detections with icdar2013 standards.
Args:
det_id (int): Detection index.
recall_mat (ndarray): `gt_num x det_num` matrix with element (i,j)
being the recall ratio of gt i to det j.
precision_mat (ndarray): `gt_num x det_num` matrix with element (i,j)
being the precision ratio of gt i to det j.
recall_thr (float): The recall threshold.
precision_thr (float): The precision threshold.
gt_match_flag (ndarray): An array indicates each gt has been matched
already.
det_match_flag (ndarray): An array indicates each detection box has
been matched already or not.
gt_ignored_index (list): A list indicates each gt box can be ignored
or not.
Returns:
tuple (True|False, list): The first indicates the detection is matched
or not; the second is the matched gt ids.
"""
assert isinstance(det_id, int)
assert isinstance(recall_mat, np.ndarray)
assert isinstance(precision_mat, np.ndarray)
assert 0 <= recall_thr <= 1
assert 0 <= precision_thr <= 1
assert isinstance(gt_match_flag, list)
assert isinstance(det_match_flag, list)
assert isinstance(gt_ignored_index, list)
many_sum = 0.
gt_ids = []
for gt_id in range(recall_mat.shape[0]):
if gt_match_flag[gt_id] == 0 and det_match_flag[
det_id] == 0 and gt_id not in gt_ignored_index:
if recall_mat[gt_id, det_id] >= recall_thr:
many_sum += precision_mat[gt_id, det_id]
gt_ids.append(gt_id)
if many_sum >= precision_thr:
return True, gt_ids
return False, []
def points_center(points):
assert isinstance(points, np.ndarray)
assert points.size % 2 == 0
points = points.reshape([-1, 2])
return np.mean(points, axis=0)
def point_distance(p1, p2):
assert isinstance(p1, np.ndarray)
assert isinstance(p2, np.ndarray)
assert p1.size == 2
assert p2.size == 2
dist = np.square(p2 - p1)
dist = np.sum(dist)
dist = np.sqrt(dist)
return dist
def box_center_distance(b1, b2):
assert isinstance(b1, np.ndarray)
assert isinstance(b2, np.ndarray)
return point_distance(points_center(b1), points_center(b2))
def box_diag(box):
assert isinstance(box, np.ndarray)
assert box.size == 8
return point_distance(box[0:2], box[4:6])
def filter_2dlist_result(results, scores, score_thr):
"""Find out detected results whose score > score_thr.
Args:
results (list[list[float]]): The result list.
score (list): The score list.
score_thr (float): The score threshold.
Returns:
valid_results (list[list[float]]): The valid results.
valid_score (list[float]): The scores which correspond to the valid
results.
"""
assert isinstance(results, list)
assert len(results) == len(scores)
assert isinstance(score_thr, float)
assert 0 <= score_thr <= 1
inds = np.array(scores) > score_thr
valid_results = [results[idx] for idx in np.where(inds)[0].tolist()]
    valid_scores = [scores[idx] for idx in np.where(inds)[0].tolist()]
    return valid_results, valid_scores
'''--------------------------- Core Script ---------------------------------'''
'''
Description: This library is based on the algorithms described in
[1] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>, <NAME>, <NAME>,
<NAME> (2016). Matrix Profile I: All Pairs Similarity Joins
for Time Series: A Unifying View that Includes Motifs, Discords and
Shapelets. IEEE ICDM 2016.
'''
import numpy as np
import time
import matplotlib.pyplot as plt
def sliding_dot_product(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m:n]
def sliding_dot_product_stomp(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m-1:n]
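# Sanity check (sketch): the FFT-based sliding dot product should match the
# naive O(n*m) computation; index i of the result corresponds to window t[i:i+m].
# The arrays below are illustration values only.
#   q = np.random.randn(8); t = np.random.randn(100); m = q.size
#   qt_fft = sliding_dot_product_stomp(q, t).real
#   qt_naive = np.array([np.dot(q, t[i:i+m]) for i in range(t.size - m + 1)])
#   assert np.allclose(qt_fft, qt_naive)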
def calculate_distance_profile(q, t, qt, a, sum_q, sum_q2, mean_t, sigma_t):
n = t.size
m = q.size
b = np.zeros(n - m)
dist = np.zeros(n - m)
for i in range(0, n - m):
b[i] = -2 * (qt[i].real - sum_q * mean_t[i]) / sigma_t[i]
dist[i] = a[i] + b[i] + sum_q2
return np.sqrt(np.abs(dist))
# The code below takes O(m) for each subsequence
# you should replace it for MASS
def compute_mean_std_for_query(Q):
# Compute Q stats -- O(n)
sumQ = np.sum(Q)
sumQ2 = np.sum(np.power(Q, 2))
return sumQ, sumQ2
def pre_compute_mean_std_for_TS(ta, m):
na = len(ta)
sum_t = np.zeros(na - m)
sum_t2 = np.zeros(na - m)
# Compute the stats for t
cumulative_sum_t = np.cumsum(ta)
cumulative_sum_t2 = np.cumsum(np.power(ta, 2))
for i in range(na - m):
sum_t[i] = cumulative_sum_t[i + m] - cumulative_sum_t[i]
sum_t2[i] = cumulative_sum_t2[i + m] - cumulative_sum_t2[i]
mean_t = np.divide(sum_t, m)
mean_t2 = np.divide(sum_t2, m)
mean_t_p2 = np.power(mean_t, 2)
sigma_t2 = np.subtract(mean_t2, mean_t_p2)
sigma_t = np.sqrt(sigma_t2)
return sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2
def pre_compute_mean_std_for_TS_stomp(ta, m):
na = len(ta)
# Compute the stats for t
cumulative_sum_t = np.cumsum(ta)
cumulative_sum_t2 = np.cumsum(np.power(ta, 2))
sum_t = (cumulative_sum_t[m-1:na] - np.concatenate(([0], cumulative_sum_t[0:na-m])))
sum_t2 = (cumulative_sum_t2[m-1:na] - np.concatenate(([0], cumulative_sum_t2[0:na-m])))
mean_t = np.divide(sum_t, m)
mean_t2 = np.divide(sum_t2, m)
mean_t_p2 = np.power(mean_t, 2)
sigma_t2 = np.subtract(mean_t2, mean_t_p2)
sigma_t = np.sqrt(sigma_t2)
return sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2
# MUEEN’S ALGORITHM FOR SIMILARITY SEARCH (MASS)
def mass(Q, T, a, meanT, sigmaT):
# Z-Normalisation
if np.std(Q) != 0:
Q = (Q - np.mean(Q)) / np.std(Q)
QT = sliding_dot_product(Q, T)
sumQ, sumQ2 = compute_mean_std_for_query(Q)
return calculate_distance_profile(Q, T, QT, a, sumQ, sumQ2, meanT, sigmaT)
def element_wise_min(Pab, Iab, D, idx, ignore_trivial, m):
for i in range(0, len(D)):
if not ignore_trivial or (np.abs(idx - i) > m/2.0): # if it's a self-join, ignore trivial matches in [-m/2,m/2]
if D[i] < Pab[i]:
Pab[i] = D[i]
Iab[i] = idx
return Pab, Iab
def stamp(Ta, Tb, m):
"""
Compute the Matrix Profile between time-series Ta and Tb.
If Ta==Tb, the operation is a self-join and trivial matches are ignored.
:param Ta: time-series, np.array
:param Tb: time-series, np.array
:param m: subsequence length
:return: Matrix Profile, Nearest-Neighbor indexes
"""
nb = len(Tb)
na = len(Ta)
Pab = np.ones(na - m)* np.inf
Iab = np.zeros(na - m)
idxes = np.arange(nb - m + 1)
sumT, sumT2, meanT, meanT_2, meanTP2, sigmaT, sigmaT2 = pre_compute_mean_std_for_TS(Ta, m)
a = np.zeros(na - m)
for i in range(0, na - m):
a[i] = (sumT2[i] - 2 * sumT[i] * meanT[i] + m * meanTP2[i]) / sigmaT2[i]
ignore_trivial = np.atleast_1d(Ta == Tb).all()
for idx in idxes:
D = mass(Tb[idx: idx + m], Ta, a, meanT, sigmaT)
if(ignore_trivial):
#ignore trivial minimum and maximum
minIdx = int(np.maximum(idx-m/2.0,0))
maxIdx = int(np.minimum(idx+m/2.0,len(D)))
D[minIdx:maxIdx:1] = np.inf
        Iab[Pab>D] = idx
Pab = np.minimum(Pab,D)
return Pab, Iab
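# Example (sketch): self-join matrix profile of a signal with a planted motif.
# The lowest values of Pab should mark the two occurrences of the motif; the
# signal, noise level and subsequence length are illustration values only.
#   t = 0.3*np.random.randn(500)
#   t[100:130] += np.sin(np.linspace(0, 2*np.pi, 30))   # plant a motif
#   t[350:380] += np.sin(np.linspace(0, 2*np.pi, 30))   # ... and its match
#   Pab, Iab = stamp(t, t, 30)
#   motif_idx = np.argmin(Pab)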
def stomp(T, m):
"""
Compute the Matrix Profile with self join for T
:param T: time-series, np.array
:param Tb: time-series, np.array
:param m: subsequence length
:return: Matrix Profile, Nearest-Neighbor indexes
"""
epsilon = 1e-10
n = len(T)
seq_l = n - m
_, _, meanT, _, _, sigmaT, _ = pre_compute_mean_std_for_TS_stomp(T, m)
Pab = np.full(seq_l+1,np.inf)
Iab = np.zeros(n - m +1)
ignore_trivial = True
for idx in range(0,seq_l):
        # Guard against zero standard deviation when normalizing
Q_std = sigmaT[idx] if sigmaT[idx] > epsilon else epsilon
if idx == 0:
QT = sliding_dot_product_stomp(T[0:m], T).real
QT_first = np.copy(QT)
else:
QT[1:] = QT[0:-1]- (T[0:seq_l] * T[idx - 1]) + (T[m:n] * T[idx + m - 1])
QT[0] = QT_first[idx]
# Calculate distance profile
D = (2 * (m - (QT - m * meanT * meanT[idx]) / (Q_std * sigmaT)))
D[D<epsilon] = 0
if (ignore_trivial):
# ignore trivial minimum and maximum
            minIdx = int(np.maximum(idx - m / 2.0, 0))
# -*- coding: utf-8 -*-
# Authors: <NAME> <<EMAIL>>
import numpy as np
from joblib import Parallel, delayed
from sklearn.preprocessing import StandardScaler
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_memory
from .gaussian_knockoff import (_estimate_distribution,
gaussian_knockoff_generation)
from .stat_coef_diff import stat_coef_diff
from .utils import fdr_threshold, quantile_aggregation
def knockoff_aggregation(X, y, centered=True, shrink=False,
construct_method='equi', fdr=0.1, fdr_control='bhq',
reshaping_function=None, offset=1,
statistic='lasso_cv', cov_estimator='ledoit_wolf',
joblib_verbose=0, n_bootstraps=25, n_jobs=1,
adaptive_aggregation=False, gamma=0.5, gamma_min=0.05,
verbose=False, memory=None, random_state=None):
# unnecessary to have n_jobs > number of bootstraps
n_jobs = min(n_bootstraps, n_jobs)
if centered:
X = StandardScaler().fit_transform(X)
mu, Sigma = _estimate_distribution(
X, shrink=shrink, cov_estimator=cov_estimator)
mem = check_memory(memory)
stat_coef_diff_cached = mem.cache(stat_coef_diff,
ignore=['n_jobs', 'joblib_verbose'])
if n_bootstraps == 1:
X_tilde = gaussian_knockoff_generation(
X, mu, Sigma, method=construct_method,
memory=memory, seed=random_state)
ko_stat = stat_coef_diff_cached(X, X_tilde, y, method=statistic)
pvals = _empirical_pval(ko_stat, offset)
threshold = fdr_threshold(pvals, fdr=fdr,
method=fdr_control)
selected = np.where(pvals <= threshold)[0]
if verbose:
return selected, pvals
return selected
if isinstance(random_state, (int, np.int32, np.int64)):
rng = check_random_state(random_state)
elif random_state is None:
rng = check_random_state(0)
else:
raise TypeError('Wrong type for random_state')
    seed_list = rng.randint(1, np.iinfo(np.int32).max, n_bootstraps)
''' Policy Gradient implementation customized a bit for
solving the trading problem'''
# stolen shamelessly and adapted December 2016 by <NAME>
# was originally:
'''Solution to the Cartpole problem using Policy Gradients in Tensorflow.'''
# written October 2016 by <NAME>
# inspired by gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5
import numpy as np
import gym
import tensorflow as tf
import pdb
import logging
import os.path
import pandas as pd
import time
import gym_trading
import pickle as pkl
import os
import trading_env as te
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
log.info('%s logger started.',__name__)
np.seterr(all='raise')
class PolicyGradient(object) :
""" Policy Gradient implementation in tensor flow.
"""
def __init__(self,
sess, # tensorflow session
obs_dim, # observation shape
num_actions, # number of possible actions
NumOfLayers,
Num_Of_variables,
LR,
architecture,
actFunc,
avgfilename, #name for pickle file of average values
Modelfilename, #name for model to save
num_hiddenRNN=24,
regulizer=None,
regulizerScale=0.01,
neurons_per_dim=32, # hidden layer will have obs_dim * neurons_per_dim neurons
learning_rate=1e-2, # learning rate
gamma = 0.9, # reward discounting
decay = 0.9, # gradient decay rate
DropoutMemoryStates=None,
DropoutVariational_recurrent=False,
state_keep_prob=1,
output_keep_prob=1
):
self.actFunc=actFunc
self.obs_dim=obs_dim
self.Reg= regulizer
self.RegScale= regulizerScale
self._sess = sess
self.learningRule=LR
self._gamma = gamma
self.num_hiddenRNN=num_hiddenRNN
self._tf_model = {}
self._num_actions = num_actions
self._num_stocks = num_actions
self._variables=Num_Of_variables
self.NumofLayers=NumOfLayers # NUM OF HIDDEN LAYERS NOW!!!!!
hidden_neurons = obs_dim * neurons_per_dim
self.architecture = architecture
self.last100avg = []
self.filename = avgfilename
self.filenameModel= Modelfilename
self.DropoutMemoryStates=DropoutMemoryStates
        self.DropoutVariational_recurrent=DropoutVariational_recurrent
self.output_keep_prob=output_keep_prob
self.state_keep_prob=state_keep_prob
# tf.set_random_seed(1234)
'''
with tf.variable_scope('layer_one',reuse=tf.AUTO_REUSE):
L1 = tf.truncated_normal_initializer(mean=0,
stddev=1./np.sqrt(obs_dim),
dtype=tf.float32)
self._tf_model['W1'] = tf.get_variable("W1",
[obs_dim, hidden_neurons],
initializer=L1)
with tf.variable_scope('layer_two',reuse=tf.AUTO_REUSE):
L2 = tf.truncated_normal_initializer(mean=0,
stddev=1./np.sqrt(hidden_neurons),
dtype=tf.float32)
self._tf_model['W2'] = tf.get_variable("W2",
[hidden_neurons,num_actions],
initializer=L2)
######################
'''
'''
whichLayers=["layer_"+str(i) for i in range(1, self.NumofLayers+1)]
self.NameW=["W"+str(i) for i in range(1, self.NumofLayers+1)]
InputDimensions=[obs_dim]+ [hidden_neurons for i in range(0, self.NumofLayers-1)]
OutputDimensions=[hidden_neurons for i in range(0, self.NumofLayers-1)]+[self._num_actions]
for i in range(self.NumofLayers):
whichLayer=whichLayers[i]
inputsDim=InputDimensions[i]
outputDim=OutputDimensions[i]
NameW =self.NameW[i]
with tf.variable_scope(whichLayer,reuse=tf.AUTO_REUSE):
L2 = tf.contrib.layers.xavier_initializer(uniform=False, seed=1, dtype=tf.float32)
#tf.truncated_normal_initializer(mean=0,
# stddev=1./np.sqrt(hidden_neurons),
# dtype=tf.float32)
self._tf_model[NameW] = tf.get_variable(NameW,
[inputsDim,outputDim],
initializer=L2)
'''
# tf placeholders
self._tf_x = tf.placeholder(dtype=tf.float32, shape=[None, obs_dim],name="tf_x")
self._tf_y = tf.placeholder(dtype=tf.float32, shape=[None, num_actions],name="tf_y")
self._tf_epr = tf.placeholder(dtype=tf.float32, shape=[None,1], name="tf_epr")
self.X = tf.placeholder(tf.float32, shape=(None, 252, 30), name='X_for_policy')
self.actions = tf.placeholder(tf.float32, shape=(None,2), name='actions')
self.conv = tf.placeholder(tf.float32, shape=(None), name='conv')
# tf reward processing (need tf_discounted_epr for policy gradient wizardry)
self._tf_discounted_epr = self.tf_discount_rewards(self._tf_epr)
self._tf_mean, self._tf_variance= tf.nn.moments(self._tf_discounted_epr, [0],
shift=None, name="reward_moments")
self._tf_discounted_epr -= self._tf_mean
self._tf_discounted_epr /= tf.sqrt(self._tf_variance + 1e-6)
#self._saver = tf.train.Saver()
# tf optimizer op
OutputDimensions=[hidden_neurons for i in range(0, self.NumofLayers-1)]+[self._num_actions]
# Different Regularization Rules
if self.Reg=="l2":
self.Reg = tf.contrib.layers.l2_regularizer(scale=self.RegScale)
elif self.Reg=="l1":
self.Reg= tf.contrib.layers.l1_regularizer(scale=self.RegScale)
elif self.Reg=="None":
self.Reg= None
        else:
            raise ValueError("unknown regularizer: {}".format(self.Reg))
self._tf_aprob = self.tf_policy_forward(self.X,OutputDimensions)
loss = tf.nn.l2_loss(self._tf_y - self._tf_aprob) # this gradient encourages the actions taken
self._saver = tf.train.Saver(save_relative_paths=True)
# Different Learning Rules
if self.learningRule=="RMSProp":
optimizer = tf.train.RMSPropOptimizer(learning_rate, decay=decay)
elif self.learningRule=="Adam":
optimizer = tf.train.AdamOptimizer(learning_rate,
beta1=0.9,
beta2=0.999)
elif self.learningRule=="Mom":
optimizer = tf.train.MomentumOptimizer(learning_rate,
momentum=0.8,
use_locking=False,
use_nesterov=True)
elif self.learningRule=="GD":
optimizer = tf.train.GradientDescentOptimizer(learning_rate)
#self._tf_discounted_epr = self.get_grad_loss(self._tf_epr, loss)
self._tf_discounted_epr = self.get_grad_loss(self._tf_discounted_epr, loss)
tf_grads = optimizer.compute_gradients(loss, var_list=tf.trainable_variables(),
grad_loss=self._tf_discounted_epr)
def ClipIfNotNone(grad):
if grad is None:
return grad
return tf.clip_by_value(grad, -1.2, 1.2)
tf_grads_clipped = [(ClipIfNotNone(grad), var) for grad, var in tf_grads]
self.tf_grads = tf_grads
self.tf_grads_clipped = tf_grads_clipped
self._train_op = optimizer.apply_gradients(tf_grads)
def get_grads_and_clipping(self):
return self.tf_grads, self.tf_grads_clipped
def get_grad_loss(self, tf_r, diff):
grad = tf.multiply(tf_r, diff)
discount_f = lambda a, v: a*(1/v)*(1/2)
grad_loss = tf.scan(discount_f, grad, self.conv)
return grad_loss
def tf_discount_rewards(self, tf_r): #tf_r ~ [game_steps,1]
discount_f = lambda a, v: a*self._gamma + v;
tf_r_reverse = tf.scan(discount_f, tf.reverse(tf_r,[0]))
tf_discounted_r = tf.reverse(tf_r_reverse,[0])
#tf_discounted_r = tf.clip_by_value(tf_discounted_r, -1.2, 1.2)
#tf_r_reverse = tf.scan(discount_f, tf.reverse(tf_r,[True, False]))
#tf_discounted_r = tf.reverse(tf_r_reverse,[True, False])
return tf_discounted_r
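    # Sketch of what the scan above computes, in plain numpy (the reward vector
    # below is an assumed example, not part of the class): with r = [1, 0, 0, 1]
    # and gamma = 0.9 the discounted returns are [1.729, 0.81, 0.9, 1.0], i.e.
    # d[t] = r[t] + gamma * d[t+1], evaluated from the end backwards.
    #   d = np.zeros(4); running = 0.0
    #   for t in reversed(range(4)):
    #       running = r[t] + 0.9 * running
    #       d[t] = running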
def tf_policy_forward(self, x,OutputDimensions): #x ~ [1,D]
################# ################# #################
if self.actFunc=="sigmoid":
actFunc=tf.nn.sigmoid
elif self.actFunc=="relu":
actFunc=tf.nn.relu
elif self.actFunc=="lrelu":
actFunc= tf.nn.leaky_relu
elif self.actFunc=="elu":
actFunc= tf.nn.elu
elif self.actFunc=="selu":
actFunc= tf.nn.selu
        else:
            raise ValueError("unknown activation function: {}".format(self.actFunc))
init=tf.contrib.layers.xavier_initializer(uniform=False, dtype=tf.float32)
def dropout_state_filter_visitors(state):
''' Dropout of memory cells Not literature based on tensorflow code'''
if isinstance(state, tf.contrib.rnn.LSTMStateTuple): # Never perform dropout on the c state.
return tf.contrib.rnn.LSTMStateTuple(c=True, h=True)
elif isinstance(state, tf.TensorArray):
return False
return True
if self.architecture != "FFNN":
def get_a_cell(num_hidden,i):
''' Function for GRU, RNN, LSTM'''
cell_type=self.architecture
if cell_type == 'GRU':
cell = tf.nn.rnn_cell.GRUCell(num_hidden,activation=actFunc)
elif cell_type == 'LSTM':
cell = tf.nn.rnn_cell.LSTMCell(num_hidden,activation=actFunc, state_is_tuple=True)
elif cell_type == 'RNN':
cell = tf.nn.rnn_cell.BasicRNNCell(num_hidden,activation=actFunc)
# pdb.set_trace()
drop = tf.nn.rnn_cell.DropoutWrapper(cell,
input_keep_prob=1,
output_keep_prob=self.output_keep_prob,
state_keep_prob=self.state_keep_prob,
variational_recurrent=self.DropoutVariational_recurrent,
input_size=self._num_stocks*self._variables if i==0 else tf.TensorShape(num_hidden),
dtype=tf.float32,
seed=None,
dropout_state_filter_visitor=dropout_state_filter_visitors if self.DropoutMemoryStates==True else None )
return drop
''' Create Stacked Model '''
with tf.name_scope('lstm'):
cell = tf.nn.rnn_cell.MultiRNNCell(
[get_a_cell(self.num_hiddenRNN, i) for i in range(self.NumofLayers)])
''' Make it runable '''
with tf.variable_scope('RNN', initializer=tf.contrib.layers.xavier_initializer()):
h, _ = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
else:
''' FFN Has not been tested '''
#################### #################### ###################
for i in range(0,self.NumofLayers):
outputDim=OutputDimensions[i] #OutputDimensions[i]
#if i ==0 and self.architecture == "LSTM":
#
# h, _ = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
if i ==0 and self.architecture == "FFNN":
h=tf.contrib.layers.fully_connected(h,
outputDim,
activation_fn=actFunc,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=init,
weights_regularizer=self.Reg,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None)
elif i>0 and i < max(range(self.NumofLayers)) and self.architecture == "FFNN":
h=tf.contrib.layers.fully_connected(h,
outputDim,
activation_fn=actFunc,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=init,
weights_regularizer=self.Reg,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None)
#################### #################### ####################
''' last Layer to output aka softmax shit '''
outputDim=10
aaa=self.num_hiddenRNN*252
h0 = tf.reshape(h, [-1,aaa ])
h1=tf.contrib.layers.fully_connected(h0,#tf.contrib.layers.flatten(h),
outputDim,
activation_fn=None,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=init,
weights_regularizer=self.Reg,
biases_initializer=tf.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None)
logp=h1
################# ################# #################
sign=tf.sign(logp)
absLogP=tf.abs(logp)
p = tf.multiply(sign,tf.nn.softmax(absLogP))
return p
    @staticmethod
    def GaussianNoise(inputs, returns):
stdd=np.std(returns,axis=0)
noise=np.random.normal(0,stdd*2)
t1= (inputs+noise)
if abs(t1).sum()==0:
output=t1
else:
output=t1/abs(t1).sum()
return output, stdd
def train_model(self, env, episodes=100,
load_model = False, # load model from checkpoint if available:?
model_dir = "/Users/andrewplaate/mlp3/SavedModels/", log_freq=10 ) :
# initialize variables and load model
init_op = tf.global_variables_initializer()
self._sess.run(init_op)
if load_model:
ckpt = tf.train.get_checkpoint_state(model_dir)
print(tf.train.latest_checkpoint(model_dir))
if ckpt and ckpt.model_checkpoint_path:
savr = tf.train.import_meta_graph(ckpt.model_checkpoint_path+'.meta')
out = savr.restore(self._sess, ckpt.model_checkpoint_path)
print("Model restored from ",ckpt.model_checkpoint_path)
else:
print('No checkpoint found at: ',model_dir)
episode = 0
observation,Returns = env.reset()
xs,rs,ys = [],[],[] # environment info
running_reward = 0
reward_sum = 0
# training loop
day = 0
simrors = np.zeros(episodes)
mktrors = np.zeros(episodes)
alldf = None
victory = False
self.sort = np.array([])
self.NomReward = np.array([])
self.mean_sort = np.array([])
t=time.time()
while episode < episodes and not victory:
# stochastically sample a policy from the network
x=observation
WIDTH= self._variables*self._num_stocks
feed = {self.X: np.reshape(x, (-1, 252, WIDTH))}
aprob = self._sess.run(self._tf_aprob,feed)
#pdb.set_trace()
aprob, std=PolicyGradient.GaussianNoise(aprob,Returns)
action=aprob
#action = np.random.choice(self._num_actions, p=aprob)
#label = np.zeros_like(aprob) ; label[action] = 1 # make a training 'label'
label=action
# step the environment and get new measurements
observation, reward, done, sort, info, Returns = env.step(action)
nominal_reward=info["nominal_reward"]
reward_sum += reward
# record game history
xs.append(x)
ys.append(label)
rs.append(reward)
day += 1
done = True
if done:
print(time.time()-t)
t=time.time()
running_reward = running_reward * 0.99 + reward_sum * 0.01
#epx = np.vstack(xs)
#epx = observation
#epX = np.reshape(np.vstack(xs), (-1, 252, WIDTH))
epX = x
#epr = np.vstack(rs)
#epy = np.vstack(ys)
epr = reward.reshape(252,1)
epy = label
                self.NomReward = np.append(self.NomReward, nominal_reward)
# Lint as: python3
"""The on robot sensor classes."""
import enum
from typing import Any, Callable, Iterable, Sequence, Type, Text, Union
import gin
import gym
import numpy as np
from pybullet_envs.minitaur.envs_v2.sensors import sensor
from pybullet_envs.minitaur.envs_v2.utilities import noise_generators
@gin.constants_from_enum
class IMUChannel(enum.Enum):
ROLL = 1,
PITCH = 2,
YAW = 3,
ROLL_RATE = 4,
PITCH_RATE = 5,
YAW_RATE = 6,
@gin.configurable
class IMUSensor(sensor.Sensor):
"""An IMU sensor."""
def __init__(
self,
name: Text = "IMU",
dtype: Type[Any] = np.float64,
channels: Sequence[IMUChannel] = None,
lower_bound: Union[float, Iterable[float]] = None,
upper_bound: Union[float, Iterable[float]] = None,
noise_generator: Union[Callable[..., Any],
noise_generators.NoiseGenerator] = None,
sensor_latency: Union[float, Sequence[float]] = 0.0,
):
"""Constructs IMUSensor.
Generates separate IMU value channels as per configuration.
Args:
name: the name of the sensor.
dtype: data type of sensor value.
channels: value channels wants to subscribe. Must be members of the
IMUChannel class.
lower_bound: The lower bounds of the sensor reading.
upper_bound: The upper bounds of the sensor reading.
noise_generator: Used to add noise to the readings.
sensor_latency: There are two ways to use this expected sensor latency.
For both methods, the latency should be in the same unit as the sensor
data timestamp. 1. As a single float number, the observation will be a
1D array. For real robots, this should be set to 0.0. 2. As a array of
floats, the observation will be a 2D array based on how long the history
need to be. Thus, [0.0, 0.1, 0.2] is a history length of 3.
Raises:
ValueError: If no IMU channel is provided and no bounds for the channels.
"""
super().__init__(
name=name,
sensor_latency=sensor_latency,
interpolator_fn=sensor.linear_obs_blender)
if channels is None:
raise ValueError("IMU channels are not provided.")
self._channels = channels
self._num_channels = len(self._channels)
self._noise_generator = noise_generator
self._dtype = dtype
if lower_bound is None or upper_bound is None:
raise ValueError("Must provides bounds for the IMU readings.")
if isinstance(lower_bound, (float, int)):
lower_bound = np.full(self._num_channels, lower_bound, dtype=dtype)
else:
lower_bound = np.array(lower_bound, dtype=dtype)
if len(lower_bound) != self._num_channels:
raise ValueError("length of sensor lower bound {lower_bound} does not"
" match the number of channels.")
if isinstance(upper_bound, (float, int)):
upper_bound = np.full(self._num_channels, upper_bound, dtype=dtype)
else:
upper_bound = np.array(upper_bound, dtype=dtype)
if len(upper_bound) != self._num_channels:
raise ValueError("length of sensor upper bound {upper_bound} does not"
" match the number of channels.")
self._observation_space = self._stack_space(
gym.spaces.Box(
            low=np.array(lower_bound, dtype=self._dtype),
# Package imports
import numpy as np
from PIL import Image
import nibabel as nib
import nrrd
import pandas as pd
from skimage import io
from skimage import filters
import matplotlib.pyplot as plt
# Skimage imports
from skimage.morphology import remove_small_objects
def im_read(file_name):
"""
Function that reads the image into a Jupyter Notebook and gets the
max intensity projection.
Parameters
----------
file_name: string
The actual name of the file that is being accessed.
Returns
-------
im_max: array
The maximum intensity projection of the read image.
"""
im = io.imread(file_name)
im_max = np.max(im, axis=0)
return im_max
def mim_edge_detector(max_ip):
"""
Function that performs the edge detection to get registration points
for moving images.
Parameters
----------
max_ip: array
The maximum intensity projection of an immunofluorescent slice image.
Returns
-------
binary: array
The maximum intensity projection of the read image.
"""
gauss = filters.gaussian(max_ip, sigma=11, output=None, mode='nearest',
cval=0, multichannel=None, preserve_range=False,
truncate=4.0)
edge_sobel = filters.sobel(gauss)
threshold = filters.threshold_otsu(edge_sobel)
binary = edge_sobel > threshold
return binary
def image_cleaning(binary):
"""
A function that cleans up the image by removing small artifacts caused by
methodology.
Parameters
----------
max_ip: array
The maximum intensity projection of an immunofluorescent slice image.
Returns
-------
binary: array
The maximum intensity projection of the read image.
"""
binary = remove_small_objects(binary, min_size=3000, connectivity=1,
in_place=True)
# imagex,imagey = binary.shape
# xrange=int(imagex/10)
# yrange=int(imagey/10)
# xfull=imagex
# yfull=imagey
return binary
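# Example (sketch): the moving-image pipeline chains the three functions above.
# The file name is an assumption for illustration.
#   im_max = im_read('slice_stack.tif')
#   edges = mim_edge_detector(im_max)
#   mask = image_cleaning(edges)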
def atlas_slice(atlas, slice_number):
"""
A function that pulls the data for a specific atlas slice.
Parameters
----------
atlas: nrrd
Atlas segmentation file that has a stack of slices.
slice_number: int
The number in the slice that corresponds to the fixed image
for registration.
Returns
-------
sagittal: array
Sagittal view being pulled from the atlas.
coronal: array
Coronal view being pulled from the atlas.
horizontal: arrary
Horizontal view being pulled from the atlas.
"""
epi_img_data2 = atlas.get_fdata()
sagittal = epi_img_data2[140, :, :]
coronal = epi_img_data2[:, slice_number, :]
horizontal = epi_img_data2[:, :, 100]
return sagittal, coronal, horizontal
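# Example (sketch): pull views from a NIfTI atlas and display them with
# show_slices. The file name and slice number are illustration values.
#   atlas = nib.load('atlas.nii')
#   sagittal, coronal, horizontal = atlas_slice(atlas, 200)
#   show_slices([sagittal, coronal, horizontal])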
def show_slices(slices):
"""
A function that allows for slices from .nii files to be viewed.
Parameters
----------
slices: tuples
Tuple of coronal, sagittal, and horizontal slices you want to view
Returns
-------
N/A: This is specifically a visualization step
Notes
-------
From: #from: https://nipy.org/nibabel/coordinate_systems.html
"""
fig, axes = plt.subplots(1, len(slices))
for i, slice in enumerate(slices):
axes[i].imshow(slice.T, cmap='gray', origin='lower')
return
def nrrd_to_nii(file):
"""
A function that converts the .nrrd atlas to .nii file format
Parameters
----------
file: tuples
Tuple of coronal, sagittal, and horizontal slices you want to view
Returns
-------
F_im_nii: nibabel.nifti2.Nifti2Image
A nifti file format that is used by various medical imaging techniques.
Notes
-------
From: #from: https://nipy.org/nibabel/coordinate_systems.html
"""
_nrrd = nrrd.read(file)
data = _nrrd[0]
header = _nrrd[1] # noqa: F841
F_im_nii = nib.Nifti2Image(data, np.eye(4))
return F_im_nii
def atlas_edge_detection(image):
"""
A function that detects the edges of the atlas function
Parameters
----------
image: array
Array that depicts that specific atlas slice being used as a
fixed image.
Returns
-------
binary: array
The array depicting the specific atlas as a boolean.
"""
gauss = filters.gaussian(image, sigma=11, output=None, mode='nearest',
cval=0, multichannel=None,
preserve_range=False, truncate=4.0)
edge_sobel = filters.sobel(gauss)
threshold = filters.threshold_otsu(edge_sobel)
binary = edge_sobel > threshold
return binary
def x_value(binary_image):
"""
A function that finds the x-value of the relative maxium at the
top of a slice
Parameters
----------
binary_image: boolean array
Array that depicts the slice as a boolean.
Returns
-------
x: int
The x-coordinate of the maximum part of the curve.
"""
for x in range(binary_image.shape[0]):
unique_array = np.unique(binary_image[x], axis=0)
if unique_array.shape[0] == 2:
break
return x
def y_values(binary_image):
"""
A function that finds the y-value of the relative maxium at the top
of a slice
Parameters
----------
binary_image: boolean array
Array that depicts the slice as a boolean
Returns
-------
y_list: list
A list of y-values that had a boolean true value
"""
x = x_value(binary_image)
y_list = []
for y in range(binary_image[x].size):
image = binary_image[x]
value = image[y]
if value == True:
y_list.append(y)
else:
pass
y_list = np.array(y_list)
return y_list
def point_middle(binary_image):
"""
A function that finds the middle point if the maximum value row has
more than one true
pixel.
Parameters
----------
binary_image: boolean array
Array that depicts the slice as a boolean
Returns
-------
midpoint: int
The middle point of a the true values at the maximum curvature
of a slice.
"""
# x = x_value(binary_image)
y = y_values(binary_image)
    middle = np.median(y)
r"""
This module provides ways to evaluate the Plasma Dispersion Function [1]_,
:math:`Z(x)`, and other related functions, specifically, the
:math:`\mathcal{F}_q(\phi,\psi)` Function [2]_.
Faddeeva function
=====================
.. math::
w(z) \equiv \exp(-z^2) \; {\mathrm {erfc}}(-{\mathrm i}z)
is used, where :math:`{\mathrm {erfc}}(z)` is the complementary error function.
It is evaluated using the python wrapper of Steven G. Johnson's routine,
provided by scipy, see :py:func:`scipy.spetial.wofz` for more details.
Plasma Dispersion Function(PDF)
====================================
The PDF is related to Faddeeva function as
.. math::
Z(z) = {\mathrm i}\sqrt{\pi} \; w(z) \; .
PDF has the following property [1]_:
.. math::
Z'(z) = -2(1+zZ(z)) \; ,
and it's easy to show the following recurrence relation
.. math::
Z^m(z) = -2[(m-1)Z^{m-2}(z) + zZ^{m-1}(z)] \quad \mathrm{for}\; m>2 \; .
Weakly Relativistic Plasma Dispersion Function
===============================================
:math:`\mathcal{F}_q` function is related to PDF as [2]_:
.. math::
\mathcal{F}_{\frac{1}{2}}(\phi,\psi) =
-\frac{1}{2\phi}[Z(\psi-\phi)+Z(-\psi-\phi)] \; ,
.. math::
\mathcal{F}_{\frac{3}{2}}(\phi,\psi) =
-\frac{1}{2\psi}[Z(\psi-\phi)-Z(-\psi-\phi)] \; ,
.. math::
\mathcal{F}_{q+2}(\phi,\psi) =
(1+\phi^2\mathcal{F}_q-q\mathcal{F}_{q+1})/\psi^2 \; .
The derivatives of :math:`\mathcal{F}_q` respect to :math:`\phi^2` can be
evaluated as:
.. math::
\mathcal{F}_q^m \equiv \frac{\partial^m \mathcal{F}_q}{\partial(\phi^2)^m}
= \mathcal{F}_{q-1}^{m-1} - \mathcal{F}_q^{m-1} \; ,
.. math::
\mathcal{F}_{q+2}^m =
(\phi^2\mathcal{F}_q^m - q\mathcal{F}_{q+1}^m + m\mathcal{F}_q^{m-1})/\psi^2.
However, as pointed out in [2]_, evaluating derivatives using the first formula
may suffer from the cancellation of two large numbers. A more reliable way is
to express the derivatives of :math:`\mathcal{F}_{1/2}` and
:math:`\mathcal{F}_{3/2}` in terms of derivatives of the PDF, and then use the
second formula to evaluate larger q's.
Finally, for the special case :math:`\psi=0`, L'Hopital's rule needs to be used to
evaluate the "0/0" kind expressions. More details in Appendix part of [2]_.
Fast Evaluators
================
In order to evaluate ``Fq`` and ``Fmq`` functions faster, two new classes
``FqFastEvaluator`` and ``FmqFastEvaluator`` are provided. In initialization,
these classes calculate ``Fq`` or ``Fmq`` of given order on given parameter
meshes, and generate linear interpolators on these mesh. Then, when called with
given parameters, they use the existing interpolator to evaluate an
approximated value for ``Fq`` or ``Fmq``. The parameter mesh is on
:math:`\mu\delta = \psi^2 - \phi^2` and :math:`\psi` space, because they are
both real and the function value is essentially center peaked at the origin.
The calling signiture is almost the same as ``Fq`` and ``Fmq`` functions,
except that the order of the function are now determined at the time of
initialization. Two suggested mesh, ``mudelta_mesh`` and ``psi_mesh`` are
provided for default use.
.. [1] https://farside.ph.utexas.edu/teaching/plasma/lectures1/node87.html
.. [2] I.P.Shkarofsky, "New representations of dielectric tensor elements in
magnetized plasma", J. Plasma Physics(1986), vol. 35, part 2, pp.
319-331
"""
import pkg_resources
import warnings
import os
from numpy.lib.scimath import sqrt
import numpy as np
from scipy.special import wofz, gamma
from scipy.interpolate import RegularGridInterpolator
from ..geometry.grid import cubicspace
from ..settings.exception import FPSDPError, MathsWarning
class PDFError(FPSDPError):
def __init__(self, s):
self.message = s
class PDFWarning(MathsWarning):
pass
def Z(z):
r"""Plasma Dispersion Function. See the module's documentation for details:
:py:mod:`.PlasmaDispersionFunction`
The Plasma Dispersion Function(PDF) is related to Faddeeva function as
.. math::
Z(z) = {\mathrm i}\sqrt{\pi} \; w(z) \; .
"""
return 1j*sqrt(np.pi)*wofz(z)
def Z_1(z):
"""First derivative of Z
See :py:mod:`.PlasmaDispersionFunction` for details
"""
return -2*(1+z*Z(z))
def Z_2(z):
"""Shorthand for Z_m(z,2) function
"""
return -2*(z*Z_1(z) + Z(z))
def Z_m(z, m):
r"""m'th derivative of Z
Recurrence relation is used to evaluate this function.
See :py:mod:`.PlasmaDispersionFunction` for details:
The recurrence relation is
.. math::
Z_m = -2zZ_{m-1} - 2(m-1)Z_{m-2}
and the starting points are Z_0 and Z_1 evaluated by :py:func:`Z` and
:py:func:`Z_1` respectively.
"""
assert (m >= 0)
assert isinstance(m, int)
if m == 0:
return Z(z)
elif m == 1:
return Z_1(z)
else:
return -2*z*Z_m(z, m-1) -2*(m-1)*Z_m(z, m-2)
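# Numerical check (sketch): Z_1 implements the identity Z'(z) = -2(1 + z Z(z)),
# so it should agree with a central finite difference of Z. The test point and
# step size below are illustration values.
#   z = 0.3 - 0.2j; h = 1e-6
#   assert abs(Z_1(z) - (Z(z + h) - Z(z - h)) / (2 * h)) < 1e-6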
# General recurrence function to evaluate F_q for q>3/2
def Fq(phi, psi, nq, phi_nonzero=None, psi_nonzero=None, phi_tol=None,
psi_tol=None):
r"""General function to evaluate :math:`\mathcal{F}_{q}(\phi,\psi)`
For non-zero psi, we use the following recurrence relation to evaluate
.. math::
\mathcal{F}_{q+2}(\phi,\psi) =
(1+\phi^2\mathcal{F}_q-q\mathcal{F}_{q+1})/\psi^2
Special caution is required to evaluate Fq when psi=0, because the
recurrence relation has 0 in denominator. It is convenient to observe that
the above recurrence relation then requires the numerator equals 0 as well.
So we have the following recurrence relation
.. math::
\mathcal{F}_{q+1} = \frac{1+\phi^2\mathcal{F}_q}{q}
Another function will be dedicated to this special case, :py:func:`Fq0`.
Note: refer to [1]_, the sign convention for :math:`\phi` is :
.. math::
\mathrm{Re}\phi > 0 \; ,\; \mathrm{Im}\phi < 0
:param phi: :math:`\phi` parameter defined in ref.[2] in
:py:mod:`PlasmaDispersionFunction`
:type phi: ndarray of complex
:param psi: :math:`\psi` parameter defined in ref.[2] in
:py:mod:`PlasmaDispersionFunction`
:type psi: ndarray of complex
:param int nq: the numerator in q, must be odd, the denominator is default
to be 2
:param bool phi_nonzero: True if phi != 0 is guaranteed everywhere. If not
given, phi will be tested and divided into
appropriate groups.
:param bool psi_nonzero: True if psi != 0 is guaranteed everywhere. If not
given, psi will be tested and divided into
appropriate groups.
:param float phi_tol: tolerance for testing phi=0 condition. If not given,
will try to choose a proper value automatically based
on nq and m.
:param float psi_tol: tolerance for testing psi=0 condition. If not given,
will try to choose a proper value automatically based
on nq and m.
:return: :math:`\mathcal{F}_{q}(\phi,\psi)` evaluated at given
:math:`\phi` and :math:`\psi` mesh
:rtype: ndarray of complex
.. [1] Weakly relativistic dielectric tensor and dispersion functions of a
Maxwellian plasma, <NAME> and <NAME>, J. Plasma Physics
(1983), vol. 30, part 1, pp. 125-131
"""
phi = np.array(phi)
psi = np.array(psi)
if (phi_tol is None):
phi_tol = 1e-4
if (psi_tol is None):
if nq >=3:
psi_tol = 2*10**(-14.0/(nq-1))
else:
psi_tol = 1e-12
assert np.array(phi).shape == np.array(psi).shape
assert np.all(np.logical_or(np.abs(np.real(phi)) <= phi_tol ,\
np.abs(np.imag(phi)) <= phi_tol) )
assert isinstance(nq, int) and nq>0 and nq%2 == 1
if (psi_nonzero is None) and (phi_nonzero is None):
psi_nonzero_idx = np.logical_or( np.abs(np.real(psi)) >= psi_tol,
np.abs(np.imag(psi)) >= psi_tol)
phi_nonzero_idx = np.logical_or( np.abs(np.real(phi)) >= phi_tol,
np.abs(np.imag(phi)) >= phi_tol)
# Now, we have 4 cases:
# case 1: (psi != 0) and (phi != 0)
all_nonzero_idx = np.logical_and(psi_nonzero_idx, phi_nonzero_idx)
# case 2: (psi == 0) and (phi != 0)
psi_zero_idx = np.logical_and(np.logical_not(psi_nonzero_idx),
phi_nonzero_idx)
# case 3: (psi != 0) and (phi == 0)
phi_zero_idx = np.logical_and(psi_nonzero_idx,
np.logical_not(phi_nonzero_idx))
# case 4: (psi == 0) and (phi == 0)
all_zero_idx = np.logical_and(np.logical_not(psi_nonzero_idx),
np.logical_not(phi_nonzero_idx))
result = np.empty_like(phi, dtype='complex')
# modify phi so that real(phi)>0 and imag(phi)<0
phi_m = np.abs(np.real(phi)) - 1j*np.abs(np.imag(phi))
# for case 1
phi1 = phi_m[all_nonzero_idx]
psi1 = psi[all_nonzero_idx]
result[all_nonzero_idx] = Fq(phi1, psi1, nq, True, True, phi_tol,
psi_tol)
# for case 2
phi2 = phi_m[psi_zero_idx]
psi2 = np.zeros_like(psi[psi_zero_idx])
result[psi_zero_idx] = Fq(phi2, psi2, nq, True, False, phi_tol,psi_tol)
# for case 3
phi3 = np.zeros_like(phi_m[phi_zero_idx])
psi3 = psi[phi_zero_idx]
result[phi_zero_idx] = Fq(phi3, psi3, nq, False, True, phi_tol,psi_tol)
# for case 4
phi4 = np.zeros_like(phi_m[all_zero_idx])
psi4 = np.zeros_like(psi[all_zero_idx])
result[all_zero_idx] = Fq(phi4, psi4, nq, False, False,phi_tol,psi_tol)
return result
else:
if(nq == 1):
return _F12(phi, psi, phi_nonzero, psi_nonzero)
elif(nq == 3):
return _F32(phi, psi, phi_nonzero, psi_nonzero)
#elif(nq == 5):
# return _F52(phi, psi, phi_nonzero, psi_nonzero)
else:
if(phi_nonzero and psi_nonzero):
# if psi is already checked at high order function, no more checking
return (1 + phi*phi*Fq(phi,psi,nq-4, True, True) -
(nq-4)/2.0*Fq(phi,psi,nq-2, True, True)) / (psi*psi)
elif phi_nonzero and (not psi_nonzero):
return (1+ phi*phi*Fq(phi,psi,nq-2, True, False))*2/(nq-2)
elif (not phi_nonzero) and psi_nonzero:
return (1 - (nq-4)/2.0*Fq(phi, psi, nq-2, False, True)) /\
(psi*psi)
else:
return 2.0/(nq-2)
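# Hedged usage sketch (illustrative only, not part of the original module):
# Fq dispatches automatically between the four phi/psi zero/non-zero cases,
# so a mixed mesh can be passed in directly. The sample points below are
# arbitrary test values, not physical parameters; phi is kept purely real so
# that it is a valid square root of a real number.
def _demo_Fq():
    phi = np.array([0.5, 0.0, 0.8, 0.0], dtype=complex)
    psi = np.array([1.2, 0.7, 0.0, 0.0], dtype=complex)
    F72 = Fq(phi, psi, 7)  # evaluates F_{7/2}(phi, psi) point by point
    # at phi = psi = 0 the recurrence collapses to the constant 2/(nq-2)
    assert np.isclose(F72[3], 2.0/(7 - 2))
    return F72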
def _F12(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler for :math:`\mathcal{F}_{1/2}(\phi,\psi)`
Do not call directly. Use Fq(phi,psi,1) instead.
"""
if not phi_nonzero:
raise PDFError('F12 encounters phi=0 input; it diverges at {} points. \
Check the data to see what\'s going on.'.format(len(phi)))
return np.zeros_like(phi) + np.nan
else:
return -(Z(psi-phi) +Z(-psi-phi))/(2*phi)
def _F32(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler for :math:`\mathcal{F}_{3/2}(\phi,\psi)`
Do not call directly. Use Fq(phi,psi,3) instead
"""
if psi_nonzero and phi_nonzero:
return -(Z(psi-phi) - Z(-psi-phi)) / (2*psi)
elif psi_nonzero and not phi_nonzero:
return -(Z(psi) - Z(-psi)) / (2*psi)
elif phi_nonzero and not psi_nonzero:
return -Z_1(-phi)
else:
return 2*np.ones_like(phi)
def Fmq(phi, psi, m, nq, phi_nonzero=None,
psi_nonzero=None, phi_tol=None, psi_tol=None):
r"""General function to evaluate m-th derivative of Fq respect to phi^2
For each :math:`m`, starting from the lowest two :math:`q` values, we use the
following recurrence relation to calculate larger :math:`q`'s.
.. math::
\mathcal{F}_{q+2}^m = (\phi^2\mathcal{F}_q^m - q\mathcal{F}_{q+1}^m +
m\mathcal{F}_q^{m-1})/\psi^2
For the :math:`\psi = 0` case this relation is not usable, so we use instead:
.. math::
\mathcal{F}_{q+1}^m = (\phi^2\mathcal{F}_q^m + m\mathcal{F}_q^{m-1})/q
Furthermore, if :math:`\phi = 0` at the same time as :math:`\psi = 0`, we
have:
.. math::
\mathcal{F}^m_{q+3/2} = \frac{ m\mathcal{F}^{m-1}_{q+1/2} }{ q+1/2 }
Note that in physical situations, ``m`` > (``nq``-1)/2 is not used, so the
recurrence starts at ``nq`` = 2*``m``+1 and 2*``m``+3.
Here we implement only the m=1,2,3,4 cases, using the formulas given in
[1]_. Higher order cases require analytical derivation of the starting
formulas.
:param phi: :math:`\phi` parameter defined in ref.[2] in
:py:mod:`PlasmaDispersionFunction`
:type phi: ndarray of complex
:param psi: :math:`\psi` parameter defined in ref.[2] in
:py:mod:`PlasmaDispersionFunction`
:type psi: ndarray of complex
:param int m: order of the derivative with respect to :math:`\phi^2`
:param int nq: the numerator of q; it must be odd, and the denominator is
fixed at 2
:param bool phi_nonzero: True if phi != 0 is guaranteed everywhere. If not
given, phi will be tested and divided into
appropriate groups.
:param bool psi_nonzero: True if psi != 0 is guaranteed everywhere. If not
given, psi will be tested and divided into
appropriate groups.
:param float phi_tol: tolerance for testing phi=0 condition. If not given,
will try to choose a proper value automatically based
on nq and m.
:param float psi_tol: tolerance for testing psi=0 condition. If not given,
will try to choose a proper value automatically based
on nq and m.
:return: :math:`\mathcal{F}^m_{q}(\phi,\psi)` evaluated at given
:math:`\phi` and :math:`\psi` mesh
:rtype: ndarray of complex
.. [1] I.P.Shkarofsky, "New representations of dielectric tensor elements
in magnetized plasma", J. Plasma Physics(1986), vol. 35, part 2, pp.
319-331
"""
phi = np.array(phi)
psi = np.array(psi)
assert np.array(phi).shape == np.array(psi).shape
assert isinstance(m, int) and (m >= 0)
assert isinstance(nq, int) and (nq > 0) and (nq%2 == 1)
assert (nq >= 2*m+1) # required for physically meaningful result
if (phi_tol is None):
phi_tol = 1e-4
if (psi_tol is None):
if nq >=3:
psi_tol = 2*10**(-14.0/(nq-1))
else:
psi_tol = 1e-12
if (psi_nonzero is None) and (phi_nonzero is None):
psi_nonzero_idx = np.logical_or( np.abs(np.real(psi)) >= psi_tol,
np.abs(np.imag(psi)) >= psi_tol)
phi_nonzero_idx = np.logical_or( np.abs(np.real(phi)) >= phi_tol,
np.abs(np.imag(phi)) >= phi_tol)
# Now, we have 4 cases:
# case 1: (psi != 0) and (phi != 0)
all_nonzero_idx = np.logical_and(psi_nonzero_idx, phi_nonzero_idx)
# case 2: (psi == 0) and (phi != 0)
psi_zero_idx = np.logical_and(np.logical_not(psi_nonzero_idx),
phi_nonzero_idx)
# case 3: (psi != 0) and (phi == 0)
phi_zero_idx = np.logical_and(psi_nonzero_idx,
np.logical_not(phi_nonzero_idx))
# case 4: (psi == 0) and (phi == 0)
all_zero_idx = np.logical_and(np.logical_not(psi_nonzero_idx),
np.logical_not(phi_nonzero_idx))
result = np.empty_like(phi, dtype='complex')
# modify phi so that real(phi)>0 and imag(phi)<0
phi_m = np.abs(np.real(phi)) - 1j*np.abs(np.imag(phi))
# for case 1
phi1 = phi_m[all_nonzero_idx]
psi1 = psi[all_nonzero_idx]
result[all_nonzero_idx] = Fmq(phi1, psi1, m, nq, True, True)
# for case 2
phi2 = phi_m[psi_zero_idx]
psi2 = np.zeros_like(psi[psi_zero_idx])
result[psi_zero_idx] = Fmq(phi2, psi2, m, nq, True, False)
# for case 3
phi3 = np.zeros_like(phi_m[phi_zero_idx])
psi3 = psi[phi_zero_idx]
result[phi_zero_idx] = Fmq(phi3, psi3, m, nq, False, True)
# for case 4
phi4 = np.zeros_like(phi_m[all_zero_idx])
psi4 = np.zeros_like(psi[all_zero_idx])
result[all_zero_idx] = Fmq(phi4, psi4, m, nq, False, False)
return result
else:
if (m == 0):
warnings.warn('0-th derivative encountered. Try using Fq directly \
if possible.', PDFWarning)
return Fq(phi, psi, nq, phi_nonzero, psi_nonzero)
elif (m == 1):
return _Fq_1(phi, psi, nq, phi_nonzero, psi_nonzero)
elif (m == 2):
return _Fq_2(phi, psi, nq, phi_nonzero, psi_nonzero)
elif (m == 3):
return _Fq_3(phi, psi, nq, phi_nonzero, psi_nonzero)
elif (m == 4):
return _Fq_4(phi, psi, nq, phi_nonzero, psi_nonzero)
else: # m>4 cases are not implemented for now.
raise ValueError('m={} is encountered. m>4 cases are not \
implemented for now. Please submit a request to <EMAIL> if this \
feature is needed.'.format(m))
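# Hedged usage sketch (illustrative only, not part of the original module):
# at phi = psi = 0 the derivative handlers reduce to the constants
# m! * 2**(m+1) / (2m+1)!!  (see _Fm_mp32_00 below), e.g. 4/3 for m=1,
# 16/15 for m=2 and 96/105 for m=3, which gives a cheap sanity check of the
# dispatch logic in Fmq.
def _demo_Fmq_zero_limit():
    zero = np.array([0.0 + 0.0j])
    assert np.isclose(Fmq(zero, zero, 1, 5)[0], 4.0/3)
    assert np.isclose(Fmq(zero, zero, 2, 7)[0], 16.0/15)
    assert np.isclose(Fmq(zero, zero, 3, 9)[0], 96.0/105)
    return True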
def _Fq_1(phi, psi, nq, phi_nonzero, psi_nonzero):
r"""Handler for :py:func:`Fmq` function when m == 1.
Calling this function directly is not recommended. Parameter validity is
not checked.
Call :py:func:`Fmq` with m=1 instead.
"""
if (nq == 3):
return _F32_1(phi, psi, phi_nonzero, psi_nonzero)
elif (nq == 5):
return _F52_1(phi, psi, phi_nonzero, psi_nonzero)
else:
if psi_nonzero and phi_nonzero:
return (phi*phi*_Fq_1(phi, psi, nq-4, True, True) - \
(nq-4)/2.*_Fq_1(phi, psi, nq-2, True, True) + \
Fq(phi, psi, nq-4, True, True)) /\
(psi*psi)
elif psi_nonzero and (not phi_nonzero):
return (-(nq-4)/2.*_Fq_1(phi, psi, nq-2, False, True) + \
Fq(phi, psi, nq-4, False, True)) /\
(psi*psi)
elif phi_nonzero and (not psi_nonzero):
return (phi*phi*_Fq_1(phi, psi, nq-2, True, False) + \
Fq(phi, psi, nq-2, True, False)) *2 \
/ (nq-2)
else:
return Fq(phi, psi, nq-2, False, False)*2/(nq-2)
def _F32_1(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler function for :math:`\mathcal{F}'_{3/2}(\phi,\psi)`
Do not call directly. Parameter validity not checked. Use :py:func:`Fmq`
with m=1, nq=3 instead.
"""
if not phi_nonzero:
raise PDFError('zero phi encountered in F32_1, divergence occurs. \
Check input to make sure this is not an error.')
return np.ones_like(phi)*np.nan
elif psi_nonzero and phi_nonzero:
return (Z_1(psi-phi)-Z_1(-psi-phi))/(4*psi*phi)
else:
return Z_m(-phi, 2)/ (2*phi)
def _F52_1(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler function for :math:`\mathcal{F}'_{5/2}(\phi,\psi)`
Do not call directly. Parameter validity not checked. Use :py:func:`Fmq`
with m=1, nq=5 instead.
"""
if psi_nonzero:
psi3 = psi*psi*psi
plus = psi - phi
minus = -psi - phi
return -(Z(plus) - psi*Z_1(plus)) / (4*psi3) + \
(Z(minus) + psi*Z_1(minus)) / (4*psi3)
elif phi_nonzero:
return Z_m(-phi, 3)/6
else:
return 4./3
def _Fq_2(phi, psi, nq, phi_nonzero, psi_nonzero):
r"""Handler for :py:func:`Fmq` function when m == 2.
Calling this function directly is not recommended. Parameter validity is
not checked.
Call :py:func:`Fmq` with m=2 instead.
"""
if (nq == 5):
return _F52_2(phi, psi, phi_nonzero, psi_nonzero)
elif (nq == 7):
return _F72_2(phi, psi, phi_nonzero, psi_nonzero)
else:
if psi_nonzero and phi_nonzero:
return (phi*phi*_Fq_2(phi, psi, nq-4, True, True) - \
(nq-4)/2.*_Fq_2(phi, psi, nq-2, True, True) + \
2*_Fq_1(phi, psi, nq-4, True, True)) / (psi*psi)
elif psi_nonzero and (not phi_nonzero):
return (-(nq-4)/2.*_Fq_2(phi, psi, nq-2, False, True) + \
2*_Fq_1(phi, psi, nq-4, False, True)) / (psi*psi)
elif phi_nonzero:
return (phi*phi*_Fq_2(phi, psi, nq-2, True, False) + \
2* _Fq_1(phi, psi, nq-2, True, False)) *2 / (nq-2)
else:
return 2*_Fq_1(phi, psi, nq-2, False, False)*2/(nq-2)
def _F52_2(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler function for :math:`\mathcal{F}''_{5/2}(\phi,\psi)`
Do not call directly. Parameter validity not checked. Use :py:func:`Fmq`
with m=2, nq=5 instead.
"""
if not phi_nonzero:
raise PDFError('zero phi encountered in F52_2, divergence occurs. \
Check input to make sure this is not an error.')
return np.ones_like(phi)*np.nan
elif psi_nonzero and phi_nonzero:
plus = psi - phi
minus = -psi - phi
return ((Z_1(plus) - psi*Z_m(plus, 2)) - (Z_1(minus) + psi*Z_m(minus,
2))) / (8*phi*psi*psi*psi)
else:
return -Z_m(-phi, 4) / (12*phi)
def _F72_2(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler function for :math:`\mathcal{F}''_{7/2}(\phi,\psi)`
Do not call directly. Parameter validity not checked. Use :py:func:`Fmq`
with m=2, nq=7 instead.
"""
if psi_nonzero:
psi2 = psi*psi
psi5 = psi2*psi2*psi
plus = psi - phi
minus = -psi - phi
return -(3*Z(plus) - 3*psi*Z_1(plus) + psi2*Z_m(plus, 2)) / (8*psi5) +\
(3*Z(minus) + 3*psi*Z_1(minus) + psi2*Z_m(minus, 2)) / (8*psi5)
elif phi_nonzero:
return - Z_m(-phi, 5)/60
else:
return 16./15
def _Fq_3(phi, psi, nq, phi_nonzero, psi_nonzero):
r"""Handler for :py:func:`Fmq` function when m == 3.
Calling this function directly is not recommended. Parameter validity is
not checked.
Call :py:func:`Fmq` with m=3 instead.
"""
if (nq == 7):
return _F72_3(phi, psi, phi_nonzero, psi_nonzero)
elif (nq == 9):
return _F92_3(phi, psi, phi_nonzero, psi_nonzero)
else:
if psi_nonzero and phi_nonzero:
return (phi*phi*_Fq_3(phi, psi, nq-4, True, True) - \
(nq-4)/2.*_Fq_3(phi, psi, nq-2, True, True) + \
3*_Fq_2(phi, psi, nq-4, True, True)) / (psi*psi)
elif psi_nonzero and (not phi_nonzero):
return (-(nq-4)/2.*_Fq_3(phi, psi, nq-2,False, True) + \
3*_Fq_2(phi, psi, nq-4,False, True)) / (psi*psi)
elif phi_nonzero:
return (phi*phi*_Fq_3(phi, psi, nq-2, True, False) + \
3* _Fq_2(phi, psi, nq-2, True, False)) *2 / (nq-2)
else:
return 3*_Fq_2(phi, psi, nq-2, False, False)*2/(nq-2)
def _F72_3(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler function for :math:`\mathcal{F}'''_{7/2}(\phi,\psi)`
Do not call directly. Parameter validity not checked. Use :py:func:`Fmq`
with m=3, nq=7 instead.
"""
if not phi_nonzero:
raise PDFError('zero phi encountered in F72_3, divergence occurs. \
Check input to make sure this is not an error.')
return np.ones_like(phi)*np.nan
elif psi_nonzero and phi_nonzero:
plus = psi - phi
minus = -psi - phi
psi2 = psi * psi
psi5 = psi2 * psi2 * psi
return ((3*Z_1(plus) - 3*psi*Z_2(plus)+ psi2*Z_m(plus, 3)) - \
(3*Z_1(minus) + 3*psi*Z_2(minus) + psi2*Z_m(minus, 3))) \
/ (16*phi*psi5)
else:
return Z_m(-phi, 6) / (120*phi)
def _F92_3(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler function for :math:`\mathcal{F}'''_{9/2}(\phi,\psi)`
Do not call directly. Parameter validity not checked. Use :py:func:`Fmq`
with m=3, nq=9 instead.
"""
if psi_nonzero:
psi2 = psi*psi
psi3 = psi2*psi
psi7 = psi2*psi2*psi3
plus = psi - phi
minus = -psi - phi
return -(15*Z(plus) - 15*psi*Z_1(plus) + 6*psi2*Z_2(plus) - \
psi3*Z_m(plus, 3)) / (16*psi7) +\
(15*Z(minus) + 15*psi*Z_1(minus) + 6*psi2*Z_2(minus) + \
psi3*Z_m(minus, 3)) / (16*psi7)
elif phi_nonzero:
return - Z_m(-phi, 7)/840
else:
return 96/105.
def _Fq_4(phi, psi, nq, phi_nonzero, psi_nonzero):
r"""Handler for :py:func:`Fmq` function when m == 4.
Calling this function directly is not recommended. Parameter validity is
not checked.
Call :py:func:`Fmq` with m=4 instead.
"""
if (nq == 9):
return _F92_4(phi, psi, phi_nonzero, psi_nonzero)
elif (nq == 11):
return _F112_4(phi, psi, phi_nonzero, psi_nonzero)
else:
if psi_nonzero and phi_nonzero:
return (phi*phi*_Fq_4(phi, psi, nq-4, True, True) - \
(nq-4)/2.*_Fq_4(phi, psi, nq-2, True, True) + \
4*_Fq_3(phi, psi, nq-4, True, True)) / (psi*psi)
elif psi_nonzero and (not phi_nonzero):
return (-(nq-4)/2.*_Fq_4(phi, psi, nq-2,False, True) + \
4*_Fq_3(phi, psi, nq-4, False, True)) / (psi*psi)
elif phi_nonzero:
return (phi*phi*_Fq_4(phi, psi, nq-2, True, False ) + \
4* _Fq_3(phi, psi, nq-2, True, False)) *2 / (nq-2)
else:
return 4*_Fq_3(phi, psi, nq-2, False, False)*2/(nq-2)
def _F92_4(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler function for :math:`\mathcal{F}^{IV}_{9/2}(\phi,\psi)`
Do not call directly. Parameter validity not checked. Use :py:func:`Fmq`
with m=4, nq=9 instead.
"""
if not phi_nonzero:
raise PDFError('zero phi encountered in F92_4, divergence occurs. \
Check input to make sure this is not an error.')
return np.ones_like(phi)*np.nan
elif psi_nonzero and phi_nonzero:
plus = psi - phi
minus = -psi - phi
psi2 = psi * psi
psi3 = psi * psi2
psi7 = psi2 * psi2 * psi3
return ((15*Z_1(plus) - 15*psi*Z_2(plus) + 6*psi2*Z_m(plus, 3) - \
psi3*Z_m(plus, 4)) - \
(15*Z_1(minus) + 15*psi*Z_2(minus) + 6*psi2*Z_m(minus, 3) + \
psi3*Z_m(minus, 4)) ) / (32*phi*psi7)
else:
return -Z_m(-phi, 8) / (1680*phi)
def _F112_4(phi, psi, phi_nonzero, psi_nonzero):
r"""Handler function for :math:`\mathcal{F}^{IV}_{11/2}(\phi,\psi)`
Do not call directly. Parameter validity not checked. Use :py:func:`Fmq`
with m=4, nq=11 instead.
"""
if psi_nonzero:
psi2 = psi*psi
psi3 = psi2*psi
psi4 = psi2*psi2
psi9 = psi2*psi3*psi4
plus = psi - phi
minus = -psi - phi
return (-(105*Z(plus) - 105*psi*Z_1(plus) + 45*psi2*Z_2(plus) - \
10*psi3*Z_m(plus, 3) + psi4*Z_m(plus, 4)) +\
(105*Z(minus) + 105*psi*Z_1(minus) + 45*psi2*Z_2(minus) + \
10*psi3*Z_m(minus, 3) + psi4*Z_m(minus, 4))) / (32*psi9)
elif phi_nonzero:
return - Z_m(-phi, 9)/15120
else:
return 96*8/(105.*9)
def _Fm_mp32_00(m, shape=(1,)):
r"""Handler for :math:`\mathcal{F}^m_{m+3/2}(0,0)`
when :math:`\psi=0` and :math:`\phi=0`, we have the recurrence
.. math::
\mathcal{F}^m_{q+3/2} = \frac{ m\mathcal{F}^{m-1}_{q+1/2} }{ q+1/2 }
especially when q == m, this recurrence finally ends at
:math:`\mathcal{F}_{3/2} = 2`.
We can then obtain the analytical formula for
:math:`\mathcal{F}^m_{m+3/2}(0,0)`:
.. math::
\mathcal{F}^m_{m+3/2}(0,0) = 2 \prod\limits_{i=1}^m 2i/(2i+1) =
\frac{m! \; 2^{m+1}}{(2m+1)!!}
:param int m: the order of the derivative.
:param shape: the shape of the return array
:type shape: tuple of int, should be the same shape as phi/psi determined
by the caller
:return: the calculated value of the function
:rtype: ndarray of complex with the shape same as ``shape``
"""
result = 2.
while(m > 0):
result *= 2*m/(2*m+1.)
m = m-1
return np.ones(shape, dtype='complex')*result
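# Hedged consistency check (illustrative only, not part of the original
# module): the product loop above should agree with the closed form
# m! * 2**(m+1) / (2m+1)!!.  Using (2m+1)!! = (2m+1)! / (2**m * m!), the
# closed form can be written with math.factorial alone.
def _check_Fm_mp32_00(max_m=6):
    from math import factorial
    for m in range(max_m + 1):
        closed = (factorial(m) * 2.0**(m + 1) * 2.0**m * factorial(m)
                  / factorial(2*m + 1))
        assert np.isclose(_Fm_mp32_00(m, shape=(1,))[0], closed)
    return True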
# default mudelta and psi mesh for creating fast evaluators.
_default_mudelta_mesh = cubicspace(-50,50,1001)
_default_psi_mesh = cubicspace(-50,50,1001)
class FqFastEvaluator(object):
"""Fast evaluator for Fq functions
Initialization:
FqFastEvaluator( nq, mudelta_mesh, psi_mesh, **P)
:param int nq: nq passed into Fq function, the order of the function is
nq/2.
:param mudelta_mesh: :math:`\mu \delta \equiv \psi^2-\phi^2` values for
mesh points in phi_psi plane, we use this value
because Fq is most sensitive to it.
:type mudelta_mesh: 1D array of float, monotonic order
:param psi_mesh: psi values for mesh points in phi_psi plane
:type psi_mesh: 1D array of float, monotonic order
:param value: Optional, A precalculated function value array for given
mesh. If not given, Fq function will be called to
calculate all the values on given mesh.
:type value: None or 2D array of complex.
:param **P: additional keyword arguments passed into
scipy.interpolate.RegularGridInterpolator.
Methods:
__call__(phi, psi):
return Fq value at (phi,psi) points. phi, psi are arrays with the
same shape.
reconstruct(**P):
reconstruct the interpolator using the new keyword arguments given
in **P
test(phi_test, psi_test, tolabs=1e-2, tolrel=1e-2):
evaluate Fq on (phi_test, psi_test) points using both the original
function and the interpolator, and report the maximum absolute and
relative errors. A warning is printed if either error exceeds the
margin given by the tolabs and tolrel arguments.
"""
def __init__(self, nq, mudelta_mesh=_default_mudelta_mesh,
psi_mesh=_default_psi_mesh, value=None, **P):
self.psi_1D = psi_mesh
self.mudelta_1D = mudelta_mesh
self.nq = nq
mudelta_2D = np.zeros((len(mudelta_mesh), len(psi_mesh))) + \
mudelta_mesh[:, np.newaxis]
psi_2D = np.zeros_like(mudelta_2D) + psi_mesh[np.newaxis, :]
if value is None:
self.value = Fq(sqrt(psi_2D*psi_2D-mudelta_2D), psi_2D, self.nq)
else:
self.value = value
self.interpolator = RegularGridInterpolator((self.mudelta_1D,
self.psi_1D),
self.value,
bounds_error=False,
fill_value=0, **P)
self.phi_bounds = ((sqrt(psi_mesh[0]-mudelta_mesh[0]),
sqrt(psi_mesh[0]-mudelta_mesh[-1])),
(sqrt(psi_mesh[-1]-mudelta_mesh[0]),
sqrt(psi_mesh[-1]-mudelta_mesh[-1])) )
self.psi_bounds = (psi_mesh[0], psi_mesh[-1])
def reconstruct(self, mudelta_mesh=None, psi_mesh=None, **P):
"""reconstruct the interpolator using the new keyword arguments given
in **P and/or mudelta_mesh, psi_mesh
"""
if (psi_mesh is not None):
self.psi_1D = psi_mesh
if (mudelta_mesh is not None):
self.mudelta_1D = mudelta_mesh
if (psi_mesh is not None) or (mudelta_mesh is not None):
mudelta_2D = np.zeros((len(mudelta_mesh), len(psi_mesh))) + \
mudelta_mesh[:, np.newaxis]
psi_2D = np.zeros_like(mudelta_2D) + psi_mesh[np.newaxis, :]
self.value = Fq(sqrt(psi_2D*psi_2D-mudelta_2D), psi_2D, self.nq)
self.interpolator = RegularGridInterpolator((self.mudelta_1D,
self.psi_1D),
self.value,
bounds_error=False,
fill_value=0, **P)
else:
self.interpolator = RegularGridInterpolator((self.mudelta_1D,
self.psi_1D),
self.value,
bounds_error=False,
fill_value=0, **P)
def __call__(self, phi, psi):
"""Evaluate Fq at phi,psi using the internal interpolator
"""
phi = np.array(phi)
psi = np.array(psi)
assert phi.shape == psi.shape
# phi must be square root of a real number
assert np.all(np.logical_or(np.abs(np.real(phi)) <= 1e-10,
np.abs(np.imag(phi)) <= 1e-10))
phi2 = np.real(phi*phi)
mudelta = psi*psi - phi2
dims = list(range(1, mudelta.ndim+1))
dims.extend([0])
# construct the points structure for interpolation, transpose the array
# so that the fastest changing index is length 2: (phi,psi)
points = np.transpose(np.array([mudelta, psi]), axes=dims)
return self.interpolator(points)
def test(self, phi, psi, tolabs=1e-2, tolrel=1e-2, full_report=False):
"""evaluate Fq on (phi_test, psi_test) points using both original
function and interpolator, report the maximum absolute error and
relative error. Print a warning if either error is greater than the
preset margin given by abserr and relerr arguments.
If full_report == True, abserr and relerr on every phi,psi point will
be returned. Otherwise only the maximum value and corresponding phi,psi
are returned.
"""
exact_value = Fq(phi, psi, self.nq)
interp_value = self(phi,psi)
abs_err = interp_value - exact_value
in_range_idx = interp_value != 0
rel_err = np.zeros_like(abs_err)
rel_err[in_range_idx] = abs_err[in_range_idx]/exact_value[in_range_idx]
maxabs = np.abs(abs_err).max()
maxrel = np.abs(rel_err).max()
arg_maxabs = np.where(np.abs(abs_err) == maxabs)
arg_maxrel = np.where(np.abs(rel_err) == maxrel)
if(maxabs > tolabs):
warnings.warn('Absolute error exceeds limit({})'.format(tolabs))
if(maxrel > tolrel):
warnings.warn('Relative error exceeds limit({})'.format(tolrel))
print('\
Max Absolute Error: {}, at\n\
phi:{},\n\
psi:{}.\n\
Max Relative Error: {}, at\n\
phi:{},\n\
psi:{}'.format(abs_err[arg_maxabs], phi[arg_maxabs], psi[arg_maxabs],
rel_err[arg_maxrel], phi[arg_maxrel], psi[arg_maxrel]))
if full_report:
return (abs_err, rel_err)
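# Hedged usage sketch (illustrative only, not part of the original module):
# build a coarse evaluator for F_{5/2} and compare a few interpolated values
# against the exact recursive Fq call. The mesh extent and resolution here
# are arbitrary choices, much smaller than the module defaults, so the
# construction stays cheap.
def _demo_FqFastEvaluator():
    evaluator = FqFastEvaluator(5, mudelta_mesh=cubicspace(-10, 10, 201),
                                psi_mesh=cubicspace(-10, 10, 201))
    phi = np.array([0.5, 1.0])
    psi = np.array([1.0, 2.0])
    approx = evaluator(phi, psi)   # interpolated values
    exact = Fq(phi, psi, 5)        # direct recursive evaluation
    return approx, exact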
class FmqFastEvaluator(object):
"""Fast evaluator for Fmq functions
Initialization:
FmqFastEvaluator(m, nq, mudelta_mesh, psi_mesh, **P)
:param int m: m passed into Fmq function, the order of differentiation.
:param int nq: nq passed into Fq function, the order of the function is
nq/2.
:param mudelta_mesh: :math:`\mu \delta \equiv \psi^2-\phi^2` values for
mesh points in phi_psi plane, we use this value
because Fmq is most sensitive to it.
:type mudelta_mesh: 1D array of float, monotonic order
:param psi_mesh: psi values for mesh points in phi_psi plane
:type psi_mesh: 1D array of float, monotonic order
:param value: Optional, A precalculated function value array for given
mesh. If not given, Fmq function will be called to
calculate all the values on given mesh.
:type value: None or 2D array of complex.
:param **P: additional keyword arguments passed into
scipy.interpolate.RegularGridInterpolator.
Methods:
__call__(phi, psi):
return Fmq value at (phi,psi) points. phi, psi are arrays with the
same shape.
reconstruct(mudelta_mesh=None, psi_mesh=None, **P):
reconstruct the interpolator using the new keyword arguments given
in **P and/or new meshes.
test(phi_test, psi_test, tolabs=1e-2, tolrel=1e-2):
evaluate Fmq on (phi_test, psi_test) points using both the original
function and the interpolator, and report the maximum absolute and
relative errors. A warning is printed if either error exceeds the
margin given by the tolabs and tolrel arguments.
"""
def __init__(self, m, nq, mudelta_mesh=_default_mudelta_mesh,
psi_mesh=_default_psi_mesh, value=None, **P):
self.psi_1D = psi_mesh
self.mudelta_1D = mudelta_mesh
self.m = m
self.nq = nq
mudelta_2D = np.zeros((len(mudelta_mesh), len(psi_mesh))) + \
mudelta_mesh[:, np.newaxis]
psi_2D = np.zeros_like(mudelta_2D) + psi_mesh[np.newaxis, :]
if(value is None):
self.value = Fmq(sqrt(psi_2D*psi_2D-mudelta_2D), psi_2D, self.m,
self.nq)
else:
self.value = value
self.interpolator = RegularGridInterpolator((self.mudelta_1D,
self.psi_1D),
self.value,
bounds_error=False,
fill_value=0, **P)
self.phi_bounds = ((sqrt(psi_mesh[0]-mudelta_mesh[0]),
sqrt(psi_mesh[0]-mudelta_mesh[-1])),
(sqrt(psi_mesh[-1]-mudelta_mesh[0]),
sqrt(psi_mesh[-1]-mudelta_mesh[-1])) )
self.psi_bounds = (psi_mesh[0], psi_mesh[-1])
# import required libraries
import numpy as np
import cv2
print('OpenCV version: '+cv2.__version__)
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import os
from collections import Counter
# Set source folder
SRC_FOLDER = "C:/Users/raksh/OneDrive - The Pennsylvania State University/PhD Research/Paper-4/SysID Experiment/OL Test 3/"
# open and read file containing start and end timestamps of the videos
df_vidTimes = pd.read_excel(SRC_FOLDER + "Video_Timestamps_1.xlsx")
df_vidTimes.drop(df_vidTimes.columns[0],axis=1,inplace=True)
################ ALL FUNCTION DEFINITIONS ################
def perspCorrection(img,pt1,pt2,pt3,pt4,scale_width,scale_height):
# Create a copy of the image
img_copy = np.copy(img)
# Convert to RGB so as to display via matplotlib
# Using Matplotlib we can easily find the coordinates of the 4 points that is essential for finding then transformation matrix
#img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
# to calculate the transformation matrix
input_pts = np.float32([pt1,pt2,pt3,pt4])
output_pts = np.float32([[0,0],[scale_width-1,0],[0,scale_height-1],[scale_width-1,scale_height-1]])
# Compute the perspective transform M
M = cv2.getPerspectiveTransform(input_pts,output_pts)
# Apply the perspective transformation to the image
imgPersp = cv2.warpPerspective(img,M,(scale_width, scale_height)) #,flags=cv2.INTER_LINEAR) cv2.INTER_CUBIC is also an option
imgGrayPersp = cv2.cvtColor(imgPersp, cv2.COLOR_BGR2GRAY)
# visualize corners using cv2 circles
for x in range (0,4):
cv2.circle(img_copy,(round(input_pts[x][0]),round(input_pts[x][1])),5,(0,0,255),cv2.FILLED)
return [img_copy,imgPersp,imgGrayPersp]
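# Hedged usage sketch (illustrative only, not part of the original script):
# apply perspCorrection to a synthetic frame so the call signature is clear.
# The four source points (top-left, top-right, bottom-left, bottom-right) are
# arbitrary here; in the real workflow they are read off a matplotlib display
# of an actual video frame.
def _demo_perspCorrection():
    synthetic = np.full((480, 640, 3), 255, dtype=np.uint8)
    cv2.rectangle(synthetic, (100, 80), (540, 400), (0, 0, 0), thickness=3)
    marked, warped, warped_gray = perspCorrection(
        synthetic, (100, 80), (540, 80), (100, 400), (540, 400), 640, 480)
    return marked, warped, warped_gray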
def extractTopBottom(img,tStart,tEnd,bStart,bEnd):
img_top = img[tStart[1]:tEnd[1],tStart[0]:tEnd[0]]
img_bottom = img[bStart[1]:bEnd[1],bStart[0]:bEnd[0]]
return [img_top,img_bottom]
def gaussianBlur(img,fsize):
# gaussian blur
gblur = cv2.GaussianBlur(img,(fsize,fsize),0)
return gblur
def medianBlur(img,fsize=3):
# median blur - effective at removing salt and pepper noise
mblur = cv2.medianBlur(img,fsize)
return mblur
def bilateralFilter(img):
# Bilateral filter preserves edges while removing noise
bfblur = cv2.bilateralFilter(img,9,75,75)
return bfblur
def gAdaptiveThresholding(img):
# median filtering
adaptive_gaussian = cv2.adaptiveThreshold(img,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,11,2)
return adaptive_gaussian
def morphOps(img,kernel1,kernel2,k1_num_passes=2):
# Closing = Dilation + Erosion
# dilation
mask_dil = cv2.dilate(img,kernel1,iterations = k1_num_passes)
# erosion
mask_erode = cv2.erode(mask_dil,kernel2,iterations = 1)
return mask_erode
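# Hedged usage sketch (illustrative only, not part of the original script):
# chain the filtering, thresholding and morphology helpers into the kind of
# clean binary image the downstream width computation expects. The 3x3
# rectangular kernels are assumptions, not values taken from the original
# experiment.
def _demo_preprocess(gray_img):
    blurred = medianBlur(gray_img, 3)
    binary = gAdaptiveThresholding(blurred)
    kernel_dilate = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    kernel_erode = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    cleaned = morphOps(binary, kernel_dilate, kernel_erode, k1_num_passes=2)
    return cleaned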
def computeW_Rev(img,img_debug):
avg_num_pixels = 159
scaling_factor = 1.0
mm_per_pixel = ((1/32)*25.4)/(scaling_factor*avg_num_pixels)
edge_length_threshold = 55
min_L_edge_threshold = False
min_R_edge_threshold = False
# Predefine arrays for data storage
approx_edges = 10
num_edges = np.zeros(img.shape[0]) #,dtype=np.uint16)
edge_start = np.zeros([img.shape[0],approx_edges])#,dtype=np.uint16)
edge_end = np.zeros([img.shape[0],approx_edges])#,dtype=np.uint16)
edge_count = 0
k=0
sse = False
tse = False
# scan each row from left to right, starting at (0,0), until a black pixel is found
# the inner loop goes across columns within each row
for i in range(img.shape[0]):
found_edge = False
temp_edge_count = 0
k=0
for j in range(img.shape[1]):
if(img[i,j]<=50):
# Black pixel found - edge
if(found_edge==False):
found_edge = True
temp_edge_count += 1
num_edges[i] = temp_edge_count
edge_start[i][k] = j
k += 1
else:
if(found_edge):
edge_end[i][k-1] = j-1
found_edge = False
x = Counter(num_edges)
y = {z:count for z, count in x.items() if count >= edge_length_threshold and z > 1}
#print(y)
if(len(y)!=0):
edge_condition = sorted(y,key=y.get)[0]
else:
print('num_edges > 1 and length(num_edges) >= threshold not satisfied . . . Lowering threshold to identify matches')
w = {z:count for z, count in x.items() if count < edge_length_threshold and z > 1}
if(len(w)!=0):
print('Found num_edges > 1 and length(num_edges) < threshold!')
edge_condition = sorted(w,key=w.get)[0]
else:
print('Unable to find edge condition . . . check image')
edge_condition = -1
if img_debug:
print('edge condition: ' + str(edge_condition))
if edge_condition == 2: #max(num_edges)==2:
# max num_edges = 2
L1_edge_start = edge_start[:,0][np.argwhere(num_edges==2)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==2)]>60,edge_start[:,0][np.argwhere(num_edges==2)]<300)]
L1_edge_end = edge_end[:,0][np.argwhere(num_edges==2)][np.logical_and(edge_end[:,0][np.argwhere(num_edges==2)]>60,edge_end[:,0][np.argwhere(num_edges==2)]<300)]
if(np.max(L1_edge_start)-np.min(L1_edge_start)>13):
L1_edge_start = L1_edge_start[L1_edge_start >= (np.max(L1_edge_start)-10)]
if(np.max(L1_edge_end)-np.min(L1_edge_end)>15):
L1_edge_end = L1_edge_end[L1_edge_end >= (np.max(L1_edge_end)-10)]
trueLedge_start = L1_edge_start
trueLedge_end = L1_edge_end
R1_edge_start = edge_start[:,1][np.argwhere(num_edges==2)][edge_start[:,1][np.argwhere(num_edges==2)]>350]
R1_edge_end = edge_end[:,1][np.argwhere(num_edges==2)][edge_end[:,1][np.argwhere(num_edges==2)]>350]
if(np.max(R1_edge_start)-np.min(R1_edge_start)>13):
R1_edge_start = R1_edge_start[R1_edge_start <= (np.min(R1_edge_start)+10)]
if(np.max(R1_edge_end)-np.min(R1_edge_end)>13):
R1_edge_end = R1_edge_end[R1_edge_end <= (np.min(R1_edge_end)+10)]
trueRedge_start = R1_edge_start
trueRedge_end = R1_edge_end
if(len(trueLedge_start)>len(trueLedge_end)):
trueLedge_start = np.array([trueLedge_start[i] for i in range(len(trueLedge_end))])
if(len(trueLedge_start)<len(trueLedge_end)):
trueLedge_end = np.array([trueLedge_end[i] for i in range(len(trueLedge_start))])
if(len(trueRedge_start)>len(trueRedge_end)):
trueRedge_start = np.array([trueRedge_start[i] for i in range(len(trueRedge_end))])
if(len(trueRedge_start)<len(trueRedge_end)):
trueRedge_end = np.array([trueRedge_end[i] for i in range(len(trueRedge_start))])
line1_start = (round(np.mean((trueLedge_start+trueLedge_end)/2)),0)
line1_end = (round(np.mean((trueLedge_start+trueLedge_end)/2)),img.shape[0])
line2_start = (round(np.mean((trueRedge_start+trueRedge_end)/2)),0)
line2_end = (round(np.mean((trueRedge_start+trueRedge_end)/2)),img.shape[0])
edge_count = 2
case_cond = 1
elif edge_condition == 3: #max(num_edges)==3:
# max num_edges = 3
# logic for finding true left edge
L2_edge_start = edge_start[:,1][np.argwhere(num_edges==3)][edge_start[:,1][np.argwhere(num_edges==3)]<250]
if(len(L2_edge_start)>=edge_length_threshold):
trueLedge_start = L2_edge_start
trueLedge_end = edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]<250]
else:
if(len(edge_start[:,0][np.argwhere(num_edges==3)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==3)]<250,edge_start[:,0][np.argwhere(num_edges==3)]>60)])!=0):
L1_edge_start = edge_start[:,0][np.argwhere(num_edges==3)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==3)]<250,edge_start[:,0][np.argwhere(num_edges==3)]>60)]
if(len(L2_edge_start)!=0):
L1_edge_start = np.hstack((L1_edge_start,L2_edge_start))
if(np.max(L1_edge_start)-np.min(L1_edge_start)>13):
L1_edge_start = L1_edge_start[L1_edge_start >= (np.max(L1_edge_start)-10)]
else:
L1_edge_start = edge_start[:,0][np.argwhere(num_edges==2)][edge_start[:,0][np.argwhere(num_edges==2)]<250]
if(len(L1_edge_start)>=edge_length_threshold):
trueLedge_start = L1_edge_start
if(len(edge_start[:,0][np.argwhere(num_edges==3)][np.logical_and(edge_start[:,0][np.argwhere(num_edges==3)]<250,edge_start[:,0][np.argwhere(num_edges==3)]>60)])!=0):
trueLedge_end = edge_end[:,0][np.argwhere(num_edges==3)][np.logical_and(edge_end[:,0][np.argwhere(num_edges==3)]<250,edge_end[:,0][np.argwhere(num_edges==3)]>60)]
if(len(L2_edge_start)!=0):
trueLedge_end = np.hstack((trueLedge_end,edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]<250]))
if(np.max(trueLedge_end)-np.min(trueLedge_end)>13):
trueLedge_end = trueLedge_end[trueLedge_end >= (np.max(trueLedge_end)-10)]
else:
trueLedge_end = edge_end[:,0][np.argwhere(num_edges==2)][edge_end[:,0][np.argwhere(num_edges==2)]<250]
elif(len(L1_edge_start)!=0 and len(L1_edge_start)<edge_length_threshold):
trueLedge_start = L1_edge_start
trueLedge_end = edge_end[:,0][np.argwhere(num_edges==3)][edge_end[:,0][np.argwhere(num_edges==3)]<250]
trueLedge_end = np.hstack((trueLedge_end,edge_end[:,0][np.argwhere(num_edges==2)][edge_end[:,0][np.argwhere(num_edges==2)]<250]))
min_L_edge_threshold = True
else:
print('max(num_edges)=3 invalid true left edge condition encountered . . . check code')
# logic for finding true right edge
R2_edge_start = edge_start[:,1][np.argwhere(num_edges==3)][edge_start[:,1][np.argwhere(num_edges==3)]>350]
if(len(R2_edge_start)>=edge_length_threshold):
trueRedge_start = R2_edge_start
trueRedge_end = edge_end[:,1][np.argwhere(num_edges==3)][edge_end[:,1][np.argwhere(num_edges==3)]>350]
else:
R1_edge_start = edge_start[:,1][np.argwhere(num_edges==2)][edge_start[:,1][np.argwhere(num_edges==2)]>350]
if(len(R1_edge_start)==0):
# three definite edges
trueRedge_start = edge_start[:,2][np.argwhere(num_edges==3)][edge_start[:,2][np.argwhere(num_edges==3)]>350]
trueRedge_end = edge_end[:,2][np.argwhere(num_edges==3)][edge_end[:,2][np.argwhere(num_edges==3)]>350]
elif(len(R1_edge_start)>=edge_length_threshold):
trueRedge_start = R1_edge_start
trueRedge_end = edge_end[:,1][np.argwhere(num_edges==2)][edge_end[:,1][np.argwhere(num_edges==2)]>350]
elif(len(R1_edge_start)!=0 and len(R1_edge_start)<edge_length_threshold):
# there are some elements but edge length is minimal
trueRedge_start = R1_edge_start
trueRedge_end = edge_end[:,1][np.argwhere(num_edges==2)][edge_end[:,1][np.argwhere(num_edges==2)]>350]
min_R_edge_threshold = True
else:
print('max(num_edges)=3 invalid true right edge condition encountered . . . check code')
if(np.max(trueRedge_start)-np.min(trueRedge_start)>13):
trueRedge_start = trueRedge_start[trueRedge_start <= (np.min(trueRedge_start)+10)]
if(np.max(trueRedge_end)-np.min(trueRedge_end)>13):
trueRedge_end = trueRedge_end[trueRedge_end <= (np.min(trueRedge_end)+10)]
if(len(trueLedge_start)>len(trueLedge_end)):
trueLedge_start = np.array([trueLedge_start[i] for i in range(len(trueLedge_end))])
if(len(trueLedge_start)<len(trueLedge_end)):
trueLedge_end = np.array([trueLedge_end[i] for i in range(len(trueLedge_start))])
if(len(trueRedge_start)>len(trueRedge_end)):
trueRedge_start = np.array([trueRedge_start[i] for i in range(len(trueRedge_end))])
if(len(trueRedge_start)<len(trueRedge_end)):
trueRedge_end = np.array([trueRedge_end[i] for i in range(len(trueRedge_start))])
if(len(trueLedge_start)<edge_length_threshold):
min_L_edge_threshold = True
if(len(trueRedge_start)<edge_length_threshold):
min_R_edge_threshold = True
if(min_L_edge_threshold or min_R_edge_threshold):
line1_start = (round(np.mean((trueLedge_start + trueLedge_end)/2)),0)
line1_end = (round(np.mean((trueLedge_start + trueLedge_end)/2)),img.shape[0])
line2_start = (round(np.mean((trueRedge_start + trueRedge_end)/2)),0)
line2_end = (round(np.mean((trueRedge_start + trueRedge_end)/2)),img.shape[0])
edge_count = 3
case_cond = 2
elif(np.logical_and(len(trueLedge_start)>=edge_length_threshold,len(trueRedge_start)>=edge_length_threshold)):
line1_start = (round(np.mean((trueLedge_start + trueLedge_end)/2)),0)
line1_end = (round(np.mean((trueLedge_start + trueLedge_end)/2)),img.shape[0])
line2_start = (round(np.mean((trueRedge_start + trueRedge_end)/2)),0)
line2_end = (round(np.mean((trueRedge_start + trueRedge_end)/2)),img.shape[0])
edge_count = 3
case_cond = 3
else:
print('max(num_edges)=3 with no matching condition reached . . . check code')
elif edge_condition == 4: #max(num_edges)==4:
# max num_edges = 4
# logic for finding true left edge
L3_edge_start = edge_start[:,2][np.argwhere(num_edges==4)][edge_start[:,2][np.argwhere(num_edges==4)]<250]
if(len(L3_edge_start)>=edge_length_threshold):
trueLedge_start = L3_edge_start
trueLedge_end = edge_end[:,2][np.argwhere(num_edges==4)][edge_end[:,2][np.argwhere(num_edges==4)]<250]
else:
L2_edge_start = edge_start[:,1][np.argwhere(num_edges==4)][np.logical_and(edge_start[:,1][np.argwhere(num_edges==4)]<250,edge_start[:,1][np.argwhere(num_edges==4)]>60)]
L2_edge_start = np.hstack((L2_edge_start,edge_start[:,1][np.argwhere(num_edges==3)][edge_start[:,1][np.argwhere(num_edges==3)]<250]))