# Basic libs
import os
import tensorflow as tf
import numpy as np
import time
import pickle
import random
import open3d
# OS functions
from os import makedirs, listdir
from os.path import exists, join, isfile, isdir
import os.path as path
# Dataset parent class
from datasets.common import Dataset
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def rotate(points, num_axis=1):
if num_axis == 1:
theta = np.random.rand() * 2 * np.pi
axis = np.random.randint(3)
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, -s], [s, c, -s], [s, s, c]], dtype=np.float32)
R[:, axis] = 0
R[axis, :] = 0
R[axis, axis] = 1
points = np.matmul(points, R)
elif num_axis == 3:
for axis in [0, 1, 2]:
theta = np.random.rand() * 2 * np.pi
c, s = np.cos(theta), np.sin(theta)
R = np.array([[c, -s, -s], [s, c, -s], [s, s, c]], dtype=np.float32)
R[:, axis] = 0
R[axis, :] = 0
R[axis, axis] = 1
points = np.matmul(points, R)
    else:
        raise ValueError(f'rotate: num_axis must be 1 or 3, got {num_axis}')
return points
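# Usage sketch (illustrative): `rotate` applies a random rotation about one
# randomly chosen axis (num_axis=1) or composes random rotations about x, y and
# z (num_axis=3) on an (N, 3) point array:
#
#     pts = np.random.rand(1000, 3).astype(np.float32)
#     pts_one = rotate(pts, num_axis=1)    # single random axis
#     pts_all = rotate(pts, num_axis=3)    # x, y and z in sequence
#
# Each constructed R is orthogonal, so pairwise distances are preserved.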
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class ThreeDMatchDataset(Dataset):
"""
    Class to handle the ThreeDMatch dataset for dense keypoint detection and feature description tasks.
"""
# Initiation methods
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, input_threads=8, voxel_size=0.03, load_test=False):
Dataset.__init__(self, 'ThreeDMatch')
####################
# Dataset parameters
####################
# Type of task conducted on this dataset
self.network_model = 'descriptor'
# Number of input threads
self.num_threads = input_threads
# Load test set or train set?
self.load_test = load_test
# voxel size
self.downsample = voxel_size
##########################
# Parameters for the files
##########################
# Path of the folder containing ply files
self.root = 'data/3DMatch/'
# Initiate containers
self.anc_points = {'train': [], 'val': [], 'test': []}
self.keypts = {'train': [], 'val': [], 'test': []}
self.anc_to_pos = {'train': {}, 'val': {}, 'test': {}}
self.ids_list = {'train': [], 'val': [], 'test': []}
if self.load_test:
self.prepare_geometry_registration()
else:
self.prepare_3dmatch_ply(split='train')
self.prepare_3dmatch_ply(split='val')
def prepare_3dmatch_ply(self, split='train'):
"""
        Load the pre-generated point clouds and keypoint correspondences (indices) to save time,
        and construct the self.anc_to_pos dictionary.
"""
print('\nPreparing ply files')
pts_filename = join(self.root, f'3DMatch_{split}_{self.downsample:.3f}_points.pkl')
keypts_filename = join(self.root, f'3DMatch_{split}_{self.downsample:.3f}_keypts.pkl')
if exists(pts_filename) and exists(keypts_filename):
with open(pts_filename, 'rb') as file:
data = pickle.load(file)
self.anc_points[split] = [*data.values()]
self.ids_list[split] = [*data.keys()]
with open(keypts_filename, 'rb') as file:
self.keypts[split] = pickle.load(file)
else:
print("PKL file not found.")
return
for idpair in self.keypts[split].keys():
anc = idpair.split("@")[0]
pos = idpair.split("@")[1]
# add (key -> value) anc -> pos
if anc not in self.anc_to_pos[split].keys():
self.anc_to_pos[split][anc] = [pos]
else:
self.anc_to_pos[split][anc] += [pos]
if split == 'train':
self.num_train = len(list(self.anc_to_pos[split].keys()))
print("Num_train", self.num_train)
else:
self.num_val = len(list(self.anc_to_pos[split].keys()))
print("Num_val", self.num_val)
return
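    # Pickle layout assumed by the loader above (inferred from how the files are
    # used in this class; names are placeholders):
    #
    #     3DMatch_<split>_<voxel>_points.pkl : {fragment_id: (N, 3) float array}
    #     3DMatch_<split>_<voxel>_keypts.pkl : {"anc_id@pos_id": (K, 2) int array
    #                                           of corresponding point indices;
    #                                           column 0 indexes the anchor
    #                                           fragment, column 1 the positive}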
def get_batch_gen(self, split, config):
"""
A function defining the batch generator for each split. Should return the generator, the generated types and
generated shapes
        :param split: string in "train", "val" or "test"
:param config: configuration file
:return: gen_func, gen_types, gen_shapes
"""
# Initiate potentials for regular generation
if not hasattr(self, 'potentials'):
self.potentials = {}
# Reset potentials
self.potentials[split] = np.random.rand(len(self.anc_points[split])) * 1e-3
################
# Def generators
################
def random_balanced_gen():
# Initiate concatenation lists
anc_points_list = []
pos_points_list = []
anc_keypts_list = []
pos_keypts_list = []
backup_anc_points_list = []
backup_pos_points_list = []
ti_list = []
ti_list_pos = []
batch_n = 0
# Initiate parameters depending on the chosen split
if split == 'train':
gen_indices = np.random.permutation(self.num_train)
# gen_indices = np.arange(self.num_train)
elif split == 'val':
gen_indices = np.random.permutation(self.num_val)
# gen_indices = np.arange(self.num_val)
elif split == 'test':
gen_indices = np.arange(self.num_test)
else:
raise ValueError('Wrong split argument in data generator: ' + split)
print(gen_indices)
# Generator loop
for p_i in gen_indices:
if split == 'test':
anc_id = self.ids_list[split][p_i]
pos_id = self.ids_list[split][p_i]
else:
anc_id = list(self.anc_to_pos[split].keys())[p_i]
                    if random.random() > 0.5:
                        pos_id = self.anc_to_pos[split][anc_id][0]
                    else:
                        pos_id = random.choice(self.anc_to_pos[split][anc_id])
anc_ind = self.ids_list[split].index(anc_id)
pos_ind = self.ids_list[split].index(pos_id)
anc_points = self.anc_points[split][anc_ind].astype(np.float32)
pos_points = self.anc_points[split][pos_ind].astype(np.float32)
# back up point cloud
backup_anc_points = anc_points
backup_pos_points = pos_points
n = anc_points.shape[0] + pos_points.shape[0]
                if split == 'test': # for test, use all 5000 anc_keypts
anc_keypts = np.array([])
pos_keypts = np.array([])
# add rotation to test on Rotated3DMatch
# anc_points = rotate(anc_points, num_axis=3)
# pos_points = rotate(pos_points, num_axis=3)
else:
if anc_points.shape[0] > 80000 or pos_points.shape[0] > 80000:
continue
if anc_points.shape[0] < 2000 or pos_points.shape[0] < 2000:
continue
anc_keypts = self.keypts[split][f'{anc_id}@{pos_id}'][:, 0]
pos_keypts = self.keypts[split][f'{anc_id}@{pos_id}'][:, 1]
if split == 'train':
selected_ind = np.random.choice(min(len(anc_keypts), len(pos_keypts)), config.keypts_num, replace=False)
else:
selected_ind = np.random.choice(min(len(anc_keypts), len(pos_keypts)), 64, replace=False)
anc_keypts = anc_keypts[selected_ind]
pos_keypts = pos_keypts[selected_ind] + len(anc_points)
# if split == 'train':
# # training does not need this keypts
# anc_keypts = np.random.choice(len(anc_points), 200)
# pos_keypts = np.random.choice(len(anc_points), 200)
# else:
# # find the correspondence by nearest neighbors sourch.
# anc_keypts = np.random.choice(len(anc_points), 400)
# pos_pcd = open3d.PointCloud()
# pos_pcd.points = open3d.utility.Vector3dVector(pos_points)
# kdtree = open3d.geometry.KDTreeFlann(pos_pcd)
# pos_ind = []
# anc_ind = []
# for pts, i in zip(anc_points[anc_keypts], anc_keypts):
# _, ind, dis = kdtree.search_knn_vector_3d(pts, 1)
# if dis[0] < 0.001 and ind[0] not in pos_ind and i not in anc_ind:
# pos_ind.append(ind[0])
# anc_ind.append(i)
# if len(anc_ind) >= config.keypts_num:
# break
# anc_keypts = np.array(anc_ind)
# pos_keypts = np.array(pos_ind)
# pos_keypts = pos_keypts + len(anc_points)
# # No matter how many num_keypts are used for training, test only use 64 pair.
# if len(anc_keypts) >= config.keypts_num:
# if split == 'train':
# selected_ind = np.random.choice(range(len(anc_keypts)), config.keypts_num, replace=False)
# else:
# selected_ind = np.random.choice(range(len(anc_keypts)), 64, replace=False)
# anc_keypts = anc_keypts[selected_ind]
# pos_keypts = pos_keypts[selected_ind]
# else: # if can not build enough correspondence, then skip this fragments pair.
# continue
# data augmentations: noise
anc_noise = np.random.rand(anc_points.shape[0], 3) * config.augment_noise
pos_noise = np.random.rand(pos_points.shape[0], 3) * config.augment_noise
anc_points += anc_noise
pos_points += pos_noise
# data augmentations: rotation
anc_points = rotate(anc_points, num_axis=config.augment_rotation)
pos_points = rotate(pos_points, num_axis=config.augment_rotation)
# Add data to current batch
anc_points_list += [anc_points]
anc_keypts_list += [anc_keypts]
pos_points_list += [pos_points]
pos_keypts_list += [pos_keypts]
backup_anc_points_list += [backup_anc_points]
backup_pos_points_list += [backup_pos_points]
ti_list += [p_i]
ti_list_pos += [p_i]
yield (np.concatenate(anc_points_list + pos_points_list, axis=0), # anc_points
np.concatenate(anc_keypts_list, axis=0), # anc_keypts
np.concatenate(pos_keypts_list, axis=0),
np.array(ti_list + ti_list_pos, dtype=np.int32), # anc_obj_index
np.array([tp.shape[0] for tp in anc_points_list] + [tp.shape[0] for tp in pos_points_list]), # anc_stack_length
np.array([anc_id, pos_id]),
                       np.concatenate(backup_anc_points_list + backup_pos_points_list, axis=0))  # backup_points
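                # Sketch of how such a generator is typically wired up in a
                # KPConv-style TF1 pipeline (an assumption about code not shown
                # in this snippet; dtypes/shapes below are illustrative):
                #
                #     gen_types = (tf.float32, tf.int32, tf.int32, tf.int32,
                #                  tf.int32, tf.string, tf.float32)
                #     gen_shapes = ([None, 3], [None], [None], [None],
                #                   [None], [None], [None, 3])
                #     dataset = tf.data.Dataset.from_generator(
                #         random_balanced_gen, gen_types, gen_shapes)
                #
                # with one (type, shape) entry per element of the yielded tuple.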
"""
Tests for the generic MLEModel
Author: <NAME>
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import (sarimax, varmax, kalman_filter,
kalman_smoother)
from statsmodels.tsa.statespace.mlemodel import MLEModel, MLEResultsWrapper
from statsmodels.tsa.statespace.tools import compatibility_mode
from statsmodels.datasets import nile
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from statsmodels.tsa.statespace.tests.results import results_sarimax, results_var_misc
current_path = os.path.dirname(os.path.abspath(__file__))
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
# Basic kwargs
kwargs = {
'k_states': 1, 'design': [[1]], 'transition': [[1]],
'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
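# Reading of `kwargs` as a state space model (a minimal sketch; the example is
# illustrative and not used by the tests below): design = transition =
# selection = state_cov = [[1]] with no obs_cov means
#
#     y_t         = alpha_t                   (no measurement noise)
#     alpha_{t+1} = alpha_t + eta_t,          eta_t ~ N(0, 1)
#
# i.e. a random-walk / local-level state with an approximate diffuse prior on
# the initial state:
#
#     mod = MLEModel([1., 2., 3.], **kwargs)
#     res = mod.filter([])   # the bare model has no free parameters
#     res.llf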
def get_dummy_mod(fit=True, pandas=False):
    # This tests a time-varying parameter regression when in fact the parameters
    # are not time-varying, so that the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
if pandas:
index = pd.date_range('1960-01-01', periods=100, freq='MS')
endog = pd.Series(endog, index=index)
exog = pd.Series(exog, index=index)
mod = sarimax.SARIMAX(endog, exog=exog, order=(0,0,0), time_varying_regression=True, mle_regression=False)
if fit:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
else:
res = None
return mod, res
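# Why the fit is perfect: with exog = 2 * endog, the regression
# endog_t = beta_t * exog_t is solved exactly by the constant coefficient
# beta_t = 0.5, so the filtered residuals are numerically zero and both
# variance parameters are driven towards zero (test_fit_misc below expects
# fitted params of approximately [0, 0]).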
def test_wrapping():
# Test the wrapping of various Representation / KalmanFilter /
# KalmanSmoother methods / attributes
mod, _ = get_dummy_mod(fit=False)
# Test that we can get the design matrix
assert_equal(mod['design', 0, 0], 2.0 * np.arange(100))
# Test that we can set individual elements of the design matrix
mod['design', 0, 0, :] = 2
assert_equal(mod.ssm['design', 0, 0, :], 2)
assert_equal(mod.ssm['design'].shape, (1, 1, 100))
# Test that we can set the entire design matrix
mod['design'] = [[3.]]
assert_equal(mod.ssm['design', 0, 0], 3.)
# (Now it's no longer time-varying, so only 2-dim)
assert_equal(mod.ssm['design'].shape, (1, 1))
# Test that we can change the following properties: loglikelihood_burn,
# initial_variance, tolerance
assert_equal(mod.loglikelihood_burn, 1)
mod.loglikelihood_burn = 0
assert_equal(mod.ssm.loglikelihood_burn, 0)
assert_equal(mod.tolerance, mod.ssm.tolerance)
mod.tolerance = 0.123
assert_equal(mod.ssm.tolerance, 0.123)
assert_equal(mod.initial_variance, 1e10)
mod.initial_variance = 1e12
assert_equal(mod.ssm.initial_variance, 1e12)
# Test that we can use the following wrappers: initialization,
# initialize_known, initialize_stationary, initialize_approximate_diffuse
# Initialization starts off as none
assert_equal(mod.initialization, None)
# Since the SARIMAX model may be fully stationary or may have diffuse
# elements, it uses a custom initialization by default, but it can be
# overridden by users
mod.initialize_state()
# (The default initialization in this case is known because there is a non-
# stationary state corresponding to the time-varying regression parameter)
assert_equal(mod.initialization, 'known')
mod.initialize_approximate_diffuse(1e5)
assert_equal(mod.initialization, 'approximate_diffuse')
assert_equal(mod.ssm._initial_variance, 1e5)
mod.initialize_known([5.], [[40]])
assert_equal(mod.initialization, 'known')
assert_equal(mod.ssm._initial_state, [5.])
assert_equal(mod.ssm._initial_state_cov, [[40]])
mod.initialize_stationary()
assert_equal(mod.initialization, 'stationary')
# Test that we can use the following wrapper methods: set_filter_method,
# set_stability_method, set_conserve_memory, set_smoother_output
# The defaults are as follows:
assert_equal(mod.ssm.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(mod.ssm.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(mod.ssm.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
assert_equal(mod.ssm.smoother_output, kalman_smoother.SMOOTHER_ALL)
# Now, create the Cython filter object and assert that they have
# transferred correctly
mod.ssm._initialize_filter()
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# (the smoother object is so far not in Cython, so there is no
# transferring)
# Change the attributes in the model class
if compatibility_mode:
assert_raises(NotImplementedError, mod.set_filter_method, 100)
else:
mod.set_filter_method(100)
mod.set_stability_method(101)
mod.set_conserve_memory(102)
mod.set_smoother_output(103)
# Assert that the changes have occurred in the ssm class
if not compatibility_mode:
assert_equal(mod.ssm.filter_method, 100)
assert_equal(mod.ssm.stability_method, 101)
assert_equal(mod.ssm.conserve_memory, 102)
assert_equal(mod.ssm.smoother_output, 103)
# Assert that the changes have *not yet* occurred in the filter object
assert_equal(kf.filter_method, kalman_filter.FILTER_CONVENTIONAL)
assert_equal(kf.stability_method, kalman_filter.STABILITY_FORCE_SYMMETRY)
assert_equal(kf.conserve_memory, kalman_filter.MEMORY_STORE_ALL)
# Re-initialize the filter object (this would happen automatically anytime
# loglike, filter, etc. were called)
# In this case, an error will be raised since filter_method=100 is not
# valid
# Note: this error is only raised in the compatibility case, since the
# newer filter logic checks for a valid filter mode at a different point
if compatibility_mode:
assert_raises(NotImplementedError, mod.ssm._initialize_filter)
# Now, test the setting of the other two methods by resetting the
# filter method to a valid value
mod.set_filter_method(1)
mod.ssm._initialize_filter()
# Retrieve the new kalman filter object (a new object had to be created
# due to the changing filter method)
kf = mod.ssm._kalman_filter
assert_equal(kf.filter_method, 1)
assert_equal(kf.stability_method, 101)
assert_equal(kf.conserve_memory, 102)
def test_fit_misc():
true = results_sarimax.wpi1_stationary
endog = np.diff(true['data'])[1:]
mod = sarimax.SARIMAX(endog, order=(1,0,1), trend='c')
# Test optim_hessian={'opg','oim','approx'}
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res1 = mod.fit(method='ncg', disp=0, optim_hessian='opg', optim_complex_step=False)
res2 = mod.fit(method='ncg', disp=0, optim_hessian='oim', optim_complex_step=False)
# Check that the Hessians broadly result in the same optimum
assert_allclose(res1.llf, res2.llf, rtol=1e-2)
# Test return_params=True
mod, _ = get_dummy_mod(fit=False)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res_params = mod.fit(disp=-1, return_params=True)
# 5 digits necessary to accommodate 32-bit numpy / scipy with OpenBLAS 0.2.18
assert_almost_equal(res_params, [0, 0], 5)
def test_score_misc():
mod, res = get_dummy_mod()
# Test that the score function works
mod.score(res.params)
def test_from_formula():
assert_raises(NotImplementedError, lambda: MLEModel.from_formula(1,2,3))
def test_score_analytic_ar1():
# Test the score against the analytic score for an AR(1) model with 2
# observations
# Let endog = [1, 0.5], params=[0, 1]
mod = sarimax.SARIMAX([1, 0.5], order=(1,0,0))
def partial_phi(phi, sigma2):
return -0.5 * (phi**2 + 2*phi*sigma2 - 1) / (sigma2 * (1 - phi**2))
def partial_sigma2(phi, sigma2):
return -0.5 * (2*sigma2 + phi - 1.25) / (sigma2**2)
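    # Derivation sketch for the two partials above: for a stationary Gaussian
    # AR(1) with endog = [1, 0.5], the exact log-likelihood is
    #
    #     l(phi, s2) = -log(2*pi) - log(s2) + 0.5*log(1 - phi**2)
    #                  - (1 - phi**2) / (2*s2) - (0.5 - phi)**2 / (2*s2)
    #
    # and partial_phi / partial_sigma2 are its derivatives with respect to phi
    # and sigma2 after algebraic simplification.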
params = np.r_[0., 2]
# Compute the analytic score
analytic_score = np.r_[
partial_phi(params[0], params[1]),
partial_sigma2(params[0], params[1])]
# Check each of the approximations, transformed parameters
approx_cs = mod.score(params, transformed=True, approx_complex_step=True)
assert_allclose(approx_cs, analytic_score)
approx_fd = mod.score(params, transformed=True, approx_complex_step=False)
assert_allclose(approx_fd, analytic_score, atol=1e-5)
approx_fd_centered = (
mod.score(params, transformed=True, approx_complex_step=False,
approx_centered=True))
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
harvey_cs = mod.score(params, transformed=True, method='harvey',
approx_complex_step=True)
assert_allclose(harvey_cs, analytic_score)
harvey_fd = mod.score(params, transformed=True, method='harvey',
approx_complex_step=False)
assert_allclose(harvey_fd, analytic_score, atol=1e-5)
harvey_fd_centered = mod.score(params, transformed=True, method='harvey',
approx_complex_step=False,
approx_centered=True)
assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
# Check the approximations for untransformed parameters. The analytic
# check now comes from chain rule with the analytic derivative of the
# transformation
# if L* is the likelihood evaluated at untransformed parameters and
# L is the likelihood evaluated at transformed parameters, then we have:
# L*(u) = L(t(u))
# and then
# L'*(u) = L'(t(u)) * t'(u)
def partial_transform_phi(phi):
return -1. / (1 + phi**2)**(3./2)
def partial_transform_sigma2(sigma2):
return 2. * sigma2
uparams = mod.untransform_params(params)
analytic_score = np.dot(
np.diag(np.r_[partial_transform_phi(uparams[0]),
partial_transform_sigma2(uparams[1])]),
np.r_[partial_phi(params[0], params[1]),
partial_sigma2(params[0], params[1])])
approx_cs = mod.score(uparams, transformed=False, approx_complex_step=True)
assert_allclose(approx_cs, analytic_score)
approx_fd = mod.score(uparams, transformed=False,
approx_complex_step=False)
assert_allclose(approx_fd, analytic_score, atol=1e-5)
approx_fd_centered = (
mod.score(uparams, transformed=False, approx_complex_step=False,
approx_centered=True))
    assert_allclose(approx_fd_centered, analytic_score, atol=1e-5)
harvey_cs = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=True)
assert_allclose(harvey_cs, analytic_score)
harvey_fd = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=False)
assert_allclose(harvey_fd, analytic_score, atol=1e-5)
harvey_fd_centered = mod.score(uparams, transformed=False, method='harvey',
approx_complex_step=False,
approx_centered=True)
assert_allclose(harvey_fd_centered, analytic_score, atol=1e-5)
# Check the Hessian: these approximations are not very good, particularly
# when phi is close to 0
params = np.r_[0.5, 1.]
def hessian(phi, sigma2):
hessian = np.zeros((2,2))
hessian[0,0] = (-phi**2 - 1) / (phi**2 - 1)**2
hessian[1,0] = hessian[0,1] = -1 / (2 * sigma2**2)
hessian[1,1] = (sigma2 + phi - 1.25) / sigma2**3
return hessian
analytic_hessian = hessian(params[0], params[1])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
assert_allclose(mod._hessian_complex_step(params) * 2,
analytic_hessian, atol=1e-1)
assert_allclose(mod._hessian_finite_difference(params) * 2,
analytic_hessian, atol=1e-1)
def test_cov_params():
mod, res = get_dummy_mod()
# Smoke test for each of the covariance types
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(res.params, disp=-1, cov_type='none')
assert_equal(res.cov_kwds['description'], 'Covariance matrix not calculated.')
res = mod.fit(res.params, disp=-1, cov_type='approx')
assert_equal(res.cov_type, 'approx')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using numerical (complex-step) differentiation.')
res = mod.fit(res.params, disp=-1, cov_type='oim')
assert_equal(res.cov_type, 'oim')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='opg')
assert_equal(res.cov_type, 'opg')
assert_equal(res.cov_kwds['description'], 'Covariance matrix calculated using the outer product of gradients (complex-step).')
res = mod.fit(res.params, disp=-1, cov_type='robust')
assert_equal(res.cov_type, 'robust')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_oim')
assert_equal(res.cov_type, 'robust_oim')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using the observed information matrix (complex-step) described in Harvey (1989).')
res = mod.fit(res.params, disp=-1, cov_type='robust_approx')
assert_equal(res.cov_type, 'robust_approx')
assert_equal(res.cov_kwds['description'], 'Quasi-maximum likelihood covariance matrix used for robustness to some misspecifications; calculated using numerical (complex-step) differentiation.')
assert_raises(NotImplementedError, mod.fit, res.params, disp=-1, cov_type='invalid_cov_type')
def test_transform():
# The transforms in MLEModel are noops
mod = MLEModel([1,2], **kwargs)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [2, 3])
assert_allclose(mod.untransform_params([2, 3]), [2, 3])
# Smoke test for transformation in `filter`, `update`, `loglike`,
# `loglikeobs`
mod.filter([], transformed=False)
mod.update([], transformed=False)
mod.loglike([], transformed=False)
mod.loglikeobs([], transformed=False)
# Note that mod is an SARIMAX instance, and the two parameters are
# variances
mod, _ = get_dummy_mod(fit=False)
# Test direct transform, untransform
assert_allclose(mod.transform_params([2, 3]), [4, 9])
assert_allclose(mod.untransform_params([4, 9]), [2, 3])
# Test transformation in `filter`
res = mod.filter([2, 3], transformed=True)
assert_allclose(res.params, [2, 3])
res = mod.filter([2, 3], transformed=False)
assert_allclose(res.params, [4, 9])
def test_filter():
endog = np.array([1., 2.])
mod = MLEModel(endog, **kwargs)
# Test return of ssm object
res = mod.filter([], return_ssm=True)
assert_equal(isinstance(res, kalman_filter.FilterResults), True)
# Test return of full results object
res = mod.filter([])
assert_equal(isinstance(res, MLEResultsWrapper), True)
assert_equal(res.cov_type, 'opg')
# Test return of full results object, specific covariance type
res = mod.filter([], cov_type='oim')
assert_equal(isinstance(res, MLEResultsWrapper), True)
assert_equal(res.cov_type, 'oim')
def test_params():
mod = MLEModel([1,2], **kwargs)
# By default start_params raises NotImplementedError
assert_raises(NotImplementedError, lambda: mod.start_params)
# But param names are by default an empty array
assert_equal(mod.param_names, [])
# We can set them in the object if we want
mod._start_params = [1]
mod._param_names = ['a']
assert_equal(mod.start_params, [1])
assert_equal(mod.param_names, ['a'])
def check_results(pandas):
mod, res = get_dummy_mod(pandas=pandas)
# Test fitted values
assert_almost_equal(res.fittedvalues[2:], mod.endog[2:].squeeze())
# Test residuals
assert_almost_equal(res.resid[2:], np.zeros(mod.nobs-2))
# Test loglikelihood_burn
assert_equal(res.loglikelihood_burn, 1)
def test_results(pandas=False):
check_results(pandas=False)
check_results(pandas=True)
def test_predict():
dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
endog = pd.Series([1,2], index=dates)
mod = MLEModel(endog, **kwargs)
res = mod.filter([])
# Test that predict with start=None, end=None does prediction with full
# dataset
predict = res.predict()
assert_equal(predict.shape, (mod.nobs,))
assert_allclose(res.get_prediction().predicted_mean, predict)
# Test a string value to the dynamic option
assert_allclose(res.predict(dynamic='1981-01-01'), res.predict())
# Test an invalid date string value to the dynamic option
# assert_raises(ValueError, res.predict, dynamic='1982-01-01')
# Test for passing a string to predict when dates are not set
mod = MLEModel([1,2], **kwargs)
res = mod.filter([])
assert_raises(KeyError, res.predict, dynamic='string')
def test_forecast():
# Numpy
mod = MLEModel([1,2], **kwargs)
res = mod.filter([])
forecast = res.forecast(steps=10)
assert_allclose(forecast, np.ones((10,)) * 2)
assert_allclose(res.get_forecast(steps=10).predicted_mean, forecast)
# Pandas
index = pd.date_range('1960-01-01', periods=2, freq='MS')
mod = MLEModel(pd.Series([1,2], index=index), **kwargs)
res = mod.filter([])
assert_allclose(res.forecast(steps=10), np.ones((10,)) * 2)
assert_allclose(res.forecast(steps='1960-12-01'), np.ones((10,)) * 2)
assert_allclose(res.get_forecast(steps=10).predicted_mean, np.ones((10,)) * 2)
def test_summary():
dates = pd.date_range(start='1980-01-01', end='1984-01-01', freq='AS')
endog = pd.Series([1,2,3,4,5], index=dates)
mod = MLEModel(endog, **kwargs)
res = mod.filter([])
# Get the summary
txt = str(res.summary())
# Test res.summary when the model has dates
    assert_equal(re.search(r'Sample:\s+01-01-1980', txt) is not None, True)
    assert_equal(re.search(r'\s+- 01-01-1984', txt) is not None, True)
    # Test res.summary when `model_name` was not provided
    assert_equal(re.search(r'Model:\s+MLEModel', txt) is not None, True)
# Smoke test that summary still works when diagnostic tests fail
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res.filter_results._standardized_forecasts_error[:] = np.nan
res.summary()
res.filter_results._standardized_forecasts_error = 1
res.summary()
res.filter_results._standardized_forecasts_error = 'a'
res.summary()
def check_endog(endog, nobs=2, k_endog=1, **kwargs):
# create the model
mod = MLEModel(endog, **kwargs)
# the data directly available in the model is the Statsmodels version of
# the data; it should be 2-dim, C-contiguous, long-shaped:
# (nobs, k_endog) == (2, 1)
assert_equal(mod.endog.ndim, 2)
assert_equal(mod.endog.flags['C_CONTIGUOUS'], True)
assert_equal(mod.endog.shape, (nobs, k_endog))
# the data in the `ssm` object is the state space version of the data; it
# should be 2-dim, F-contiguous, wide-shaped (k_endog, nobs) == (1, 2)
# and it should share data with mod.endog
assert_equal(mod.ssm.endog.ndim, 2)
assert_equal(mod.ssm.endog.flags['F_CONTIGUOUS'], True)
assert_equal(mod.ssm.endog.shape, (k_endog, nobs))
assert_equal(mod.ssm.endog.base is mod.endog, True)
return mod
def test_basic_endog():
# Test various types of basic python endog inputs (e.g. lists, scalars...)
# Check cannot call with non-array-like
# fails due to checks in Statsmodels base classes
assert_raises(ValueError, MLEModel, endog=1, k_states=1)
assert_raises(ValueError, MLEModel, endog='a', k_states=1)
assert_raises(ValueError, MLEModel, endog=True, k_states=1)
# Check behavior with different types
mod = MLEModel([1], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel([1.], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel([True], **kwargs)
res = mod.filter([])
assert_equal(res.filter_results.endog, [[1]])
mod = MLEModel(['a'], **kwargs)
    # raises error due to inability to coerce string to numeric
assert_raises(ValueError, mod.filter, [])
    # Check that different iterable types give the expected result
endog = [1.,2.]
mod = check_endog(endog, **kwargs)
mod.filter([])
endog = [[1.],[2.]]
mod = check_endog(endog, **kwargs)
mod.filter([])
endog = (1.,2.)
mod = check_endog(endog, **kwargs)
mod.filter([])
def test_numpy_endog():
# Test various types of numpy endog inputs
# Check behavior of the link maintained between passed `endog` and
# `mod.endog` arrays
endog = np.array([1., 2.])
mod = MLEModel(endog, **kwargs)
assert_equal(mod.endog.base is not mod.data.orig_endog, True)
assert_equal(mod.endog.base is not endog, True)
assert_equal(mod.data.orig_endog.base is not endog, True)
endog[0] = 2
# there is no link to mod.endog
assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
# there remains a link to mod.data.orig_endog
assert_equal(mod.data.orig_endog, endog)
# Check behavior with different memory layouts / shapes
# Example (failure): 0-dim array
endog = np.array(1.)
# raises error due to len(endog) failing in Statsmodels base classes
assert_raises(TypeError, check_endog, endog, **kwargs)
# Example : 1-dim array, both C- and F-contiguous, length 2
endog = np.array([1.,2.])
assert_equal(endog.ndim, 1)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (2,))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, C-contiguous, long-shaped: (nobs, k_endog)
endog = np.array([1., 2.]).reshape(2, 1)
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
    # On newer numpy (>= 1.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['F_CONTIGUOUS'], False)
assert_equal(endog.shape, (2, 1))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, C-contiguous, wide-shaped: (k_endog, nobs)
endog = np.array([1., 2.]).reshape(1, 2)
assert_equal(endog.ndim, 2)
assert_equal(endog.flags['C_CONTIGUOUS'], True)
    # On newer numpy (>= 1.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['F_CONTIGUOUS'], False)
assert_equal(endog.shape, (1, 2))
# raises error because arrays are always interpreted as
    # (nobs, k_endog), which means that k_endog=2 is incompatible with shape
# of design matrix (1, 1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : 2-dim array, F-contiguous, long-shaped (nobs, k_endog)
endog = np.array([1., 2.]).reshape(1, 2).transpose()
assert_equal(endog.ndim, 2)
    # On newer numpy (>= 1.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['C_CONTIGUOUS'], False)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (2, 1))
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : 2-dim array, F-contiguous, wide-shaped (k_endog, nobs)
endog = np.array([1., 2.]).reshape(2, 1).transpose()
assert_equal(endog.ndim, 2)
    # On newer numpy (>= 1.10), this array is (rightly) both C and F contiguous
# assert_equal(endog.flags['C_CONTIGUOUS'], False)
assert_equal(endog.flags['F_CONTIGUOUS'], True)
assert_equal(endog.shape, (1, 2))
# raises error because arrays are always interpreted as
    # (nobs, k_endog), which means that k_endog=2 is incompatible with shape
# of design matrix (1, 1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example (failure): 3-dim array
endog = np.array([1., 2.]).reshape(2, 1, 1)
# raises error due to direct ndim check in Statsmodels base classes
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : np.array with 2 columns
# Update kwargs for k_endog=2
kwargs2 = {
'k_states': 1, 'design': [[1], [0.]], 'obs_cov': [[1, 0], [0, 1]],
'transition': [[1]], 'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
endog = np.array([[1., 2.], [3., 4.]])
mod = check_endog(endog, k_endog=2, **kwargs2)
mod.filter([])
def test_pandas_endog():
# Test various types of pandas endog inputs (e.g. TimeSeries, etc.)
# Example (failure): pandas.Series, no dates
endog = pd.Series([1., 2.])
# raises error due to no dates
warnings.simplefilter('always')
# assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : pandas.Series
dates = pd.date_range(start='1980-01-01', end='1981-01-01', freq='AS')
endog = pd.Series([1., 2.], index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : pandas.Series, string datatype
endog = pd.Series(['a'], index=dates)
# raises error due to direct type casting check in Statsmodels base classes
assert_raises(ValueError, check_endog, endog, **kwargs)
# Example : pandas.Series
endog = pd.Series([1., 2.], index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example : pandas.DataFrame with 1 column
endog = pd.DataFrame({'a': [1., 2.]}, index=dates)
mod = check_endog(endog, **kwargs)
mod.filter([])
# Example (failure): pandas.DataFrame with 2 columns
endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates)
# raises error because 2-columns means k_endog=2, but the design matrix
# set in **kwargs is shaped (1,1)
assert_raises(ValueError, check_endog, endog, **kwargs)
# Check behavior of the link maintained between passed `endog` and
# `mod.endog` arrays
endog = pd.DataFrame({'a': [1., 2.]}, index=dates)
mod = check_endog(endog, **kwargs)
assert_equal(mod.endog.base is not mod.data.orig_endog, True)
assert_equal(mod.endog.base is not endog, True)
assert_equal(mod.data.orig_endog.values.base is not endog, True)
endog.iloc[0, 0] = 2
# there is no link to mod.endog
assert_equal(mod.endog, np.r_[1, 2].reshape(2,1))
# there remains a link to mod.data.orig_endog
assert_allclose(mod.data.orig_endog, endog)
# Example : pandas.DataFrame with 2 columns
# Update kwargs for k_endog=2
kwargs2 = {
'k_states': 1, 'design': [[1], [0.]], 'obs_cov': [[1, 0], [0, 1]],
'transition': [[1]], 'selection': [[1]], 'state_cov': [[1]],
'initialization': 'approximate_diffuse'
}
endog = pd.DataFrame({'a': [1., 2.], 'b': [3., 4.]}, index=dates)
mod = check_endog(endog, k_endog=2, **kwargs2)
mod.filter([])
def test_diagnostics():
mod, res = get_dummy_mod()
# Override the standardized forecasts errors to get more reasonable values
# for the tests to run (not necessary, but prevents some annoying warnings)
shape = res.filter_results._standardized_forecasts_error.shape
res.filter_results._standardized_forecasts_error = (
np.random.normal(size=shape))
# Make sure method=None selects the appropriate test
actual = res.test_normality(method=None)
desired = res.test_normality(method='jarquebera')
assert_allclose(actual, desired)
assert_raises(NotImplementedError, res.test_normality, method='invalid')
actual = res.test_heteroskedasticity(method=None)
desired = res.test_heteroskedasticity(method='breakvar')
assert_allclose(actual, desired)
assert_raises(ValueError, res.test_heteroskedasticity, method=None, alternative='invalid')
assert_raises(NotImplementedError, res.test_heteroskedasticity, method='invalid')
actual = res.test_serial_correlation(method=None)
desired = res.test_serial_correlation(method='ljungbox')
assert_allclose(actual, desired)
assert_raises(NotImplementedError, res.test_serial_correlation, method='invalid')
# Smoke tests for other options
actual = res.test_heteroskedasticity(method=None, alternative='d', use_f=False)
desired = res.test_serial_correlation(method='boxpierce')
def test_diagnostics_nile_eviews():
# Test the diagnostic tests using the Nile dataset. Results are from
# "Fitting State Space Models with EViews" (<NAME> 2011,
# Journal of Statistical Software).
# For parameter values, see Figure 2
# For Ljung-Box and Jarque-Bera statistics and p-values, see Figure 5
# The Heteroskedasticity statistic is not provided in this paper.
niledata = nile.data.load_pandas().data
niledata.index = pd.date_range('1871-01-01', '1970-01-01', freq='AS')
mod = MLEModel(niledata['volume'], k_states=1,
initialization='approximate_diffuse', initial_variance=1e15,
loglikelihood_burn=1)
mod.ssm['design', 0, 0] = 1
mod.ssm['obs_cov', 0, 0] = np.exp(9.600350)
mod.ssm['transition', 0, 0] = 1
mod.ssm['selection', 0, 0] = 1
mod.ssm['state_cov', 0, 0] = np.exp(7.348705)
res = mod.filter([])
# Test Ljung-Box
# Note: only 3 digits provided in the reference paper
actual = res.test_serial_correlation(method='ljungbox', lags=10)[0, :, -1]
assert_allclose(actual, [13.117, 0.217], atol=1e-3)
# Test Jarque-Bera
actual = res.test_normality(method='jarquebera')[0, :2]
assert_allclose(actual, [0.041686, 0.979373], atol=1e-5)
def test_diagnostics_nile_durbinkoopman():
# Test the diagnostic tests using the Nile dataset. Results are from
# Durbin and Koopman (2012); parameter values reported on page 37; test
# statistics on page 40
niledata = nile.data.load_pandas().data
niledata.index = pd.date_range('1871-01-01', '1970-01-01', freq='AS')
mod = MLEModel(niledata['volume'], k_states=1,
initialization='approximate_diffuse', initial_variance=1e15,
loglikelihood_burn=1)
mod.ssm['design', 0, 0] = 1
mod.ssm['obs_cov', 0, 0] = 15099.
mod.ssm['transition', 0, 0] = 1
mod.ssm['selection', 0, 0] = 1
mod.ssm['state_cov', 0, 0] = 1469.1
res = mod.filter([])
# Test Ljung-Box
# Note: only 3 digits provided in the reference paper
actual = res.test_serial_correlation(method='ljungbox', lags=9)[0, 0, -1]
assert_allclose(actual, [8.84], atol=1e-2)
# Test Jarque-Bera
# Note: The book reports 0.09 for Kurtosis, because it is reporting the
# statistic less the mean of the Kurtosis distribution (which is 3).
norm = res.test_normality(method='jarquebera')[0]
actual = [norm[0], norm[2], norm[3]]
assert_allclose(actual, [0.05, -0.03, 3.09], atol=1e-2)
# Test Heteroskedasticity
# Note: only 2 digits provided in the book
actual = res.test_heteroskedasticity(method='breakvar')[0, 0]
assert_allclose(actual, [0.61], atol=1e-2)
def test_prediction_results():
# Just smoke tests for the PredictionResults class, which is copied from
# elsewhere in Statsmodels
mod, res = get_dummy_mod()
predict = res.get_prediction()
summary_frame = predict.summary_frame()
def test_lutkepohl_information_criteria():
# Setup dataset, use Lutkepohl data
dta = pd.DataFrame(
results_var_misc.lutkepohl_data, columns=['inv', 'inc', 'consump'],
index=pd.date_range('1960-01-01', '1982-10-01', freq='QS'))
dta['dln_inv'] = np.log(dta['inv']).diff()
dta['dln_inc'] = np.log(dta['inc']).diff()
dta['dln_consump'] = np.log(dta['consump']).diff()
endog = dta.loc['1960-04-01':'1978-10-01',
['dln_inv', 'dln_inc', 'dln_consump']]
# AR model - SARIMAX
# (use loglikelihood_burn=1 to mimic conditional MLE used by Stata's var
# command).
true = results_var_misc.lutkepohl_ar1_lustats
mod = sarimax.SARIMAX(endog['dln_inv'], order=(1, 0, 0), trend='c',
loglikelihood_burn=1)
res = mod.filter(true['params'])
assert_allclose(res.llf, true['loglike'])
# Test the Lutkepohl ICs
# Note: for the Lutkepohl ICs, Stata only counts the AR coefficients as
# estimated parameters for the purposes of information criteria, whereas we
# count all parameters including scale and constant, so we need to adjust
# for that
aic = (res.info_criteria('aic', method='lutkepohl') -
2 * 2 / res.nobs_effective)
bic = (res.info_criteria('bic', method='lutkepohl') -
2 * np.log(res.nobs_effective) / res.nobs_effective)
hqic = (res.info_criteria('hqic', method='lutkepohl') -
2 * 2 * np.log(np.log(res.nobs_effective)) / res.nobs_effective)
assert_allclose(aic, true['aic'])
assert_allclose(bic, true['bic'])
assert_allclose(hqic, true['hqic'])
# Test the non-Lutkepohl ICs
# Note: for the non-Lutkepohl ICs, Stata does not count the scale as an
# estimated parameter, but does count the constant term, for the
# purposes of information criteria, whereas we count both, so we need to
# adjust for that
true = results_var_misc.lutkepohl_ar1
aic = res.aic - 2
bic = res.bic - np.log(res.nobs_effective)
assert_allclose(aic, true['estat_aic'])
assert_allclose(bic, true['estat_bic'])
aic = res.info_criteria('aic') - 2
bic = res.info_criteria('bic') - np.log(res.nobs_effective)
assert_allclose(aic, true['estat_aic'])
assert_allclose(bic, true['estat_bic'])
# Note: could also test the "dfk" (degree of freedom corrections), but not
# really necessary since they just rescale things a bit
# VAR model - VARMAX
# (use loglikelihood_burn=1 to mimic conditional MLE used by Stata's var
# command).
true = results_var_misc.lutkepohl_var1_lustats
mod = varmax.VARMAX(endog, order=(1, 0), trend='nc',
error_cov_type='unstructured', loglikelihood_burn=1,)
res = mod.filter(true['params'])
assert_allclose(res.llf, true['loglike'])
# Test the Lutkepohl ICs
# Note: for the Lutkepohl ICs, Stata only counts the AR coefficients as
# estimated parameters for the purposes of information criteria, whereas we
# count all parameters including the elements of the covariance matrix, so
# we need to adjust for that
aic = (res.info_criteria('aic', method='lutkepohl') -
2 * 6 / res.nobs_effective)
bic = (res.info_criteria('bic', method='lutkepohl') -
6 * np.log(res.nobs_effective) / res.nobs_effective)
hqic = (res.info_criteria('hqic', method='lutkepohl') -
2 * 6 * np.log(np.log(res.nobs_effective)) / res.nobs_effective)
assert_allclose(aic, true['aic'])
assert_allclose(bic, true['bic'])
assert_allclose(hqic, true['hqic'])
# Test the non-Lutkepohl ICs
# Note: for the non-Lutkepohl ICs, Stata does not count the elements of the
# covariance matrix as estimated parameters for the purposes of information
# criteria, whereas we count both, so we need to adjust for that
true = results_var_misc.lutkepohl_var1
aic = res.aic - 2 * 6
bic = res.bic - 6 * np.log(res.nobs_effective)
    assert_allclose(aic, true['estat_aic'])
    assert_allclose(bic, true['estat_bic'])
import warnings
import cv2
import matplotlib.pyplot as plt
import numpy as np
import scipy
from scipy.optimize import linear_sum_assignment
def get_fast_aji(true, pred):
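    """Aggregated Jaccard Index (AJI) for instance segmentation maps.

    Assumes `true` and `pred` are integer-labelled instance maps of identical
    shape, where 0 is background and every positive id marks one instance
    (e.g. one nucleus). Pairwise intersections and unions between ground-truth
    and predicted instances are accumulated and combined into a single
    IoU-based score.
    """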
true = np.copy(true) # ? do we need this
pred = np.copy(pred)
true_id_list = list(np.unique(true))
pred_id_list = list(np.unique(pred))
true_masks = [
None,
]
for t in true_id_list[1:]:
t_mask = np.array(true == t, np.uint8)
true_masks.append(t_mask)
pred_masks = [
None,
]
for p in pred_id_list[1:]:
p_mask = np.array(pred == p, np.uint8)
pred_masks.append(p_mask)
pairwise_inter = np.zeros(
[len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
)
pairwise_union = np.zeros(
[len(true_id_list) - 1, len(pred_id_list) - 1], dtype=np.float64
)
for true_id in true_id_list[1:]: # 0-th is background
t_mask = true_masks[true_id]
pred_true_overlap = pred[t_mask > 0]
pred_true_overlap_id = np.unique(pred_true_overlap)
pred_true_overlap_id = list(pred_true_overlap_id)
for pred_id in pred_true_overlap_id:
if pred_id == 0: # ignore
                continue # overlapping background
p_mask = pred_masks[pred_id]
total = (t_mask + p_mask).sum()
inter = (t_mask * p_mask).sum()
pairwise_inter[true_id - 1, pred_id - 1] = inter
pairwise_union[true_id - 1, pred_id - 1] = total - inter
pairwise_iou = pairwise_inter / (pairwise_union + 1.0e-6)
    paired_pred = np.argmax(pairwise_iou, axis=1)
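    # Sketch of the remaining AJI aggregation, assuming the omitted part of the
    # function follows the standard definition (greedy per-true IoU matching;
    # unmatched instances only enlarge the union):
    paired_iou = np.max(pairwise_iou, axis=1)
    paired_true = np.nonzero(paired_iou > 0.0)[0]           # matched true rows
    paired_pred = paired_pred[paired_true]                  # their best pred columns
    overall_inter = pairwise_inter[paired_true, paired_pred].sum()
    overall_union = pairwise_union[paired_true, paired_pred].sum()
    paired_true_ids = list(paired_true + 1)                 # rows -> instance ids
    paired_pred_ids = list(paired_pred + 1)
    for true_id in true_id_list[1:]:
        if true_id not in paired_true_ids:
            overall_union += true_masks[true_id].sum()
    for pred_id in pred_id_list[1:]:
        if pred_id not in paired_pred_ids:
            overall_union += pred_masks[pred_id].sum()
    return overall_inter / overall_union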
'''Statistical tests for NDVars
Common Attributes
-----------------
The following attributes are always present. For ANOVA, they are lists with the
corresponding items for different effects.
t/f/... : NDVar
Map of the statistical parameter.
p_uncorrected : NDVar
Map of uncorrected p values.
p : NDVar | None
    Map of corrected p values (None if no correction was applied).
clusters : Dataset | None
Table of all the clusters found (None if no clusters were found, or if no
clustering was performed).
n_samples : None | int
The actual number of permutations. If ``samples = -1``, i.e. a complete set
    of permutations is performed, then ``n_samples`` indicates the actual
number of permutations that constitute the complete set.
'''
from datetime import datetime, timedelta
from functools import reduce, partial
from itertools import chain, repeat
from math import ceil
from multiprocessing import Process, Event, SimpleQueue
from multiprocessing.sharedctypes import RawArray
import logging
import operator
import os
import re
import socket
from time import time as current_time
from typing import Union
import numpy as np
import scipy.stats
from scipy import ndimage
from tqdm import trange
from .. import fmtxt, _info, _text
from ..fmtxt import FMText
from .._celltable import Celltable
from .._config import CONFIG
from .._data_obj import (
CategorialArg, CellArg, IndexArg, ModelArg, NDVarArg, VarArg,
Dataset, Var, Factor, Interaction, NestedEffect,
NDVar, Categorial, UTS,
ascategorial, asmodel, asndvar, asvar, assub,
cellname, combine, dataobj_repr)
from .._exceptions import OldVersionError, WrongDimension, ZeroVariance
from .._utils import LazyProperty, user_activity
from .._utils.numpy_utils import FULL_AXIS_SLICE
from . import opt, stats, vector
from .connectivity import Connectivity, find_peaks
from .connectivity_opt import merge_labels, tfce_increment
from .glm import _nd_anova
from .permutation import (
_resample_params, permute_order, permute_sign_flip, random_seeds,
rand_rotation_matrices)
from .t_contrast import TContrastRel
from .test import star, star_factor
__test__ = False
def check_for_vector_dim(y: NDVar) -> None:
for dim in y.dims:
if dim._connectivity_type == 'vector':
raise WrongDimension(f"{dim}: mass-univariate methods are not suitable for vectors. Consider using vector norm as test statistic, or using a testnd.Vector test function.")
def check_variance(x):
if x.ndim != 2:
x = x.reshape((len(x), -1))
if opt.has_zero_variance(x):
raise ZeroVariance("y contains data column with zero variance")
class NDTest:
"""Baseclass for testnd test results
Attributes
----------
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
"""
_state_common = ('y', 'match', 'sub', 'samples', 'tfce', 'pmin', '_cdist',
'tstart', 'tstop', '_dims')
_state_specific = ()
_statistic = None
_statistic_tail = 0
@property
def _attributes(self):
return self._state_common + self._state_specific
def __init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop):
self.y = y.name
self.match = dataobj_repr(match) if match else match
self.sub = sub
self.samples = samples
self.tfce = tfce
self.pmin = pmin
self._cdist = cdist
self.tstart = tstart
self.tstop = tstop
self._dims = y.dims[1:]
def __getstate__(self):
return {name: getattr(self, name, None) for name in self._attributes}
def __setstate__(self, state):
# backwards compatibility:
if 'Y' in state:
state['y'] = state.pop('Y')
if 'X' in state:
state['x'] = state.pop('X')
for k, v in state.items():
setattr(self, k, v)
# backwards compatibility:
if 'tstart' not in state:
cdist = self._first_cdist
self.tstart = cdist.tstart
self.tstop = cdist.tstop
if '_dims' not in state: # 0.17
if 't' in state:
self._dims = state['t'].dims
elif 'r' in state:
self._dims = state['r'].dims
elif 'f' in state:
self._dims = state['f'][0].dims
else:
raise RuntimeError("Error recovering old test results dims")
self._expand_state()
def __repr__(self):
args = self._repr_test_args()
if self.sub is not None:
if isinstance(self.sub, np.ndarray):
sub_repr = '<array>'
else:
sub_repr = repr(self.sub)
args.append(f'sub={sub_repr}')
if self._cdist:
args += self._repr_cdist()
else:
args.append('samples=0')
return f"<{self.__class__.__name__} {', '.join(args)}>"
def _repr_test_args(self):
"""List of strings describing parameters unique to the test
Will be joined with ``", ".join(repr_args)``
"""
raise NotImplementedError()
def _repr_cdist(self):
"""List of results (override for MultiEffectResult)"""
return (self._cdist._repr_test_args(self.pmin) +
self._cdist._repr_clusters())
def _expand_state(self):
"Override to create secondary results"
cdist = self._cdist
if cdist is None:
self.tfce_map = None
self.p = None
self._kind = None
else:
self.tfce_map = cdist.tfce_map
self.p = cdist.probability_map
self._kind = cdist.kind
def _desc_samples(self):
if self.samples == -1:
return f"a complete set of {self.n_samples} permutations"
elif self.samples is None:
return "no permutations"
else:
return f"{self.n_samples} random permutations"
def _desc_timewindow(self):
tstart = self._time_dim.tmin if self.tstart is None else self.tstart
tstop = self._time_dim.tstop if self.tstop is None else self.tstop
return f"{_text.ms(tstart)} - {_text.ms(tstop)} ms"
def _asfmtext(self):
p = self.p.min()
max_stat = self._max_statistic()
return FMText((fmtxt.eq(self._statistic, max_stat, 'max', stars=p), ', ', fmtxt.peq(p)))
def _default_plot_obj(self):
raise NotImplementedError
def _iter_cdists(self):
yield (None, self._cdist)
@property
def _first_cdist(self):
return self._cdist
def _plot_model(self):
"Determine x for plotting categories"
return None
def _plot_sub(self):
if isinstance(self.sub, str) and self.sub == "<unsaved array>":
raise RuntimeError("The sub parameter was not saved for previous "
"versions of Eelbrain. Please recompute this "
"result with the current version.")
return self.sub
def _assert_has_cdist(self):
if self._cdist is None:
raise RuntimeError("This method only applies to results of tests "
"with threshold-based clustering and tests with "
"a permutation distribution (samples > 0)")
def masked_parameter_map(self, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
return self._cdist.masked_parameter_map(pmin, **sub)
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
return self._cdist.cluster(cluster_id)
@LazyProperty
def clusters(self):
if self._cdist is None:
return None
else:
return self.find_clusters(None, True)
def find_clusters(self, pmin=None, maps=False, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
return self._cdist.clusters(pmin, maps, **sub)
def find_peaks(self):
"""Find peaks in a threshold-free cluster distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
return self._cdist.find_peaks()
def compute_probability_map(self, **sub):
"""Compute a probability map
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
return self._cdist.compute_probability_map(**sub)
def info_list(self, computation=True):
"List with information about the test"
out = fmtxt.List("Mass-univariate statistics:")
out.add_item(self._name())
dimnames = [dim.name for dim in self._dims]
dimlist = out.add_sublist(f"Over {_text.enumeration(dimnames)}")
if 'time' in dimnames:
dimlist.add_item(f"Time interval: {self._desc_timewindow()}.")
cdist = self._first_cdist
if cdist is None:
out.add_item("No inferential statistics")
return out
# inference
l = out.add_sublist("Inference:")
if cdist.kind == 'raw':
l.add_item("Based on maximum statistic")
elif cdist.kind == 'tfce':
l.add_item("Based on maximum statistic with threshold-"
"free cluster enhancement (Smith & Nichols, 2009)")
elif cdist.kind == 'cluster':
l.add_item("Based on maximum cluster mass statistic")
sl = l.add_sublist("Cluster criteria:")
for dim in dimnames:
if dim == 'time':
sl.add_item(f"Minimum cluster duration {_text.ms(cdist.criteria.get('mintime', 0))} ms")
elif dim == 'source':
sl.add_item(f"At least {cdist.criteria.get('minsource', 0)} contiguous sources.")
elif dim == 'sensor':
sl.add_item(f"At least {cdist.criteria.get('minsensor', 0)} contiguous sensors.")
else:
value = cdist.criteria.get(f'min{dim}', 0)
sl.add_item(f"Minimum number of contiguous elements in {dim}: {value}")
# n samples
l.add_item(f"In {self._desc_samples()}")
# computation
if computation:
out.add_item(cdist.info_list())
return out
@property
def _statistic_map(self):
return getattr(self, self._statistic)
def _max_statistic(self):
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(self._statistic_map, self.p, tail)
@staticmethod
def _max_statistic_from_map(stat_map: NDVar, p_map: NDVar, tail: int):
if tail == 0:
func = stat_map.extrema
elif tail == 1:
func = stat_map.max
else:
func = stat_map.min
if p_map:
mask = p_map <= .05 if p_map.min() <= .05 else None
else:
mask = None
return func() if mask is None else func(mask)
@property
def n_samples(self):
if self.samples == -1:
return self._first_cdist.samples
else:
return self.samples
@property
def _time_dim(self):
for dim in self._first_cdist.dims:
if isinstance(dim, UTS):
return dim
return None
class t_contrast_rel(NDTest):
"""Mass-univariate contrast based on t-values
Parameters
----------
y : NDVar
Dependent variable.
x : categorial
Model containing the cells which are compared with the contrast.
contrast : str
Contrast specification: see Notes.
match : Factor
Match cases for a repeated measures test.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value for a related samples t-test (with df =
len(match.cells) - 1).
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Notes
-----
A contrast specifies the steps to calculate a map based on *t*-values.
Contrast definitions can contain:
- Comparisons using ``>`` or ``<`` and data cells to compute *t*-maps.
For example, ``"cell1 > cell0"`` will compute a *t*-map of the comparison
if ``cell1`` and ``cell0``, being positive where ``cell1`` is greater than
``cell0`` and negative where ``cell0`` is greater than ``cell1``.
If the data is defined based on an interaction, cells are specified with
``|``, e.g. ``"a1 | b1 > a0 | b0"``. Cells can contain ``*`` to average
multiple cells. Thus, if the second factor in the model has cells ``b1``
and ``b0``, ``"a1 | * > a0 | *"`` would compare ``a1`` to ``a0``
while averaging ``b1`` and ``b0`` within ``a1`` and ``a0``.
- Unary numpy functions ``abs`` and ``negative``, e.g.
``"abs(cell1 > cell0)"``.
- Binary numpy functions ``subtract`` and ``add``, e.g.
``"add(a>b, a>c)"``.
- Numpy functions for multiple arrays ``min``, ``max`` and ``sum``,
e.g. ``min(a>d, b>d, c>d)``.
Cases with zero variance are set to t=0.
Examples
--------
To find cluster where both of two pairwise comparisons are reliable,
i.e. an intersection of two effects, one could use
``"min(a > c, b > c)"``.
To find a specific kind of interaction, where a is greater than b, and
this difference is greater than the difference between c and d, one
could use ``"(a > b) - abs(c > d)"``.
"""
_state_specific = ('x', 'contrast', 't', 'tail')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: CategorialArg,
contrast: str,
match: CategorialArg = None,
sub: CategorialArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
if match is None:
raise TypeError("The `match` parameter needs to be specified for repeated measures test t_contrast_rel")
ct = Celltable(y, x, match, sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
check_variance(ct.y.x)
# setup contrast
t_contrast = TContrastRel(contrast, ct.cells, ct.data_indexes)
# original data
tmap = t_contrast.map(ct.y.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
df = len(ct.match.cells) - 1
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(
ct.y, samples, threshold, tfce, tail, 't', "t-contrast",
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(len(ct.y), samples, unit=ct.match)
run_permutation(t_contrast, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = ('%'.join(ct.x.base_names) if isinstance(ct.x, Interaction) else
ct.x.name)
self.contrast = contrast
self.tail = tail
self.tmin = tmin
self.t = t
self._expand_state()
def _name(self):
if self.y:
return "T-Contrast: %s ~ %s" % (self.y, self.contrast)
else:
return "T-Contrast: %s" % self.contrast
def _plot_model(self):
return self.x
def _repr_test_args(self):
args = [repr(self.y), repr(self.x), repr(self.contrast)]
if self.tail:
args.append("tail=%r" % self.tail)
if self.match:
args.append('match=%r' % self.match)
return args
class corr(NDTest):
"""Mass-univariate correlation
Parameters
----------
y : NDVar
Dependent variable.
x : continuous
The continuous predictor variable.
norm : None | categorial
Categories in which to normalize (z-score) x.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an r-value equivalent to an
uncorrected p-value.
rmin : None | scalar
Threshold for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : None | categorial
When permuting data, only shuffle the cases within the categories
of match.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
r : NDVar
Map of correlation values (with threshold contours).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
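Examples
--------
A hypothetical example, assuming a Dataset ``ds`` with an NDVar ``'meg'``, a
continuous predictor ``'rt'`` and a ``'subject'`` Factor used for
normalization and permutation:
>>> result = corr('meg', 'rt', norm='subject', match='subject', ds=ds,
...               samples=1000, pmin=0.05)
>>> clusters = result.find_clusters(0.05)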
"""
_state_specific = ('x', 'norm', 'n', 'df', 'r')
_statistic = 'r'
@user_activity
def __init__(
self,
y: NDVarArg,
x: VarArg,
norm: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
rmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: CategorialArg = None,
parc: str = None,
**criteria):
sub = assub(sub, ds)
y = asndvar(y, sub=sub, ds=ds, dtype=np.float64)
check_for_vector_dim(y)
if not y.has_case:
raise ValueError("Dependent variable needs case dimension")
x = asvar(x, sub=sub, ds=ds)
if norm is not None:
norm = ascategorial(norm, sub, ds)
if match is not None:
match = ascategorial(match, sub, ds)
name = "%s corr %s" % (y.name, x.name)
# Normalize by z-scoring the data for each subject
# normalization is done before the permutation b/c we are interested in
# the variance associated with each subject for the z-scoring.
y = y.copy()
if norm is not None:
for cell in norm.cells:
idx = (norm == cell)
y.x[idx] = scipy.stats.zscore(y.x[idx], None)
# subtract the mean from y and x so that this can be omitted during
# permutation
y -= y.summary('case')
x = x - x.mean()
n = len(y)
df = n - 2
rmap = stats.corr(y.x, x.x)
n_threshold_params = sum((pmin is not None, rmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, rmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.rtest_r(pmin, df)
elif rmin is not None:
threshold = abs(rmin)
else:
threshold = None
cdist = NDPermutationDistribution(
y, samples, threshold, tfce, 0, 'r', name,
tstart, tstop, criteria, parc)
cdist.add_original(rmap)
if cdist.do_permutation:
iterator = permute_order(n, samples, unit=match)
run_permutation(stats.corr, cdist, iterator, x.x)
# compile results
info = _info.for_stat_map('r', threshold)
r = NDVar(rmap, y.dims[1:], info, name)
# store attributes
NDTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = x.name
self.norm = None if norm is None else norm.name
self.rmin = rmin
self.n = n
self.df = df
self.r = r
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
r = self.r
# uncorrected probability
pmap = stats.rtest_p(r.x, self.df)
info = _info.for_p_map()
p_uncorrected = NDVar(pmap, r.dims, info, 'p_uncorrected')
self.p_uncorrected = p_uncorrected
self.r_p = [[r, self.p]] if self.samples else None
def _name(self):
if self.y and self.x:
return "Correlation: %s ~ %s" % (self.y, self.x)
else:
return "Correlation"
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.norm:
args.append('norm=%r' % self.norm)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_parameter_map()
else:
return self.r
class NDDifferenceTest(NDTest):
difference = None
def _get_mask(self, p=0.05):
self._assert_has_cdist()
if not 1 >= p > 0:
raise ValueError(f"p={p}: needs to be between 1 and 0")
if p == 1:
if self._cdist.kind != 'cluster':
raise ValueError(f"p=1 is only a valid mask for threshold-based cluster tests")
mask = self._cdist.cluster_map == 0
else:
mask = self.p > p
return self._cdist.uncrop(mask, self.difference, True)
def masked_difference(self, p=0.05):
"""Difference map masked by significance
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.difference.mask(mask)
class NDMaskedC1Mixin:
def masked_c1(self, p=0.05):
"""``c1`` map masked by significance of the ``c1``-``c0`` difference
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.c1_mean.mask(mask)
class ttest_1samp(NDDifferenceTest):
"""Mass-univariate one sample t-test
Parameters
----------
y : NDVar
Dependent variable.
popmean : scalar
Value to compare y against (default is 0).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
The difference value entering the test (``y`` if popmean is 0).
n : int
Number of cases.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Data points with zero variance are set to t=0.
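Examples
--------
A hypothetical one-sample test against 0, assuming a Dataset ``ds`` with an
NDVar ``'meg'`` and a ``'subject'`` Factor:
>>> result = ttest_1samp('meg', match='subject', ds=ds, samples=1000, pmin=0.05)
>>> clusters = result.find_clusters(0.05)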
"""
_state_specific = ('popmean', 'tail', 'n', 'df', 't', 'difference')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
popmean: float = 0,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
n = len(ct.y)
df = n - 1
y = ct.y.summary()
tmap = stats.t_1samp(ct.y.x)
if popmean:
raise NotImplementedError("popmean != 0")
diff = y - popmean
if np.any(diff < 0):
diff.info['cmap'] = 'xpolar'
else:
diff = y
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
if popmean:
y_perm = ct.y - popmean
else:
y_perm = ct.y
n_samples, samples = _resample_params(len(y_perm), samples)
cdist = NDPermutationDistribution(
y_perm, n_samples, threshold, tfce, tail, 't', '1-Sample t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
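# one-sample permutation: under the null hypothesis the sign of each case is
# exchangeable, so the distribution is built from random sign flips rather
# than from reordering cases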
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.popmean = popmean
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.difference = diff
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
def _expand_state(self):
NDTest._expand_state(self)
t = self.t
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map(t.info)
p_uncorr = NDVar(pmap, t.dims, info, 'p')
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "One-Sample T-Test: %s" % self.y
else:
return "One-Sample T-Test"
def _repr_test_args(self):
args = [repr(self.y)]
if self.popmean:
args.append(repr(self.popmean))
if self.match:
args.append('match=%r' % self.match)
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_difference()
else:
return self.difference
def _independent_measures_args(y, x, c1, c0, match, ds, sub):
"Interpret parameters for independent measures tests (2 different argspecs)"
if isinstance(x, str):
x = ds.eval(x)
if isinstance(x, NDVar):
assert c1 is None
assert c0 is None
assert match is None
y1 = asndvar(y, sub, ds)
y0 = asndvar(x, sub, ds)
y = combine((y1, y0))
c1_name = y1.name
c0_name = y0.name
x_name = y0.name
else:
ct = Celltable(y, x, match, sub, cat=(c1, c0), ds=ds, coercion=asndvar, dtype=np.float64)
c1, c0 = ct.cat
c1_name = c1
c0_name = c0
x_name = ct.x.name
match = ct.match
y = ct.y
y1 = ct.data[c1]
y0 = ct.data[c0]
return y, y1, y0, c1, c0, match, x_name, c1_name, c0_name
class ttest_ind(NDDifferenceTest):
"""Mass-univariate independent samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold p value for forming clusters. None for threshold-free
cluster enhancement.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Cases with zero variance are set to t=0.
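Examples
--------
A hypothetical between-groups comparison, assuming a Dataset ``ds`` with an
NDVar ``'meg'`` and a ``'group'`` Factor with cells ``'patient'`` and
``'control'``:
>>> result = ttest_ind('meg', 'group', 'patient', 'control', ds=ds,
...                    samples=1000, pmin=0.05)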
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n1', 'n0', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub)
check_for_vector_dim(y)
n1 = len(y1)
n = len(y)
n0 = n - n1
df = n - 2
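# binary group labels: the first n1 cases belong to c1, the remaining n0 to
# c0; reinterpreting the boolean buffer as int8 in place avoids a copy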
groups = np.arange(n) < n1
groups.dtype = np.int8
tmap = stats.t_ind(y.x, groups)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(y, samples, threshold, tfce, tail, 't', 'Independent Samples t-Test', tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(n, samples)
run_permutation(stats.t_ind, cdist, iterator, groups)
# store attributes
NDDifferenceTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n1 = n1
self.n0 = n0
self.df = df
self.tail = tail
info = _info.for_stat_map('t', threshold, tail=tail, old=y.info)
self.t = NDVar(tmap, y.dims[1:], info, 't')
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(self.t.x, self.df, self.tail)
info = _info.for_p_map(self.t.info)
p_uncorr = NDVar(pmap, self.t.dims, info, 'p')
self.p_uncorrected = p_uncorr
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Independent-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Independent-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
if self.c1 is None:
args = [f'{self.y!r} (n={self.n1})', f'{self.x!r} (n={self.n0})']
else:
args = [f'{self.y!r}', f'{self.x!r}', f'{self.c1!r} (n={self.n1})', f'{self.c0!r} (n={self.n0})']
if self.match:
args.append(f'match={self.match!r}')
if self.tail:
args.append(f'tail={self.tail}')
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
def _related_measures_args(y, x, c1, c0, match, ds, sub):
"Interpret parameters for related measures tests (2 different argspecs)"
if isinstance(x, str):
if ds is None:
raise TypeError(f"x={x!r} specified as str without specifying ds")
x = ds.eval(x)
if isinstance(x, NDVar):
assert c1 is None
assert c0 is None
assert match is None
y1 = asndvar(y, sub, ds)
n = len(y1)
y0 = asndvar(x, sub, ds, n)
c1_name = y1.name
c0_name = y0.name
x_name = y0.name
elif match is None:
raise TypeError("The `match` argument needs to be specified for related measures tests")
else:
ct = Celltable(y, x, match, sub, cat=(c1, c0), ds=ds, coercion=asndvar,
dtype=np.float64)
c1, c0 = ct.cat
c1_name = c1
c0_name = c0
if not ct.all_within:
raise ValueError(f"conditions {c1!r} and {c0!r} do not have the same values on {dataobj_repr(ct.match)}")
n = len(ct.y) // 2
y1 = ct.y[:n]
y0 = ct.y[n:]
x_name = ct.x.name
match = ct.match
return y1, y0, c1, c0, match, n, x_name, c1, c1_name, c0, c0_name
class ttest_rel(NDMaskedC1Mixin, NDDifferenceTest):
"""Mass-univariate related samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed, default);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
n : int
Number of cases.
Notes
-----
In the permutation cluster test, permutations are done within the
categories of ``match``.
Cases with zero variance are set to t=0.
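Examples
--------
A hypothetical within-subject comparison, assuming a Dataset ``ds`` with an
NDVar ``'meg'``, a ``'condition'`` Factor with cells ``'a'`` and ``'b'``, and
a ``'subject'`` Factor:
>>> result = ttest_rel('meg', 'condition', 'a', 'b', match='subject', ds=ds,
...                    samples=1000, pmin=0.05)
>>> clusters = result.find_clusters(0.05)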
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y1, y0, c1, c0, match, n, x_name, c1, c1_name, c0, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub)
check_for_vector_dim(y1)
if n <= 2:
raise ValueError("Not enough observations for t-test (n=%i)" % n)
df = n - 1
diff = y1 - y0
tmap = stats.t_1samp(diff.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
n_samples, samples = _resample_params(len(diff), samples)
cdist = NDPermutationDistribution(
diff, n_samples, threshold, tfce, tail, 't', 'Related Samples t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=y1.info)
t = NDVar(tmap, y1.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, y1, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
cdist = self._cdist
t = self.t
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map()
self.p_uncorrected = NDVar(pmap, t.dims, info, 'p')
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Related-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Related-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.c1 is not None:
args.extend((repr(self.c1), repr(self.c0), repr(self.match)))
args[-1] += " (n=%i)" % self.n
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
class MultiEffectNDTest(NDTest):
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.match is not None:
args.append('match=%r' % self.match)
return args
def _repr_cdist(self):
args = self._cdist[0]._repr_test_args(self.pmin)
for cdist in self._cdist:
effect_args = cdist._repr_clusters()
args.append("%r: %s" % (cdist.name, ', '.join(effect_args)))
return args
def _asfmtext(self):
table = fmtxt.Table('llll')
table.cells('Effect', fmtxt.symbol(self._statistic, 'max'), fmtxt.symbol('p'), 'sig')
table.midrule()
for i, effect in enumerate(self.effects):
table.cell(effect)
table.cell(fmtxt.stat(self._max_statistic(i)))
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
def _expand_state(self):
self.effects = tuple(e.name for e in self._effects)
# clusters
cdists = self._cdist
if cdists is None:
self._kind = None
else:
self.tfce_maps = [cdist.tfce_map for cdist in cdists]
self.p = [cdist.probability_map for cdist in cdists]
self._kind = cdists[0].kind
def _effect_index(self, effect: Union[int, str]):
if isinstance(effect, str):
return self.effects.index(effect)
else:
return effect
def _iter_cdists(self):
for cdist in self._cdist:
yield cdist.name.capitalize(), cdist
@property
def _first_cdist(self):
if self._cdist is None:
return None
else:
return self._cdist[0]
def _max_statistic(self, effect: Union[str, int]):
i = self._effect_index(effect)
stat_map = self._statistic_map[i]
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(stat_map, self.p[i], tail)
def cluster(self, cluster_id, effect=0):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
effect : int | str
Index or name of the effect from which to retrieve a cluster
(default is the first effect).
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].cluster(cluster_id)
def compute_probability_map(self, effect=0, **sub):
"""Compute a probability map
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map
(default is the first effect).
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].compute_probability_map(**sub)
def masked_parameter_map(self, effect=0, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map.
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].masked_parameter_map(pmin, **sub)
def find_clusters(self, pmin=None, maps=False, effect=None, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
effect : int | str
Index or name of the effect from which to find clusters (default is
all effects).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
if effect is not None:
i = self._effect_index(effect)
return self._cdist[i].clusters(pmin, maps, **sub)
dss = []
info = {}
for cdist in self._cdist:
ds = cdist.clusters(pmin, maps, **sub)
ds[:, 'effect'] = cdist.name
if 'clusters' in ds.info:
info['%s clusters' % cdist.name] = ds.info.pop('clusters')
dss.append(ds)
out = combine(dss)
out.info.update(info)
return out
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
dss = []
for cdist in self._cdist:
ds = cdist.find_peaks()
ds[:, 'effect'] = cdist.name
dss.append(ds)
return combine(dss)
class anova(MultiEffectNDTest):
"""Mass-univariate ANOVA
Parameters
----------
y : NDVar
Dependent variable.
x : Model
Independent variables.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an f-value equivalent to an
uncorrected p-value.
fmin : scalar
Threshold for forming clusters as f-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : categorial | False
When permuting data, only shuffle the cases within the categories
of match. By default, ``match`` is determined automatically based on
the random effects structure of ``x``.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
effects : tuple of str
Names of the tested effects, in the same order as in other attributes.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
f : list of NDVar
Maps of F values.
p : list of NDVar | None
Maps of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : list of NDVar
Maps of p-values uncorrected for multiple comparison.
tfce_maps : list of NDVar | None
Maps of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Examples
--------
For information on model specification see the univariate
:func:`~eelbrain.test.anova` examples.
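A hypothetical repeated-measures ANOVA, assuming a Dataset ``ds`` with an
NDVar ``'meg'``, a fixed Factor ``'condition'`` and a random ``'subject'``
Factor (so that ``match`` is inferred automatically):
>>> result = anova('meg', 'condition * subject', ds=ds, samples=1000, pmin=0.05)
>>> result.table()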
"""
_state_specific = ('x', 'pmin', '_effects', '_dfs_denom', 'f')
_statistic = 'f'
_statistic_tail = 1
@user_activity
def __init__(
self,
y: NDVarArg,
x: ModelArg,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
fmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: Union[CategorialArg, bool] = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
x_arg = x
sub_arg = sub
sub = assub(sub, ds)
y = asndvar(y, sub, ds, dtype=np.float64)
check_for_vector_dim(y)
x = asmodel(x, sub, ds)
if match is None:
random_effects = [e for e in x.effects if e.random]
if not random_effects:
match = None
elif len(random_effects) > 1:
raise NotImplementedError(
"Automatic match parameter for model with more than one "
"random effect. Set match manually.")
else:
match = random_effects[0]
elif match is not False:
match = ascategorial(match, sub, ds)
lm = _nd_anova(x)
effects = lm.effects
dfs_denom = lm.dfs_denom
fmaps = lm.map(y.x)
n_threshold_params = sum((pmin is not None, fmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
cdists = None
thresholds = tuple(repeat(None, len(effects)))
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, fmin and tfce can be specified")
else:
if pmin is not None:
thresholds = tuple(stats.ftest_f(pmin, e.df, df_den) for e, df_den in zip(effects, dfs_denom))
elif fmin is not None:
thresholds = tuple(repeat(abs(fmin), len(effects)))
else:
thresholds = tuple(repeat(None, len(effects)))
cdists = [
NDPermutationDistribution(
y, samples, thresh, tfce, 1, 'f', e.name,
tstart, tstop, criteria, parc, force_permutation)
for e, thresh in zip(effects, thresholds)]
# Find clusters in the actual data
do_permutation = 0
for cdist, fmap in zip(cdists, fmaps):
cdist.add_original(fmap)
do_permutation += cdist.do_permutation
if do_permutation:
iterator = permute_order(len(y), samples, unit=match)
run_permutation_me(lm, cdists, iterator)
# create ndvars
dims = y.dims[1:]
f = []
for e, fmap, df_den, f_threshold in zip(effects, fmaps, dfs_denom, thresholds):
info = _info.for_stat_map('f', f_threshold, tail=1, old=y.info)
f.append(NDVar(fmap, dims, info, e.name))
# store attributes
MultiEffectNDTest.__init__(self, y, match, sub_arg, samples, tfce, pmin,
cdists, tstart, tstop)
self.x = x_arg if isinstance(x_arg, str) else x.name
self._effects = effects
self._dfs_denom = dfs_denom
self.f = f
self._expand_state()
def _expand_state(self):
# backwards compatibility
if hasattr(self, 'effects'):
self._effects = self.effects
MultiEffectNDTest._expand_state(self)
# backwards compatibility
if hasattr(self, 'df_den'):
df_den_temp = {e.name: df for e, df in self.df_den.items()}
del self.df_den
self._dfs_denom = tuple(df_den_temp[e] for e in self.effects)
# f-maps with clusters
pmin = self.pmin or 0.05
if self.samples:
f_and_clusters = []
for e, fmap, df_den, cdist in zip(self._effects, self.f,
self._dfs_denom, self._cdist):
# create f-map with cluster threshold
f0 = stats.ftest_f(pmin, e.df, df_den)
info = _info.for_stat_map('f', f0)
f_ = NDVar(fmap.x, fmap.dims, info, e.name)
# add overlay with cluster
if cdist.probability_map is not None:
f_and_clusters.append([f_, cdist.probability_map])
else:
f_and_clusters.append([f_])
self.f_probability = f_and_clusters
# uncorrected probability
p_uncorr = []
for e, f, df_den in zip(self._effects, self.f, self._dfs_denom):
info = _info.for_p_map()
pmap = stats.ftest_p(f.x, e.df, df_den)
p_ = NDVar(pmap, f.dims, info, e.name)
p_uncorr.append(p_)
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "ANOVA: %s ~ %s" % (self.y, self.x)
else:
return "ANOVA: %s" % self.x
def _plot_model(self):
return '%'.join(e.name for e in self._effects if isinstance(e, Factor) or
(isinstance(e, NestedEffect) and isinstance(e.effect, Factor)))
def _plot_sub(self):
return super(anova, self)._plot_sub()
def _default_plot_obj(self):
if self.samples:
return [self.masked_parameter_map(e) for e in self.effects]
else:
return self._statistic_map
def table(self):
"""Table with effects and smallest p-value"""
table = fmtxt.Table('rlr' + ('' if self.p is None else 'rl'))
table.cells('#', 'Effect', 'f_max')
if self.p is not None:
table.cells('p', 'sig')
table.midrule()
for i in range(len(self.effects)):
table.cell(i)
table.cell(self.effects[i])
table.cell(fmtxt.stat(self.f[i].max()))
if self.p is not None:
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
class Vector(NDDifferenceTest):
"""Test a vector field for vectors with non-random direction
Parameters
----------
y : NDVar
Dependent variable (needs to include one vector dimension).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
difference : NDVar
The vector field averaged across cases.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
Notes
-----
Vector tests are based on the Hotelling T-Square statistic. Computation of
the T-Square statistic relies on [1]_.
References
----------
.. [1] Kopp, J. (2008). Efficient numerical diagonalization of hermitian 3 x
3 matrices. International Journal of Modern Physics C, 19(3), 523-548.
`10.1142/S0129183108012303 <https://doi.org/10.1142/S0129183108012303>`_
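Examples
--------
A hypothetical test of a source-space vector field, assuming a Dataset ``ds``
with a vector-valued NDVar ``'src'`` and a ``'subject'`` Factor:
>>> result = Vector('src', match='subject', ds=ds, samples=1000, tfce=True)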
"""
_state_specific = ('difference', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
n = len(ct.y)
cdist = NDPermutationDistribution(ct.y, samples, tmin, tfce, 1, 'norm', 'Vector test', tstart, tstop, criteria, parc, force_permutation)
v_dim = ct.y.dimnames[cdist._vector_ax + 1]
v_mean = ct.y.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(ct.y)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self._v_dim = v_dim
self.n = n
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
@property
def _statistic(self):
return 'norm' if self.t2 is None else 't2'
def _name(self):
if self.y:
return f"Vector test: {self.y}"
else:
return "Vector test"
def _repr_test_args(self):
args = []
if self.y:
args.append(repr(self.y))
if self.match:
args.append(f'match={self.match!r}')
return args
@staticmethod
def _vector_perm(y, out, seed, use_norm):
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
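# under the null hypothesis of no consistent direction the data are invariant
# to rotation, so each permutation applies an independent random rotation to
# every case and recomputes the statistic on the rotated data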
rotation = rand_rotation_matrices(n_cases, seed)
if use_norm:
return vector.mean_norm_rotated(y, rotation, out)
else:
return vector.t2_stat_rotated(y, rotation, out)
@staticmethod
def _vector_t2_map(y):
dimnames = y.get_dimnames(first=('case', 'space'))
x = y.get_data(dimnames)
t2_map = stats.t2_1samp(x)
if y.ndim == 2:
return np.float64(t2_map)
else:
dims = y.get_dims(dimnames[2:])
return NDVar(t2_map, dims)
class VectorDifferenceIndependent(Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Total number of cases.
n1 : int
Number of cases in ``c1``.
n0 : int
Number of cases in ``c0``.
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
"""
_state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
_statistic = 'norm'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub)
self.n1 = len(y1)
self.n0 = len(y0)
self.n = len(y)
cdist = NDPermutationDistribution(y, samples, tmin, tfce, 1, 'norm', 'Vector test (independent)', tstart, tstop, criteria, parc, force_permutation)
self._v_dim = v_dim = y.dimnames[cdist._vector_ax + 1]
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self.difference = self.c1_mean - self.c0_mean
self.difference.name = 'difference'
v_mean_norm = self.difference.norm(v_dim)
if not use_norm:
raise NotImplementedError("t2 statistic not implemented for VectorDifferenceIndependent")
else:
cdist.add_original(v_mean_norm.x if self.difference.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator, self.n1)
NDTest.__init__(self, y, match, sub, samples, tfce, None, cdist, tstart, tstop)
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (independent): {self.y}"
else:
return "Vector test (independent)"
@staticmethod
def _vector_perm(y, n1, out, seed, use_norm):
assert use_norm
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
# randomize directions
rotation = rand_rotation_matrices(n_cases, seed)
# randomize groups
cases = np.arange(n_cases)
np.random.shuffle(cases)
# group 1
mean_1 = np.zeros((n_dims, n_tests))
for case in cases[:n1]:
mean_1 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_1 /= n1
# group 0
mean_0 = np.zeros((n_dims, n_tests))
for case in cases[n1:]:
mean_0 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_0 /= (n_cases - n1)
# difference
mean_1 -= mean_0
norm = scipy.linalg.norm(mean_1, 2, axis=0)
if out is not None:
out[:] = norm
return norm
class VectorDifferenceRelated(NDMaskedC1Mixin, Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
c1_mean : NDVar
Mean in the ``c1`` condition.
c0_mean : NDVar
Mean in the ``c0`` condition.
difference : NDVar
Difference between the mean in condition ``c1`` and condition ``c0``.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
See Also
--------
Vector : One-sample vector test, notes on vector test implementation
"""
_state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y1, y0, c1, c0, match, n, x_name, c1, c1_name, c0, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub)
difference = y1 - y0
difference.name = 'difference'
n_samples, samples = _resample_params(n, samples)
cdist = NDPermutationDistribution(difference, n_samples, tmin, tfce, 1, 'norm', 'Vector test (related)', tstart, tstop, criteria, parc, force_permutation)
v_dim = difference.dimnames[cdist._vector_ax + 1]
v_mean = difference.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(difference)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(n_samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, difference, match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._v_dim = v_dim
self.n = n
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (related): {self.y}"
else:
return "Vector test (related)"
def flatten(array, connectivity):
"""Reshape SPM buffer array to 2-dimensional map for connectivity processing
Parameters
----------
array : ndarray
N-dimensional array (with non-adjacent dimension at first position).
connectivity : Connectivity
N-dimensional connectivity.
Returns
-------
flat_array : ndarray
The input array reshaped if necessary, making sure that input and output
arrays share the same underlying data buffer.
"""
if array.ndim == 2 or not connectivity.custom:
return array
else:
out = array.reshape((array.shape[0], -1))
assert out.base is array
return out
def flatten_1d(array):
if array.ndim == 1:
return array
else:
out = array.ravel()
assert out.base is array
return out
def label_clusters(stat_map, threshold, tail, connectivity, criteria):
"""Label clusters
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
Returns
-------
cmap : np.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
cmap = np.empty(stat_map.shape, np.uint32)
bin_buff = np.empty(stat_map.shape, np.bool8)
cmap_flat = flatten(cmap, connectivity)
if tail == 0:
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
else:
int_buff = int_buff_flat = None
cids = _label_clusters(stat_map, threshold, tail, connectivity, criteria,
cmap, cmap_flat, bin_buff, int_buff, int_buff_flat)
return cmap, cids
def _label_clusters(stat_map, threshold, tail, conn, criteria, cmap, cmap_flat,
bin_buff, int_buff, int_buff_flat):
"""Find clusters on a statistical parameter map
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
cmap : array of int
Buffer for the cluster id map (will be modified).
Returns
-------
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
# compute clusters
if tail >= 0:
bin_map_above = np.greater(stat_map, threshold, bin_buff)
cids = _label_clusters_binary(bin_map_above, cmap, cmap_flat, conn,
criteria)
if tail <= 0:
bin_map_below = np.less(stat_map, -threshold, bin_buff)
if tail < 0:
cids = _label_clusters_binary(bin_map_below, cmap, cmap_flat, conn,
criteria)
else:
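# two-tailed: label negative clusters into a separate buffer and offset their
# ids by the highest positive id, so both maps can be merged without collisions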
cids_l = _label_clusters_binary(bin_map_below, int_buff,
int_buff_flat, conn, criteria)
x = cmap.max()
int_buff[bin_map_below] += x
cids_l += x
cmap += int_buff
cids = np.concatenate((cids, cids_l))
return cids
def label_clusters_binary(bin_map, connectivity, criteria=None):
"""Label clusters in a boolean map
Parameters
----------
bin_map : numpy.ndarray
Binary map.
connectivity : Connectivity
Connectivity corresponding to ``bin_map``.
criteria : dict
Cluster criteria.
Returns
-------
cmap : numpy.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : numpy.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
cmap = np.empty(bin_map.shape, np.uint32)
cmap_flat = flatten(cmap, connectivity)
cids = _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria)
return cmap, cids
def _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria):
"""Label clusters in a binary array
Parameters
----------
bin_map : np.ndarray
Binary map of where the parameter map exceeds the threshold for a
cluster (non-adjacent dimension on the first axis).
cmap : np.ndarray
Array in which to label the clusters.
cmap_flat : np.ndarray
Flat view of ``cmap`` (ndim=2; only used for custom connectivity).
connectivity : Connectivity
Connectivity.
criteria : None | list
Cluster size criteria: list of ``(axes, v)`` tuples. For each tuple, collapse
over ``axes`` and require that the cluster extends over at least ``v``
elements of the remaining dimensions.
Returns
-------
cluster_ids : np.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
# find clusters
n = ndimage.label(bin_map, connectivity.struct, cmap)
if n <= 1:
# in older versions, n is 1 even when no cluster is found
if n == 0 or cmap.max() == 0:
return np.array((), np.uint32)
else:
cids = np.array((1,), np.uint32)
elif connectivity.custom:
cids = merge_labels(cmap_flat, n, *connectivity.custom[0])
else:
cids = np.arange(1, n + 1, 1, np.uint32)
# apply minimum cluster size criteria
if criteria and cids.size:
for axes, v in criteria:
cids = np.setdiff1d(cids,
[i for i in cids if np.count_nonzero(np.equal(cmap, i).any(axes)) < v],
True)
if cids.size == 0:
break
return cids
def tfce(stat_map, tail, connectivity, dh=0.1):
tfce_im = np.empty(stat_map.shape, np.float64)
tfce_im_1d = flatten_1d(tfce_im)
bin_buff = np.empty(stat_map.shape, np.bool8)
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
int_buff_1d = flatten_1d(int_buff)
return _tfce(stat_map, tail, connectivity, tfce_im, tfce_im_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh)
def _tfce(stat_map, tail, conn, out, out_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh=0.1, e=0.5, h=2.0):
"Threshold-free cluster enhancement"
out.fill(0)
# determine slices
if tail == 0:
hs = chain(np.arange(-dh, stat_map.min(), -dh),
np.arange(dh, stat_map.max(), dh))
elif tail < 0:
hs = np.arange(-dh, stat_map.min(), -dh)
else:
hs = np.arange(dh, stat_map.max(), dh)
# label clusters in slices at different heights
# fill each cluster with total section value
# each point's value is the vertical sum
for h_ in hs:
if h_ > 0:
np.greater_equal(stat_map, h_, bin_buff)
h_factor = h_ ** h
else:
np.less_equal(stat_map, h_, bin_buff)
h_factor = (-h_) ** h
c_ids = _label_clusters_binary(bin_buff, int_buff, int_buff_flat, conn, None)
tfce_increment(c_ids, int_buff_1d, out_1d, e, h_factor)
return out
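# Hedged, self-contained 1-d reference for what the buffered implementation above
# approximates (positive tail only). Whether ``tfce_increment`` also scales by ``dh``
# is an assumption here, so treat this as a conceptual sketch rather than a
# bit-identical re-implementation:
#     TFCE(p) = sum_h extent(cluster containing p at height h)**e * h**h_exp * dh
def _tfce_1d_reference(stat_map_1d, dh=0.1, e=0.5, h_exp=2.0):
    out = np.zeros_like(stat_map_1d, dtype=np.float64)
    for h_ in np.arange(dh, stat_map_1d.max(), dh):
        # label contiguous supra-threshold runs at this height
        labels, n = ndimage.label(stat_map_1d >= h_)
        for cid in range(1, n + 1):
            idx = labels == cid
            out[idx] += idx.sum() ** e * h_ ** h_exp * dh
    return out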
class StatMapProcessor:
def __init__(self, tail, max_axes, parc):
"""Reduce a statistical map to the relevant maximum statistic"""
self.tail = tail
self.max_axes = max_axes
self.parc = parc
def max_stat(self, stat_map):
if self.tail == 0:
v = np.abs(stat_map, stat_map).max(self.max_axes)
elif self.tail > 0:
v = stat_map.max(self.max_axes)
else:
v = -stat_map.min(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class TFCEProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, dh):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.dh = dh
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool8)
self._int_buff = np.empty(shape, np.uint32)
self._tfce_im = np.empty(shape, np.float64)
self._tfce_im_1d = flatten_1d(self._tfce_im)
self._int_buff_flat = flatten(self._int_buff, connectivity)
self._int_buff_1d = flatten_1d(self._int_buff)
def max_stat(self, stat_map):
v = _tfce(
stat_map, self.tail, self.connectivity, self._tfce_im, self._tfce_im_1d,
self._bin_buff, self._int_buff, self._int_buff_flat, self._int_buff_1d,
self.dh,
).max(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class ClusterProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, threshold,
criteria):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.threshold = threshold
self.criteria = criteria
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool8)
self._cmap = np.empty(shape, np.uint32)
self._cmap_flat = flatten(self._cmap, connectivity)
if tail == 0:
self._int_buff = np.empty(shape, np.uint32)
self._int_buff_flat = flatten(self._int_buff, connectivity)
else:
self._int_buff = self._int_buff_flat = None
def max_stat(self, stat_map, threshold=None):
if threshold is None:
threshold = self.threshold
cmap = self._cmap
cids = _label_clusters(stat_map, threshold, self.tail, self.connectivity,
self.criteria, cmap, self._cmap_flat,
self._bin_buff, self._int_buff,
self._int_buff_flat)
if self.parc is not None:
v = []
for idx in self.parc:
clusters_v = ndimage.sum(stat_map[idx], cmap[idx], cids)
if len(clusters_v):
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
v.append(clusters_v.max())
else:
v.append(0)
return v
elif len(cids):
clusters_v = ndimage.sum(stat_map, cmap, cids)
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
return clusters_v.max()
else:
return 0
def get_map_processor(kind, *args):
if kind == 'tfce':
return TFCEProcessor(*args)
elif kind == 'cluster':
return ClusterProcessor(*args)
elif kind == 'raw':
return StatMapProcessor(*args)
else:
raise ValueError("kind=%s" % repr(kind))
class NDPermutationDistribution:
"""Accumulate information on a cluster statistic.
Parameters
----------
y : NDVar
Dependent variable.
samples : int
Number of permutations.
threshold : scalar > 0
Threshold-based clustering.
tfce : bool | scalar
Threshold-free cluster enhancement.
tail : 1 | 0 | -1
Which tail(s) of the distribution to consider. 0 is two-tailed,
whereas 1 only considers positive values and -1 only considers
negative values.
meas : str
Label for the parameter measurement (e.g., 't' for t-values).
name : None | str
Name for the comparison.
tstart, tstop : None | scalar
Restrict the time window for finding clusters (None: use the whole
epoch).
criteria : dict
Dictionary with threshold criteria for cluster size: 'mintime'
(seconds) and 'minsource' (n_sources).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation : bool
Conduct permutations regardless of whether there are any clusters.
Notes
-----
Use of the NDPermutationDistribution proceeds in 3 steps:
- initialize the NDPermutationDistribution object: ``cdist = NDPermutationDistribution(...)``
- use a copy of y cropped to the time window of interest:
      ``y = cdist.y_perm``
- add the actual statistical map with ``cdist.add_original(pmap)``
- if any clusters are found (``if cdist.n_clusters``):
- proceed to add statistical maps from permuted data with
``cdist.add_perm(pmap)``.
Permutation data shape: case, [vector, ][non-adjacent, ] ...
internal shape: [non-adjacent, ] ...
"""
tfce_warning = None
def __init__(self, y, samples, threshold, tfce=False, tail=0, meas='?', name=None,
tstart=None, tstop=None, criteria={}, parc=None, force_permutation=False):
assert y.has_case
assert parc is None or isinstance(parc, str)
if tfce and threshold:
raise RuntimeError(f"threshold={threshold!r}, tfce={tfce!r}: mutually exclusive parameters")
elif tfce:
if tfce is not True:
tfce = abs(tfce)
kind = 'tfce'
elif threshold:
threshold = float(threshold)
kind = 'cluster'
assert threshold > 0
else:
kind = 'raw'
# vector: will be removed for stat_map
vector = [d._connectivity_type == 'vector' for d in y.dims[1:]]
has_vector_ax = any(vector)
if has_vector_ax:
vector_ax = vector.index(True)
else:
vector_ax = None
# prepare temporal cropping
if (tstart is None) and (tstop is None):
y_perm = y
self._crop_for_permutation = False
self._crop_idx = None
else:
t_ax = y.get_axis('time') - 1
y_perm = y.sub(time=(tstart, tstop))
# for stat-maps
if vector_ax is not None and vector_ax < t_ax:
t_ax -= 1
t_slice = y.time._array_index(slice(tstart, tstop))
self._crop_for_permutation = True
self._crop_idx = FULL_AXIS_SLICE * t_ax + (t_slice,)
dims = list(y_perm.dims[1:])
if has_vector_ax:
del dims[vector_ax]
# custom connectivity: move non-adjacent connectivity to first axis
custom = [d._connectivity_type == 'custom' for d in dims]
n_custom = sum(custom)
if n_custom > 1:
raise NotImplementedError("More than one axis with custom connectivity")
nad_ax = None if n_custom == 0 else custom.index(True)
if nad_ax:
swapped_dims = list(dims)
swapped_dims[0], swapped_dims[nad_ax] = dims[nad_ax], dims[0]
else:
swapped_dims = dims
connectivity = Connectivity(swapped_dims, parc)
assert connectivity.vector is None
# cluster map properties
ndim = len(dims)
# prepare cluster minimum size criteria
if criteria:
criteria_ = []
for k, v in criteria.items():
                m = re.match(r'min(\w+)', k)
if m:
dimname = m.group(1)
if not y.has_dim(dimname):
raise TypeError(
"%r is an invalid keyword argument for this testnd "
"function (no dimension named %r)" % (k, dimname))
ax = y.get_axis(dimname) - 1
if dimname == 'time':
v = int(ceil(v / y.time.tstep))
else:
raise TypeError("%r is an invalid keyword argument for this testnd function" % (k,))
if nad_ax:
if ax == 0:
ax = nad_ax
elif ax == nad_ax:
ax = 0
axes = tuple(i for i in range(ndim) if i != ax)
criteria_.append((axes, v))
if kind != 'cluster':
# here so that invalid keywords raise explicitly
err = ("Can not use cluster size criteria when doing "
"threshold free cluster evaluation")
raise ValueError(err)
else:
criteria_ = None
# prepare distribution
samples = int(samples)
if parc:
for parc_ax, parc_dim in enumerate(swapped_dims):
if parc_dim.name == parc:
break
else:
raise ValueError("parc=%r (no dimension named %r)" % (parc, parc))
if parc_dim._connectivity_type == 'none':
parc_indexes = np.arange(len(parc_dim))
elif kind == 'tfce':
raise NotImplementedError(
f"TFCE for parc={parc!r} ({parc_dim.__class__.__name__} dimension)")
elif parc_dim._connectivity_type == 'custom':
if not hasattr(parc_dim, 'parc'):
raise NotImplementedError(f"parc={parc!r}: dimension has no parcellation")
parc_indexes = tuple(np.flatnonzero(parc_dim.parc == cell) for
cell in parc_dim.parc.cells)
parc_dim = Categorial(parc, parc_dim.parc.cells)
else:
raise NotImplementedError(f"parc={parc!r}")
dist_shape = (samples, len(parc_dim))
dist_dims = ('case', parc_dim)
max_axes = tuple(chain(range(parc_ax), range(parc_ax + 1, ndim)))
else:
dist_shape = (samples,)
dist_dims = None
max_axes = None
parc_indexes = None
# arguments for the map processor
shape = tuple(map(len, swapped_dims))
if kind == 'raw':
map_args = (kind, tail, max_axes, parc_indexes)
elif kind == 'tfce':
dh = 0.1 if tfce is True else tfce
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, dh)
else:
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, threshold, criteria_)
self.kind = kind
self.y_perm = y_perm
self.dims = tuple(dims) # external stat map dims (cropped time)
self.shape = shape # internal stat map shape
self._connectivity = connectivity
self.samples = samples
self.dist_shape = dist_shape
self._dist_dims = dist_dims
self._max_axes = max_axes
self.dist = None
self.threshold = threshold
self.tfce = tfce
self.tail = tail
self._nad_ax = nad_ax
self._vector_ax = vector_ax
self.tstart = tstart
self.tstop = tstop
self.parc = parc
self.meas = meas
self.name = name
self._criteria = criteria_
self.criteria = criteria
self.map_args = map_args
self.has_original = False
self.do_permutation = False
self.dt_perm = None
self._finalized = False
self._init_time = current_time()
self._host = socket.gethostname()
self.force_permutation = force_permutation
from .. import __version__
self._version = __version__
def _crop(self, im):
"Crop an original stat_map"
if self._crop_for_permutation:
return im[self._crop_idx]
else:
return im
def uncrop(
self,
ndvar: NDVar, # NDVar to uncrop
to: NDVar, # NDVar that has the target time dimensions
default: float = 0, # value to fill in uncropped area
):
if self.tstart is None and self.tstop is None:
return ndvar
target_time = to.get_dim('time')
t_ax = ndvar.get_axis('time')
dims = list(ndvar.dims)
dims[t_ax] = target_time
shape = list(ndvar.shape)
shape[t_ax] = len(target_time)
t_slice = target_time._array_index(slice(self.tstart, self.tstop))
x = np.empty(shape, ndvar.x.dtype)
x.fill(default)
x[FULL_AXIS_SLICE * t_ax + (t_slice,)] = ndvar.x
return NDVar(x, dims, ndvar.info, ndvar.name)
def add_original(self, stat_map):
"""Add the original statistical parameter map.
Parameters
----------
stat_map : array
Parameter map of the statistic of interest (uncropped).
"""
if self.has_original:
raise RuntimeError("Original pmap already added")
logger = logging.getLogger(__name__)
logger.debug("Adding original parameter map...")
# crop/reshape stat_map
stat_map = self._crop(stat_map)
if self._nad_ax:
stat_map = stat_map.swapaxes(0, self._nad_ax)
# process map
if self.kind == 'tfce':
dh = 0.1 if self.tfce is True else self.tfce
self.tfce_warning = max(stat_map.max(), -stat_map.min()) < dh
cmap = tfce(stat_map, self.tail, self._connectivity, dh)
cids = None
n_clusters = cmap.max() > 0
elif self.kind == 'cluster':
cmap, cids = label_clusters(stat_map, self.threshold, self.tail,
self._connectivity, self._criteria)
n_clusters = len(cids)
# clean original cluster map
idx = np.in1d(cmap, cids, invert=True).reshape(self.shape)
cmap[idx] = 0
else:
cmap = stat_map
cids = None
n_clusters = True
self._t0 = current_time()
self._original_cluster_map = cmap
self._cids = cids
self.n_clusters = n_clusters
self.has_original = True
self.dt_original = self._t0 - self._init_time
self._original_param_map = stat_map
if self.force_permutation or (self.samples and n_clusters):
self._create_dist()
self.do_permutation = True
else:
self.dist_array = None
self.finalize()
def _create_dist(self):
"Create the distribution container"
if CONFIG['n_workers']:
n = reduce(operator.mul, self.dist_shape)
dist_array = RawArray('d', n)
dist = np.frombuffer(dist_array, np.float64, n)
dist.shape = self.dist_shape
else:
dist_array = None
dist = np.zeros(self.dist_shape)
self.dist_array = dist_array
self.dist = dist
def _aggregate_dist(self, **sub):
"""Aggregate permutation distribution to one value per permutation
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
dist : array, shape = (samples,)
Maximum value for each permutation in the given region.
"""
dist = self.dist
if sub:
if self._dist_dims is None:
raise TypeError("NDPermutationDistribution does not have parcellation")
dist_ = NDVar(dist, self._dist_dims)
dist_sub = dist_.sub(**sub)
dist = dist_sub.x
if dist.ndim > 1:
axes = tuple(range(1, dist.ndim))
dist = dist.max(axes)
return dist
def __repr__(self):
items = []
if self.has_original:
dt = timedelta(seconds=round(self.dt_original))
items.append("%i clusters (%s)" % (self.n_clusters, dt))
if self.samples > 0 and self.n_clusters > 0:
if self.dt_perm is not None:
dt = timedelta(seconds=round(self.dt_perm))
items.append("%i permutations (%s)" % (self.samples, dt))
else:
items.append("no data")
return "<NDPermutationDistribution: %s>" % ', '.join(items)
def __getstate__(self):
if not self._finalized:
raise RuntimeError("Cannot pickle cluster distribution before all "
"permutations have been added.")
state = {
name: getattr(self, name) for name in (
'name', 'meas', '_version', '_host', '_init_time',
# settings ...
'kind', 'threshold', 'tfce', 'tail', 'criteria', 'samples', 'tstart', 'tstop', 'parc',
# data properties ...
'dims', 'shape', '_nad_ax', '_vector_ax', '_criteria', '_connectivity',
# results ...
'dt_original', 'dt_perm', 'n_clusters', '_dist_dims', 'dist', '_original_param_map', '_original_cluster_map', '_cids',
)}
state['version'] = 3
return state
def __setstate__(self, state):
# backwards compatibility
version = state.pop('version', 0)
if version == 0:
if '_connectivity_src' in state:
del state['_connectivity_src']
del state['_connectivity_dst']
if '_connectivity' in state:
del state['_connectivity']
if 'N' in state:
state['samples'] = state.pop('N')
if '_version' not in state:
state['_version'] = '< 0.11'
if '_host' not in state:
state['_host'] = 'unknown'
if '_init_time' not in state:
state['_init_time'] = None
if 'parc' not in state:
if state['_dist_dims'] is None:
state['parc'] = None
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
elif isinstance(state['parc'], tuple):
if len(state['parc']) == 0:
state['parc'] = None
elif len(state['parc']) == 1:
state['parc'] = state['parc'][0]
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
nad_ax = state['_nad_ax']
state['dims'] = dims = state['dims'][1:]
state['_connectivity'] = Connectivity(
(dims[nad_ax],) + dims[:nad_ax] + dims[nad_ax + 1:],
state['parc'])
if version < 2:
state['_vector_ax'] = None
if version < 3:
            state['tfce'] = state['kind'] == 'tfce'
for k, v in state.items():
setattr(self, k, v)
self.has_original = True
self.finalize()
def _repr_test_args(self, pmin):
"Argument representation for TestResult repr"
args = ['samples=%r' % self.samples]
if pmin is not None:
args.append(f"pmin={pmin!r}")
elif self.kind == 'tfce':
arg = f"tfce={self.tfce!r}"
if self.tfce_warning:
arg = f"{arg} [WARNING: The TFCE step is larger than the largest value in the data]"
args.append(arg)
if self.tstart is not None:
args.append(f"tstart={self.tstart!r}")
if self.tstop is not None:
args.append(f"tstop={self.tstop!r}")
for k, v in self.criteria.items():
args.append(f"{k}={v!r}")
return args
def _repr_clusters(self):
info = []
if self.kind == 'cluster':
if self.n_clusters == 0:
info.append("no clusters")
else:
info.append("%i clusters" % self.n_clusters)
if self.n_clusters and self.samples:
info.append(f"{fmtxt.peq(self.probability_map.min())}")
return info
def _package_ndvar(self, x, info=None, external_shape=False):
"Generate NDVar from map with internal shape"
if not self.dims:
if isinstance(x, np.ndarray):
return x.item()
return x
if not external_shape and self._nad_ax:
x = x.swapaxes(0, self._nad_ax)
if info is None:
info = {}
return NDVar(x, self.dims, info, self.name)
def finalize(self):
"Package results and delete temporary data"
if self.dt_perm is None:
self.dt_perm = current_time() - self._t0
# original parameter map
param_contours = {}
if self.kind == 'cluster':
if self.tail >= 0:
param_contours[self.threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-self.threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
self.parameter_map = self._package_ndvar(self._original_param_map, info)
# TFCE map
if self.kind == 'tfce':
self.tfce_map = self._package_ndvar(self._original_cluster_map)
else:
self.tfce_map = None
# cluster map
if self.kind == 'cluster':
self.cluster_map = self._package_ndvar(self._original_cluster_map)
else:
self.cluster_map = None
self._finalized = True
def data_for_permutation(self, raw=True):
"""Retrieve data flattened for permutation
Parameters
----------
raw : bool
Return a RawArray and a shape tuple instead of a numpy array.
"""
# get data in the right shape
x = self.y_perm.x
if self._vector_ax:
x = np.moveaxis(x, self._vector_ax + 1, 1)
if self._nad_ax is not None:
dst = 1
src = 1 + self._nad_ax
if self._vector_ax is not None:
dst += 1
if self._vector_ax > self._nad_ax:
src += 1
if dst != src:
x = x.swapaxes(dst, src)
# flat y shape
ndims = 1 + (self._vector_ax is not None)
n_flat = 1 if x.ndim == ndims else reduce(operator.mul, x.shape[ndims:])
y_flat_shape = x.shape[:ndims] + (n_flat,)
if not raw:
return x.reshape(y_flat_shape)
n = reduce(operator.mul, y_flat_shape)
ra = RawArray('d', n)
ra[:] = x.ravel() # OPT: don't copy data
return ra, y_flat_shape, x.shape[ndims:]
def _cluster_properties(self, cluster_map, cids):
"""Create a Dataset with cluster properties
Parameters
----------
cluster_map : NDVar
NDVar in which clusters are marked by bearing the same number.
cids : array_like of int
Numbers specifying the clusters (must occur in cluster_map) which
should be analyzed.
Returns
-------
cluster_properties : Dataset
Cluster properties. Which properties are included depends on the
dimensions.
"""
ndim = cluster_map.ndim
n_clusters = len(cids)
# setup compression
compression = []
for ax, dim in enumerate(cluster_map.dims):
extents = np.empty((n_clusters, len(dim)), dtype=np.bool_)
axes = tuple(i for i in range(ndim) if i != ax)
compression.append((ax, dim, axes, extents))
# find extents for all clusters
c_mask = np.empty(cluster_map.shape, np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
for ax, dim, axes, extents in compression:
np.any(c_mask, axes, extents[i])
# prepare Dataset
ds = Dataset()
ds['id'] = Var(cids)
for ax, dim, axes, extents in compression:
properties = dim._cluster_properties(extents)
if properties is not None:
ds.update(properties)
return ds
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
if self.kind != 'cluster':
raise RuntimeError(
f'Only cluster-based tests have clusters with stable ids, this '
f'is a {self.kind} distribution. Use the .find_clusters() '
f'method instead with maps=True.')
elif cluster_id not in self._cids:
raise ValueError(f'No cluster with id {cluster_id!r}')
out = self.parameter_map * (self.cluster_map == cluster_id)
properties = self._cluster_properties(self.cluster_map, (cluster_id,))
for k in properties:
out.info[k] = properties[0, k]
return out
def clusters(self, pmin=None, maps=True, **sub):
"""Find significant clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value for clusters (for thresholded cluster tests the
default is 1, for others 0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default True).
[dimname] : index
Limit the data for the distribution.
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
if pmin is None:
if self.samples > 0 and self.kind != 'cluster':
pmin = 0.05
elif self.samples == 0:
msg = ("Can not determine p values in distribution without "
"permutations.")
if self.kind == 'cluster':
msg += " Find clusters with pmin=None."
raise RuntimeError(msg)
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if self.kind == 'cluster':
if sub:
cluster_map = self.cluster_map.sub(**sub)
cids = np.setdiff1d(cluster_map.x, [0])
else:
cluster_map = self.cluster_map
cids = np.array(self._cids)
if len(cids):
# measure original clusters
cluster_v = ndimage.sum(param_map.x, cluster_map.x, cids)
# p-values
if self.samples:
# p-values: "the proportion of random partitions that
# resulted in a larger test statistic than the observed
# one" (179)
dist = self._aggregate_dist(**sub)
n_larger = np.sum(dist > np.abs(cluster_v[:, None]), 1)
cluster_p = n_larger / self.samples
# select clusters
if pmin is not None:
idx = cluster_p <= pmin
cids = cids[idx]
cluster_p = cluster_p[idx]
cluster_v = cluster_v[idx]
# p-value corrected across parc
if sub:
dist = self._aggregate_dist()
n_larger = np.sum(dist > np.abs(cluster_v[:, None]), 1)
cluster_p_corr = n_larger / self.samples
else:
cluster_v = cluster_p = cluster_p_corr = []
ds = self._cluster_properties(cluster_map, cids)
ds['v'] = Var(cluster_v)
if self.samples:
ds['p'] = Var(cluster_p)
if sub:
ds['p_parc'] = Var(cluster_p_corr)
threshold = self.threshold
else:
p_map = self.compute_probability_map(**sub)
bin_map = np.less_equal(p_map.x, pmin)
# threshold for maps
if maps:
values = np.abs(param_map.x)[bin_map]
if len(values):
threshold = values.min() / 2
else:
threshold = 1.
# find clusters (reshape to internal shape for labelling)
if self._nad_ax:
bin_map = bin_map.swapaxes(0, self._nad_ax)
if sub:
raise NotImplementedError("sub")
# need to subset connectivity!
c_map, cids = label_clusters_binary(bin_map, self._connectivity)
if self._nad_ax:
c_map = c_map.swapaxes(0, self._nad_ax)
# Dataset with cluster info
cluster_map = NDVar(c_map, p_map.dims, {}, "clusters")
ds = self._cluster_properties(cluster_map, cids)
ds.info['clusters'] = cluster_map
min_pos = ndimage.minimum_position(p_map.x, c_map, cids)
ds['p'] = Var([p_map.x[pos] for pos in min_pos])
if 'p' in ds:
ds['sig'] = star_factor(ds['p'])
# expand clusters
if maps:
shape = (ds.n_cases,) + param_map.shape
c_maps = np.empty(shape, dtype=param_map.x.dtype)
c_mask = np.empty(param_map.shape, dtype=np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map.x, cid, c_mask)
np.multiply(param_map.x, c_mask, c_maps[i])
# package ndvar
dims = ('case',) + param_map.dims
param_contours = {}
if self.tail >= 0:
param_contours[threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
info['summary_func'] = np.sum
ds['cluster'] = NDVar(c_maps, dims, info)
else:
ds.info['clusters'] = self.cluster_map
return ds
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
if self.kind == 'cluster':
raise RuntimeError("Not a threshold-free distribution")
param_map = self._original_param_map
probability_map = self.probability_map.x
if self._nad_ax:
probability_map = probability_map.swapaxes(0, self._nad_ax)
peaks = find_peaks(self._original_cluster_map, self._connectivity)
peak_map, peak_ids = label_clusters_binary(peaks, self._connectivity)
ds = Dataset()
ds['id'] = Var(peak_ids)
v = ds.add_empty_var('v')
if self.samples:
p = ds.add_empty_var('p')
bin_buff = np.empty(peak_map.shape, np.bool8)
for i, id_ in enumerate(peak_ids):
idx = np.equal(peak_map, id_, bin_buff)
v[i] = param_map[idx][0]
if self.samples:
p[i] = probability_map[idx][0]
return ds
def compute_probability_map(self, **sub):
"""Compute a probability map
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
probability : NDVar
Map of p-values.
"""
if not self.samples:
raise RuntimeError("Can't compute probability without permutations")
if self.kind == 'cluster':
cpmap = np.ones(self.shape)
if self.n_clusters:
cids = self._cids
dist = self._aggregate_dist(**sub)
cluster_map = self._original_cluster_map
param_map = self._original_param_map
# measure clusters
cluster_v = ndimage.sum(param_map, cluster_map, cids)
# p-values: "the proportion of random partitions that resulted
# in a larger test statistic than the observed one" (179)
n_larger = np.sum(dist >= np.abs(cluster_v[:, None]), 1)
cluster_p = n_larger / self.samples
c_mask = np.empty(self.shape, dtype=np.bool8)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
cpmap[c_mask] = cluster_p[i]
# revert to original shape
if self._nad_ax:
cpmap = cpmap.swapaxes(0, self._nad_ax)
dims = self.dims
else:
if self.kind == 'tfce':
stat_map = self.tfce_map
else:
if self.tail == 0:
stat_map = self.parameter_map.abs()
elif self.tail < 0:
stat_map = -self.parameter_map
else:
stat_map = self.parameter_map
if sub:
stat_map = stat_map.sub(**sub)
dims = stat_map.dims if isinstance(stat_map, NDVar) else None
cpmap = np.zeros(stat_map.shape) if dims else 0.
if self.dist is None: # flat stat-map
cpmap += 1
else:
dist = self._aggregate_dist(**sub)
idx = np.empty(stat_map.shape, dtype=np.bool8)
actual = stat_map.x if self.dims else stat_map
for v in dist:
cpmap += np.greater_equal(v, actual, idx)
cpmap /= self.samples
if dims:
return NDVar(cpmap, dims, _info.for_cluster_pmap(), self.name)
else:
return cpmap
def masked_parameter_map(self, pmin=0.05, name=None, **sub):
"""Parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map, masked with
p <= pmin.
"""
if not 1 >= pmin > 0:
raise ValueError(f"pmin={pmin}: needs to be between 1 and 0")
if name is None:
name = self.parameter_map.name
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if pmin == 1:
if self.kind != 'cluster':
raise ValueError(f"pmin=1 is only a valid mask for threshold-based cluster tests")
mask = self.cluster_map == 0
else:
probability_map = self.compute_probability_map(**sub)
mask = probability_map > pmin
return param_map.mask(mask, name)
@LazyProperty
def probability_map(self):
if self.samples:
return self.compute_probability_map()
else:
return None
@LazyProperty
def _default_plot_obj(self):
if self.samples:
return [[self.parameter_map, self.probability_map]]
else:
return [[self.parameter_map]]
def info_list(self, title="Computation Info"):
"List with information on computation"
l = fmtxt.List(title)
l.add_item("Eelbrain version: %s" % self._version)
l.add_item("Host Computer: %s" % self._host)
if self._init_time is not None:
l.add_item("Created: %s" % datetime.fromtimestamp(self._init_time)
.strftime('%y-%m-%d %H:%M'))
l.add_item("Original time: %s" % timedelta(seconds=round(self.dt_original)))
l.add_item("Permutation time: %s" % timedelta(seconds=round(self.dt_perm)))
return l
class _MergedTemporalClusterDist:
"""Merge permutation distributions from multiple tests"""
def __init__(self, cdists):
if isinstance(cdists[0], list):
self.effects = [d.name for d in cdists[0]]
self.samples = cdists[0][0].samples
dist = {}
for i, effect in enumerate(self.effects):
if any(d[i].n_clusters for d in cdists):
dist[effect] = np.column_stack([d[i].dist for d in cdists if d[i].dist is not None])
if len(dist):
dist = {c: d.max(1) for c, d in dist.items()}
else:
self.samples = cdists[0].samples
if any(d.n_clusters for d in cdists):
dist = np.column_stack([d.dist for d in cdists if d.dist is not None])
dist = dist.max(1)
else:
dist = None
self.dist = dist
def correct_cluster_p(self, res):
clusters = res.find_clusters()
keys = list(clusters.keys())
if not clusters.n_cases:
return clusters
if isinstance(res, MultiEffectNDTest):
keys.insert(-1, 'p_parc')
cluster_p_corr = []
for cl in clusters.itercases():
n_larger = np.sum(self.dist[cl['effect']] > np.abs(cl['v']))
cluster_p_corr.append(float(n_larger) / self.samples)
else:
keys.append('p_parc')
vs = np.array(clusters['v'])
n_larger = np.sum(self.dist > np.abs(vs[:, None]), 1)
cluster_p_corr = n_larger / self.samples
clusters['p_parc'] = Var(cluster_p_corr)
clusters = clusters[keys]
return clusters
def distribution_worker(dist_array, dist_shape, in_queue, kill_beacon):
"Worker that accumulates values and places them into the distribution"
n = reduce(operator.mul, dist_shape)
dist = | np.frombuffer(dist_array, np.float64, n) | numpy.frombuffer |
r"""Compute action detection performance for the AVA dataset.
Please send any questions about this code to the Google Group ava-dataset-users:
https://groups.google.com/forum/#!forum/ava-dataset-users
Example usage:
python -O get_ava_performance.py \
-l ava/ava_action_list_v2.1_for_activitynet_2018.pbtxt.txt \
-g ava_val_v2.1.csv \
-e ava_val_excluded_timestamps_v2.1.csv \
-d your_results.csv
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from collections import defaultdict
import csv
import logging
import pprint
import sys
import time
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from ava import object_detection_evaluation
from ava import standard_fields
def print_time(message, start):
logging.info("==> %g seconds to %s", time.time() - start, message)
def make_image_key(video_id, timestamp):
"""Returns a unique identifier for a video id & timestamp."""
return "%s,%04d" % (video_id, int(timestamp))
def read_csv(csv_file, class_whitelist=None):
"""Loads boxes and class labels from a CSV file in the AVA format.
CSV file format described at https://research.google.com/ava/download.html.
Args:
csv_file: A file object.
class_whitelist: If provided, boxes corresponding to (integer) class labels
not in this set are skipped.
Returns:
boxes: A dictionary mapping each unique image key (string) to a list of
boxes, given as coordinates [y1, x1, y2, x2].
labels: A dictionary mapping each unique image key (string) to a list of
      integer class labels, matching the corresponding box in `boxes`.
scores: A dictionary mapping each unique image key (string) to a list of
      score values, matching the corresponding label in `labels`. If
scores are not provided in the csv, then they will default to 1.0.
"""
start = time.time()
boxes = defaultdict(list)
labels = defaultdict(list)
scores = defaultdict(list)
reader = csv.reader(csv_file)
for row in reader:
    assert len(row) in [7, 8], "Wrong number of columns: " + str(row)
image_key = make_image_key(row[0], row[1])
x1, y1, x2, y2 = [float(n) for n in row[2:6]]
action_id = int(row[6])
if class_whitelist and action_id not in class_whitelist:
continue
score = 1.0
if len(row) == 8:
score = float(row[7])
boxes[image_key].append([y1, x1, y2, x2])
labels[image_key].append(action_id)
scores[image_key].append(score)
print_time("read file " + csv_file.name, start)
return boxes, labels, scores
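# Hedged sketch of the structures read_csv returns for one detection row
# "vid001,0902,0.1,0.2,0.8,0.9,12,0.75":
#     boxes["vid001,0902"]  -> [[0.2, 0.1, 0.9, 0.8]]  (stored as [y1, x1, y2, x2])
#     labels["vid001,0902"] -> [12]
#     scores["vid001,0902"] -> [0.75]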
def read_exclusions(exclusions_file):
"""Reads a CSV file of excluded timestamps.
Args:
exclusions_file: A file object containing a csv of video-id,timestamp.
Returns:
A set of strings containing excluded image keys, e.g. "aaaaaaaaaaa,0904",
or an empty set if exclusions file is None.
"""
excluded = set()
if exclusions_file:
reader = csv.reader(exclusions_file)
for row in reader:
      assert len(row) == 2, "Expected only 2 columns, got: " + str(row)
excluded.add(make_image_key(row[0], row[1]))
return excluded
def read_labelmap(labelmap_file):
"""Reads a labelmap without the dependency on protocol buffers.
Args:
labelmap_file: A file object containing a label map protocol buffer.
Returns:
labelmap: The label map in the form used by the object_detection_evaluation
module - a list of {"id": integer, "name": classname } dicts.
class_ids: A set containing all of the valid class id integers.
"""
labelmap = []
class_ids = set()
name = ""
class_id = ""
for line in labelmap_file:
if line.startswith(" name:"):
name = line.split('"')[1]
elif line.startswith(" id:") or line.startswith(" label_id:"):
class_id = int(line.strip().split(" ")[-1])
labelmap.append({"id": class_id, "name": name})
class_ids.add(class_id)
return labelmap, class_ids
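# Hedged sketch: read_labelmap only inspects lines starting with '  name:' and
# '  id:' / '  label_id:', so a plain list of pbtxt-like strings is enough to try it.
def _example_read_labelmap():
    lines = [
        'item {\n',
        '  name: "stand"\n',
        '  id: 11\n',
        '}\n',
        'item {\n',
        '  name: "sit"\n',
        '  label_id: 12\n',
        '}\n',
    ]
    labelmap, class_ids = read_labelmap(lines)
    # labelmap == [{"id": 11, "name": "stand"}, {"id": 12, "name": "sit"}]
    # class_ids == {11, 12}
    return labelmap, class_ids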
def split_list(alist, wanted_parts=1):
length = len(alist)
return [alist[i * length // wanted_parts: (i + 1) * length // wanted_parts]
for i in range(wanted_parts)]
def split_interleave(A):
lists = split_list(A, wanted_parts=4)
D = [val for tup in zip(*lists) for val in tup]
return D
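# e.g. split_interleave([1, 2, 3, 4, 5, 6, 7, 8]) == [1, 3, 5, 7, 2, 4, 6, 8]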
def run_evaluation_threshold(labelmap, groundtruth, exclusions, iou):
# sns.palplot(sns.diverging_palette(128, 240, n=10))
# seq_col_brew = sns.color_palette("Blues_r", 4) # For sequential, blue gradient in reverse
# Qualitative data palette
# current_palette = sns.color_palette("Paired")
# sns.set_palette(current_palette)
# Make sure not to mess this up
filters = []
filters.append("0.1")
filters.append("0.2")
filters.append("0.3")
filters.append("0.4")
filters.append("0.5")
filters.append("0.6")
filters.append("0.7")
filters.append("0.8")
filters.append("0.9")
root_dir = '../../../data/AVA/files/'
ftype = "fusion"
all_detections = []
ts = "1809281055"
for f in filters:
all_detections.append(open("../thresholds/context_" + ftype + "/predictions_fusion_avg_fovea_" + ts + "_" + f + ".csv", 'rb'))
all_gndtruths = []
for i in range(len(filters)):
all_gndtruths.append(open(root_dir + "AVA_Val_Custom_Corrected.csv", 'rb'))
#all_gndtruths.append(open("AVA_Test_Custom_Corrected.csv", 'rb'))
#all_gndtruths.append(open("AVA_Test_Custom_Corrected.csv", 'rb'))
"""Runs evaluations given input files.
Args:
labelmap: file object containing map of labels to consider, in pbtxt format
groundtruth: file object
detections: file object
exclusions: file object or None.
"""
categories, class_whitelist = read_labelmap(labelmap)
logging.info("CATEGORIES (%d):\n%s", len(categories),
pprint.pformat(categories, indent=2))
excluded_keys = read_exclusions(exclusions)
# Reads detections data.
x_axis = []
xpose_ax = []
xobj_ax = []
xhuman_ax = []
ypose_ax = []
yobj_ax = []
yhuman_ax = []
colors_pose = []
colors_obj = []
colors_human = []
finalmAPs = []
colors = []
maxY = -1.0
for detections, gndtruth, filter_type in zip(all_detections, all_gndtruths, filters):
pascal_evaluator = None
metrics = None
actions = None
start = 0
pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
categories, matching_iou_threshold=iou)
# Reads the ground truth data.
boxes, labels, _ = read_csv(gndtruth, class_whitelist)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in ground truth: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.InputDataFields.groundtruth_classes:
np.array(labels[image_key], dtype=int),
standard_fields.InputDataFields.groundtruth_difficult:
np.zeros(len(boxes[image_key]), dtype=bool)
})
print_time("convert groundtruth", start)
# Run evaluation
boxes, labels, scores = read_csv(detections, class_whitelist)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in detections: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_detected_image_info(
image_key, {
standard_fields.DetectionResultFields.detection_boxes:
np.array(boxes[image_key], dtype=float),
standard_fields.DetectionResultFields.detection_classes:
np.array(labels[image_key], dtype=int),
standard_fields.DetectionResultFields.detection_scores:
np.array(scores[image_key], dtype=float)
})
print_time("convert detections", start)
start = time.time()
metrics = pascal_evaluator.evaluate()
print_time("run_evaluator", start)
# TODO Show a pretty histogram here besides pprint
actions = list(metrics.keys())
final_value = 0.0
for m in actions:
ms = m.split("/")[-1]
if ms == 'mAP@' + str(iou) + 'IOU':
final_value = metrics[m]
finalmAPs.append(final_value)
else:
# x_axis.append(ms)
# y_axis.append(metrics[m])
for cat in categories:
if cat['name'].split("/")[-1] == ms:
if maxY < metrics[m]:
maxY = metrics[m]
if cat['id'] <= 10:
xpose_ax.append("(" + filter_type + ") " + ms)
ypose_ax.append(metrics[m])
colors_pose.append('red')
elif cat['id'] <= 22:
xobj_ax.append("(" + filter_type + ") " + ms)
yobj_ax.append(metrics[m])
colors_obj.append('blue')
else:
xhuman_ax.append("(" + filter_type + ") " + ms)
yhuman_ax.append(metrics[m])
colors_human.append('green')
# Make a confusion matrix for this run
pascal_evaluator = None
x_axis = split_interleave(xpose_ax) + split_interleave(xobj_ax) + split_interleave(xhuman_ax)
y_axis = split_interleave(ypose_ax) + split_interleave(yobj_ax) + split_interleave(yhuman_ax)
colors = split_interleave(colors_pose) + split_interleave(colors_obj) + split_interleave(colors_human)
print(filters)
print(finalmAPs)
plt.ylabel('frame-mAP')
top = 0.1 # offset a bit so it looks good
sns.set_style("whitegrid")
clrs = ['blue' if (x < max(finalmAPs)) else 'red' for x in finalmAPs]
g = sns.barplot(filters, finalmAPs, palette=clrs)
ax = g
# annotate axis = seaborn axis
for p in ax.patches:
ax.annotate("%.4f" % p.get_height(), (p.get_x() + p.get_width() / 2., p.get_height()),
ha='center', va='center', fontsize=10, color='gray', rotation=90, xytext=(0, 20),
textcoords='offset points')
_ = g.set_ylim(0, top) # To make space for the annotations
plt.show()
def run_evaluation(labelmap, groundtruth, exclusions, iou):
root_dir = '../../../data/AVA/files/'
test_dir = "../test_outputs/"
# Make sure not to mess this up
experiments_filters = {}
experiments_detections = {}
experiments_filters['pose'] = ['Pose']
experiments_detections['pose'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb')]
experiments_filters['rgb-streams-aug'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['rgb-streams-aug'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['flow vs flowcrop'] = ['Flow', 'Flowcrop']
experiments_detections['flow vs flowcrop'] = [open(test_dir + "/flow/output_test_flowcrop.csv", 'rb'), ]
#all_detections.append(open(test_dir + "/flow/output_test_flow.csv", 'rb'))
experiments_filters['two-streams'] = ['Two-Stream-RGB', 'Two-Stream-Crop', 'Two-Stream-Gauss', 'Two-Stream-Fovea']
experiments_detections['two-streams'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['two-streams-aug'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['two-streams-aug'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['mlp vs lstm'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['mlp vs lstm'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['lstmA vs lstmB'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['lstmA vs lstmB'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['context-fusion mlp vs lstm'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['context-fusion mlp vs lstm'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['balancing sampling'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['balancing sampling'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['balancing weights'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['balancing weights'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
experiments_filters['balancing prior'] = ['RGB', 'Crop', 'Gauss', 'Fovea']
experiments_detections['balancing prior'] = [open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'), open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'), open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'), open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb')]
# experiment =
filters = []
# filters.append("pose")
# filters.append("rgb-base")
# filters.append("rgb-prior")
# filters.append("rgb-sampling")
# filters.append("rgb-weights")
# filters.append("rgb-kinetics")
# filters.append("flow-kinetics")
# filters.append("rgb")
# filters.append("crop")
# filters.append("gauss")
# filters.append("fovea")
# filters.append("flowcrop")
# filters.append("flow")
# filters.append("MLP")
#filters.append("best case scenario thresh 0.1")
#filters.append("two pass scenario thresh 0.1")
filters.append("fovea")
filters.append("dense-gt")
#filters.append("sampling no aug")
filters.append("dense-2pass")
#filters.append("weights no aug")
# filters.append("LSTM5-A-512")
# filters.append("random")
# filters.append("LSTM5-B-512")
# filters.append("LSTM10")
# filters.append("2st(rgb)")
# filters.append("2st(crop)")
# filters.append("2st(gauss)")
# filters.append("2st(fovea)")
#filters.append("2st(crop) + flowcrop")
#filters.append("2st(gauss) + flowcrop")
#filters.append("2st(fovea) + flowcrop")
#filters.append("2st(fovea) + mlp")
#filters.append("2st(crop) + mlp")
#filters.append("2st(gauss) + mlp")
# filters.append("2stream")
#filters.append("2stream + lstm (extra pass)")
# filters.append("gauss")
#filters.append("gauss aug")
#filters.append("LSTMA 512 5 2")
#filters.append("LSTMA 512 5 3")
#filters.append("LSTMA 512 5 3")
#filters.append("LSTMA 1024 5 3")
#filters.append("LSTMA 2048 5 3")
#filters.append("LSTMB 512 3 3")
#filters.append("LSTMB 1024 3 3")
#filters.append("LSTMB 2048 3 3")
# filters.append("2st(gauss) + lstm")
all_detections = []
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_1024_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_2048_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_3_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_1024_3_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_2048_3_3.csv", 'rb'))
# Pose
# all_detections.append(open(test_dir + "output_test_flowcrop.csv", 'rb'))
# Balancing
#all_detections.append(open(test_dir + "output_test_flowcrop.csv", 'rb'))
#all_detections.append(open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'))
#all_detections.append(open(test_dir + "/augmentation/predictions_rgb_gauss_1807241628_1000.csv", 'rb'))
#all_detections.append(open(test_dir + "/augmentation/output_test_sampling_gauss_1809221859.csv", 'rb'))
#all_detections.append(open(test_dir + "/augmentation/output_test_weights_gauss_1809221904.csv", 'rb'))
# RGB Streams
#all_detections.append(open(test_dir + "/kinetics_init/output_test_rgb_kineticsinit_gauss_1809220212.csv", 'rb'))
#all_detections.append(open(test_dir + "/kinetics_init/output_test_flow_kineticsinit_1809220244.csv", 'rb'))
# Flow Streams
# Context (LSTMs)
#filters.append("LSTMB 512 3 3")
#filters.append("LSTMB 512 3 2")
#filters.append("LSTMB 512 3 1")
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_3_1.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_3_2.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_3_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_1.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_2.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_32_3_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_32_5_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_32_10_3.csv", 'rb'))
#all_detections.append(open(test_dir + "context/mlp/output_test_ctx.csv", 'rb'))
#all_detections.append(open(test_dir + "context/mlp/output_test_ctx_mlp_1809212356.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmA/output_test_ctx_lstm_512_5_3_1809220010.csv", 'rb'))
#all_detections.append(open(test_dir + "random/output_test_random_1809221552.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_512_5_3_1809211924.csv", 'rb'))
#all_detections.append(open(test_dir + "context/lstmB/output_test_ctx_lstm_128_10_3_1809211930.csv", 'rb'))
# 6 2-streams + baseline
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_rgb_1809220100.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_crop.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_gauss.csv", 'rb'))
all_detections.append(open(test_dir + "/two-streams/output_test_2stream_fovea.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_crop_1809220117.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_gauss_1809220152.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_fovea_1809220136.csv", 'rb'))
# Context Fusions
# all_detections.append(open(test_dir + "/context_fusion/output_test_3stream_fovea.csv", 'rb'))
# all_detections.append(open(test_dir + "/context_fusion/output_test_3stream_crop.csv", 'rb'))
# all_detections.append(open(test_dir + "/context_fusion/output_test_3stream_gauss.csv", 'rb'))
all_detections.append(open(test_dir + "/context_fusion/output_test_LSTM_FCfusion_contextGT_gauss_1810011737.csv", 'rb'))
all_detections.append(open(test_dir + "/context_fusion/output_test_LSTM_FCfusion_context_secondpass_gauss_1810011754.csv", 'rb'))
# LSTMs
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_rgb_1809220100.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_crop.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_gauss.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_fovea.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstm_fusion_thresh_512_5_3_1809242315.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstm_512_5_3_1809242252.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstmavggoodpedro_512_5_3_1809242338.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstmavg_twophase_thresh02_512_5_3_1809281219.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstmavg_threephase_512_5_3_1809281317.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstm_fusion_thresh01_512_5_3_1809281400.csv", 'rb'))
#all_detections.append(open(test_dir + "/context_fusion/output_test_ctx_lstmavg_twophase_thresh01_512_5_3_1809281423.csv", 'rb'))
#all_detections.append(open(test_dir + "rgb_gauss/output_test_gauss.csv", 'rb'))
#all_detections.append(open(test_dir + "augmentation/output_test_sampling_gauss_1809221859.csv", 'rb'))
#all_detections.append(open(test_dir + "augmentation/output_test_samplingnoaug_gauss_1809281439.csv", 'rb'))
#all_detections.append(open(test_dir + "augmentation/output_test_weightsnew_gauss_1809291104.csv", 'rb'))
#all_detections.append(open(test_dir + "augmentation/output_test_weightsaug_gauss_1809261228.csv", 'rb'))
# output_test_ctx_lstm_512_5_3_1809242252.csv
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_crop_1809220117.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_gauss_1809220152.csv", 'rb'))
#all_detections.append(open(test_dir + "/two-streams/output_test_2stream_flowcrop_fovea_1809220136.csv", 'rb'))
# ---------------------------------
# New run to compare new flow
#all_detections.append(open(test_dir + "/flow/output_test_flowcrop.csv", 'rb'))
#all_detections.append(open(test_dir + "/flow/output_test_flow.csv", 'rb'))
# New 2 and 3 streams
# all_detections.append(open(test_dir + "output_test_gauss.csv", 'rb'))
# all_detections.append(open(test_dir + "output_test_gauss_extra.csv", 'rb'))
# all_detections.append(open(test_dir + "output_test_3stream_gauss.csv", 'rb'))
# all_detections.append(open(test_dir + "output_test_3stream_crop.csv", 'rb'))
# Flow, context, 2-stream, 3-stream run
#all_detections.append(open(test_dir + "output_test_ctx.csv", 'rb'))
#all_detections.append(open(test_dir + "output_test_flow.csv", 'rb'))
#all_detections.append(open(test_dir + "output_test_2stream.csv", 'rb'))
#all_detections.append(open(test_dir + "output_test_3stream.csv", 'rb'))
# RGB run
# all_detections.append(open(test_dir + "/rgb_rgb/output_test_rgb.csv", 'rb'))
# all_detections.append(open(test_dir + "/rgb_crop/output_test_crop.csv", 'rb'))
# all_detections.append(open(test_dir + "/rgb_gauss/output_test_gauss.csv", 'rb'))
# all_detections.append(open(test_dir + "/rgb_fovea/output_test_fovea.csv", 'rb'))
balancing = False
all_gndtruths = []
for i in range(len(all_detections)):
if balancing is False:
all_gndtruths.append(open(root_dir + "AVA_Test_Custom_Corrected.csv", 'rb'))
else:
all_gndtruths.append(open(root_dir + "AVA_Test_Custom_Corrected_Balanced.csv", 'rb'))
"""Runs evaluations given input files.
Args:
labelmap: file object containing map of labels to consider, in pbtxt format
groundtruth: file object
detections: file object
exclusions: file object or None.
"""
categories, class_whitelist = read_labelmap(labelmap)
logging.info("CATEGORIES (%d):\n%s", len(categories), pprint.pformat(categories, indent=2))
excluded_keys = read_exclusions(exclusions)
# Reads detections data.
x_axis = []
xpose_ax = []
xobj_ax = []
xhuman_ax = []
ypose_ax = []
yobj_ax = []
yhuman_ax = []
colors_pose = []
colors_obj = []
colors_human = []
finalmAPs = []
colors = []
maxY = -1.0
for detections, gndtruth, filter_type in zip(all_detections, all_gndtruths, filters):
pascal_evaluator = None
metrics = None
actions = None
start = 0
pascal_evaluator = object_detection_evaluation.PascalDetectionEvaluator(
categories, matching_iou_threshold=iou)
# Reads the ground truth data.
boxes, labels, _ = read_csv(gndtruth, class_whitelist)
start = time.time()
for image_key in boxes:
if image_key in excluded_keys:
logging.info(("Found excluded timestamp in ground truth: %s. "
"It will be ignored."), image_key)
continue
pascal_evaluator.add_single_ground_truth_image_info(
image_key, {
standard_fields.InputDataFields.groundtruth_boxes:
| np.array(boxes[image_key], dtype=float) | numpy.array |
# Copyright (c) 2014, <NAME>.
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
from scipy.special import wofz
from .kern import Kern
from ...core.parameterization import Param
from ...core.parameterization.transformations import Logexp
from ...util.caching import Cache_this
class EQ_ODE2(Kern):
"""
Covariance function for second order differential equation driven by an exponentiated quadratic covariance.
This outputs of this kernel have the form
.. math::
\frac{\text{d}^2y_j(t)}{\text{d}^2t} + C_j\frac{\text{d}y_j(t)}{\text{d}t} + B_jy_j(t) = \sum_{i=1}^R w_{j,i} u_i(t)
where :math:`R` is the rank of the system, :math:`w_{j,i}` is the sensitivity of the :math:`j`th output to the :math:`i`th latent function, :math:`d_j` is the decay rate of the :math:`j`th output and :math:`f_i(t)` and :math:`g_i(t)` are independent latent Gaussian processes goverened by an exponentiated quadratic covariance.
:param output_dim: number of outputs driven by latent function.
:type output_dim: int
:param W: sensitivities of each output to the latent driving function.
:type W: ndarray (output_dim x rank).
:param rank: If rank is greater than 1 then there are assumed to be a total of rank latent forces independently driving the system, each with identical covariance.
:type rank: int
:param C: damper constant for the second order system.
:type C: array of length output_dim.
:param B: spring constant for the second order system.
:type B: array of length output_dim.
"""
#This code will only work for the sparseGP model, due to limitations in models for this kernel
def __init__(self, input_dim=2, output_dim=1, rank=1, W=None, lengthscale=None, C=None, B=None, active_dims=None, name='eq_ode2'):
#input_dim should be 1, but kern._slice_X is not returning index information required to evaluate kernels
        assert input_dim == 2, "only defined for 1 input dim (plus an output-index column, so input_dim must be 2)"
super(EQ_ODE2, self).__init__(input_dim=input_dim, active_dims=active_dims, name=name)
self.rank = rank
self.output_dim = output_dim
if lengthscale is None:
lengthscale = .5+np.random.rand(self.rank)
else:
lengthscale = np.asarray(lengthscale)
assert lengthscale.size in [1, self.rank], "Bad number of lengthscales"
if lengthscale.size != self.rank:
lengthscale = np.ones(self.input_dim)*lengthscale
if W is None:
#W = 0.5*np.random.randn(self.output_dim, self.rank)/np.sqrt(self.rank)
W = np.ones((self.output_dim, self.rank))
else:
assert W.shape == (self.output_dim, self.rank)
if C is None:
C = np.ones(self.output_dim)
if B is None:
B = np.ones(self.output_dim)
self.C = Param('C', C, Logexp())
self.B = Param('B', B, Logexp())
self.lengthscale = Param('lengthscale', lengthscale, Logexp())
self.W = Param('W', W)
self.link_parameters(self.lengthscale, self.C, self.B, self.W)
@Cache_this(limit=2)
def K(self, X, X2=None):
#This way is not working, indexes are lost after using k._slice_X
#index = np.asarray(X, dtype=np.int)
#index = index.reshape(index.size,)
if hasattr(X, 'values'):
X = X.values
index = np.int_(X[:, 1])
index = index.reshape(index.size,)
X_flag = index[0] >= self.output_dim
if X2 is None:
if X_flag:
#Calculate covariance function for the latent functions
index -= self.output_dim
return self._Kuu(X, index)
else:
raise NotImplementedError
else:
#This way is not working, indexes are lost after using k._slice_X
#index2 = np.asarray(X2, dtype=np.int)
#index2 = index2.reshape(index2.size,)
if hasattr(X2, 'values'):
X2 = X2.values
index2 = np.int_(X2[:, 1])
index2 = index2.reshape(index2.size,)
X2_flag = index2[0] >= self.output_dim
#Calculate cross-covariance function
if not X_flag and X2_flag:
index2 -= self.output_dim
return self._Kfu(X, index, X2, index2) #Kfu
else:
index -= self.output_dim
return self._Kfu(X2, index2, X, index).T #Kuf
#Calculate the covariance function for diag(Kff(X,X))
def Kdiag(self, X):
#This way is not working, indexes are lost after using k._slice_X
#index = np.asarray(X, dtype=np.int)
#index = index.reshape(index.size,)
if hasattr(X, 'values'):
X = X.values
index = np.int_(X[:, 1])
index = index.reshape(index.size,)
#terms that move along t
t = X[:, 0].reshape(X.shape[0], 1)
d = np.unique(index) #Output Indexes
B = self.B.values[d]
C = self.C.values[d]
S = self.W.values[d, :]
#Index transformation
indd = np.arange(self.output_dim)
indd[d] = np.arange(d.size)
index = indd[index]
#Check where wd becomes complex
wbool = C*C >= 4.*B
B = B.reshape(B.size, 1)
C = C.reshape(C.size, 1)
alpha = .5*C
C2 = C*C
wbool2 = wbool[index]
ind2t = np.where(wbool2)
ind3t = np.where(np.logical_not(wbool2))
#Terms that move along q
lq = self.lengthscale.values.reshape(1, self.lengthscale.size)
S2 = S*S
kdiag = np.empty((t.size, ))
indD = np.arange(B.size)
#(1) When wd is real
if np.any(np.logical_not(wbool)):
            #Indexes of index and t related to (1)
t1 = t[ind3t]
ind = index[ind3t]
d = np.asarray(np.where(np.logical_not(wbool))[0]) #Selection of outputs
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
S2lq = S2[d]*(.5*lq)
c0 = S2lq*np.sqrt(np.pi)
w = .5*np.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
w2 = w*w
gam = alphad + 1j*w
gamc = alphad - 1j*w
c1 = .5/(alphad*w2)
c2 = .5/(gam*w2)
c = c1 - c2
#DxQ terms
nu = lq*(gam*.5)
K01 = c0*c
#Nx1 terms
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
egamt = np.exp(gamt)
ec = egamt*c2[ind] - np.exp(gamct)*c1[ind]
#NxQ terms
t_lq = t1/lq
# Upsilon Calculations
# Using wofz
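            # wofz is the Faddeeva function w(z) = exp(-z**2)*erfc(-1j*z); the
            # LFM "upsilon" integrals are evaluated through it, working in
            # log-space to keep the exponential terms from overflowing.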
wnu = wofz(1j*nu)
lwnu = np.log(wnu)
t2_lq2 = -t_lq*t_lq
upm = wnu[ind] - np.exp(t2_lq2 + gamt + np.log(wofz(1j*(t_lq + nu[ind]))))
upm[t1[:, 0] == 0, :] = 0.
nu2 = nu*nu
z1 = nu[ind] - t_lq
indv1 = np.where(z1.real >= 0.)
indv2 = np.where(z1.real < 0.)
upv = -np.exp(lwnu[ind] + gamt)
            if indv1[0].size > 0:
                upv[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1])))
            if indv2[0].size > 0:
upv[indv2] += np.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + np.log(2.))\
- np.exp(t2_lq2[indv2] + np.log(wofz(-1j*z1[indv2])))
upv[t1[:, 0] == 0, :] = 0.
#Covariance calculation
kdiag[ind3t] = np.sum(np.real(K01[ind]*upm), axis=1)
kdiag[ind3t] += np.sum(np.real((c0[ind]*ec)*upv), axis=1)
#(2) When w_d is complex
if np.any(wbool):
t1 = t[ind2t]
ind = index[ind2t]
#Index transformation
d = np.asarray(np.where(wbool)[0])
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
S2lq = S2[d]*(lq*.25)
c0 = S2lq*np.sqrt(np.pi)
w = .5*np.sqrt(C2[d] - 4.*B[d])
alphad = alpha[d]
gam = alphad - w
gamc = alphad + w
w2 = -w*w
c1 = .5/(alphad*w2)
c21 = .5/(gam*w2)
c22 = .5/(gamc*w2)
c = c1 - c21
c2 = c1 - c22
#DxQ terms
K011 = c0*c
K012 = c0*c2
nu = lq*(.5*gam)
nuc = lq*(.5*gamc)
#Nx1 terms
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
egamt = np.exp(gamt)
egamct = np.exp(gamct)
ec = egamt*c21[ind] - egamct*c1[ind]
ec2 = egamct*c22[ind] - egamt*c1[ind]
#NxQ terms
t_lq = t1/lq
#Upsilon Calculations using wofz
t2_lq2 = -t_lq*t_lq #Required when using wofz
wnu = wofz(1j*nu).real
lwnu = np.log(wnu)
upm = wnu[ind] - np.exp(t2_lq2 + gamt + np.log(wofz(1j*(t_lq + nu[ind])).real))
upm[t1[:, 0] == 0., :] = 0.
nu2 = nu*nu
z1 = nu[ind] - t_lq
indv1 = np.where(z1 >= 0.)
indv2 = np.where(z1 < 0.)
upv = -np.exp(lwnu[ind] + gamt)
            if indv1[0].size > 0:
                upv[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real))
            if indv2[0].size > 0:
upv[indv2] += np.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + np.log(2.))\
- np.exp(t2_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real))
upv[t1[:, 0] == 0, :] = 0.
wnuc = wofz(1j*nuc).real
lwnuc = np.log(wnuc)
upmc = wnuc[ind] - np.exp(t2_lq2 + gamct + np.log(wofz(1j*(t_lq + nuc[ind])).real))
upmc[t1[:, 0] == 0., :] = 0.
nuc2 = nuc*nuc
z1 = nuc[ind] - t_lq
indv1 = np.where(z1 >= 0.)
indv2 = np.where(z1 < 0.)
upvc = - np.exp(lwnuc[ind] + gamct)
            if indv1[0].size > 0:
                upvc[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real))
            if indv2[0].size > 0:
upvc[indv2] += np.exp(nuc2[ind[indv2[0]], indv2[1]] + gamct[indv2[0], 0] + np.log(2.))\
- np.exp(t2_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real))
upvc[t1[:, 0] == 0, :] = 0.
#Covariance calculation
kdiag[ind2t] = np.sum(K011[ind]*upm + K012[ind]*upmc + (c0[ind]*ec)*upv + (c0[ind]*ec2)*upvc, axis=1)
return kdiag
def update_gradients_full(self, dL_dK, X, X2 = None):
#index = np.asarray(X, dtype=np.int)
#index = index.reshape(index.size,)
if hasattr(X, 'values'):
X = X.values
self.B.gradient = np.zeros(self.B.shape)
self.C.gradient = np.zeros(self.C.shape)
self.W.gradient = np.zeros(self.W.shape)
self.lengthscale.gradient = np.zeros(self.lengthscale.shape)
index = np.int_(X[:, 1])
index = index.reshape(index.size,)
X_flag = index[0] >= self.output_dim
if X2 is None:
if X_flag: #Kuu or Kmm
index -= self.output_dim
tmp = dL_dK*self._gkuu_lq(X, index)
for q in np.unique(index):
ind = np.where(index == q)
self.lengthscale.gradient[q] = tmp[np.ix_(ind[0], ind[0])].sum()
else:
raise NotImplementedError
else: #Kfu or Knm
#index2 = np.asarray(X2, dtype=np.int)
#index2 = index2.reshape(index2.size,)
if hasattr(X2, 'values'):
X2 = X2.values
index2 = np.int_(X2[:, 1])
index2 = index2.reshape(index2.size,)
X2_flag = index2[0] >= self.output_dim
if not X_flag and X2_flag:
index2 -= self.output_dim
else:
                dL_dK = dL_dK.T  # so we obtain dL_dKfu
indtemp = index - self.output_dim
Xtemp = X
X = X2
X2 = Xtemp
index = index2
index2 = indtemp
glq, gSdq, gB, gC = self._gkfu(X, index, X2, index2)
tmp = dL_dK*glq
for q in np.unique(index2):
ind = np.where(index2 == q)
self.lengthscale.gradient[q] = tmp[:, ind].sum()
tmpB = dL_dK*gB
tmpC = dL_dK*gC
tmp = dL_dK*gSdq
for d in np.unique(index):
ind = np.where(index == d)
self.B.gradient[d] = tmpB[ind, :].sum()
self.C.gradient[d] = tmpC[ind, :].sum()
for q in np.unique(index2):
ind2 = np.where(index2 == q)
self.W.gradient[d, q] = tmp[np.ix_(ind[0], ind2[0])].sum()
def update_gradients_diag(self, dL_dKdiag, X):
#index = np.asarray(X, dtype=np.int)
#index = index.reshape(index.size,)
if hasattr(X, 'values'):
X = X.values
self.B.gradient = np.zeros(self.B.shape)
self.C.gradient = np.zeros(self.C.shape)
self.W.gradient = np.zeros(self.W.shape)
self.lengthscale.gradient = np.zeros(self.lengthscale.shape)
index = np.int_(X[:, 1])
index = index.reshape(index.size,)
glq, gS, gB, gC = self._gkdiag(X, index)
tmp = dL_dKdiag.reshape(index.size, 1)*glq
self.lengthscale.gradient = tmp.sum(0)
#TODO: Avoid the reshape by a priori knowing the shape of dL_dKdiag
tmpB = dL_dKdiag*gB.reshape(dL_dKdiag.shape)
tmpC = dL_dKdiag*gC.reshape(dL_dKdiag.shape)
tmp = dL_dKdiag.reshape(index.size, 1)*gS
for d in np.unique(index):
ind = np.where(index == d)
self.B.gradient[d] = tmpB[ind].sum()
self.C.gradient[d] = tmpC[ind].sum()
self.W.gradient[d, :] = tmp[ind].sum(0)
def gradients_X(self, dL_dK, X, X2=None):
#index = np.asarray(X, dtype=np.int)
#index = index.reshape(index.size,)
if hasattr(X, 'values'):
X = X.values
index = np.int_(X[:, 1])
index = index.reshape(index.size,)
X_flag = index[0] >= self.output_dim
#If input_dim == 1, use this
#gX = np.zeros((X.shape[0], 1))
#Cheat to allow gradient for input_dim==2
gX = np.zeros(X.shape)
if X2 is None: #Kuu or Kmm
if X_flag:
index -= self.output_dim
gX[:, 0] = 2.*(dL_dK*self._gkuu_X(X, index)).sum(0)
return gX
else:
raise NotImplementedError
else: #Kuf or Kmn
#index2 = np.asarray(X2, dtype=np.int)
#index2 = index2.reshape(index2.size,)
if hasattr(X2, 'values'):
X2 = X2.values
index2 = np.int_(X2[:, 1])
index2 = index2.reshape(index2.size,)
X2_flag = index2[0] >= self.output_dim
if X_flag and not X2_flag: #gradient of Kuf(Z, X) wrt Z
index -= self.output_dim
gX[:, 0] = (dL_dK*self._gkfu_z(X2, index2, X, index).T).sum(1)
return gX
else:
raise NotImplementedError
#---------------------------------------#
# Helper functions #
#---------------------------------------#
#Evaluation of squared exponential for LFM
def _Kuu(self, X, index):
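        # Kuu is block diagonal over the latent-force index: inputs belonging to
        # different latent forces are uncorrelated, and each block is an
        # exponentiated quadratic in t with lengthscale lq[q].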
index = index.reshape(index.size,)
t = X[:, 0].reshape(X.shape[0],)
lq = self.lengthscale.values.reshape(self.rank,)
lq2 = lq*lq
#Covariance matrix initialization
kuu = np.zeros((t.size, t.size))
#Assign 1. to diagonal terms
kuu[np.diag_indices(t.size)] = 1.
#Upper triangular indices
indtri1, indtri2 = np.triu_indices(t.size, 1)
#Block Diagonal indices among Upper Triangular indices
ind = np.where(index[indtri1] == index[indtri2])
indr = indtri1[ind]
indc = indtri2[ind]
r = t[indr] - t[indc]
r2 = r*r
#Calculation of covariance function
kuu[indr, indc] = np.exp(-r2/lq2[index[indr]])
        #Completion of the lower triangular part
kuu[indc, indr] = kuu[indr, indc]
return kuu
#Evaluation of cross-covariance function
def _Kfu(self, X, index, X2, index2):
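        # Cross-covariance between the outputs y_d(t) (rows, X) and the latent
        # forces u_q(z) (columns, X2), split into the underdamped case
        # (C**2 < 4*B, real w_d) and the overdamped case (C**2 >= 4*B), where
        # w_d becomes complex.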
#terms that move along t
t = X[:, 0].reshape(X.shape[0], 1)
d = np.unique(index) #Output Indexes
B = self.B.values[d]
C = self.C.values[d]
S = self.W.values[d, :]
#Index transformation
indd = np.arange(self.output_dim)
indd[d] = np.arange(d.size)
index = indd[index]
#Check where wd becomes complex
wbool = C*C >= 4.*B
#Output related variables must be column-wise
C = C.reshape(C.size, 1)
B = B.reshape(B.size, 1)
C2 = C*C
#Input related variables must be row-wise
z = X2[:, 0].reshape(1, X2.shape[0])
lq = self.lengthscale.values.reshape((1, self.rank))
#print np.max(z), np.max(z/lq[0, index2])
alpha = .5*C
wbool2 = wbool[index]
ind2t = np.where(wbool2)
ind3t = np.where(np.logical_not(wbool2))
kfu = np.empty((t.size, z.size))
indD = np.arange(B.size)
#(1) when wd is real
if np.any(np.logical_not(wbool)):
            #Indexes of index and t related to (1)
t1 = t[ind3t]
ind = index[ind3t]
#Index transformation
d = np.asarray(np.where(np.logical_not(wbool))[0])
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*np.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
gam = alphad - 1j*w
#DxQ terms
Slq = (S[d]/w)*(.5*lq)
c0 = Slq*np.sqrt(np.pi)
nu = gam*(.5*lq)
#1xM terms
z_lq = z/lq[0, index2]
#NxQ terms
t_lq = t1/lq
#NxM terms
zt_lq = z_lq - t_lq[:, index2]
# Upsilon Calculations
#Using wofz
tz = t1-z
fullind = np.ix_(ind, index2)
zt_lq2 = -zt_lq*zt_lq
z_lq2 = -z_lq*z_lq
gamt = -gam[ind]*t1
upsi = - np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind]))))
z1 = zt_lq + nu[fullind]
indv1 = np.where(z1.real >= 0.)
indv2 = np.where(z1.real < 0.)
            if indv1[0].size > 0:
                upsi[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1])))
            if indv2[0].size > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] += np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\
- np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2])))
upsi[t1[:, 0] == 0., :] = 0.
#Covariance calculation
kfu[ind3t] = c0[fullind]*upsi.imag
#(2) when wd is complex
if np.any(wbool):
#Indexes of index and t related to (2)
t1 = t[ind2t]
ind = index[ind2t]
#Index transformation
d = np.asarray(np.where(wbool)[0])
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*np.sqrt(C2[d] - 4.*B[d])
alphad = alpha[d]
gam = alphad - w
gamc = alphad + w
#DxQ terms
Slq = S[d]*(lq*.25)
c0 = -Slq*(np.sqrt(np.pi)/w)
nu = gam*(lq*.5)
nuc = gamc*(lq*.5)
#1xM terms
z_lq = z/lq[0, index2]
#NxQ terms
t_lq = t1/lq[0, index2]
#NxM terms
zt_lq = z_lq - t_lq
# Upsilon Calculations
tz = t1-z
z_lq2 = -z_lq*z_lq
zt_lq2 = -zt_lq*zt_lq
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
fullind = np.ix_(ind, index2)
upsi = np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind])).real))\
- np.exp(z_lq2 + gamct + np.log(wofz(1j*(z_lq + nuc[fullind])).real))
z1 = zt_lq + nu[fullind]
indv1 = np.where(z1 >= 0.)
indv2 = np.where(z1 < 0.)
            if indv1[0].size > 0:
                upsi[indv1] -= np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real))
            if indv2[0].size > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] -= np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\
- np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real))
z1 = zt_lq + nuc[fullind]
indv1 = np.where(z1 >= 0.)
indv2 = np.where(z1 < 0.)
            if indv1[0].size > 0:
                upsi[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real))
            if indv2[0].size > 0:
nuac2 = nuc[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] += np.exp(nuac2 - gamc[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\
- np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real))
upsi[t1[:, 0] == 0., :] = 0.
kfu[ind2t] = c0[np.ix_(ind, index2)]*upsi
return kfu
#Gradient of Kuu wrt lengthscale
def _gkuu_lq(self, X, index):
t = X[:, 0].reshape(X.shape[0],)
index = index.reshape(X.shape[0],)
lq = self.lengthscale.values.reshape(self.rank,)
lq2 = lq*lq
#Covariance matrix initialization
glq = np.zeros((t.size, t.size))
#Upper triangular indices
indtri1, indtri2 = np.triu_indices(t.size, 1)
#Block Diagonal indices among Upper Triangular indices
ind = np.where(index[indtri1] == index[indtri2])
indr = indtri1[ind]
indc = indtri2[ind]
r = t[indr] - t[indc]
r2 = r*r
r2_lq2 = r2/lq2[index[indr]]
#Calculation of covariance function
er2_lq2 = np.exp(-r2_lq2)
#Gradient wrt lq
c = 2.*r2_lq2/lq[index[indr]]
glq[indr, indc] = er2_lq2*c
#Complete the lower triangular
glq[indc, indr] = glq[indr, indc]
return glq
    #Careful: this derivative needs to be transposed
def _gkuu_X(self, X, index): #Diagonal terms are always zero
t = X[:, 0].reshape(X.shape[0],)
index = index.reshape(index.size,)
lq = self.lengthscale.values.reshape(self.rank,)
lq2 = lq*lq
#Covariance matrix initialization
gt = np.zeros((t.size, t.size))
#Upper triangular indices
indtri1, indtri2 = np.triu_indices(t.size, 1) #Offset of 1 from the diagonal
#Block Diagonal indices among Upper Triangular indices
ind = np.where(index[indtri1] == index[indtri2])
indr = indtri1[ind]
indc = indtri2[ind]
r = t[indr] - t[indc]
r2 = r*r
r2_lq2 = r2/(-lq2[index[indr]])
#Calculation of covariance function
er2_lq2 = np.exp(r2_lq2)
#Gradient wrt t
c = 2.*r/lq2[index[indr]]
gt[indr, indc] = er2_lq2*c
#Complete the lower triangular
gt[indc, indr] = -gt[indr, indc]
return gt
#Gradients for Diagonal Kff
def _gkdiag(self, X, index):
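        # Returns the gradients of diag(Kff) with respect to the lengthscales,
        # the sensitivities W, and the parameters B and C, using the same
        # real/complex w_d split as Kdiag.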
index = index.reshape(index.size,)
#terms that move along t
d = np.unique(index)
B = self.B[d].values
C = self.C[d].values
S = self.W[d, :].values
#Index transformation
indd = np.arange(self.output_dim)
indd[d] = np.arange(d.size)
index = indd[index]
#Check where wd becomes complex
wbool = C*C >= 4.*B
#Output related variables must be column-wise
t = X[:, 0].reshape(X.shape[0], 1)
B = B.reshape(B.size, 1)
C = C.reshape(C.size, 1)
alpha = .5*C
C2 = C*C
S2 = S*S
wbool2 = wbool[index]
ind2t = np.where(wbool2)
ind3t = np.where(np.logical_not(wbool2))
#Input related variables must be row-wise
lq = self.lengthscale.values.reshape(1, self.rank)
lq2 = lq*lq
gB = np.empty((t.size,))
gC = np.empty((t.size,))
glq = np.empty((t.size, lq.size))
gS = np.empty((t.size, lq.size))
indD = np.arange(B.size)
#(1) When wd is real
if np.any(np.logical_not(wbool)):
#Indexes of index and t related to (1)
t1 = t[ind3t]
ind = index[ind3t]
#Index transformation
d = np.asarray(np.where(np.logical_not(wbool))[0])
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
S2lq = S2[d]*(.5*lq)
c0 = S2lq*np.sqrt(np.pi)
w = .5*np.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
alpha2 = alphad*alphad
w2 = w*w
gam = alphad + 1j*w
gam2 = gam*gam
gamc = alphad - 1j*w
c1 = 0.5/alphad
c2 = 0.5/gam
c = c1 - c2
#DxQ terms
c0 = c0/w2
nu = (.5*lq)*gam
#Nx1 terms
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
egamt = np.exp(gamt)
egamct = np.exp(gamct)
ec = egamt*c2[ind] - egamct*c1[ind]
#NxQ terms
t_lq = t1/lq
t2_lq2 = -t_lq*t_lq
t_lq2 = t_lq/lq
et2_lq2 = np.exp(t2_lq2)
etlq2gamt = np.exp(t2_lq2 + gamt)
##Upsilon calculations
#Using wofz
wnu = wofz(1j*nu)
lwnu = np.log(wnu)
t2_lq2 = -t_lq*t_lq
upm = wnu[ind] - np.exp(t2_lq2 + gamt + np.log(wofz(1j*(t_lq + nu[ind]))))
upm[t1[:, 0] == 0, :] = 0.
nu2 = nu*nu
z1 = nu[ind] - t_lq
indv1 = np.where(z1.real >= 0.)
indv2 = np.where(z1.real < 0.)
upv = -np.exp(lwnu[ind] + gamt)
            if indv1[0].size > 0:
                upv[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1])))
            if indv2[0].size > 0:
upv[indv2] += np.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + np.log(2.))\
- np.exp(t2_lq2[indv2] + np.log(wofz(-1j*z1[indv2])))
upv[t1[:, 0] == 0, :] = 0.
#Gradient wrt S
Slq = S[d]*lq #For grad wrt S
c0_S = Slq*np.sqrt(np.pi)/w2
K01 = c0_S*c
gS[ind3t] = np.real(K01[ind]*upm) + np.real((c0_S[ind]*ec)*upv)
#For B and C
upmd = etlq2gamt - 1.
upvd = egamt - et2_lq2
# gradient wrt B
dw_dB = 0.5/w
dgam_dB = 1j*dw_dB
Ba1 = c0*(0.5*dgam_dB/gam2 + (0.5*lq2*gam*dgam_dB - 2.*dw_dB/w)*c)
Ba2_1 = c0*(dgam_dB*(0.5/gam2 - 0.25*lq2) + dw_dB/(w*gam))
Ba2_2 = c0*dgam_dB/gam
Ba3 = c0*(-0.25*lq2*gam*dgam_dB/alphad + dw_dB/(w*alphad))
Ba4_1 = (S2lq*lq)*dgam_dB/w2
Ba4 = Ba4_1*c
gB[ind3t] = np.sum(np.real(Ba1[ind]*upm) - np.real(((Ba2_1[ind] + Ba2_2[ind]*t1)*egamt - Ba3[ind]*egamct)*upv)\
+ np.real(Ba4[ind]*upmd) + np.real((Ba4_1[ind]*ec)*upvd), axis=1)
# gradient wrt C
dw_dC = - alphad*dw_dB
dgam_dC = 0.5 + 1j*dw_dC
Ca1 = c0*(-0.25/alpha2 + 0.5*dgam_dC/gam2 + (0.5*lq2*gam*dgam_dC - 2.*dw_dC/w)*c)
Ca2_1 = c0*(dgam_dC*(0.5/gam2 - 0.25*lq2) + dw_dC/(w*gam))
Ca2_2 = c0*dgam_dC/gam
Ca3_1 = c0*(0.25/alpha2 - 0.25*lq2*gam*dgam_dC/alphad + dw_dC/(w*alphad))
Ca3_2 = 0.5*c0/alphad
Ca4_1 = (S2lq*lq)*dgam_dC/w2
Ca4 = Ca4_1*c
gC[ind3t] = np.sum(np.real(Ca1[ind]*upm) - np.real(((Ca2_1[ind] + Ca2_2[ind]*t1)*egamt - (Ca3_1[ind] + Ca3_2[ind]*t1)*egamct)*upv)\
+ np.real(Ca4[ind]*upmd) + np.real((Ca4_1[ind]*ec)*upvd), axis=1)
#Gradient wrt lengthscale
#DxQ terms
la = (1./lq + nu*gam)*c0
la1 = la*c
c0l = (S2[d]/w2)*lq
la3 = c0l*c
gam_2 = .5*gam
glq[ind3t] = (la1[ind]*upm).real + ((la[ind]*ec)*upv).real\
+ (la3[ind]*(-gam_2[ind] + etlq2gamt*(-t_lq2 + gam_2[ind]))).real\
+ ((c0l[ind]*ec)*(-et2_lq2*(t_lq2 + gam_2[ind]) + egamt*gam_2[ind])).real
#(2) When w_d is complex
if np.any(wbool):
t1 = t[ind2t]
ind = index[ind2t]
#Index transformation
d = np.asarray(np.where(wbool)[0])
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
S2lq = S2[d]*(.25*lq)
c0 = S2lq*np.sqrt(np.pi)
w = .5*np.sqrt(C2[d]-4.*B[d])
w2 = -w*w
alphad = alpha[d]
alpha2 = alphad*alphad
gam = alphad - w
gamc = alphad + w
gam2 = gam*gam
gamc2 = gamc*gamc
c1 = .5/alphad
c21 = .5/gam
c22 = .5/gamc
c = c1 - c21
c2 = c1 - c22
#DxQ terms
c0 = c0/w2
nu = .5*lq*gam
nuc = .5*lq*gamc
#Nx1 terms
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
egamt = np.exp(gamt)
egamct = np.exp(gamct)
ec = egamt*c21[ind] - egamct*c1[ind]
ec2 = egamct*c22[ind] - egamt*c1[ind]
#NxQ terms
t_lq = t1/lq
t2_lq2 = -t_lq*t_lq
et2_lq2 = np.exp(t2_lq2)
etlq2gamct = np.exp(t2_lq2 + gamct)
etlq2gamt = np.exp(t2_lq2 + gamt)
#Upsilon Calculations using wofz
t2_lq2 = -t_lq*t_lq #Required when using wofz
wnu = np.real(wofz(1j*nu))
lwnu = np.log(wnu)
upm = wnu[ind] - np.exp(t2_lq2 + gamt + np.log(wofz(1j*(t_lq + nu[ind])).real))
upm[t1[:, 0] == 0., :] = 0.
nu2 = nu*nu
z1 = nu[ind] - t_lq
indv1 = np.where(z1 >= 0.)
indv2 = np.where(z1 < 0.)
upv = -np.exp(lwnu[ind] + gamt)
            if indv1[0].size > 0:
                upv[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real))
            if indv2[0].size > 0:
upv[indv2] += np.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + np.log(2.)) - np.exp(t2_lq2[indv2]\
+ np.log(wofz(-1j*z1[indv2]).real))
upv[t1[:, 0] == 0, :] = 0.
wnuc = wofz(1j*nuc).real
upmc = wnuc[ind] - np.exp(t2_lq2 + gamct + np.log(wofz(1j*(t_lq + nuc[ind])).real))
upmc[t1[:, 0] == 0., :] = 0.
lwnuc = np.log(wnuc)
nuc2 = nuc*nuc
z1 = nuc[ind] - t_lq
indv1 = np.where(z1 >= 0.)
indv2 = np.where(z1 < 0.)
upvc = -np.exp(lwnuc[ind] + gamct)
            if indv1[0].size > 0:
                upvc[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real))
            if indv2[0].size > 0:
upvc[indv2] += np.exp(nuc2[ind[indv2[0]], indv2[1]] + gamct[indv2[0], 0] + np.log(2.)) - np.exp(t2_lq2[indv2]\
+ np.log(wofz(-1j*z1[indv2]).real))
upvc[t1[:, 0] == 0, :] = 0.
#Gradient wrt S
#NxQ terms
c0_S = (S[d]/w2)*(lq*(np.sqrt(np.pi)*.5))
K011 = c0_S*c
K012 = c0_S*c2
gS[ind2t] = K011[ind]*upm + K012[ind]*upmc + (c0_S[ind]*ec)*upv + (c0_S[ind]*ec2)*upvc
            #These terms need to be cached; the C gradient also requires them
upmd = -1. + etlq2gamt
upvd = -et2_lq2 + egamt
upmdc = -1. + etlq2gamct
upvdc = -et2_lq2 + egamct
# Gradient wrt B
dgam_dB = 0.5/w
dgamc_dB = -dgam_dB
Ba1 = c0*(0.5*dgam_dB/gam2 + (0.5*lq2*gam*dgam_dB - 1./w2)*c)
Ba3 = c0*(-0.25*lq2*gam*dgam_dB/alphad + 0.5/(w2*alphad))
Ba4_1 = (S2lq*lq)*dgam_dB/w2
Ba4 = Ba4_1*c
Ba2_1 = c0*(dgam_dB*(0.5/gam2 - 0.25*lq2) + 0.5/(w2*gam))
Ba2_2 = c0*dgam_dB/gam
Ba1c = c0*(0.5*dgamc_dB/gamc2 + (0.5*lq2*gamc*dgamc_dB - 1./w2)*c2)
Ba3c = c0*(-0.25*lq2*gamc*dgamc_dB/alphad + 0.5/(w2*alphad))
Ba4_1c = (S2lq*lq)*dgamc_dB/w2
Ba4c = Ba4_1c*c2
Ba2_1c = c0*(dgamc_dB*(0.5/gamc2 - 0.25*lq2) + 0.5/(w2*gamc))
Ba2_2c = c0*dgamc_dB/gamc
gB[ind2t] = np.sum(Ba1[ind]*upm - ((Ba2_1[ind] + Ba2_2[ind]*t1)*egamt - Ba3[ind]*egamct)*upv\
+ Ba4[ind]*upmd + (Ba4_1[ind]*ec)*upvd\
+ Ba1c[ind]*upmc - ((Ba2_1c[ind] + Ba2_2c[ind]*t1)*egamct - Ba3c[ind]*egamt)*upvc\
+ Ba4c[ind]*upmdc + (Ba4_1c[ind]*ec2)*upvdc, axis=1)
##Gradient wrt C
dw_dC = 0.5*alphad/w
dgam_dC = 0.5 - dw_dC
dgamc_dC = 0.5 + dw_dC
S2lq2 = S2lq*lq
Ca1 = c0*(-0.25/alpha2 + 0.5*dgam_dC/gam2 + (0.5*lq2*gam*dgam_dC + alphad/w2)*c)
Ca2_1 = c0*(dgam_dC*(0.5/gam2 - 0.25*lq2) - 0.5*alphad/(w2*gam))
Ca2_2 = c0*dgam_dC/gam
Ca3_1 = c0*(0.25/alpha2 - 0.25*lq2*gam*dgam_dC/alphad - 0.5/w2)
Ca3_2 = 0.5*c0/alphad
Ca4_1 = S2lq2*(dgam_dC/w2)
Ca4 = Ca4_1*c
Ca1c = c0*(-0.25/alpha2 + 0.5*dgamc_dC/gamc2 + (0.5*lq2*gamc*dgamc_dC + alphad/w2)*c2)
Ca2_1c = c0*(dgamc_dC*(0.5/gamc2 - 0.25*lq2) - 0.5*alphad/(w2*gamc))
Ca2_2c = c0*dgamc_dC/gamc
Ca3_1c = c0*(0.25/alpha2 - 0.25*lq2*gamc*dgamc_dC/alphad - 0.5/w2)
Ca3_2c = 0.5*c0/alphad
Ca4_1c = S2lq2*(dgamc_dC/w2)
Ca4c = Ca4_1c*c2
gC[ind2t] = np.sum(Ca1[ind]*upm - ((Ca2_1[ind] + Ca2_2[ind]*t1)*egamt - (Ca3_1[ind] + Ca3_2[ind]*t1)*egamct)*upv\
+ Ca4[ind]*upmd + (Ca4_1[ind]*ec)*upvd\
+ Ca1c[ind]*upmc - ((Ca2_1c[ind] + Ca2_2c[ind]*t1)*egamct - (Ca3_1c[ind] + Ca3_2c[ind]*t1)*egamt)*upvc\
+ Ca4c[ind]*upmdc + (Ca4_1c[ind]*ec2)*upvdc, axis=1)
#Gradient wrt lengthscale
#DxQ terms
la = (1./lq + nu*gam)*c0
lac = (1./lq + nuc*gamc)*c0
la1 = la*c
la1c = lac*c2
t_lq2 = t_lq/lq
c0l = (S2[d]/w2)*(.5*lq)
la3 = c0l*c
la3c = c0l*c2
gam_2 = .5*gam
gamc_2 = .5*gamc
glq[ind2t] = la1c[ind]*upmc + (lac[ind]*ec2)*upvc\
+ la3c[ind]*(-gamc_2[ind] + etlq2gamct*(-t_lq2 + gamc_2[ind]))\
+ (c0l[ind]*ec2)*(-et2_lq2*(t_lq2 + gamc_2[ind]) + egamct*gamc_2[ind])\
+ la1[ind]*upm + (la[ind]*ec)*upv\
+ la3[ind]*(-gam_2[ind] + etlq2gamt*(-t_lq2 + gam_2[ind]))\
+ (c0l[ind]*ec)*(-et2_lq2*(t_lq2 + gam_2[ind]) + egamt*gam_2[ind])
return glq, gS, gB, gC
def _gkfu(self, X, index, Z, index2):
index = index.reshape(index.size,)
#TODO: reduce memory usage
#terms that move along t
d = np.unique(index)
B = self.B[d].values
C = self.C[d].values
S = self.W[d, :].values
#Index transformation
indd = np.arange(self.output_dim)
indd[d] = np.arange(d.size)
index = indd[index]
#Check where wd becomes complex
wbool = C*C >= 4.*B
#t column
t = X[:, 0].reshape(X.shape[0], 1)
C = C.reshape(C.size, 1)
B = B.reshape(B.size, 1)
C2 = C*C
#z row
z = Z[:, 0].reshape(1, Z.shape[0])
index2 = index2.reshape(index2.size,)
lq = self.lengthscale.values.reshape((1, self.rank))
lq2 = lq*lq
alpha = .5*C
wbool2 = wbool[index]
ind2t = np.where(wbool2)
ind3t = np.where(np.logical_not(wbool2))
#kfu = np.empty((t.size, z.size))
glq = np.empty((t.size, z.size))
gSdq = np.empty((t.size, z.size))
gB = np.empty((t.size, z.size))
gC = np.empty((t.size, z.size))
indD = np.arange(B.size)
#(1) when wd is real
if np.any(np.logical_not(wbool)):
            #Indexes of index and t related to (1)
t1 = t[ind3t]
ind = index[ind3t]
#Index transformation
d = np.asarray(np.where(np.logical_not(wbool))[0])
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*np.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
gam = alphad - 1j*w
gam_2 = .5*gam
S_w = S[d]/w
S_wpi = S_w*(.5*np.sqrt(np.pi))
#DxQ terms
c0 = S_wpi*lq #lq*Sdq*sqrt(pi)/(2w)
nu = gam*lq
nu2 = 1.+.5*(nu*nu)
nu *= .5
#1xM terms
z_lq = z/lq[0, index2]
z_lq2 = -z_lq*z_lq
#NxQ terms
t_lq = t1/lq
#DxM terms
gamt = -gam[ind]*t1
#NxM terms
zt_lq = z_lq - t_lq[:, index2]
zt_lq2 = -zt_lq*zt_lq
ezt_lq2 = -np.exp(zt_lq2)
ezgamt = np.exp(z_lq2 + gamt)
# Upsilon calculations
fullind = np.ix_(ind, index2)
upsi = - np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind]))))
tz = t1-z
z1 = zt_lq + nu[fullind]
indv1 = np.where(z1.real >= 0.)
indv2 = np.where(z1.real < 0.)
            if indv1[0].size > 0:
                upsi[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1])))
            if indv2[0].size > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] += np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\
- np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2])))
upsi[t1[:, 0] == 0., :] = 0.
#Gradient wrt S
#DxQ term
Sa1 = lq*(.5*np.sqrt(np.pi))/w
gSdq[ind3t] = Sa1[np.ix_(ind, index2)]*upsi.imag
#Gradient wrt lq
la1 = S_wpi*nu2
la2 = S_w*lq
uplq = ezt_lq2*(gam_2[ind])
uplq += ezgamt*(-z_lq/lq[0, index2] + gam_2[ind])
glq[ind3t] = (la1[np.ix_(ind, index2)]*upsi).imag
glq[ind3t] += la2[np.ix_(ind, index2)]*uplq.imag
#Gradient wrt B
#Dx1 terms
dw_dB = .5/w
dgam_dB = -1j*dw_dB
#DxQ terms
Ba1 = -c0*dw_dB/w #DXQ
Ba2 = c0*dgam_dB #DxQ
Ba3 = lq2*gam_2 #DxQ
Ba4 = (dgam_dB*S_w)*(.5*lq2) #DxQ
gB[ind3t] = ((Ba1[np.ix_(ind, index2)] + Ba2[np.ix_(ind, index2)]*(Ba3[np.ix_(ind, index2)] - (t1-z)))*upsi).imag\
+ (Ba4[np.ix_(ind, index2)]*(ezt_lq2 + ezgamt)).imag
#Gradient wrt C (it uses some calculations performed in B)
#Dx1 terms
dw_dC = -.5*alphad/w
dgam_dC = 0.5 - 1j*dw_dC
#DxQ terms
Ca1 = -c0*dw_dC/w #DXQ
Ca2 = c0*dgam_dC #DxQ
Ca4 = (dgam_dC*S_w)*(.5*lq2) #DxQ
gC[ind3t] = ((Ca1[np.ix_(ind, index2)] + Ca2[np.ix_(ind, index2)]*(Ba3[np.ix_(ind, index2)] - (t1-z)))*upsi).imag\
+ (Ca4[np.ix_(ind, index2)]*(ezt_lq2 + ezgamt)).imag
#(2) when wd is complex
if np.any(wbool):
#Indexes of index and t related to (2)
t1 = t[ind2t]
ind = index[ind2t]
#Index transformation
d = np.asarray(np.where(wbool)[0])
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*np.sqrt(C2[d] - 4.*B[d])
w2 = w*w
alphad = alpha[d]
gam = alphad - w
gamc = alphad + w
#DxQ terms
S_w= -S[d]/w #minus is given by j*j
S_wpi = S_w*(.25*np.sqrt(np.pi))
c0 = S_wpi*lq
gam_2 = .5*gam
gamc_2 = .5*gamc
nu = gam*lq
nuc = gamc*lq
nu2 = 1.+.5*(nu*nu)
nuc2 = 1.+.5*(nuc*nuc)
nu *= .5
nuc *= .5
#1xM terms
z_lq = z/lq[0, index2]
z_lq2 = -z_lq*z_lq
#Nx1
gamt = -gam[ind]*t1
gamct = -gamc[ind]*t1
#NxQ terms
t_lq = t1/lq[0, index2]
#NxM terms
zt_lq = z_lq - t_lq
zt_lq2 = -zt_lq*zt_lq
ezt_lq2 = -np.exp(zt_lq2)
ezgamt = np.exp(z_lq2 + gamt)
ezgamct = np.exp(z_lq2 + gamct)
# Upsilon calculations
fullind = np.ix_(ind, index2)
upsi1 = - np.exp(z_lq2 + gamct + np.log(wofz(1j*(z_lq + nuc[fullind])).real))
tz = t1-z
z1 = zt_lq + nuc[fullind]
indv1 = np.where(z1 >= 0.)
indv2 = np.where(z1 < 0.)
            if indv1[0].size > 0:
                upsi1[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real))
            if indv2[0].size > 0:
nuac2 = nuc[ind[indv2[0]], index2[indv2[1]]]**2
upsi1[indv2] += np.exp(nuac2 - gamc[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\
- np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real))
upsi1[t1[:, 0] == 0., :] = 0.
upsi2 = - np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind])).real))
z1 = zt_lq + nu[fullind]
indv1 = np.where(z1 >= 0.)
indv2 = np.where(z1 < 0.)
            if indv1[0].size > 0:
                upsi2[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real))
            if indv2[0].size > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi2[indv2] += np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\
- np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real))
upsi2[t1[:, 0] == 0., :] = 0.
#Gradient wrt lq
la1 = S_wpi*nu2
la1c = S_wpi*nuc2
la2 = S_w*(.5*lq)
uplq = ezt_lq2*(gamc_2[ind]) + ezgamct*(-z_lq/lq[0, index2] + gamc_2[ind])\
- ezt_lq2*(gam_2[ind]) - ezgamt*(-z_lq/lq[0, index2] + gam_2[ind])
glq[ind2t] = la1c[np.ix_(ind, index2)]*upsi1 - la1[np.ix_(ind, index2)]*upsi2\
+ la2[np.ix_(ind, index2)]*uplq
#Gradient wrt S
Sa1 = (lq*(-.25*np.sqrt(np.pi)))/w
gSdq[ind2t] = Sa1[np.ix_(ind, index2)]*(upsi1 - upsi2)
#Gradient wrt B
#Dx1 terms
dgam_dB = .5/w
dgamc_dB = -dgam_dB
#DxQ terms
Ba1 = .5*(c0/w2)
Ba2 = c0*dgam_dB
Ba3 = lq2*gam_2
Ba4 = (dgam_dB*S_w)*(.25*lq2)
Ba2c = c0*dgamc_dB
Ba3c = lq2*gamc_2
Ba4c = (dgamc_dB*S_w)*(.25*lq2)
gB[ind2t] = (Ba1[np.ix_(ind, index2)] + Ba2c[np.ix_(ind, index2)]*(Ba3c[np.ix_(ind, index2)] - (t1-z)))*upsi1\
+ Ba4c[np.ix_(ind, index2)]*(ezt_lq2 + ezgamct)\
- (Ba1[np.ix_(ind, index2)] + Ba2[np.ix_(ind, index2)]*(Ba3[np.ix_(ind, index2)] - (t1-z)))*upsi2\
- Ba4[np.ix_(ind, index2)]*(ezt_lq2 + ezgamt)
#Gradient wrt C
#Dx1 terms
dgam_dC = 0.5 - .5*(alphad/w)
dgamc_dC = 0.5 + .5*(alphad/w)
#DxQ terms
Ca1 = -c0*(.5*alphad/w2)
Ca2 = c0*dgam_dC
Ca4 = (dgam_dC*S_w)*(.25*lq2)
Ca2c = c0*dgamc_dC
Ca4c = (dgamc_dC*S_w)*(.25*lq2)
gC[ind2t] = (Ca1[np.ix_(ind, index2)] + Ca2c[np.ix_(ind, index2)]*(Ba3c[np.ix_(ind, index2)] - (t1-z)))*upsi1\
+ Ca4c[np.ix_(ind, index2)]*(ezt_lq2 + ezgamct)\
- (Ca1[np.ix_(ind, index2)] + Ca2[np.ix_(ind, index2)]*(Ba3[np.ix_(ind, index2)] - (t1-z)))*upsi2\
- Ca4[np.ix_(ind, index2)]*(ezt_lq2 + ezgamt)
return glq, gSdq, gB, gC
#TODO: reduce memory usage
def _gkfu_z(self, X, index, Z, index2): #Kfu(t,z)
index = index.reshape(index.size,)
#terms that move along t
d = np.unique(index)
B = self.B[d].values
C = self.C[d].values
S = self.W[d, :].values
#Index transformation
indd = np.arange(self.output_dim)
indd[d] = np.arange(d.size)
index = indd[index]
#Check where wd becomes complex
wbool = C*C >= 4.*B
wbool2 = wbool[index]
ind2t = np.where(wbool2)
ind3t = np.where(np.logical_not(wbool2))
#t column
t = X[:, 0].reshape(X.shape[0], 1)
C = C.reshape(C.size, 1)
B = B.reshape(B.size, 1)
C2 = C*C
alpha = .5*C
#z row
z = Z[:, 0].reshape(1, Z.shape[0])
index2 = index2.reshape(index2.size,)
lq = self.lengthscale.values.reshape((1, self.rank))
#kfu = np.empty((t.size, z.size))
gz = np.empty((t.size, z.size))
indD = np.arange(B.size)
#(1) when wd is real
if np.any(np.logical_not(wbool)):
            #Indexes of index and t related to (1)
t1 = t[ind3t]
ind = index[ind3t]
#TODO: Find a better way of doing this
#Index transformation
d = np.asarray(np.where(np.logical_not(wbool))[0])
indd = indD.copy()
indd[d] = np.arange(d.size)
ind = indd[ind]
#Dx1 terms
w = .5*np.sqrt(4.*B[d] - C2[d])
alphad = alpha[d]
gam = alphad - 1j*w
S_w = S[d]/w
S_wpi =S_w*(.5*np.sqrt(np.pi))
#DxQ terms
c0 = S_wpi*lq #lq*Sdq*sqrt(pi)/(2w)
nu = (.5*gam)*lq
#1xM terms
z_lq = z/lq[0, index2]
z_lq2 = -z_lq*z_lq
#NxQ terms
t_lq = t1/lq
#DxM terms
gamt = -gam[ind]*t1
#NxM terms
zt_lq = z_lq - t_lq[:, index2]
zt_lq2 = -zt_lq*zt_lq
#ezt_lq2 = -np.exp(zt_lq2)
ezgamt = np.exp(z_lq2 + gamt)
# Upsilon calculations
fullind = np.ix_(ind, index2)
upsi = - np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind]))))
tz = t1-z
z1 = zt_lq + nu[fullind]
indv1 = np.where(z1.real >= 0.)
indv2 = np.where(z1.real < 0.)
            if indv1[0].size > 0:
                upsi[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1])))
            if indv2[0].size > 0:
nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2
upsi[indv2] += np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\
- np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2])))
upsi[t1[:, 0] == 0., :] = 0.
#Gradient wrt z
za1 = c0*gam
#za2 = S_w
gz[ind3t] = (za1[np.ix_(ind, index2)]*upsi).imag + S_w[np.ix_(ind, index2)]*ezgamt.imag
#(2) when wd is complex
        if np.any(wbool):
import numpy as np
import argparse
# TODO: need to be changed when changing dataset
parser = argparse.ArgumentParser()
parser.add_argument("--label_count", default='../lookup/label_frequency.npy', type=str)
parser.add_argument("--lookup_table_loc", default='../lookup/b_500/', type=str)
parser.add_argument('--num_hash_table', type=int, default=4, help="number of hash tables")
parser.add_argument('--R', type=int, default=500, help="hash table size")
parser.add_argument('--seed', type=int, default=101, help='random seed (default: 101)')
args = parser.parse_args()
nhash = args.num_hash_table
R = args.R
label_count = np.load(args.label_count)
lookup_tables = []
for i in range(nhash):
lookup_tables += [np.load(args.lookup_table_loc+'bucket_order_'+str(i)+'.npy')]
lookup_tables = np.stack(lookup_tables)
bucket_count = np.zeros((nhash, R))
for i in range(R):
for j in range(nhash):
        bucket_count[j, i] = np.sum(label_count[lookup_tables[j]==i])
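# Vectorized equivalent of the double loop above (a sketch; assumes each row of
# lookup_tables holds integer bucket ids in [0, R) aligned with label_count):
bucket_count_vec = np.stack([
    np.bincount(lookup_tables[j].astype(np.int64), weights=label_count, minlength=R)
    for j in range(nhash)
])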
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from matplotlib import rc
from data_management import load_dataset
from find_adversarial import err_measure_l2, grid_attack
# ----- load configuration -----
import config # isort:skip
import config_robustness as cfg_rob # isort:skip
from config_robustness import methods # isort:skip
# ------ general setup ----------
device = cfg_rob.device
save_path = os.path.join(config.RESULTS_PATH, "attacks")
save_results = os.path.join(save_path, "example_S6_adv.pkl")
do_plot = True
save_plot = True
# ----- data prep -----
X_test, C_test, Y_test = [
tmp.unsqueeze(-2).to(device)
for tmp in load_dataset(config.set_params["path"], subset="test")
]
# ----- attack setup -----
# select samples
sample = 6
it_init = 200
keep_init = 100
# select range relative noise
noise_min = 1e-3
noise_max = 0.06
noise_steps = 50
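# Relative noise levels for the attack grid: noise_steps values, log-spaced
# between noise_min and noise_max (0.1% to 6% relative noise).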
noise_rel_grid = torch.tensor(
    np.logspace(np.log10(noise_min), np.log10(noise_max), num=noise_steps)  # grid length inferred from noise_steps above
)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: <NAME> (<EMAIL>)
# @Filename: tiledb.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import itertools
import astropy
import cycler
import matplotlib.pyplot as plt
import numpy
from matplotlib import animation
import lvmsurveysim.target
from lvmsurveysim.schedule.tiledb import TileDB
from lvmsurveysim.schedule.scheduler import Scheduler
from lvmsurveysim.schedule.plan import ObservingPlan
from lvmsurveysim import IFU, config
from lvmsurveysim.schedule.plan import ObservingPlan
from lvmsurveysim.utils.plot import __MOLLWEIDE_ORIGIN__, get_axes, transform_patch_mollweide, convert_to_mollweide
numpy.seterr(invalid='raise')
__all__ = ['Simulator']
class Simulator(object):
"""Simulates an observing schedule for a list of targets (tile database) following and observing plan.
Parameters
----------
tiledb : ~lvmsurveysim.schedule.tiledb.TileDB
The `~lvmsurveysim.schedule.tiledb.TileDB` instance with the table of
tiles to schedule.
    observing_plan : `.ObservingPlan` or None
The `.ObservingPlan` to use (one for each observatory).
If `None`, it will be created from the ``observing_plan``
section in the configuration file. Contains dates and sun/moon data for the
duration of the survey as well as Observatory data.
ifu : ~lvmsurveysim.ifu.IFU
The `~lvmsurveysim.ifu.IFU` to use. Defaults to the one from the
configuration file. Used only for plotting the survey footprint.
Attributes
----------
tiledb : ~lvmsurveysim.schedule.tiledb.TileDB
Instance of the tile database to observe.
schedule : ~astropy.table.Table
An astropy table with the results of the scheduling. Includes
information about the JD of each observation, the target observed,
the index of the pointing in the target tiling, coordinates, etc.
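
    Examples
    --------
    A minimal end-to-end sketch (assumes a tile database previously saved to
    disk and an observing plan configured in the configuration file)::

        tiledb = TileDB.load('lvm_tiledb')   # hypothetical path
        sim = Simulator(tiledb)
        sim.run(progress_bar=True)
        sim.print_statistics()
        sim.save('lvm_sim_results', overwrite=True)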
"""
def __init__(self, tiledb, observing_plan=None, ifu=None):
assert isinstance(tiledb, lvmsurveysim.schedule.tiledb.TileDB), \
'tiledb must be a lvmsurveysim.schedule.tiledb.TileDB instances.'
# get rid of the special tiles, we do not need them for the simulator
tdb = tiledb.tile_table
tiledb.tile_table = tdb[numpy.where(tdb['TileID'] >= tiledb.tileid_start)[0]]
if observing_plan is None:
observing_plan = self._create_observing_plan()
assert isinstance(observing_plan, ObservingPlan), 'observing_plan is not an instance of ObservingPlan.'
self.zenith_avoidance = config['scheduler']['zenith_avoidance']
self.time_step = config['scheduler']['timestep']
self.observing_plan = observing_plan
self.tiledb = tiledb
self.targets = tiledb.targets
self.ifu = ifu or IFU.from_config()
self.schedule = None
def __repr__(self):
return (f'<Simulator (observing_plan={self.observing_plan.observatory.name}, '
f'tiles={len(self.tiledb.tile_table)})>')
def save(self, path, overwrite=False):
"""
Saves the results of the scheduling simulation to a FITS file.
"""
assert isinstance(self.schedule, astropy.table.Table), \
'cannot save empty schedule. Execute Scheduler.run() first.'
targfile = str(self.targets.filename) if self.targets.filename != None else 'NA'
self.schedule.meta['targfile'] = targfile
self.schedule.write(path+'.fits', format='fits', overwrite=overwrite)
@classmethod
def load(cls, path, tiledb=None, observing_plan=None):
"""Creates a new instance from a schedule file.
Parameters
----------
path : str or ~pathlib.Path
The path to the schedule file and the basename, no extension. The
routine expects to find path.fits and path.npy
tiledb : ~lvmsurveysim.schedule.tiledb.TileDB or path-like
Instance of the tile database to observe.
observing_plan : `.ObservingPlan` or None
The `.ObservingPlan` to use (one for each observatory).
"""
schedule = astropy.table.Table.read(path+'.fits')
if not isinstance(tiledb, lvmsurveysim.schedule.tiledb.TileDB):
assert tiledb != None and tiledb != 'NA', \
'invalid or unavailable tiledb file path.'
tiledb = TileDB.load(tiledb)
observing_plan = observing_plan or []
sim = cls(tiledb, observing_plan=observing_plan)
sim.schedule = schedule
return sim
def run(self, progress_bar=True):
"""Schedules the pointings for the whole survey defined
in the observing plan.
Parameters
----------
progress_bar : bool
If `True`, shows a progress bar.
"""
# Make self.schedule a list so that we can add rows. Later we'll make
# this an Astropy Table.
self.schedule = []
plan = self.observing_plan
# Instance of the Scheduler
scheduler = Scheduler(plan)
# observed exposure time for each pointing
        observed = numpy.zeros(len(self.tiledb.tile_table), dtype=numpy.float64)
# range of dates for the survey
min_date = numpy.min(plan['JD'])
max_date = numpy.max(plan['JD'])
dates = range(min_date, max_date + 1)
if progress_bar:
generator = astropy.utils.console.ProgressBar(dates)
else:
generator = dates
for jd in generator:
if progress_bar is False:
print(f'scheduling JD={jd}.')
# Skips JDs not found in the plan or those that don't have good weather.
if jd not in plan['JD'] or plan[plan['JD'] == jd]['is_clear'][0] == 0:
continue
observed += self.schedule_one_night(jd, scheduler, observed)
# Convert schedule to Astropy Table.
self.schedule = astropy.table.Table(
rows=self.schedule,
names=['JD', 'observatory', 'target', 'group', 'tileid', 'index', 'ra', 'dec', 'pa',
'airmass', 'lunation', 'shadow_height', "moon_dist", 'lst', 'exptime', 'totaltime'],
dtype=[float, 'S10', 'S20', 'S20', int, int, float, float, float,
float, float, float, float, float, float, float])
def schedule_one_night(self, jd, scheduler, observed):
"""Schedules a single night at a single observatory.
This method is not intended to be called directly. Instead, use `.run`.
Parameters
----------
jd : int
The Julian Date to schedule. Must be included in ``plan``.
scheduler : .Scheduler
The Scheduler instance that will determine the observing sequence.
observed : ~numpy.array
An array of the length of the tiledb that records the observing time
accumulated on each tile thus far
Returns
-------
exposure_times : `~numpy.ndarray`
Array with the exposure times in seconds added to each tile during
this night.
"""
# initialize the scheduler for the night
scheduler.prepare_for_night(jd, self.observing_plan, self.tiledb)
# shortcut
tdb = self.tiledb.tile_table
# begin at twilight
current_jd = scheduler.evening_twi
# While the current time is before morning twilight ...
while current_jd < scheduler.morning_twi:
# obtain the next tile to observe
observed_idx, current_lst, hz, alt, lunation = scheduler.get_optimal_tile(current_jd, observed)
if observed_idx == -1:
# nothing available
self._record_observation(current_jd, self.observing_plan.observatory,
lst=current_lst,
exptime=self.time_step,
totaltime=self.time_step)
current_jd += (self.time_step) / 86400.0
continue
# observe it, give it one quantum of exposure
exptime = tdb['VisitExptime'].data[observed_idx]
observed[observed_idx] += exptime
# collect observation data to put in table
tileid_observed = tdb['TileID'].data[observed_idx]
target_index = tdb['TargetIndex'].data[observed_idx]
target_name = self.targets[target_index].name
groups = self.targets[target_index].groups
target_group = groups[0] if groups else 'None'
target_overhead = self.targets[target_index].overhead
# Get the index of the first value in index_to_target that matches
# the index of the target.
target_index_first = numpy.nonzero(tdb['TargetIndex'].data == target_index)[0][0]
# Get the index of the pointing within its target.
pointing_index = observed_idx - target_index_first
# Record angular distance to moon
dist_to_moon = scheduler.moon_to_pointings[observed_idx]
# Update the table with the schedule.
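            # Plane-parallel approximation: airmass = sec(zenith distance).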
airmass = 1.0 / numpy.cos(numpy.radians(90.0 - alt))
self._record_observation(current_jd, self.observing_plan.observatory,
target_name=target_name,
target_group=target_group,
tileid = tileid_observed,
pointing_index=pointing_index,
ra=tdb['RA'].data[observed_idx],
dec=tdb['DEC'].data[observed_idx],
pa=tdb['PA'].data[observed_idx],
airmass=airmass,
lunation=lunation,
shadow_height= hz, #hz[valid_priority_idx[obs_tile_idx]],
dist_to_moon=dist_to_moon,
lst=current_lst,
exptime=exptime,
totaltime=exptime * target_overhead)
current_jd += exptime * target_overhead / 86400.0
return observed
def animate_survey(self, filename='lvm_survey.mp4', step=100,
observatory=None, projection='mollweide'):
"""Create an animation of the survey progress and save as an mp4 file.
Parameters
----------
filename : str
Name of the mp4 file, defaults to ``'lvm_survey.mp4'``.
step : int
Number of observations per frame of movie.
observatory : str
Either ``'LCO'`` or ``'APO'`` or `None` (plots both).
projection : str
Which projection of the sphere to use. Defaults to Mollweide.
"""
data = self.schedule[self.schedule['target'] != '-']
if observatory:
data = data[data['observatory'] == observatory]
ll = int(len(data) / step)
x,y = convert_to_mollweide(data['ra'], data['dec'])
tt = [target.name for target in self.targets]
g = numpy.array([tt.index(i) for i in data['target']], dtype=float)
t = data['JD']
fig, ax = get_axes(projection=projection)
# scat = ax.scatter(x[:1], y[:1], c=g[:1], s=1, edgecolor=None, edgecolors=None)
scat = ax.scatter(x, y, c=g % 19, s=0.05, edgecolor=None, edgecolors=None, cmap='tab20')
# fig.show()
# return
def animate(ii):
if ii % 10 == 0:
print('%.1f %% done\r' % (ii / ll * 100))
scat.set_offsets(numpy.stack((x[:ii * step], y[:ii * step]), axis=0).T)
scat.set_array(g[:ii * step])
ax.set_title(str(t[ii]))
return scat,
anim = animation.FuncAnimation(fig, animate, frames=range(1, ll), interval=1,
blit=True, repeat=False)
anim.save(filename, fps=24, extra_args=['-vcodec', 'libx264'])
def plot(self, observatory=None, projection='mollweide', tname=None, fast=False, annotate=False, edge=False):
"""Plots the observed pointings.
Parameters
----------
observatory : str
Plot only the points for that observatory. Otherwise, plots all
the pointings.
projection : str
The projection to use, either ``'mollweide'`` or ``'rectangular'``.
tname : str
Select only a particular target name to plot
fast : bool
Plot IFU sized and shaped patches if `False`. This is the default.
Allows accurate zooming and viewing. If `True`, plot scatter-plot
dots instead of IFUs, for speed sacrificing accuracy.
This is MUCH faster.
annotate : bool
Write the targets' names next to the target coordinates. Implies
``fast=True``.
edge : bool
Draw tile edges and make tiles partly transparent to better judge overlap.
Makes zoomed-out view look odd, so use default False.
Returns
-------
figure : `matplotlib.figure.Figure`
The figure with the plot.
"""
if annotate is True:
fast = True
color_cycler = cycler.cycler(bgcolor=['b', 'r', 'g', 'y', 'm', 'c', 'k'])
fig, ax = get_axes(projection=projection)
if tname != None:
data = self.schedule[self.schedule['target'] == tname]
else:
data = self.schedule[self.schedule['target'] != '-']
if observatory:
data = data[data['observatory'] == observatory]
if fast is True:
if projection == 'mollweide':
x,y = convert_to_mollweide(data['ra'], data['dec'])
else:
x,y = data['ra'], data['dec']
tt = [target.name for target in self.targets]
g = numpy.array([tt.index(i) for i in data['target']], dtype=float)
ax.scatter(x, y, c=g % 19, s=0.05, edgecolor=None, edgecolors=None, cmap='tab20')
if annotate is True:
_, text_indices = numpy.unique(g, return_index=True)
for i in range(len(tt)):
plt.text(x[text_indices[i]], y[text_indices[i]], tt[i], fontsize=9)
else:
for ii, sty in zip(range(len(self.targets)), itertools.cycle(color_cycler)):
target = self.targets[ii]
name = target.name
target_data = data[data['target'] == name]
if edge:
patches = [self.ifu.get_patch(scale=target.telescope.plate_scale, centre=[p['ra'], p['dec']], pa=p['pa'],
edgecolor='k', linewidth=1, alpha=0.5, facecolor=sty['bgcolor'])[0]
for p in target_data]
else:
patches = [self.ifu.get_patch(scale=target.telescope.plate_scale, centre=[p['ra'], p['dec']], pa=p['pa'],
edgecolor='None', linewidth=0.0, facecolor=sty['bgcolor'])[0]
for p in target_data]
if projection == 'mollweide':
patches = [transform_patch_mollweide(patch) for patch in patches]
for patch in patches:
ax.add_patch(patch)
if observatory != None:
ax.set_title(f'Observatory: {observatory}')
return fig
def _create_observing_plan(self):
"""Returns an `.ObservingPlan` from the configuration file."""
observatory = config['observing_plan']
obs_data = config['observing_plan'][observatory]
start_date = obs_data['start_date']
end_date = obs_data['end_date']
return ObservingPlan(start_date, end_date, observatory=observatory)
def _record_observation(self, jd, observatory, target_name='-', target_group='-',
tileid=-1, pointing_index=-1, ra=-999., dec=-999., pa=-999.,
airmass=-999., lunation=-999., shadow_height=-999., dist_to_moon=-999.,
lst=-999.,
exptime=0., totaltime=0.):
"""Adds a row to the schedule."""
self.schedule.append((jd, observatory, target_name, target_group, tileid, pointing_index,
ra, dec, pa, airmass, lunation, shadow_height, dist_to_moon, lst, exptime,
totaltime))
def get_target_time(self, tname, group=False, observatory=None, lunation=None,
return_lst=False):
"""Returns the JDs or LSTs for a target at an observatory.
Parameters
----------
tname : str
The name of the target or group. Use ``'-'`` for unused time.
group : bool
If not true, ``tname`` will be the name of a group not a single
target.
observatory : str
The observatory to filter for.
lunation : list
Restrict the data to a range in lunation. Defaults to returning
all lunations. Set to ``[lmin, lmax]`` to return values of
``lmin < lunation <= lmax``.
return_lst : bool
If `True`, returns an array with the LSTs of all the unobserved
times.
Returns
-------
table : `~numpy.ndarray`
An array containing the times the target is observed at an
observatory, as JDs. If ``return_lst=True`` returns an array of
the corresponding LSTs.
"""
column = 'group' if group is True else 'target'
t = self.schedule[self.schedule[column] == tname]
if observatory:
t = t[t['observatory'] == observatory]
if lunation != None:
t = t[(t['lunation'] > lunation[0]) * (t['lunation'] <= lunation[1])]
if return_lst:
return t['lst'].data
else:
return t['JD'].data
def print_statistics(self, out_file=None, out_format="ascii", overwrite_out=True, observatory=None, targets=None, return_table=False):
"""Prints a summary of observations at a given observatory.
Parameters
----------
observatory : str
The observatory to filter for.
targets : `~lvmsurveysim.target.TargetList`
The targets to summarize. If `None`, use ``self.targets``.
return_table : bool
If `True`, return a `~astropy.table.Table` with the results.
out_file : str
Outfile to write statistics.
out_format : str
Outfile format consistent with astropy.table dumps
"""
if targets is None:
targets = self.targets
names = [t.name for t in targets]
time_on_target = {} # time spent exposing target
exptime_on_target = {} # total time (exp + overhead) on target
tile_area = {} # area of a single tile
target_ntiles = {} # number of tiles in a target tiling
target_ntiles_observed = {} # number of observed tiles
target_nvisits = {} # number of visits for each tile
surveytime = 0.0 # total time of survey
names.append('-') # deals with unused time
for tname, i in zip(names, range(len(names))):
if (tname != '-'):
target = self.targets[i]
tile_area[tname] = target.get_pixarea(ifu=self.ifu)
target_ntiles[tname] = len(numpy.where(self.tiledb.tile_table['TargetIndex'] == i)[0])
target_nvisits[tname] = float(target.n_exposures / target.min_exposures)
else:
tile_area[tname] = -999
target_ntiles[tname] = -999
target_nvisits[tname] = 1
tdata = self.schedule[self.schedule['target'] == tname]
if observatory:
tdata = tdata[tdata['observatory'] == observatory]
exptime_on_target[tname] = numpy.sum(tdata['exptime'].data)
target_ntiles_observed[tname] = len(tdata) / target_nvisits[tname]
target_total_time = numpy.sum(tdata['totaltime'].data)
time_on_target[tname] = target_total_time
surveytime += target_total_time
# targets that completely overlap with others have no tiles
for t in self.targets:
if target_ntiles[t.name] == 0:
print(t.name + ' has no tiles')
target_ntiles[t.name] = 1
rows = [
(t if t != '-' else 'unused',
            float(target_ntiles[t]),
numpy.around(target_ntiles_observed[t], decimals=2),
numpy.around(time_on_target[t] / 3600.0, decimals=2),
numpy.around(exptime_on_target[t] / 3600.0, decimals=2),
numpy.around(time_on_target[t] / surveytime, decimals=2),
numpy.around(target_ntiles_observed[t] * tile_area[t],
decimals=2) if t != '-' else -999,
numpy.around(float(target_ntiles_observed[t]) / float(target_ntiles[t]),
decimals=2) if t != '-' else -999)
for t in names]
stats = astropy.table.Table(rows=rows,
names=['Target', 'tiles', 'tiles_obs', 'tottime/h',
'exptime/h', 'timefrac', 'area', 'areafrac'],
dtype=('S8', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4'))
print('%s :' % (observatory if observatory != None else 'APO+LCO'))
stats.pprint(max_lines=-1, max_width=-1)
if out_file != None:
stats.write(out_file, format=out_format, overwrite=overwrite_out)
if return_table:
return stats
def plot_survey(self, observatory=None, bin_size=30., targets=None, groups=None,
use_groups=False, use_primary_group=True,
show_ungrouped=True, cumulative=False, lst=False,
lunation=None,
show_unused=True, skip_fast=False, show_mpld3=False):
"""Plot the hours spent on target.
Parameters
----------
observatory : str
The observatory to plot. If `None`, all observatories.
bin_size : int
The number of days in each bin of the plot.
targets : list
A list with the names of the targets to plot. If empty, plots all
targets.
groups : list
A list with the names of the groups to plot. If empty, plots all
groups.
use_groups : bool
If set, the targets are grouped together using the
``Target.groups`` list.
use_primary_group : bool
If `True`, a target will only be added to its primary group (the
first one in the group list). Only used when ``use_groups=True``.
show_ungrouped : bool
If `True`, targets that don't belong to any group are plotted
individually. Only used when ``use_groups=True``.
cumulative : bool or str
If `True`, plots the cumulative sum of hours spent on each target.
            If ``'target'``, it plots the cumulative on-target hours normalised
            by the total hours needed to observe the target group. If ``'survey'``,
plots the cumulative hours normalised by the total survey hours.
When ``cumulative`` is not `False`, ``bin_size`` is set to 1.
lst : bool
Whether to bin the used time by LST instead of JD.
show_unused : bool
Display the unused time.
lunation : list
Range of lunations to include in statistics. Defaults to all lunations.
Set to ``[lmin, lmax]`` to return values of ``lmin < lunation <= lmax``.
Can be used to restrict lst usage plots to only bright, grey, or
dark time.
skip_fast : bool
If set, do not plot targets that complete in the first 20% of the
survey.
Return
------
fig : `~matplotlib.figure.Figure`
The Matplotlib figure of the plot.
"""
        assert self.schedule is not None, 'you still have not run a simulation.'
if not targets:
targets = [target.name for target in self.targets]
ncols = 2 if len(targets) > 15 else 1
if lst:
bin_size = 1. if bin_size == 30. else bin_size
assert cumulative is False, 'cumulative cannot be used with lst=True.'
if cumulative != False:
bin_size = 1
fig, ax = plt.subplots(figsize=(12, 8))
# Leaves a margin on the right to put the legend
fig.subplots_adjust(right=0.65 if ncols == 2 else 0.8)
ax.set_prop_cycle(color=['r', 'g', 'b', 'c', 'm', 'y', 'g', 'b', 'c', 'm', 'y', 'r', 'b',
'c', 'm', 'y', 'r', 'g', 'c', 'm', 'y', 'r', 'g', 'b', ],
linestyle=['-', '--', '-.', ':', '-', '--', '-.', ':', '-', '--', '-.',
':', '-', '--', '-.', ':', '-', '--', '-.', ':', '-', '--',
'-.', ':'])
min_b = (numpy.min(self.schedule['JD']) - 2451545.0) if not lst else 0.0
max_b = (numpy.max(self.schedule['JD']) - 2451545.0) if not lst else 24.0
b = numpy.arange(min_b, max_b + bin_size, bin_size)
# Creates a list of groups to plot. If use_groups=False,
# this is just the list of targets.
if not use_groups:
groups = [target.name for target in self.targets]
else:
groups = groups or self.targets.get_groups()
# Adds the ungrouped targets.
if show_ungrouped:
for target in self.targets:
if len(target.groups) == 0:
groups.append(target.name)
for group in groups:
# Cumulated group heights
            group_heights = numpy.zeros(len(b) - 1, dtype=numpy.float64)
group_target_tot_time = 0.0
# If we are not using groups or the "group"
# name is that of an ungrouped target.
if not use_groups or group in self.targets._names:
targets = [group]
else:
targets = self.targets.get_group_targets(group, primary=use_primary_group)
for tname in targets:
t = self.targets.get_target(tname)
tindex = [target.name for target in self.targets].index(tname)
# plot each target
tt = self.get_target_time(tname, observatory=observatory, lunation=lunation, return_lst=lst)
if len(tt) == 0:
continue
if not lst:
tt -= 2451545.0
heights, bins = numpy.histogram(tt, bins=b)
heights = numpy.array(heights, dtype=float)
heights *= t.exptime * t.min_exposures / 3600.0
ntiles = len(numpy.where(self.tiledb.tile_table['TargetIndex'].data == tindex)[0])
target_tot_time = ntiles * t.exptime * t.n_exposures / 3600.
if skip_fast:
completion = heights.cumsum() / target_tot_time
if numpy.quantile(completion, 0.2) >= 1:
continue
group_heights += heights
group_target_tot_time += target_tot_time
# Only plot the heights if they are not zero. This prevents
# targets that are not observed at an observatory to be displayed.
if numpy.sum(group_heights) > 0:
if cumulative is False:
ax.plot(bins[:-1] + numpy.diff(bins) / 2, group_heights, label=group)
else:
ax.plot(bins[:-1] + numpy.diff(bins) / 2, numpy.cumsum(group_heights)/group_target_tot_time, label=group)
# deal with unused time
tt = self.get_target_time('-', observatory=observatory, return_lst=lst)
if not lst:
tt -= 2451545.0
heights, bins = numpy.histogram(tt, bins=b)
heights = numpy.array(heights, dtype=float)
heights *= self.time_step / 3600.0
if cumulative:
heights = heights.cumsum()
if show_unused and cumulative is False:
ax.plot(bins[:-1] + numpy.diff(bins) / 2, heights, ':',
color='k', label='Unused')
ax.set_xlabel('JD - 2451545.0' if not lst else 'LST / h')
if cumulative is False:
ax.set_ylabel('Hours on target / %.f %s' % ((bin_size, 'days')
if not lst else (bin_size, 'h')))
elif cumulative is True:
ax.set_ylabel('Hours on target [cumulative]')
elif cumulative == 'target':
ax.set_ylabel('Fraction of target completed')
elif cumulative == 'survey':
ax.set_ylabel('Fraction of survey time spent on target')
ax.set_title(observatory if observatory != None else 'APO+LCO')
# Move legend outside the plot
ax.legend(loc='upper left', bbox_to_anchor=(1.05, 1.0), ncol=ncols)
return fig
def plot_lunation(self, tname, group=False, observatory=None, dark_limit=0.2):
"""
plot the lunation distribution for a target. use '-' for unused time
Parameters
----------
tname : str
The name of the target or group. Use ``'-'`` for unused time.
group : bool
If not true, ``tname`` will be the name of a group not a single
target.
observatory : str
The observatory to filter for.
dark_limit : float
Limiting lunation value to count as dark time. Defaults to 0.2.
Return
------
fig : `~matplotlib.figure.Figure`
The Matplotlib figure of the plot.
"""
dark = self.get_target_time(tname, group=group, lunation=[-0.01, dark_limit],
observatory=observatory, return_lst=True)
bright = self.get_target_time(tname, group=group, lunation=[dark_limit, 1.0],
observatory=observatory, return_lst=True)
bin_size = 1
b = numpy.arange(0, 24 + bin_size, bin_size)
heights_dark, bins = numpy.histogram(dark, bins=b)
heights_dark = numpy.array(heights_dark, dtype=float)
heights_bright, bins = numpy.histogram(bright, bins=b)
heights_bright = numpy.array(heights_bright, dtype=float)
fig, ax = plt.subplots()
ax.plot(bins[:-1] + numpy.diff(bins) / 2, heights_dark, label='dark')
ax.plot(bins[:-1] + numpy.diff(bins) / 2, heights_bright, label='bright')
ax.legend()
plt.xlabel('LST')
plt.ylabel('# of exposures')
plt.title('unused' if tname == '-' else tname)
return fig
def plot_shadow_height(self, tname=None, group=False, observatory=None, norm=False, cumulative=0, linear_log=False):
"""
Plot the shadow height distribution for a target. Use ``'-'`` for unused time.
Parameters
----------
tname : str
The name of the target or group. Use ``'ALL'`` with ``group=True``
to plot all groups.
group : bool
If True, ``tname`` is the name of a group rather than a single
target.
observatory : str
The observatory to filter for.
norm : bool
Normalize the histograms instead of plotting raw numbers.
Returns
-------
fig : `~matplotlib.figure.Figure`
The Matplotlib figure of the plot.
"""
if linear_log is False:
b = numpy.logspace(numpy.log10(100.)
import ctypes as c
import pathlib
from enum import IntEnum
import numpy as np
from plaster.run.sigproc_v2.c_gauss2_fitter.build import build_dev
from plaster.tools.c_common.c_common_tools import CException
from plaster.tools.schema import check
from plumbum import local
c_gauss_fitter_path = local.path(
"/erisyon/plaster/plaster/run/sigproc_v2/c_gauss2_fitter"
)
def _init():
"""
Must be called before anything else in this module
"""
if local.env.get("PLASTER_C_COMPILE"):
with local.cwd(c_gauss_fitter_path):
build_dev()
# Note, this is executed at LOAD TIME!
_init()
class Gauss2Params:
AMP = 0
SIGMA_X = 1
SIGMA_Y = 2
CENTER_X = 3
CENTER_Y = 4
RHO = 5
OFFSET = 6
N_PARAMS = 7 # Number above this point
class AugmentedGauss2Params(Gauss2Params):
# These must match in gauss2_fitter.h
MEA = 7
NOISE = 8
ASPECT_RATIO = 9
N_FULL_PARAMS = 10
_lib = None
MODULE_DIR = pathlib.Path(__file__).parent
def load_lib():
global _lib
if _lib is not None:
return _lib
lib = c.CDLL(MODULE_DIR / "_gauss2_fitter.so")
lib.gauss2_check.argtypes = []
lib.gauss2_check.restype = c.c_char_p
lib.gauss_2d.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=1, flags="C_CONTIGUOUS"
), # double *p
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=1, flags="C_CONTIGUOUS"
), # double *dst_x
c.c_int, # int m
c.c_int, # int n
c.c_void_p, # void *data
]
lib.fit_gauss_2d_on_float_image.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=2, flags="C_CONTIGUOUS"
), # np_float64 *im
c.c_int, # np_int64 im_w
c.c_int, # np_int64 im_h
c.c_int, # np_int64 center_x
c.c_int, # np_int64 center_y
c.c_int, # np_int64 mea
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=1, flags="C_CONTIGUOUS"
), # np_float64 *params
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=1, flags="C_CONTIGUOUS"
), # np_float64 *info
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=1, flags="C_CONTIGUOUS"
), # np_float64 *covar
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=1, flags="C_CONTIGUOUS"
), # np_float64 *noise
]
lib.fit_gauss_2d_on_float_image.restype = c.c_int
lib.fit_array_of_gauss_2d_on_float_image.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=2, flags="C_CONTIGUOUS"
), # np_float64 *im
c.c_int, # np_int64 im_w
c.c_int, # np_int64 im_h
c.c_int, # np_int64 mea
c.c_int, # np_int64 n_peaks
np.ctypeslib.ndpointer(
dtype=np.int64, ndim=1, flags="C_CONTIGUOUS"
), # np_int64 *center_x
np.ctypeslib.ndpointer(
dtype=np.int64, ndim=1, flags="C_CONTIGUOUS"
), # np_int64 *center_y
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=1, flags="C_CONTIGUOUS"
), # np_float64 *params
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=1, flags="C_CONTIGUOUS"
), # np_float64 *var_params
np.ctypeslib.ndpointer(
dtype=np.int64, ndim=1, flags="C_CONTIGUOUS"
), # np_int64 *fail
]
lib.fit_array_of_gauss_2d_on_float_image.restype = c.c_char_p
lib.synth_image.argtypes = [
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=2, flags="C_CONTIGUOUS"
), # np_float64 *im
c.c_int, # np_int64 im_w
c.c_int, # np_int64 im_h
c.c_int, # np_int64 peak_mea
c.c_int, # np_int64 n_peaks
np.ctypeslib.ndpointer(
dtype=np.float64, ndim=2, flags="C_CONTIGUOUS"
), # np_float64 *params
]
lib.synth_image.restype = c.c_char_p
_lib = lib
return lib
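# The shared library is loaded lazily and cached in the module-level _lib, so
# the .so is only opened the first time one of the wrappers below needs it.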
class Gauss2FitParams(IntEnum):
# These must match in gauss2_fitter.h
AMP = 0
SIGNAL = 0 # Alias for AMP
SIGMA_X = 1
SIGMA_Y = 2
CENTER_X = 3
CENTER_Y = 4
RHO = 5
OFFSET = 6
N_FIT_PARAMS = 7 # Number above this point
MEA = 7
NOISE = 8
ASPECT_RATIO = 9
N_FULL_PARAMS = 10
def gauss2(params):
params = np.ascontiguousarray(params, dtype=np.float64)
im = np.zeros((11, 11))
im = np.ascontiguousarray(im.flatten(), dtype=np.float64)
lib = load_lib()
lib.gauss_2d(params, im, 7, 11 * 11, 0)
return im.reshape((11, 11))
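# Example (sketch, illustrative values only): rendering a single peak with
# gauss2. The parameter order follows Gauss2Params above; the result is an
# 11x11 ndarray.
#
#   params = np.zeros(Gauss2Params.N_PARAMS)
#   params[Gauss2Params.AMP] = 1000.0
#   params[Gauss2Params.SIGMA_X] = 1.8
#   params[Gauss2Params.SIGMA_Y] = 1.8
#   params[Gauss2Params.CENTER_X] = 5.0
#   params[Gauss2Params.CENTER_Y] = 5.0
#   peak_im = gauss2(params)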
def fit_image(im, locs, guess_params, peak_mea):
"""
Arguments:
im: ndarray single image
locs: ndarray (n_locs, 2)
guess_params: ndarray (n_locs, AugmentedGauss2Params.N_FULL_PARAMS)
peak_mea: side length (in pixels) of the square fit window around each peak
Returns:
fit_params: ndarray(n_locs, AugmentedGauss2Params.N_FULL_PARAMS)
The fit parameters
std_params: The std of each fit, essentially a quality of fit metric
"""
lib = load_lib()
n_locs = int(len(locs))
check.array_t(im, ndim=2, dtype=np.float64)
im = np.ascontiguousarray(im, dtype=np.float64)
# assert np.all(~np.isnan(im))
check.array_t(im, ndim=2, dtype=np.float64, c_contiguous=True)
check.array_t(locs, ndim=2, shape=(None, 2))
locs_y = np.ascontiguousarray(locs[:, 0], dtype=np.int64)
locs_x = np.ascontiguousarray(locs[:, 1], dtype=np.int64)
# MARK all nans as negative which fit_array_of_gauss_2d_on_float_image
# treats as a sentinel for "DO NOT FIT"
locs_y[np.isnan(locs[:, 0])] = -1
locs_x[np.isnan(locs[:, 1])] = -1
fit_fails = np.zeros((n_locs,), dtype=np.int64)
check.array_t(fit_fails, dtype=np.int64, c_contiguous=True)
check.array_t(
guess_params,
dtype=np.float64,
ndim=2,
shape=(n_locs, AugmentedGauss2Params.N_FULL_PARAMS,),
)
fit_params = guess_params.copy()
fit_params[:, AugmentedGauss2Params.MEA] = peak_mea
fit_params = np.ascontiguousarray(fit_params.flatten())
std_params = np.zeros((n_locs, AugmentedGauss2Params.N_FULL_PARAMS))
std_params = np.ascontiguousarray(std_params.flatten())
check.array_t(
fit_params,
dtype=np.float64,
c_contiguous=True,
ndim=1,
shape=(n_locs * AugmentedGauss2Params.N_FULL_PARAMS,),
)
error = lib.gauss2_check()
if error is not None:
raise CException(error)
error = lib.fit_array_of_gauss_2d_on_float_image(
im,
im.shape[1], # Note inversion of axis (y is primary in numpy)
im.shape[0],
peak_mea,
n_locs,
locs_x,
locs_y,
fit_params,
std_params,
fit_fails,
)
if error is not None:
raise CException(error)
# RESHAPE fit_params and NAN-out any where the fit failed
fit_params = fit_params.reshape((n_locs, AugmentedGauss2Params.N_FULL_PARAMS))
# fit_fails will be 1 both in the case that the fit failed
# and the case where it was skipped because "DO NOT FIT" was passed above
fit_params[fit_fails == 1, :] = np.nan
# After some very basic analysis, the following parameters seemed like
# reasonable out-of-bounds limits on the std of the fit.
# Note, this analysis was done on 11x11 pixels and might
# need to be different for other sizes.
# BUT! after using them they seemed to knock out everything,
# so apparently they are not well tuned yet; this block is
# temporarily disabled.
"""
std_params = std_params.reshape((n_locs, AugmentedGauss2Params.N_FULL_PARAMS))
param_std_of_fit_limits = np.array((500, 0.18, 0.18, 0.15, 0.15, 0.08, 5,))
out_of_bounds_mask = np.any(
std_params[:, 0 : AugmentedGauss2Params.N_FIT_PARAMS]
> param_std_of_fit_limits[None, :],
axis=1,
)
fit_params[out_of_bounds_mask, :] = np.nan
"""
return fit_params, std_params
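# Example (sketch, with made-up locations): fitting two peaks in a float64
# image `im`. `guess_params` must be (n_locs, AugmentedGauss2Params.N_FULL_PARAMS);
# rows of `locs` containing NaN are skipped by the C code.
#
#   locs = np.array([[24.0, 31.0], [88.0, 12.0]])
#   guesses = np.zeros((len(locs), AugmentedGauss2Params.N_FULL_PARAMS))
#   guesses[:, Gauss2FitParams.AMP] = 1000.0
#   guesses[:, Gauss2FitParams.SIGMA_X] = 1.8
#   guesses[:, Gauss2FitParams.SIGMA_Y] = 1.8
#   fit, std = fit_image(im, locs, guesses, peak_mea=11)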
def synth_image(im, peak_mea, locs, amps, std_xs, std_ys):
"""
Generate a synthetic image using the Gaussians in the parallel arrays
and accumulate into im
"""
lib = load_lib()
n_locs = int(len(locs))
check.array_t(amps, shape=(n_locs,))
check.array_t(std_xs, shape=(n_locs,))
check.array_t(std_ys, shape=(n_locs,))
params = np.zeros((n_locs, Gauss2FitParams.N_FIT_PARAMS))
params[:, Gauss2FitParams.AMP] = amps
params[:, Gauss2FitParams.CENTER_Y] = locs[:, 0]
params[:, Gauss2FitParams.CENTER_X] = locs[:, 1]
params[:, Gauss2FitParams.SIGMA_X] = std_xs
params[:, Gauss2FitParams.SIGMA_Y] = std_ys
check.array_t(im, ndim=2)
params = np.ascontiguousarray(params, dtype=np.float64)
im = np.ascontiguousarray(im, dtype=np.float64)
# The source is truncated here; the call below is a sketch reconstructed from
# the lib.synth_image argtypes declared in load_lib
# (im, im_w, im_h, peak_mea, n_peaks, params).
error = lib.synth_image(
im,
im.shape[1],  # Note inversion of axis (y is primary in numpy)
im.shape[0],
peak_mea,
n_locs,
params,
)
if error is not None:
raise CException(error)
return im
# -*- coding: utf-8 -*-
"""
Utility functions and classes.
"""
import datetime
import math
import sys
from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple, Union
import argparse as ap
import numpy as np
import pandas as pd
def is_debugging():
"""
Check if the application is running under a debugger.
:return: Returns True if debugger is attached.
"""
return sys.gettrace() is not None
def parse_bool_string(val: str) -> bool:
"""
Parse string representation of a boolean and
return its presumed truth value.
Following inputs are valid "True" values:
- "True", "true", "T", "t",
- "Yes", "yes", "Y", "y"
- "1"
Following inputs are valid "False" values:
- "False", "false", "F", "f",
- "No", "no", "N", "n"
- "0"
:param val: String representation of the boolean.
:raises argparse.ArgumentTypeError: Raised when
given string is not any of the valid options.
:return: Returns boolean value of given string.
"""
if val.lower() in ("true", "t", "yes", "y", "1"):
return True
elif val.lower() in ("false", "f", "no", "n", "0"):
return False
else:
raise ap.ArgumentTypeError(f"Unknown boolean value \"{val}\"!")
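# Example (sketch): using parse_bool_string as an argparse type so values like
# "yes"/"no" are accepted on the command line.
#
#   parser = ap.ArgumentParser()
#   parser.add_argument("--verbose", type=parse_bool_string, default=False)
#   args = parser.parse_args(["--verbose", "yes"])   # args.verbose == True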
def parse_datetime(val: str, format: str = "%d.%m.%Y"
) -> datetime.datetime:
"""
Parse date-time from string, using provided formatting string.
:param val: String being parsed.
:param format: Format string passed to datetime.strptime().
:return: Parsed date-time.
"""
# datetime.strptime() does not accept keyword arguments, so pass them positionally.
return datetime.datetime.strptime(val, format)
def parse_datetime_interval(val: str, format: str = "%d.%m.%Y", sep: str = "/"
) -> Tuple[datetime.datetime, ...]:
"""
Parse interval of date-times from string, using provided
formatting string and separator.
:param val: String being parsed.
:return: Tuple with the parsed date-times (start, end).
"""
return tuple([
parse_datetime(val=split, format=format)
for split in val.split(sep)
])
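# Example (sketch): parse_datetime_interval("01.01.2020/31.12.2020") returns a
# tuple of two datetimes parsed with the default "%d.%m.%Y" format.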
def parse_list(val: str, typ: callable, sep: str = ",") -> list:
"""
Parse separated list of objects into python list.
:param val: List of object, separated using separator.
:param typ: Parser for the type used as typ(str).
:param sep: Separator of list elements.
:raises argparse.ArgumentTypeError: Raised when
given string does not represent valid list.
:return: Returns list of types produced by typ(str).
"""
try:
if val == sep:
return [ ]
items = val.split(sep)
result = [ typ(item) for item in items ]
except Exception:
raise ap.ArgumentTypeError(f"Invalid list value \"{val}\"!")
return result
def parse_list_string(typ: callable, sep: str = ",") -> callable:
""" Construct a list parser for given type and separator. """
def parser(val: str) -> list:
return parse_list(val=val, typ=typ, sep=sep)
return parser
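# Example (sketch): building a reusable comma-separated integer parser.
#
#   parse_ints = parse_list_string(typ=int, sep=",")
#   parse_ints("1,2,3")   # -> [1, 2, 3]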
def reshape_scalar(val: Any) -> np.ndarray:
""" Create array from given value, keeping dimensions for array types and creating [ val ] for scalars. """
arr = np.array(val)
# The source is truncated here; this return is a sketch matching the docstring:
# arrays pass through unchanged, scalars are wrapped into a one-element array.
return arr if arr.ndim > 0 else np.array([ val ])
import os
from typing import IO
from PySDDP.newave.script.templates.confhd import ConfhdTemplate
from matplotlib import pyplot as plt
import numpy as np
from random import randint
from mpl_toolkits.mplot3d import Axes3D
class Confhd(ConfhdTemplate):
def __init__(self):
super().__init__()
self.lista_entrada = list()
self._conteudo_ = None
self.dir_base = None
self._numero_registros_ = None
def ler(self, file_name: str, hidr, vazoes, dger, modif, exph) -> None:
"""
Reads the CONFHD.DAT file, which lists the hydro plants that may be used in
a NEWAVE run, and complements each plant with its registry data.
:param file_name: string with the full path to the file,
hidr: class holding the registry data of every hydro plant,
vazoes: class holding the complete natural streamflow history,
dger: general study data (number of study years),
modif: registry modifications (MODIF.DAT),
exph: hydro expansion schedule (EXPH.DAT)
"""
self.dir_base = os.path.split(file_name)[0]
self.nome_arquivo = os.path.split(file_name)[1]
self._copiavazoes = vazoes.vaz_nat
self._numero_registros_ = 0
self.nuhe = 0
nanos = dger.num_anos['valor']
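# CONFHD.DAT is a fixed-width file; the slices below follow its column layout
# (code, name, gauge/posto, downstream plant, REE, initial volume, status,
# MODIF flag, first and last year of the configured history).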
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
continua = True
contador = 1
while continua:
self.next_line(f)
linha = self.linha
if contador >= 3:
if len(linha) > 5:
self._codigo["valor"].append(int(linha[1:5]))
else:
break
self._nome["valor"].append(linha[6:18])
self._posto["valor"].append(int(linha[19:23]))
self._jusante["valor"].append(int(linha[25:29]))
self._ree["valor"].append(int(linha[30:34]))
self._vol_ini["valor"].append(float(linha[35:41]))
self._status["valor"].append(linha[44:46])
self._modif["valor"].append(int(linha[49:53]))
self._ano_i["valor"].append(int(linha[58:62]))
self._ano_f["valor"].append(int(linha[67:71]))
# Fill in registry (cadastral) data
uhe = hidr.get(self._codigo["valor"][-1])
self._bdh['valor'].append(uhe['bdh'])
self._sist['valor'].append(uhe['sist'])
self._empr['valor'].append(uhe['empr'])
self._desvio['valor'].append(uhe['desvio'])
self._vol_min['valor'].append(uhe['vol_min'])
self._vol_max['valor'].append(uhe['vol_max'])
self._vol_vert['valor'].append(uhe['vol_vert'])
self._vol_min_desv['valor'].append(uhe['vol_min_desv'])
self._cota_min['valor'].append(uhe['cota_min'])
self._cota_max['valor'].append(uhe['cota_max'])
self._pol_cota_vol['valor'].append(uhe['pol_cota_vol'])
self._pol_cota_area['valor'].append(uhe['pol_cota_area'])
self._coef_evap['valor'].append(uhe['coef_evap'])
self._num_conj_maq['valor'].append(uhe['num_conj_maq'])
self._maq_por_conj['valor'].append(uhe['maq_por_conj'])
self._pef_por_conj['valor'].append(uhe['pef_por_conj'])
self._cf_hbqt['valor'].append(uhe['cf_hbqt'])
self._cf_hbqt['valor_2'].append(uhe['cf_hbqt_2'])
self._cf_hbqt['valor_3'].append(uhe['cf_hbqt_3'])
self._cf_hbqt['valor_4'].append(uhe['cf_hbqt_4'])
self._cf_hbqt['valor_5'].append(uhe['cf_hbqt_5'])
self._cf_hbqg['valor'].append(uhe['cf_hbqg'])
self._cf_hbqg['valor_2'].append(uhe['cf_hbqg_2'])
self._cf_hbqg['valor_3'].append(uhe['cf_hbqg_3'])
self._cf_hbqg['valor_4'].append(uhe['cf_hbqg_4'])
self._cf_hbqg['valor_5'].append(uhe['cf_hbqg_5'])
self._cf_hbpt['valor'].append(uhe['cf_hbpt'])
self._cf_hbpt['valor_2'].append(uhe['cf_hbpt_2'])
self._cf_hbpt['valor_3'].append(uhe['cf_hbpt_3'])
self._cf_hbpt['valor_4'].append(uhe['cf_hbpt_4'])
self._cf_hbpt['valor_5'].append(uhe['cf_hbpt_5'])
self._alt_efet_conj['valor'].append(uhe['alt_efet_conj'])
self._vaz_efet_conj['valor'].append(uhe['vaz_efet_conj'])
self._prod_esp['valor'].append(uhe['prod_esp'])
self._perda_hid['valor'].append(uhe['perda_hid'])
self._num_pol_vnj['valor'].append(uhe['num_pol_vnj'])
self._pol_vaz_niv_jus['valor'].append(uhe['pol_vaz_niv_jus'])
self._pol_vaz_niv_jus['valor_2'].append(uhe['pol_vaz_niv_jus_2'])
self._pol_vaz_niv_jus['valor_3'].append(uhe['pol_vaz_niv_jus_3'])
self._pol_vaz_niv_jus['valor_4'].append(uhe['pol_vaz_niv_jus_4'])
self._pol_vaz_niv_jus['valor_5'].append(uhe['pol_vaz_niv_jus_5'])
self._cota_ref_nivel_jus['valor'].append(uhe['cota_ref_nivel_jus'])
self._cfmed['valor'].append(uhe['cfmed'])
self._inf_canal_fuga['valor'].append(uhe['inf_canal_fuga'])
self._fator_carga_max['valor'].append(uhe['fator_carga_max'])
self._fator_carga_min['valor'].append(uhe['fator_carga_min'])
self._vaz_min['valor'].append(uhe['vaz_min'])
self._unid_base['valor'].append(uhe['unid_base'])
self._tipo_turb['valor'].append(uhe['tipo_turb'])
self._repres_conj['valor'].append(uhe['repres_conj'])
self._teifh['valor'].append(uhe['teifh'])
self._ip['valor'].append(uhe['ip'])
self._tipo_perda['valor'].append(uhe['tipo_perda'])
self._data['valor'].append(uhe['data'])
self._observ['valor'].append(uhe['observ'])
self._vol_ref['valor'].append(uhe['vol_ref'])
self._tipo_reg['valor'].append(uhe['tipo_reg'])
# Attach the natural streamflow series
vaz_nat = vazoes.vaz_nat.transpose()
vaz_nat = vaz_nat[self._posto["valor"][-1]-1]
vaz_nat = vaz_nat.transpose()
self._vazoes['valor'].append(vaz_nat)
# Plants flagged 'NE' or 'EE' must have no machines installed
if self._status['valor'][-1] == 'NE' or self._status['valor'][-1] == 'EE':
for iconj in range(5):
self._maq_por_conj['valor'][-1][iconj] = 0
# Time-dependent parameters controlled by MODIF.DAT
self._vol_mint['valor'].append(self._vol_min['valor'][-1]*np.ones((nanos, 12), 'f'))
self._vol_maxt['valor'].append(self._vol_max['valor'][-1]*np.ones((nanos, 12), 'f'))
self._vol_minp['valor'].append(self._vol_min['valor'][-1]*np.ones((nanos, 12), 'f'))
self._vaz_mint['valor'].append(self._vaz_min['valor'][-1]*np.ones((nanos, 12), 'f'))
self._cfugat['valor'].append(self._cfmed['valor'][-1]*np.ones((nanos, 12), 'f'))
self._cmont['valor'].append(self._cota_max['valor'][-1]*np.ones((nanos, 12), 'f'))
#
# Compute useful (live) storage
#
if self._tipo_reg['valor'][-1] == 'M':
self._vol_util['valor'].append(self._vol_max['valor'][-1] - self._vol_min['valor'][-1])
else:
self._vol_util['valor'].append(float(0))
self._vol_min['valor'][-1] = self._vol_max['valor'][-1]
# Apply modifications from MODIF.DAT
usinadf = modif.bloco_usina['df'][modif.bloco_usina['df']['codigo'] == self._codigo['valor'][-1]]
self._acerta_modif(usinadf, dger)
# Compute derived parameters
#
# Recompute useful (live) storage
#
if self._tipo_reg['valor'][-1] == 'M':
self._vol_util['valor'][-1] = self._vol_max['valor'][-1] - self._vol_min['valor'][-1]
else:
self._vol_min['valor'][-1] = self._vol_max['valor'][-1]
self._calc_pot_efetiva()
self._calc_vaz_efetiva()
self._calc_produtibs(nanos)
self._calc_engol_maximo()
# Time-dependent parameters computed from EXPH.DAT
if self._status['valor'][-1] == 'EX':
self._status_vol_morto['valor'].append(2 * np.ones((nanos, 12), 'i'))
self._status_motoriz['valor'].append(2 * np.ones((nanos, 12), 'i'))
self._vol_morto_tempo['valor'].append(np.zeros((nanos, 12), 'f'))
self._engol_tempo['valor'].append(self._engolimento['valor'][-1] * np.ones((nanos, 12), 'f'))
self._potencia_tempo['valor'].append(self._pot_efet['valor'][-1] * np.ones((nanos, 12), 'f'))
self._unidades_tempo['valor'].append(sum(self._maq_por_conj['valor'][-1]) * np.ones((nanos, 12), 'f'))
else:
self._status_vol_morto['valor'].append(np.zeros((nanos, 12), 'i'))
self._status_motoriz['valor'].append(np.zeros((nanos, 12), 'i'))
self._vol_morto_tempo['valor'].append(np.zeros((nanos, 12), 'f'))
if self._status['valor'][-1] == 'EE':
self._engol_tempo['valor'].append(self._engolimento['valor'][-1] * np.ones((nanos, 12), 'f'))
self._potencia_tempo['valor'].append(self._pot_efet['valor'][-1] * np.ones((nanos, 12), 'f'))
self._unidades_tempo['valor'].append(sum(self._maq_por_conj['valor'][-1]) * np.ones((nanos, 12), 'f'))
else:
self._engol_tempo['valor'].append(np.zeros((nanos, 12), 'f'))
self._potencia_tempo['valor'].append(np.zeros((nanos, 12), 'f'))
self._unidades_tempo['valor'].append(np.zeros((nanos, 12), 'i'))
#
# Allocate nanos x 12 matrices for each kind of accumulated productivity
#
self._ro_acum_a_ree['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_b_ree['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_c_ree['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_a_sist['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_b_sist['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_c_sist['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_65['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_max['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_med['valor'].append(np.zeros((nanos, 12), 'd'))
self._ro_acum_min['valor'].append(np.zeros((nanos, 12), 'd'))
# Apply expansion data from EXPH.DAT
usinadf = exph.bloco_usina['df'][exph.bloco_usina['df']['codigo'] == self._codigo['valor'][-1]]
self._acerta_exph(usinadf, dger)
self.nuhe += 1
self._numero_registros_ += 1
contador += 1
except Exception as err:
if isinstance(err, StopIteration):
maior = np.array(self._codigo['valor'], dtype=int)
maior = np.max(maior)
self._mapa = -np.ones(maior+1, dtype=int)
for i, codigo in enumerate(self._codigo['valor']):
self._mapa[codigo]=int(i)
# Update accumulated productivities
self._prod_acum()
print("OK! Leitura do", os.path.split(file_name)[1], "realizada com sucesso.")
else:
raise
def escrever(self, file_out: str) -> None:
"""
Writes the CONFHD.DAT file, which lists the hydro plants that may be used in
a NEWAVE run.
:param file_out: string with the full path to the output file
"""
self.dir_base = os.path.split(file_out)[0]
self.nome_arquivo = os.path.split(file_out)[1]
self._numero_registros_ = 0
formato = "{codigo: >5} {nome: <12} {posto: >4} {jusante: >5} {ree: >4} {vol_ini: >6} {status: >4} {modif: >6} {ano_i: >8} {ano_f: >8}\n"
if not os.path.isdir(os.path.split(file_out)[0]):
os.mkdir(os.path.split(file_out)[0])
try:
with open(file_out, 'w', encoding='latin-1') as f: # type: IO[str]
# Write the header
f.write(" NUM NOME POSTO JUS REE V.INIC U.EXIS MODIF INIC.HIST FIM HIST\n")
f.write(" XXXX XXXXXXXXXXXX XXXX XXXX XXXX XXX.XX XXXX XXXX XXXX XXXX \n")
for iusi in range(self.nuhe):
linha = dict(
codigo=self._codigo['valor'][iusi],
nome=self._nome['valor'][iusi],
posto=self._posto['valor'][iusi],
jusante=self._jusante['valor'][iusi],
ree=self._ree['valor'][iusi],
vol_ini=self._vol_ini['valor'][iusi],
status=self._status['valor'][iusi],
modif=self._modif['valor'][iusi],
ano_i=self._ano_i['valor'][iusi],
ano_f=self._ano_f['valor'][iusi]
)
f.write(formato.format(**linha))
self._numero_registros_ += 1
except Exception as err:
raise
print("OK! Escrita do", os.path.split(file_out)[1], "realizada com sucesso.")
def get(self, entrada):
"""
Looks up a hydro plant in the CONFHD data and returns a data dictionary
with all the information stored for that plant.
:param entrada: string with the plant name or integer with the plant reference number
"""
if (type(entrada) == float) or (type(entrada) == int):
#for i, valor in enumerate(self._codigo["valor"]):
# if valor == int(entrada):
# posicao = i
# break
if type(entrada) == float:
entrada = int(entrada)
posicao = int(self._mapa[entrada])
if posicao == -1:
return None
if type(entrada) == str:
posicao = None
for i, valor in enumerate(self._nome["valor"]):
if (valor.upper()).strip() == (entrada.upper()).strip():
posicao = i
break
if posicao is None:
return None
uhe = {
'codigo': self._codigo['valor'][posicao],
'nome': self._nome['valor'][posicao],
'posto': self._posto['valor'][posicao],
'ree': self._ree["valor"][posicao],
'vol_ini': self._vol_ini["valor"][posicao],
'status': self._status["valor"][posicao],
'modif': self._modif["valor"][posicao],
'ano_i': self._ano_i["valor"][posicao],
'ano_f': self._ano_f["valor"][posicao],
'bdh': self._bdh['valor'][posicao],
'sist': self._sist['valor'][posicao],
'empr': self._empr['valor'][posicao],
'jusante': self._jusante['valor'][posicao],
'desvio': self._desvio['valor'][posicao],
'vol_min': self._vol_min['valor'][posicao],
'vol_max': self._vol_max['valor'][posicao],
'vol_vert': self._vol_vert['valor'][posicao],
'vol_min_desv': self._vol_min_desv['valor'][posicao],
'cota_min': self._cota_min['valor'][posicao],
'cota_max': self._cota_max['valor'][posicao],
'pol_cota_vol': self._pol_cota_vol['valor'][posicao],
'pol_cota_area': self._pol_cota_area['valor'][posicao],
'coef_evap': self._coef_evap['valor'][posicao],
'num_conj_maq': self._num_conj_maq['valor'][posicao],
'maq_por_conj': self._maq_por_conj['valor'][posicao],
'pef_por_conj': self._pef_por_conj['valor'][posicao],
'cf_hbqt': self._cf_hbqt['valor'][posicao],
'cf_hbqt_2': self._cf_hbqt['valor_2'][posicao],
'cf_hbqt_3': self._cf_hbqt['valor_3'][posicao],
'cf_hbqt_4': self._cf_hbqt['valor_4'][posicao],
'cf_hbqt_5': self._cf_hbqt['valor_5'][posicao],
'cf_hbqg': self._cf_hbqg['valor'][posicao],
'cf_hbqg_2': self._cf_hbqg['valor_2'][posicao],
'cf_hbqg_3': self._cf_hbqg['valor_3'][posicao],
'cf_hbqg_4': self._cf_hbqg['valor_4'][posicao],
'cf_hbqg_5': self._cf_hbqg['valor_5'][posicao],
'cf_hbpt': self._cf_hbpt['valor'][posicao],
'cf_hbpt_2': self._cf_hbpt['valor_2'][posicao],
'cf_hbpt_3': self._cf_hbpt['valor_3'][posicao],
'cf_hbpt_4': self._cf_hbpt['valor_4'][posicao],
'cf_hbpt_5': self._cf_hbpt['valor_5'][posicao],
'alt_efet_conj': self._alt_efet_conj['valor'][posicao],
'vaz_efet_conj': self._vaz_efet_conj['valor'][posicao],
'prod_esp': self._prod_esp['valor'][posicao],
'perda_hid': self._perda_hid['valor'][posicao],
'num_pol_vnj': self._num_pol_vnj['valor'][posicao],
'pol_vaz_niv_jus': self._pol_vaz_niv_jus['valor'][posicao],
'pol_vaz_niv_jus_2': self._pol_vaz_niv_jus['valor_2'][posicao],
'pol_vaz_niv_jus_3': self._pol_vaz_niv_jus['valor_3'][posicao],
'pol_vaz_niv_jus_4': self._pol_vaz_niv_jus['valor_4'][posicao],
'pol_vaz_niv_jus_5': self._pol_vaz_niv_jus['valor_5'][posicao],
'cota_ref_nivel_jus': self._cota_ref_nivel_jus['valor'][posicao],
'cfmed': self._cfmed['valor'][posicao],
'inf_canal_fuga': self._inf_canal_fuga['valor'][posicao],
'fator_carga_max': self._fator_carga_max['valor'][posicao],
'fator_carga_min': self._fator_carga_min['valor'][posicao],
'vaz_min': self._vaz_min['valor'][posicao],
'unid_base': self._unid_base['valor'][posicao],
'tipo_turb': self._tipo_turb['valor'][posicao],
'repres_conj': self._repres_conj['valor'][posicao],
'teifh': self._teifh['valor'][posicao],
'ip': self._ip['valor'][posicao],
'tipo_perda': self._tipo_perda['valor'][posicao],
'data': self._data['valor'][posicao],
'observ': self._observ['valor'][posicao],
'vol_ref': self._vol_ref['valor'][posicao],
'tipo_reg': self._tipo_reg['valor'][posicao],
'vazoes': self._vazoes['valor'][posicao],
'vol_mint': self._vol_mint['valor'][posicao],
'vol_maxt': self._vol_maxt['valor'][posicao],
'vol_minp': self._vol_minp['valor'][posicao],
'vaz_mint': self._vaz_mint['valor'][posicao],
'cmont': self._cmont['valor'][posicao],
'cfugat': self._cfugat['valor'][posicao],
'vol_util': self._vol_util['valor'][posicao],
'pot_efet': self._pot_efet['valor'][posicao],
'vaz_efet': self._vaz_efet['valor'][posicao],
'status_vol_morto': self._status_vol_morto['valor'][posicao],
'status_motoriz': self._status_motoriz['valor'][posicao],
'vol_morto_tempo': self._vol_morto_tempo['valor'][posicao],
'engol_tempo': self._engol_tempo['valor'][posicao],
'potencia_tempo': self._potencia_tempo['valor'][posicao],
'unidades_tempo': self._unidades_tempo['valor'][posicao],
'ro_65': self._ro_65['valor'][posicao],
'ro_50': self._ro_50['valor'][posicao],
'ro_equiv': self._ro_equiv['valor'][posicao],
'ro_equiv65': self._ro_equiv65['valor'][posicao],
'ro_min': self._ro_min['valor'][posicao],
'ro_max': self._ro_max['valor'][posicao],
'engolimento': self._engolimento['valor'][posicao],
'ro_acum_a_ree': self._ro_acum_a_ree['valor'][posicao],
'ro_acum_b_ree': self._ro_acum_b_ree['valor'][posicao],
'ro_acum_c_ree': self._ro_acum_c_ree['valor'][posicao],
'ro_acum_a_sist': self._ro_acum_a_sist['valor'][posicao],
'ro_acum_b_sist': self._ro_acum_b_sist['valor'][posicao],
'ro_acum_c_sist': self._ro_acum_c_sist['valor'][posicao],
'ro_acum': self._ro_acum['valor'][posicao],
'ro_acum_65': self._ro_acum_65['valor'][posicao],
'ro_acum_max': self._ro_acum_max['valor'][posicao],
'ro_acum_med': self._ro_acum_med['valor'][posicao],
'ro_acum_min': self._ro_acum_min['valor'][posicao]
}
return uhe
def put(self, uhe):
"""
Updates a CONFHD plant according to the data dictionary given as input.
The keys of the input dictionary must be the same as those of the
dictionary returned by the ``get`` command.
:param uhe: data dictionary with the information of the plant to be updated.
"""
posicao = None
for i, valor in enumerate(self._codigo["valor"]):
if valor == uhe['codigo']:
posicao = i
break
if posicao is None:
return None
self._codigo['valor'][posicao] = uhe['codigo']
self._nome['valor'][posicao] = uhe['nome']
self._posto['valor'][posicao] = uhe['posto']
self._bdh['valor'][posicao] = uhe['bdh']
self._sist['valor'][posicao] = uhe['sist']
self._empr['valor'][posicao] = uhe['empr']
self._jusante['valor'][posicao] = uhe['jusante']
self._desvio['valor'][posicao] = uhe['desvio']
self._vol_min['valor'][posicao] = uhe['vol_min']
self._vol_max['valor'][posicao] = uhe['vol_max']
self._vol_vert['valor'][posicao] = uhe['vol_vert']
self._vol_min_desv['valor'][posicao] = uhe['vol_min_desv']
self._cota_min['valor'][posicao] = uhe['cota_min']
self._cota_max['valor'][posicao] = uhe['cota_max']
self._pol_cota_vol['valor'][posicao] = uhe['pol_cota_vol']
self._pol_cota_area['valor'][posicao] = uhe['pol_cota_area']
self._coef_evap['valor'][posicao] = uhe['coef_evap']
self._num_conj_maq['valor'][posicao] = uhe['num_conj_maq']
self._maq_por_conj['valor'][posicao] = uhe['maq_por_conj']
self._pef_por_conj['valor'][posicao] = uhe['pef_por_conj']
self._cf_hbqt['valor'][posicao] = uhe['cf_hbqt']
self._cf_hbqt['valor_2'][posicao] = uhe['cf_hbqt_2']
self._cf_hbqt['valor_3'][posicao] = uhe['cf_hbqt_3']
self._cf_hbqt['valor_4'][posicao] = uhe['cf_hbqt_4']
self._cf_hbqt['valor_5'][posicao] = uhe['cf_hbqt_5']
self._cf_hbqg['valor'][posicao] = uhe['cf_hbqg']
self._cf_hbqg['valor_2'][posicao] = uhe['cf_hbqg_2']
self._cf_hbqg['valor_3'][posicao] = uhe['cf_hbqg_3']
self._cf_hbqg['valor_4'][posicao] = uhe['cf_hbqg_4']
self._cf_hbqg['valor_5'][posicao] = uhe['cf_hbqg_5']
self._cf_hbpt['valor'][posicao] = uhe['cf_hbpt']
self._cf_hbpt['valor_2'][posicao] = uhe['cf_hbpt_2']
self._cf_hbpt['valor_3'][posicao] = uhe['cf_hbpt_3']
self._cf_hbpt['valor_4'][posicao] = uhe['cf_hbpt_4']
self._cf_hbpt['valor_5'][posicao] = uhe['cf_hbpt_5']
self._alt_efet_conj['valor'][posicao] = uhe['alt_efet_conj']
self._vaz_efet_conj['valor'][posicao] = uhe['vaz_efet_conj']
self._prod_esp['valor'][posicao] = uhe['prod_esp']
self._perda_hid['valor'][posicao] = uhe['perda_hid']
self._num_pol_vnj['valor'][posicao] = uhe['num_pol_vnj']
self._pol_vaz_niv_jus['valor'][posicao] = uhe['pol_vaz_niv_jus']
self._pol_vaz_niv_jus['valor_2'][posicao] = uhe['pol_vaz_niv_jus_2']
self._pol_vaz_niv_jus['valor_3'][posicao] = uhe['pol_vaz_niv_jus_3']
self._pol_vaz_niv_jus['valor_4'][posicao] = uhe['pol_vaz_niv_jus_4']
self._pol_vaz_niv_jus['valor_5'][posicao] = uhe['pol_vaz_niv_jus_5']
self._cota_ref_nivel_jus['valor'][posicao] = uhe['cota_ref_nivel_jus']
self._cfmed['valor'][posicao] = uhe['cfmed']
self._inf_canal_fuga['valor'][posicao] = uhe['inf_canal_fuga']
self._fator_carga_max['valor'][posicao] = uhe['fator_carga_max']
self._fator_carga_min['valor'][posicao] = uhe['fator_carga_min']
self._vaz_min['valor'][posicao] = uhe['vaz_min']
self._unid_base['valor'][posicao] = uhe['unid_base']
self._tipo_turb['valor'][posicao] = uhe['tipo_turb']
self._repres_conj['valor'][posicao] = uhe['repres_conj']
self._teifh['valor'][posicao] = uhe['teifh']
self._ip['valor'][posicao] = uhe['ip']
self._tipo_perda['valor'][posicao] = uhe['tipo_perda']
self._data['valor'][posicao] = uhe['data']
self._observ['valor'][posicao] = uhe['observ']
self._vol_ref['valor'][posicao] = uhe['vol_ref']
self._tipo_reg['valor'][posicao] = uhe['tipo_reg']
self._vazoes['valor'][posicao] = uhe['vazoes']
self._vol_mint['valor'][posicao] = uhe['vol_mint']
self._vol_maxt['valor'][posicao] = uhe['vol_maxt']
self._vol_minp['valor'][posicao] = uhe['vol_minp']
self._vaz_mint['valor'][posicao] = uhe['vaz_mint']
self._cfugat['valor'][posicao] = uhe['cfugat']
self._vol_util['valor'][posicao] = uhe['vol_util']
self._pot_efet['valor'][posicao] = uhe['pot_efet']
self._vaz_efet['valor'][posicao] = uhe['vaz_efet']
self._status_vol_morto['valor'][posicao] = uhe['status_vol_morto']
self._status_motoriz['valor'][posicao] = uhe['status_motoriz']
self._vol_morto_tempo['valor'][posicao] = uhe['vol_morto_tempo']
self._engol_tempo['valor'][posicao] = uhe['engol_tempo']
self._potencia_tempo['valor'][posicao] = uhe['potencia_tempo']
self._unidades_tempo['valor'][posicao] = uhe['unidades_tempo']
self._ro_65['valor'][posicao] = uhe['ro_65']
self._ro_50['valor'][posicao] = uhe['ro_50']
self._ro_equiv['valor'][posicao] = uhe['ro_equiv']
self._ro_equiv65['valor'][posicao] = uhe['ro_equiv65']
self._ro_min['valor'][posicao] = uhe['ro_min']
self._ro_max['valor'][posicao] = uhe['ro_max']
self._engolimento['valor'][posicao] = uhe['engolimento']
print(np.shape(self._copiavazoes))
for iano in range(np.shape(self._copiavazoes)[0]):
for imes in range(12):
self._copiavazoes[iano][imes][self._posto['valor'][posicao]-1] = self._vazoes['valor'][posicao][iano][imes]
return 'sucesso'
def help(self, parametro):
"""
Describes the kind of information stored under a key of the data dictionary
returned by the ``get`` command.
:param parametro: string with the dictionary key to be described
"""
duvida = getattr(self, '_'+parametro)
return duvida['descricao']
# Incremental (local) inflow calculation
def vaz_inc(self, uhe, iano, imes):
def Montante(uhe, iano, imes):
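# Yields the codes of the plants immediately upstream of `uhe` that are
# already past dead-storage filling (status_vol_morto == 2); plants still
# filling are skipped and their own upstream plants are visited instead.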
for iusi in self.lista_uhes():
usina = self.get(iusi)
if usina['jusante'] == uhe['codigo']:
if usina['status_vol_morto'][iano][imes] == 2:
yield iusi
else:
yield from Montante(usina, iano, imes)
# Start the incremental inflow from the plant's natural inflow, then subtract the upstream natural inflows
incremental = uhe['vazoes'][:, imes].copy()  # copy so the clamp below cannot modify the stored history
if uhe['status_vol_morto'][iano][imes] != 2:
print ('Erro: Tentativa de calculo de Incremental para usina (', uhe['nome'], ') fora de operacao no mes ', imes, ' e ano ', iano)
return 0
else:
for iusina in Montante(uhe, iano, imes):
usina = self.get(iusina)
incremental = incremental - usina['vazoes'][:,imes]
# Clamp negative incremental inflows to zero
codigos = np.where(incremental<0)
incremental[codigos] = 0
return incremental
def vaz_inc_entre_res(self, codigo, ianoconf, imesconf):
uhe = self.get(codigo)
nanos_hist = len(uhe['vazoes'])
def Montante(codigo, iano, imes):
#for iusi in self.lista_uhes():
# usina = self.get(iusi)
for iusi, jusante in enumerate(self._jusante['valor']):
if jusante == codigo:
if self._status_vol_morto['valor'][iusi][iano][imes] == 2:
if self._vol_util['valor'][iusi] > 0:
yield iusi
else:
yield from Montante(self._codigo['valor'][iusi], iano, imes)
else:
yield from Montante(self._codigo['valor'][iusi], iano, imes)
if uhe['status_vol_morto'][ianoconf][imesconf] != 2:
print ('Erro: Tentativa de calculo de Incremental para usina (', uhe['nome'], ') fora de operacao no mes ', imesconf, ' e ano ', ianoconf)
return 0
else:
incremental = np.zeros(nanos_hist)
for ianoh in range(nanos_hist):
incremental[ianoh] = uhe['vazoes'][ianoh][imesconf]
for iusina in Montante(codigo, ianoconf, imesconf):
for ianoh in range(nanos_hist):
incremental[ianoh] = incremental[ianoh] - self._vazoes['valor'][iusina][ianoh][imesconf]
# Clamp negative incremental inflows to zero
codigos = np.where(incremental<0)
incremental[codigos] = 0
return incremental
##########################################################################################################
# Plant parameter calculations
##########################################################################################################
#def _calc_vol_util(self): # Calcula Volume Util da Usina
# if self._tipo_reg['valor'][-1] == 'M':
# self._vol_util['valor'].append(self._vol_max['valor'][-1] - self._vol_min['valor'][-1])
# else:
# self._vol_util['valor'].append(float(0))
# self._vol_min['valor'][-1] = self._vol_max['valor'][-1]
def _calc_pot_efetiva(self): # Compute the plant's effective power
a = np.array(self._maq_por_conj["valor"][-1])
b = np.array(self._pef_por_conj["valor"][-1])
self._pot_efet['valor'].append(np.vdot(a, b))
def _calc_vaz_efetiva(self): # Compute the plant's effective flow
a = np.array(self._maq_por_conj["valor"][-1])
b = np.array(self._vaz_efet_conj["valor"][-1])
self._vaz_efet['valor'].append(np.vdot(a, b))
def _calc_produtibs(self, nanos): # Compute productivities at several reference storages
self._ro_65['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_50['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_equiv['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_equiv65['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_min['valor'].append(np.zeros( (nanos,12), 'd' ))
self._ro_max['valor'].append(np.zeros( (nanos,12), 'd' ))
a = self._pol_cota_vol["valor"][-1][0]
b = self._pol_cota_vol["valor"][-1][1]
c = self._pol_cota_vol["valor"][-1][2]
d = self._pol_cota_vol["valor"][-1][3]
e = self._pol_cota_vol["valor"][-1][4]
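# Stage (cota) as a 4th-degree polynomial of the stored volume V:
#   cota(V) = a + b*V + c*V**2 + d*V**3 + e*V**4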
# Productivity at 65% of useful storage
volume = self._vol_min['valor'][-1] + 0.65*self._vol_util['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_65['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_65['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
# Productivity at 50% of useful storage
volume = self._vol_min['valor'][-1] + 0.50*self._vol_util['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_50['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_50['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
# Productivity at maximum storage
volume = self._vol_max['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_max['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_max['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
# Productivity at minimum storage
volume = self._vol_min['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_min['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_min['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
# Equivalent productivity
if ( self._vol_util['valor'][-1] > 0):
cota = 0
cota65 = 0
Vol65 = self._vol_min['valor'][-1] + 0.65*self._vol_util['valor'][-1]
for i in range(5):
cota = cota + self._pol_cota_vol["valor"][-1][i] * (self._vol_max['valor'][-1]**(i+1)) / (i+1)
cota = cota - self._pol_cota_vol["valor"][-1][i] * (self._vol_min['valor'][-1]**(i+1)) / (i+1)
cota65 = cota65 + self._pol_cota_vol["valor"][-1][i] * (Vol65**(i+1)) / (i+1)
cota65 = cota65 - self._pol_cota_vol["valor"][-1][i] * (self._vol_min['valor'][-1]**(i+1)) / (i+1)
cota = cota / self._vol_util['valor'][-1]
cota65 = cota65 / (Vol65 - self._vol_min['valor'][-1])
else:
cota65 = cota
for iano in range(nanos):
for imes in range(12):
cfuga = self._cfugat['valor'][-1][iano][imes]
if self._tipo_perda['valor'][-1] == 2:
self._ro_equiv['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga - self._perda_hid['valor'][-1])
self._ro_equiv65['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota65 - cfuga - self._perda_hid['valor'][-1])
else:
self._ro_equiv['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
self._ro_equiv65['valor'][-1][iano][imes] = self._prod_esp['valor'][-1] * (cota65 - cfuga)*(1. - self._perda_hid['valor'][-1]/100)
return
def _prod_acum(self):
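# For every plant and every month/year of the study, accumulates the
# productivities of the plant itself and of all operational plants downstream
# of it (its cascade), split by REE / subsystem and by whether a storage
# reservoir is crossed on the way down.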
def cascata(confhd, codigo, iano,imes):
current = confhd.get(codigo)
if current['status_vol_morto'][iano][imes] == 2:
yield current['codigo']
while current['jusante'] != 0:
current = confhd.get(current['jusante'])
if current['status_vol_morto'][iano][imes] == 2:
yield current['codigo']
#
# Walk every plant in CONFHD to fill in accumulated productivities
#
for reg, codigo in enumerate(self._codigo['valor']):
nanos = len(self._status_vol_morto['valor'][reg])
#
# Productivities must be computed for every month/year of the history
#
for iano in range(nanos):
for imes in range(12):
trocouRee = 0
trocouSist = 0
FioRee = True
FioSist = True
for iusina in cascata(self, codigo, iano, imes):
uhe = self.get(iusina)
produtib = uhe['ro_equiv'][iano][imes]
produtib65 = uhe['ro_equiv65'][iano][imes]
produtibMax = uhe['ro_max'][iano][imes]
produtibMed = uhe['ro_65'][iano][imes]
produtibMin = uhe['ro_min'][iano][imes]
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum['valor'][reg][iano][imes] += produtib
self._ro_acum_65['valor'][reg][iano][imes] += produtib65
self._ro_acum_max['valor'][reg][iano][imes] += produtibMax
self._ro_acum_med['valor'][reg][iano][imes] += produtibMed
self._ro_acum_min['valor'][reg][iano][imes] += produtibMin
if uhe['sist'] != self._sist['valor'][reg]:
trocouSist = trocouSist + 1
if uhe['ree'] != self._ree['valor'][reg]:
trocouRee = trocouRee + 1
if trocouRee == 0:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_a_ree['valor'][reg][iano][imes] += produtib
else:
if uhe['vol_util'] > 0:
FioRee = False
if FioRee:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_b_ree['valor'][reg][iano][imes] += produtib
else:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_c_ree['valor'][reg][iano][imes] += produtib
if trocouSist == 0:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_a_sist['valor'][reg][iano][imes] += produtib
else:
if uhe['vol_util'] > 0:
FioSist = False
if FioSist:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_b_sist['valor'][reg][iano][imes] += produtib
else:
if uhe['status_motoriz'][iano][imes] == 2:
self._ro_acum_c_sist['valor'][reg][iano][imes] += produtib
def _prod_acum_entre_res_ree(self, uhe, iano, imes):
if uhe['jusante'] == 0:
return 0
uhe_nova = self.get(uhe['jusante'])
if uhe_nova['vol_util'] != 0:
return 0.
elif uhe_nova['ree'] != uhe['ree']:
return 0.
elif uhe_nova['status_motoriz'][iano][imes] == 2:
return uhe_nova['ro_equiv'] + self._prod_acum_entre_res_ree(uhe_nova, iano, imes)
else:
return self._prod_acum_entre_res_ree(uhe_nova, iano, imes)
#
# def ProdAcumEntreResSist(self, iano, imes, usinas):
# if self.Jusante == 0:
# return 0
# for iusina in usinas:
# if iusina.Codigo == self.Jusante:
# if iusina.VolUtil != 0:
# return 0.
# elif self.Sist != iusina.Sist:
# return 0.
# elif iusina.StatusMotoriz[iano][imes] == 2:
# return iusina.RoEquiv + iusina.ProdAcumEntreResSist(iano, imes, usinas)
# else:
# return iusina.ProdAcumEntreResSist(iano, imes, usinas)
# break
def _calc_engol(self, ql):
engol = 0.
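# Swallowing capacity: sum over machine sets of
#   n_machines * effective_flow * (net_head / effective_head) ** alpha,
# where alpha is 0.5 (turbine types 1 and 3) or 0.2 below the effective head
# and -1 above it.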
for i in range(5): # Loop over machine sets
if self._maq_por_conj['valor'][-1][i] > 0:
if ql < self._alt_efet_conj['valor'][-1][i]:
if self._tipo_turb['valor'][-1] == 1 or self._tipo_turb['valor'][-1] == 3:
alpha = 0.5
else:
alpha = 0.2
else:
alpha = -1
if self._alt_efet_conj['valor'][-1][i] != 0:
engol = engol + self._maq_por_conj['valor'][-1][i]*self._vaz_efet_conj['valor'][-1][i]*((ql/self._alt_efet_conj['valor'][-1][i])**alpha)
return engol
def _calc_engol_maximo(self): # Estimate the plant's maximum swallowing capacity
a = self._pol_cota_vol['valor'][-1][0]
b = self._pol_cota_vol['valor'][-1][1]
c = self._pol_cota_vol['valor'][-1][2]
d = self._pol_cota_vol['valor'][-1][3]
e = self._pol_cota_vol['valor'][-1][4]
# Swallowing capacity at 65% of useful storage
volume = self._vol_min['valor'][-1] + 0.65*self._vol_util['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
queda65 = cota - self._cfmed['valor'][-1]
engol65 = self._calc_engol(queda65)
# Swallowing capacity at 50% of useful storage
volume = self._vol_min['valor'][-1] + 0.50*self._vol_util['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
queda50 = cota - self._cfmed['valor'][-1]
engol50 = self._calc_engol(queda50)
# Swallowing capacity at maximum storage
volume = self._vol_max['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
quedaMax = cota - self._cfmed['valor'][-1]
engolMax = self._calc_engol(quedaMax)
# Swallowing capacity at minimum storage
volume = self._vol_min['valor'][-1]
cota = a + b*volume + c*volume**2 + d*volume**3 + e*volume**4
quedaMin = cota - self._cfmed['valor'][-1]
engolMin = self._calc_engol(quedaMin)
# Swallowing capacity at the equivalent head
if ( self._vol_util['valor'][-1] > 0):
cota = 0
for i in range(5):
cota = cota + self._pol_cota_vol['valor'][-1][i] * (self._vol_max['valor'][-1]**(i+1)) / (i+1)
cota = cota - self._pol_cota_vol['valor'][-1][i] * (self._vol_min['valor'][-1]**(i+1)) / (i+1)
cota = cota / self._vol_util['valor'][-1]
quedaEquiv = cota - self._cfmed['valor'][-1]
engolEquiv = self._calc_engol(quedaEquiv)
self._engolimento['valor'].append((engol50+engol65+engolEquiv+engolMax+engolMin)/5)
return
def lista_uhes(self):
"""
Returns a generator over the reference codes of every plant in the CONFHD file.
"""
for i in range(self.nuhe):
yield self._codigo["valor"][i]
def _acerta_modif(self, df, dger):
tamanho = df.shape
tamanho = tamanho[0]
for linha in range(tamanho):
registro = df.iloc[linha].values
#
# Type-zero keywords - just update values
#
if registro[4].upper() == 'NUMCNJ':
self._num_conj_maq['valor'][-1] = registro[5]
if registro[4].upper() == 'PRODESP':
self._prod_esp['valor'][-1] = registro[5]
if registro[4].upper() == 'TEIF':
self._teifh['valor'][-1] = registro[5]
if registro[4].upper() == 'IP':
self._ip['valor'][-1] = registro[5]
if registro[4].upper() == 'PERDHID':
self._perda_hid['valor'][-1] = registro[5]
if registro[4].upper() == 'VAZMIN':
self._vaz_min['valor'][-1] = registro[5]
if registro[4].upper() == 'NUMBAS':
self._unid_base['valor'][-1] = registro[5]
#
# Type-one keywords - two fields
#
if registro[4].upper() == 'NUMMAQ':
nr_conj = int(registro[6])
self._maq_por_conj['valor'][-1][nr_conj-1] = int(registro[5])
if registro[4].upper() == 'POTEFE':
nr_conj = int(registro[6])
self._pef_por_conj['valor'][-1][nr_conj-1] = registro[5]
if registro[4].upper() == 'COEFEVAP':
mes = int(registro[6])
self._coef_evap['valor'][-1][mes-1] = registro[5]
if registro[4].upper() == 'VOLMIN':
if registro[6].find("%") == 1:
self._vol_min['valor'][-1] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
if registro[6].find("h") == 1:
self._vol_min['valor'][-1] = registro[5]
if registro[4].upper() == 'VOLMAX':
if registro[6].find("%") == 1:
self._vol_max['valor'][-1] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
if registro[6].find("h") == 1:
self._vol_max['valor'][-1] = registro[5]
#
# Type-two keywords - stage-area (PCA) and stage-storage (PCV) coefficients
#
if registro[4].upper() == 'VOLCOTA':
self._pol_cota_vol['valor'][-1] = registro[5]
if registro[4].upper() == 'COTAREA':
self._pol_cota_area['valor'][-1] = registro[5]
#
# Type-3 keywords - date and value
#
if registro[4].upper() == 'CFUGA':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
self._cfugat['valor'][-1][ano][mes] = registro[5]
mes += 1
mes = 0
ano += 1
if registro[4].upper() == 'VAZMINT':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
self._vaz_mint['valor'][-1][ano][mes] = registro[5]
mes += 1
mes = 0
ano += 1
if registro[4].upper() == 'CMONT':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
self._cmont['valor'][-1][ano][mes] = registro[5]
mes += 1
mes = 0
ano += 1
#
# Type-4 keywords - date, value and ('h' or '%')
#
if registro[4].upper() == 'VMINP':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
if registro[6].find("h") == 1:
self._vol_minp['valor'][-1][ano][mes] = registro[5]
if registro[6].find("%") == 1:
self._vol_minp['valor'][-1][ano][mes] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
mes += 1
mes = 0
ano += 1
if registro[4].upper() == 'VMINT':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
if registro[6].find("h") == 1:
self._vol_mint['valor'][-1][ano][mes] = registro[5]
if registro[6].find("%") == 1:
self._vol_mint['valor'][-1][ano][mes] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
mes += 1
mes = 0
ano += 1
if registro[4].upper() == 'VMAXT':
ano = int(registro[0]) - dger.ano_ini['valor']
mes = int(registro[3]) - 1
while ano < dger.num_anos['valor']:
while mes < 12:
if registro[6].find("h") == 1:
self._vol_maxt['valor'][-1][ano][mes] = registro[5]
if registro[6].find("%") == 1:
self._vol_maxt['valor'][-1][ano][mes] = self._vol_min['valor'][-1] + \
float(registro[5]) * self._vol_util['valor'][-1] / 100
mes += 1
mes = 0
ano += 1
return
def _acerta_exph(self, df, dger):
tamanho = df.shape
tamanho = tamanho[0]
#
# Record layout
#
# registro[0] = 'codigo',
# registro[1] = 'nome',
# registro[2] = 'mesi_evm',
# registro[3] = 'anoi_evm',
# registro[4] = 'dura_evm',
# registro[5] = 'perc_evm',
# registro[6] = 'mesi_tur',
# registro[7] = 'anoi_tur',
# registro[8] = 'comentar',
# registro[9] = 'nume_tur',
# registro[10] = 'nume_cnj']
if tamanho > 0:
registro = df.iloc[0].values
#
# Handle dead-storage filling
#
if not np.isnan(registro[2]):
dur_vm = int(registro[4])
mesinicial = int(registro[2])
anoinicial = int(registro[3])
volume = self._vol_min['valor'][-1] * float(registro[5]) / 100
volume = (self._vol_min['valor'][-1] - volume) / dur_vm
vol_frac = volume
for iano in range(anoinicial - dger.ano_ini['valor'], dger.num_anos['valor']):
for imes in range(mesinicial - 1, 12):
if dur_vm > 0:
self._status_vol_morto['valor'][-1][iano][imes] = 1
self._vol_morto_tempo['valor'][-1][iano][imes] += volume
volume += vol_frac
dur_vm -= 1
else:
self._status_vol_morto['valor'][-1][iano][imes] = 2
self._vol_morto_tempo['valor'][-1][iano][imes] = 0.
mesinicial = 1
else:
self._status_vol_morto['valor'][-1] = 2 * np.ones((dger.num_anos['valor'], 12), 'i')
for linha in range(tamanho):
registro = df.iloc[linha].values
if not np.isnan(registro[6]):
#
# Fill in the time evolution of (1) number of units; (2) swallowing capacity; (3) power
#
mes_ent = int(registro[6])
ano_ent = int(registro[7])
pot_ent = float(registro[8])
unidade = int(registro[9])
conjunto = int(registro[10])
if mes_ent > 0:
mesinicial = mes_ent
self._maq_por_conj['valor'][-1][conjunto - 1] = unidade
self._pef_por_conj['valor'][-1][conjunto - 1] = pot_ent
self._calc_pot_efetiva()
self._calc_engol_maximo()
for iano in range(ano_ent - dger.ano_ini['valor'], dger.num_anos['valor']):
for imes in range(mesinicial - 1, 12):
self._unidades_tempo['valor'][-1][iano][imes] += 1
self._engol_tempo['valor'][-1][iano][imes] = self._engolimento['valor'][-1]
self._potencia_tempo['valor'][-1][iano][imes] = self._pot_efet['valor'][-1]
mesinicial = 1
#
# Update motorization status
#
for iano in range(dger.num_anos['valor']):
for imes in range(12):
if self._unidades_tempo['valor'][-1][iano][imes] >= self._unid_base['valor'][-1]:
self._status_motoriz['valor'][-1][iano][imes] = 2
elif self._unidades_tempo['valor'][-1][iano][imes] > 0:
self._status_motoriz['valor'][-1][iano][imes] = 1
else:
if self._status_motoriz['valor'][-1][iano][imes] == 2:
self._status_motoriz['valor'][-1][iano][imes] = 1
else:
self._status_motoriz['valor'][-1][iano][imes] = 0
##########################################################################################################
# Miscellaneous plots
##########################################################################################################
def plota_volume(self, uhe):
nanos = len(uhe['vol_mint'])
fig = plt.figure()
ax = plt.subplot(111)
x_axis = np.arange(1,nanos*12+1)
ax.plot(x_axis,uhe['vol_mint'].reshape(nanos*12),'g-.',lw=2, label = 'Vol.Min.Operat.')
ax.plot(x_axis,uhe['vol_maxt'].reshape(nanos*12),'g-.',lw=2, label = 'Vol.Max.Operat.')
ax.plot(x_axis,uhe['vol_max']*np.ones(nanos*12),'b-',lw=3, label = 'Vol.Maximo Real')
ax.plot(x_axis,uhe['vol_min']*np.ones(nanos*12),'b-',lw=3, label = 'Vol.Minimo Real')
ax.plot(x_axis,uhe['vol_minp'].reshape(nanos*12),'b-.',lw=2, label = 'Vol.Min.com Pen.')
plt.fill_between(x_axis,uhe['vol_mint'].reshape(nanos*12), uhe['vol_maxt'].reshape(nanos*12), facecolor='g', alpha=0.1)
titulo = 'Evolucao dos Volumes da Usina \n' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes de Estudo', fontsize=16)
plt.ylabel('Volume em hm^3', fontsize=16)
box = ax.get_position()
ax.set_position([ box.x0, box.y0, box.width*0.7, box.height] )
ax.legend(loc='center left', shadow=True, fontsize=12, bbox_to_anchor=(1, 0.5))
plt.show()
def plota_vaz_min(self, uhe):
nanos = len(uhe['vaz_mint'])
fig = plt.figure()
ax = plt.subplot(111)
x_axis = np.arange(1,nanos*12+1)
ax.plot(x_axis,uhe['vaz_mint'].reshape(nanos*12),'g-.',lw=2, label='Vaz.Min.Operat.')
ax.plot(x_axis,uhe['vaz_min']*np.ones(nanos*12),'b-',lw=3, label='Vaz.Min.Cadastro')
titulo = 'Evolucao da Vazao Minima da Usina \n' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes de Estudo', fontsize=16)
plt.ylabel('Vazao Minima em m^3/s', fontsize=16)
box = ax.get_position()
ax.set_position([ box.x0, box.y0, box.width*0.7, box.height] )
ax.legend(loc='center left', shadow=True, fontsize=12, bbox_to_anchor=(1, 0.5))
plt.show()
def plota_volmorto(self, uhe):
if uhe['status'] == 'EX':
print('Grafico de Volume Morto nao impresso, pois ', uhe['nome'], 'e uma usina existente')
return
nanos = len(uhe['vol_morto_tempo'])
nmeses = np.count_nonzero(uhe['vol_morto_tempo'])
legenda = str(nmeses) + ' Meses'
ax = plt.subplot(111)
x_axis = np.arange(1,nanos*12+1)
p1 = ax.plot(x_axis,uhe['vol_morto_tempo'].reshape(nanos*12),'g-.',lw=2, label = legenda )
titulo = 'Enchimento do Volume Morto da Usina \n' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes de Estudo', fontsize=16)
plt.ylabel('Volume Morto em hm^3', fontsize=16)
plt.legend(fontsize=12)
np.count_nonzero(uhe['vol_morto_tempo'])
plt.show()
def plota_potencia(self, uhe):
nanos = len(uhe['potencia_tempo'])
ax = plt.subplot(111)
x_axis = np.arange(1, nanos * 12 + 1)
p1 = ax.plot(x_axis, uhe['potencia_tempo'].reshape(nanos * 12), 'g-.', lw=2)
titulo = 'Evolucao da Potencia Efetiva da Usina \n' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes de Estudo', fontsize=16)
plt.ylabel('Potencia Efetiva em MW', fontsize=16)
plt.show()
def plot_vaz(self, uhe):
"""
Plots every yearly series of the streamflow history of the plant whose data
dictionary is given as input.
Cyan: the individual yearly series.
Dark blue: the last yearly series.
Solid red: the monthly mean.
Dashed red: the mean minus/plus one standard deviation.
:param uhe: data dictionary with the information of a hydro plant
"""
vaz_nat = uhe['vazoes']
x_axis = np.arange(1, 13)
plt.plot(x_axis, vaz_nat.transpose(), 'c-')
media = np.mean(vaz_nat, axis=0)
plt.plot(x_axis, media, 'r-', lw=3)
desvio = np.nanstd(vaz_nat, axis=0)
plt.plot(x_axis, media + desvio, 'r-.', lw=2)
plt.plot(x_axis, media - desvio, 'r-.', lw=2)
ultimo = len(vaz_nat) - 1
plt.plot(x_axis, vaz_nat[:][ultimo], 'b-')
titulo = 'Historico de Vazoes da Usina ' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.xlabel('Mes do Ano', fontsize=16)
plt.ylabel('Vazao', fontsize=16)
plt.show()
return
# Plot the stage-storage (cota-volume) polynomial
def plot_pcv(self, uhe):
"""
Plots the stage-storage (cota-volume) polynomial of the hydro plant given as input.
:param uhe: data dictionary with the information of the hydro plant
"""
if uhe["vol_min"] == 0:
return
a = uhe['pol_cota_vol'][0]
b = uhe['pol_cota_vol'][1]
c = uhe['pol_cota_vol'][2]
d = uhe['pol_cota_vol'][3]
e = uhe['pol_cota_vol'][4]
if (uhe["vol_min"] == uhe["vol_max"]):
volumes = np.linspace(uhe["vol_min"] - 1,uhe["vol_max"] + 1, 100)
cota = a + b*uhe["vol_min"] + c*uhe["vol_min"]**2 + d*uhe["vol_min"]**3 + e*uhe["vol_min"]**4
cota = cota*np.ones(100)
else:
volumes = np.linspace(uhe["vol_min"],uhe["vol_max"],100)
cota = a + b*volumes + c*volumes**2 + d*volumes**3 + e*volumes**4
cota.shape = volumes.shape
plt.plot(volumes, cota, 'b-', lw=3)
plt.xlabel('Volume do Reservatorio (hm^3)', fontsize=16)
titulo = 'Polinomio Cota-Volume da Usina ' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.ylabel('Cota em Metros', fontsize=16)
plt.xlim(volumes[0], volumes[99])
if ( cota[0] == cota[99]):
plt.ylim(cota[0]-1, cota[99]+1)
else:
plt.ylim(cota[0], cota[99])
plt.show()
# Plot the stage-area (cota-area) polynomial
def plot_pca(self, uhe):
"""
Plots the stage-area (cota-area) polynomial of the hydro plant given as input.
:param uhe: data dictionary with the information of the hydro plant
"""
if uhe['vol_min'] == 0:
return
if (uhe['cota_min'] == uhe['cota_max']):
cotas = np.linspace(uhe['cota_min'] - 1,uhe['cota_max'] + 1, 100)
else:
cotas = np.linspace(uhe['cota_min'],uhe['cota_max'],100)
a = uhe['pol_cota_area'][0]
b = uhe['pol_cota_area'][1]
c = uhe['pol_cota_area'][2]
d = uhe['pol_cota_area'][3]
e = uhe['pol_cota_area'][4]
areas = a + b*cotas + c*cotas**2 + d*cotas**3 + e*cotas**4
areas.shape = cotas.shape
plt.plot(cotas, areas, 'b-', lw=3)
plt.xlabel('Cota do Reservatorio (em metros)', fontsize=16)
titulo = 'Polinomio Cota-Area da Usina ' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.ylabel('Area Superficia em km^2', fontsize=16)
plt.xlim(cotas[0], cotas[99])
if ( areas[0] == areas[99]):
plt.ylim(areas[0]-1, areas[99]+1)
else:
plt.ylim(areas[0], areas[99])
plt.show()
    # Plot the constant productivity values of the plant
def plota_produtibs(self, uhe, iano, imes):
"""
        Plots a bar chart with the productivity values of the hydroelectric plant for the given year and month
        :param uhe: Data dictionary with the information of the hydroelectric plant
"""
x_axis = np.arange(1,7)
y_axis = [ uhe['ro_equiv'][iano][imes], uhe['ro_equiv65'][iano][imes], uhe['ro_min'][iano][imes],
uhe['ro_50'][iano][imes], uhe['ro_65'][iano][imes], uhe['ro_max'][iano][imes] ]
fig, ax = plt.subplots()
a, b, c, d, e, f = plt.bar(x_axis, y_axis)
a.set_facecolor('r')
b.set_facecolor('g')
c.set_facecolor('b')
d.set_facecolor('y')
e.set_facecolor('m')
f.set_facecolor('c')
ax.set_xticks(x_axis)
ax.set_xticklabels(['Equiv', 'Equiv65', 'Min', '50%', '65%', 'Max'])
titulo = 'Produtibilidades da Usina ' + uhe['nome'] + ' - Ano: ' + str(iano+1) + ' - Mês:' + str(imes+1)
plt.title(titulo, fontsize=16)
plt.xlabel('Tipo de Produtibilidade', fontsize=16)
plt.ylabel('Produtibilidade', fontsize=16)
plt.show()
    # Plot the productivity variation
def plot_var_prod(self, uhe):
"""
        Plots the productivity variation (as a function of storage and outflow) of the hydroelectric plant given as input
        :param uhe: Data dictionary with the information of the hydroelectric plant
"""
if uhe['vol_min'] == 0:
return
a = uhe['pol_cota_vol'][0]
b = uhe['pol_cota_vol'][1]
c = uhe['pol_cota_vol'][2]
d = uhe['pol_cota_vol'][3]
e = uhe['pol_cota_vol'][4]
if (uhe["vol_min"] == uhe["vol_max"]):
volumes = np.linspace(uhe["vol_min"] - 1,uhe["vol_max"] + 1, 100)
cotamont = a + b*uhe["vol_min"] + c*uhe["vol_min"]**2 + d*uhe["vol_min"]**3 + e*uhe["vol_min"]**4
cotamont = cotamont*np.ones(100)
else:
volumes = np.linspace(uhe["vol_min"],uhe["vol_max"],100)
cotamont = a + b*volumes + c*volumes**2 + d*volumes**3 + e*volumes**4
cotamont.shape = volumes.shape
qdef = np.linspace(uhe['vaz_min'], 2*uhe['engolimento'], 100)
a = uhe['pol_vaz_niv_jus'][0]
b = uhe['pol_vaz_niv_jus'][1]
c = uhe['pol_vaz_niv_jus'][2]
d = uhe['pol_vaz_niv_jus'][3]
e = uhe['pol_vaz_niv_jus'][4]
cotajus = a + b*qdef + c*qdef**2 + d*qdef**3 + e*qdef**4
cotajus.shape = qdef.shape
xGrid, yGrid = np.meshgrid(cotamont, cotajus)
z = uhe['prod_esp'] * ( xGrid - yGrid )
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(qdef, volumes,z, rcount=100, ccount = 100, cmap=plt.cm.coolwarm,
linewidth=0, antialiased=False)
plt.xlabel('Vazão Defluente em m^3/s', fontsize=12)
titulo = 'Produtibilidade da Usina ' + uhe['nome']
plt.title(titulo, fontsize=16)
plt.ylabel('Volume Armazenado em hm^3', fontsize=12)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
    # Plot non-existing plants and existing plants under expansion
def plota_expansao(self):
        # Count how many plants are under expansion (status 'EE' or 'NE')
cont = 0
nomes = []
for iusi, status in enumerate(self._status['valor']):
if status == 'EE' or status == 'NE':
cont += 1
nomes.append(self._nome['valor'][iusi])
motorizada = np.zeros(cont)
vazia = np.zeros(cont)
enchendo = np.zeros(cont)
submotorizada = np.zeros(cont)
ind = np.arange(cont)
cont = 0
nanos = len(self._status_vol_morto['valor'][0])
for iusi, status in enumerate(self._status['valor']):
if status == 'EE' or status == 'NE':
                # Months in which the plant is fully motorized
motorizada[cont] = nanos * 12 - np.count_nonzero(self._status_motoriz['valor'][iusi] - 2)
                # Months in which the plant has not yet started filling the dead volume
vazia[cont] = nanos * 12 - np.count_nonzero(self._status_vol_morto['valor'][iusi])
                # Months in which the plant is filling the dead volume
enchendo[cont] = nanos * 12 - np.count_nonzero(self._status_vol_morto['valor'][iusi] - 1)
                # Months in which the plant is still being motorized (submotorized)
submotorizada[cont] = nanos * 12 - np.count_nonzero(self._status_motoriz['valor'][iusi] - 1)
cont += 1
width = 0.35 # the width of the bars: can also be len(x) sequence
ax = plt.axes()
p1 = plt.barh(ind, vazia, width, color='w')
p2 = plt.barh(ind, enchendo, width, color='lime', left=vazia)
p3 = plt.barh(ind, submotorizada, width, color='sienna', left=vazia + enchendo)
p4 = plt.barh(ind, motorizada, width, color='black', left=vazia + enchendo + submotorizada)
plt.ylabel('Usinas', fontsize=16)
plt.title('Usinas Hidreletricas em Expansao', fontsize=16)
plt.yticks(ind, nomes, fontsize=12)
plt.xticks(np.arange(0, nanos * 12 + 2, 12))
# plt.yticks(np.arange(0, 81, 10))
plt.legend((p1[0], p2[0], p3[0], p4[0]), ('Nao Entrou', 'Enchendo Vol. Morto', 'Submotorizada', 'Motorizada'),
fontsize=12)
plt.xlabel('Meses do Estudo', fontsize=16)
ax.xaxis.grid()
plt.show()
def parp(self, uhe, ord_max):
"""
        Implements the method that computes the coefficients of the PAR(p) model.
        :param uhe: data dictionary with the information of the hydroelectric plant,
                    ord_max: maximum order of the PAR(p) model
        :returns ordem: order of the AR model for each month,
                 coef_parp: coefficients of the AR model for each month,
                 fac: autocorrelation function,
                 facp: partial autocorrelation function,
                 residuos: matrix of residuals
"""
vazoes = uhe['vazoes']
        nanos = len(vazoes) # The last year of the historical series is usually incomplete (discard it)
        media = np.mean(vazoes[1:(nanos-1)], 0) # The first historical year is used as a trend (discard it)
        desvio = np.std(vazoes[1:(nanos-1)], 0) # The first historical year is used as a trend (discard it)
        # Compute normalized inflows (not needed)
#vaznorm = np.zeros((nanos,12),'d')
#for iano in range(nanos):
# for imes in range(12):
# vaznorm[iano][imes] = (self.Vazoes[iano][imes] - media[imes])/desvio[imes]
        # Compute the autocorrelation function (one per month)
fac = np.zeros( (12, ord_max+1), 'd')
for ilag in range(ord_max+1):
for imes in range(12):
for iano in np.arange(1,nanos-1):
ano_ant = iano
mes_ant = imes - ilag
if mes_ant < 0:
ano_ant -= 1
mes_ant += 12
fac[imes][ilag] += (vazoes[iano][imes] - media[imes]) * (vazoes[ano_ant][mes_ant] - media[mes_ant])
fac[imes][ilag] /= (nanos-2)
fac[imes][ilag] /= (desvio[imes]*desvio[mes_ant])
        # Compute the partial autocorrelation function (one per month)
facp = np.zeros((12, ord_max+1), 'd')
for ilag in np.arange(1,ord_max+1):
for imes in range(12):
A = np.eye(ilag)
B = np.zeros(ilag)
                # Fill the upper triangular part of the matrix
for ilin in range(len(A)):
                    for icol in range( len(A) ): # TODO: this could be np.arange(ilin+1, len(A)); test later
if icol > ilin:
mes = imes - ilin - 1
if mes < 0:
mes = mes + 12
A[ilin][icol] = fac[mes][icol-ilin]
B[ilin] = fac[imes][ilin+1]
                # Fill the lower triangular part of the matrix
for ilin in range(len(A)):
                    for icol in range( len(A) ): # TODO: this could be np.arange(0, ilin); test later
if icol < ilin:
A[ilin][icol] = A[icol][ilin]
phi = np.linalg.solve(A,B)
facp[imes][ilag] = phi[ len(phi)-1 ]
        # Identify the AR model order for each month
IC = 1.96/np.sqrt(nanos-2)
ordem = np.zeros(12, 'i')
for imes in range(12):
ordem[imes] = 0
for ilag in range(ord_max+1):
if facp[imes][ilag] > IC or facp[imes][ilag] < -IC:
ordem[imes] = ilag
        # Compute the AR coefficients
coef_parp = np.zeros( (12,ord_max), 'd')
for imes in range(12):
ilag = ordem[imes]
A = np.eye(ilag)
B = np.zeros(ilag)
            # Fill the upper triangular part of the matrix
for ilin in range(len(A)):
                for icol in range( len(A) ): # TODO: this could be np.arange(ilin+1, len(A)); test later
if icol > ilin:
mes = imes - ilin - 1
if mes < 0:
mes = mes + 12
A[ilin][icol] = fac[mes][icol-ilin]
B[ilin] = fac[imes][ilin+1]
            # Fill the lower triangular part of the matrix
for ilin in range(len(A)):
                for icol in range( len(A) ): # TODO: this could be np.arange(0, ilin); test later
if icol < ilin:
A[ilin][icol] = A[icol][ilin]
            phi = np.linalg.solve(A,B)
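# Illustrative sketch: parp() above builds, for each month, a symmetric system
# A*phi = B from the autocorrelation function and solves it with np.linalg.solve.
# The helper below shows the same Yule-Walker idea for a plain (non-periodic)
# AR(p) fit; the function name, the normalisation and the order p are
# illustrative assumptions, not taken from the original model.
def _ar_coefficients_sketch(series, p):
    import numpy as np
    x = np.asarray(series, dtype=float)
    x = (x - x.mean()) / x.std()
    n = len(x)
    # sample autocorrelation for lags 0..p
    r = np.array([np.dot(x[:n - k], x[k:]) / n for k in range(p + 1)])
    # Toeplitz Yule-Walker system: A[i, j] = r[|i - j|], B[i] = r[i + 1]
    A = np.array([[r[abs(i - j)] for j in range(p)] for i in range(p)])
    B = r[1:p + 1]
    return np.linalg.solve(A, B)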
from datetime import datetime
from math import sqrt, exp
import numpy as np
class LevenshteinCalculator(object):
def calculate(self, source, target):
if len(source) < len(target):
return self.calculate(target, source)
if len(target) == 0:
return len(source)
source = np.array(tuple(source))
target = np.array(tuple(target))
previous_row = np.arange(target.size + 1)
for s in source:
current_row = previous_row + 1
current_row[1:] = np.minimum(current_row[1:], np.add(previous_row[:-1], target != s))
            current_row[1:] = np.minimum(current_row[1:], current_row[0:-1] + 1)
            # carry the row forward and return the final edit distance
            previous_row = current_row
        return previous_row[-1]
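# Illustrative usage: a quick check of LevenshteinCalculator on the classic
# 'kitten'/'sitting' pair, whose edit distance is 3.
def _levenshtein_demo():
    return LevenshteinCalculator().calculate("kitten", "sitting")  # -> 3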
import torch
import numpy as np
import torch.nn as torch_nn
from torch.nn import Parameter
import torch.nn.functional as F
from collections import OrderedDict
from scipy.spatial.transform import Rotation as R
import platform
# torch.set_default_tensor_type('torch.cuda.FloatTensor')
import jittor as jt
from jittor import Module
from jittor import nn
class Sine(Module):
    def __init__(self, w0=30.):
super().__init__()
self.w0 = w0
def forward(self, input):
return jt.sin(self.w0 * input)
act_dict = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid, 'elu': nn.ELU, 'tanh': nn.Tanh, 'sine': Sine}
class Embedder(Module):
def __init__(self, input_dim, max_freq_log2, N_freqs,
log_sampling=True, include_input=True,
periodic_fns=(jt.sin, jt.cos)):
'''
:param input_dim: dimension of input to be embedded
:param max_freq_log2: log2 of max freq; min freq is 1 by default
:param N_freqs: number of frequency bands
        :param log_sampling: if True, frequency bands are linearly sampled in log-space
:param include_input: if True, raw input is included in the embedding
:param periodic_fns: periodic functions used to embed input
'''
super().__init__()
self.input_dim = input_dim
self.include_input = include_input
self.periodic_fns = periodic_fns
self.out_dim = 0
if self.include_input:
self.out_dim += self.input_dim
self.out_dim += self.input_dim * N_freqs * len(self.periodic_fns)
if log_sampling:
self.freq_bands = 2. ** np.linspace(0., max_freq_log2, N_freqs)
else:
self.freq_bands = np.linspace(2. ** 0., 2. ** max_freq_log2, N_freqs)
self.freq_bands = self.freq_bands.tolist()
def execute(self, x):
'''
:param x: tensor of shape [..., self.input_dim]
:return: tensor of shape [..., self.out_dim]
'''
assert (x.shape[-1] == self.input_dim)
out = []
if self.include_input:
out.append(x)
for i in range(len(self.freq_bands)):
freq = self.freq_bands[i]
for p_fn in self.periodic_fns:
out.append(p_fn(x * freq))
out = jt.concat(out, dim=-1)
assert (out.shape[-1] == self.out_dim)
return out
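# Illustrative usage: with include_input=True the output dimension is
# input_dim + input_dim * N_freqs * len(periodic_fns).  The batch size and the
# frequency settings below are arbitrary examples.
def _embedder_demo():
    emb = Embedder(input_dim=3, max_freq_log2=9, N_freqs=10)
    x = jt.random((1024, 3))
    y = emb(x)
    assert y.shape[-1] == emb.out_dim  # 3 + 3 * 10 * 2 = 63
    return y.shape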
class MLP(Module):
def __init__(self, D=8, W=256, input_ch=3, input_ch_viewdirs=3, skips=[4], act_func=nn.ReLU, use_viewdir=True,
sigma_mul=0.):
'''
:param D: network depth
:param W: network width
:param input_ch: input channels for encodings of (x, y, z)
:param input_ch_viewdirs: input channels for encodings of view directions
:param skips: skip connection in network
'''
super().__init__()
self.input_ch = input_ch
self.input_ch_viewdirs = input_ch_viewdirs
self.skips = skips
self.use_viewdir = use_viewdir
self.sigma_mul = sigma_mul
# base
self.base_layers = []
dim = self.input_ch
for i in range(D):
self.base_layers.append(nn.Linear(in_features=dim, out_features=W, bias=True))
dim = W
if i in self.skips and i != (D - 1): # skip connection after i^th layer
dim += input_ch
self.base_layers = nn.ModuleList(self.base_layers)
self.act = act_func()
# sigma
sigma_layer = nn.Linear(dim, 1) # sigma must be positive
self.sigma_layer = sigma_layer
# remap
base_remap_layer = nn.Linear(dim, 256)
self.base_remap_layer = base_remap_layer
# rgb
self.rgb_layers = []
dim = 256 + self.input_ch_viewdirs if self.use_viewdir else 256
self.rgb_layers.append(nn.Linear(dim, W // 2))
self.rgb_layers.append(nn.Linear(W // 2, 3))
self.rgb_layers = nn.ModuleList(self.rgb_layers)
self.layers = [*self.base_layers, self.sigma_layer, self.base_remap_layer, *self.rgb_layers]
def execute(self, pts, dirs):
'''
:param input: [..., input_ch+input_ch_viewdirs]
:return [..., 4]
'''
base = self.base_layers[0](pts)
for i in range(len(self.base_layers) - 1):
if i in self.skips:
                base = jt.concat((pts, base), dim=-1)  # jittor Vars: use jt.concat
base = self.act(self.base_layers[i + 1](base))
sigma = self.sigma_layer(base)
sigma = sigma + nn.relu(sigma) * self.sigma_mul
base_remap = self.act(self.base_remap_layer(base))
if self.use_viewdir:
            rgb_fea = self.act(self.rgb_layers[0](jt.concat((base_remap, dirs), dim=-1)))  # jittor Vars: use jt.concat
else:
rgb_fea = self.act(self.rgb_layers[0](base_remap))
rgb = jt.sigmoid(self.rgb_layers[1](rgb_fea))
ret = OrderedDict([('rgb', rgb),
('sigma', sigma.squeeze(-1))])
return ret
def get_grads(self, only_last=False):
if only_last:
layers = [self.layers[-1], self.layers[-4]]
else:
layers = self.layers
grads = None
for layer in layers:
grad = layer.get_grads()
grads = grad if grads is None else np.concatenate([grads, grad], axis=-1)
return grads
class MLP_style(Module):
def __init__(self, D=8, W=256, input_ch=3, input_ch_viewdirs=3, skips=[4], act_func=nn.ReLU(), use_viewdir=True,
sigma_mul=0., enable_style=False):
'''
:param D: network depth
:param W: network width
:param input_ch: input channels for encodings of (x, y, z)
:param input_ch_viewdirs: input channels for encodings of view directions
:param skips: skip connection in network
'''
super().__init__()
self.input_ch = input_ch
self.input_ch_viewdirs = input_ch_viewdirs
self.skips = skips
self.use_viewdir = use_viewdir
self.sigma_mul = sigma_mul
self.enable_style = enable_style
self.act = act_func()
# base
self.base_layers = []
dim = self.input_ch
for i in range(D):
self.base_layers.append(nn.Linear(in_features=dim, out_features=W))
dim = W
if i in self.skips and i != (D - 1): # skip connection after i^th layer
dim += input_ch
self.base_layers = nn.ModuleList(self.base_layers)
# sigma
sigma_layer = nn.Linear(dim, 1) # sigma must be positive
self.sigma_layer = sigma_layer
# remap
base_remap_layer = nn.Linear(dim, 256)
self.base_remap_layer = base_remap_layer
# rgb
self.rgb_layers = []
dim = 256 + self.input_ch_viewdirs if self.use_viewdir else 256
self.rgb_layers.append(nn.Linear(dim, W // 2))
self.rgb_layers.append(nn.Linear(W // 2, 3))
self.rgb_layers = nn.ModuleList(self.rgb_layers)
self.layers = [*self.base_layers, self.sigma_layer, self.base_remap_layer, *self.rgb_layers]
def execute(self, **kwargs):
pts, dirs = kwargs['pts'], kwargs['dirs']
base = self.act(self.base_layers[0](pts))
for i in range(len(self.base_layers) - 1):
if i in self.skips:
base = jt.concat((pts, base), dim=-1)
base = self.act(self.base_layers[i + 1](base))
sigma = self.sigma_layer(base)
sigma = sigma + jt.nn.relu(sigma) * self.sigma_mul
base_remap = self.act(self.base_remap_layer(base))
if self.use_viewdir:
rgb_fea = self.act(self.rgb_layers[0](jt.concat((base_remap, dirs), dim=-1)))
else:
rgb_fea = self.act(self.rgb_layers[0](base_remap))
rgb = jt.sigmoid(self.rgb_layers[1](rgb_fea))
if self.enable_style:
ret = OrderedDict([('rgb', rgb),
# ('base', base), # for base input style nerf
('pts', pts),
('sigma', sigma.squeeze(-1))])
return ret
else:
ret = OrderedDict([('rgb', rgb),
('sigma', sigma.squeeze(-1))])
return ret
class Nerf(Module):
def __init__(self, args, mode='coarse'):
super().__init__()
self.use_viewdir = args.use_viewdir
"""Activation Function"""
act_func = act_dict[args.act_type]
self.is_siren = (args.act_type == 'sine')
"""Embedding"""
if not self.is_siren:
self.embedder_coor = Embedder(input_dim=3, max_freq_log2=args.embed_freq_coor - 1,
N_freqs=args.embed_freq_coor)
self.embedder_dir = Embedder(input_dim=3, max_freq_log2=args.embed_freq_dir - 1,
N_freqs=args.embed_freq_dir)
input_ch, input_ch_viewdirs = self.embedder_coor.out_dim, self.embedder_dir.out_dim
skips = [4]
self.sigma_mul = 0.
else:
input_ch, input_ch_viewdirs = 3, 3
skips = []
self.sigma_mul = args.siren_sigma_mul
"""Neural Network"""
if mode == 'coarse':
net_depth, net_width = args.netdepth, args.netwidth
else:
net_depth, net_width = args.netdepth_fine, args.netwidth_fine
self.net = MLP(D=net_depth, W=net_width, input_ch=input_ch, input_ch_viewdirs=input_ch_viewdirs,
skips=skips, use_viewdir=self.use_viewdir, act_func=act_func, sigma_mul=self.sigma_mul)
def execute(self, pts, dirs):
if not self.is_siren:
pts = self.embedder_coor(pts)
dirs = self.embedder_dir(dirs)
ret = self.net(pts, dirs)
return ret
class StyleMLP(Module):
def __init__(self, args):
super().__init__()
self.D = args.style_D
self.input_ch = args.embed_freq_coor * 3 * 2 + 3 + args.vae_latent
self.layers = []
self.skips = [4]
dim = self.input_ch
for i in range(self.D-1):
if i in self.skips:
dim += self.input_ch
self.layers.append(nn.Linear(dim, args.netwidth))
dim = args.netwidth
self.layers.append(nn.Linear(args.netwidth, 3))
self.layers = nn.ModuleList(self.layers)
def execute(self, **kwargs):
x = kwargs['x']
h = x
for i in range(len(self.layers)-1):
if i in self.skips:
h = jt.concat([h, x], dim=-1)
h = self.layers[i](h)
h = nn.relu(h)
h = self.layers[-1](h)
h = jt.sigmoid(h)
return {'rgb': h}
class StyleMLP_Wild_multilayers(Module):
def __init__(self, args):
super().__init__()
self.D = args.style_D
self.input_ch = args.embed_freq_coor * 3 * 2 + 3 + args.vae_latent
self.layers = []
self.skips = [4]
dim = self.input_ch
for i in range(self.D-1):
if i in self.skips:
dim += (args.embed_freq_coor * 3 * 2 + 3)
self.layers.append(nn.Linear(dim, args.netwidth))
dim = args.netwidth + args.vae_latent
self.layers.append(nn.Linear(args.netwidth + args.vae_latent, 3))
self.layers = nn.ModuleList(self.layers)
def execute(self, **kwargs):
x = kwargs['x']
l = kwargs['latent']
h = x
for i in range(len(self.layers)-1):
h = jt.concat([h, l], dim=-1)
if i in self.skips:
h = jt.concat([h, x], dim=-1)
h = self.layers[i](h)
h = nn.relu(h)
h = jt.concat([h, l], dim=-1)
h = self.layers[-1](h)
h = jt.sigmoid(h)
return {'rgb': h}
class StyleNerf(Module):
def __init__(self, args, mode='coarse', enable_style=False):
super().__init__()
self.use_viewdir = args.use_viewdir
"""Activation Function"""
act_func = act_dict[args.act_type]
self.is_siren = (args.act_type == 'sine')
"""Embedding"""
if not self.is_siren:
self.embedder_coor = Embedder(input_dim=3, max_freq_log2=args.embed_freq_coor - 1,
N_freqs=args.embed_freq_coor)
self.embedder_dir = Embedder(input_dim=3, max_freq_log2=args.embed_freq_dir - 1,
N_freqs=args.embed_freq_dir)
input_ch, input_ch_viewdirs = self.embedder_coor.out_dim, self.embedder_dir.out_dim
skips = [4]
self.sigma_mul = 0.
else:
input_ch, input_ch_viewdirs = 3, 3
skips = []
self.sigma_mul = args.siren_sigma_mul
"""Neural Network"""
if mode == 'coarse':
net_depth, net_width = args.netdepth, args.netwidth
else:
net_depth, net_width = args.netdepth_fine, args.netwidth_fine
self.net = MLP_style(D=net_depth, W=net_width, input_ch=input_ch, input_ch_viewdirs=input_ch_viewdirs,
skips=skips, use_viewdir=self.use_viewdir, act_func=act_func, sigma_mul=self.sigma_mul, enable_style=enable_style)
self.enable_style = enable_style
def set_enable_style(self, enable_style=False):
self.enable_style = enable_style
self.net.enable_style = enable_style
def execute(self, **kwargs):
# mode consistency
self.net.enable_style = self.enable_style
if not self.is_siren:
kwargs['pts'] = self.embedder_coor(kwargs['pts'])
kwargs['dirs'] = self.embedder_dir(kwargs['dirs'])
ret = self.net(**kwargs)
ret['dirs'] = kwargs['dirs']
return ret
def vec2skew(v):
"""
:param v: (N, 3, ) torch tensor
:return: (N, 3, 3)
"""
    zero = jt.zeros((v.shape[0], 1), dtype=jt.float32)  # jittor places tensors globally; no per-tensor device argument
skew_v0 = jt.concat([zero, -v[:, 2:3], v[:, 1:2]], dim=-1) # (N, 3)
skew_v1 = jt.concat([v[:, 2:3], zero, -v[:, 0:1]], dim=-1)
skew_v2 = jt.concat([-v[:, 1:2], v[:, 0:1], zero], dim=-1)
skew_v = jt.stack([skew_v0, skew_v1, skew_v2], dim=-1) # (N, 3, 3)
return skew_v
def Exp(r):
"""so(3) vector to SO(3) matrix
:param r: (N, 3) axis-angle, torch tensor
:return: (N, 3, 3)
"""
skew_r = vec2skew(r) # (N, 3, 3)
norm_r = r.norm(dim=1, keepdim=True).unsqueeze(-1) + 1e-15 # (N, 1, 1)
eye = jt.init.eye(3).unsqueeze(0) # (1, 3, 3)
R = eye + (jt.sin(norm_r) / norm_r) * skew_r + ((1 - jt.cos(norm_r)) / norm_r ** 2) * jt.matmul(skew_r, skew_r)
return R
def make_c2w(r, t):
"""
:param r: (N, 3, ) axis-angle torch tensor
:param t: (N, 3, ) translation vector torch tensor
:return: (N, 4, 4)
"""
R = Exp(r) # (N, 3, 3)
c2w = jt.concat([R, t.unsqueeze(-1)], dim=-1) # (N, 3, 4)
c2w = jt.concat([c2w, jt.zeros_like(c2w[:, :1])], dim=1) # (N, 4, 4)
c2w[:, 3, 3] = 1.
return c2w
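# Illustrative check: Exp() above is Rodrigues' formula
# R = I + sin(|r|)/|r| * K + (1 - cos(|r|))/|r|^2 * K^2, with K the skew matrix
# of the axis-angle vector r.  A plain numpy version of the same formula, using
# the standard skew convention and an arbitrary 90-degree rotation about z:
def _rodrigues_check(r=np.array([0.0, 0.0, np.pi / 2])):
    K = np.array([[0.0, -r[2], r[1]],
                  [r[2], 0.0, -r[0]],
                  [-r[1], r[0], 0.0]])
    n = np.linalg.norm(r) + 1e-15
    R = np.eye(3) + (np.sin(n) / n) * K + ((1 - np.cos(n)) / n ** 2) * (K @ K)
    return R  # approx [[0, -1, 0], [1, 0, 0], [0, 0, 1]]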
def idx2img(idx, fea, pad=0):
batch_size, h, w, z = idx.shape
batch_size_p, point_num, dim = fea.shape
assert batch_size == batch_size_p, 'Batch Size Do Not Match'
idx_img = idx.reshape([batch_size, h*w*z, 1]).expand([batch_size, h*w*z, dim]).long()
idx_lst = point_num * torch.ones_like(idx_img)
idx_img = torch.where(idx_img >= 0, idx_img, idx_lst)
fea_pad = fea.reshape([1, batch_size*point_num, dim]).expand([batch_size, batch_size*point_num, dim])
fea_pad = torch.cat([fea_pad, pad * torch.ones([batch_size, 1, dim]).to(idx.device)], dim=1)
fea_img = torch.gather(fea_pad, 1, idx_img).reshape([batch_size, h, w, z, dim])
return fea_img
class Camera:
def __init__(self, projectionMatrix=None, cameraPose=None, device=torch.device("cpu")):
super().__init__()
self.device = device
self.tensor_list = ['projectionMatrix', 'cameraPose', 'w2c_matrix']
for attr in self.tensor_list:
setattr(self, attr, None)
self.set(projectionMatrix=projectionMatrix, cameraPose=cameraPose)
def set(self, **kwargs):
keys = kwargs.keys()
func_map = {'projectionMatrix': self.set_project, 'cameraPose': self.set_pose}
for name in keys:
try:
if name in func_map.keys():
func_map[name](kwargs[name])
else:
                    raise ValueError(name + f' is not in {keys}')
except ValueError as e:
print(repr(e))
def set_pose(self, cameraPose):
if cameraPose is None:
self.cameraPose = self.w2c_matrix = None
return
elif type(cameraPose) is np.ndarray:
cameraPose = torch.from_numpy(cameraPose)
self.cameraPose = cameraPose.float()
self.w2c_matrix = torch.inverse(self.cameraPose).float()
self.to(self.device)
def set_project(self, projectionMatrix):
if projectionMatrix is None:
self.projectionMatrix = None
return
elif type(projectionMatrix) is np.ndarray:
projectionMatrix = torch.from_numpy(projectionMatrix)
self.projectionMatrix = projectionMatrix.float()
self.to(self.device)
def to(self, device):
if type(device) is str:
device = torch.device(device)
self.device = device
for tensor in self.tensor_list:
if getattr(self, tensor) is not None:
setattr(self, tensor, getattr(self, tensor).to(self.device))
return self
def WorldtoCamera(self, coor_world):
coor_world = coor_world.clone()
if len(coor_world.shape) == 2:
coor_world = torch.cat([coor_world, torch.ones([coor_world.shape[0], 1]).to(self.device)], -1)
coor_camera = torch.einsum('bcw,nw->bnc', self.w2c_matrix, coor_world)
else:
coor_world = self.homogeneous(coor_world)
coor_camera = torch.einsum('bcw,bnw->bnc', self.w2c_matrix, coor_world)
return coor_camera
def CameratoWorld(self, coor_camera):
coor_camera = coor_camera.clone()
coor_camera = self.homogeneous(coor_camera)
coor_world = torch.einsum('bwc,bnc->bnw', self.cameraPose, coor_camera)[:, :, :3]
return coor_world
def WorldtoCVV(self, coor_world):
coor_camera = self.WorldtoCamera(coor_world)
coor_cvv = torch.einsum('vc,bnc->bnv', self.projectionMatrix, coor_camera)
coor_cvv = coor_cvv[..., :-1] / coor_cvv[..., -1:]
return coor_cvv
def homogeneous(self, coor3d, force=False):
if coor3d.shape[-1] == 3 or force:
coor3d = torch.cat([coor3d, torch.ones_like(coor3d[..., :1]).to(self.device)], -1)
return coor3d
def rasterize(self, coor_world, rgb, h=192, w=256, k=1.5, z=1):
from pytorch3d.structures import Pointclouds
from pytorch3d.renderer import compositing
from pytorch3d.renderer.points import rasterize_points
def PixeltoCvv(h, w, hid=0, wid=0):
cvv = torch.tensor([[[1., 0., 0.], [-1., 0., 0.], [0., 1., 0.]]]).float()
pts = Pointclouds(points=cvv, features=cvv)
idx, _, dist2 = rasterize_points(pts, [h, w], 1e10, 3)
a2, b2, c2 = (dist2.cpu().numpy())[0, hid, wid]
x2 = (a2 + b2) / 2 - 1
cosa = (x2 + 1 - a2) / (2 * x2**0.5)
sina_abs = (1 - cosa**2)**0.5
u = (x2 ** 0.5) * cosa
v = (x2 ** 0.5) * sina_abs
            if np.abs((u**2 + (v-1)**2)**0.5 - c2**0.5)
import unittest
import numpy as np
import collections
import pycomlink as pycml
class TestWetDryandRainErrorfunctions(unittest.TestCase):
def test_WetDryError_with_simple_arrays(self):
reference = np.array([True, False, True, True, False,
True, False, np.nan, np.nan, np.nan])
predicted = np.array([True, False, False, True, True,
True, True, True, False, np.nan])
wd_error = pycml.validation.stats.calc_wet_dry_performance_metrics(
reference,
predicted)
class_name = 'WetDryError_reference'
fields = 'false_wet_rate missed_wet_rate matthews_correlation ' \
'true_wet_rate true_dry_rate N_dry_reference N_wet_reference '\
'N_true_wet N_true_dry N_false_wet N_missed_wet ' \
'N_all_pairs N_nan_pairs N_nan_reference_only ' \
'N_nan_predicted_only'
WetDryError_reference = collections.namedtuple(class_name, fields)
ref = WetDryError_reference(0.66666667, 0.25, 0.09128709291752767, 0.75,
0.33333334, 3, 4, 3, 1, 2, 1, 10, 3, 3, 1)
np.testing.assert_array_almost_equal(
wd_error,
ref)
    # The MCC should be zero when predicted contains only False/zero values
def test_mcc_with_zero_wet_prediction(self):
reference = np.array([True, False, False])
        predicted = np.array([False, False, False])
import os, re
import tensorflow as tf
import pandas as pd
import tensorflow_hub as hub
from tqdm import tqdm
import numpy as np
from bert.tokenization import FullTokenizer
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import classification_report
"""# Tokenize
Next, tokenize our text to create `input_ids`, `input_masks`, and `segment_ids`
"""
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
    batches could cause silent errors.
"""
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def create_tokenizer_from_hub_module(bert_path):
"""Get the vocab file and casing info from the Hub module."""
bert_layer = hub.KerasLayer(bert_path, trainable=False)
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = FullTokenizer(vocab_file, do_lower_case)
return tokenizer, bert_layer
def convert_single_example(tokenizer, example, max_seq_length=256):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
input_ids = [0] * max_seq_length
input_mask = [0] * max_seq_length
segment_ids = [0] * max_seq_length
label = 0
return input_ids, input_mask, segment_ids, label
tokens_a = tokenizer.tokenize(example.text_a)
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0 : (max_seq_length - 2)]
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return input_ids, input_mask, segment_ids, example.label
def convert_examples_to_features(tokenizer, examples, max_seq_length=256):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
input_ids, input_masks, segment_ids, labels = [], [], [], []
for example in tqdm(examples, desc="Converting examples to features"):
input_id, input_mask, segment_id, label = convert_single_example(
tokenizer, example, max_seq_length
)
input_ids.append(input_id)
input_masks.append(input_mask)
segment_ids.append(segment_id)
labels.append(label)
return (
np.array(input_ids),
np.array(input_masks),
np.array(segment_ids),
np.array(labels).reshape(-1, 1),
)
def convert_text_to_examples(texts, labels):
"""Create InputExamples"""
InputExamples = []
for text, label in zip(texts, labels):
InputExamples.append(
InputExample(guid=None, text_a=" ".join(text), text_b=None, label=label)
)
return InputExamples
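# Illustrative usage of the tokenization helpers above; the sentences, labels
# and max_seq_length are arbitrary examples, and bert_path must point to a
# TF-Hub BERT module (the same one used for the model elsewhere in this script).
def _tokenization_demo(bert_path):
    texts = [["this", "movie", "was", "great"], ["terrible", "plot"]]
    labels = [1, 0]
    tokenizer, _ = create_tokenizer_from_hub_module(bert_path)
    examples = convert_text_to_examples(texts, labels)
    input_ids, input_masks, segment_ids, y = convert_examples_to_features(
        tokenizer, examples, max_seq_length=32)
    return input_ids.shape, y.shape  # (2, 32) and (2, 1)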
# Build model
def build_model(bert_layer, max_seq_length, n_classes):
act = 'softmax'
loss = 'categorical_crossentropy'
in_id = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="input_ids")
in_mask = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="input_masks")
in_segment = tf.keras.layers.Input(shape=(max_seq_length,), dtype=tf.int32, name="segment_ids")
bert_inputs = [in_id, in_mask, in_segment]
pooled_output, sentence_output = bert_layer(bert_inputs)
flatten = tf.keras.layers.Flatten()(pooled_output)
dense_1 = tf.keras.layers.Dense(512, activation='relu')(flatten)
dropout_1 = tf.keras.layers.Dropout(0.5)(dense_1)
dense_2 = tf.keras.layers.Dense(256, activation='relu')(dropout_1)
dense_3 = tf.keras.layers.Dense(128, activation='relu')(dense_2)
dropout_2 = tf.keras.layers.Dropout(0.4)(dense_3)
dense_4 = tf.keras.layers.Dense(64, activation='relu')(dropout_2)
pred = tf.keras.layers.Dense(n_classes, activation=act)(dense_4)
model = tf.keras.models.Model(inputs=bert_inputs, outputs=pred)
adam = Adam(lr=0.0003)
model.compile(loss=loss, optimizer=adam, metrics=['accuracy'])
model.summary()
return model
def load_dataset(bert_path, max_seq_length, data_path, text_col, label_col, split=[0.80, 0.10, 0.10]):
df = pd.read_csv(data_path)
df = df.sample(frac=1).reset_index(drop=True)
text = df[text_col].tolist()
texts = [' '.join(t.split()[0:max_seq_length]) for t in text]
    texts = np.array(texts, dtype=object)
# Created by <NAME>
# Date: 16/03/2020
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
class RandomWalkEnv(gym.Env):
"""
Random walk environment. Used to test trajectory evolution algorithms. Each step is decided by the action given
"""
def __init__(self, seed=None, max_steps=100):
""" Constructor
:param seed: the random seed for the environment
:param max_steps: the maximum number of steps the episode lasts
:return:
"""
self.pose = np.zeros(2)
self.t = 0
self.max_steps = max_steps
self.observation_space = spaces.Box(low=-np.ones(2), high=np.ones(2), dtype=np.float32)
        self.action_space = spaces.Box(low=-np.ones(2), high=np.ones(2), dtype=np.float32)
import numpy as np
import pyautogui as pg
from scipy.sparse import csr_matrix
from scipy.signal import convolve2d
pg.PAUSE = 0
pg.FAILSAFE = True
_width, _height = pg.size()
## put hero in the center of the camera
#def center_hero():
# tmp = pg.PAUSE
# pg.PAUSE = 0
# for i in range(570, 820, 60):
# pg.click(x=i, y=20, button='left')
# for i in range(1095, 1345, 60):
# pg.click(x=i, y=20, button='left')
# pg.click(x=880, y=20, button='right')
# ## left click the picture of the hero in the UI to put the hero
# ## in the center of the camera
# HERO_PICTURE = (634, 1002)
# pg.PAUSE = 0.05
# pg.click(x=HERO_PICTURE[0], y=HERO_PICTURE[1], button='left')
# pg.PAUSE = tmp
def center_hero():
pg.doubleClick(573, 22)
## Dota 2 environment
## features, the current state and environmental parameters for learning
class DotaEnv:
TIME_LIMIT = 600
## time interval between two actions by pyautogui
## set true to raise an exception at (0, 0)
over_time = None # time in the game
def __init__(self):
center_hero()
self.views = [np.array(pg.screenshot())]
tmp = pg.PAUSE
pg.PAUSE = 0.05
self.views.append(np.array(pg.screenshot()))
pg.PAUSE = tmp
self.UI = DotaUI(self.views[-1])
self.reward = 0
self.over_time = self.UI.check_time()
self.hp = self.UI.get_hp()
self.gold = self.UI.get_gold()
self.lvl = self.UI.get_lvl()
self.ability = self.UI.unlock_ability()
## update once the bot makes an action
def update(self):
## screenshot corresponds to the action by the bot
self.update_views()
self.UI.update(self.views[-1])
self.update_reward()
self.over_time = self.UI.check_time()
def update_views(self):
center_hero()
self.views = [np.array(pg.screenshot())]
tmp = pg.PAUSE
pg.PAUSE = 0.1
self.views.append(np.array(pg.screenshot()))
pg.PAUSE = tmp
def update_reward(self):
UI = self.UI
hp = UI.get_hp()
lvl = UI.get_lvl()
gold = UI.get_gold()
ability = UI.unlock_ability()
delta_hp = hp - self.hp
delta_lvl = lvl - self.lvl
delta_gold = gold - self.gold
delta_ability = ability - self.ability
if delta_gold < 20:
delta_gold = 0
## only considering losing hp
if delta_hp > 0:
delta_hp = 0
self.reward = delta_gold * 2 + delta_lvl * 100 + delta_hp
self.hp = hp
self.lvl = lvl
self.gold = gold
self.ability = ability
class DotaBot:
MEMORY_LIMIT = 1000
MEMORY_RETRIEVAL = 6
def __init__(self):
self.env = DotaEnv()
self.policy = BotPolicy(self)
self.memory = []
self.center_x = _width / 2
self.center_y = _height / 2
## interpret the commands and execute them
def onestep(self):
## generate the commands based on the current state
views = self.env.views
policy = self.policy
X = policy.get_state(views[-1], views[-2], self.policy.scale)
p, meta = policy.forward(X)
direction = policy.execute(p)
if len(self.memory) >= self.MEMORY_LIMIT:
## randomly throw away old record
i = np.random.randint(len(self.memory) - self.MEMORY_RETRIEVAL)
self.memory.pop(i)
        self.memory.append([p.copy(), meta.copy(), direction.copy(), self.env.reward])  # store the reward as 4th element; BotPolicy reads it back as memory[i][3]
def get_parameters(self):
return self.policy.paras
def set_parameters(self, parameters):
paras = self.get_parameters()
for k in parameters:
paras[k] = parameters[k]
def get_UI(self):
return self.env.UI
class BotPolicy:
BLACKPIXEL_PERCENT = 0.95
LEFT_PERCENT = 0.1
NUM_ACTIONS = 9
## add random walk to avoid local minima
RANDOM_PROB = 0.005
RANDOM_DIST = 450
RANDOM_PAUSE = 3
def __init__(self, bot):
self.bot = bot
self.scale = 10 # scaling the screenshot to reduce the dimension
self.paras = {}
self.paras['w_fc1'] = np.random.normal(loc=0, scale=0.05, \
size=[_width // self.scale * _height // self.scale * 2, 100])
## output eight direction
self.paras['w_fc2'] = np.random.normal(loc=0, scale=0.05, \
size=[100, self.NUM_ACTIONS])
## the maximum score at the end of the game in the history
## the formula is gold + 100 * lvl
self.paras['max_score'] = 1040
## TODO: tune the parameters
self.battle_pause = 0.3
self.battle_L = 100
self.walk_pause = 1.3
self.walk_L = 300
self.learning_rate = 0.00001
self.batch_size = 50
## the baseline score assuming that the bot did nothing
self.standard_score = 1140
## return the location of the click for a given state
def forward(self, X):
## fully connected layer
w_fc1 = self.paras['w_fc1']
X_flatten = X.flatten(order='F')
X_flatten = np.matrix(X_flatten)
fc1 = X_flatten.dot(w_fc1)
## relu
fc1[fc1 < 0] = 0
## second fully connect layer
w_fc2 = self.paras['w_fc2']
fc2 = fc1.dot(w_fc2)
## stable softmax
fc2 -= np.max(fc2)
p = np.exp(fc2)
p = p / np.sum(p)
## store results for backpropogation
meta = [X, fc1]
return p, meta
## return the gradient of parameters
def backward(self, p, meta, direction):
reward = self.bot.env.reward
X, fc1 = meta
X_flatten = X.flatten(order='F')
X_flatten = np.matrix(X_flatten)
i = direction.argmax()
dp = np.zeros_like(p)
for j in range(len(dp)):
if j == i:
dp[0, j] = -(1 - p[0, i])
else:
dp[0, j] = p[0, j]
dw_fc2 = fc1.T.dot(dp)
w_fc2 = self.paras["w_fc2"]
dx_fc2 = dp.dot(w_fc2.T)
## relu
dx_fc2[dx_fc2 < 0] = 0
## the first layer
dw_fc1 = X_flatten.T.dot(dx_fc2)
return (dw_fc1, dw_fc2)
def local_optimizer(self):
reward = self.bot.env.reward
if reward != 0:
print("reward is ", reward)
dw_fc1 = np.zeros_like(self.paras['w_fc1'])
dw_fc2 = np.zeros_like(self.paras['w_fc2'])
l = min(len(self.bot.memory), self.bot.MEMORY_RETRIEVAL)
for i in range(-1, -(l+1), -1):
p, meta, direction, _ = self.bot.memory[i]
x, y= self.backward(p, meta, direction)
dw_fc1 += x
dw_fc2 += y
dw_fc1 /= l
dw_fc2 /= l
## update the parameter
self.paras['w_fc1'] -= dw_fc1 * self.learning_rate * reward
self.paras['w_fc2'] -= dw_fc2 * self.learning_rate * reward
def global_optimizer(self):
        ## increase or decrease in the amount of gold compared with the previous match
max_score = self.paras['max_score']
reward = 0
score = self.bot.env.gold + 100 * self.bot.env.lvl
if score < self.bot.policy.standard_score:
reward = min(score - self.bot.policy.standard_score, -100)
elif score > max_score:
reward = score - max_score
self.paras['max_score'] = max(max_score, score)
print("overall reward is ", reward)
## update the parameter if reward is nonzero
if reward != 0:
batch_size = self.batch_size
if reward > 0:
## try to update effective actions
rewards = [i[3] for i in self.bot.memory]
pos_indexes = [k for k, v in enumerate(rewards) if v > 0]
if len(pos_indexes) > 0:
l = max(pos_indexes)
else:
l = len(self.bot.memory)
else:
l = len(self.bot.memory)
            ## SGD over mini-batches of stored transitions
batch = (l - 1) // batch_size + 1
for i in range(batch):
start = i * batch_size
end = (i+1) * batch_size
dw_fc1 = np.zeros_like(self.paras['w_fc1'])
dw_fc2 = np.zeros_like(self.paras['w_fc2'])
for j in self.bot.memory[start: end]:
p, meta, direction, _ = j
x, y= self.backward(p, meta, direction)
dw_fc1 += x
dw_fc2 += y
dw_fc1 /= batch_size
dw_fc2 /= batch_size
## update the parameter
self.paras['w_fc1'] -= dw_fc1 * self.learning_rate * reward
self.paras['w_fc2'] -= dw_fc2 * self.learning_rate * reward
## negative log likelihood
def loss(self):
l = min(len(self.bot.memory), self.bot.MEMORY_RETRIEVAL)
reward = self.bot.env.reward
logp = 0
for i in range(-1, -(l+1), -1):
p, meta, direction, _ = self.bot.memory[i]
prob = p.dot(direction)
logp += np.log(prob)
return -logp * reward
def get_state(self, view1, view2, scale):
## use the difference
X = view1 - view2
        X = np.mean(X[:, :, 0:3], axis=2)
"""
Created on March 14, 2017
Originally written by <NAME> in 2015
@author: <NAME>
"""
import os
from datetime import datetime
import numpy as np
import pandas as pd
import pytz
def storms(precipitation, perc_snow, mass=1, time=4,
stormDays=None, stormPrecip=None, ps_thresh=0.5):
"""
Calculate the decimal days since the last storm given a precip time series,
percent snow, mass threshold, and time threshold
- Will look for pixels where perc_snow > 50% as storm locations
- A new storm will start if the mass at the pixel has exceeded the mass
      limit; this ensures that enough precipitation has accumulated
Args:
precipitation: Precipitation values
        perc_snow: Percent of precipitation that was snow
mass: Threshold for the mass to start a new storm
time: Threshold for the time to start a new storm
stormDays: If specified, this is the output from a previous run of storms
stormPrecip: Keeps track of the total storm precip
Returns:
tuple:
- **stormDays** - Array representing the days since the last storm at
a pixel
- **stormPrecip** - Array representing the precip accumulated during
the most recent storm
Created April 17, 2015
@author: <NAME>
"""
# either preallocate or use the input
if stormDays is None:
stormDays = np.zeros(precipitation.shape)
if stormPrecip is None:
stormPrecip = np.zeros(precipitation.shape)
# if there is no snow, don't reset the counter
# This ensures that the albedo won't be reset
stormDays += 1
if np.sum(perc_snow) == 0:
# stormDays = np.add(stormDays, 1)
        stormPrecip = np.zeros(precipitation.shape)
import collections
import fractions
import json
import os
import re
import warnings
import numpy as np # pip3 install numpy
import torch
from scipy import ndimage
import autodisc as ad
warnings.filterwarnings('ignore', '.*output shape of zoom.*')  # suppress warning from ndimage.zoom()
ROUND = 10
EPS = 0.0001
class SphericPad(torch.nn.Module):
"""Pads spherically the input on all sides with the given padding size."""
def __init__(self, padding_size):
super(SphericPad, self).__init__()
if isinstance(padding_size, int):
self.pad_left = self.pad_right = self.pad_top = self.pad_bottom = padding_size
elif isinstance(padding_size, tuple) and len(padding_size) == 2:
self.pad_left = self.pad_right = padding_size[0]
self.pad_top = self.pad_bottom = padding_size[1]
elif isinstance(padding_size, tuple) and len(padding_size) == 4:
self.pad_left = padding_size[0]
self.pad_top = padding_size[1]
self.pad_right = padding_size[2]
self.pad_bottom = padding_size[3]
else:
raise ValueError('The padding size shoud be: int, tuple of size 2 or tuple of size 4')
def forward(self, input):
output = torch.cat([input, input[:, :, :self.pad_bottom, :]], dim=2)
output = torch.cat([output, output[:, :, :, :self.pad_right]], dim=3)
output = torch.cat([output[:, :, -(self.pad_bottom + self.pad_top):-self.pad_bottom, :], output], dim=2)
output = torch.cat([output[:, :, :, -(self.pad_right + self.pad_left):-self.pad_right], output], dim=3)
return output
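# Illustrative usage: SphericPad pads the grid toroidally (the left edge
# continues the right edge, the top continues the bottom), which matches
# Lenia's periodic world.  The tensor size and padding below are arbitrary
# examples.
def _spheric_pad_demo():
    pad = SphericPad(2)
    x = torch.arange(16.0).reshape(1, 1, 4, 4)
    y = pad(x)                                 # shape (1, 1, 8, 8)
    assert torch.equal(y[:, :, 2:6, 2:6], x)   # the original image sits in the centre
    return y.shape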
def rle2arr(st):
'''
Transforms an RLE string to a numpy array.
Code from <NAME>.
:param st Description of the array in RLE format.
:return Numpy array.
'''
rle_groups = re.findall("(\d*)([p-y]?[.boA-X$])", st.rstrip('!')) # [(2 yO)(1 $)(1 yO)]
code_list = sum([[c] * (1 if n == '' else int(n)) for n, c in rle_groups], []) # [yO yO $ yO]
code_arr = [l.split(',') for l in ','.join(code_list).split('$')] # [[yO yO] [yO]]
V = [[0 if c in ['.', 'b'] else 255 if c == 'o' else ord(c) - ord('A') + 1 if len(c) == 1 else (ord(c[0]) - ord(
'p')) * 24 + (ord(c[1]) - ord('A') + 25) for c in row if c != ''] for row in code_arr] # [[255 255] [255]]
maxlen = len(max(V, key=len))
A = np.array([row + [0] * (maxlen - len(row)) for row in V]) / 255 # [[1 1] [1 0]]
return A
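# Illustrative usage: decode a tiny RLE pattern ('o' = fully-on cell with value
# 1.0, 'b' = empty cell, '$' = end of row, '!' = end of string).  The pattern
# string is an arbitrary example.
def _rle_demo():
    A = rle2arr("2o$bo!")
    assert A.tolist() == [[1.0, 1.0], [0.0, 1.0]]
    return A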
class Board:
def __init__(self, size=(10,10)):
self.params = {'R':10, 'T':10, 'b':[1], 'm':0.1, 's':0.01, 'kn':1, 'gn':1}
self.cells = np.zeros(size)
def clear(self):
self.cells.fill(0)
'''---------------------------------------------------------------
AUTOMATON PYTORCH VERSION
-------------------------------------------------------------------'''
def complex_mult_torch(X, Y):
""" Computes the complex multiplication in Pytorch when the tensor last dimension is 2: 0 is the real component and 1 the imaginary one"""
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
return torch.stack(
(X[..., 0] * Y[..., 0] - X[..., 1] * Y[..., 1],
X[..., 0] * Y[..., 1] + X[..., 1] * Y[..., 0]),
dim=-1)
def roll_n(X, axis, n):
""" Rolls a tensor with a shift n on the specified axis"""
f_idx = tuple(slice(None, None, None) if i != axis else slice(0,n,None)
for i in range(X.dim()))
b_idx = tuple(slice(None, None, None) if i != axis else slice(n,None,None)
for i in range(X.dim()))
front = X[f_idx]
back = X[b_idx]
return torch.cat([back, front],axis)
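# Illustrative check: roll_n is used below as a manual fftshift; rolling each
# spatial axis by size // 2 moves the zero-frequency component to the centre,
# which agrees with np.fft.fftshift for even sizes.  The tensor size is an
# arbitrary example.
def _roll_demo():
    x = torch.arange(16.0).view(1, 1, 4, 4)
    y = roll_n(roll_n(x, 3, x.size(3) // 2), 2, x.size(2) // 2)
    expected = torch.from_numpy(np.fft.fftshift(x.numpy(), axes=(2, 3)))
    return bool(torch.equal(y, expected))  # True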
class LeniaStepFFT(torch.nn.Module):
""" Module pytorch that computes one Lenia Step with the fft version"""
def __init__(self, R, b, kn, gn, m, s, T, is_soft_clip, is_gpu, size_y, size_x):
super(LeniaStepFFT, self).__init__()
self.R = R
self.T = T
self.dt = float (1.0 / T)
self.b = b
self.kn = kn
self.gn = gn
self.m = m
self.s = s
self.spheric_pad = SphericPad(int(self.R))
self.is_soft_clip = is_soft_clip
self.is_gpu = is_gpu
self.size_y = size_y
self.size_x = size_x
self.compute_kernel()
def compute_kernel(self):
size_y = self.size_y
size_x = self.size_x
# implementation of meshgrid in torch
x = torch.arange(size_x)
y = torch.arange(size_y)
xx = x.repeat(size_y, 1)
yy = y.view(-1,1).repeat(1, size_x)
X = (xx - int(size_x / 2)).float() / float(self.R)
Y = (yy - int(size_y / 2)).float() / float(self.R)
# distance to center in normalized space
D = torch.sqrt(X**2 + Y**2)
# kernel
k = len(self.b)
kr = k * D
bs = torch.tensor([float(f) for f in self.b])
b = bs[torch.min(torch.floor(kr).long(), (k-1)*torch.ones_like(kr).long())]
kfunc = AutomatonPytorch.kernel_core[self.kn - 1]
kernel = (D<1).float() * kfunc(torch.min(kr % 1, torch.ones_like(kr))) * b
kernel_sum = torch.sum(kernel)
# normalization of the kernel
self.kernel_norm = (kernel / kernel_sum).unsqueeze(0).unsqueeze(0)
# fft of the kernel
self.kernel_FFT = torch.rfft(self.kernel_norm, signal_ndim=2, onesided=False)
self.kernel_updated = False
def forward(self, input):
if self.is_gpu:
input = input.cuda()
self.kernel_FFT = self.kernel_FFT.cuda()
self.world_FFT = torch.rfft(input, signal_ndim=2, onesided=False)
self.potential_FFT = complex_mult_torch(self.kernel_FFT, self.world_FFT)
self.potential = torch.irfft(self.potential_FFT, signal_ndim=2, onesided=False)
self.potential = roll_n(self.potential, 3, self.potential.size(3)//2)
self.potential = roll_n(self.potential, 2, self.potential.size(2)//2)
gfunc = AutomatonPytorch.field_func[min(self.gn,2)]
self.field = gfunc(self.potential, self.m, self.s)
if not self.is_soft_clip:
output_img = torch.clamp(input + self.dt * self.field, min=0., max=1.)
else:
output_img = AutomatonPytorch.soft_clip(input + self.dt * self.field, 0, 1, self.T)
return output_img
class LeniaStepConv2d(torch.nn.Module):
""" Module pytorch that computes one Lenia Step with the conv2d version"""
def __init__(self, R, b, kn, gn, m, s, T, is_soft_clip, is_gpu):
super(LeniaStepConv2d, self).__init__()
self.R = R
self.T = T
self.dt = float (1.0 / T)
self.b = b
self.kn = kn
self.gn = gn
self.m = m
self.s = s
self.spheric_pad = SphericPad(int(self.R))
self.is_soft_clip = is_soft_clip
self.is_gpu = is_gpu
self.compute_kernel()
def compute_kernel(self):
size_y = 2 * self.R + 1
size_x = 2 * self.R + 1
# implementation of meshgrid in torch
x = torch.arange(size_x)
y = torch.arange(size_y)
xx = x.repeat(size_y, 1)
yy = y.view(-1,1).repeat(1, size_x)
X = (xx - int(size_x / 2)).float() / float(self.R)
Y = (yy - int(size_y / 2)).float() / float(self.R)
# distance to center in normalized space
D = torch.sqrt(X**2 + Y**2)
# kernel
k = len(self.b)
kr = k * D
bs = torch.tensor([float(f) for f in self.b])
b = bs[torch.min(torch.floor(kr).long(), (k-1)*torch.ones_like(kr).long())]
kfunc = AutomatonPytorch.kernel_core[self.kn - 1]
kernel = (D<1).float() * kfunc(torch.min(kr % 1, torch.ones_like(kr))) * b
kernel_sum = torch.sum(kernel)
# normalization of the kernel
self.kernel_norm = (kernel / kernel_sum).unsqueeze(0).unsqueeze(0)
self.kernel_updated = False
def forward(self, input):
if self.is_gpu:
input = input.cuda()
self.kernel_norm = self.kernel_norm.cuda()
self.potential = torch.nn.functional.conv2d(self.spheric_pad(input), weight = self.kernel_norm)
gfunc = AutomatonPytorch.field_func[self.gn]
self.field = gfunc(self.potential, self.m, self.s)
if not self.is_soft_clip:
output_img = torch.clamp(input + self.dt * self.field, 0, 1) # A_new = A + dt * torch.clamp(D, -A/dt, (1-A)/dt)
else:
output_img = AutomatonPytorch.soft_clip(input + self.dt * self.field, 0, 1, self.T) # A_new = A + dt * Automaton.soft_clip(D, -A/dt, (1-A)/dt, 1)
return output_img
class AutomatonPytorch:
kernel_core = {
0: lambda r: (4 * r * (1-r))**4, # polynomial (quad4)
1: lambda r: torch.exp( 4 - 1 / (r * (1-r)) ), # exponential / gaussian bump (bump4)
2: lambda r, q=1/4: (r>=q).float() * (r<=1-q).float(), # step (stpz1/4)
3: lambda r, q=1/4: (r>=q).float() * (r<=1-q).float() + (r<q).float() *0.5 # staircase (life)
}
field_func = {
0: lambda n, m, s: torch.max(torch.zeros_like(n), 1 - (n-m)**2 / (9 * s**2) )**4 * 2 - 1, # polynomial (quad4)
1: lambda n, m, s: torch.exp( - (n-m)**2 / (2 * s**2) ) * 2 - 1, # exponential / gaussian (gaus)
2: lambda n, m, s: (torch.abs(n-m)<=s).float() * 2 - 1 # step (stpz)
}
@staticmethod
def soft_max(x, m, k):
return torch.log(torch.exp(k*x) + torch.exp(k*m)) / k
@staticmethod
def soft_clip(x, min, max, k):
a = torch.exp(k*x)
b = torch.exp(k*min)
c = torch.exp(-k*max)
return torch.log( 1/(a+b)+c ) / -k
def __init__(self, world, version = 'fft', use_gpu = True):
self.world = world
#self.world_FFT = np.zeros(world.cells.shape)
#self.potential_FFT = np.zeros(world.cells.shape)
#self.potential = np.zeros(world.cells.shape)
#self.field = np.zeros(world.cells.shape)
#self.field_old = None
#self.change = np.zeros(world.cells.shape)
self.X = None
self.Y = None
self.D = None
#self.gen = 0
#self.time = 0
self.is_multi_step = False
self.is_soft_clip = False
self.is_inverted = False
self.kn = 1
self.gn = 1
# look if gpu is available
if use_gpu and torch.cuda.is_available():
self.is_gpu = True
else:
self.is_gpu = False
# initialization of the pytorch model to perform one step in Lenia
if version == 'fft':
self.model = LeniaStepFFT(self.world.params['R'], self.world.params['b'], (self.world.params.get('kn') or self.kn), (self.world.params.get('gn') or self.gn), self.world.params['m'], self.world.params['s'], self.world.params['T'], self.is_soft_clip, self.is_gpu, self.world.cells.shape[0], self.world.cells.shape[1])
elif version == 'conv2d':
self.model = LeniaStepConv2d(self.world.params['R'], self.world.params['b'], (self.world.params.get('kn') or self.kn), (self.world.params.get('gn') or self.gn), self.world.params['m'], self.world.params['s'], self.world.params['T'], self.is_soft_clip, self.is_gpu)
else:
raise ValueError('Lenia pytorch automaton step calculation can be done with fft or conv 2d')
if self.is_gpu:
self.model = self.model.cuda()
def calc_once(self):
A = torch.from_numpy(self.world.cells).unsqueeze(0).unsqueeze(0).float()
A_new = self.model(A)
#A = A[0,0,:,:].cpu().numpy()
A_new = A_new[0,0,:,:].cpu().numpy()
#self.change = (A_new - A) / (1/self.world.params['T'])
self.world.cells = A_new
#self.gen += 1
#self.time = round(self.time + (1/self.world.params['T']), ROUND)
def reset(self):
pass
#self.gen = 0
#self.time = 0
#self.field_old = None
class Lenia(ad.core.System):
@staticmethod
def default_config():
def_config = ad.core.System.default_config()
def_config.version = 'pytorch_fft' # reikna_fft, pytorch_fft, pytorch_conv2d
def_config.use_gpu = True
return def_config
@staticmethod
def default_system_parameters():
def_params = ad.core.System.default_system_parameters()
def_params.size_y = 100
def_params.size_x = 100
def_params.R = 13
def_params.T = 10
def_params.b = [1]
def_params.m = 0.15
def_params.s = 0.017
def_params.kn = 1
def_params.gn = 1
def_params.init_state = np.zeros((def_params.size_y, def_params.size_x))
return def_params
def default_statistics(self):
def_stats = super().default_statistics()
def_stats.append(LeniaStatistics(self))
return def_stats
def __init__(self, statistics=None, system_parameters=None, config=None, **kwargs):
super().__init__(statistics=statistics, system_parameters=system_parameters, config=config, **kwargs)
self.run_parameters = None
self.world = None
self.automaton = None
def init_run(self, run_parameters=None):
if run_parameters is None:
self.run_parameters = self.system_parameters
else:
self.run_parameters = {**self.system_parameters, **run_parameters}
self.world = Board((self.run_parameters['size_y'], self.run_parameters['size_x']))
self.world.cells = self.run_parameters["init_state"]
self.world.params = self.run_parameters
if np.min(self.world.cells) < 0 or np.max(self.world.cells) > 1:
            raise Warning('The given initial state has values below 0 and/or above 1. It will be clipped to the range [0, 1]')
self.world.cells = np.clip(self.world.cells, 0, 1)
if self.config.version.lower() == 'pytorch_fft':
self.automaton = AutomatonPytorch(self.world, version='fft', use_gpu = self.config.use_gpu)
elif self.config.version.lower() == 'pytorch_conv2d':
self.automaton = AutomatonPytorch(self.world, version='conv2d', use_gpu = self.config.use_gpu)
else:
raise ValueError('Unknown lenia version (config.version = {!r})'.format(self.config.version))
return self.world.cells
def step(self, step_idx):
self.automaton.calc_once()
        # for some invalid parameters the cells become nan
        # this causes problems when computing the statistics
        # therefore, treat all nan cells as 0
self.world.cells[np.isnan(self.world.cells)] = 0
return self.world.cells
def stop(self):
pass
class LeniaStatistics(ad.core.SystemStatistic):
'''Default statistics for the lenia system.'''
DISTANCE_WEIGHT = 2 # 1=linear, 2=quadratic, ...
@staticmethod
def calc_statistic_diff(statistic_names, stat1, stat2, nan_value_diff=1.0, nan_nan_diff=0.0):
if isinstance(stat1, list) or isinstance(stat2, list):
            raise NotImplementedError('Difference between statistics given as lists is not implemented!')
if not isinstance(statistic_names, list):
statistic_names = [statistic_names]
stat1 = [stat1]
stat2 = [stat2]
# assume default difference for all
diff = stat1 - stat2
        # check if there are angle statistics and calculate their difference appropriately
statistic_names_ndarray = np.array(statistic_names)
angle_statistics_inds = (statistic_names_ndarray == 'activation_center_movement_angle') \
| (statistic_names_ndarray == 'activation_center_movement_angle_mean') \
| (statistic_names_ndarray == 'positive_growth_center_movement_angle') \
| (statistic_names_ndarray == 'positive_growth_center_movement_angle_mean')
for angle_stat_idx in np.where(angle_statistics_inds)[0]:
diff[angle_stat_idx] = ad.helper.misc.angle_difference_degree(stat1[angle_stat_idx], stat2[angle_stat_idx])
# if both statistics are nan, then the difference is nan_nan_diff (default=0)
diff[np.isnan(stat1) & np.isnan(stat2)] = nan_nan_diff
# if one statistic is nan, then the current diff is nan, then use nan_value_diff
diff[np.isnan(diff)] = nan_value_diff
return diff
@staticmethod
def calc_goalspace_distance(points1, points2, config, goal_space_extent=None):
# normalize representations
if goal_space_extent is not None:
points1 = points1 - goal_space_extent[:,0]
points1 = points1 / (goal_space_extent[:,1] - goal_space_extent[:,0])
points2 = points2 - goal_space_extent[:,0]
points2 = points2 / (goal_space_extent[:,1] - goal_space_extent[:,0])
diff = LeniaStatistics.calc_statistic_diff(config.statistics, points1, points2)
if len(diff) == 0:
dist = np.array([])
elif np.ndim(diff) == 1:
dist = np.linalg.norm(diff)
else:
dist = np.linalg.norm(diff, axis=1)
return dist
def __init__(self, system):
super().__init__(system)
# statistics
self.data['is_dead'] = []
self.data['activation_mass'] = []
self.data['activation_mass_mean'] = []
self.data['activation_mass_std'] = []
self.data['activation_volume'] = []
self.data['activation_volume_mean'] = []
self.data['activation_volume_std'] = []
self.data['activation_density'] = []
self.data['activation_density_mean'] = []
self.data['activation_density_std'] = []
self.data['activation_center_position'] = []
self.data['activation_center_velocity'] = []
self.data['activation_center_velocity_mean'] = []
self.data['activation_center_velocity_std'] = []
self.data['activation_center_movement_angle'] = []
self.data['activation_center_movement_angle_mean'] = []
self.data['activation_center_movement_angle_std'] = []
self.data['activation_center_movement_angle_velocity'] = []
self.data['activation_center_movement_angle_velocity_mean'] = []
self.data['activation_center_movement_angle_velocity_std'] = []
self.data['activation_mass_asymmetry'] = []
self.data['activation_mass_asymmetry_mean'] = []
self.data['activation_mass_asymmetry_std'] = []
self.data['activation_mass_distribution'] = []
self.data['activation_mass_distribution_mean'] = []
self.data['activation_mass_distribution_std'] = []
self.data['activation_hu1'] = []
self.data['activation_hu1_mean'] = []
self.data['activation_hu1_std'] = []
self.data['activation_hu2'] = []
self.data['activation_hu2_mean'] = []
self.data['activation_hu2_std'] = []
self.data['activation_hu3'] = []
self.data['activation_hu3_mean'] = []
self.data['activation_hu3_std'] = []
self.data['activation_hu4'] = []
self.data['activation_hu4_mean'] = []
self.data['activation_hu4_std'] = []
self.data['activation_hu5'] = []
self.data['activation_hu5_mean'] = []
self.data['activation_hu5_std'] = []
self.data['activation_hu6'] = []
self.data['activation_hu6_mean'] = []
self.data['activation_hu6_std'] = []
self.data['activation_hu7'] = []
self.data['activation_hu7_mean'] = []
self.data['activation_hu7_std'] = []
self.data['activation_hu8'] = []
self.data['activation_hu8_mean'] = []
self.data['activation_hu8_std'] = []
self.data['activation_flusser9'] = []
self.data['activation_flusser9_mean'] = []
self.data['activation_flusser9_std'] = []
self.data['activation_flusser10'] = []
self.data['activation_flusser10_mean'] = []
self.data['activation_flusser10_std'] = []
self.data['activation_flusser11'] = []
self.data['activation_flusser11_mean'] = []
self.data['activation_flusser11_std'] = []
self.data['activation_flusser12'] = []
self.data['activation_flusser12_mean'] = []
self.data['activation_flusser12_std'] = []
self.data['activation_flusser13'] = []
self.data['activation_flusser13_mean'] = []
self.data['activation_flusser13_std'] = []
self.data['positive_growth_mass'] = []
self.data['positive_growth_mass_mean'] = []
self.data['positive_growth_mass_std'] = []
self.data['positive_growth_volume'] = []
self.data['positive_growth_volume_mean'] = []
self.data['positive_growth_volume_std'] = []
self.data['positive_growth_density'] = []
self.data['positive_growth_density_mean'] = []
self.data['positive_growth_density_std'] = []
self.data['positive_growth_center_position'] = []
self.data['positive_growth_center_velocity'] = []
self.data['positive_growth_center_velocity_mean'] = []
self.data['positive_growth_center_velocity_std'] = []
self.data['positive_growth_center_movement_angle'] = []
self.data['positive_growth_center_movement_angle_mean'] = []
self.data['positive_growth_center_movement_angle_std'] = []
self.data['positive_growth_center_movement_angle_velocity'] = []
self.data['positive_growth_center_movement_angle_velocity_mean'] = []
self.data['positive_growth_center_movement_angle_velocity_std'] = []
self.data['activation_positive_growth_centroid_distance'] = []
self.data['activation_positive_growth_centroid_distance_mean'] = []
self.data['activation_positive_growth_centroid_distance_std'] = []
# other
self.distance_weight_matrix = LeniaStatistics.calc_distance_matrix(system.system_parameters.size_y,
system.system_parameters.size_x)
self.angles_from_middle = None
def reset(self):
# set all statistics to zero
        # use a fresh, independent list per key (dict.fromkeys(self.data, []) would make all keys share one list object)
        self.data = {key: [] for key in self.data}
def calc_after_run(self, system, all_obs):
'''Calculates the final statistics for lenia observations after a run is completed'''
self.reset()
num_of_obs = len(all_obs)
activation_mass_data = np.ones(num_of_obs) * np.nan
activation_volume_data = np.ones(num_of_obs) * np.nan
activation_density_data = np.ones(num_of_obs) * np.nan
activation_center_position_data = np.ones((num_of_obs, 2)) * np.nan
activation_center_velocity_data = np.ones(num_of_obs) * np.nan
activation_center_movement_angle_data = np.ones(num_of_obs) * np.nan
activation_center_movement_angle_velocity_data = np.ones(num_of_obs) * np.nan
activation_mass_asymmetry_data = np.ones(num_of_obs) * np.nan
activation_mass_distribution_data = np.ones(num_of_obs) * np.nan
activation_hu1_data = np.ones(num_of_obs) * np.nan
activation_hu2_data = np.ones(num_of_obs) * np.nan
activation_hu3_data = np.ones(num_of_obs) * np.nan
activation_hu4_data = np.ones(num_of_obs) * np.nan
activation_hu5_data = np.ones(num_of_obs) * np.nan
activation_hu6_data = np.ones(num_of_obs) * np.nan
activation_hu7_data = np.ones(num_of_obs) * np.nan
activation_hu8_data = np.ones(num_of_obs) * np.nan
activation_flusser9_data = np.ones(num_of_obs) * np.nan
activation_flusser10_data = np.ones(num_of_obs) * np.nan
activation_flusser11_data = np.ones(num_of_obs) * np.nan
activation_flusser12_data = np.ones(num_of_obs) * np.nan
activation_flusser13_data = np.ones(num_of_obs) * np.nan
positive_growth_mass_data = np.ones(num_of_obs) * np.nan
positive_growth_volume_data = np.ones(num_of_obs) * np.nan
positive_growth_density_data = np.ones(num_of_obs) * np.nan
positive_growth_center_position_data = np.ones((num_of_obs, 2)) * np.nan
positive_growth_center_velocity_data = np.ones(num_of_obs) * np.nan
positive_growth_center_movement_angle_data = np.ones(num_of_obs) * np.nan
positive_growth_center_movement_angle_velocity_data = np.ones(num_of_obs) * np.nan
activation_positive_growth_centroid_distance_data = np.ones(num_of_obs) * np.nan
# positive_growth_data = np.ones(num_of_obs) * np.nan
# positive_growth_volume_data = np.ones(num_of_obs) * np.nan
# positive_growth_density_data = np.ones(num_of_obs) * np.nan
size_y = all_obs[0].shape[0]
size_x = all_obs[0].shape[1]
num_of_cells = size_y * size_x
        # calc initial center of mass and use it as a reference point to "center" the world around it
        # in consecutive steps, recalculate the center of mass and "recenter" the world around it
#mid_y = int((size_y-1) / 2)
#mid_x = int((size_x-1) / 2)
mid_y = (size_y - 1) / 2
mid_x = (size_x - 1) / 2
mid = np.array([mid_y, mid_x])
# prepare the angles of the vectors from the middle point for each point in the env, used to compute the mass asymmetry
# only recompute for first calculation of statistics (self.angles_from_middle is None) or if the observation size changed
if self.angles_from_middle is None or self.angles_from_middle.shape[0] != size_y or self.angles_from_middle.shape[1] != size_x:
self.angles_from_middle = np.ones((size_y,size_x))*np.nan
for y in range(size_y):
for x in range(size_x):
vec = [mid_y-y, x-mid_x]
self.angles_from_middle[y][x] = ad.helper.misc.angle_of_vec_degree([vec[1], vec[0]])
activation_center_of_mass = np.array(LeniaStatistics.center_of_mass(all_obs[0]))
activation_shift_to_center = mid - activation_center_of_mass
init_growth = all_obs[1] - all_obs[0]
positive_growth_center_of_mass = np.array(LeniaStatistics.center_of_mass(init_growth))
positive_growth_shift_to_center = mid - positive_growth_center_of_mass
prev_activation_center_movement_angle = np.nan
prev_positive_growth_center_movement_angle = np.nan
uncentered_activation_center_position = np.array([np.nan, np.nan])
for step in range(len(all_obs)):
activation = all_obs[step]
# uncentered_activation_center_position = np.array(ndimage.measurements.center_of_mass(activation))
#
# # set center to middle if it can not be calculated, for example if all cells are dead
# if np.isnan(uncentered_activation_center_position[0]) or np.isnan(uncentered_activation_center_position[1]) or \
# uncentered_activation_center_position[0] == float('inf') or uncentered_activation_center_position[1] == float('inf'):
# uncentered_activation_center_position = mid.copy()
# shift the system to the last calculated center of mass so that it is in the middle
# the matrix can only be shifted in discrete values, therefore the shift is transformed to integer
centered_activation = np.roll(activation, activation_shift_to_center.astype(int), (0, 1))
# calculate the image moments
activation_moments = ad.helper.statistics.calc_image_moments(centered_activation)
# new center of mass
activation_center_of_mass = np.array([activation_moments.y_avg, activation_moments.x_avg])
# calculate the change of center as a vector
activation_shift_from_prev_center = mid - activation_center_of_mass
# calculate the new shift to center the next obs to the new center
activation_shift_to_center = activation_shift_to_center + activation_shift_from_prev_center
# transform the new center, encoded as a shift from the first image, back into the original image coordinates
uncentered_activation_center_position[0] = (mid_y - activation_shift_to_center[0]) % size_y
uncentered_activation_center_position[1] = (mid_x - activation_shift_to_center[1]) % size_x
activation_center_position_data[step] = uncentered_activation_center_position
# activation mass
activation_mass = activation_moments.m00
            activation_mass_data[step] = activation_mass / num_of_cells  # activation mass normalized by the total number of cells
# activation volume
activation_volume = np.sum(activation > EPS)
activation_volume_data[step] = activation_volume / num_of_cells
# activation density
if activation_volume == 0:
activation_density_data[step] = 0
else:
activation_density_data[step] = activation_mass/activation_volume
# activation moments
activation_hu1_data[step] = activation_moments.hu1
activation_hu2_data[step] = activation_moments.hu2
activation_hu3_data[step] = activation_moments.hu3
activation_hu4_data[step] = activation_moments.hu4
activation_hu5_data[step] = activation_moments.hu5
activation_hu6_data[step] = activation_moments.hu6
activation_hu7_data[step] = activation_moments.hu7
activation_hu8_data[step] = activation_moments.hu8
activation_flusser9_data[step] = activation_moments.flusser9
activation_flusser10_data[step] = activation_moments.flusser10
activation_flusser11_data[step] = activation_moments.flusser11
activation_flusser12_data[step] = activation_moments.flusser12
activation_flusser13_data[step] = activation_moments.flusser13
# get velocity and angle of movement
# distance between the previous center of mass and the new one is the velocity
# angle is computed based on the shift vector
if step <= 0:
activation_center_velocity = np.nan
activation_center_movement_angle = np.nan
activation_center_movement_angle_velocity = np.nan
activation_mass_asymmetry = np.nan
else:
activation_center_velocity = np.linalg.norm(activation_shift_from_prev_center)
if activation_center_velocity == 0:
activation_center_movement_angle = np.nan
else:
activation_center_movement_angle = ad.helper.misc.angle_of_vec_degree([-1 * activation_shift_from_prev_center[1], activation_shift_from_prev_center[0]])
                # Angular velocity is the difference between the current and the previous angle of movement
                if np.isnan(activation_center_movement_angle) or np.isnan(prev_activation_center_movement_angle):
activation_center_movement_angle_velocity = 0
else:
activation_center_movement_angle_velocity = ad.helper.misc.angle_difference_degree(activation_center_movement_angle, prev_activation_center_movement_angle)
# activation mass asymmetry
                # calculate the signed angle difference between the direction of movement and the
                # direction from the center to each point; points with a negative difference are
                # counted as lying on the right side of the movement and their mass is summed up
activation_right_side_mass = 0
if np.isnan(activation_center_movement_angle):
activation_mass_asymmetry = np.nan
else:
                    if activation_mass == 0 or activation_mass == num_of_cells:
                        # if all cells are dead or all are fully active the mass is symmetric around the center
                        activation_mass_asymmetry = 0
else:
# for y in range(size_y):
# for x in range(size_x):
# angle_dist = ad.helper.misc.angle_difference_degree(activation_center_movement_angle, angles_from_middle[y][x])
#
# if angle_dist < 180:
# activation_right_side_mass = activation_right_side_mass + activation[y][x]
angle_dist = ad.helper.misc.angle_difference_degree(activation_center_movement_angle, self.angles_from_middle)
activation_right_side_mass = np.sum(activation[angle_dist < 0])
# activation_mass_asymmetry = right_mass - left_mass = right_mass - (mass - right_mass) = 2*right_mass - mass
activation_mass_asymmetry = (2 * activation_right_side_mass - activation_mass) / activation_mass
prev_activation_center_movement_angle = activation_center_movement_angle
activation_center_velocity_data[step] = activation_center_velocity
activation_center_movement_angle_data[step] = activation_center_movement_angle
activation_center_movement_angle_velocity_data[step] = activation_center_movement_angle_velocity
activation_mass_asymmetry_data[step] = activation_mass_asymmetry
# mass distribution around the center
if activation_mass <= EPS:
activation_mass_distribution = 1.0
else:
activation_mass_distribution = np.sum(self.distance_weight_matrix * centered_activation) / np.sum(centered_activation)
activation_mass_distribution_data[step] = activation_mass_distribution
##########################################################################################################################################
# positive growth statistics
uncentered_positive_growth_center_position = np.array([np.nan, np.nan])
if step <= 0:
positive_growth_mass_data[step] = np.nan
positive_growth_volume_data[step] = np.nan
positive_growth_density_data[step] = np.nan
positive_growth_center_position_data[step] = [np.nan, np.nan]
positive_growth_center_velocity_data[step] = np.nan
positive_growth_center_movement_angle_data[step] = np.nan
positive_growth_center_movement_angle_velocity_data[step] = np.nan
else:
positive_growth = np.clip(all_obs[step] - all_obs[step - 1], 0, 1)
# uncentered_positive_growth_center_position = np.array(StatLenia.center_of_mass(positive_growth))
#
# # set center to middle if it can not be calculated, for example if all cells are dead
# if np.isnan(uncentered_positive_growth_center_position[0]) or np.isnan(uncentered_positive_growth_center_position[1]) or \
# uncentered_positive_growth_center_position[0] == float('inf') or uncentered_positive_growth_center_position[1] == float('inf'):
# uncentered_positive_growth_center_position = mid.copy()
#
# positive_growth_center_position_data[step] = uncentered_positive_growth_center_position
# shift the system to the last calculated center of mass so that it is in the middle
# the matrix can only be shifted in discrete values, therefore the shift is transformed to integer
centered_positive_growth = np.roll(positive_growth, [int(positive_growth_shift_to_center[0]), int(positive_growth_shift_to_center[1])], (0, 1))
# new center of mass
positive_growth_center_of_mass = np.array(LeniaStatistics.center_of_mass(centered_positive_growth))
# calculate the change of center as a vector
positive_growth_shift_from_prev_center = mid - positive_growth_center_of_mass
# calculate the new shift to center the next obs to the new center
positive_growth_shift_to_center = positive_growth_shift_to_center + positive_growth_shift_from_prev_center
# transform the new center, encoded as a shift from the first image, back into the original image coordinates
uncentered_positive_growth_center_position[0] = (mid_y - positive_growth_shift_to_center[0]) % size_y
uncentered_positive_growth_center_position[1] = (mid_x - positive_growth_shift_to_center[1]) % size_x
positive_growth_center_position_data[step] = uncentered_positive_growth_center_position
# growth mass
positive_growth_mass = np.sum(centered_positive_growth)
                positive_growth_mass_data[step] = positive_growth_mass / num_of_cells  # positive growth mass normalized by the total number of cells
                # positive growth volume
                positive_growth_volume = np.sum(centered_positive_growth > EPS)
                positive_growth_volume_data[step] = positive_growth_volume / num_of_cells
                # positive growth density
if positive_growth_volume == 0:
positive_growth_density_data[step] = 0
else:
positive_growth_density_data[step] = positive_growth_mass / positive_growth_volume
# get velocity and angle of movement
# distance between the previous center of mass and the new one is the velocity
# angle is computed based on the shift vector
if step <= 1:
positive_growth_center_velocity = np.nan
positive_growth_center_movement_angle = np.nan
positive_growth_center_movement_angle_velocity = np.nan
else:
positive_growth_center_velocity = np.linalg.norm(positive_growth_shift_from_prev_center)
if positive_growth_center_velocity == 0:
positive_growth_center_movement_angle = np.nan
else:
positive_growth_center_movement_angle = ad.helper.misc.angle_of_vec_degree([-1 * positive_growth_shift_from_prev_center[1], positive_growth_shift_from_prev_center[0]])
# Angular velocity, is the difference between the current and previous angle of movement
if positive_growth_center_movement_angle is np.nan or prev_positive_growth_center_movement_angle is np.nan:
positive_growth_center_movement_angle_velocity = 0
else:
positive_growth_center_movement_angle_velocity = ad.helper.misc.angle_difference_degree(positive_growth_center_movement_angle, prev_positive_growth_center_movement_angle)
prev_positive_growth_center_movement_angle = positive_growth_center_movement_angle
positive_growth_center_velocity_data[step] = positive_growth_center_velocity
positive_growth_center_movement_angle_data[step] = positive_growth_center_movement_angle
positive_growth_center_movement_angle_velocity_data[step] = positive_growth_center_movement_angle_velocity
######################################################################################################################
# Growth - Activation centroid distance
if step <= 0:
activation_positive_growth_centroid_distance_data[step] = np.nan
else:
activation_positive_growth_centroid_distance = ad.helper.misc.get_min_distance_on_repeating_2d_array((size_y, size_x), uncentered_activation_center_position, uncentered_positive_growth_center_position)
activation_positive_growth_centroid_distance_data[step] = activation_positive_growth_centroid_distance
is_dead = np.all(all_obs[-1] == 1) or np.all(all_obs[-1] == 0)
self.data['is_dead'] = is_dead
self.data['activation_mass'] = activation_mass_data
self.data['activation_mass_mean'] = np.nanmean(activation_mass_data)
        self.data['activation_mass_std'] = np.nanstd(activation_mass_data)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Arduino lock-in amplifier
"""
__author__ = "<NAME>"
__authoremail__ = "<EMAIL>"
__url__ = "https://github.com/Dennis-van-Gils/DvG_Arduino_lock-in_amp"
__date__ = "31-08-2021"
__version__ = "2.0.0"
# pylint: disable=invalid-name
import os
import sys
import time as Time
import psutil
from PyQt5 import QtCore
from PyQt5 import QtWidgets as QtWid
from PyQt5.QtCore import QDateTime
import numpy as np
from dvg_pyqt_filelogger import FileLogger
from dvg_debug_functions import dprint
from dvg_fftw_welchpowerspectrum import FFTW_WelchPowerSpectrum
from Alia_protocol_serial import Alia, Waveform
from Alia_qdev import Alia_qdev
from Alia_gui import MainWindow
# Show debug info in terminal? Warning: Slow! Do not leave on unintentionally.
DEBUG = False
DEBUG_TIMING = False
# Enable GPU-accelerated computations on an NVIDIA videocard with CUDA support?
# Affects the FIR filters.
USE_CUDA = False
# ------------------------------------------------------------------------------
# current_date_time_strings
# ------------------------------------------------------------------------------
def current_date_time_strings():
cur_date_time = QDateTime.currentDateTime()
return (
cur_date_time.toString("dd-MM-yyyy"),
cur_date_time.toString("HH:mm:ss"),
)
# ------------------------------------------------------------------------------
# Program termination routines
# ------------------------------------------------------------------------------
def stop_running():
app.processEvents()
alia_qdev.turn_off()
alia_qdev.quit()
logger.close()
@QtCore.pyqtSlot()
def notify_connection_lost():
stop_running()
excl = " ! ! ! ! ! ! ! ! "
window.qlbl_title.setText("%sLOST CONNECTION%s" % (excl, excl))
str_cur_date, str_cur_time = current_date_time_strings()
str_msg = "%s %s\nLost connection to Arduino on port %s.\n" % (
str_cur_date,
str_cur_time,
alia.ser.portstr,
)
print("\nCRITICAL ERROR @ %s" % str_msg)
reply = QtWid.QMessageBox.warning(
window, "CRITICAL ERROR", str_msg, QtWid.QMessageBox.Ok
)
if reply == QtWid.QMessageBox.Ok:
pass # Leave the GUI open for read-only inspection by the user
@QtCore.pyqtSlot()
def about_to_quit():
print("\nAbout to quit")
stop_running()
alia.close()
# ------------------------------------------------------------------------------
# Lock-in amplifier data-acquisition update function
# ------------------------------------------------------------------------------
def lockin_DAQ_update():
"""Listen for new data blocks send by the lock-in amplifier and perform the
main mathematical operations for signal processing. This function will run
in a dedicated thread (i.e. `worker_DAQ`), separated from the main program
thread that handles the GUI.
NOTE: NO GUI OPERATIONS ARE ALLOWED HERE. Otherwise it may affect the
`worker_DAQ` thread negatively, resulting in lost blocks of data.
"""
# Shorthands
c: Alia.Config = alia.config
state: Alia_qdev.State = alia_qdev.state
    # Prevent throwing errors if the lock-in was just paused
if alia.lockin_paused:
return False
if DEBUG_TIMING:
tock = Time.perf_counter()
print("%.2f _DAQ" % (tock - alia.tick))
alia.tick = tock
    # Listen for data buffers sent by the lock-in
(
success,
_counter,
state.time,
state.ref_X,
state.ref_Y,
state.sig_I,
) = alia.listen_to_lockin_amp()
if not success:
dprint("@ %s %s" % current_date_time_strings())
return False
# Detect dropped blocks
# ---------------------
# TODO: Rethink this procedure. Might be easier done with the index of the
    # block that also gets sent by the Arduino. We either receive a full block,
# or we don't. There are no partial blocks that can be received.
alia_qdev.state.blocks_received += 1
last_time = state.rb_time[-1] if state.blocks_received > 1 else np.nan
dT = (state.time[0] - last_time) / 1e6 # [usec] to [sec]
    if dT > c.SAMPLING_PERIOD * 1.10:  # dT is in [sec]; allow a little clock jitter
N_dropped_samples = int(round(dT / c.SAMPLING_PERIOD) - 1)
dprint("Dropped samples: %i" % N_dropped_samples)
dprint("@ %s %s" % current_date_time_strings())
# Replace dropped samples with np.nan samples.
# As a result, the filter output will contain a continuous series of
# np.nan values in the output for up to `RingBuffer_FIR_Filter.
# T_settle_filter` seconds long after the occurrence of the last dropped
# sample.
state.rb_time.extend(
last_time
+ np.arange(1, N_dropped_samples + 1) * c.SAMPLING_PERIOD * 1e6
)
state.rb_ref_X.extend(np.full(N_dropped_samples, np.nan))
state.rb_ref_Y.extend(np.full(N_dropped_samples, np.nan))
state.rb_sig_I.extend(np.full(N_dropped_samples, np.nan))
# Stage 0
# -------
state.sig_I_min = np.min(state.sig_I)
state.sig_I_max = np.max(state.sig_I)
state.sig_I_avg = np.mean(state.sig_I)
state.sig_I_std = np.std(state.sig_I)
state.rb_time.extend(state.time)
state.rb_ref_X.extend(state.ref_X)
state.rb_ref_Y.extend(state.ref_Y)
state.rb_sig_I.extend(state.sig_I)
# Note: `ref_X` [non-dim] is transformed to `ref_X*` [V]
# Note: `ref_Y` [non-dim] is transformed to `ref_Y*` [V]
window.hcc_ref_X.extendData(
state.time, np.multiply(state.ref_X, c.ref_V_ampl_RMS) + c.ref_V_offset
)
window.hcc_ref_Y.extendData(
state.time, np.multiply(state.ref_Y, c.ref_V_ampl_RMS) + c.ref_V_offset
)
window.hcc_sig_I.extendData(state.time, state.sig_I)
# Stage 1
# -------
# fmt: off
# Apply filter 1 to sig_I
state.filt_I = alia_qdev.firf_1_sig_I.apply_filter(state.rb_sig_I)
if alia_qdev.firf_1_sig_I.filter_has_settled:
# Retrieve the block of original data from the past that aligns with
# the current filter output
valid_slice = alia_qdev.firf_1_sig_I.rb_valid_slice
state.time_1 = state.rb_time [valid_slice]
old_sig_I = state.rb_sig_I[valid_slice]
old_ref_X = state.rb_ref_X[valid_slice]
old_ref_Y = state.rb_ref_Y[valid_slice]
# Heterodyne mixing
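        # Multiplying the filtered input signal with the in-phase (ref_X) and
        # quadrature (ref_Y) reference waveforms shifts the signal content at
        # the reference frequency down to DC; the second filter stage below
        # (filter 2) isolates these DC components, which become the X and Y
        # quadrature outputs of the lock-in.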
np.multiply(state.filt_I, old_ref_X, out=state.mix_X)
np.multiply(state.filt_I, old_ref_Y, out=state.mix_Y)
else:
state.time_1.fill(np.nan)
old_sig_I = np.full(c.BLOCK_SIZE, np.nan)
state.mix_X.fill(np.nan)
state.mix_Y.fill(np.nan)
state.filt_I_min = np.min(state.filt_I)
state.filt_I_max = np.max(state.filt_I)
state.filt_I_avg = np.mean(state.filt_I)
state.filt_I_std = np.std(state.filt_I)
state.rb_time_1.extend(state.time_1)
state.rb_filt_I.extend(state.filt_I)
state.rb_mix_X .extend(state.mix_X)
state.rb_mix_Y .extend(state.mix_Y)
window.hcc_filt_1_in .extendData(state.time_1, old_sig_I)
window.hcc_filt_1_out.extendData(state.time_1, state.filt_I)
window.hcc_mix_X .extendData(state.time_1, state.mix_X)
window.hcc_mix_Y .extendData(state.time_1, state.mix_Y)
# fmt: on
# Stage 2
# -------
# Apply filter 2 to the mixer output
state.X = alia_qdev.firf_2_mix_X.apply_filter(state.rb_mix_X)
state.Y = alia_qdev.firf_2_mix_Y.apply_filter(state.rb_mix_Y)
if alia_qdev.firf_2_mix_X.filter_has_settled:
# Retrieve the block of time data from the past that aligns with
# the current filter output
valid_slice = alia_qdev.firf_1_sig_I.rb_valid_slice
state.time_2 = state.rb_time_1[valid_slice]
# Signal amplitude: R
np.sqrt(np.add(np.square(state.X), np.square(state.Y)), out=state.R)
# Signal phase: Theta
np.arctan2(state.Y, state.X, out=state.T)
np.multiply(state.T, 180 / np.pi, out=state.T) # [rad] to [deg]
else:
state.time_2.fill(np.nan)
state.R.fill(np.nan)
state.T.fill(np.nan)
state.X_avg = np.mean(state.X)
    state.Y_avg = np.mean(state.Y)
import numpy as np
from scipy.linalg import eigh
import voice_activity_detector
import features_extraction
import statistics
import utils
def get_sigma(ubm, space_dimension):
sigma = np.zeros(shape=(len(ubm.covariances) * len(ubm.covariances[0])))
k = 0
for i in range(len(ubm.covariances[0])):
for j in range(len(ubm.covariances)):
sigma[k] = ubm.covariances[j][i]
k += 1
repeat_sigma = np.repeat(np.transpose(sigma)[:, np.newaxis],
space_dimension, axis=1)
return repeat_sigma
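# Note: get_sigma() flattens the UBM covariance matrix (assumed to be laid out
# as components x features) in column-major order, which is equivalent to
# np.asarray(ubm.covariances).flatten(order='F'), and tiles the resulting vector
# across the `space_dimension` columns so it can be divided element-wise against
# the total-variability (T) matrix.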
def save_i_vector_model(path, i_vector, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_model_" +
str(components_number) + ".txt",
"wb")
np.save(f, i_vector)
    f.close()
def load_i_vector_model(path, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_model_" +
str(components_number) + ".txt",
"rb")
i_vector = np.load(f)
    f.close()
return i_vector
def save_i_vectors(path, i_vectors, speaker, components_number):
f = open(
path + "/ivectors/" + speaker + "_ivector_" + str(
components_number) +
".txt",
"wb")
np.save(f, i_vectors)
    f.close()
def extract_i_vector_from_signal(ubm, utterance_path, t_matrix,
space_dimension,
mfcc_number, frame_duration, step_duration,
sigma):
t_matrix_divides_sigma = np.divide(t_matrix, sigma)
    t_matrix_divides_sigma_transpose = np.transpose(t_matrix_divides_sigma)
'''
brt.py
Created by <NAME>
<EMAIL>
version 1.1 -- 7.15.2017
Buffalo Ray Trace (BRT) is an interactive GUI for plotting image
predictions for a lens model. BRT is written in Python, utilizing the
tkinter GUI library, the matplotlib plotting library, the astropy
library of tools for astrophysical data analysis. All are available
through Anaconda.
The only required inputs for BRT are the x and y deflection files (FITS),
in units of arcseconds, and a PNG color image or FITS image of the field of view.
These two sets of inputs need to have the same field of view. The program provides
helper functions to create these files.
VERSION HISTORY:
1.1 -- 7.15.2017: Fixed minor bugs. Fixed bug with computing dls/ds using proper
cosmology. Added postage stamp feature. Added feature that inserts
the redshift of the selected arcs from the arc list into the boxes
for ray tracing and plotting the critical curve.
'''
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
import os
import sys
if sys.version_info[0] < 3:
from Tkinter import *
import tkMessageBox as tkMB
else:
from tkinter import *
from tkinter import messagebox as tkMB
import pickle
from astropy.io import fits
from astropy.wcs import WCS
from astropy.cosmology import FlatLambdaCDM
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
from itertools import cycle
import warnings
import time
import datetime
import platform
from PIL import Image
def dlsds(zl,zs,Om0=0.3,H0=70):
cosmo = FlatLambdaCDM(Om0=Om0,H0=H0)
    ratio = np.zeros_like(zs, dtype=float)
for i in range(len(zs)):
dls = cosmo.angular_diameter_distance_z1z2(zl,zs[i]).value
ds = cosmo.angular_diameter_distance(zs[i]).value
ratio[i] = dls/ds
return ratio
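# Note (example values): dlsds() returns the lensing distance ratio D_ls / D_s
# for every source redshift in `zs`, e.g.
#   dlsds(0.3, np.array([1.0, 2.0]))
# gives the factors needed to scale a deflection field normalised to dls/ds = 1
# to the source planes at z = 1 and z = 2 behind a z = 0.3 cluster.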
def predict_images(xim,yim,deflectx,deflecty,dlsds=1,maxdist=0.5):
dims = deflectx.shape
source_x = np.zeros_like(deflectx)
source_y = np.zeros_like(deflecty)
if dims[0] == dims[1]:
for i in range(dims[0]):
source_x[:,i] = i + 1 - deflectx[:,i]*dlsds
source_y[i,:] = i + 1 - deflecty[i,:]*dlsds
else:
for j in range(dims[0]): source_x[:,j] = j + 1 - deflectx[:,j]*dlsds
for k in range(dims[1]): source_y[k,:] = k + 1 - deflecty[k,:]*dlsds
xs = source_x[int(np.round(yim))-1,int(np.round(xim))-1]
ys = source_y[int(np.round(yim))-1,int(np.round(xim))-1]
d = np.sqrt((source_x-xs)**2+(source_y-ys)**2)
indices = np.where(d<maxdist)
ximp = []
yimp = []
for i,j in zip(indices[1],indices[0]): ximp.append(i+1),yimp.append(j+1)
ximp = np.array(ximp)
yimp = np.array(yimp)
return ximp, yimp
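# Note: predict_images() applies the pixel-grid form of the lens equation,
#   beta = theta - (D_ls / D_s) * alpha(theta),
# to map every image-plane pixel to the source plane, and returns all pixels
# whose source-plane position lands within `maxdist` pixels of the source
# position of the input image position (xim, yim), i.e. the predicted
# counter-images.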
def update(f):
data = fits.getdata(f)
h = fits.getheader(f)
h['CRPIX1'] += 0.5
h['CRPIX2'] += 0.5
fits.writeto(f,data,header=h,clobber=True)
class StartWindow:
def __init__(self):
self.root = Tk()
self.root.wm_title("Start up")
self.root.geometry("380x380")
titleFrame = Frame(self.root)
titleFrame.pack()
title = Label(titleFrame,text="Buffalo Ray Trace",fg='blue')
title.config(font=("Helvetica", 24))
title.pack()
Label(titleFrame,text='Enter model parameters',fg='red').pack()
entryFrame = Frame(self.root)
entryFrame.pack()
Label(entryFrame, text = "Cluster redshift: ").grid(row=0, column=0,sticky=E)
self.entry_zl = Entry(entryFrame,width=15)
self.entry_zl.grid(row=0, column=1)
Label(entryFrame, text = "Image file: ").grid(row=1, column=0,sticky=E)
self.entry_imagefile = Entry(entryFrame,width=15)
self.entry_imagefile.grid(row=1, column=1)
Button(entryFrame, text='Create',command=self.tiff_window,padx=5,pady=5).grid(row=1,column=2)
Label(entryFrame, text = "X deflection file: ").grid(row=2, column=0,sticky=E)
self.entry_dxfile = Entry(entryFrame,width=15)
self.entry_dxfile.grid(row=2, column=1)
Label(entryFrame, text = "Y deflection file: ").grid(row=3, column=0,sticky=E)
self.entry_dyfile = Entry(entryFrame,width=15)
self.entry_dyfile.grid(row=3, column=1)
Label(entryFrame, text = "Deflection file redshift: ").grid(row=4, column=0,sticky=E)
self.entry_dz = Entry(entryFrame,width=15)
self.entry_dz.grid(row=4, column=1)
self.check = IntVar()
self.check.set(0)
Checkbutton(entryFrame, variable=self.check, onvalue=1, offvalue=0, text='Deflection files are dls/ds = 1').grid(row=5,columnspan=2)
Label(entryFrame,text='Enter cosmological parameters',fg='red').grid(row=6,columnspan=2)
Label(entryFrame, text = "Omega M (1 - Omega L): ").grid(row=7, column=0,sticky=E)
self.entry_Om0 = Entry(entryFrame,width=15)
self.entry_Om0.grid(row=7, column=1)
self.entry_Om0.insert(0,'0.3')
Label(entryFrame, text = "Hubble constant (km/s/Mpc): ").grid(row=8, column=0,sticky=E)
self.entry_H0 = Entry(entryFrame,width=15)
self.entry_H0.grid(row=8, column=1)
self.entry_H0.insert(0,'70.0')
submitFrame = Frame(self.root)
submitFrame.pack()
Button(submitFrame, text = "Enter", command = self.getParams,padx=5,pady=5).pack()
Label(submitFrame, text='Or').pack()
Button(submitFrame,text="Load previous model",command=self.loadPrevious,padx=5,pady=5).pack()
self.root.mainloop()
def tiff_window(self):
self.toplevel = Toplevel(self.root)
Label(self.toplevel, text='Open a fits file in ds9 (or three if RGB) and\nscale to the desired output.',fg='blue').grid(row=0,columnspan=2)
Label(self.toplevel, text='TIFF file name: ').grid(row=1,column=0,sticky=E)
self.file_entry = Entry(self.toplevel,width=10)
self.file_entry.grid(row=1,column=1)
Button(self.toplevel,text='Write TIFF',command=self.createTiff,padx=5,pady=5).grid(row=2,columnspan=2)
def createTiff(self):
tiffname = self.file_entry.get()
os.system('xpaset -p ds9 export tiff '+os.getcwd()+'/'+tiffname+' none')
fitsname = os.popen('xpaget ds9 file').readlines()[0].rsplit()[0]
htiff = fits.getheader(fitsname)
wtiff = WCS(htiff)
pickle.dump(wtiff,open(tiffname+'.wcs','wb'))
self.entry_imagefile.delete(0,END)
self.entry_imagefile.insert(0,tiffname)
self.toplevel.destroy()
def getParams(self):
self.zl = self.entry_zl.get()
self.imagefile = self.entry_imagefile.get()
self.dxfile = self.entry_dxfile.get()
self.dyfile = self.entry_dyfile.get()
self.dz = self.entry_dz.get()
self.isInf = self.check.get()
self.Om0 = self.entry_Om0.get()
self.H0 = self.entry_H0.get()
errors = []
        try:
            self.zl = float(self.zl)
            if self.zl < 0:
                raise ValueError
        except ValueError:
            errors.append('Cluster redshift must be a number > 0.')
        for file in [self.imagefile, self.dxfile, self.dyfile]:
            if not os.path.isfile(file):
                errors.append('File "'+file+'" does not exist.')
        if self.isInf == 0:
            try:
                self.dz = float(self.dz)
                if self.dz < self.zl:
                    raise ValueError
            except (ValueError, TypeError):
                errors.append('Deflect file redshift must be a number > cluster redshift.')
        try:
            self.Om0 = float(self.Om0)
            if self.Om0 < 0 or self.Om0 > 1:
                raise ValueError
        except ValueError:
            errors.append('Omega M must be a number between 0 and 1')
        try:
            self.H0 = float(self.H0)
            if self.H0 < 0 or self.H0 > 100:
                raise ValueError
        except ValueError:
            errors.append('H0 must be a number between 0 and 100.')
if len(errors) > 0:
tkMB.showinfo('Error','\n\n'.join(errors))
else:
pickle.dump((self.zl, self.imagefile, self.dxfile, self.dyfile, self.dz, self.isInf,self.Om0,self.H0),open('last.brt','wb'))
self.startUp()
self.root.destroy()
def loadPrevious(self):
if os.path.isfile('last.brt'):
self.zl, self.imagefile, self.dxfile, self.dyfile, self.dz, self.isInf, self.Om0, self.H0 = pickle.load(open('last.brt','rb'))
self.entry_zl.delete(0,END)
self.entry_zl.insert(0,str(self.zl))
self.entry_imagefile.delete(0,END)
self.entry_imagefile.insert(0,self.imagefile)
self.entry_dxfile.delete(0,END)
self.entry_dxfile.insert(0,self.dxfile)
self.entry_dyfile.delete(0,END)
self.entry_dyfile.insert(0,self.dyfile)
self.entry_dz.delete(0,END)
self.entry_dz.insert(0,str(self.dz))
self.check.set(self.isInf)
self.entry_Om0.delete(0,END)
self.entry_Om0.insert(0,str(self.Om0))
self.entry_H0.delete(0,END)
self.entry_H0.insert(0,str(self.H0))
else:
tkMB.showinfo('Error','Could not locate previous model. Enter new parameters.')
def startUp(self):
global zl, image, deflectx, deflecty, dDXdx, dDXdy, dDYdx, dDYdy, Om0, H0, wcs, scalefactor, xoff, yoff
zl = self.zl
Om0 = self.Om0
H0 = self.H0
with warnings.catch_warnings():
warnings.simplefilter('ignore')
im = Image.open(self.imagefile)
wtiff = pickle.load(open(self.imagefile+'.wcs','rb'))
deflectx = fits.getdata(self.dxfile)
deflecty = fits.getdata(self.dyfile)
h = fits.getheader(self.dxfile)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
wcs = WCS(h)
ra,dec = wcs.wcs_pix2world([1,h['NAXIS1']+1],[1,h['NAXIS2']+1],1)
x,y = wtiff.wcs_world2pix(ra,dec,1)
xoff = 0.5-(x[0]-int(x[0])+x[1]-int(x[1]))/2
yoff = 0.5-(y[0]-int(y[0])+y[1]-int(y[1]))/2
image = im.crop((int(x[0]),im.height-int(y[1])+1,int(x[1])+1,im.height-int(y[0])))
scalefactor = image.width/deflectx.shape[0]
ps = h['CDELT2']*3600
deflectx /= ps
deflecty /= ps
if self.isInf == 0:
ratio = dlsds(zl,[self.dz],Om0=Om0,H0=H0)
deflectx /= ratio[0]
deflecty /= ratio[0]
with warnings.catch_warnings():
warnings.simplefilter('ignore')
dDXdy,dDXdx = np.gradient(deflectx)
dDYdy,dDYdx = np.gradient(deflecty)
class MainWindow:
def __init__(self):
self.root = Tk()
self.root.wm_title("BRT")
self.root.geometry("1000x750")
self.points = []
self.labels = []
self.curves = []
self.arcs = []
self.arc_annotate = []
self.arc_labels = np.array([])
self.arc_x = np.array([])
        self.arc_y = np.array([])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: <NAME>
# Contact: <EMAIL>
# Date: 18/12/2018
# This code generates train/test splits of edges from input graphs for evaluating graph embeddings
# on link prediction. It also provides false train and test edge sets of the required sizes.
# The train/test sets are efficiently generated by: i) obtaining a spanning tree of the input graph
# selected uniformly at random. ii) adding more edges to the spanning tree until the required amount
# of train edges is reached.
from __future__ import division
from __future__ import print_function
import os
import random
import warnings
import networkx as nx
import numpy as np
import scipy as sp
from scipy.sparse import triu
from scipy.sparse import tril
from scipy.sparse.csgraph import depth_first_tree
from joblib import Parallel, delayed
def _sanity_check(G):
r"""
Helper function that checks if the input graphs contains a single connected component. Raises an error if not.
Parameters
----------
G : graph
A NetworkX graph
Raises
------
ValueError
If the graph has more than one (weakly) connected component.
"""
# Compute the number of connected components
if G.is_directed():
num_ccs = nx.number_weakly_connected_components(G)
else:
num_ccs = nx.number_connected_components(G)
# Rise an error if more than one CC exists
if num_ccs != 1:
raise ValueError("Input graph should contain one (weakly) connected component. "
"This graph contains: " + str(num_ccs))
def broder_alg(G, E):
r"""
Runs Andrei Broder's algorithm to select uniformly at random a spanning tree of the input
    graph. The direction of the edges included in train_E is taken from E, which respects the
edge directions in the original graph, thus, the results are still valid for directed graphs.
For pairs of nodes in the original digraphs which have edges in both directions, we randomly
select the direction of the edge included in the ST.
Parameters
----------
G : graph
A NetworkX graph
E : set
A set of directed or undirected edges constituting the graph G.
Returns
-------
train_E : set
A set of edges of G describing the random spanning tree
References
----------
.. [1] <NAME>, "Generating Random Spanning Trees", Proc. of the 30th Annual Symposium
on Foundations of Computer Science, pp. 442--447, 1989.
"""
# Create two partitions, S and T. Initially store all nodes in S.
S = set(G.nodes)
T = set()
# Pick a random node as the "current node" and mark it as visited.
current_node = random.sample(S, 1).pop()
S.remove(current_node)
T.add(current_node)
# Perform random walk on the graph
train_E = set()
while S:
if G.is_directed():
neighbour_node = random.sample(list(G.successors(current_node)) + list(G.predecessors(current_node)), 1).pop()
else:
neighbour_node = random.sample(list(G.neighbors(current_node)), 1).pop()
if neighbour_node not in T:
S.remove(neighbour_node)
T.add(neighbour_node)
if random.random() < 0.5:
if (current_node, neighbour_node) in E:
train_E.add((current_node, neighbour_node))
else:
train_E.add((neighbour_node, current_node))
else:
if (neighbour_node, current_node) in E:
train_E.add((neighbour_node, current_node))
else:
train_E.add((current_node, neighbour_node))
current_node = neighbour_node
# Return the set of edges constituting the spanning tree
return train_E
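# Minimal usage sketch of broder_alg(), using networkx's karate club graph as an
# arbitrary connected test graph; this helper is illustrative only and is not
# called anywhere in this module. The returned edge set is a spanning tree, i.e.
# it is connected and contains exactly |V| - 1 edges.
def _example_broder_spanning_tree():
    G = nx.karate_club_graph()
    train_E = broder_alg(G, set(G.edges))
    H = nx.Graph()
    H.add_nodes_from(G.nodes)
    H.add_edges_from(train_E)
    assert nx.is_connected(H)
    assert len(train_E) == G.number_of_nodes() - 1
    return train_E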
def wilson_alg(G, E):
r"""
Runs Willson's algorithm also known as loop erasing random walk to select uniformly at random
a spanning tree of the input graph. A set E contains the original direction of edges in graph G,
and train_E will only include edges which exist in E, thus, the results are still valid for
digraphs. For pairs of nodes in the original digraphs, which have edges in both directions,
we select the direction of the edge in the ST at random.
Parameters
----------
G : graph
A NetworkX graph
E : set
A set of directed or undirected edges constituting the graph G.
Returns
-------
train_E : set
A set of edges of G describing the random spanning tree
References
----------
.. [1] <NAME>, "Generating Random Spanning Trees More Quickly than the Cover Time",
In Proceedings of STOC, pp. 296--303, 1996.
.. [2] <NAME> and <NAME>, "How to Get a Perfectly Random Sample from a Generic
Markov Chain and Generate a Random Spanning Tree of a Directed Graph",
Journal of Algorithms 27, pp. 170--217, 1998.
"""
# Stores the nodes which are part of the trees created by the LERW.
intree = set()
# A dictionary which works as a linked list and stores the spanning tree
tree = dict()
# Pick a random node as the root of the spanning tree and add it to intree
# For undirected graphs this is the correct approach
r = random.sample(G.nodes, 1).pop()
intree.add(r)
for node in G.nodes:
i = node
while i not in intree:
# This random successor works for weighted and unweighted graphs because we just
# want to select a bunch of edges from the graph, no matter what the weights are.
if G.is_directed():
tree[i] = random.sample(list(G.successors(i)) + list(G.predecessors(i)), 1).pop()
else:
tree[i] = random.sample(list(G.neighbors(i)), 1).pop()
i = tree[i]
i = node
while i not in intree:
intree.add(i)
i = tree[i]
# Create a set to store the train edges
train_E = set()
# This is only relevant for directed graphs to make the selection of edge direction equiprobable
for e in set(zip(tree.keys(), tree.values())):
if random.random() < 0.5:
if e in E:
train_E.add(e)
else:
train_E.add(e[::-1])
else:
if e[::-1] in E:
train_E.add(e[::-1])
else:
train_E.add(e)
# Return the edges of the random spanning tree
return train_E
def _compute_one_split(G, output_path, owa=True, train_frac=0.51, num_fe_train=None, num_fe_test=None, split_id=0):
r"""
Computes one split of train/test edges as well as non-edges from an input graph and writes the data to files.
The train sets are always connected / weakly connected and span all nodes of the input graph.
Input graphs (digraphs) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
output_path : string
Indicates the path where data will be stored. Can include a name for all splits to share.
owa : bool, optional
Encodes the belief that the network respects or not the open world assumption. Default is True.
If OWA=True, false train edges can be true test edges. False edges sampled from train graph.
If OWA=False, closed world is assumed so false train edges are known to be false (not in G)
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
num_fe_train : int, optional
The number of train false edges to generate. Default is same number as true train edges.
num_fe_test : int, optional
The number of test false edges to generate. Default is same number as true test edges.
split_id : int, optional
The ID of train/test split. Default is 0.
"""
# Generate train and test edge splits
train_E, test_E = split_train_test(G, train_frac)
# Generate the train/test false edges
if owa:
train_E_false, test_E_false = generate_false_edges_owa(G, train_E, test_E, num_fe_train, num_fe_test)
else:
train_E_false, test_E_false = generate_false_edges_cwa(G, train_E, test_E, num_fe_train, num_fe_test)
# Write the computed split to a file
store_train_test_splits(output_path, train_E, train_E_false, test_E, test_E_false, split_id)
def compute_splits_parallel(G, output_path, owa=True, train_frac=0.51, num_fe_train=None, num_fe_test=None,
num_splits=10):
r"""
Computes in parallel the required number of train/test splits of edges and non-edges from an input graph
and writes the data to files. The train sets are always connected / weakly connected and span all nodes
of the input graph. Input graphs (digraphs) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
output_path : string
Indicates the path where data will be stored. Can include a name for all splits to share.
owa : bool, optional
Encodes the belief that the network respects or not the open world assumption. Default is True.
If OWA=True, false train edges can be true test edges. False edges sampled from train graph.
If OWA=False, closed world is assumed so false train edges are known to be false (not in G)
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
num_fe_train : int, optional
The number of train false edges to generate. Default is same number as true train edges.
num_fe_test : int, optional
The number of test false edges to generate. Default is same number as true test edges.
num_splits : int, optional
The number of train/test splits to generate. Default is 10.
"""
# Compute the splits sequentially or in parallel
backend = 'multiprocessing'
path_func = delayed(_compute_one_split)
Parallel(n_jobs=num_splits, verbose=True, backend=backend)(
path_func(G, output_path, owa, train_frac, num_fe_train, num_fe_test, split) for split in range(num_splits))
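# Example call (hypothetical output path):
#   compute_splits_parallel(G, './splits/network', owa=True, train_frac=0.8, num_splits=5)
# computes five independent train/test splits, one per worker process, and
# writes each split to its own set of files via store_train_test_splits().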
def split_train_test(G, train_frac=0.51, st_alg='wilson'):
r"""
Computes one train/test split of edges from an input graph and returns the results.
The train set will be (weakly) connected and span all nodes of the input graph (digraph).
Input graph (digraph) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
st_alg : basestring, optional
The algorithm to use for generating the spanning tree constituting the backbone of the train set.
Options are: 'wilson' and 'broder'. The first option, 'wilson', also known as LERW is much faster in most cases.
Default is 'wilson'.
Returns
-------
train_E : set
The set of train edges
test_E : set
The set of test edges
Raises
------
ValueError
If the train_frac parameter is not in range (0, 1].
If the input graph G has more than one (weakly) connected component.
"""
# Sanity check to make sure the input is correct
_sanity_check(G)
if train_frac <= 0.0 or train_frac > 1.0:
raise ValueError('The train_frac parameter needs to be in range: (0.0, 1.0]')
if train_frac == 1.0:
return set(G.edges()), set()
# Create a set of all edges in G
E = set(G.edges)
if st_alg == 'broder':
# Compute a random spanning tree using broder's algorithm
train_E = broder_alg(G, E)
else:
# Compute a random spanning tree using wilson's algorithm
train_E = wilson_alg(G, E)
# Fill test edge set as all edges not in the spanning tree
test_E = E - train_E
# Compute num train edges
num_E = len(E)
num_train_E = np.ceil(train_frac * num_E)
# Check if the num edges in the spanning tree is already greater than the num train edges
num_toadd = int(num_train_E - len(train_E))
if num_toadd <= 0:
print("WARNING: In order to return a connected train set the train_frac parameter needs to be higher!")
print("In this case, the provided train set constitutes a random spanning tree of the input graph.")
print("The train_frac value used is: {}".format(len(train_E) / num_E))
print("Edges requested: train = {}, test = {}".format(num_train_E, num_E - num_train_E))
print("Edges returned: train = {}, test = {}".format(len(train_E), num_E - len(train_E)))
else:
# Add more edges to train set from test set until it has desired size
edges = set(random.sample(test_E, num_toadd))
test_E = test_E - edges
train_E = train_E | edges
# Perform some simple checks
assert E == (test_E | train_E)
assert len(E) == len(test_E) + len(train_E)
if num_toadd > 0:
assert num_train_E == len(train_E)
# Return the sets of edges
return train_E, test_E
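# Minimal usage sketch of split_train_test(), using the karate club graph as an
# arbitrary small connected example; this helper is illustrative only and is not
# called anywhere in this module. It checks the guarantee from the docstring
# above: the train edges alone still span a connected graph.
def _example_split_train_test():
    G = nx.karate_club_graph()
    train_E, test_E = split_train_test(G, train_frac=0.8)
    H = nx.Graph()
    H.add_nodes_from(G.nodes)
    H.add_edges_from(train_E)
    assert nx.is_connected(H)
    assert train_E.isdisjoint(test_E)
    return train_E, test_E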
def rand_split_train_test(G, train_frac=0.51):
r"""
Computes one train/test split of edges from an input graph and returns the results.
The train/test split is computed by randomly removing 1-train_frac edges from the graph.
From the remaining edges, those in the mainCC constitute the train edges. From the set
of removed edges, those whose nodes are in the train set, are considered part or the
test set. The proportion of train/test edges returned might not be the required one.
The train set will be (weakly) connected and span all nodes of the input graph.
Input graph (digraph) can contain one or many (weakly) connected components.
Parameters
----------
G : graph
A NetworkX graph
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
Returns
-------
train_E : set
The set of train edges
test_E : set
The set of test edges
Raises
------
ValueError
If the train_frac parameter is not in range (0, 1].
"""
if train_frac <= 0.0 or train_frac > 1.0:
raise ValueError('The train_frac parameter needs to be in range: (0.0, 1.0]')
if train_frac == 1.0:
return set(G.edges()), set()
# Create a set of all edges in G
E = set(G.edges)
num_E = len(E)
# Compute the potential number of train and test edges which corresponds to the fraction given
num_train_E = int(np.ceil(train_frac * num_E))
num_test_E = int(num_E - num_train_E)
# Randomly remove 1-train_frac edges from the graph and store them as potential test edges
pte_edges = set(random.sample(E, num_test_E))
# The remaining edges are potential train edges
ptr_edges = E - pte_edges
# Create a graph containing all ptr_edges and compute the mainCC
if G.is_directed():
H = nx.DiGraph()
H.add_edges_from(ptr_edges)
maincc = max(nx.weakly_connected_component_subgraphs(H), key=len)
else:
H = nx.Graph()
H.add_edges_from(ptr_edges)
maincc = max(nx.connected_component_subgraphs(H), key=len)
# The edges in the mainCC graph are the actual train edges
train_E = set(maincc.edges)
# Remove potential test edges for which the end nodes do not exist in the train_E
test_E = set()
for (src, dst) in pte_edges:
if src in maincc.nodes and dst in maincc.nodes:
test_E.add((src, dst))
# Return the sets of edges
return train_E, test_E
def naive_split_train_test(G, train_frac=0.51):
r"""
Computes one train/test split of edges from an input graph and returns the results.
The sets are computed using the naive approach that checks connectivity of the graph
for each removed edge. If graph gets disconnected, that edges is not removed.
The train set will be (weakly) connected and span all nodes of the input graph.
Input graph (digraph) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
Returns
-------
train_E : set
The set of train edges
test_E : set
The set of test edges
Raises
------
ValueError
If the train_frac parameter is not in range (0, 1].
If the input graph G has more than one (weakly) connected component.
"""
# Sanity check to make sure the input is correct
_sanity_check(G)
if train_frac <= 0.0 or train_frac > 1.0:
raise ValueError('The train_frac parameter needs to be in range: (0.0, 1.0]')
if train_frac == 1.0:
return set(G.edges()), set()
# Is directed
directed = G.is_directed()
G = G.copy()
# Create a set of all edges in G
aux = np.array(G.edges)
np.random.shuffle(aux)
E = set([tuple(edge) for edge in aux])
# Compute num train edges
num_E = len(E)
num_train_E = np.ceil(train_frac * num_E)
num_test_E = num_E - num_train_E
# Initialize train edges to an empty set
train_E = set(G.edges())
# Initialize test edges to an empty set
test_E = set()
# Iterate over shuffled edges, add to train/val sets
for i, edge in enumerate(E):
# if i % 500 == 0:
# print('{}/{}'.format(i, num_test_E))
node1 = edge[0]
node2 = edge[1]
# If removing edge would disconnect a connected component, backtrack and move on
G.remove_edge(node1, node2)
if directed:
if nx.number_weakly_connected_components(G) > 1:
G.add_edge(node1, node2)
continue
else:
if nx.number_connected_components(G) > 1:
G.add_edge(node1, node2)
continue
# Fill test_edges
if len(test_E) < num_test_E:
test_E.add(edge)
train_E.remove(edge)
else:
break
# Perform some simple checks
assert E == (test_E | train_E)
assert len(E) == len(train_E) + len(test_E)
# Return the sets of edges
return train_E, test_E
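# Note: naive_split_train_test() re-checks (weak) connectivity of the whole
# graph after every tentative edge removal, so it is considerably slower than
# the spanning-tree based split_train_test() on large graphs and is mainly
# useful as a reference implementation.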
def generate_false_edges_owa(G, train_E, test_E, num_fe_train=None, num_fe_test=None):
r"""
This method generates false train and test edges for both directed and undirected graphs.
The train and test sets are non overlapping.
Follows the open world assumption, so false train edges are generated only using the true train edges,
so false train edges can be true test edges. This is the case for evolving graphs where edges can only appear.
For undirected graphs the output is sorted (smallNodeID, bigNodeID)
Parameters
----------
G : graph
A NetworkX graph
train_E : set
The set of train edges.
test_E : set
The set of test edges.
num_fe_train : int, optional
The number of train false edges to generate. Default is same number as true train edges.
num_fe_test : int, optional
The number of test false edges to generate. Default is same number as true test edges.
Returns
-------
train_false_E : set
The set of false train edges
test_false_E : set
The set of false test edges
Raises
------
ValueError
If the input graph G has more than one (weakly) connected component.
If more false edges than existing in the graph are required.
"""
# Sanity check to make sure the input is correct
_sanity_check(G)
# Create a set of vertices
V = set(G.nodes)
# Initialize the sizes of the false edges
if num_fe_train is None:
num_fe_train = len(train_E)
if num_fe_test is None:
num_fe_test = len(test_E)
# Make sure the required amount of false edges can be generated
max_nonedges = len(V) * len(V) - len(train_E)
if num_fe_train > max_nonedges:
raise ValueError('Too many false train edges required! Max available for train+test is {}'.format(max_nonedges))
else:
if num_fe_train + num_fe_test > max_nonedges:
warnings.warn('Too many false edges required in train+test! '
'Using maximum number of false test edges available: {}'.format(max_nonedges - num_fe_train))
num_fe_test = max_nonedges - num_fe_train
# Create sets to store the false edges
train_E_false = set()
test_E_false = set()
# Generate negative train edges
while len(train_E_false) < num_fe_train:
edge = tuple(random.sample(V, 2))
redge = tuple(reversed(edge))
if edge not in train_E:
if G.is_directed():
train_E_false.add(edge)
else:
if redge not in train_E:
train_E_false.add(tuple(sorted(edge)))
# Generate negative test edges
while len(test_E_false) < num_fe_test:
edge = tuple(random.sample(V, 2))
redge = tuple(reversed(edge))
if edge not in train_E and edge not in test_E and edge not in train_E_false:
if G.is_directed():
test_E_false.add(edge)
else:
if redge not in train_E and redge not in test_E and redge not in train_E_false:
test_E_false.add(tuple(sorted(edge)))
# Perform some simple check before returning the result
assert len(train_E_false) == num_fe_train
assert len(test_E_false) == num_fe_test
assert train_E_false.isdisjoint(test_E_false)
assert train_E_false.isdisjoint(train_E)
assert test_E_false.isdisjoint(train_E | test_E)
# Return the sets of false edges
return train_E_false, test_E_false
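# Minimal usage sketch of generate_false_edges_owa(); this helper is
# illustrative only and is not called anywhere in this module. Under the open
# world assumption the false train edges are only checked against the true
# train edges, so they may coincide with true test edges, while the false test
# edges are guaranteed to be disjoint from all true edges.
def _example_false_edges_owa():
    G = nx.karate_club_graph()
    train_E, test_E = split_train_test(G, train_frac=0.8)
    train_E_false, test_E_false = generate_false_edges_owa(G, train_E, test_E)
    # overlap with true test edges is possible (and allowed) for false train edges
    overlap = train_E_false & test_E
    # false test edges never overlap any true edge
    assert test_E_false.isdisjoint(train_E | test_E)
    return overlap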
def generate_false_edges_cwa(G, train_E, test_E, num_fe_train=None, num_fe_test=None):
r"""
This method generates false train and test edges for both directed and undirected graphs.
The train and test sets are non overlapping.
Follows the closed world assumption, so false train edges are selected as known to be false.
This is the case for some networks e.g. protein-protein interaction where information about
both the positive class (existing edges) and the negative class (missing edges) exists.
For undirected graphs the output is sorted (smallNodeID, bigNodeID)
Parameters
----------
G : graph
A NetworkX graph
train_E : set
The set of train edges.
test_E : set
The set of test edges.
num_fe_train : int, optional
The number of train false edges to generate. Default is same number as true train edges.
num_fe_test : int, optional
The number of test false edges to generate. Default is same number as true test edges.
Returns
-------
train_false_E : set
The set of false train edges
test_false_E : set
The set of false test edges
Raises
------
ValueError
If the input graph G has more than one (weakly) connected component.
If more false edges than existing in the graph are required.
"""
# Sanity check to make sure the input is correct
_sanity_check(G)
# Create a set of vertices
V = set(G.nodes)
# Initialize the sizes of the false edges
if num_fe_train is None:
num_fe_train = len(train_E)
if num_fe_test is None:
num_fe_test = len(test_E)
# Make sure the required amount of false edges can be generated
max_nonedges = len(V) * len(V) - len(G.edges)
if num_fe_train > max_nonedges:
raise ValueError(
'Too many false train edges required! Max available for train+test is {}'.format(max_nonedges))
else:
if num_fe_train + num_fe_test > max_nonedges:
warnings.warn('Too many false edges required in train+test! '
'Using maximum number of false test edges available: {}'.format(max_nonedges - num_fe_train))
# num_fe_test = max_nonedges - num_fe_train
return _getall_false_edges(G, (1.0*num_fe_train)/max_nonedges)
# Create sets to store the false edges
train_E_false = set()
test_E_false = set()
# Generate negative train edges
while len(train_E_false) < num_fe_train:
edge = tuple(random.sample(V, 2))
redge = tuple(reversed(edge))
if edge not in train_E and edge not in test_E:
if G.is_directed():
train_E_false.add(edge)
else:
if redge not in train_E and redge not in test_E:
train_E_false.add(tuple(sorted(edge)))
# Generate negative test edges
while len(test_E_false) < num_fe_test:
edge = tuple(random.sample(V, 2))
redge = tuple(reversed(edge))
if edge not in train_E and edge not in test_E and edge not in train_E_false:
if G.is_directed():
test_E_false.add(edge)
else:
if redge not in train_E and redge not in test_E and redge not in train_E_false:
test_E_false.add(tuple(sorted(edge)))
# Perform some simple check before returning the result
assert len(train_E_false) == num_fe_train
assert len(test_E_false) == num_fe_test
assert train_E_false.isdisjoint(test_E_false)
assert train_E_false.isdisjoint(train_E | test_E)
assert test_E_false.isdisjoint(train_E | test_E)
# Return the sets of false edges
return train_E_false, test_E_false
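# ---------------------------------------------------------------------------- #
# Hedged usage sketch (not part of the original module): one way
# generate_false_edges_cwa could be exercised on a small connected graph. The
# naive edge split below is only for illustration; real splits should come from
# whatever train/test splitting routine this module is used with.
def _example_generate_false_edges_cwa():
    import networkx as nx
    G = nx.karate_club_graph()
    edges = [tuple(sorted(e)) for e in G.edges()]
    train_E, test_E = set(edges[:60]), set(edges[60:])
    train_false, test_false = generate_false_edges_cwa(
        G, train_E, test_E, num_fe_train=10, num_fe_test=10)
    print(len(train_false), len(test_false))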
def generate_false_edges_cwa_close1(G, train_E, test_E, num_fe_train=None, num_fe_test=None, length=None):
r"""
This method generates false train and test edges for both directed and undirected graphs.
    The train and test sets are non-overlapping.
Follows the closed world assumption, so false train edges are selected as known to be false.
This is the case for some networks e.g. protein-protein interaction where information about
both the positive class (existing edges) and the negative class (missing edges) exists.
For undirected graphs the output is sorted (smallNodeID, bigNodeID)
Parameters
----------
G : graph
A NetworkX graph
train_E : set
The set of train edges.
test_E : set
The set of test edges.
num_fe_train : int, optional
The number of train false edges to generate. Default is same number as true train edges.
num_fe_test : int, optional
        The number of test false edges to generate. Default is same number as true test edges.
    length : int
        Only candidate non-edges whose endpoints are disconnected or at least `length` hops
        apart in G are accepted. Despite the None default, a value must be provided.
Returns
-------
train_false_E : set
The set of false train edges
test_false_E : set
The set of false test edges
Raises
------
ValueError
If the input graph G has more than one (weakly) connected component.
        If more false edges are required than the number of non-edges in the graph.
"""
# Sanity check to make sure the input is correct
_sanity_check(G)
    # Create a list of vertices (random.sample requires a sequence)
    V = list(G.nodes)
# Initialize the sizes of the false edges
if num_fe_train is None:
num_fe_train = len(train_E)
if num_fe_test is None:
num_fe_test = len(test_E)
# Make sure the required amount of false edges can be generated
max_nonedges = len(V) * len(V) - len(G.edges)
if num_fe_train > max_nonedges:
raise ValueError(
'Too many false train edges required! Max available for train+test is {}'.format(max_nonedges))
else:
if num_fe_train + num_fe_test > max_nonedges:
warnings.warn('Too many false edges required in train+test! '
'Using maximum number of false test edges available: {}'.format(max_nonedges - num_fe_train))
# num_fe_test = max_nonedges - num_fe_train
return _getall_false_edges(G, (1.0*num_fe_train)/max_nonedges)
# Create sets to store the false edges
train_E_false = set()
test_E_false = set()
# Generate negative train edges
while len(train_E_false) < num_fe_train:
edge = tuple(random.sample(V, 2))
redge = tuple(reversed(edge))
if edge not in train_E and edge not in test_E:
if G.is_directed():
if (not nx.has_path(G, source=edge[0], target=edge[1])) or (nx.shortest_path_length(G, source=edge[0], target=edge[1]) >= length):
train_E_false.add(edge)
else:
if redge not in train_E and redge not in test_E:
if (not nx.has_path(G, source=edge[0], target=edge[1])) or (nx.shortest_path_length(G, source=edge[0], target=edge[1]) >= length):
train_E_false.add(tuple(sorted(edge)))
# Generate negative test edges
while len(test_E_false) < num_fe_test:
edge = tuple(random.sample(V, 2))
redge = tuple(reversed(edge))
if edge not in train_E and edge not in test_E and edge not in train_E_false:
if G.is_directed():
if (not nx.has_path(G, source=edge[0], target=edge[1])) or (nx.shortest_path_length(G, source=edge[0], target=edge[1]) >= length):
test_E_false.add(edge)
else:
if redge not in train_E and redge not in test_E and redge not in train_E_false:
if (not nx.has_path(G, source=edge[0], target=edge[1])) or (nx.shortest_path_length(G, source=edge[0], target=edge[1]) >= length):
test_E_false.add(tuple(sorted(edge)))
# Perform some simple check before returning the result
assert len(train_E_false) == num_fe_train
assert len(test_E_false) == num_fe_test
assert train_E_false.isdisjoint(test_E_false)
assert train_E_false.isdisjoint(train_E | test_E)
assert test_E_false.isdisjoint(train_E | test_E)
# Return the sets of false edges
return train_E_false, test_E_false
def generate_false_edges_cwa_close(G, train_E, test_E, num_fe_train=None, num_fe_test=None, length=None):
r"""
This method generates false train and test edges for both directed and undirected graphs.
    The train and test sets are non-overlapping.
Follows the closed world assumption, so false train edges are selected as known to be false.
This is the case for some networks e.g. protein-protein interaction where information about
both the positive class (existing edges) and the negative class (missing edges) exists.
For undirected graphs the output is sorted (smallNodeID, bigNodeID)
Parameters
----------
G : graph
A NetworkX graph
train_E : set
The set of train edges.
test_E : set
The set of test edges.
num_fe_train : int, optional
The number of train false edges to generate. Default is same number as true train edges.
num_fe_test : int, optional
The number of test false edges to generate. Default is same number as true test edges.
length: int, optional
        If given, only false train and test edges whose endpoints are disconnected or whose
        shortest path length in G is larger than this value are generated.
Returns
-------
train_false_E : set
The set of false train edges
test_false_E : set
The set of false test edges
Raises
------
ValueError
If the input graph G has more than one (weakly) connected component.
        If more false edges are required than the number of non-edges in the graph.
"""
# Sanity check to make sure the input is correct
_sanity_check(G)
    # Create a list of vertices (random.sample requires a sequence)
    V = list(G.nodes)
# Initialize the sizes of the false edges
if num_fe_train is None:
num_fe_train = len(train_E)
if num_fe_test is None:
num_fe_test = len(test_E)
# Make sure the required amount of false edges can be generated
max_nonedges = len(V) * len(V) - len(G.edges)
if num_fe_train > max_nonedges:
raise ValueError(
'Too many false train edges required! Max available for train+test is {}'.format(max_nonedges))
else:
if num_fe_train + num_fe_test > max_nonedges:
warnings.warn('Too many false edges required in train+test! '
'Using maximum number of false test edges available: {}'.format(max_nonedges - num_fe_train))
# num_fe_test = max_nonedges - num_fe_train
return _getall_false_edges(G, (1.0*num_fe_train)/max_nonedges)
# Create sets to store the false edges
train_E_false = set()
test_E_false = set()
# Generate negative train edges
while len(train_E_false) < num_fe_train:
edge = tuple(random.sample(V, 2))
redge = tuple(reversed(edge))
if length is not None:
if G.is_directed():
if not nx.has_path(G, source=edge[0], target=edge[1]):
if not nx.has_path(G, source=edge[1], target=edge[0]):
if edge not in train_E and edge not in test_E:
train_E_false.add(edge)
train_E_false.add(redge)
continue
continue
continue
if not nx.has_path(G, source=edge[1], target=edge[0]):
continue
if G.is_directed() and (nx.shortest_path_length(G, source=edge[0], target=edge[1]) <= length
or nx.shortest_path_length(G, source=edge[1], target=edge[0]) <= length):
continue
else:
if nx.shortest_path_length(G, source=edge[0], target=edge[1]) <= length:
continue
if edge not in train_E and edge not in test_E:
if G.is_directed():
train_E_false.add(edge)
else:
if redge not in train_E and redge not in test_E:
train_E_false.add(tuple(sorted(edge)))
# Generate negative test edges
while len(test_E_false) < num_fe_test:
edge = tuple(random.sample(V, 2))
redge = tuple(reversed(edge))
if length is not None:
if (edge in test_E) or (edge in train_E_false) or (edge in test_E_false):
continue
if G.is_directed() :
if not nx.has_path(G, source=edge[0], target=edge[1]):
if not nx.has_path(G, source=edge[1], target=edge[0]):
if edge not in train_E and edge not in test_E:
                            test_E_false.add(edge)
                            test_E_false.add(redge)
continue
continue
continue
if not nx.has_path(G, source=edge[1], target=edge[0]):
continue
if G.is_directed() and (nx.shortest_path_length(G, source=edge[0], target=edge[1]) <= length
or nx.shortest_path_length(G, source=edge[1], target=edge[0]) <= length):
continue
else:
if nx.shortest_path_length(G, source=edge[0], target=edge[1]) <= length:
continue
if (edge not in train_E) and (edge not in test_E) and (edge not in train_E_false):
if G.is_directed():
test_E_false.add(edge)
else:
if (redge not in train_E) and (redge not in test_E) and (redge not in train_E_false):
test_E_false.add(redge)
# Perform some simple check before returning the result
assert len(train_E_false) == num_fe_train
assert len(test_E_false) == num_fe_test
assert train_E_false.isdisjoint(test_E_false)
assert train_E_false.isdisjoint(train_E | test_E)
assert test_E_false.isdisjoint(train_E | test_E)
# Return the sets of false edges
return train_E_false, test_E_false
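# ---------------------------------------------------------------------------- #
# Hedged illustration (not part of the original module): the *_close variants
# above only accept a candidate non-edge whose endpoints are disconnected or at
# least `length` hops apart (the exact comparison differs slightly between the
# two variants). The helper below isolates that acceptance test.
def _is_distant_non_edge(G, u, v, length):
    import networkx as nx
    if G.has_edge(u, v):
        return False
    if not nx.has_path(G, source=u, target=v):
        return True
    return nx.shortest_path_length(G, source=u, target=v) >= length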
def _getall_false_edges(G, fe_train_frac):
print("Generating all non-edges and splitting them in train and test...")
train_E_false = list()
test_E_false = list()
for e in nx.non_edges(G):
r = random.uniform(0, 1)
if r <= fe_train_frac:
train_E_false.append(e)
else:
test_E_false.append(e)
return train_E_false, test_E_false
def redges_false(train_E, test_E, output_path=None):
r"""
For directed graphs computes all non-edges (a->b) such that the opposite edge (a<-b) exists in the graph.
It does this for both the train and test edge sets. These non-edges can be used to asses the performance
of the embedding methods on predicting non-reciprocated edges.
Parameters
----------
train_E : set
The set of train edges.
test_E : set
The set of test edges.
output_path : string, optional
A path or file where to store the results. Default None.
Returns
-------
train_redges_false : set
A set of edges respecting the mentioned property regarding the train edges
test_redges_false : set
A set of edges respecting the mentioned property on the complete graph
"""
# Reverse all train and test edges
train_redges_false = set(tuple(reversed(edge_tuple)) for edge_tuple in train_E)
test_redges_false = set(tuple(reversed(edge_tuple)) for edge_tuple in test_E)
# Keep only the reversed edges which are not real train edges
train_redges_false = train_redges_false - train_E
# Keep only the test reversed edges which are not true edges in the graph
test_redges_false = test_redges_false - train_E
test_redges_false = test_redges_false - test_E
if output_path is not None:
# Store the reversed edges
train_redges_false_np = np.array([list(edge_tuple) for edge_tuple in train_redges_false])
test_redges_false_np = np.array([list(edge_tuple) for edge_tuple in test_redges_false])
        # Save each set to its own file (writing both arrays to the same path would
        # make the second call overwrite the first)
        np.savetxt(output_path + '_train.csv', train_redges_false_np, delimiter=',', fmt='%d')
        np.savetxt(output_path + '_test.csv', test_redges_false_np, delimiter=',', fmt='%d')
# Return the computed sets
return train_redges_false, test_redges_false
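# ---------------------------------------------------------------------------- #
# Hedged usage sketch (not part of the original module): on a tiny directed
# edge split, redges_false returns the reversed directions that are not
# themselves true edges, i.e. candidate non-reciprocated edges.
def _example_redges_false():
    train_E = {(0, 1), (1, 2), (2, 1)}
    test_E = {(2, 3)}
    train_rf, test_rf = redges_false(train_E, test_E)
    # (1, 0) survives because 0->1 exists but 1->0 does not, while (1, 2) and
    # (2, 1) cancel out since both directions are true edges.
    print(train_rf, test_rf)   # {(1, 0)} {(3, 2)}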
def store_train_test_splits(output_path, train_E, train_E_false, test_E, test_E_false, split_id=0):
r"""
Writes the sets of true and false edges to files in the provided path. All files will share
the same split number as an identifier. If any folder in the path do not exist, it will be generated.
Parameters
----------
output_path : string
Indicates the path where data will be stored. It can also include a name for all the splits to share.
train_E : set
Set of train edges
train_E_false : set
Set of train non-edges
test_E : set
Set of test edges
test_E_false : set
Set of test non-edges
split_id : int, optional
The ID of train/test split to be stored. Default is 0.
Returns
-------
filenames : list
A list of strings, the names given to the 4 files where the true and false train and test edge are stored.
"""
# Create path if it does not exist
if not os.path.exists(output_path):
os.makedirs(output_path)
# Convert edge-lists to numpy arrays
train_E = np.array([list(edge_tuple) for edge_tuple in train_E])
train_E_false = np.array([list(edge_tuple) for edge_tuple in train_E_false])
test_E = np.array([list(edge_tuple) for edge_tuple in test_E])
test_E_false = np.array([list(edge_tuple) for edge_tuple in test_E_false])
filenames = (os.path.join(output_path, "trE_{}.csv".format(split_id)),
os.path.join(output_path, "negTrE_{}.csv".format(split_id)),
os.path.join(output_path, "teE_{}.csv".format(split_id)),
os.path.join(output_path, "negTeE_{}.csv".format(split_id)))
# Save the splits in different files
np.savetxt(fname=filenames[0], X=train_E, delimiter=',', fmt='%d')
np.savetxt(fname=filenames[1], X=train_E_false, delimiter=',', fmt='%d')
np.savetxt(fname=filenames[2], X=test_E, delimiter=',', fmt='%d')
np.savetxt(fname=filenames[3], X=test_E_false, delimiter=',', fmt='%d')
# Return the names given to the 4 files where data is stored
return filenames
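# ---------------------------------------------------------------------------- #
# Hedged usage sketch (not part of the original module): writes a toy split to
# a temporary directory and prints the four file names returned.
def _example_store_train_test_splits():
    import tempfile
    out_dir = tempfile.mkdtemp()
    train_E, train_E_false = {(0, 1), (1, 2)}, {(0, 2)}
    test_E, test_E_false = {(2, 3)}, {(1, 3)}
    filenames = store_train_test_splits(
        out_dir, train_E, train_E_false, test_E, test_E_false, split_id=0)
    print(filenames)   # (.../trE_0.csv, .../negTrE_0.csv, .../teE_0.csv, .../negTeE_0.csv)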
def store_edgelists(train_path, test_path, train_edges, test_edges):
r"""
Writes the train and test edgelists to files with the specified names.
Parameters
----------
train_path : string
Indicates the path where the train data will be stored.
test_path : string
Indicates the path where the test data will be stored.
train_edges : array_like
Set of train true and false edges
test_edges : array_like
Set of test true and false edges
"""
# Convert edge-lists to numpy arrays
train_edges = np.array([list(edge_tuple) for edge_tuple in train_edges])
test_edges = np.array([list(edge_tuple) for edge_tuple in test_edges])
# Save the splits in different files
np.savetxt(fname=train_path, X=train_edges, delimiter=',', fmt='%d')
np.savetxt(fname=test_path, X=test_edges, delimiter=',', fmt='%d')
def check_overlap(filename, num_sets):
r"""
Shows the amount of overlap (shared elements) between edge sets from different random splits.
The path and name of the set (without split ID) for which to compute the overlap is required.
The method will read num_sets from the same path and compute the overlap between them.
Parameters
----------
filename : string
Indicates the path and name (without split ID) of the first set.
The sets are assumed to have sequential split IDs starting at 0.
num_sets : int
The number of sets for which to check the overlap.
"""
# Load the first set and transform it into a list of tuples
S = np.loadtxt(filename+"_0.csv", delimiter=',', dtype=int)
S = set(map(tuple, S))
# Initialize the intersection and union sets as all elements in first edge set
intrs = S
union = S
# Sequentially add the rest of the sets and check overlap
for i in range(num_sets-1):
# Read a new edge set
S = np.loadtxt(filename+"_{}.csv".format(i+1), delimiter=',', dtype=int)
S = set(map(tuple, S))
# Update intersection and union sets
intrs = intrs & S
union = union | S
# Print the information on screen
print("Intersection of {} sets is {}".format(i+2, len(intrs)))
print("Union of {} sets is {}".format(i+2, len(union)))
print("Jaccard coefficient: {}".format(len(intrs)/len(union)))
print("")
def random_edge_sample(a, samp_frac=0.01, directed=False):
r"""
Returns a sample of positive and negative edges from the given graph represented by `a` selected uniformly at
random without replacement. If the directed flag is set to False the samples are obtained only from the upper
triangle.
Parameters
----------
a : sparse matrix
A sparse adjacency matrix representing a graph.
    samp_frac : float, optional
        A float representing the percentage of elements to sample. Default is 0.01.
    directed : bool, optional
        A flag indicating if the adjacency matrix should be considered directed or undirected. If undirected,
        indices are obtained only from the upper triangle. Default is False.
Returns
-------
pos_e : ndarray
Positive edges
neg_e : ndarray
Negative edges
"""
n = a.shape[0]
if directed:
num_samp = int(n ** 2 * samp_frac / 100)
lin_indx_a = np.ravel_multi_index(a.nonzero(), (n, n))
# randomly generate linear indices
lin_indx = np.random.randint(0, n ** 2, num_samp)
else:
# For undir graphs we only need to sample half the num nodes
num_samp = int((n*(n-1))/2 * (samp_frac / 100))
lin_indx_a = np.ravel_multi_index(triu(a, k=1).nonzero(), (n, n))
ij = np.random.randint(0, n, size=(2, num_samp))
ij.sort(axis=0)
lin_indx = np.ravel_multi_index((ij[0], ij[1]), (n, n))
pos_e = np.intersect1d(lin_indx, lin_indx_a)
neg_e = np.setdiff1d(lin_indx, lin_indx_a)
# Remove the self edges
lin_diag_indxs = np.ravel_multi_index(np.diag_indices(n), (n, n))
pos_e = np.setdiff1d(pos_e, lin_diag_indxs)
neg_e = np.setdiff1d(neg_e, lin_diag_indxs)
# Unravel the linear indices to obtain src, dst pairs
pos_e = np.array(np.unravel_index(np.array(pos_e), (n, n))).T
neg_e = np.array(np.unravel_index(np.array(neg_e), (n, n))).T
return pos_e, neg_e
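# ---------------------------------------------------------------------------- #
# Hedged illustration (not part of the original module): the sampling above
# works on *linear* indices of the n x n adjacency matrix. The round trip below
# shows how a (row, col) pair maps to a single integer and back.
def _example_linear_index_round_trip(n=5):
    row, col = 1, 3
    lin = np.ravel_multi_index((row, col), (n, n))   # 1 * 5 + 3 = 8
    back = np.unravel_index(lin, (n, n))             # (1, 3)
    print(lin, back)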
def random_edge_sample_other(a, samp_frac=0.01, directed=False):
r"""
Returns a sample of positive and negative edges from the given graph represented by `a` selected uniformly at
random without replacement. If the directed flag is set to False the samples are obtained only from the upper
triangle.
A different take on the random sampling technique. Probably less efficient than the other one. For undir graphs
generates lots of candidates also from the bottom triangle to reach the desired density, this is not as efficient
as the other version.
Parameters
----------
a : sparse matrix
A sparse adjacency matrix representing a graph.
    samp_frac : float, optional
        A float representing the fraction of elements to sample. Default is 0.01 (1%).
    directed : bool, optional
        A flag indicating if the adjacency matrix should be considered directed or undirected. If undirected,
        indices are obtained only from the upper triangle. Default is False.
Returns
-------
pos_e : ndarray
Positive edges
neg_e : ndarray
Negative edges
"""
n = a.shape[0]
num_samp = int(n**2 * samp_frac)
# Generate sparse random matrix representing mask of samples
density = (num_samp + n) / n**2
mask = sp.sparse.rand(n, n, density)
if not directed:
# For undir graphs we only look at the upper triangle
mask = triu(mask, k=1)
else:
# Remove elements from diagonal
mask.setdiag(0)
mask.eliminate_zeros()
mask.data[:] = 1
lin_indx_samp = np.ravel_multi_index(mask.nonzero(), (n, n))
# All positive edges sampled in mask will stay in aux
aux = mask.multiply(a)
pos_e = np.array(aux.nonzero()).T
# The rest of the lin indx not positive are negative
lin_indx_ne = np.setdiff1d(lin_indx_samp, np.ravel_multi_index(aux.nonzero(), (n, n)))
neg_e = np.array(np.unravel_index(lin_indx_ne, (n, n)))
return pos_e, neg_e
def quick_split(G, train_frac=0.51):
r"""
Computes one train/test split of edges from an input graph and returns the results.
The train set will be (weakly) connected and span all nodes of the input graph (digraph).
This implementation uses a depth first tree to obtain edges covering all nodes for the train graph.
Input graph (digraph) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train set with respect to the total number of edges in the graph.
Default is 0.51.
Returns
-------
train_E : array
Column array of train edges as pairs src, dst
test_E : array
Column array of test edges as pairs src, dst
Raises
------
ValueError
If the train_frac parameter is not in range (0, 1].
If the input graph G has more than one (weakly) connected component.
"""
_sanity_check(G)
if train_frac <= 0.0 or train_frac > 1.0:
raise ValueError('The train_frac parameter needs to be in range: (0.0, 1.0]')
if train_frac == 1.0:
return set(G.edges()), set()
# Restrict input graph to its main cc
if nx.is_directed(G):
a = nx.adj_matrix(G)
else:
a = triu(nx.adj_matrix(G), k=1)
# Compute initial statistics and linear indx of nonzeros
n = a.shape[0]
num_tr_e = int(a.nnz * train_frac)
nz_lin_ind = np.ravel_multi_index(a.nonzero(), (n, n))
# Build a dft starting at a random node. If dir false returns only upper triang
dft = depth_first_tree(a, np.random.randint(0, a.shape[0]), directed=nx.is_directed(G))
if nx.is_directed(G):
dft_lin_ind = np.ravel_multi_index(dft.nonzero(), (n, n))
else:
dft_lin_ind = np.ravel_multi_index(triu(tril(dft).T + dft, k=1).nonzero(), (n, n))
# From all nonzero indx remove those in dft. From the rest take enough to fill train quota. Rest are test
rest_lin_ind = np.setdiff1d(nz_lin_ind, dft_lin_ind)
aux = np.random.choice(rest_lin_ind, num_tr_e-len(dft_lin_ind), replace=False)
lin_tr_e = np.union1d(dft_lin_ind, aux)
lin_te_e = np.setdiff1d(rest_lin_ind, aux)
# Unravel the linear indices to obtain src, dst pairs
tr_e = np.array(np.unravel_index(np.array(lin_tr_e), (n, n))).T
te_e = np.array(np.unravel_index(np.array(lin_te_e), (n, n))).T
return tr_e, te_e
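# ---------------------------------------------------------------------------- #
# Hedged usage sketch (not part of the original module): quick_split on a small
# connected undirected graph; the depth-first tree guarantees the train edges
# touch every node.
def _example_quick_split():
    import networkx as nx
    G = nx.karate_club_graph()
    tr_e, te_e = quick_split(G, train_frac=0.8)
    print(tr_e.shape, te_e.shape)   # roughly an 80/20 split of the 78 edges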
def quick_nonedges(G, train_frac=0.51, fe_ratio=1.0):
r"""
Computes one train/test split of non-edges from an input graph and returns the results.
The negative train and test edges will have no overlap. Also there will be no overlap between false train and test
edges and real ones. No selfloop false edges will be generated.
Input graph (digraph) cannot contain more than one (weakly) connected component.
Parameters
----------
G : graph
A NetworkX graph
train_frac : float, optional
The relative size (in range (0.0, 1.0]) of the train false edge set w.r.t. total number of edges in graph.
Default is 0.51.
fe_ratio : float, optional
        The ratio of negative to positive edges to sample. For fe_ratio values between 0 and 1, fewer
        false than true edges will be generated. For fe_ratio > 1, more false than true edges will be
        generated. Default is 1.0, i.e. the same amounts.
Returns
-------
    train_E_false : array
        Column array of false train edges as pairs src, dst
    test_E_false : array
        Column array of false test edges as pairs src, dst
Raises
------
ValueError
        If more false edges are required than the number of non-edges in the graph.
"""
    # fe_ratio can be any float or the keyword 'prop'
a = nx.adj_matrix(G)
n = a.shape[0]
density = a.nnz / n ** 2
if fe_ratio == 'prop':
        fe_ratio = np.floor(1.0 / density)
""" Packaged MASAC"""
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from unityagents import UnityEnvironment
from buffers.buffer import ReplayBuffer
from models.network import Network
from torch.nn.utils.clip_grad import clip_grad_norm_
class DQNAgent:
def __init__(
self,
env: UnityEnvironment,
memory_size: int,
batch_size: int,
target_update: int,
epsilon_decay: float = 1 / 2000,
max_epsilon: float = 1.0,
min_epsilon: float = 0.1,
gamma: float = 0.99,
):
self.brain_name = env.brain_names[0]
self.brain = env.brains[self.brain_name]
env_info = env.reset(train_mode=True)[self.brain_name]
self.env = env
action_size = self.brain.vector_action_space_size
state = env_info.vector_observations[0]
state_size = len(state)
self.obs_dim = state_size
self.action_dim = 1
self.memory = ReplayBuffer(self.obs_dim, self.action_dim, memory_size, batch_size)
self.batch_size = batch_size
self.target_update = target_update
self.epsilon_decay = epsilon_decay
self.max_epsilon = max_epsilon
self.min_epsilon = min_epsilon
self.gamma = gamma
self.epsilon = max_epsilon
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.dqn = Network(self.obs_dim, self.action_dim)
self.dqn_target = Network(self.obs_dim, self.action_dim)
self.dqn_target.load_state_dict(self.dqn.state_dict())
self.dqn_target.eval()
self.optimizer = optim.Adam(self.dqn.parameters(), lr=5e-5)
self.transition = list()
self.is_test = False
def select_action(self, state: np.ndarray) -> np.int64:
""" Select an action given input """
if self.epsilon > np.random.random():
            selected_action = np.random.randint(0, self.action_dim)  # randint's upper bound is exclusive
else:
selected_action = self.dqn(
torch.FloatTensor(state).to(self.device)
)
selected_action = np.argmax(selected_action.detach().cpu().numpy())
if not self.is_test:
self.transition = [state, selected_action]
return selected_action
def step(self, action: np.int64) -> Tuple[np.ndarray, np.float64, bool]:
"Take an action and return environment response"
env_info = self.env.step(action)[self.brain_name]
next_state = env_info.vector_observations[0]
reward = env_info.rewards[0]
done = env_info.local_done[0]
if not self.is_test:
self.transition += [reward, next_state, done]
self.memory.store(*self.transition)
return next_state, reward, done
def update_model(self) -> torch.Tensor:
""" Update model by gradient descent"""
samples = self.memory.sample_batch()
loss = self._compute_dqn_loss(samples)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
return loss.item()
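    # Hedged sketch (not from the original file): _compute_dqn_loss is called in
    # update_model but defined elsewhere; a standard one-step DQN loss would look
    # roughly like the method below. The sample keys ("obs", "next_obs", "acts",
    # "rews", "done") follow a common ReplayBuffer layout and are assumptions,
    # not the original API.
    def _compute_dqn_loss_sketch(self, samples: Dict[str, np.ndarray]) -> torch.Tensor:
        state = torch.FloatTensor(samples["obs"]).to(self.device)
        next_state = torch.FloatTensor(samples["next_obs"]).to(self.device)
        action = torch.LongTensor(samples["acts"].reshape(-1, 1)).to(self.device)
        reward = torch.FloatTensor(samples["rews"].reshape(-1, 1)).to(self.device)
        done = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(self.device)
        curr_q = self.dqn(state).gather(1, action)                        # Q(s, a)
        next_q = self.dqn_target(next_state).max(dim=1, keepdim=True)[0].detach()
        target = reward + self.gamma * next_q * (1 - done)                # Bellman target
        return F.smooth_l1_loss(curr_q, target)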
def train(self, num_episode: int, max_iteration: int=1000, plotting_interval: int=400):
""" train the agent """
self.is_test = False
env_info = self.env.reset(train_mode=True)[self.brain_name]
state = env_info.vector_observations[0]
update_cnt = 0
epsilons = []
losses = []
avg_losses= []
scores = []
avg_scores = []
for episode in range(num_episode):
env_info = self.env.reset(train_mode=True)[self.brain_name]
state = env_info.vector_observations[0]
score = 0
for iter in range(max_iteration):
action = self.select_action(state)
next_state, reward, done = self.step(action)
state = next_state
score += reward
if done:
break
if len(self.memory) > self.batch_size:
loss = self.update_model()
losses.append(loss)
update_cnt += 1
avg_losses.append(np.mean(losses))
losses = []
self.epsilon = max(
self.min_epsilon, self.epsilon - (
self.max_epsilon - self.min_epsilon
) * self.epsilon_decay
)
epsilons.append(self.epsilon)
if update_cnt % self.target_update == 0:
self._target_hard_update()
scores.append(score)
epsilons.append(self.epsilon)
if episode >= 100:
                avg_scores.append(np.mean(scores[-100:]))
"""Run Demonstration Image Classification Experiments.
"""
import sys,os
sys.path.append('..')
import numpy as np
from models.BrokenModel import BrokenModel as BrokenModel
import glob
import tensorflow as tf
import pandas as pd
from timeit import default_timer as timer
from .calloc import loadChannel,quantInit
from .simmods import *
from errConceal.caltec import *
from errConceal.altec import *
from errConceal.tc_algos import *
import cv2 as cv2
from PIL import Image
# ---------------------------------------------------------------------------- #
def fnRunImgClassDemo(modelDict,splitLayerDict,ecDict,batch_size,path_base,transDict,outputDir):
print('TensorFlow version')
print(tf.__version__)
model_path = modelDict['fullModel']
customObjects = modelDict['customObjects']
task = modelDict['task']
normalize = modelDict['normalize']
reshapeDims = modelDict['reshapeDims']
splitLayer = splitLayerDict['split']
mobile_model_path = splitLayerDict['MobileModel']
cloud_model_path = splitLayerDict['CloudModel']
rowsPerPacket = transDict['rowsperpacket']
quantization = transDict['quantization']
numberOfBits_1 = quantization[1]['numberOfBits']
numberOfBits_2 = quantization[2]['numberOfBits']
channel = transDict['channel']
res_data_dir = outputDir['resDataDir'] # directory for loss maps.
sim_data_dir = outputDir['simDataDir'] # directory for simulation results.
# ------------------------------------------------------------------------ #
# tensorflow.keras deep model loading.
loaded_model = tf.keras.models.load_model(os.path.join(model_path))
loaded_model_config = loaded_model.get_config()
loaded_model_name = loaded_model_config['name']
# Check if mobile and cloud sub-models are already available:
if os.path.isfile(mobile_model_path) and os.path.isfile(cloud_model_path):
print(f'Sub-models of {loaded_model_name} split at {splitLayer} are available.')
mobile_model = tf.keras.models.load_model(os.path.join(mobile_model_path))
cloud_model = tf.keras.models.load_model(os.path.join(cloud_model_path))
else:
# if not, split the deep model.
# Object for splitting a tf.keras model into a mobile sub-model and a cloud
# sub-model at the chosen split layer 'splitLayer'.
testModel = BrokenModel(loaded_model, splitLayer, customObjects)
testModel.splitModel()
mobile_model = testModel.deviceModel
cloud_model = testModel.remoteModel
# Save the mobile and cloud sub-model
mobile_model.save(mobile_model_path)
cloud_model.save(cloud_model_path)
# ---------------------------------------------------------------------------- #
# Create results directory
if 'GilbertChannel' in channel:
lossProbability = channel['GilbertChannel']['lossProbability']
burstLength = channel['GilbertChannel']['burstLength']
results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_lp_'+str(lossProbability)+'_Bl_'+str(burstLength))
channel_flag = 'GC'
elif 'RandomLossChannel' in channel:
lossProbability = channel['RandomLossChannel']['lossProbability']
results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_lp_'+str(lossProbability))
channel_flag = 'RL'
elif 'ExternalChannel' in channel:
print('External packet traces imported')
results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_ext_trace')
channel_flag = 'EX'
num_channels = transDict['channel']['ExternalChannel']['num_channels']
ext_dir = os.path.join(res_data_dir,path_base,loaded_model_name,splitLayer)
else:
# No lossy channel. This means we are doing a quantization experiment.
channel_flag = 'NC'
results_dir = os.path.join(sim_data_dir,path_base,loaded_model_name,'demo',splitLayer+'_NoChannel')
MC_runs = [0,1] # with no lossy channel, there's no need to do monte carlo runs because each monte carlo run would give the same results.
if channel_flag in ['GC','RL','EX']:
# Only load altec weights if we will be doing error concealment.
tc_weights_path = ecDict['ALTeC']['weightspath']
altec_w_path = os.path.join(tc_weights_path,loaded_model_name,splitLayer,splitLayer+'_rpp_'+str(rowsPerPacket)+'_'+str(numberOfBits_1)+'Bits_tensor_weights.npy')
altec_pkt_w = np.load(altec_w_path)
print(f'Loaded ALTeC weights for splitLayer {splitLayer} and {rowsPerPacket} rows per packet. Shape {np.shape(altec_pkt_w)}')
halrtc_iters = ecDict['HaLRTC']['numiters']
silrtc_iters = ecDict['SiLRTC']['numiters']
inpaint_radius = ecDict['InpaintNS']['radius']
os.makedirs(results_dir,exist_ok=True)
res_filename = '_'+str(numberOfBits_1)+'Bits_'+str(numberOfBits_2)+'Bits_'
# ------------------------------------------------------------------------ #
# Objects for the channel, quantization.
if channel_flag != 'EX':
channel = loadChannel(channel)
quant_tensor1 = quantInit(quantization,tensor_id = 1)
quant_tensor2 = quantInit(quantization,tensor_id = 2)
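    # Note (added): quantInit is assumed to return either the string 'noQuant' or a
    # quantizer object whose bitQuantizer() maps each float tensor value to an integer
    # code, conceptually q = round((x - min) / (max - min) * (2**numberOfBits - 1)),
    # storing min/max so inverseQuantizer() can approximately undo the mapping later.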
# ------------------------------------------------------------------------ #
# Load the dataset
dataset_x_files,dataset_y_labels,file_names = fn_Data_PreProcessing_ImgClass(path_base,reshapeDims,normalize)
# ------------------------------------------------------------------------ #
# Process the dataset.
batched_y_labels = [dataset_y_labels[i:i + batch_size] for i in range(0, len(dataset_y_labels), batch_size)]
batched_x_files = [dataset_x_files[i: i + batch_size] for i in range(0,len(dataset_x_files),batch_size)]
if channel_flag == 'EX':
loss_matrix_mc = []
print('Loading external packet traces')
for i_mc in range(MC_runs[0],MC_runs[1]):
# Load external packet traces as loss matrices.
lossMap_list = []
for i_c in range(num_channels):
df = pd.read_excel(os.path.join(ext_dir,'Rpp_'+str(rowsPerPacket)+'_MC_'+str(i_mc)+'.xlsx'),sheet_name=[str(i_c)],engine='openpyxl')
                lossMap_channel = (df[str(i_c)].to_numpy())[:,1:].astype(bool)
lossMap_list.append(lossMap_channel)
loss_matrix_all = np.dstack(lossMap_list)
loss_matrix_ex = [loss_matrix_all[k_batch:k_batch+batch_size,:,:] for k_batch in range(0,np.shape(loss_matrix_all)[0],batch_size)]
loss_matrix_mc.append(loss_matrix_ex)
# lists to store results.
true_labels = []
top1_pred_full_model = []
top1_pred_split_model = []
top5_pred_full_model = []
top5_pred_split_model = []
top1_pred_caltec = []
top5_pred_caltec = []
top1_pred_altec = []
top5_pred_altec = []
top1_pred_halrtc = []
top5_pred_halrtc = []
top1_pred_silrtc = []
top5_pred_silrtc = []
top1_pred_inpaint = []
top5_pred_inpaint = []
top1_conf_full = []
top1_conf_split = []
top1_conf_caltec = []
top1_conf_altec = []
top1_conf_halrtc = []
top1_conf_silrtc = []
top1_conf_inpaint = []
for i_b in range(len(batched_y_labels)):
# Run through Monte Carlo experiments through each batch.
print(f"Batch {i_b}")
batch_labels = np.asarray(batched_y_labels[i_b],dtype=np.int64)
true_labels.extend(batch_labels)
batch_imgs = batched_x_files[i_b]
batch_imgs_stacked = np.vstack([i[np.newaxis,...] for i in batch_imgs])
# ---------------------------------------------------------------- #
full_model_out = loaded_model.predict(batch_imgs_stacked)
batch_top1_predictions = np.argmax(full_model_out,axis=1)
batch_confidence = np.max(full_model_out,axis=1)
top1_pred_full_model.extend(batch_top1_predictions)
top1_conf_full.extend(batch_confidence)
for i_item in range(np.shape(full_model_out)[0]):
item_top5_predictions = np.argpartition(-full_model_out[i_item,:],5)[:5]
top5_pred_full_model.append(item_top5_predictions)
# --------------------------------------------------------------- #
deviceOut = mobile_model.predict(batch_imgs_stacked)
print(f'Shape of device out tensor {np.shape(deviceOut)}')
# ---------------------------------------------------------------- #
devOut = []
if not isinstance(deviceOut, list):
devOut.append(deviceOut)
deviceOut = devOut
# deviceOut is the output tensor for a batch of data.
# Quantize the data
quanParams_1 = []
quanParams_2 = []
# If quantization is required:
if len(deviceOut) > 1:
if quant_tensor1!= 'noQuant':
print("Quantizing tensors")
quant_tensor1.bitQuantizer(deviceOut[0])
deviceOut[0] = quant_tensor1.quanData
quanParams_1.append(quant_tensor1.min)
quanParams_1.append(quant_tensor1.max)
quant_tensor2.bitQuantizer(deviceOut[1])
deviceOut[1] = quant_tensor2.quanData
quanParams_2.append(quant_tensor2.min)
quanParams_2.append(quant_tensor2.max)
else:
if quant_tensor1!= 'noQuant':
print("Quantizing tensor.")
quant_tensor1.bitQuantizer(deviceOut[0])
deviceOut[0] = quant_tensor1.quanData
quanParams_1.append(quant_tensor1.min)
quanParams_1.append(quant_tensor1.max)
# Save quantized tensors as image.
for i in range(len(deviceOut)):
quant_tensor = deviceOut[i]
for item_index in range(np.shape(quant_tensor)[0]):
for i_c in range(np.shape(quant_tensor)[-1]):
tensor_channel = Image.fromarray(quant_tensor[item_index,:,:,i_c].astype(np.uint8))
tensor_channel.save(os.path.join(results_dir,'original_batch_'+str(i_b)+'_item_'+str(item_index)+'_tensor_'+str(i)+'_channel_'+str(i_c)+res_filename+'.png'))
# -------------------------------------------------------------------- #
# Transmit the tensor deviceOut through the channel.
if channel_flag in ['GC','RL']:
# if a lossy channel has to be realized.
# if mc_task == 'GenLossPatterns':
# if we want to generate packet loss patterns.
lossMatrix = []
receivedIndices = []
lostIndices = []
dOut = []
for i in range(len(deviceOut)):
dO, lM, rI, lI = transmit(deviceOut[i], channel, rowsPerPacket)
dOut.append(dO)
lossMatrix.append(lM)
receivedIndices.append(rI)
lostIndices.append(lI)
channel.lossMatrix = []
deviceOut = dOut
# ---------------------------------------------------------------- #
# packetize tensor.
pkt_obj_list = []
for i in range(len(deviceOut)):
pkt_obj_list.append(PacketModel(rows_per_packet=rowsPerPacket,data_tensor=np.copy(deviceOut[i].data_tensor)))
# -------------------------------------------------------------------- #
if channel_flag == 'EX':
batch_loss_matrix = loss_matrix_mc[i_mc]
loss_matrix = [batch_loss_matrix[i_b]]
# -------------------------------------------------------------------- #
if channel_flag in ['GC','RL','EX']:
# ---------------------------------------------------------------- #
# apply the loss matrix to the tensor.
for i in range(len(pkt_obj_list)):
loss_map = lossMatrix[i]
#print(np.shape(loss_map))
channel_width = np.shape(pkt_obj_list[i].packet_seq)[3]
# loop through items in batch.
for item_index in range(np.shape(loss_map)[0]):
item_lost_map = loss_map[item_index,:,:]
lost_pkt_indices,lost_channel_indices = np.where(item_lost_map == False)
if len(lost_pkt_indices) != 0:
# drop packet in tensor.
for k in range(len(lost_pkt_indices)):
pkt_obj_list[i].packet_seq[item_index,lost_pkt_indices[k],:,:,lost_channel_indices[k]] = np.zeros([rowsPerPacket,channel_width])
for i in range(len(deviceOut)):
quant_tensor = pkt_obj_list[i].data_tensor
for item_index in range(np.shape(quant_tensor)[0]):
for i_c in range(np.shape(quant_tensor)[-1]):
tensor_channel = Image.fromarray(quant_tensor[item_index,:,:,i_c].astype(np.uint8))
tensor_channel.save(os.path.join(results_dir,'corrupted_batch_'+str(i_b)+'_item_'+str(item_index)+'_tensor_'+str(i)+'_channel_'+str(i_c)+res_filename+'.png'))
deviceOut = pkt_obj_list
    # -------------------------------------------------------------------- #
# Inverse quantize received packets.
# If necessary, inverse quantize tensors.
if len(deviceOut) > 1:
# If more than one tensor is transmitted from the mobile device to the cloud.
if quant_tensor1!= 'noQuant':
print("Inverse quantizing tensors")
if channel_flag != 'NC':
quant_tensor1.quanData = deviceOut[0].data_tensor
qMin, qMax = quanParams_1
quant_tensor1.min = qMin
quant_tensor1.max = qMax
deviceOut[0].data_tensor = quant_tensor1.inverseQuantizer()
quant_tensor2.quanData = deviceOut[1].data_tensor
qMin, qMax = quanParams_2
quant_tensor2.min = qMin
quant_tensor2.max = qMax
deviceOut[1].data_tensor = quant_tensor2.inverseQuantizer()
else:
# no channel.
quant_tensor1.quanData = deviceOut[0]
qMin, qMax = quanParams_1
quant_tensor1.min = qMin
quant_tensor1.max = qMax
deviceOut[0] = quant_tensor1.inverseQuantizer()
quant_tensor2.quanData = deviceOut[1]
qMin, qMax = quanParams_2
quant_tensor2.min = qMin
quant_tensor2.max = qMax
deviceOut[1] = quant_tensor2.inverseQuantizer()
else:
# A single tensor is transmitted from the mobile device to the cloud.
if quant_tensor1 != 'noQuant':
print("Inverse quantizing tensor")
if channel_flag != 'NC':
# we have lossy channels (either GE, RL or external packet traces.)
quant_tensor1.quanData = deviceOut[0].data_tensor
qMin, qMax = quanParams_1
quant_tensor1.min = qMin
quant_tensor1.max = qMax
deviceOut[0].data_tensor = quant_tensor1.inverseQuantizer()
else:
# no channel.
quant_tensor1.quanData = deviceOut[0]
qMin, qMax = quanParams_1
quant_tensor1.min = qMin
quant_tensor1.max = qMax
deviceOut[0] = quant_tensor1.inverseQuantizer()
cOut = []
for i in range(len(deviceOut)):
if channel_flag != 'NC':
cOut.append(np.copy(deviceOut[i].data_tensor))
else:
cOut.append(np.copy(deviceOut[i]))
deviceOut = cOut
# -------------------------------------------------------------------- #
# Run cloud prediction on channel output data.
tensor_out = cloud_model.predict(deviceOut)
cloud_Top1_pred = np.argmax(tensor_out,axis=1)
cloud_Top1_confidence = np.max(tensor_out,axis=1)
top1_pred_split_model.extend(cloud_Top1_pred)
top1_conf_split.extend(cloud_Top1_confidence)
for i_item in range(np.shape(tensor_out)[0]):
item_top5_predictions = np.argpartition(-tensor_out[i_item,:],5)[:5]
top5_pred_split_model.append(item_top5_predictions)
# -------------------------------------------------------------------- #
# Run packet loss concealment methods if a lossy channel was used.
if channel_flag in ['EX','RL','GC']:
# Flush missing packets out of tensor.
# packetize tensor.
pkt_obj_list_caltec = []
pkt_obj_list_altec = []
pkt_obj_list_halrtc = []
pkt_obj_list_silrtc = []
pkt_obj_list_inpaint = []
inpaint_masks_list = []
for i in range(len(deviceOut)):
pkt_obj_list_caltec.append(PacketModel(rows_per_packet=rowsPerPacket,data_tensor=np.copy(deviceOut[i])))
pkt_obj_list_altec.append(PacketModel(rows_per_packet=rowsPerPacket,data_tensor=np.copy(deviceOut[i])))
pkt_obj_list_halrtc.append(PacketModel(rows_per_packet=rowsPerPacket,data_tensor=np.copy(deviceOut[i])))
pkt_obj_list_silrtc.append(PacketModel(rows_per_packet=rowsPerPacket,data_tensor=np.copy(deviceOut[i])))
pkt_obj_list_inpaint.append(PacketModel(rows_per_packet=rowsPerPacket,data_tensor=np.copy(deviceOut[i])))
inpaint_masks = np.zeros(np.shape(pkt_obj_list[i].data_tensor),dtype= np.uint8)
inpaint_masks_list.append(PacketModel(rows_per_packet=rowsPerPacket,data_tensor=np.copy(inpaint_masks)))
# ---------------------------------------------------------------- #
# apply this loss matrix to the tensor.
for i in range(len(pkt_obj_list)):
loss_map = lossMatrix[i]
channel_width = np.shape(pkt_obj_list_caltec[i].packet_seq)[3]
# loop through items in batch.
for item_index in range(np.shape(loss_map)[0]):
item_lost_map = loss_map[item_index,:,:]
lost_pkt_indices,lost_channel_indices = np.where(item_lost_map == False)
if len(lost_pkt_indices) != 0:
# drop packet in tensor.
for k in range(len(lost_pkt_indices)):
pkt_obj_list_caltec[i].packet_seq[item_index,lost_pkt_indices[k],:,:,lost_channel_indices[k]] = np.zeros([rowsPerPacket,channel_width])
pkt_obj_list_altec[i].packet_seq[item_index,lost_pkt_indices[k],:,:,lost_channel_indices[k]] = np.zeros([rowsPerPacket,channel_width])
pkt_obj_list_halrtc[i].packet_seq[item_index,lost_pkt_indices[k],:,:,lost_channel_indices[k]] = np.zeros([rowsPerPacket,channel_width])
pkt_obj_list_silrtc[i].packet_seq[item_index,lost_pkt_indices[k],:,:,lost_channel_indices[k]] = np.zeros([rowsPerPacket,channel_width])
                            pkt_obj_list_inpaint[i].packet_seq[item_index,lost_pkt_indices[k],:,:,lost_channel_indices[k]] = np.zeros([rowsPerPacket,channel_width])
import numpy as np
import random
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
import queue
training_files = ["bisecting.txt","blobs.txt","moons.txt"]
INPUT_FILE="blobs.txt"
ITERATIONS=50
#Define label for differnt point group
UNASSIGNED = 0
CORE_PT = -1
BORDER_PT = -2
dataset = []
noOfClusters = 0
def read_dataset(INPUT_FILE):
"""
Reading dataset
"""
global dataset
f = open(INPUT_FILE, "r")
lines = f.readlines()
for i in range(len(lines)):
data = lines[i].split()
dataset.append(list(map(float, data)))
print("Total dataset = {} points".format(len(dataset)))
f.close()
pass
def find_nearest_neighbour(k):
"""
Nearest neighbour
"""
global dataset
nearest_neighbors = NearestNeighbors(n_neighbors=k)
nearest_neighbors.fit(dataset)
distances, indices = nearest_neighbors.kneighbors(dataset)
distances = np.sort(distances, axis=0)[:, 1]
# print(distances, indices)
plt.grid()
plt.plot(distances)
# plt.savefig(INPUT_FILE+'_Nearest_Neighbour.png')
plt.show()
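# Note (added): the sorted k-distance curve plotted above is the usual way to pick
# DBSCAN's Eps -- a common choice is the "elbow" where the curve bends sharply,
# with k set to the intended MinPts value.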
def plotClusters(dataset, labels, noOfClusters, file):
total_points = len(dataset)
print("Plotting for {} points".format(total_points))
plt.figure()
# Color array for clusters
scatterColors = ["blue","green","red","cyan","brown","indigo", "pink", "royalblue",
"orange","yellow","black","olive", "gold", "orangered", "skyblue", "teal" ]
for i in range(noOfClusters):
if (i==0):
#Plot all noise point as blue
color='blue'
else:
color = scatterColors[i % len(scatterColors)]
x = []; y = []
for j in range(total_points):
if labels[j] == i:
x.append(dataset[j][0])
y.append(dataset[j][1])
plt.scatter(x, y, c=color, alpha=1, marker='.')
plt.grid()
plt.savefig(file)
# plt.show()
def euclidean_dist(point1, point2):
"""
Euclid distance function
"""
x1 = point1[0]
x2 = point2[0]
y1 = point1[1]
y2 = point2[1]
# create the points
p1 = (x1 - x2)**2
p2 = (y1 - y2)**2
return np.sqrt(p1 + p2)
def neighbor_points(dataset, pointIdx, radius):
'''
    find all neighbor points within radius of a given point.
'''
points = []
for i in range(len(dataset)):
# Calculating distance btn points
if euclidean_dist(dataset[i], dataset[pointIdx]) <= radius:
points.append(i)
return points
def dbscan(data, Eps, MinPt):
'''
DBSCAN Algorithm
'''
global dataset, noOfClusters
    # initialize all point labels to unassigned
    pointlabel = [UNASSIGNED] * len(data)
    neighbourhood_arr = []
    # initialize lists for core / non-core points
    core_pts=[]
    non_core_pts=[]
    # Find all neighbors of every point
for i in range(len(data)):
neighbourhood_arr.append(neighbor_points(dataset,i,Eps))
    # Find all core points, border points and noise
    for i in range(len(neighbourhood_arr)):
        # A point is a core point if it has at least MinPts points within Eps
if (len(neighbourhood_arr[i]) >= MinPt):
pointlabel[i] = CORE_PT
core_pts.append(i)
else:
non_core_pts.append(i)
for i in non_core_pts:
for j in neighbourhood_arr[i]:
if j in core_pts:
pointlabel[i] = BORDER_PT
break
#start assigning point to cluster
cluster_no = 1
    # Put all neighboring core points in a queue and expand to their neighbors
for i in range(len(pointlabel)):
q = queue.Queue()
if (pointlabel[i] == CORE_PT):
pointlabel[i] = cluster_no
for j in neighbourhood_arr[i]:
if(pointlabel[j] == CORE_PT):
q.put(j)
pointlabel[j]= cluster_no
elif(pointlabel[j] == BORDER_PT):
pointlabel[j] = cluster_no
# checking queue
while not q.empty():
neighbors = neighbourhood_arr[q.get()]
for n in neighbors:
if (pointlabel[n] == CORE_PT):
pointlabel[n]=cluster_no
q.put(n)
if (pointlabel[n] == BORDER_PT):
pointlabel[n]=cluster_no
cluster_no = cluster_no + 1
noOfClusters = cluster_no
return pointlabel
def DBSCAN_start(eps, minpts):
"""
docstring
"""
global dataset, noOfClusters
print("Starting DBSCAN for EPS: {} | Minpts: {}".format(eps, minpts))
labels = dbscan(dataset,eps,minpts)
plotClusters(dataset, labels, noOfClusters, INPUT_FILE+'_DBSCAN.png')
outliers = labels.count(0)
print("No. of Clusters: {}".format(noOfClusters-1))
print("Outliers: {}".format(outliers))
return noOfClusters - 1
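# Hedged usage sketch (not part of the original script): the intended flow appears
# to be read -> inspect k-distances -> cluster. The eps/minpts values below are
# placeholders, not tuned for any of the bundled files.
def _example_run_dbscan(eps=0.5, minpts=4):
    read_dataset(INPUT_FILE)
    find_nearest_neighbour(minpts)
    return DBSCAN_start(eps, minpts)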
def calc_distance(X1, X2):
return (sum((X1 - X2)**2))**0.5
def assign_clusters(centroids, X):
assigned_cluster = []
for i in X:
distance=[]
for j in centroids:
distance.append(calc_distance(i, j))
# print(distance)
# print(np.argmin(distance))
# print("--------------------------------")
assigned_cluster.append(np.argmin(distance)) # idx of minimum element
# print(assigned_cluster)
return assigned_cluster
def calc_centroids(clusters_lables, k):
global dataset
points_per_cluster = [[] for _ in range(k)]
for i in range(len(clusters_lables)):
points_per_cluster[clusters_lables[i]].append(dataset[i])
centroids = []
for i in range(k):
centroids.append(np.mean(points_per_cluster[i], axis=0))
return centroids
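# Hedged sketch (not part of the original script): one way the helpers above could
# be combined into a plain k-means loop. The random initialisation and the
# exact-match convergence test are assumptions, not the original pipeline.
def _example_kmeans(k):
    global dataset
    data = np.array(dataset)
    centroids = data[np.random.choice(len(data), k, replace=False)]
    for _ in range(ITERATIONS):
        labels = assign_clusters(centroids, data)
        new_centroids = calc_centroids(labels, k)
        if match_centroids(new_centroids, centroids):
            break
        centroids = new_centroids
    return labels, centroids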
def match_centroids(c_new, c_old):
    return (np.array(c_new) == np.array(c_old)).all()
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
housePrice = pd.read_csv('metroMelbHousePrices.csv',encoding = 'ISO-8859-1')
commute = pd.read_csv('metroMelbCommuteDistance.csv',encoding = 'ISO-8859-1')
distance = pd.read_csv('distanceToCBD.csv',encoding = 'ISO-8859-1')
df = pd.merge(housePrice,commute,on='postcode')
df = df.sort_values('medPrice')
plt.scatter(df['medCommute'], df['medPrice'])
plt.xlabel('medCommute (km)')
plt.ylabel('medPrice ($)')
plt.grid(True)
x = np.array(df['medCommute'])
y = np.array(df['medPrice'])
m,b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b)
plt.savefig('withOutliers.png')
##Outlier Detection using IQR
# Keep only values inside the IQR
Q1 = df['medPrice'].quantile(0.25)
Q3 = df['medPrice'].quantile(0.75)
Q1b = df['medCommute'].quantile(0.25)
Q3b = df['medCommute'].quantile(0.75)
df1 = df.loc[df['medPrice'] > Q1]
df1 = df1.loc[df1['medCommute'] > Q1b]
df2 = df1.loc[df1['medPrice'] < Q3]
df2 = df2.loc[df2['medCommute'] < Q3b]
# re-plot
plt.clf()
plt.scatter(df2['medCommute'], df2['medPrice'])
plt.xlabel('medCommute (km)')
plt.ylabel('medPrice ($)')
plt.grid(True)
x = np.array(df2['medCommute'])
y = np.array(df2['medPrice'])
m,b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b)
plt.savefig('noOutliersIQR.png')
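# Note (added): the filter above keeps only rows strictly inside the interquartile
# range of each variable, which drops far more than just the extreme values. A more
# common IQR rule keeps rows within [Q1 - 1.5*IQR, Q3 + 1.5*IQR], where IQR = Q3 - Q1.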
# z-score outlier detection #########################################################################
from scipy import stats
df['zPrice'] = np.abs(stats.zscore(df['medPrice']))
df['zCommute'] = np.abs(stats.zscore(df['medCommute']))
df1 = df.iloc[np.where(df['zPrice'] < 2)]
df2 = df1.iloc[np.where(df1['zCommute'] < 2)]
# re-plot
plt.clf()
plt.scatter(df2['medCommute'], df2['medPrice'])
plt.xlabel('medCommute (km)')
plt.ylabel('medPrice ($)')
plt.grid(True)
x = np.array(df2['medCommute'])
y = np.array(df2['medPrice'])
m,b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b)
plt.savefig('noOutliersZSCORE2STD.png')
df1 = df.iloc[np.where(df['zPrice'] < 1)]
df2 = df1.iloc[np.where(df1['zCommute'] < 1)]
# re-plot
plt.clf()
plt.scatter(df2['medCommute'], df2['medPrice'])
plt.xlabel('medCommute (km)')
plt.ylabel('medPrice ($)')
plt.grid(True)
x = np.array(df2['medCommute'])
y = np.array(df2['medPrice'])
m,b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b)
plt.savefig('noOutliersZSCORE1STD.png')
################################################################################
plt.clf()
df = pd.merge(housePrice,distance,on='postcode')
df = df.sort_values('medPrice')
plt.scatter(df['distance'], df['medPrice'])
plt.xlabel('distance (km)')
plt.ylabel('medPrice ($)')
plt.grid(True)
df['zPrice'] = np.abs(stats.zscore(df['medPrice']))
df['zDistance'] = np.abs(stats.zscore(df['distance']))
df1 = df.iloc[np.where(df['zPrice'] < 2)]
df2 = df1.iloc[np.where(df1['zDistance'] < 2)]
# re-plot
plt.clf()
plt.scatter(df2['distance'], df2['medPrice'])
plt.xlabel('distance (km)')
plt.ylabel('medPrice ($)')
plt.grid(True)
x = np.array(df2['distance'])
y = np.array(df2['medPrice'])
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
from shapely.geometry.polygon import Polygon
from skimage import morphology
import abc
import warnings
from numba import njit
from . import mask
from . import cube
from . import section as dm_section
from . import plot
from . import utils
class BasePlanform(abc.ABC):
"""Base planform object.
Defines common attributes and methods of all planform objects.
"""
def __init__(self, planform_type, *args, name=None):
"""Instantiate for subclasses of BasePlanform.
The base class instantiation handles setting the `name` attribute of
the `Planform`, and defines the internal plotting routine
via :obj:`_show`.
Parameters
----------
planform_type : :obj`str`
String identifying the *type* of `Planform` being instantiated.
*args
Arbitrary arguments, passed from the subclass, not used here.
name : :obj:`str`, optional
An optional name for the planform, helpful for maintaining and
keeping track of multiple `Planform` objects of the same type.
This is disctinct from the :obj:`planform_type`. The name is used
internally if you use the :obj:`register_planform` method of a
`Cube`.
"""
# begin unconnected
self._shape = None
self._variables = None
self.planform_type = planform_type
self._name = name
@property
def name(self):
"""Planform name.
Helpful to differentiate multiple `Planform` objects.
"""
return self._name
@name.setter
def name(self, var):
if (self._name is None):
# _name is not yet set
self._name = var or self.planform_type
else:
# _name is already set
if not (var is None):
warnings.warn(
UserWarning("`name` argument supplied to instantiated "
"`Planform` object. To change the name of "
"a Planform, you must set the attribute "
"directly with `plan._name = 'name'`."))
# do nothing
@property
def shape(self):
"""Planform shape.
"""
return self._shape
def _show(self, field, varinfo, **kwargs):
"""Internal method for showing a planform.
Each planform may implement it's own method to determine what field to
show when called, and different calling options.
Parameters
----------
field : :obj:`DataArray`
The data to show.
varinfo : :obj:`VariableInfo`
A :obj:`VariableInfo` instance describing how to color `field`.
**kwargs
Acceptable kwargs are `ax`, `title`, `ticks`, `colorbar`,
`colorbar_label`. See description for `DataPlanform.show` for
more information.
"""
# process arguments and inputs
ax = kwargs.pop('ax', None)
title = kwargs.pop('title', None)
ticks = kwargs.pop('ticks', False)
colorbar = kwargs.pop('colorbar', True)
colorbar_label = kwargs.pop('colorbar_label', False)
if not ax:
ax = plt.gca()
# get the extent as arbitrary dimensions
d0, d1 = field.dims
d0_arr, d1_arr = field[d0], field[d1]
_extent = [d1_arr[0], # dim1, 0
d1_arr[-1] + d1_arr[1], # dim1, end + dx
d0_arr[-1] + d0_arr[1], # dim0, end + dx
d0_arr[0]] # dim0, 0
im = ax.imshow(field,
cmap=varinfo.cmap,
norm=varinfo.norm,
vmin=varinfo.vmin,
vmax=varinfo.vmax,
extent=_extent)
if colorbar:
cb = plot.append_colorbar(im, ax)
if colorbar_label:
_colorbar_label = \
varinfo.label if (colorbar_label is True) \
else str(colorbar_label) # use custom if passed
cb.ax.set_ylabel(_colorbar_label, rotation=-90, va="bottom")
if not ticks:
ax.set_xticks([], minor=[])
ax.set_yticks([], minor=[])
if title:
ax.set_title(str(title))
return im
class Planform(BasePlanform):
"""Basic Planform object.
This class is used to slice the `Cube` along the `dim0` axis. The object
is akin to the various `Section` classes, but there is only the one way
to slice as a Planform.
"""
def __init__(self, *args, z=None, t=None, idx=None, **kwargs):
"""
Identify coordinate defining the planform.
Parameters
----------
CubeInstance : :obj:`~deltametrics.cube.BaseCube` subclass, optional
Connect to this cube. No connection is made if cube is not
provided.
z : :obj:`float`, optional
t : :obj:`float`, optional
idx : :obj:`int`, optional
Notes
-----
If no positional arguments are passed, an empty `Planform` not
connected to any cube is returned. This cube may need to be manually
connected to have any functionality (via the :meth:`connect` method);
this need will depend on the type of `Planform`.
"""
if (not (z is None)) and (not (idx is None)):
raise TypeError('Cannot specify both `z` and `idx`.')
if (not (t is None)) and (not (idx is None)):
raise TypeError('Cannot specify both `t` and `idx`.')
if (not (z is None)) and (not (t is None)):
raise TypeError('Cannot specify both `z` and `t`.')
self.cube = None
self._dim0_idx = None
self._input_z = z
self._input_t = t
self._input_idx = idx
super().__init__('data', *args, **kwargs)
if len(args) > 0:
self.connect(args[0])
else:
pass
@property
def variables(self):
"""List of variables.
"""
return self._variables
@property
def idx(self):
"""Index into underlying Cube along axis 0.
"""
return self._dim0_idx
def connect(self, CubeInstance, name=None):
"""Connect this Planform instance to a Cube instance.
"""
if not issubclass(type(CubeInstance), cube.BaseCube):
raise TypeError('Expected type is subclass of {_exptype}, '
'but received was {_gottype}.'.format(
_exptype=type(cube.BaseCube),
_gottype=type(CubeInstance)))
self.cube = CubeInstance
self._variables = self.cube.variables
self.name = name # use the setter to determine the _name
self._shape = self.cube.shape[1:]
self._compute_planform_coords()
def _compute_planform_coords(self):
"""Should calculate vertical coordinate of the section.
Sets the value ``self._dim0_idx`` according to
the algorithm of a `Planform` initialization.
.. warning::
When implementing a new planform type, be sure that
``self._dim0_idx`` is a *one-dimensional array*, or you will get
an improperly shaped Planform array in return.
"""
# determine the index along dim0 to slice cube
if (not (self._input_z is None)) or (not (self._input_t is None)):
# z an t are treated the same internally, and either will be
# silently used to interpolate the dim0 coordinates to find the
# nearest index
dim0_val = self._input_z or self._input_t
self._dim0_idx = np.argmin(np.abs(
np.array(self.cube.dim0_coords) - dim0_val))
else:
# then idx must have been given
self._dim0_idx = self._input_idx
def __getitem__(self, var):
"""Get a slice of the planform.
Slicing the planform instance creates an `xarray` `DataArray` instance
from data for variable ``var``.
.. note:: We only support slicing by string.
Parameters
----------
var : :obj:`str`
Which variable to slice.
Returns
-------
data : :obj:`DataArray`
            The underlying data returned as an xarray `DataArray`, maintaining
coordinates.
"""
if isinstance(self.cube, cube.DataCube):
_xrDA = self.cube[var][self._dim0_idx, :, :]
_xrDA.attrs = {'slicetype': 'data_planform',
'knows_stratigraphy': self.cube._knows_stratigraphy,
'knows_spacetime': True}
if self.cube._knows_stratigraphy:
_xrDA.strat.add_information(
_psvd_mask=self.cube.strat_attr.psvd_idx[self._dim0_idx, :, :], # noqa: E501
_strat_attr=self.cube.strat_attr(
'planform', self._dim0_idx, None))
return _xrDA
elif isinstance(self.cube, cube.StratigraphyCube):
_xrDA = self.cube[var][self._dim0_idx, :, :]
_xrDA.attrs = {'slicetype': 'stratigraphy_planform',
'knows_stratigraphy': True,
'knows_spacetime': False}
return _xrDA
elif (self.cube is None):
raise AttributeError(
'No cube connected. Are you sure you ran `.connect()`?')
else:
raise TypeError('Unknown Cube type encountered: %s'
% type(self.cube))
def show(self, var, ax=None, title=None, ticks=False,
colorbar=True, colorbar_label=False):
"""Show the planform.
Method enumerates convenient routines for visualizing planform data
and slices of stratigraphy.
Parameters
----------
var : :obj:`str`
Which attribute to show. Can be a string for a named `Cube`
attribute.
label : :obj:`bool`, `str`, optional
Display a label of the variable name on the plot. Default is
False, display nothing. If ``label=True``, the label name from the
:obj:`~deltametrics.plot.VariableSet` is used. Other arguments are
attempted to coerce to `str`, and the literal is displayed.
colorbar : :obj:`bool`, optional
Whether a colorbar is appended to the axis.
colorbar_label : :obj:`bool`, `str`, optional
Display a label of the variable name along the colorbar. Default is
False, display nothing. If ``label=True``, the label name from the
:obj:`~deltametrics.plot.VariableSet` is used. Other arguments are
attempted to coerce to `str`, and the literal is displayed.
ax : :obj:`~matplotlib.pyplot.Axes` object, optional
A `matplotlib` `Axes` object to plot the planform. Optional; if not
provided, a call is made to ``plt.gca()`` to get the current (or
create a new) `Axes` object.
Examples
--------
Display the `eta` and `velocity` planform of a DataCube.
.. plot::
:include-source:
>>> golfcube = dm.sample_data.golf()
>>> planform = dm.plan.Planform(golfcube, idx=70)
...
>>> fig, ax = plt.subplots(1, 2)
>>> planform.show('eta', ax=ax[0])
>>> planform.show('velocity', ax=ax[1])
>>> plt.show()
"""
# process the planform attribute to a field
_varinfo = self.cube.varset[var] if \
issubclass(type(self.cube), cube.BaseCube) else \
plot.VariableSet()[var]
_field = self[var]
# call the internal _show method
im = self._show(
_field, _varinfo,
ax=ax, title=title, ticks=ticks,
colorbar=colorbar, colorbar_label=colorbar_label)
return im
class SpecialtyPlanform(BasePlanform):
"""A base class for All specialty planforms.
.. hint:: All specialty planforms should subclass this class.
Specialty planforms are planforms that hold some computation or attribute
*about* some underlying data, rather than the actual data. As a general
rule, anything that is not a DataPlanform is a SpecialtyPlanform.
This base class implements a slicing method (it slices the `data` field),
and a `show` method for displaying the planform (it displays the `data`
field).
.. rubric:: Developer Notes
All subclassing objects must implement:
* a property named `data` that points to some field (i.e., an attribute
of the planform) that best characterizes the Planform. For example,
the OAP planform `data` property points to the `sea_angles` field.
All subclassing objects should consider implementing:
* the `show` method takes (optionally) a string argument specifying the
field to display, which can match any attribute of the
`SpecialtyPlanform`. If no argument is passed to `show`, the `data`
field is displayed. A :obj:`VariableInfo` object
`self._default_varinfo` is created on instantiating a subclass, which
will be used to style the displayed field. You can add different
`VariableInfo` objects with the name matching any other field of the
planform to use that style instead; for example, OAP implements
`self._sea_angles_varinfo`, which is used if the `sea_angles` field
is specified to :meth:`show`.
* The `self._default_varinfo` can be overwritten in a subclass
(after ``super().__init__``) to style the `show` default field
(`data`) a certain way. For example, OAP sets ``self._default_varinfo
= self._sea_angles_varinfo``.
"""
def __init__(self, planform_type, *args, **kwargs):
"""Initialize the SpecialtyPlanform.
BaseClass, only called by subclassing methods. This `__init__` method
calls the `BasePlanform.__init__`.
Parameters
----------
planform_type : :obj:`str`
A string specifying the type of planform being created.
*args
Passed to `BasePlanform.__init__`.
**kwargs
Passed to `BasePlanform.__init__`.
"""
super().__init__(planform_type, *args, **kwargs)
self._default_varinfo = plot.VariableInfo(
'data', label='data')
@property
@abc.abstractmethod
def data(self):
"""The public data field.
This attribute *must* be implemented as an alias to another attribute.
The choice of field is up to the developer.
"""
...
def __getitem__(self, slc):
"""Slice the planform.
Implements basic slicing for `SpecialtyPlanform` by passing the `slc`
to `self.data`. I.e., the returned slice is ``self.data[slc]``.
"""
return self.data[slc]
def show(self, var=None, ax=None, title=None, ticks=False,
colorbar=True, colorbar_label=False):
"""Show the planform.
Display a field of the planform, called by attribute name.
Parameters
----------
var : :obj:`str`
Which field to show. Must be an attribute of the planform. `show`
will look for another attribute describing
the :obj:`VariableInfo` for that attribute named
``self._<var>_varinfo`` and use that to style the plot, if
found. If this `VariableInfo` is not found, the default is used.
label : :obj:`bool`, `str`, optional
Display a label of the variable name on the plot. Default is
False, display nothing. If ``label=True``, the label name from the
:obj:`~deltametrics.plot.VariableSet` is used. Other arguments are
attempted to coerce to `str`, and the literal is displayed.
colorbar : :obj:`bool`, optional
Whether a colorbar is appended to the axis.
colorbar_label : :obj:`bool`, `str`, optional
Display a label of the variable name along the colorbar. Default is
False, display nothing. If ``label=True``, the label name from the
:obj:`~deltametrics.plot.VariableSet` is used. Other arguments are
attempted to coerce to `str`, and the literal is displayed.
ax : :obj:`~matplotlib.pyplot.Axes` object, optional
A `matplotlib` `Axes` object to plot the planform. Optional; if not
provided, a call is made to ``plt.gca()`` to get the current (or
create a new) `Axes` object.
"""
if (var is None):
_varinfo = self._default_varinfo
_field = self.data
elif (isinstance(var, str)):
_field = self.__getattribute__(var) # will error if var not attr
_expected_varinfo = '_' + var + '_varinfo'
if hasattr(self, _expected_varinfo):
_varinfo = self.__getattribute__(_expected_varinfo)
else:
_varinfo = self._default_varinfo
else:
raise TypeError('Bad value for `var`: {0}'.format(var))
self._show(
_field, _varinfo,
ax=ax, title=title, ticks=ticks,
colorbar=colorbar, colorbar_label=colorbar_label)
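# --- Hedged sketch (illustrative, not part of the library): a minimal
# `SpecialtyPlanform` subclass following the Developer Notes above. The field
# name `dry_fraction`, the constructor arguments, and the planform-type string
# are hypothetical; a real subclass would compute its field from a connected
# cube rather than accept it directly.
class _ExampleDrynessPlanform(SpecialtyPlanform):
    """Illustrative subclass that wraps a precomputed 'dryness' field."""

    def __init__(self, dry_field, **kwargs):
        super().__init__('example dryness', **kwargs)
        self._dry_fraction = dry_field
        # optional styling used when `show('dry_fraction')` is requested
        self._dry_fraction_varinfo = plot.VariableInfo(
            'dry_fraction', label='dry fraction')
        # overwrite the default (after super().__init__) so a bare `show()`
        # uses the same styling
        self._default_varinfo = self._dry_fraction_varinfo

    @property
    def data(self):
        # `data` must alias the field that best characterizes this planform
        return self._dry_fraction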
class OpeningAnglePlanform(SpecialtyPlanform):
"""Planform for handling the Shaw Opening Angle Method.
This `Planform` (called `OAP` for short) is a wrapper/handler for the
input and output from the :func:`shaw_opening_angle_method`. The `OAP` is a
convenient way to manage extraction of a shoreline or a delta topset area.
Moreover, the `OAP` can be used as the input for :doc:`many types of
Mask </reference/mask/index>` objects, so it is often computationally
advantageous to compute this `Planform` once, and then use it to create
many different types of masks.
Examples
--------
Instantiate the `OpeningAnglePlanform` from an **inverted** binary mask of
elevation data (i.e., from an :obj:`~deltametrics.mask.ElevationMask`).
Note that the below example is the most verbose method for creating the
`OAP`. Consider available static methods.
.. plot::
:context: reset
:include-source:
>>> golfcube = dm.sample_data.golf()
>>> _EM = dm.mask.ElevationMask(
... golfcube['eta'][-1, :, :],
... elevation_threshold=0)
>>> # extract a mask of area below sea level as the
>>> # inverse of the ElevationMask
>>> below_mask = ~(_EM.mask)
>>> OAP = dm.plan.OpeningAnglePlanform(below_mask)
The OAP stores information computed from the
:func:`shaw_opening_angle_method`. See the two properties of the OAP
:obj:`below_mask` and :obj:`sea_angles`.
.. plot::
:context:
fig, ax = plt.subplots(1, 3, figsize=(10, 4))
golfcube.quick_show('eta', idx=-1, ax=ax[0])
im1 = ax[1].imshow(OAP.below_mask,
cmap='Greys_r')
im2 = ax[2].imshow(OAP.sea_angles,
cmap='jet')
dm.plot.append_colorbar(im2, ax=ax[2])
ax[0].set_title('input elevation data')
ax[1].set_title('OAP.below_mask')
ax[2].set_title('OAP.sea_angles')
for i in range(1, 3):
ax[i].set_xticks([])
ax[i].set_yticks([])
"""
@staticmethod
def from_arrays(*args):
"""Create directly from arrays.
.. warning:: not implemented.
"""
raise NotImplementedError
@staticmethod
def from_elevation_data(elevation_data, **kwargs):
"""Create an `OpeningAnglePlanform` from elevation data.
This process creates an ElevationMask from the input elevation array,
and proceeds to make the OAP from the below sea level mask.
.. note::
Keyword arguments are passed to the `ElevationMask` *and* to the
`OpeningAnglePlanform`, and thus passed to
:func:`shaw_opening_angle_method`.
.. important::
The `elevation_threshold` argument is implicitly required in this
method, because it is required to instantiate an
:obj:`ElevationMask` from elevation data.
Parameters
----------
elevation_data : :obj:`ndarray`
The elevation data to create the `ElevationMask` that is in
turn used to create the `OpeningAnglePlanform`.
Examples
--------
.. doctest::
>>> golfcube = dm.sample_data.golf()
>>> OAP = dm.plan.OpeningAnglePlanform.from_elevation_data(
... golfcube['eta'][-1, :, :],
... elevation_threshold=0)
"""
# make a temporary mask
_em = mask.ElevationMask(
elevation_data, **kwargs)
# invert the mask for the below sea level area
_below_mask = ~(_em.mask)
# compute from __init__ pathway
return OpeningAnglePlanform(_below_mask, **kwargs)
@staticmethod
def from_ElevationMask(ElevationMask, **kwargs):
"""Create an `OpeningAnglePlanform` from an `ElevationMask`.
.. note::
Keyword arguments are passed to the `OpeningAnglePlanform`, and
thus passed to :func:`shaw_opening_angle_method`.
Parameters
----------
ElevationMask : :obj:`~deltametrics.mask.ElevationMask`
The :obj:`ElevationMask` to be used to create the
`OpeningAnglePlanform`.
Examples
--------
.. doctest::
>>> golfcube = dm.sample_data.golf()
>>> _EM = dm.mask.ElevationMask(
... golfcube['eta'][-1, :, :],
... elevation_threshold=0)
>>> OAP = dm.plan.OpeningAnglePlanform.from_ElevationMask(
... _EM)
"""
if not isinstance(ElevationMask, mask.ElevationMask):
raise TypeError('Must be type: ElevationMask.')
# invert the mask for the below sea level area
_below_mask = ~(ElevationMask.mask)
# compute from __init__ pathway
return OpeningAnglePlanform(_below_mask)
@staticmethod
def from_mask(UnknownMask, **kwargs):
"""Wraps :obj:`from_ElevationMask`.
"""
return OpeningAnglePlanform.from_ElevationMask(
UnknownMask, **kwargs)
def __init__(self, *args, **kwargs):
"""Init.
EXPECTS A BINARY OCEAN MASK AS THE INPUT!
.. note:: needs docstring.
"""
super().__init__('opening angle', *args)
self._shape = None
self._sea_angles = None
self._below_mask = None
# set variable info display options
self._sea_angles_varinfo = plot.VariableInfo(
'sea_angles', cmap=plt.cm.jet, label='opening angle')
self._below_mask_varinfo = plot.VariableInfo(
'below_mask', cmap=plt.cm.gray, label='where below')
self._default_varinfo = self._sea_angles_varinfo
# check for inputs to return or proceed
if (len(args) == 0):
_allow_empty = kwargs.pop('allow_empty', False)
if _allow_empty:
# do nothing and return partially instantiated object
return
else:
raise ValueError(
'Expected 1 input, got 0.')
if not (len(args) == 1):
raise ValueError(
'Expected 1 input, got %s.' % str(len(args)))
# process the argument to the omask needed for Shaw OAM
if utils.is_ndarray_or_xarray(args[0]):
_arr = args[0]
# check that is boolean or integer binary
if (_arr.dtype == bool):
_below_mask = _arr
elif (_arr.dtype == int):
if np.all(np.logical_or(_arr == 0, _arr == 1)):
_below_mask = _arr
else:
raise ValueError(
'The input was an integer array, but some elements in '
'the array were not 0 or 1.')
else:
raise TypeError(
'The input was not an integer or boolean array, but was '
'{0}. If you are trying to instantiate an OAP from '
'elevation data directly, see static method '
'`OpeningAnglePlanform.from_elevation_data`.'.format(_arr.dtype))
# now check the type and allocate the arrays as xr.DataArray
if isinstance(_below_mask, xr.core.dataarray.DataArray):
self._below_mask = xr.zeros_like(_below_mask, dtype=bool)
self._below_mask.name = 'below_mask'
self._sea_angles = xr.zeros_like(_below_mask, dtype=float)
self._sea_angles.name = 'sea_angles'
elif isinstance(_below_mask, np.ndarray):
# this will use meshgrid to fill out with dx=1 in shape of array
self._below_mask = xr.DataArray(
data=np.zeros(_below_mask.shape, dtype=bool),
name='below_mask')
self._sea_angles = xr.DataArray(
data=np.zeros(_below_mask.shape, dtype=float),
name='sea_angles')
else:
raise TypeError('Invalid type {0}'.format(type(_below_mask)))
elif issubclass(type(args[0]), cube.BaseCube):
raise NotImplementedError(
'Instantiation from a Cube is not yet implemented.')
else:
# bad type supplied as argument
raise TypeError('Invalid type for argument.')
self._shape = _below_mask.shape
self._compute_from_below_mask(_below_mask, **kwargs)
def _compute_from_below_mask(self, below_mask, **kwargs):
"""Method for actual computation of the arrays.
Parameters
----------
below_mask
The binarized array of values that should be considered as the
ocean pixels.
**kwargs
Passed to :func:`shaw_opening_angle_method`.
"""
sea_angles = np.zeros(self._shape)
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 26 16:37:58 2021
@author: Diloz
"""
import os
import sys
import cv2
import scipy
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy import optimize
from scipy.stats import normaltest
import matplotlib.pylab as plt
from scipy.optimize import curve_fit
import imalgo
import plotting
import checkerCards
import segmentation
#%%
eps = np.finfo(float).eps
colrLable = ['blu', 'grn', 'red']
# lbl_illum = ['blu_L', 'grn_L', 'red_L']
expID, daySowing, cam = 'exp05', '2018-01-05-11-00', 'cam03' # Color Constancy
dirParent = "C:/Users/<NAME>/OneDrive - LA TROBE UNIVERSITY/PhD - Datasets"
# dirParent = "C:/Users/17600112/OneDrive - LA TROBE UNIVERSITY/PhD - Datasets"
root = os.path.join(dirParent, expID)
path_potTempl = os.path.join(root, 'potTempl.png')
potTemp = cv2.imread(path_potTempl)[:, :,0]
folder_trash = os.path.join(dirParent, 'trash')
if not os.path.exists(folder_trash): os.makedirs(folder_trash)
#%% 5. Estimate the illuminant on pots by interpolating the Macbeth cards
# This function estimates the illuminant on the centre of the pot using interpolation
def illum_interp(df_coor, df_Known, imBGR_src, imNum, illumLabl):
delt_int = (imBGR_src.shape[0]/100)
# Known illuminant at the colorCheckers
df_data = df_Known.copy().dropna()
# df_data = df_data.drop(columns=['left', 'width', 'top', 'height', 'light'])
df_data = df_data.loc[:, ['position', 'name', 'col_centre', 'row_centre', 'blu', 'grn', 'red']]
df_data.sort_values(by=["row_centre"], inplace=True)
df_data.reset_index(drop=True, inplace=True)
row_known1 = df_data.loc[:, "row_centre"].values
col_known1 = df_data.loc[:, "col_centre"].values
row_max = np.max(row_known1)
row_min = np.min(row_known1)
col_max = np.max(col_known1)
col_min = np.min(col_known1)
# Unknown illuminant at each pot centre coordinates
df_All = df_coor.drop(columns=['left', 'width', 'top', 'height', 'light']).copy()
df_All = df_All.drop(df_All[df_All.name == 'Checker'].index)
df_All.loc[df_All["row_centre"] >= row_max,"row_centre"] = row_max - delt_int
df_All.loc[df_All["row_centre"] <= row_min,"row_centre"] = row_min + delt_int
df_All.loc[df_All["col_centre"] >= col_max, "col_centre"] = col_max - delt_int
df_All.loc[df_All["col_centre"] <= col_min, "col_centre"] = col_min + delt_int
# df_All.sort_values(by=["position"], inplace=True)
df_All.sort_values(by=["row_centre"], inplace=True)
df_All.reset_index(drop=True, inplace=True)
row_All = df_All.loc[:, "row_centre"].values
col_All = df_All.loc[:, "col_centre"].values
print(df_All.describe())
print(df_data.loc[0:5, :])
# Zip coordinates to train Model interp1
zipCoord1 = list(zip(row_known1, col_known1))
# Double interpolation (NaN)
# for cnt in range(len(illumLabl)):
for cnt in range(len(illumLabl)):
row_known2, col_known2, interp1, illum_known2 = [], [], [], []
illum_chan = illumLabl[cnt]
# First Interpolation: Using a linear interpolator
illum_known1 = df_data.loc[:, illum_chan].values
interp1 = scipy.interpolate.LinearNDInterpolator(zipCoord1, illum_known1)
illum_estim1 = interp1(row_All, col_All)
# Second Interpolation for missing values: Nearest Interpolator
indxEmpty = np.argwhere(np.isnan(illum_estim1)).flatten()
if len(indxEmpty) > 0:
indxNoEmpty = np.argwhere(~np.isnan(illum_estim1)).flatten()
row_known2 = row_All[indxNoEmpty]
col_known2 = col_All[indxNoEmpty]
illum_known2 = illum_estim1[indxNoEmpty]
# Zip coordinates to train Model interp2
zipCoord2 = list(zip(row_known2, col_known2))
interp2 = scipy.interpolate.NearestNDInterpolator(zipCoord2, illum_known2)
illum_estim2 = interp2(row_All, col_All)
df_All.loc[:, illum_chan] = illum_estim2
df_All = pd.concat([df_All, df_data])
df_All.reset_index(drop=True, inplace=True)
return df_All
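# --- Hedged usage sketch (hypothetical data, not part of the pipeline): shows
# the DataFrame layout `illum_interp` expects. Column names mirror the usage
# above; the coordinates and illuminant values below are made up.
def _illum_interp_demo():
    df_known = pd.DataFrame({
        'position': [0, 1, 2, 3],
        'name': ['Checker'] * 4,
        'col_centre': [100, 900, 100, 900],
        'row_centre': [100, 100, 700, 700],
        'blu': [200.0, 190.0, 210.0, 205.0],
        'grn': [180.0, 175.0, 185.0, 182.0],
        'red': [160.0, 150.0, 170.0, 165.0],
    })
    df_coor = pd.DataFrame({
        'position': [10], 'name': ['pot_10'],
        'col_centre': [500], 'row_centre': [400],
        'left': [450], 'width': [100], 'top': [350], 'height': [100],
        'light': ['day'],
    })
    dummy_img = np.zeros((800, 1000, 3), dtype=np.uint8)
    return illum_interp(df_coor, df_known, dummy_img, imNum=0,
                        illumLabl=['blu', 'grn', 'red'])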
#%%
# Generate the illuminant prior surface
def illum_surface(df_illum, Siz_src, imNum, illumLabl):
width = Siz_src[1]
height = Siz_src[0]
dim = (width, height)
wth = int(width/10)
hth = int(height /10)
df = df_illum.copy()
df.sort_values(by=["row_centre"], inplace=True)
df.reset_index(drop=True, inplace=True)
row_point = df.loc[:, "row_centre"].values
row_diff = np.append(row_point[0], (row_point[1:] - row_point[0:-1]))
row_chang = row_point[row_diff>100]
row_num = int(len(row_chang))
col_num = int(round((len(row_point) / row_num)))
im_aux = np.ones((row_num, col_num, 3), dtype=np.float32)
bott = 0
top = col_num - 1
for row in range(row_num):
df_aux = df.loc[bott:top, :].copy()
bott = top + 1
top+= col_num
df_aux.sort_values(by=["col_centre"], inplace=True)
df_aux.reset_index(drop=True, inplace=True)
for col in range(len(df_aux)):
im_aux[row, col, 0] = df_aux.loc[col, illumLabl[0]]
im_aux[row, col, 1] = df_aux.loc[col, illumLabl[1]]
im_aux[row, col, 2] = df_aux.loc[col, illumLabl[2]]
im_ill_resiz = cv2.resize(im_aux, dim, interpolation = cv2.INTER_AREA)
# Smooth the Surface using a kernel size equal to 10% of the image
blur1 = cv2.blur(im_ill_resiz,(wth,wth))
blur2 = cv2.blur(blur1,(101,101))
prior_surf = np.clip(blur2, 0, 511)
return prior_surf
#%% Correct an input image using the illuminant surface
def correctIllumGrey(img_src, illum):
imgBGR = np.float32(img_src.copy())
illumBGR = illum.copy()
# Convert BGR illuminant values into XYZ (values in [0, 1])
illumXYZ = np.float32(cv2.cvtColor(illumBGR, cv2.COLOR_BGR2XYZ))
illumXYZ[illumXYZ[:, :, :]<=0] = eps
# Normalize illuminant with respect of Y, where Y=1
illumXYZ_norm = np.zeros_like(illumXYZ)
illumXYZ_norm[:, :, 0] = np.divide(illumXYZ[:, :, 0], illumXYZ[:, :, 1])
illumXYZ_norm[:, :, 1] = np.divide(illumXYZ[:, :, 1], illumXYZ[:, :, 1])
illumXYZ_norm[:, :, 2] = np.divide(illumXYZ[:, :, 2], illumXYZ[:, :, 1])
illumXYZ_norm[illumXYZ_norm[:, :, :]<=0] = eps
illumBGR_aux = cv2.cvtColor(illumXYZ_norm * 255, cv2.COLOR_XYZ2BGR)
illumBGR_aux[illumBGR_aux[:, :, :]<=0] = eps
imgBGR[:, :, 0] = np.divide(imgBGR[:, :, 0], illumBGR_aux[:, :, 0])*255
imgBGR[:, :, 1] = np.divide(imgBGR[:, :, 1], illumBGR_aux[:, :, 1])*255
imgBGR[:, :, 2] = np.divide(imgBGR[:, :, 2], illumBGR_aux[:, :, 2])*255
imgBGR = np.uint8(np.clip(np.round(imgBGR), 0, 255))
return imgBGR
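# --- Hedged illustration (conceptual only): the correction above amounts to a
# per-pixel, per-channel (von Kries style) diagonal scaling by the estimated
# illuminant. Shown for a single made-up pixel; the real function additionally
# normalizes the illuminant in XYZ so that its luminance Y equals 1.
def _diagonal_correction_demo():
    pixel_bgr = np.array([120.0, 150.0, 180.0])   # observed B, G, R
    illum_bgr = np.array([0.9, 1.0, 1.1])         # relative illuminant per channel
    corrected = np.clip(np.round(pixel_bgr / illum_bgr), 0, 255).astype(np.uint8)
    return corrected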
#%%
def correctIllumFit(img_src, illum):
imgBGR = img_src.copy()
imgBGR_aux1 = np.ones_like(np.float32(imgBGR))
illumBGR = np.float32(illum.copy())
illumBGR[illumBGR[:, :, :] == 0] = eps
illumBGR_aux = np.ones_like(illumBGR)
illumBGR_aux[:, :, 0] = np.divide(255, illumBGR[:, :, 0])
illumBGR_aux[:, :, 1] = np.divide(255, illumBGR[:, :, 1])
illumBGR_aux[:, :, 2] = np.divide(255, illumBGR[:, :, 2])
imgBGR_aux1[:, :, 0] = np.multiply(imgBGR[:, :, 0], illumBGR_aux[:, :, 0])
imgBGR_aux1[:, :, 1] = np.multiply(imgBGR[:, :, 1], illumBGR_aux[:, :, 1])
imgBGR_aux1[:, :, 2] = np.multiply(imgBGR[:, :, 2], illumBGR_aux[:, :, 2])
imgBGR[:, :, 0] =np.uint8(np.clip(np.round(imgBGR_aux1[:, :, 0]), 0, 255))
imgBGR[:, :, 1] =np.uint8(np.clip(np.round(imgBGR_aux1[:, :, 1]), 0, 255))
imgBGR[:, :, 2] =np.uint8(np.clip(np.round(imgBGR_aux1[:, :, 2]), 0, 255))
return imgBGR
#%% 6. Calculate the cloth Reflectance using Bayesian Inference
# Cloth reflectance Xc = Yc/Lc
def imgFeature(img_src, illum, coor_df, potTemp, imNum):
illBGR = illum.copy()
imgBGR = img_src.copy()
df = coor_df.loc[coor_df["name"]!="Checker", :].copy()
df.sort_values(by=["position"], inplace=True)
df.reset_index(drop=True, inplace=True)
df_ref = df.copy()
df_ill = df.copy()
df_pix = df.copy()
sampl = np.random.randint(len(df), size=1)
for cnt in range(len(df)):
posn, name, top, left, wd, ht = df.loc[cnt, ['position', 'name',
'top', 'left', 'width', 'height']].values
if name == "Checker": continue
bottom = top + ht
right = left+ wd
off = 10
potBGRCrp = imgBGR[top - off : bottom + off, left - off : right + off]
search_scale = np.linspace(1.00, 1.0040, 10)
search_degree = np.linspace(-2.5,2.5,15)
num_cores = 2
potImgAdj, startY, endY, startX, endX, deg = checkerCards.getChecker(potBGRCrp, potTemp,
search_scale, search_degree, num_cores)
off2 = 30
potImg = potImgAdj[off2:-off2, off2:-off2]
potMask, _ = segmentation.segment(potImg, 0.15, posn)
kernel = np.ones((3,3),np.uint8)
potMask = cv2.erode(potMask, kernel, iterations=7)
potSeg = cv2.bitwise_and(potImg, potImg, mask = potMask)
pixel_leaf = potImg[np.where(potMask > 0)]
# -*- coding: utf-8 -*-
import numpy as np
import json
class BatchTableHeader(object):
def __init__(self):
self.properties = {}
def add_property_from_array(self, propertyName, array):
self.properties[propertyName] = array
def to_array(self):
# convert dict to json string
bt_json = json.dumps(self.properties, separators=(',', ':'))
# header must be 4-byte aligned (refer to batch table documentation)
#bt_json += ' '*(4 - len(bt_json) % 4)
# returns an array of binaries representing the batch table
return np.fromstring(bt_json, dtype=np.uint8)  # note: np.fromstring is deprecated; np.frombuffer(bt_json.encode(), dtype=np.uint8) is the modern equivalent
class BatchTableBody(object):
def __init__(self):
self.properties = {}
self.header_length = 0
def sync(self, header):
self.header_length = len(header.to_array())
def to_array(self):
# header must be 4-byte aligned (refer to batch table documentation)
body = ' '*(4 - self.header_length % 4)
# Returns a blank space for now for testing
return np.fromstring(body, dtype=np.uint8)
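# --- Hedged usage sketch (illustrative only): serialize a header, sync the body
# so its padding length matches the header, and confirm 4-byte alignment of the
# combined batch table.
if __name__ == '__main__':
    header = BatchTableHeader()
    header.add_property_from_array('height', [10.5, 12.0, 9.75])
    body = BatchTableBody()
    body.sync(header)
    combined_length = len(header.to_array()) + len(body.to_array())
    print('batch table bytes:', combined_length,
          '(4-byte aligned:', combined_length % 4 == 0, ')')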
import os
from collections import defaultdict
import numpy
from matplotlib import pyplot as plt
from csep.utils.basic_types import seq_iter, AdaptiveHistogram
from csep.utils.calc import _compute_likelihood, bin1d_vec, _compute_spatial_statistic
from csep.utils.constants import CSEP_MW_BINS, SECONDS_PER_DAY, SECONDS_PER_HOUR, SECONDS_PER_WEEK
from csep.models import EvaluationResult
from csep.core.repositories import FileSystem
from csep.utils.plots import plot_number_test, plot_magnitude_test, plot_likelihood_test, plot_spatial_test, \
plot_cumulative_events_versus_time_dev, plot_magnitude_histogram_dev, plot_distribution_test, plot_probability_test, \
plot_spatial_dataset
from csep.utils.stats import get_quantiles, cumulative_square_diff, sup_dist
# todo: refactor these methods to not perform any filtering of catalogs inside the processing task
class AbstractProcessingTask:
def __init__(self, data=None, name=None, min_mw=2.5, n_cat=None, mws=None):
self.data = data or []
# to-be deprecated
self.mws = mws or [2.5, 3.0, 3.5, 4.0, 4.5]
self.min_mw = min_mw
self.n_cat = n_cat
self.name = name
self.ax = []
self.fnames = []
self.needs_two_passes = False
self.buffer = []
self.region = None
self.buffer_fname = None
self.fhandle = None
self.archive = True
self.version = 1
@staticmethod
def _build_filename(dir, mw, plot_id):
basename = f"{plot_id}_mw_{str(mw).replace('.','p')}".lower()
return os.path.join(dir, basename)
def process(self, data):
raise NotImplementedError('must implement process()!')
def process_again(self, catalog, args=()):
""" This function defaults to pass unless the method needs to read through the data twice. """
pass
def post_process(self, obs, args=None):
"""
Compute evaluation of data stored in self.data.
Args:
obs (csep.Catalog): used to evaluate the forecast
args (tuple): args for this function
Returns:
result (csep.core.evaluations.EvaluationResult):
"""
result = EvaluationResult()
return result
def plot(self, results, plot_dir, show=False):
"""
plots function, typically just a wrapper to function in utils.plotting()
Args:
show (bool): show plot, if plotting multiple, just run on last.
filename (str): where to save the file
plot_args (dict): plotting args to pass to function
Returns:
axes (matplotlib.axes)
"""
raise NotImplementedError('must implement plot()!')
def store_results(self, results, dir):
"""
Saves evaluation results serialized into json format. This format is used to recreate the results class which
can then be plotted if desired. The following directory structure will be created:
| dir
|-- n-test
|---- n-test_mw_2.5.json
|---- n_test_mw_3.0.json
|-- m-test
|---- m_test_mw_2.5.json
|---- m_test_mw_3.0.json
...
The results iterable should only contain results for a single evaluation. Typically they would contain different
minimum magnitudes.
Args:
results (Iterable of EvaluationResult): iterable object containing evaluation results. this could be a list or tuple of lists as well
dir (str): directory to store the testing results. name will be constructed programmatically.
Returns:
None
"""
success = False
if self.archive == False:
return
# handle if results is just a single result
if isinstance(results, EvaluationResult):
repo = FileSystem(url=self._build_filename(dir, results.min_mw, results.name) + '.json')
if repo.save(results.to_dict()):
success = True
return success
# or if its an iterable
for idx in seq_iter(results):
# for debugging
if isinstance(results[idx], tuple) or isinstance(results[idx], list):
result = results[idx]
else:
result = [results[idx]]
for r in result:
repo = FileSystem(url=self._build_filename(dir, r.min_mw, r.name) + '.json')
if repo.save(r.to_dict()):
success = True
return success
def store_data(self, dir):
""" Store the intermediate data used to calculate the results for the evaluations. """
raise NotImplementedError
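# --- Hedged sketch (illustrative only): the intended single-pass driver loop
# for these processing tasks. `forecast_catalogs`, `observed_catalog`, and
# `plot_dir` are hypothetical stand-ins; tasks with `needs_two_passes` set must
# additionally call `process_again` on a second iteration over the catalogs.
def _run_single_pass_task(task, forecast_catalogs, observed_catalog, plot_dir):
    for catalog in forecast_catalogs:       # pass 1: accumulate per-catalog data
        task.process(catalog)
    results = task.post_process(observed_catalog)   # score against observations
    task.plot(results, plot_dir, show=False)        # write figures to disk
    task.store_results(results, plot_dir)           # serialize results to json
    return results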
class NumberTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mws = [2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0]
def process(self, catalog, filter=False):
if not self.name:
self.name = catalog.name
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
counts.append(cat_filt.event_count)
self.data.append(counts)
def post_process(self, obs, args=None):
# we dont need args for this function
_ = args
results = {}
data = numpy.array(self.data)
for i, mw in enumerate(self.mws):
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
observation_count = obs_filt.event_count
# get delta_1 and delta_2 values
delta_1, delta_2 = get_quantiles(data[:,i], observation_count)
# prepare result
result = EvaluationResult(test_distribution=data[:,i],
name='N-Test',
observed_statistic=observation_count,
quantile=(delta_1, delta_2),
status='Normal',
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
min_mw=mw,
obs_name=obs.name)
results[mw] = result
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result in results.items():
# compute bin counts, this one is special because of integer values
td = result.test_distribution
min_bin, max_bin = numpy.min(td), numpy.max(td)
# hard-code some logic for bin size
bins = numpy.arange(min_bin, max_bin)
if len(bins) == 1:
bins = 3
n_test_fname = AbstractProcessingTask._build_filename(plot_dir, mw, 'n_test')
_ = plot_number_test(result, show=show, plot_args={'percentile': 95,
'title': f'Number Test, M{mw}+',
'bins': bins,
'filename': n_test_fname})
self.fnames.append(n_test_fname)
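# --- Hedged illustration (conceptual, not the library's `get_quantiles`): for a
# catalog-based number test, the two quantiles can be read as the empirical
# fraction of simulated catalogs with at least / at most as many events as were
# observed.
def _empirical_number_quantiles(simulated_counts, observed_count):
    sim = numpy.asarray(simulated_counts)
    delta_1 = numpy.mean(sim >= observed_count)   # P(N_sim >= N_obs)
    delta_2 = numpy.mean(sim <= observed_count)   # P(N_sim <= N_obs)
    return delta_1, delta_2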
class MagnitudeTest(AbstractProcessingTask):
def __init__(self, mag_bins=None, **kwargs):
super().__init__(**kwargs)
self.mws = [2.5, 3.0, 3.5, 4.0]
self.mag_bins = mag_bins
self.version = 4
def process(self, catalog):
if not self.name:
self.name = catalog.name
# magnitude mag_bins should probably be bound to the region, although we should have a SpaceMagnitudeRegion class
if self.mag_bins is None:
try:
self.mag_bins = catalog.region.mag_bins
except:
self.mag_bins = CSEP_MW_BINS
# optimization idea: always compute this for the lowest magnitude, above this is redundant
mags = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
binned_mags = cat_filt.magnitude_counts(mag_bins=self.mag_bins)
mags.append(binned_mags)
# data shape (n_cat, n_mw, n_mw_bins)
self.data.append(mags)
def post_process(self, obs, args=None):
# we dont need args
_ = args
results = {}
for i, mw in enumerate(self.mws):
test_distribution = []
# get observed magnitude counts
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
print(f"Skipping {mw} in Magnitude test because no observed events.")
continue
obs_histogram = obs_filt.magnitude_counts(mag_bins=self.mag_bins)
n_obs_events = numpy.sum(obs_histogram)
mag_counts_all = numpy.array(self.data)
# get the union histogram, simply the sum over all catalogs, (n_cat, n_mw)
union_histogram = numpy.sum(mag_counts_all[:,i,:], axis=0)
n_union_events = numpy.sum(union_histogram)
union_scale = n_obs_events / n_union_events
scaled_union_histogram = union_histogram * union_scale
for j in range(mag_counts_all.shape[0]):
n_events = numpy.sum(mag_counts_all[j,i,:])
if n_events == 0:
continue
scale = n_obs_events / n_events
catalog_histogram = mag_counts_all[j,i,:] * scale
test_distribution.append(cumulative_square_diff(numpy.log10(catalog_histogram+1), numpy.log10(scaled_union_histogram+1)))
# compute statistic from the observation
obs_d_statistic = cumulative_square_diff(numpy.log10(obs_histogram+1), numpy.log10(scaled_union_histogram+1))
# score evaluation
_, quantile = get_quantiles(test_distribution, obs_d_statistic)
# prepare result
result = EvaluationResult(test_distribution=test_distribution,
name='M-Test',
observed_statistic=obs_d_statistic,
quantile=quantile,
status='Normal',
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
obs_name=obs.name,
sim_name=self.name)
results[mw] = result
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
# get the filename
for mw, result in results.items():
m_test_fname = self._build_filename(plot_dir, mw, 'm-test')
plot_args = {'percentile': 95,
'title': f'Magnitude Test, M{mw}+',
'bins': 'auto',
'filename': m_test_fname}
_ = plot_magnitude_test(result, show=False, plot_args=plot_args)
self.fnames.append(m_test_fname)
def _build_filename(self, dir, mw, plot_id):
try:
mag_dh = self.mag_bins[1] - self.mag_bins[0]
mag_dh_str = f"_dmag{mag_dh:.1f}".replace('.','p').lower()
except:
mag_dh_str = ''
basename = f"{plot_id}_mw_{str(mw).replace('.', 'p')}{mag_dh_str}".lower()
return os.path.join(dir, basename)
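# --- Hedged illustration (an assumption, not the imported implementation): the
# M-test statistic above is computed by `cumulative_square_diff` on log-scaled
# binned magnitude counts; a plausible reading is the summed squared difference
# of the running (cumulative) totals of the two inputs.
def _cumulative_square_diff_sketch(u, v):
    cu = numpy.cumsum(numpy.asarray(u))
    cv = numpy.cumsum(numpy.asarray(v))
    return numpy.sum((cu - cv) ** 2)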
class LikelihoodAndSpatialTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.region = None
self.test_distribution_spatial = []
self.test_distribution_likelihood = []
self.cat_id = 0
self.needs_two_passes = True
self.buffer = []
self.fnames = {}
self.fnames['l-test'] = []
self.fnames['s-test'] = []
self.version = 5
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_counts()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def process_again(self, catalog, args=()):
# we dont actually need to do this if we are caching the data
time_horizon, n_cat, end_epoch, obs = args
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
# unfortunately, we need to iterate twice through the catalogs for this, unless we start pre-processing
# everything and storing approximate cell-wise rates
lhs = numpy.zeros(len(self.mws))
lhs_norm = numpy.zeros(len(self.mws))
for i, mw in enumerate(self.mws):
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.event_count
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_cat = cat_filt.spatial_counts()
lh, lh_norm = _compute_likelihood(gridded_cat, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
lhs[i] = lh
lhs_norm[i] = lh_norm
self.test_distribution_likelihood.append(lhs)
self.test_distribution_spatial.append(lhs_norm)
def post_process(self, obs, args=None):
cata_iter, time_horizon, end_epoch, n_cat = args
results = {}
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
test_distribution_likelihood = numpy.array(self.test_distribution_likelihood)
# there can be nans in the spatial distribution
test_distribution_spatial = numpy.array(self.test_distribution_spatial)
# prepare results for each mw
for i, mw in enumerate(self.mws):
# get observed likelihood
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in the observed catalog.')
continue
n_obs = obs_filt.get_number_of_events()
gridded_obs = obs_filt.spatial_counts()
obs_lh, obs_lh_norm = _compute_likelihood(gridded_obs, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
# if obs_lh is -numpy.inf, recompute but only for indexes where obs and simulated are non-zero
message = "normal"
if obs_lh == -numpy.inf or obs_lh_norm == -numpy.inf:
idx_good_sim = apprx_rate_density[i,:] != 0
new_gridded_obs = gridded_obs[idx_good_sim]
new_n_obs = numpy.sum(new_gridded_obs)
print(f"Found -inf as the observed likelihood score for M{self.mws[i]}+. "
f"Assuming event(s) occurred in undersampled region of forecast.\n"
f"Recomputing with {new_n_obs} events after removing {n_obs - new_n_obs} events.")
if new_n_obs == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in the observed catalog '
f'after correcting for under-sampling in forecast.')
continue
new_ard = apprx_rate_density[i,idx_good_sim]
# we need to use the old n_obs here, because if we normalize the ard to a different value the observed
# statistic will not be computed correctly.
obs_lh, obs_lh_norm = _compute_likelihood(new_gridded_obs, new_ard, expected_cond_count[i], n_obs)
message = "undersampled"
# determine outcome of evaluation, check for infinity
_, quantile_likelihood = get_quantiles(test_distribution_likelihood[:,i], obs_lh)
# build evaluation result
result_likelihood = EvaluationResult(test_distribution=test_distribution_likelihood[:,i],
name='L-Test',
observed_statistic=obs_lh,
quantile=quantile_likelihood,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
# check for nans here
test_distribution_spatial_1d = test_distribution_spatial[:,i]
if numpy.isnan(numpy.sum(test_distribution_spatial_1d)):
test_distribution_spatial_1d = test_distribution_spatial_1d[~numpy.isnan(test_distribution_spatial_1d)]
if n_obs == 0 or numpy.isnan(obs_lh_norm):
message = "not-valid"
quantile_spatial = -1
else:
_, quantile_spatial = get_quantiles(test_distribution_spatial_1d, obs_lh_norm)
result_spatial = EvaluationResult(test_distribution=test_distribution_spatial_1d,
name='S-Test',
observed_statistic=obs_lh_norm,
quantile=quantile_spatial,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
results[mw] = (result_likelihood, result_spatial)
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result_tuple in results.items():
# plot likelihood test
l_test_fname = self._build_filename(plot_dir, mw, 'l-test')
plot_args = {'percentile': 95,
'title': f'Pseudo-Likelihood Test, M{mw}+',
'filename': l_test_fname}
_ = plot_likelihood_test(result_tuple[0], axes=None, plot_args=plot_args, show=show)
# we can access this in the main program if needed
# self.ax.append((ax, spatial_ax))
self.fnames['l-test'].append(l_test_fname)
if result_tuple[1].status == 'not-valid':
print(f'Skipping plot for spatial test on {mw}. Test results are not valid, likely because no earthquakes observed in target observed_catalog.')
continue
# plot spatial test
s_test_fname = self._build_filename(plot_dir, mw, 's-test')
plot_args = {'percentile': 95,
'title': f'Spatial Test, M{mw}+',
'filename': s_test_fname}
_ = plot_spatial_test(result_tuple[1], axes=None, plot_args=plot_args, show=False)
self.fnames['s-test'].append(s_test_fname)
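# --- Hedged illustration (follows directly from the code above): the
# approximate rate density used by the pseudo-likelihood tests is the per-cell
# mean count over all simulated catalogs, and the expected conditional count is
# its sum over space.
def _approximate_rate_density(summed_gridded_counts, n_cat):
    apprx_rate_density = numpy.asarray(summed_gridded_counts, dtype=float) / n_cat
    expected_cond_count = numpy.sum(apprx_rate_density, axis=-1)
    return apprx_rate_density, expected_cond_count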
class SpatialTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.region = None
self.test_distribution_spatial = []
self.cat_id = 0
self.needs_two_passes = True
self.buffer = []
self.fnames = {}
self.fnames['s-test'] = []
self.version = 5
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_counts()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def process_again(self, catalog, args=()):
# we dont actually need to do this if we are caching the data
time_horizon, n_cat, end_epoch, obs = args
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
# unfortunately, we need to iterate twice through the catalogs for this, unless we start pre-processing
# everything and storing approximate cell-wise rates
lhs = numpy.zeros(len(self.mws))
lhs_norm = numpy.zeros(len(self.mws))
for i, mw in enumerate(self.mws):
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.event_count
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_cat = cat_filt.spatial_counts()
lh, lh_norm = _compute_likelihood(gridded_cat, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
lhs[i] = lh
lhs_norm[i] = lh_norm
self.test_distribution_spatial.append(lhs_norm)
def post_process(self, obs, args=None):
cata_iter, time_horizon, end_epoch, n_cat = args
results = {}
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
# there can be nans in the spatial distribution
test_distribution_spatial = numpy.array(self.test_distribution_spatial)
# prepare results for each mw
for i, mw in enumerate(self.mws):
# get observed likelihood
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in the observed catalog.')
continue
n_obs = obs_filt.get_number_of_events()
gridded_obs = obs_filt.spatial_counts()
obs_lh, obs_lh_norm = _compute_likelihood(gridded_obs, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
# if obs_lh is -numpy.inf, recompute but only for indexes where obs and simulated are non-zero
message = "normal"
if obs_lh == -numpy.inf or obs_lh_norm == -numpy.inf:
idx_good_sim = apprx_rate_density[i,:] != 0
new_gridded_obs = gridded_obs[idx_good_sim]
new_n_obs = numpy.sum(new_gridded_obs)
print(f"Found -inf as the observed likelihood score for M{self.mws[i]}+. "
f"Assuming event(s) occurred in undersampled region of forecast.\n"
f"Recomputing with {new_n_obs} events after removing {n_obs - new_n_obs} events.")
if new_n_obs == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in the observed catalog '
f'after correcting for under-sampling in forecast.')
continue
new_ard = apprx_rate_density[i,idx_good_sim]
# we need to use the old n_obs here, because if we normalize the ard to a different value the observed
# statistic will not be computed correctly.
obs_lh, obs_lh_norm = _compute_likelihood(new_gridded_obs, new_ard, expected_cond_count[i], n_obs)
message = "undersampled"
# check for nans here
test_distribution_spatial_1d = test_distribution_spatial[:,i]
if numpy.isnan(numpy.sum(test_distribution_spatial_1d)):
test_distribution_spatial_1d = test_distribution_spatial_1d[~numpy.isnan(test_distribution_spatial_1d)]
if n_obs == 0 or numpy.isnan(obs_lh_norm):
message = "not-valid"
quantile_spatial = -1
else:
_, quantile_spatial = get_quantiles(test_distribution_spatial_1d, obs_lh_norm)
result_spatial = EvaluationResult(test_distribution=test_distribution_spatial_1d,
name='S-Test',
observed_statistic=obs_lh_norm,
quantile=quantile_spatial,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
results[mw] = result_spatial
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result in results.items():
if result.status == 'not-valid':
print(f'Skipping plot for spatial test on {mw}. Test results are not valid, likely because no earthquakes observed in target observed_catalog.')
continue
# plot spatial test
s_test_fname = self._build_filename(plot_dir, mw, 's-test')
plot_args = {'percentile': 95,
'title': f'Spatial Test, M{mw}+',
'filename': s_test_fname}
_ = plot_spatial_test(result, axes=None, plot_args=plot_args, show=False)
self.fnames['s-test'].append(s_test_fname)
class LikelihoodTest(AbstractProcessingTask):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.region = None
self.test_distribution_likelihood = []
self.cat_id = 0
self.needs_two_passes = True
self.buffer = []
self.fnames = {}
self.fnames['l-test'] = []
self.fnames['s-test'] = []
self.version = 5
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
# compute stuff from data
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_counts = cat_filt.spatial_counts()
counts.append(gridded_counts)
# we want to aggregate the counts in each bin to preserve memory
if len(self.data) == 0:
self.data = numpy.array(counts)
else:
self.data += numpy.array(counts)
def process_again(self, catalog, args=()):
# we dont actually need to do this if we are caching the data
time_horizon, n_cat, end_epoch, obs = args
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
# unfortunately, we need to iterate twice through the catalogs for this, unless we start pre-processing
# everything and storing approximate cell-wise rates
lhs = numpy.zeros(len(self.mws))
lhs_norm = numpy.zeros(len(self.mws))
for i, mw in enumerate(self.mws):
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
n_obs = obs_filt.event_count
cat_filt = catalog.filter(f'magnitude >= {mw}')
gridded_cat = cat_filt.spatial_counts()
lh, lh_norm = _compute_likelihood(gridded_cat, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
lhs[i] = lh
lhs_norm[i] = lh_norm
self.test_distribution_likelihood.append(lhs)
def post_process(self, obs, args=None):
cata_iter, time_horizon, end_epoch, n_cat = args
results = {}
apprx_rate_density = numpy.array(self.data) / n_cat
expected_cond_count = numpy.sum(apprx_rate_density, axis=1)
test_distribution_likelihood = numpy.array(self.test_distribution_likelihood)
# there can be nans in the spatial distribution
# prepare results for each mw
for i, mw in enumerate(self.mws):
# get observed likelihood
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
if obs_filt.event_count == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in the observed catalog.')
continue
n_obs = obs_filt.get_number_of_events()
gridded_obs = obs_filt.spatial_counts()
obs_lh, obs_lh_norm = _compute_likelihood(gridded_obs, apprx_rate_density[i,:], expected_cond_count[i], n_obs)
# if obs_lh is -numpy.inf, recompute but only for indexes where obs and simulated are non-zero
message = "normal"
if obs_lh == -numpy.inf or obs_lh_norm == -numpy.inf:
idx_good_sim = apprx_rate_density[i,:] != 0
new_gridded_obs = gridded_obs[idx_good_sim]
new_n_obs = numpy.sum(new_gridded_obs)
print(f"Found -inf as the observed likelihood score for M{self.mws[i]}+. "
f"Assuming event(s) occurred in undersampled region of forecast.\n"
f"Recomputing with {new_n_obs} events after removing {n_obs - new_n_obs} events.")
if new_n_obs == 0:
print(f'Skipping pseudo-likelihood based tests for M{mw}+ because no events in the observed catalog '
f'after correcting for under-sampling in forecast.')
continue
new_ard = apprx_rate_density[i,idx_good_sim]
# we need to use the old n_obs here, because if we normalize the ard to a different value the observed
# statistic will not be computed correctly.
obs_lh, obs_lh_norm = _compute_likelihood(new_gridded_obs, new_ard, expected_cond_count[i], n_obs)
message = "undersampled"
# determine outcome of evaluation, check for infinity
_, quantile_likelihood = get_quantiles(test_distribution_likelihood[:,i], obs_lh)
# build evaluation result
result_likelihood = EvaluationResult(test_distribution=test_distribution_likelihood[:,i],
name='L-Test',
observed_statistic=obs_lh,
quantile=quantile_likelihood,
status=message,
min_mw=mw,
obs_catalog_repr=obs.date_accessed,
sim_name=self.name,
obs_name=obs.name)
results[mw] = result_likelihood
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
for mw, result in results.items():
# plot likelihood test
l_test_fname = self._build_filename(plot_dir, mw, 'l-test')
plot_args = {'percentile': 95,
'title': f'Pseudo-Likelihood Test, M{mw}+',
'filename': l_test_fname}
_ = plot_likelihood_test(result, axes=None, plot_args=plot_args, show=show)
# we can access this in the main program if needed
# self.ax.append((ax, spatial_ax))
self.fnames['s-test'].append(l_test_fname)
class CumulativeEventPlot(AbstractProcessingTask):
def __init__(self, origin_epoch, end_epoch, **kwargs):
super().__init__(**kwargs)
self.origin_epoch = origin_epoch
self.end_epoch = end_epoch
self.time_bins, self.dt = self._get_time_bins()
self.n_bins = self.time_bins.shape[0]
self.archive = False
def _get_time_bins(self):
diff = (self.end_epoch - self.origin_epoch) / SECONDS_PER_DAY / 1000
# if less than 7 day use hours
if diff <= 7.0:
dt = SECONDS_PER_HOUR * 1000
# if less than 180 day use days
elif diff <= 180:
dt = SECONDS_PER_DAY * 1000
# if less than 3 years (1,095.75 days) use weeks
elif diff <= 1095.75:
dt = SECONDS_PER_WEEK * 1000
# use 30 day
else:
dt = SECONDS_PER_DAY * 1000 * 30
# always make bins from start to end of observed_catalog
return numpy.arange(self.origin_epoch, self.end_epoch+dt/2, dt), dt
def process(self, catalog):
counts = []
for mw in self.mws:
cat_filt = catalog.filter(f'magnitude >= {mw}')
n_events = cat_filt.catalog.shape[0]
ses_origin_time = cat_filt.get_epoch_times()
inds = bin1d_vec(ses_origin_time, self.time_bins)
binned_counts = numpy.zeros(self.n_bins)
for j in range(n_events):
binned_counts[inds[j]] += 1
counts.append(binned_counts)
self.data.append(counts)
def post_process(self, obs, args=None):
# data are stored as (n_cat, n_mw_bins, n_time_bins)
summed_counts = numpy.cumsum(self.data, axis=2)
# compute summary statistics for plotting
fifth_per = numpy.percentile(summed_counts, 5, axis=0)
first_quar = numpy.percentile(summed_counts, 25, axis=0)
med_counts = numpy.percentile(summed_counts, 50, axis=0)
second_quar = numpy.percentile(summed_counts, 75, axis=0)
nine_fifth = numpy.percentile(summed_counts, 95, axis=0)
# compute median for comcat observed_catalog
obs_counts = []
for mw in self.mws:
obs_filt = obs.filter(f'magnitude >= {mw}', in_place=False)
obs_binned_counts = numpy.zeros(self.n_bins)
inds = bin1d_vec(obs_filt.get_epoch_times(), self.time_bins)
for j in range(obs_filt.event_count):
obs_binned_counts[inds[j]] += 1
obs_counts.append(obs_binned_counts)
obs_summed_counts = numpy.cumsum(obs_counts, axis=1)
# update time_bins for plotting
millis_to_days = 60 * 60 * 24 * 1000
time_bins = (self.time_bins - self.time_bins[0]) / millis_to_days
# since we are cumulating, plot at bin ends
time_bins = time_bins + (self.dt / millis_to_days)
# make all arrays start at zero
time_bins = numpy.insert(time_bins, 0, 0)
# 2d array with (n_mw, n_time_bins)
fifth_per = numpy.insert(fifth_per, 0, 0, axis=1)
first_quar = numpy.insert(first_quar, 0, 0, axis=1)
med_counts = numpy.insert(med_counts, 0, 0, axis=1)
second_quar = numpy.insert(second_quar, 0, 0, axis=1)
nine_fifth = numpy.insert(nine_fifth, 0, 0, axis=1)
obs_summed_counts = numpy.insert(obs_summed_counts, 0, 0, axis=1)
# ydata is now (5, n_mw, n_time_bins)
results = {'xdata': time_bins,
'ydata': (fifth_per, first_quar, med_counts, second_quar, nine_fifth),
'obs_data': obs_summed_counts}
return results
def plot(self, results, plot_dir, plot_args=None, show=False):
# these are numpy arrays with mw information
xdata = results['xdata']
ydata = numpy.array(results['ydata'])
obs_data = results['obs_data']
# get values from plotting args
for i, mw in enumerate(self.mws):
cum_counts_fname = self._build_filename(plot_dir, mw, 'cum_counts')
plot_args = {'title': f'Cumulative Event Counts, M{mw}+',
'xlabel': 'Days since start of forecast',
'filename': cum_counts_fname}
ax = plot_cumulative_events_versus_time_dev(xdata, ydata[:,i,:], obs_data[i,:], plot_args, show=False)
# self.ax.append(ax)
self.fnames.append(cum_counts_fname)
def store_results(self, results, dir):
# store quickly for numpy, because we dont have a results class to deal with this
fname = self._build_filename(dir, self.mws[0], 'cum_counts') + '.npy'
numpy.save(fname, results)
class MagnitudeHistogram(AbstractProcessingTask):
def __init__(self, calc=True, **kwargs):
super().__init__(**kwargs)
self.calc = calc
self.archive = False
def process(self, catalog):
""" this can share data with the Magnitude test, hence self.calc
"""
if not self.name:
self.name = catalog.name
if self.calc:
# always compute this for the lowest magnitude, above this is redundant
cat_filt = catalog.filter(f'magnitude >= {self.mws[0]}')
binned_mags = cat_filt.magnitude_counts()
self.data.append(binned_mags)
def post_process(self, obs, args=None):
""" just store observation for later """
_ = args
self.obs = obs
def plot(self, results, plot_dir, plot_args=None, show=False):
mag_hist_fname = self._build_filename(plot_dir, self.mws[0], 'mag_hist')
plot_args = {
'xlim': [self.mws[0], numpy.max(CSEP_MW_BINS)],
'title': f"Magnitude Histogram, M{self.mws[0]}+",
'sim_label': self.name,
'obs_label': self.obs.name,
'filename': mag_hist_fname
}
obs_filt = self.obs.filter(f'magnitude >= {self.mws[0]}', in_place=False)
# data (n_sim, n_mag, n_mw_bins)
ax = plot_magnitude_histogram_dev(numpy.array(self.data)[:,0,:], obs_filt, plot_args, show=False)
# self.ax.append(ax)
self.fnames.append(mag_hist_fname)
class UniformLikelihoodCalculation(AbstractProcessingTask):
"""
This calculation assumes that the spatial distribution of the forecast is uniform, but the seismicity is located
in spatial bins according to the clustering provided by the forecast model.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.data = None
self.test_distribution_likelihood = []
self.test_distribution_spatial = []
self.fnames = {}
self.fnames['l-test'] = []
self.fnames['s-test'] = []
self.needs_two_passes = True
def process(self, catalog):
# grab stuff from data that we might need later
if not self.region:
self.region = catalog.region
if not self.name:
self.name = catalog.name
def process_again(self, catalog, args=()):
time_horizon, n_cat, end_epoch, obs = args
expected_cond_count = numpy.sum(self.data, axis=1)
from discord.ext import commands
import discord
import numpy as np
import os
import traceback
from parse import parse
client = discord.Client()
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
# @bot.event
# async def on_command_error(ctx, error):
# orig_error = getattr(error, "original", error)
# error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
# await ctx.send(error_msg)
# @bot.command()
# async def ping(ctx):
# await ctx.send('pong')
# @bot.command()
# async def neko(ctx):
# await ctx.send('nyan')
def dice(dice_size):
num = np.random.randint(1, int(dice_size + 1))
return num
def simple_dice(dice_size, dice_num):
dice_val = np.array([], dtype=np.int64)
for i in range(dice_num):
dice_val = np.append(dice_val, dice(dice_size))
#msg = 'dice: ' + str(np.sum(dice_val)) + ' = ' + str(dice_val)
m = dice_val
return m
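# --- Hedged usage sketch (not wired to the bot): roll three six-sided dice and
# report the individual rolls plus their total.
def _roll_demo():
    rolls = simple_dice(6, 3)          # e.g. array([4, 1, 6])
    return rolls, int(np.sum(rolls))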
def CCB(m, a):
if m <= (a/5):
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(a) + ' Extreme!!!'
elif (a/5) < m <= (a/2):
msg = 'dice: ' + str(np.sum(m)) + ' = ' + str(m) + ' <= ' + str(a) + ' Hard!'  # success label assumed; mirrors the branch above
#!/usr/bin/env python3
''' Icecore PSM
Adapted from Sylvia's PRYSM code (https://github.com/sylvia-dee/PRYSM) with precipitation weighting added.
'''
import numpy as np
from scipy import integrate, signal
from pathos.multiprocessing import ProcessingPool as Pool
from tqdm import tqdm
import LMRt
# import time
# from IPython import embed
def ice_sensor(year, d18Op, pr, alt_diff=0.):
''' Icecore sensor model
The ice core sensor model calculates precipitation-weighted del18OP (i.e. isotope ratio is weighted by
the amount of precipitation that accumulates) and corrects for temperature and altitude bias between model
and site ([Yurtsever, 1975], 0.3/100m [Vogel et al., 1975]).
Args:
year (1d array: time): time axis [year in float]
d18Op (2d array: location, time): d18O of precipitation [permil]
pr (2d array: location, time): precipitation rate [kg m-2 s-1]
alt_diff (1d array: location): actual Altitude-Model Altitude [meters]
Returns:
d18Oice (2d array: location, year in int): annualized d18O of ice [permil]
References:
Yurtsever, Y., Worldwide survey of stable isotopes in precipitation., Rep. Isotope Hydrology Section, IAEA, 1975.
'''
# Altitude Effect: cooling and precipitation of heavy isotopes.
# O18 ~0.15 to 0.30 permil per 100m.
alt_eff = -0.25
alt_corr = (alt_diff/100.)*alt_eff
d18Op_weighted, year_int = LMRt.utils.annualize_var(d18Op, year, weights=pr)
d18O_ice = d18Op_weighted + alt_corr
return d18O_ice
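# --- Hedged usage sketch (synthetic inputs; shapes follow the docstring above):
# one site, ten years of monthly pseudo-d18Op weighted by precipitation.
def _ice_sensor_demo():
    year = 1950.0 + np.arange(120) / 12.0            # monthly time axis [yr]
    d18Op = -20.0 + np.random.randn(1, 120)          # [permil], (location, time)
    pr = np.abs(np.random.randn(1, 120)) * 1e-5      # [kg m-2 s-1]
    return ice_sensor(year, d18Op, pr, alt_diff=150.0)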
def diffusivity(rho, T=250, P=0.9, rho_d=822, b=1.3):
'''
DOCSTRING: Function 'diffusivity'
Description: Calculates diffusivity (in m^2/s) as a function of density.
Inputs:
P: Ambient Pressure in Atm
T: Temperature in K
rho: density profile (kg/m^3)
rho_d: 822 kg/m^2 [default], density at which ice becomes impermeable to diffusion
Defaults are available for all but rho, so only one argument need be entered.
Note values for diffusivity in air:
D16 = 2.1e-5*(T/273.15)^1.94*1/P
D18 = D16/1.0285
D2 = D16/1.0251
D17 = D16/((D16/D18)^0.518)
Reference: Johnsen et al. (2000): Diffusion of Stable isotopes in polar firn and ice:
the isotope effect in firn diffusion
'''
# Set Constants
R = 8.314478 # Gas constant
m = 18.02e-3 # molar weight of water (in kg)
alpha18 = np.exp(11.839/T-28.224e-3) # ice-vapor fractionation for oxygen 18
p = np.exp(9.5504+3.53*np.log(T)-5723.265/T-0.0073*T) # saturation vapor pressure
Po = 1. # reference pressure, atmospheres
rho_i = 920. # kg/m^3, density of solid ice
# Set diffusivity in air (units of m^2/s)
Da = 2.1e-5*np.power((T/273.15), 1.94)*(Po/P)
Dai = Da/1.0285
# Calculate Tortuosity
invtau = np.zeros(len(rho))
# for i in range(len(rho)):
# if rho[i] <= rho_i/np.sqrt(b):
# # invtau[i]=1.-1.3*np.power((rho[i]/rho_d),2)
# invtau[i] = 1.-1.3*np.power((rho[i]/rho_i), 2)
# else:
# invtau[i] = 0.
selector =rho <= rho_i/np.sqrt(b)
invtau[selector] = 1.-1.3*(rho[selector]/rho_i)**2
D = m*p*invtau*Dai*(1/rho-1/rho_d)/(R*T*alpha18)
return D
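# --- Hedged usage sketch (illustrative values only): evaluate firn diffusivity
# for a toy exponential density profile.
def _diffusivity_demo():
    z = np.arange(0.0, 100.0, 1.0)                              # depth [m]
    rho = 340.0 + (920.0 - 340.0) * (1.0 - np.exp(-z / 30.0))   # density [kg/m^3]
    return diffusivity(rho, T=250.0, P=0.9)                     # [m^2/s] per level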
def densification(Tavg, bdot, rhos, z): # ,model='hljohnsen'):
''' Calculates steady state snow/firn depth density profiles using Herron-Langway type models.
Args:
Tavg: 10m temperature in Celsius ## CELSIUS!
bdot: accumulation rate in mwe/yr or (kg/m2/yr)
rhos: surface density in kg/m3
z: depth in true_metres
model can be: {'HLJohnsen' 'HerronLangway' 'LiZwally' 'Helsen' 'NabarroHerring'}
default is herronlangway. (The other models are tuned for non-stationary modelling (Read Arthern et al.2010 before applying in steady state).
Returns:
rho: density (kg/m3) for all z-values.
zieq: ice equivalent depth for all z-values.
t: age for all z-values (only taking densification into account.)
Example usage:
z=0:300
[rho,zieq,t]=densitymodel(-31.5,177,340,z,'HerronLangway')
plot(z,rho)
References:
Herron-Langway type models. (Arthern et al. 2010 formulation).
<NAME>, University of Copenhagen 2010
Adapted by <NAME>, Brown University, 2017
Optimized by <NAME>, University of Southern California, 2017
'''
rhoi = 920.
rhoc = 550.
rhow = 1000.
rhos = 340.  # note: hard-coded surface density; this overrides the rhos argument passed in
R = 8.314
# Tavg=248.
# bdot=0.1
# Herron-Langway with Johnsen et al 2000 corrections.
# Small corrections to HL model which are not in Arthern et al. 2010
c0 = 0.85*11*(bdot/rhow)*np.exp(-10160./(R*Tavg))
c1 = 1.15*575*np.sqrt(bdot/rhow)*np.exp(-21400./(R*Tavg))
k0 = c0/bdot # ~g4
k1 = c1/bdot
# critical depth at which rho=rhoc
zc = (np.log(rhoc/(rhoi-rhoc))-np.log(rhos/(rhoi-rhos)))/(k0*rhoi) # g6
ix = z <= zc # find the z's above and below zc
upix = np.where(ix) # indices above zc
dnix = np.where(~ix) # indices below zc
q = np.zeros((z.shape)) # pre-allocate some space for q, rho
rho = np.zeros((z.shape))
# test to ensure that this will not blow up numerically if you have a very very long core.
# manually set all super deep layers to solid ice (rhoi=920)
NUM = k1*rhoi*(z-zc)+np.log(rhoc/(rhoi-rhoc))
numerical = np.where(NUM <= 100.0)
blowup = np.where(NUM > 100.0)
q[dnix] = np.exp(k1*rhoi*(z[dnix]-zc)+np.log(rhoc/(rhoi-rhoc))) # g7
q[upix] = np.exp(k0*rhoi*z[upix]+np.log(rhos/(rhoi-rhos))) # g7
rho[numerical] = q[numerical]*rhoi/(1+q[numerical]) # [g8] modified by fzhu to fix inconsistency of array size
rho[blowup] = rhoi
# only calculate this if you want zieq
tc = ( | np.log(rhoi-rhos) | numpy.log |
"""
Additional tests for PandasArray that aren't covered by
the interface tests.
"""
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.arrays import PandasArray
from pandas.core.arrays.numpy_ import PandasDtype
@pytest.fixture(
params=[
np.array(["a", "b"], dtype=object),
np.array([0, 1], dtype=float),
np.array([0, 1], dtype=int),
np.array([0, 1 + 2j], dtype=complex),
np.array([True, False], dtype=bool),
np.array([0, 1], dtype="datetime64[ns]"),
np.array([0, 1], dtype="timedelta64[ns]"),
]
)
def any_numpy_array(request):
"""
Parametrized fixture for NumPy arrays with different dtypes.
This excludes string and bytes.
"""
return request.param
# ----------------------------------------------------------------------------
# PandasDtype
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", True),
("uint", True),
("float", True),
("complex", True),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_numeric(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_numeric is expected
@pytest.mark.parametrize(
"dtype, expected",
[
("bool", True),
("int", False),
("uint", False),
("float", False),
("complex", False),
("str", False),
("bytes", False),
("datetime64[ns]", False),
("object", False),
("void", False),
],
)
def test_is_boolean(dtype, expected):
dtype = PandasDtype(dtype)
assert dtype._is_boolean is expected
def test_repr():
dtype = PandasDtype(np.dtype("int64"))
assert repr(dtype) == "PandasDtype('int64')"
def test_constructor_from_string():
result = PandasDtype.construct_from_string("int64")
expected = PandasDtype(np.dtype("int64"))
assert result == expected
# ----------------------------------------------------------------------------
# Construction
def test_constructor_no_coercion():
with pytest.raises(ValueError, match="NumPy array"):
PandasArray([1, 2, 3])
def test_series_constructor_with_copy():
ndarray = np.array([1, 2, 3])
ser = pd.Series(PandasArray(ndarray), copy=True)
assert ser.values is not ndarray
def test_series_constructor_with_astype():
ndarray = np.array([1, 2, 3])
result = pd.Series(PandasArray(ndarray), dtype="float64")
expected = pd.Series([1.0, 2.0, 3.0], dtype="float64")
tm.assert_series_equal(result, expected)
def test_from_sequence_dtype():
arr = np.array([1, 2, 3], dtype="int64")
result = PandasArray._from_sequence(arr, dtype="uint64")
expected = PandasArray(np.array([1, 2, 3], dtype="uint64"))
tm.assert_extension_array_equal(result, expected)
def test_constructor_copy():
arr = np.array([0, 1])
result = PandasArray(arr, copy=True)
assert np.shares_memory(result._ndarray, arr) is False
def test_constructor_with_data(any_numpy_array):
nparr = any_numpy_array
arr = PandasArray(nparr)
assert arr.dtype.numpy_dtype == nparr.dtype
# ----------------------------------------------------------------------------
# Conversion
def test_to_numpy():
arr = PandasArray(np.array([1, 2, 3]))
result = arr.to_numpy()
assert result is arr._ndarray
result = arr.to_numpy(copy=True)
assert result is not arr._ndarray
result = arr.to_numpy(dtype="f8")
expected = np.array([1, 2, 3], dtype="f8")
tm.assert_numpy_array_equal(result, expected)
# ----------------------------------------------------------------------------
# Setitem
def test_setitem_series():
ser = pd.Series([1, 2, 3])
ser.array[0] = 10
expected = pd.Series([10, 2, 3])
tm.assert_series_equal(ser, expected)
def test_setitem(any_numpy_array):
nparr = any_numpy_array
arr = PandasArray(nparr, copy=True)
arr[0] = arr[1]
nparr[0] = nparr[1]
tm.assert_numpy_array_equal(arr.to_numpy(), nparr)
# ----------------------------------------------------------------------------
# Reductions
def test_bad_reduce_raises():
arr = np.array([1, 2, 3], dtype="int64")
arr = PandasArray(arr)
msg = "cannot perform not_a_method with type int"
with pytest.raises(TypeError, match=msg):
arr._reduce(msg)
def test_validate_reduction_keyword_args():
arr = PandasArray(np.array([1, 2, 3]))
msg = "the 'keepdims' parameter is not supported .*all"
with pytest.raises(ValueError, match=msg):
arr.all(keepdims=True)
# ----------------------------------------------------------------------------
# Ops
def test_ufunc():
arr = PandasArray(np.array([-1.0, 0.0, 1.0]))
result = np.abs(arr)
expected = PandasArray(np.abs(arr._ndarray))
tm.assert_extension_array_equal(result, expected)
r1, r2 = np.divmod(arr, np.add(arr, 2))
e1, e2 = np.divmod(arr._ndarray, np.add(arr._ndarray, 2))
e1 = PandasArray(e1)
e2 = PandasArray(e2)
tm.assert_extension_array_equal(r1, e1)
tm.assert_extension_array_equal(r2, e2)
def test_basic_binop():
# Just a basic smoke test. The EA interface tests exercise this
# more thoroughly.
x = PandasArray( | np.array([1, 2, 3]) | numpy.array |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Core module of the pulse drawer.
This module provides the `DrawerCanvas` which is a collection of `Chart` object.
The `Chart` object is a collection of drawings. A user can assign multiple channels
to a single chart instance. For example, we can define a chart for specific qubit
and assign all related channels to the chart. This chart-channel mapping is defined by
the function specified by ``layout.chart_channel_map`` of the stylesheet.
Because this chart instance is decoupled from the coordinate system of the plotter,
we can arbitrarily place charts on the plotter canvas, e.g. if we want to create a 3D plot,
each chart may be placed on the X-Z plane and charts are arranged along the Y-axis.
Thus this data model maximizes the flexibility to generate an output image.
The chart instance is not just a container of drawings, as it also performs
data processing like binding abstract coordinates and truncating long pulses for an axis break.
Each chart object has `.parent` which points to the `DrawerCanvas` instance so that
each child chart can refer to the global figure settings such as time range and axis break.
Initialization
~~~~~~~~~~~~~~
The `DrawerCanvas` and `Chart` are not exposed to users as they are implicitly
initialized in the interface function. It is noteworthy that the data canvas is agnostic
to plotters. This means once the canvas instance is initialized we can reuse this data
among multiple plotters. The canvas is initialized with a stylesheet and quantum backend
information :py:class:~`qiskit.visualization.pulse_v2.device_info.DrawerBackendInfo`.
Chart instances are automatically generated when pulse program is loaded.
```python
canvas = DrawerCanvas(stylesheet=stylesheet, device=device)
canvas.load_program(sched)
canvas.update()
```
Once all properties are set, `.update` method is called to apply changes to drawings.
If the `DrawerCanvas` is initialized without backend information, the output shows
the time in units of the system cycle time `dt` and the frequencies are initialized to zero.
Update
~~~~~~
To update the image, a user can set new values to canvas and then call the `.update` method.
```python
canvas.set_time_range(2000, 3000, seconds=False)
canvas.update()
```
All stored drawings are updated accordingly. The plotter API can access
drawings through the `.collections` property of each chart instance. This returns
an iterator of drawings, each paired with its unique data key.
If a plotter provides object handler for plotted shapes, the plotter API can manage
the lookup table of the handler and the drawing by using this data key.
"""
from copy import deepcopy
from enum import Enum
from functools import partial
from itertools import chain
from typing import Union, List, Tuple, Iterator, Optional
import numpy as np
from qiskit import pulse
from qiskit.pulse.transforms import target_qobj_transform
from qiskit.visualization.exceptions import VisualizationError
from qiskit.visualization.pulse_v2 import events, types, drawings, device_info
from qiskit.visualization.pulse_v2.stylesheet import QiskitPulseStyle
class DrawerCanvas:
"""Collection of `Chart` and configuration data.
Pulse channels are associated with some `Chart` instance and
    drawing data objects are stored in the `Chart` instance.
Device, stylesheet, and some user generators are stored in the `DrawingCanvas`
and `Chart` instances are also attached to the `DrawerCanvas` as children.
Global configurations are accessed by those children to modify
the appearance of the `Chart` output.
"""
def __init__(self,
stylesheet: QiskitPulseStyle,
device: device_info.DrawerBackendInfo):
"""Create new data container with backend system information.
Args:
stylesheet: Stylesheet to decide appearance of output image.
device: Backend information to run the program.
"""
# stylesheet
self.formatter = stylesheet.formatter
self.generator = stylesheet.generator
self.layout = stylesheet.layout
# device info
self.device = device
# chart
self.global_charts = Chart(parent=self, name='global')
self.charts = []
# visible controls
self.disable_chans = set()
self.disable_types = set()
# data scaling
self.chan_scales = dict()
# global time
self._time_range = (0, 0)
self._time_breaks = []
# title
self.fig_title = ''
@property
def time_range(self) -> Tuple[int, int]:
"""Return current time range to draw.
Calculate net duration and add side margin to edge location.
Returns:
Time window considering side margin.
"""
t0, t1 = self._time_range
total_time_elimination = 0
for t0b, t1b in self.time_breaks:
if t1b > t0 and t0b < t1:
total_time_elimination += t1b - t0b
net_duration = t1 - t0 - total_time_elimination
new_t0 = t0 - net_duration * self.formatter['margin.left_percent']
new_t1 = t1 + net_duration * self.formatter['margin.right_percent']
return new_t0, new_t1
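    # Hedged worked example (not in the original source): with _time_range = (100, 500), a
    # single axis break (200, 300) inside it, and 5% left/right margins, the net duration is
    # 300 samples, so the getter above returns (100 - 15, 500 + 15) = (85, 515).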
@time_range.setter
def time_range(self, new_range: Tuple[int, int]):
"""Update time range to draw."""
self._time_range = new_range
@property
def time_breaks(self) -> List[Tuple[int, int]]:
"""Return time breaks with time range.
If an edge of time range is in the axis break period,
the axis break period is recalculated.
Raises:
VisualizationError: When axis break is greater than time window.
Returns:
List of axis break periods considering the time window edges.
"""
t0, t1 = self._time_range
axis_breaks = []
for t0b, t1b in self._time_breaks:
if t0b >= t1 or t1b <= t0:
# skip because break period is outside of time window
continue
if t0b < t0 and t1b > t1:
raise VisualizationError('Axis break is greater than time window. '
'Nothing will be drawn.')
if t0b < t0 < t1b:
if t1b - t0 > self.formatter['axis_break.length']:
new_t0 = t0 + 0.5 * self.formatter['axis_break.max_length']
axis_breaks.append((new_t0, t1b))
continue
if t0b < t1 < t1b:
if t1 - t0b > self.formatter['axis_break.length']:
new_t1 = t1 - 0.5 * self.formatter['axis_break.max_length']
axis_breaks.append((t0b, new_t1))
continue
axis_breaks.append((t0b, t1b))
return axis_breaks
@time_breaks.setter
def time_breaks(self, new_breaks: List[Tuple[int, int]]):
"""Set new time breaks."""
self._time_breaks = sorted(new_breaks, key=lambda x: x[0])
    def load_program(self, program: Union[pulse.Waveform, pulse.ParametricPulse, pulse.Schedule, pulse.ScheduleBlock]):
"""Load a program to draw.
Args:
            program: `Waveform`, `ParametricPulse`, `Schedule` or `ScheduleBlock` to draw.
Raises:
VisualizationError: When input program is invalid data format.
"""
if isinstance(program, (pulse.Schedule, pulse.ScheduleBlock)):
self._schedule_loader(program)
elif isinstance(program, (pulse.Waveform, pulse.ParametricPulse)):
self._waveform_loader(program)
else:
raise VisualizationError('Data type %s is not supported.' % type(program))
# update time range
self.set_time_range(0, program.duration, seconds=False)
# set title
self.fig_title = self.layout['figure_title'](program=program, device=self.device)
def _waveform_loader(self, program: Union[pulse.Waveform, pulse.ParametricPulse]):
"""Load Waveform instance.
This function is sub-routine of py:method:`load_program`.
Args:
program: `Waveform` to draw.
"""
chart = Chart(parent=self)
# add waveform data
fake_inst = pulse.Play(program, types.WaveformChannel())
inst_data = types.PulseInstruction(t0=0,
dt=self.device.dt,
frame=types.PhaseFreqTuple(phase=0, freq=0),
inst=fake_inst,
is_opaque=program.is_parameterized())
for gen in self.generator['waveform']:
obj_generator = partial(gen,
formatter=self.formatter,
device=self.device)
for data in obj_generator(inst_data):
chart.add_data(data)
self.charts.append(chart)
def _schedule_loader(self, program: Union[pulse.Schedule, pulse.ScheduleBlock]):
"""Load Schedule instance.
This function is sub-routine of py:method:`load_program`.
Args:
program: `Schedule` to draw.
"""
program = target_qobj_transform(program, remove_directives=False)
# initialize scale values
self.chan_scales = {}
for chan in program.channels:
if isinstance(chan, pulse.channels.DriveChannel):
self.chan_scales[chan] = self.formatter['channel_scaling.drive']
elif isinstance(chan, pulse.channels.MeasureChannel):
self.chan_scales[chan] = self.formatter['channel_scaling.measure']
elif isinstance(chan, pulse.channels.ControlChannel):
self.chan_scales[chan] = self.formatter['channel_scaling.control']
elif isinstance(chan, pulse.channels.AcquireChannel):
self.chan_scales[chan] = self.formatter['channel_scaling.acquire']
else:
self.chan_scales[chan] = 1.0
# create charts
mapper = self.layout['chart_channel_map']
for name, chans in mapper(channels=program.channels,
formatter=self.formatter,
device=self.device):
chart = Chart(parent=self, name=name)
# add standard pulse instructions
for chan in chans:
chart.load_program(program=program, chan=chan)
# add barriers
barrier_sched = program.filter(instruction_types=[pulse.instructions.RelativeBarrier],
channels=chans)
for t0, _ in barrier_sched.instructions:
inst_data = types.BarrierInstruction(t0, self.device.dt, chans)
for gen in self.generator['barrier']:
obj_generator = partial(gen,
formatter=self.formatter,
device=self.device)
for data in obj_generator(inst_data):
chart.add_data(data)
# add chart axis
chart_axis = types.ChartAxis(name=chart.name, channels=chart.channels)
for gen in self.generator['chart']:
obj_generator = partial(gen,
formatter=self.formatter,
device=self.device)
for data in obj_generator(chart_axis):
chart.add_data(data)
self.charts.append(chart)
# add snapshot data to global
snapshot_sched = program.filter(instruction_types=[pulse.instructions.Snapshot])
for t0, inst in snapshot_sched.instructions:
inst_data = types.SnapshotInstruction(t0, self.device.dt, inst.label, inst.channels)
for gen in self.generator['snapshot']:
obj_generator = partial(gen,
formatter=self.formatter,
device=self.device)
for data in obj_generator(inst_data):
self.global_charts.add_data(data)
# calculate axis break
self.time_breaks = self._calculate_axis_break(program)
def _calculate_axis_break(self, program: pulse.Schedule) -> List[Tuple[int, int]]:
"""A helper function to calculate axis break of long pulse sequence.
Args:
program: A schedule to calculate axis break.
Returns:
List of axis break periods.
"""
axis_breaks = []
edges = set()
for t0, t1 in chain.from_iterable(program.timeslots.values()):
if t1 - t0 > 0:
edges.add(t0)
edges.add(t1)
edges = sorted(edges)
for t0, t1 in zip(edges[:-1], edges[1:]):
if t1 - t0 > self.formatter['axis_break.length']:
t_l = t0 + 0.5 * self.formatter['axis_break.max_length']
t_r = t1 - 0.5 * self.formatter['axis_break.max_length']
axis_breaks.append((t_l, t_r))
return axis_breaks
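    # Hedged worked example (not in the original source): if instructions occupy (0, 100) and
    # (1000, 1100), with an axis_break.length threshold of 500 and axis_break.max_length of
    # 100, the idle gap (100, 1000) yields one break (150, 950), keeping 50 samples per side.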
def set_time_range(self,
t_start: Union[int, float],
t_end: Union[int, float],
seconds: bool = True):
"""Set time range to draw.
All child chart instances are updated when time range is updated.
Args:
t_start: Left boundary of drawing in units of cycle time or real time.
t_end: Right boundary of drawing in units of cycle time or real time.
seconds: Set `True` if times are given in SI unit rather than dt.
Raises:
VisualizationError: When times are given in float without specifying dt.
"""
# convert into nearest cycle time
if seconds:
if self.device.dt is not None:
t_start = int(np.round(t_start / self.device.dt))
t_end = int(np.round(t_end / self.device.dt))
else:
raise VisualizationError('Setting time range with SI units requires '
'backend `dt` information.')
self.time_range = (t_start, t_end)
def set_disable_channel(self,
channel: pulse.channels.Channel,
remove: bool = True):
"""Interface method to control visibility of pulse channels.
Specified object in the blocked list will not be shown.
Args:
channel: A pulse channel object to disable.
remove: Set `True` to disable, set `False` to enable.
"""
if remove:
self.disable_chans.add(channel)
else:
self.disable_chans.discard(channel)
def set_disable_type(self,
data_type: types.DataTypes,
remove: bool = True):
"""Interface method to control visibility of data types.
Specified object in the blocked list will not be shown.
Args:
data_type: A drawing data type to disable.
remove: Set `True` to disable, set `False` to enable.
"""
if isinstance(data_type, Enum):
data_type_str = str(data_type.value)
else:
data_type_str = data_type
if remove:
self.disable_types.add(data_type_str)
else:
self.disable_types.discard(data_type_str)
def update(self):
"""Update all associated charts and generate actual drawing data from template object.
This method should be called before the canvas is passed to the plotter.
"""
for chart in self.charts:
chart.update()
class Chart:
"""A collection of drawing to be shown on the same line.
Multiple pulse channels can be assigned to a single `Chart`.
The parent `DrawerCanvas` should be specified to refer to the current user preference.
The vertical value of each `Chart` should be in the range [-1, 1].
This truncation should be performed in the plotter interface.
"""
# unique index of chart
chart_index = 0
# list of waveform type names
waveform_types = [str(types.WaveformType.REAL.value),
str(types.WaveformType.IMAG.value),
str(types.WaveformType.OPAQUE.value)]
def __init__(self, parent: DrawerCanvas, name: Optional[str] = None):
"""Create new chart.
Args:
parent: `DrawerCanvas` that this `Chart` instance belongs to.
name: Name of this `Chart` instance.
"""
self.parent = parent
# data stored in this channel
self._collections = dict()
self._output_dataset = dict()
# channel metadata
self.index = self._cls_index()
self.name = name or ''
self._channels = set()
# vertical axis information
self.vmax = 0
self.vmin = 0
self.scale = 1.0
self._increment_cls_index()
def add_data(self, data: drawings.ElementaryData):
"""Add drawing to collections.
If the given object already exists in the collections,
        this interface replaces the old object instead of adding a new entry.
Args:
data: New drawing to add.
"""
self._collections[data.data_key] = data
def load_program(self,
program: pulse.Schedule,
chan: pulse.channels.Channel):
"""Load pulse schedule.
This method internally generates `ChannelEvents` to parse the program
for the specified pulse channel. This method is called once
Args:
program: Pulse schedule to load.
            chan: A pulse channel associated with this instance.
"""
chan_events = events.ChannelEvents.load_program(program, chan)
chan_events.set_config(dt=self.parent.device.dt,
init_frequency=self.parent.device.get_channel_frequency(chan),
init_phase=0)
# create objects associated with waveform
for gen in self.parent.generator['waveform']:
waveforms = chan_events.get_waveforms()
obj_generator = partial(gen,
formatter=self.parent.formatter,
device=self.parent.device)
drawing_items = [obj_generator(waveform) for waveform in waveforms]
for drawing_item in list(chain.from_iterable(drawing_items)):
self.add_data(drawing_item)
# create objects associated with frame change
for gen in self.parent.generator['frame']:
frames = chan_events.get_frame_changes()
obj_generator = partial(gen,
formatter=self.parent.formatter,
device=self.parent.device)
drawing_items = [obj_generator(frame) for frame in frames]
for drawing_item in list(chain.from_iterable(drawing_items)):
self.add_data(drawing_item)
self._channels.add(chan)
def update(self):
"""Update vertical data range and scaling factor of this chart.
Those parameters are updated based on current time range in the parent canvas.
"""
self._output_dataset.clear()
self.vmax = 0
self.vmin = 0
# waveform
for key, data in self._collections.items():
if data.data_type not in Chart.waveform_types:
continue
# truncate, assume no abstract coordinate in waveform sample
trunc_x, trunc_y = self._truncate_data(data)
# no available data points
if trunc_x.size == 0 or trunc_y.size == 0:
continue
# update y range
scale = min(self.parent.chan_scales.get(chan, 1.0) for chan in data.channels)
self.vmax = max(scale * np.max(trunc_y), self.vmax)
self.vmin = min(scale * np.min(trunc_y), self.vmin)
# generate new data
new_data = deepcopy(data)
new_data.xvals = trunc_x
new_data.yvals = trunc_y
self._output_dataset[key] = new_data
# calculate chart level scaling factor
if self.parent.formatter['control.auto_chart_scaling']:
max_val = max(abs(self.vmax),
abs(self.vmin),
self.parent.formatter['general.vertical_resolution'])
self.scale = min(1.0 / max_val, self.parent.formatter['general.max_scale'])
else:
self.scale = 1.0
# update vertical range with scaling and limitation
self.vmax = max(self.scale * self.vmax,
self.parent.formatter['channel_scaling.pos_spacing'])
self.vmin = min(self.scale * self.vmin,
self.parent.formatter['channel_scaling.neg_spacing'])
# other data
for key, data in self._collections.items():
if data.data_type in Chart.waveform_types:
continue
# truncate
trunc_x, trunc_y = self._truncate_data(data)
# no available data points
if trunc_x.size == 0 or trunc_y.size == 0:
continue
# generate new data
new_data = deepcopy(data)
new_data.xvals = trunc_x
new_data.yvals = trunc_y
self._output_dataset[key] = new_data
@property
def is_active(self) -> bool:
"""Check if there is any active waveform data in this entry.
Returns:
Return `True` if there is any visible waveform in this chart.
"""
for data in self._output_dataset.values():
if data.data_type in Chart.waveform_types and self._check_visible(data):
return True
return False
@property
def collections(self) -> Iterator[Tuple[str, drawings.ElementaryData]]:
"""Return currently active entries from drawing data collection.
The object is returned with unique name as a key of an object handler.
When the horizontal coordinate contains `AbstractCoordinate`,
the value is substituted by current time range preference.
"""
for name, data in self._output_dataset.items():
# prepare unique name
unique_id = 'chart{ind:d}_{key}'.format(ind=self.index, key=name)
if self._check_visible(data):
yield unique_id, data
@property
def channels(self) -> List[pulse.channels.Channel]:
"""Return a list of channels associated with this chart.
Returns:
List of channels associated with this chart.
"""
return list(self._channels)
def _truncate_data(self,
data: drawings.ElementaryData) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to truncate drawings according to time breaks.
# TODO: move this function to common module to support axis break for timeline.
Args:
data: Drawing object to truncate.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
xvals = self._bind_coordinate(data.xvals)
yvals = self._bind_coordinate(data.yvals)
if isinstance(data, drawings.BoxData):
            # truncate box data. these objects don't require interpolation at an axis break.
return self._truncate_boxes(xvals, yvals)
elif data.data_type in [types.LabelType.PULSE_NAME, types.LabelType.OPAQUE_BOXTEXT]:
# truncate pulse labels. these objects are not removed by truncation.
return self._truncate_pulse_labels(xvals, yvals)
else:
# other objects
return self._truncate_vectors(xvals, yvals)
def _truncate_pulse_labels(self,
xvals: np.ndarray,
yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to remove text according to time breaks.
Args:
xvals: Time points.
yvals: Data points.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
xpos = xvals[0]
t0, t1 = self.parent.time_range
if xpos < t0 or xpos > t1:
return np.array([]), np.array([])
offset_accumulation = 0
for tl, tr in self.parent.time_breaks:
if xpos < tl:
return np.array([xpos - offset_accumulation]), yvals
if tl < xpos < tr:
return np.array([tl - offset_accumulation]), yvals
else:
offset_accumulation += tr - tl
return np.array([xpos - offset_accumulation]), yvals
def _truncate_boxes(self,
xvals: np.ndarray,
yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to clip box object according to time breaks.
Args:
xvals: Time points.
yvals: Data points.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
x0, x1 = xvals
t0, t1 = self.parent.time_range
if x1 < t0 or x0 > t1:
# out of drawing range
return np.array([]), np.array([])
# clip outside
x0 = max(t0, x0)
x1 = min(t1, x1)
offset_accumulate = 0
for tl, tr in self.parent.time_breaks:
tl -= offset_accumulate
tr -= offset_accumulate
#
# truncate, there are 5 patterns wrt the relative position of truncation and xvals
#
if x1 < tl:
break
if tl < x0 and tr > x1:
# case 1: all data points are truncated
# : +-----+ :
# : |/////| :
# -----:---+-----+---:-----
# l 0 1 r
return np.array([]), np.array([])
elif tl < x1 < tr:
# case 2: t < tl, right side is truncated
# +---:-----+ :
# | ://///| :
# -----+---:-----+---:-----
# 0 l 1 r
x1 = tl
elif tl < x0 < tr:
# case 3: tr > t, left side is truncated
# : +-----:---+
# : |/////: |
# -----:---+-----:---+-----
# l 0 r 1
x0 = tl
x1 = tl + t1 - tr
elif tl > x0 and tr < x1:
# case 4: tr > t > tl, middle part is truncated
# +---:-----:---+
# | ://///: |
# -----+---:-----:---+-----
# 0 l r 1
x1 -= tr - tl
elif tr < x0:
                # case 5: tr < x0, nothing truncated but a time shift is needed
# : : +---+
# : : | |
# -----:---:-----+---+-----
# l r 0 1
x0 -= tr - tl
x1 -= tr - tl
offset_accumulate += tr - tl
return np.asarray([x0, x1], dtype=float), yvals
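    # Hedged worked example (not in the original source): for a box spanning (10, 50) with a
    # single break (20, 30) fully inside it (case 4 above), the 10-sample break is removed
    # from the box length and the method returns the clipped box (10, 40).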
def _truncate_vectors(self,
xvals: np.ndarray,
yvals: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""A helper function to remove sequential data points according to time breaks.
Args:
xvals: Time points.
yvals: Data points.
Returns:
Set of truncated numpy arrays for x and y coordinate.
"""
xvals = np.asarray(xvals, dtype=float)
yvals = np.asarray(yvals, dtype=float)
t0, t1 = self.parent.time_range
if max(xvals) < t0 or min(xvals) > t1:
# out of drawing range
return np.array([]), np.array([])
if min(xvals) < t0:
# truncate x less than left limit
inds = xvals > t0
yvals = np.append(np.interp(t0, xvals, yvals), yvals[inds])
xvals = np.append(t0, xvals[inds])
if max(xvals) > t1:
# truncate x larger than right limit
inds = xvals < t1
yvals = np.append(yvals[inds], | np.interp(t1, xvals, yvals) | numpy.interp |
import os
import sys
import re
import argparse
import json
import tensorflow as tf
import numpy as np
import csv
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import sklearn.metrics
import torch
import logging
from mlio.pipeline import Pipeline, Dataset
from pytorch_lightning.callbacks import Callback
import torchvision
def get_model_args(sample, model_args):
    if callable(model_args):
        return model_args(sample)
    if isinstance(model_args, str):
        return sample[model_args]
    if isinstance(model_args, (list, set)):
        return [get_model_args(sample, x) for x in model_args]
    raise TypeError("Unsupported model_args spec: {}".format(type(model_args)))
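# Hedged usage sketch (not part of the original module): get_model_args resolves a model
# input spec against a sample dict; the spec may be a key, a callable, or a list of either.
def _demo_get_model_args():
    sample = {"image": [[0.1, 0.2]], "id": 7}           # illustrative sample
    by_key = get_model_args(sample, "image")            # -> sample["image"]
    by_list = get_model_args(sample, ["image", "id"])   # -> [sample["image"], 7]
    by_fn = get_model_args(sample, lambda s: s["id"])   # -> 7
    return by_key, by_list, by_fn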
class FilterClusterManager(Callback):
def __init__(
self,
path,
cache_writer,
filter_dataloader,
min_elements=5,
silhouette_score=0.1,
element_cache=200,
min_k=2,
max_k=20,
version=0,
device="cpu",
index="id",
step=None,
epoch=None,
first_epoch=True,
model_input="image",
model_output="feature",
concept_list="concept_ids",
resnet_imagenet=True,
):
self.feature_cache = {}
if not os.path.exists(path):
os.makedirs(path)
self.path = os.path.join(path, f"{version}.json")
self.meta_path = os.path.join(path, f"{version}_meta.json")
self.element_cache = element_cache
self.min_elements = min_elements
self.silhouette_score = silhouette_score
self.filter_dataloader = filter_dataloader
self.device = device
self.index = index
self.concept_list = concept_list
self.cache_writer = cache_writer
self._iter = 0
self.min_k = min_k
self.max_k = max_k
self.step = step
self.epoch = epoch
self.first_epoch = first_epoch
self.model_input = model_input
self.model_output = model_output
self.resnet_imagenet = resnet_imagenet
if resnet_imagenet:
            logging.info('Filter uses only a pretrained model')
resnet_model = torchvision.models.resnet.resnet50(pretrained=True)
self.imagenet_features = torch.nn.Sequential(*list(resnet_model.children())[:-1]).to(torch.device("cuda:0"))
self._epoch_counter = 0 if first_epoch else epoch # We will start before the first sample arrives
self._step_counter = step # We will start before the first sample arrives
if step is None and epoch is None:
self.epoch = 1
if step is not None and epoch is not None:
print("I should learn assert")
exit()
def on_epoch_start(self, trainer, pl_module):
if self.first_epoch and self._epoch_counter == 0:
self._epoch_counter -= 1
return self.clustering(trainer, pl_module)
if self.epoch is None:
return
self._epoch_counter -= 1
if self._epoch_counter > 0:
return
self._epoch_counter = self.epoch
result = self.clustering(trainer, pl_module)
return result
def on_batch_start(self, trainer, pl_module):
if self.step is None:
return
self._step_counter -= 1
if self._step_counter > 0:
return
self._step_counter = self.step
result = self.clustering(trainer, pl_module)
return result
def clustering(self, trainer, pl_module):
rank = 0
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
if rank == 0:
logging.info(f"Cluster: start filtering")
self._iter += 1
result = {
"filter/reject_count": 0,
"filter/accept_count": 0,
"filter/iter": self._iter,
"filter/k_hist": | np.zeros(shape=self.max_k) | numpy.zeros |
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import os
# customized
from scipy.optimize import minimize
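# Note (not part of the original script): `se.closest_point(x, y, x0)` is called in the
# background-parameter guesses below, but no `se` module is imported in this file. The block
# below is a hedged, minimal stand-in consistent with that usage (the y value at the sampled
# x closest to x0), used only if the real helper module is unavailable.
try:
    import se
except ImportError:
    class se:
        @staticmethod
        def closest_point(x, y, x0):
            return np.asarray(y)[np.argmin(np.abs(np.asarray(x) - x0))]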
class SolarlikeFourierAnalysis:
"""docstring for SolarlikeFourierAnalysis"""
def __init__(self, starname, outputdir, fnyq, numax):
sep = "\\" if os.name=="nt" else "/"
self.starname = starname
self.outputdir = outputdir # "with a / in the end"
assert outputdir.endswith(sep), "outputdir should end with "+sep
self.fnyq = fnyq # in microHz (muHz)
self.numax = numax # in microHz (muHz)
self.dnu = (self.numax/3050)**0.77 * 135.1 # Stello+2009
return
# def pass_light_curves
def pass_power_spectrum(self, freq, power, trimUpperLimitInDnu=None,
trimLowerLimitInDnu=None, ifGlobalMode=True):
        idx = np.ones(len(freq), dtype=bool)
freq = np.array(freq)
power = np.array(power)
if not trimUpperLimitInDnu is None:
idx = (idx) & (freq<=self.numax+trimUpperLimitInDnu*self.dnu)
if not trimLowerLimitInDnu is None:
idx = (idx) & (freq>=self.numax-trimLowerLimitInDnu*self.dnu)
if ifGlobalMode:
self.freq = freq[idx]
self.power = power[idx]
return
else:
return freq[idx], power[idx]
def __smooth_power(self, period=None):
        if period is None: period = self.dnu/15.0  # microHz
self.powers = self._smooth_wrapper(self.freq, self.power, period, "bartlett")
return
def _smooth_wrapper(self, x, y, period, windowtype, samplinginterval=None):
if samplinginterval is None: samplinginterval = np.median(x[1:-1] - x[0:-2])
if not windowtype in ["flat", "hanning", "hamming", "bartlett", "blackman"]:
raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
xp = np.arange(np.min(x), np.max(x), samplinginterval)
yp = np.interp(xp, x, y)
window_len = int(period/samplinginterval)
if window_len % 2 == 0:
window_len = window_len + 1
if windowtype == "flat":
w = np.ones(window_len,"d")
else:
            w = getattr(np, windowtype)(window_len)
ys = np.convolve(w/w.sum(),yp,mode="same")
yf = np.interp(x, xp, ys)
return yf
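    # Hedged note (not part of the original script): _smooth_wrapper resamples (x, y) onto a
    # uniform grid, convolves with the chosen window whose length corresponds to `period`
    # (in the same frequency units as x), then interpolates back onto the original grid.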
def __background_model(self, bgpara, bgtype="withgaussian", granNumber=1):
flatNoiseLevel = bgpara[0]
height = bgpara[1]
numax = bgpara[2]
sigma = bgpara[3]
ampHarvey, freqHarvey, powerHarvery = [], [], []
for igran in range(granNumber):
            ampHarvey.append(bgpara[4+igran*3])
            freqHarvey.append(bgpara[4+igran*3+1])
            powerHarvery.append(bgpara[4+igran*3+2])
zeta = 2.0*2.0**0.5/np.pi
power_gran = np.zeros(len(self.freq))
for igran in range(granNumber):
power_gran += zeta*ampHarvey[igran]**2.0/(freqHarvey[igran]*(1+(self.freq/freqHarvey[igran])**powerHarvery[igran]))
power_gaussian = height * np.exp(-1.0*(numax-self.freq)**2/(2.0*sigma**2.0))
if bgtype == "withgaussian":
power = power_gran + power_gaussian
elif bgtype == "withoutgaussian":
power = power_gran
power *= self.__response_function()
power += flatNoiseLevel
return power
def __response_function(self):
sincfunctionarg = (np.pi/2.0)*self.freq/self.fnyq
responsefunction = (np.sin(sincfunctionarg)/sincfunctionarg)**2.0
return responsefunction
def __guess_background_parameters(self, granNumber=1):
zeta = 2*2**0.5/np.pi
flatNoiseLevel = np.median(self.powers[int(len(self.freq)*0.9):])
height = se.closest_point(self.freq, self.powers, self.numax)
sigma = 3.0 * self.dnu
freqHarvey_solar = [2440.5672465, 735.4653975, 24.298031575000003]
numax_solar = 3050
ampHarvey, freqHarvey= [], []
for igran in range(granNumber):
freqHarvey.append(self.numax/numax_solar*freqHarvey_solar[igran])
ampHarvey.append((se.closest_point(self.freq, self.powers, freqHarvey[igran])*2/zeta*freqHarvey[igran])**0.5)
init = [flatNoiseLevel, height, self.numax, sigma]
bounds = [[flatNoiseLevel*0.9, flatNoiseLevel*1.1],
[height*0.2, height*5.0],
[self.numax*0.8, self.numax*1.2],
[sigma*0.5, sigma*4.0]]
names = ["W", "H", "numax", "sigma"]
for igran in range(granNumber):
init.append(ampHarvey[igran])
init.append(freqHarvey[igran])
init.append(4.0)
bounds.append([ampHarvey[igran]*0.3, ampHarvey[igran]*3.0])
bounds.append([freqHarvey[igran]*0.2, freqHarvey[igran]*5.0])
bounds.append([2.0, 8.0])
names.append("a"+str(igran))
names.append("b"+str(igran))
names.append("c"+str(igran))
return init, bounds, names
def fit_background(self, granNumber=1, ifdisplay=True):
        assert granNumber in [1,2,3], "granNumber should be one of 1, 2 or 3."
assert ("freq" in self.__dict__) & ("power" in self.__dict__), "Power spectrum must be passed in before any fitting."
def residuals_bg(bgpara):
model = self.__background_model(bgpara, bgtype="withgaussian", granNumber=granNumber)
return np.sum(np.log(model) + self.power/model)
self.__smooth_power()
init, bounds, names = self.__guess_background_parameters(granNumber=granNumber)
res = minimize(residuals_bg, init, bounds=bounds)
bgpara = res.x
# save backbround parameters
print("Background parameters "+", ".join(names))
print(bgpara)
np.savetxt(self.outputdir+"bgpara.txt", bgpara,
header=", ".join(names))
power_bg = self.__background_model(bgpara, bgtype="withoutgaussian", granNumber=granNumber)
power_bg_wg = self.__background_model(bgpara, bgtype="withgaussian", granNumber=granNumber)
# divide the background and save power spectrum
self.snr = self.power/power_bg
SNRData = | np.array([self.freq, self.snr]) | numpy.array |
# -*- coding: utf-8 -*-
"""
This code allows us to run classical clustering approaches, namely Kmeans, Spherical Kmeans, and an Auto-encoder.
"""
import numpy as np
import pandas as pd
from sklearn.utils import check_random_state
from coclust import clustering
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from coclust.evaluation.external import accuracy
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn import metrics
import time
import random
from keras.models import Model
from keras.layers import Dense, Input
def random_init(n_clusters, n_cols, random_state=None):
"""Create a random column cluster assignment matrix.
Each row contains 1 in the column corresponding to the cluster where the
processed data matrix column belongs, 0 elsewhere.
Parameters
----------
n_clusters: int
Number of clusters
n_cols: int
Number of columns of the data matrix (i.e. number of rows of the
matrix returned by this function)
random_state : int or :class:`numpy.RandomState`, optional
The generator used to initialize the cluster labels. Defaults to the
global numpy random number generator.
Returns
-------
matrix
Matrix of shape (``n_cols``, ``n_clusters``)
"""
    if random_state is None:
W_a = np.random.randint(n_clusters, size=n_cols)
else:
random_state = check_random_state(random_state)
W_a = random_state.randint(n_clusters, size=n_cols)
W = np.zeros((n_cols, n_clusters))
W[np.arange(n_cols), W_a] = 1
return W
def purity_score(y_true, y_pred):
# compute contingency matrix (also called confusion matrix)
contingency_matrix = metrics.cluster.contingency_matrix(y_true, y_pred)
# return purity
return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)
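# Hedged worked example (not part of the original script): for y_true = [0, 0, 1, 1] and
# y_pred = [1, 1, 1, 0], the contingency matrix is [[0, 2], [1, 1]], the column-wise maxima
# are 1 and 2, so purity = (1 + 2) / 4 = 0.75.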
global_path = './data/'
path_to_save = './results/resultViewClustering/'
nom_BDD = ['DBLP1','DBLP2','PubMed_Diabets' ,'classic3', 'classic4', 'ag_news']
nCLusters = [3,3,3,3, 4, 4]
nbrIteration = 30
nbSlices = 8
nbAlgorithms = 3
df_results = pd.DataFrame(columns=["Dataset", "Time", "ACC", "NMI", "ARI", "Purity",'Algorithm','View'],
index=np.arange(nbrIteration * ((len(nom_BDD) * nbSlices*nbAlgorithms ))).tolist())
cpt = 0
for nb in range(len(nom_BDD)):
print('###############################################')
print('nom_BDD ', nom_BDD[nb])
bdd_name = nom_BDD[nb]
inf_doc = pd.read_csv(global_path+bdd_name+'/' + bdd_name+'.csv', delimiter=',')
abstracts = np.asarray(inf_doc['text']).astype(str).tolist()
# print(inf_doc)
labels_all = np.asarray(inf_doc['label']).astype(int)
n_new = inf_doc.shape[0]
d_new = inf_doc.shape[1]
labels = labels_all[0:n_new]
labels = labels.tolist()
##################################################################
# hyperparameters #
##################################################################
K = nCLusters[nb]
print('K ', K)
del inf_doc
##################################################################
# Load DBLP1 dataset #
##################################################################
simBow = np.load(global_path +bdd_name+'/' + 'view_bow' + '.npz')
simBow = simBow['arr_0']
print('simBow ', simBow.shape)
simBert = np.load(global_path +bdd_name+'/' + 'view_bert-base-cased' + '.npz')
simBert = simBert['arr_0']
simBertLPCA = | np.load(global_path +bdd_name+'/' + 'view_avgpca__bert-large-cased' + '.npz') | numpy.load |
import itertools
import math
import numpy as np
import tinychain as tc
import unittest
from testutils import DEFAULT_PORT, start_host, PersistenceTest
ENDPOINT = "/transact/hypothetical"
class DenseTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.host = start_host("test_dense_tensor")
def testConstant(self):
c = 1.414
shape = [3, 2, 1]
cxt = tc.Context()
cxt.tensor = tc.tensor.Dense.constant(shape, c)
cxt.result = tc.After(cxt.tensor[0, 0, 0].write(0), cxt.tensor)
expected = expect_dense(tc.F64, shape, [0] + [c] * (np.product(shape) - 1))
actual = self.host.post(ENDPOINT, cxt)
self.assertEqual(expected, actual)
def testSlice(self):
shape = [2, 5]
cxt = tc.Context()
cxt.tensor = tc.tensor.Dense.arange(shape, 1, 11)
cxt.result = cxt.tensor[1, 2:-1]
actual = self.host.post(ENDPOINT, cxt)
expected = expect_dense(tc.I64, [2], np.arange(1, 11).reshape([2, 5])[1, 2:-1])
self.assertEqual(actual, expected)
def testAssignSlice(self):
cxt = tc.Context()
cxt.big = tc.tensor.Dense.zeros([2, 2, 5])
cxt.small = tc.tensor.Dense.arange([2, 5], 1, 11)
cxt.result = tc.After(cxt.big[1, :2].write(cxt.small[0]), cxt.big)
actual = self.host.post(ENDPOINT, cxt)
expected = np.zeros([2, 2, 5], np.int64)
expected[1, 0:2] = np.arange(1, 11).reshape(2, 5)[0]
expected = expect_dense(tc.I64, [2, 2, 5], expected.flatten())
self.assertEqual(actual, expected)
def testAdd(self):
cxt = tc.Context()
cxt.left = tc.tensor.Dense.arange([5, 2, 2], 1., 21.)
cxt.right = tc.tensor.Dense.constant([2, 5, 1, 2], 2)
cxt.result = cxt.left + cxt.right
actual = self.host.post(ENDPOINT, cxt)
left = np.arange(1., 21., 1.).reshape([5, 2, 2])
right = np.ones([2, 5, 1, 2], np.int32) * 2
expected = expect_dense(tc.F64, [2, 5, 2, 2], (left + right).flatten())
self.assertEqual(actual, expected)
def testDiv(self):
shape = [3]
cxt = tc.Context()
cxt.left = tc.tensor.Dense.arange(shape, 2., 8.)
cxt.right = tc.tensor.Dense.constant([1], 2)
cxt.result = cxt.left / cxt.right
actual = self.host.post(ENDPOINT, cxt)
expected = expect_dense(tc.F64, shape, np.arange(1, 4))
self.assertEqual(actual, expected)
def testMul(self):
shape = [5, 2, 1]
cxt = tc.Context()
cxt.left = tc.tensor.Dense.arange(shape, 1, 11)
cxt.right = tc.tensor.Dense.constant([5], 2)
cxt.result = cxt.left * cxt.right
actual = self.host.post(ENDPOINT, cxt)
left = np.arange(1, 11).reshape(shape)
right = np.ones([5]) * 2
expected = left * right
expected = expect_dense(tc.I64, list(expected.shape), expected.flatten())
self.assertEqual(actual, expected)
def testMulWithBroadcast(self):
tau = np.array([[4.188]])
v = np.array([[1], [0.618]])
cxt = tc.Context()
cxt.tau = load_dense(tau)
cxt.v = load_dense(v)
cxt.result = cxt.tau * tc.tensor.einsum("ij,kj->ik", [cxt.v, cxt.v])
actual = self.host.post(ENDPOINT, cxt)
expected = tau * (v @ v.T)
self.assertEqual(expected.shape, tuple(actual[tc.uri(tc.tensor.Dense)][0][0]))
self.assertTrue(np.allclose(expected.flatten(), actual[tc.uri(tc.tensor.Dense)][1]))
def testSub(self):
shape = [1, 3]
cxt = tc.Context()
cxt.left = tc.tensor.Dense.arange(shape, 0, 6)
cxt.right = tc.tensor.Dense.constant([1], 2)
cxt.result = cxt.left - cxt.right
actual = self.host.post(ENDPOINT, cxt)
expected = expect_dense(tc.I64, shape, np.arange(-2, 4, 2))
self.assertEqual(actual, expected)
def testLogarithm(self):
size = 1_000_000
shape = [10, size / 10]
cxt = tc.Context()
cxt.x = tc.tensor.Dense.arange(shape, 2, size + 2)
cxt.ln = cxt.x.log()
cxt.log = cxt.x.log(math.e)
cxt.test = (cxt.ln == cxt.log).all()
self.assertTrue(self.host.post(ENDPOINT, cxt))
def testLogic(self):
big = [20, 20, 10]
trailing = [10]
cxt = tc.Context()
cxt.big_ones = tc.tensor.Dense.ones(big, tc.U8)
cxt.big_zeros = tc.tensor.Dense.zeros(big, tc.U8)
cxt.true = tc.tensor.Dense.ones(trailing)
cxt.false = tc.tensor.Dense.zeros(trailing)
cxt.result = [
cxt.big_ones.logical_and(cxt.false).any(),
cxt.big_ones.logical_and(cxt.true).all(),
cxt.big_zeros.logical_or(cxt.true).all(),
cxt.big_zeros.logical_or(cxt.false).any(),
cxt.big_ones.logical_xor(cxt.big_zeros).all(),
]
actual = self.host.post(ENDPOINT, cxt)
self.assertEqual(actual, [False, True, True, False, True])
def testProduct(self):
shape = [2, 3, 4]
axis = 1
cxt = tc.Context()
cxt.big = tc.tensor.Dense.arange(shape, 0, 24)
cxt.result = cxt.big.product(axis)
actual = self.host.post(ENDPOINT, cxt)
expected = np.product(np.arange(0, 24).reshape(shape), axis)
self.assertEqual(actual, expect_dense(tc.I64, [2, 4], expected.flatten()))
def testProductAll(self):
shape = [2, 3]
cxt = tc.Context()
cxt.big = tc.tensor.Dense.arange(shape, 1, 7)
cxt.result = cxt.big.product()
actual = self.host.post(ENDPOINT, cxt)
self.assertEqual(actual, np.product(range(1, 7)))
def testSum(self):
shape = [4, 2, 3, 5]
axis = 2
cxt = tc.Context()
cxt.big = tc.tensor.Dense.arange(shape, 0., 120.)
cxt.result = cxt.big.sum(axis)
actual = self.host.post(ENDPOINT, cxt)
expected = np.sum(np.arange(0, 120).reshape(shape), axis)
self.assertEqual(actual, expect_dense(tc.F64, [4, 2, 5], expected.flatten()))
def testSumAll(self):
shape = [5, 2]
cxt = tc.Context()
cxt.big = tc.tensor.Dense.arange(shape, 0, 10)
cxt.result = cxt.big.sum()
actual = self.host.post(ENDPOINT, cxt)
self.assertEqual(actual, sum(range(10)))
def testSliceAndTransposeAndSliceAndSlice(self):
self.maxDiff = None
shape = [2, 3, 4, 5]
cxt = tc.Context()
cxt.big = tc.tensor.Dense.arange(shape, 0, 120)
cxt.medium = cxt.big[0]
cxt.small = cxt.medium.transpose()[1, 1:3]
cxt.tiny = cxt.small[0, :-1]
expected = np.arange(0, 120).reshape(shape)
expected = expected[0]
expected = np.transpose(expected)[1, 1:3]
expected = expected[0, :-1]
actual = self.host.post(ENDPOINT, cxt)
self.assertEqual(actual, expect_dense(tc.I64, expected.shape, expected.flatten()))
def testFlip(self):
shape = [5, 4, 3]
cxt = tc.Context()
cxt.x = tc.tensor.Dense.arange(shape, 0, 60)
cxt.result = cxt.x.flip(0)
expected = np.arange(0, 60).reshape(shape)
expected = np.flip(expected, 0)
actual = self.host.post(ENDPOINT, cxt)
self.assertEqual(actual, expect_dense(tc.I64, expected.shape, expected.flatten()))
def testReshape(self):
source = [2, 3, 4, 1]
dest = [3, 8]
cxt = tc.Context()
cxt.x = tc.tensor.Dense.arange(source, 0, 24)
cxt.result = cxt.x.reshape(dest)
actual = self.host.post(ENDPOINT, cxt)
self.assertEqual(actual, expect_dense(tc.I64, dest, np.arange(24).tolist()))
def testTile(self):
shape = [2, 3]
multiples = 2
x = np.arange(0, np.product(shape)).reshape(shape)
cxt = tc.Context()
cxt.x = load_dense(x, tc.I32)
cxt.result = tc.tensor.tile(cxt.x, 2)
actual = self.host.post(ENDPOINT, cxt)
expected = np.tile(x, multiples)
self.assertEqual(actual, expect_dense(tc.I32, list(expected.shape), expected.flatten().tolist()))
def testArgmax(self):
shape = [2, 3, 4]
x = np.arange(0, np.product(shape)).reshape(shape)
cxt = tc.Context()
cxt.x = load_dense(x)
cxt.am = cxt.x.argmax()
cxt.am0 = cxt.x.argmax(0)
cxt.am1 = cxt.x.argmax(1)
cxt.result = cxt.am, cxt.am0, cxt.am1
actual_am, actual_am0, actual_am1 = self.host.post(ENDPOINT, cxt)
self.assertEqual(actual_am, np.argmax(x))
self.assertEqual(actual_am0, expect_dense(tc.U64, [3, 4], np.argmax(x, 0).flatten().tolist()))
self.assertEqual(actual_am1, expect_dense(tc.U64, [2, 4], np.argmax(x, 1).flatten().tolist()))
@classmethod
def tearDownClass(cls):
cls.host.stop()
class SparseTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.host = start_host("test_sparse_tensor")
def testCreate(self):
shape = [2, 5]
coord = [0, 0]
value = 1
cxt = tc.Context()
cxt.tensor = tc.tensor.Sparse.zeros(shape, tc.I32)
cxt.result = tc.After(cxt.tensor[coord].write(value), cxt.tensor)
actual = self.host.post(ENDPOINT, cxt)
expected = expect_sparse(tc.I32, shape, [[coord, value]])
self.assertEqual(actual, expected)
def testWriteAndSlice(self):
shape = [2, 5]
cxt = tc.Context()
cxt.tensor = tc.tensor.Sparse.zeros(shape)
cxt.result = tc.After(cxt.tensor[:, 2:-1].write(1), cxt.tensor)
actual = self.host.post(ENDPOINT, cxt)
expected = expect_sparse(tc.F32, shape, [[[0, 2], 1], [[0, 3], 1], [[1, 2], 1], [[1, 3], 1]])
self.assertEqual(actual, expected)
def testAdd(self):
shape = [5, 2, 3]
cxt = tc.Context()
cxt.big = tc.tensor.Sparse.zeros(shape)
cxt.small = tc.tensor.Sparse.zeros([3])
cxt.result = tc.After([
cxt.big[1].write(1),
cxt.small[1].write(2),
], cxt.big + cxt.small)
actual = self.host.post(ENDPOINT, cxt)
big = np.zeros(shape)
big[1] = 1
small = np.zeros([3])
small[1] = 2
expected = big + small
expected = expect_sparse(tc.F32, shape, expected)
self.assertEqual(actual, expected)
def testDiv(self):
shape = [3, 2, 4]
cxt = tc.Context()
cxt.big = tc.tensor.Sparse.zeros(shape)
cxt.small = tc.tensor.Sparse.zeros([1, 1])
cxt.result = tc.After([
cxt.big[:2].write(1),
cxt.small[0].write(-2),
], cxt.big / cxt.small)
actual = self.host.post(ENDPOINT, cxt)
big = np.zeros(shape)
big[:2] = 1.
small = np.zeros([1, 1])
small[0] = -2.
expected = big / small
expected = expect_sparse(tc.F32, shape, expected)
self.assertEqual(actual, expected)
def testMul(self):
shape = [3, 5, 2]
cxt = tc.Context()
cxt.big = tc.tensor.Sparse.zeros(shape)
cxt.small = tc.tensor.Sparse.zeros([5, 2])
cxt.result = tc.After([
cxt.big[:, 1:-2].write(2),
cxt.small[1].write(3),
], cxt.big * cxt.small)
actual = self.host.post(ENDPOINT, cxt)
big = np.zeros(shape)
big[:, 1:-2] = 2
small = np.zeros([5, 2])
small[1] = 3
expected = big * small
expected = expect_sparse(tc.F32, shape, expected)
self.assertEqual(actual, expected)
def testSub(self):
shape = [3, 5, 2]
cxt = tc.Context()
cxt.big = tc.tensor.Sparse.zeros(shape, tc.I16)
cxt.small = tc.tensor.Sparse.zeros([5, 2], tc.U32)
cxt.result = tc.After([
cxt.big[:, 1:-2].write(2),
cxt.small[1].write(3),
], cxt.small - cxt.big)
actual = self.host.post(ENDPOINT, cxt)
big = np.zeros(shape)
big[:, 1:-2] = 2
small = np.zeros([5, 2])
small[1] = 3
expected = small - big
expected = expect_sparse(tc.I32, shape, expected)
self.assertEqual(actual, expected)
def testSum(self):
shape = [2, 4, 3, 5]
axis = 1
cxt = tc.Context()
cxt.big = tc.tensor.Sparse.zeros(shape, tc.I32)
cxt.result = tc.After(cxt.big[0, 1:3].write(2), cxt.big.sum(axis))
actual = self.host.post(ENDPOINT, cxt)
expected = np.zeros(shape, dtype=np.int32)
expected[0, 1:3] = 2
expected = expected.sum(axis)
expected = expect_sparse(tc.I32, [2, 3, 5], expected)
self.assertEqual(actual, expected)
def testProduct(self):
shape = [2, 4, 3, 5]
axis = 2
cxt = tc.Context()
cxt.big = tc.tensor.Sparse.zeros(shape, tc.I32)
cxt.result = tc.After(cxt.big[0, 1:3].write(2), cxt.big.product(axis))
actual = self.host.post(ENDPOINT, cxt)
expected = np.zeros(shape, dtype=np.int32)
expected[0, 1:3] = 2
expected = expected.prod(axis)
expected = expect_sparse(tc.I32, [2, 4, 5], expected)
self.assertEqual(actual, expected)
def testSliceAndBroadcast(self):
self.maxDiff = None
data = [
[[0, 0, 3, 0], 1.],
[[0, 2, 0, 0], 2.],
[[1, 0, 0, 0], 3.],
]
shape = [2, 5, 2, 3, 4, 10]
cxt = tc.Context()
cxt.small = tc.tensor.Sparse.load([2, 3, 4, 1], tc.F32, data)
cxt.big = cxt.small * tc.tensor.Dense.ones(shape)
cxt.slice = cxt.big[:-1, 1:4, 1]
actual = self.host.post(ENDPOINT, cxt)
expected = np.zeros([2, 3, 4, 1])
for coord, value in data:
expected[tuple(coord)] = value
expected = expected * np.ones(shape)
expected = expected[:-1, 1:4, 1]
expected = expect_sparse(tc.F64, expected.shape, expected)
self.assertEqual(actual, expected)
def testArgmax(self):
shape = [2, 3]
x = (np.random.random(np.product(shape)) * 2) - 1
x = (x * (np.abs(x) > 0.5)).reshape(shape)
cxt = tc.Context()
cxt.x = tc.tensor.Sparse.load(
shape, tc.F32, [(list(coord), x[coord]) for coord in | np.ndindex(x.shape) | numpy.ndindex |
#!/usr/bin/env python3
import numpy as np
from urllib.request import urlopen
from Bio.PDB.PDBParser import PDBParser
import argparse
import os
import sys
import shutil
import glob
import re
import warnings
import time
startTime = time.time()
#silence warnings from numpy when doing gap_checks (and all others but it's all dealt with internally [hopefully])
old_settings = np.seterr(all='ignore')
warnings.simplefilter(action = "ignore", category = FutureWarning)
# Oregon State 2014
# <NAME>
# In collaboration with:
# <NAME>
# <NAME>
# <NAME>
# Dr. <NAME>
#checks if there is a valid file at a specified location
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return (open(arg, 'r')) # return an open file handle
# importing arguments from the user
parser=argparse.ArgumentParser(
description='''Assigns secondary structure without using hydrogen bonds. One method uses virtual dihedrals and bond angles, the other using phi/psi2 motifs to identify secondary structure. ''',
epilog="""IMPORTANT NOTES:\nExpect there to be strange ? residues at the end of the output if there are any ligands or other non-water HETATOMS present. This can be safely ignored."""
)
parser.add_argument('-i',dest="input",metavar="FILE",type=lambda x: is_valid_file(parser, x), help='This should be a pdb file. Do not use in combination with the "-c" option.')
parser.add_argument('-c',dest="code",type=str, help='This should be a four letter pdb code. Only use this option if you want to download directly from the PDB.')
parser.add_argument('-o',dest="output",metavar="FILE", help='Output file, a table using tabs as seperators. If not specified, default is to output to STDOUT as human readable output.')
parser.add_argument('--legend',dest='legend', action='store_true', help='Option to print a legend for the Secondary Structure codes.')
parser.add_argument('--verbose',dest='verbose', action='store_true', help='Option to print a all the output, including the behind the scenes methods for structure assignment.')
parser.set_defaults(legend=False, verbose=False)
args=parser.parse_args()
if args.legend == True:
print ("\nLEGEND:\n_:\tBreak in the chain (as detected by checking bond distance)\n-:\tUnassigned trans-residue\n=:\tUnassigned cis-residue\nP:\tPII-Helix\nt:\tTurn defined by CA to CA Distance (or implied as a consequence of other turns)\nN:\tA typically non-hydrogen bonded turn\nT:\tA typically hydrogen bonded turn T\nE:\tExtended or Beta-strand conformation\nH:\tAlpha Helical Conformation\nG:\t3,10 Helix\nBb:\tBeta-bulge\nU:\tPi-helical bulge\nX:\tThe Stig\n")
# Dictionary and function that converts triple letter codes to single letter
# Any non-conventional three letter code becomes "?"
# Victor made the first version of this function
def to_single(single,triple):
return (single[triple])
one_letter = {'ALA':'A', 'ARG':'R', 'ASN':'N', 'ASP':'D', 'CYS':'C', 'GLN':'Q', 'GLU':'E', 'GLY':'G', 'HIS':'H', 'ILE':'I', \
'LEU':'L', 'LYS':'K', 'MET':'M', 'PHE':'F', 'PRO':'P', 'SER':'S', 'THR':'T', 'TRP':'W', 'TYR':'Y', 'VAL':'V', 'BLA':'-'}
#function for getting pdb files online
def fetch_pdb(id):
pdb = "%s.pdb" % str(id.lower())
url = 'http://www.rcsb.org/pdb/files/%s.pdb' % id.lower()
tmp = urlopen(url)
fh = open(pdb,'wb')
fh.write(tmp.read())
fh.close()
if args.code == None:
try:
pdb = args.input
print ("\nWorking...\n")
except:
pass
elif args.input == None:
try:
fetch_pdb(args.code)
pdb = open('%s.pdb' %args.code.lower(), 'r')
print ("\nWorking...\n")
except:
print ("\n\n\nPlease enter a valid code or path.\n\n\n")
else:
print ("\n\n\nPlease only choose option -i or option -c.\n\n\n")
def atom_get(i,atom):
if atom_list[i][1] == atom:
return (atom_list[i][2])
else:
return ('no')
def chain_get(i,atom):
if atom_list[i][1] == atom:
return (atom_list[i][3])
else:
return ('no')
def model_get(i,atom):
if atom_list[i][1] == atom:
return (atom_list[i][4])
else:
return ('no')
# function to get indices; matches the index from the resnum list with the index for the atomic coords, which is why the resnum has to be in each
def index_getter(resnum):
indices = []
i = 0
for atom in atom_list:
try:
index = atom.index(resnum)
if index == 0:
indices.append(i)
except:
pass
i += 1
return (indices)
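# Hedged illustration (not part of the original script): atom_list rows are assumed to look
# like [resnum, atom_name, ...] with a parallel atom_xyz list of coordinates, so for
# atom_list = [['12', 'N', ...], ['12', 'CA', ...], ['13', 'N', ...]],
# index_getter('12') would return [0, 1].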
# checks for certain atom types and grabs just those coordinates
#this is for correctly sorting atom types
def atom_getter(index,atom):
if atom_list[index][1] == atom:
return (atom_xyz[index])
else:
return ('no')
#### GAP CHECK FUNCTION ######
def gap_check(resnum):
indices = index_getter(resnum)
prev_indices = []
next_indices = []
for i in indices:
prev_indices.append(i-4)
next_indices.append(i+4)
atom_types = ['C','N']
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
elif atom == 'N':
iN = atom_getter(i,atom)
for i in prev_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
prevC = atom_getter(i,atom)
for i in next_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
nextN = atom_getter(i,atom)
try:
ahead = np.subtract(nextN, iC)
behind = np.subtract(iN, prevC)
ahead_mag = np.sqrt(ahead.dot(ahead))
behind_mag = np.sqrt(behind.dot(behind))
if ((ahead_mag > 1.5) and (behind_mag > 1.5)):
return('isolated')
elif(ahead_mag > 1.5):
return('ahead')
elif(behind_mag > 1.5):
return('behind')
else:
return('no')
except:
return("Fatal Error in Gap Check")
#### GAP CHECK FUNCTION for dison3, discn3, discaca3, and dison4 ######
def long_gap_check(resnum):
indices = index_getter(resnum)
next_indices = []
plus_two_indices = []
plus_three_indices =[]
plus_four_indices =[]
for i in indices:
plus_two_indices.append(i+8)
next_indices.append(i+4)
plus_three_indices.append(i+12)
plus_four_indices.append(i+16)
atom_types = ['C','N']
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
for i in plus_two_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
plus_twoC = atom_getter(i,atom)
elif atom == 'N':
plus_twoN = atom_getter(i,atom)
for i in next_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
nextC = atom_getter(i,atom)
elif atom == 'N':
nextN = atom_getter(i,atom)
for i in plus_three_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
plus_threeC = atom_getter(i,atom)
elif atom == 'N':
plus_threeN = atom_getter(i,atom)
for i in plus_four_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
plus_fourN = atom_getter(i,atom)
try:
ahead = np.subtract(nextN, iC)
two_ahead = np.subtract(plus_twoN, nextC)
three_ahead = np.subtract(plus_threeN, plus_twoC)
four_ahead = np.subtract(plus_fourN, plus_threeC)
ahead_mag = np.sqrt(ahead.dot(ahead))
        two_ahead_mag = np.sqrt(two_ahead.dot(two_ahead))
        three_ahead_mag = np.sqrt(three_ahead.dot(three_ahead))
        four_ahead_mag = np.sqrt(four_ahead.dot(four_ahead))
if ((ahead_mag > 1.5) or (two_ahead_mag > 1.5) or (three_ahead_mag > 1.5)):
return('threegap')
elif ((ahead_mag > 1.5) or (two_ahead_mag > 1.5) or (three_ahead_mag > 1.5) or (four_ahead_mag > 1.5)):
return('fourgap')
else:
return('no')
except:
return("Fatal Error in Gap Check")
# ZETA FUNCTION
# returns a single value, zeta for the resnum entered
# There are functions within functions here: I'm sorry. I was new to python
def zeta_calc(resnum):
# The order of atoms in atom_list is N, CA, C, O
#this returns the index values of each of the atoms at this residue number
indices = index_getter(resnum)
prev_indices = []
for i in indices:
prev_indices.append(i-4)
if ((gap_check(resnum) == 'behind') or (gap_check(resnum) == 'isolated')):
return('ERROR')
atom_types = ['C','O']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of the atom order in the PDB file
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
elif atom == 'O':
iO = atom_getter(i,atom)
for i in prev_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
prevC = atom_getter(i,atom)
elif atom == 'O':
prevO = atom_getter(i,atom)
v1 = np.subtract(iC, iO)
v2 = np.subtract(prevC, iC)
v3 = np.subtract(prevO, prevC)
n1 = np.cross(v1,v2)
n2 = np.cross(v2,v3)
dot1 = np.dot(n1,n2)
n1mag = np.sqrt(n1.dot(n1))
n2mag = np.sqrt(n2.dot(n2))
cos_zeta = dot1/(n1mag*n2mag)
zeta = np.degrees(np.arccos(cos_zeta))
#testing for direction
cross = np.cross(n1,n2)
direction = np.dot(cross, v2)
if direction < 0:
zeta = -1 * zeta
return (zeta)
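# zeta_calc above and the ome_calc / phi_calc routines below all evaluate the
# same signed-dihedral formula, just on different atom quadruples.  A minimal
# stand-alone sketch of that calculation (the helper name `dihedral` is
# illustrative and not part of the original script):
def dihedral(p0, p1, p2, p3):
    # bond vectors along the four-atom chain
    b1 = np.subtract(p1, p0)
    b2 = np.subtract(p2, p1)
    b3 = np.subtract(p3, p2)
    # normals to the two planes and the angle between them
    n1 = np.cross(b1, b2)
    n2 = np.cross(b2, b3)
    cos_angle = np.dot(n1, n2) / (np.sqrt(n1.dot(n1)) * np.sqrt(n2.dot(n2)))
    angle = np.degrees(np.arccos(cos_angle))
    # sign test: negative when (n1 x n2) points against the central bond
    if np.dot(np.cross(n1, n2), b2) < 0:
        angle = -angle
    return angle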
# Omega FUNCTION
# returns a single value, ome for the resnum entered
# There are functions within functions here: I'm sorry. I was new to python
def ome_calc(resnum):
# The order of atoms in atom_list is N, CA, C, O
indices = index_getter(resnum)
prev_indices = []
for i in indices:
prev_indices.append(i-4)
if ((gap_check(resnum) == 'behind') or (gap_check(resnum) == 'isolated')):
return('ERROR')
atom_types = ['C','N','CA']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of the atom order in the PDB file
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
iN = atom_getter(i,atom)
elif atom == 'CA':
iCA = atom_getter(i,atom)
for i in prev_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
prevC = atom_getter(i,atom)
elif atom == 'CA':
prevCA = atom_getter(i,atom)
v1 = np.subtract(iN, iCA)
v2 = np.subtract(prevC, iN)
v3 = np.subtract(prevCA, prevC)
n1 = np.cross(v1,v2)
n2 = np.cross(v2,v3)
dot1 = np.dot(n1,n2)
n1mag = np.sqrt(n1.dot(n1))
n2mag = np.sqrt(n2.dot(n2))
cos_ome = dot1/(n1mag*n2mag)
ome = np.degrees(np.arccos(cos_ome))
#testing for direction
cross = np.cross(n1,n2)
direction = np.dot(cross, v2)
if direction < 0:
ome = -1 * ome
return (ome)
# PHI FUNCTION
# returns a single value, phi for the resnum entered
# There are functions within functions here: I'm sorry. I was new to python
def phi_calc(resnum):
indices = index_getter(resnum)
prev_indices = []
for i in indices:
prev_indices.append(i-4)
if ((gap_check(resnum) == 'behind') or (gap_check(resnum) == 'isolated')):
return('ERROR')
atom_types = ['C','N','CA']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of the atom order in the PDB file
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
elif atom == 'CA':
iCA = atom_getter(i,atom)
elif atom == 'N':
iN = atom_getter(i,atom)
for i in prev_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
prevC = atom_getter(i,atom)
v1 = np.subtract(iCA, iC)
v2 = np.subtract(iN, iCA)
    v3 = np.subtract(prevC, iN)
import numpy as np
import copy
from scipy.stats import beta
from scipy.special import expit
from scipy.stats import expon
from scipy.stats import uniform
from scipy.stats import multinomial
from numpy.polynomial import legendre
class SNMHawkesBeta:
"""
This class implements sigmoid nonlinear multivariate Hawkes processes with Beta densities as basis functions.
The main features it provides include simulation and statistical inference.
"""
def __init__(self, number_of_dimensions, number_of_basis):
"""
Initialises an instance.
:type number_of_dimensions: int
:param number_of_dimensions: number of dimensions (neurons)
:type number_of_basis: int
:param number_of_basis: number of basis functions (beta densities)
"""
self.number_of_dimensions = number_of_dimensions
self.number_of_basis = number_of_basis
self.beta_ab = np.zeros((number_of_basis, 3))
self.T_phi = 0
        self.lamda_ub = np.zeros(number_of_dimensions)
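# A minimal construction sketch for the class above (the dimension and basis
# counts are illustrative values, not taken from any original experiment):
#   model = SNMHawkesBeta(number_of_dimensions=2, number_of_basis=4)
#   model.beta_ab.shape    # (4, 3)
#   model.lamda_ub.shape   # (2,)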
from __future__ import print_function
import numpy as np
import random
import json
import sys
import os
import pickle as pkl
import networkx as nx
from networkx.readwrite import json_graph
import scipy.sparse as sp
version_info = list(map(int, nx.__version__.split('.')))
major = version_info[0]
minor = version_info[1]
# assert (major <= 1) and (minor <= 11), "networkx major version > 1.11"
def load_data(prefix, normalize=True):
G_data = json.load(open(prefix + "-G.json"))
G = json_graph.node_link_graph(G_data)
conversion = lambda n : int(n)
if os.path.exists(prefix + "-feats.npy"):
feats = np.load(prefix + "-feats.npy")
else:
print("No features present.. Only identity features will be used.")
feats = None
class_map = json.load(open(prefix + "-class_map.json"))
if isinstance(list(class_map.values())[0], list):
lab_conversion = lambda n : n
else:
lab_conversion = lambda n : int(n)
class_map = {conversion(k):lab_conversion(v) for k,v in class_map.items()}
## Remove all nodes that do not have val/test annotations
## (necessary because of networkx weirdness with the Reddit data)
if normalize and not feats is None:
from sklearn.preprocessing import StandardScaler
train_ids = np.array([n for n in G.nodes() if not G.node[n]['val'] and not G.node[n]['test']])
train_feats = feats[train_ids]
scaler = StandardScaler()
scaler.fit(train_feats)
feats = scaler.transform(feats)
return G, feats, class_map
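# A hypothetical call (the "./example_data/ppi" prefix is illustrative; any
# GraphSAGE-style prefix with -G.json, -class_map.json and optional -feats.npy
# files will do):
#   G, feats, class_map = load_data("./example_data/ppi", normalize=True)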
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def load_data_gcn(dataset_str,task_type = "semi"):
"""
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
:return: All data input files loaded (as well the training/test data).
"""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
if task_type == "full":
print("Load full supervised task.")
#supervised setting
idx_test = test_idx_range.tolist()
idx_train = range(len(ally)- 500)
idx_val = range(len(ally) - 500, len(ally))
elif task_type == "semi":
print("Load semi-supervised task.")
#semi-supervised setting
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
else:
raise ValueError("Task type: %s is not supported. Available option: full and semi.")
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
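# Note on the expected layout for load_data_gcn: the loader reads pickled
# files named data/ind.<dataset_str>.x, .tx, .allx, .y, .ty, .ally, .graph
# plus the plain-text index data/ind.<dataset_str>.test.index; e.g.
# dataset_str='citeseer' reads data/ind.citeseer.x and so on.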
"""
The game of Reversi. Warning: this game is not coded in an optimal
way, the AI will be slow.
"""
import numpy as np
from easyAI import TwoPlayersGame
to_string = lambda a : "ABCDEFGH"[a[0]] + str(a[1]+1)
to_array = lambda s : np.array(["ABCDEFGH".index(s[0]),int(s[1])-1])
class Reversi( TwoPlayersGame ):
"""
See the rules on http://en.wikipedia.org/wiki/Reversi
Here for simplicity we suppose that the game ends when a
player cannot play, but it would take just a few more lines to
implement the real ending rules, by which the game ends when both
players can't play.
    This implementation will make a slow and dumb AI and could be sped
    up by adding a way of unmaking moves (method unmake_move) and
coding some parts in C (this is left as an exercise :) )
"""
def __init__(self, players, board = None):
self.players = players
self.board = np.zeros((8,8), dtype=int)
self.board[3,[3,4]] = [1,2]
self.board[4,[3,4]] = [2,1]
self.nplayer=1
def possible_moves(self):
""" Only moves that lead to flipped pieces are allowed """
return [to_string((i,j)) for i in range(8) for j in range(8)
if (self.board[i,j] == 0)
and (pieces_flipped(self.board, (i,j), self.nplayer) != [])]
def make_move(self, pos):
""" Put the piece at position ``pos`` and flip the pieces that
        must be flipped """
pos= to_array(pos)
flipped = pieces_flipped(self.board, pos, self.nplayer)
for i,j in flipped:
self.board[i,j] = self.nplayer
self.board[pos[0],pos[1]] = self.nplayer
def show(self):
""" Prints the board in a fancy (?) way """
print('\n'+'\n'.join([' 1 2 3 4 5 6 7 8']+ ['ABCDEFGH'[k] +
' '+' '.join([['.','1','2','X'][self.board[k][i]]
for i in range(8)]) for k in range(8)]+['']))
def is_over(self):
""" The game is considered over when someone cannot play. That
may not be the actual rule but it is simpler to code :). Of
        course it would be possible to let a player pass when it cannot
        play (by adding the move 'pass')"""
return self.possible_moves() == []
def scoring(self):
"""
In the beginning of the game (less than 32 pieces) much
importance is given to placing pieces on the border. After this
point, only the number of pieces of each player counts
"""
if np.sum(self.board==0) > 32: # less than half the board is full
player = self.board==self.nplayer
opponent = self.board==self.nopponent
return ((player-opponent)*BOARD_SCORE).sum()
else:
npieces_player = np.sum(self.board==self.nplayer)
            npieces_opponent = np.sum(self.board==self.nopponent)
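# A hypothetical game setup (this assumes the Human_Player, AI_Player and
# Negamax classes exposed by the same easyAI release that provides
# TwoPlayersGame):
#   from easyAI import Human_Player, AI_Player, Negamax
#   game = Reversi([Human_Player(), AI_Player(Negamax(4))])
#   game.play()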
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit Tests
__author__: <NAME>, <NAME>, <NAME>
"""
import os
import sys
import unittest
import numpy as np
from scipy.io import loadmat
sys.path.append(".")
from inferactively.distributions import Categorical, Dirichlet # nopep8
class TestCategorical(unittest.TestCase):
def test_init_empty(self):
c = Categorical()
self.assertEqual(c.ndim, 2)
def test_init_overload(self):
with self.assertRaises(ValueError):
values = np.random.rand(3, 2)
_ = Categorical(dims=2, values=values)
def test_float_conversion(self):
values = np.array([2, 3])
self.assertEqual(values.dtype, np.int)
c = Categorical(values=values)
self.assertEqual(c.values.dtype, np.float64)
def test_init_dims_expand(self):
c = Categorical(dims=[5])
self.assertEqual(c.shape, (5, 1))
def test_init_dims_int_expand(self):
c = Categorical(dims=5)
self.assertEqual(c.shape, (5, 1))
def test_multi_factor_init_dims(self):
c = Categorical(dims=[[5, 4], [4, 3]])
self.assertEqual(c.shape, (2,))
self.assertEqual(c[0].shape, (5, 4))
self.assertEqual(c[1].shape, (4, 3))
def test_multi_factor_init_values(self):
values_1 = np.random.rand(5, 4)
values_2 = np.random.rand(4, 3)
values = np.array([values_1, values_2])
c = Categorical(values=values)
self.assertEqual(c.shape, (2,))
self.assertEqual(c[0].shape, (5, 4))
self.assertEqual(c[1].shape, (4, 3))
def test_multi_factor_init_values_expand(self):
values_1 = np.random.rand(5)
values_2 = np.random.rand(4)
values = np.array([values_1, values_2])
c = Categorical(values=values)
self.assertEqual(c.shape, (2,))
self.assertEqual(c[0].shape, (5, 1))
self.assertEqual(c[1].shape, (4, 1))
def test_normalize_multi_factor(self):
values_1 = np.random.rand(5)
values_2 = np.random.rand(4, 3)
values = np.array([values_1, values_2])
c = Categorical(values=values)
c.normalize()
self.assertTrue(c.is_normalized())
def test_normalize_single_dim(self):
values = np.array([1.0, 1.0])
c = Categorical(values=values)
expected_values = np.array([[0.5], [0.5]])
c.normalize()
self.assertTrue(np.array_equal(c.values, expected_values))
def test_normalize_two_dim(self):
values = np.array([[1.0, 1.0], [1.0, 1.0]])
c = Categorical(values=values)
expected_values = np.array([[0.5, 0.5], [0.5, 0.5]])
c.normalize()
self.assertTrue(np.array_equal(c.values, expected_values))
def test_is_normalized(self):
values = np.array([[0.7, 0.5], [0.3, 0.5]])
c = Categorical(values=values)
self.assertTrue(c.is_normalized())
values = np.array([[0.2, 0.8], [0.3, 0.5]])
c = Categorical(values=values)
self.assertFalse(c.is_normalized())
def test_remove_zeros(self):
values = np.array([[1.0, 0.0], [1.0, 1.0]])
c = Categorical(values=values)
self.assertTrue((c.values == 0.0).any())
c.remove_zeros()
self.assertFalse((c.values == 0.0).any())
def test_contains_zeros(self):
values = np.array([[1.0, 0.0], [1.0, 1.0]])
c = Categorical(values=values)
self.assertTrue(c.contains_zeros())
values = np.array([[1.0, 1.0], [1.0, 1.0]])
c = Categorical(values=values)
self.assertFalse(c.contains_zeros())
def test_entropy(self):
values = np.random.rand(3, 2)
entropy = -np.sum(values * np.log(values), 0)
c = Categorical(values=values)
self.assertTrue(np.array_equal(c.entropy(return_numpy=True), entropy))
def test_log(self):
values = np.random.rand(3, 2)
log_values = np.log(values)
c = Categorical(values=values)
self.assertTrue(np.array_equal(c.log(return_numpy=True), log_values))
def test_copy(self):
values = np.random.rand(3, 2)
c = Categorical(values=values)
c_copy = c.copy()
self.assertTrue(np.array_equal(c_copy.values, c.values))
c_copy.values = c_copy.values * 2
self.assertFalse(np.array_equal(c_copy.values, c.values))
def test_ndim(self):
values = np.random.rand(3, 2)
c = Categorical(values=values)
self.assertEqual(c.ndim, c.values.ndim)
def test_shape(self):
values = np.random.rand(3, 2)
c = Categorical(values=values)
self.assertEqual(c.shape, (3, 2))
def test_sample_single(self):
# values are already normalized
values = np.array([1.0, 0.0])
c = Categorical(values=values)
self.assertEqual(0, c.sample())
# values are not normalized
values = np.array([0, 10.0])
c = Categorical(values=values)
self.assertEqual(1, c.sample())
def test_sample_AoA(self):
# values are already normalized
values_1 = np.array([1.0, 0.0])
values_2 = np.array([0.0, 1.0, 0.0])
values = np.array([values_1, values_2])
c = Categorical(values=values)
self.assertTrue(np.isclose(np.array([0, 1]), c.sample()).all())
# values are not normalized
values_1 = np.array([10.0, 0.0])
values_2 = np.array([0.0, 10.0, 0.0])
values = np.array([values_1, values_2])
c = Categorical(values=values)
self.assertTrue(np.isclose(np.array([0, 1]), c.sample()).all())
def test_dot_function_a(self):
""" test with vectors and matrices, discrete state / outcomes """
array_path = os.path.join(os.getcwd(), "tests/data/dot_a.mat")
mat_contents = loadmat(file_name=array_path)
A = mat_contents["A"]
obs = mat_contents["o"]
states = mat_contents["s"]
states = np.array(states, dtype=object)
result_1 = mat_contents["result1"]
result_2 = mat_contents["result2"]
result_3 = mat_contents["result3"]
A = Categorical(values=A)
result_1_py = A.dot(obs, return_numpy=True)
self.assertTrue(np.isclose(result_1, result_1_py).all())
result_2_py = A.dot(states, return_numpy=True)
result_2_py = result_2_py.astype("float64")[:, np.newaxis]
self.assertTrue(np.isclose(result_2, result_2_py).all())
result_3_py = A.dot(states, dims_to_omit=[0], return_numpy=True)
self.assertTrue(np.isclose(result_3, result_3_py).all())
def test_dot_function_a_cat(self):
""" test with vectors and matrices, discrete state / outcomes
Now, when arguments themselves are instances of Categorical
"""
array_path = os.path.join(os.getcwd(), "tests/data/dot_a.mat")
mat_contents = loadmat(file_name=array_path)
A = mat_contents["A"]
obs = Categorical(values=mat_contents["o"])
states = Categorical(values=mat_contents["s"][0])
result_1 = mat_contents["result1"]
result_2 = mat_contents["result2"]
result_3 = mat_contents["result3"]
A = Categorical(values=A)
result_1_py = A.dot(obs, return_numpy=True)
self.assertTrue(np.isclose(result_1, result_1_py).all())
result_2_py = A.dot(states, return_numpy=True)
result_2_py = result_2_py.astype("float64")[:, np.newaxis]
self.assertTrue(np.isclose(result_2, result_2_py).all())
result_3_py = A.dot(states, dims_to_omit=[0], return_numpy=True)
self.assertTrue(np.isclose(result_3, result_3_py).all())
def test_dot_function_b(self):
""" continuous states and outcomes """
array_path = os.path.join(os.getcwd(), "tests/data/dot_b.mat")
mat_contents = loadmat(file_name=array_path)
A = mat_contents["A"]
obs = mat_contents["o"]
states = mat_contents["s"]
states = np.array(states, dtype=object)
result_1 = mat_contents["result1"]
result_2 = mat_contents["result2"]
result_3 = mat_contents["result3"]
A = Categorical(values=A)
result_1_py = A.dot(obs, return_numpy=True)
self.assertTrue(np.isclose(result_1, result_1_py).all())
result_2_py = A.dot(states, return_numpy=True)
result_2_py = result_2_py.astype("float64")[:, np.newaxis]
self.assertTrue(np.isclose(result_2, result_2_py).all())
result_3_py = A.dot(states, dims_to_omit=[0], return_numpy=True)
self.assertTrue(np.isclose(result_3, result_3_py).all())
def test_dot_function_c(self):
""" DISCRETE states and outcomes, but also a third hidden state factor """
array_path = os.path.join(os.getcwd(), "tests/data/dot_c.mat")
mat_contents = loadmat(file_name=array_path)
A = mat_contents["A"]
obs = mat_contents["o"]
states = mat_contents["s"]
states_array_version = np.empty(states.shape[1], dtype=object)
for i in range(states.shape[1]):
states_array_version[i] = states[0][i][0]
result_1 = mat_contents["result1"]
result_2 = mat_contents["result2"]
result_3 = mat_contents["result3"]
A = Categorical(values=A)
result_1_py = A.dot(obs, return_numpy=True)
self.assertTrue(np.isclose(result_1, result_1_py).all())
result_2_py = A.dot(states_array_version, return_numpy=True)
result_2_py = result_2_py.astype("float64")[:, np.newaxis]
self.assertTrue(np.isclose(result_2, result_2_py).all())
result_3_py = A.dot(states_array_version, dims_to_omit=[0], return_numpy=True)
self.assertTrue(np.isclose(result_3, result_3_py).all())
def test_dot_function_c_cat(self):
""" test with vectors and matrices, discrete state / outcomes but with a
third hidden state factor. Now, when arguments themselves are
instances of Categorical
"""
array_path = os.path.join(os.getcwd(), "tests/data/dot_c.mat")
mat_contents = loadmat(file_name=array_path)
A = mat_contents["A"]
obs = Categorical(values=mat_contents["o"])
states = mat_contents["s"]
states_array_version = np.empty(states.shape[1], dtype=object)
for i in range(states.shape[1]):
states_array_version[i] = states[0][i][0]
states_array_version = Categorical(values=states_array_version)
result_1 = mat_contents["result1"]
result_2 = mat_contents["result2"]
result_3 = mat_contents["result3"]
A = Categorical(values=A)
result_1_py = A.dot(obs, return_numpy=True)
self.assertTrue(np.isclose(result_1, result_1_py).all())
result_2_py = A.dot(states_array_version, return_numpy=True)
result_2_py = result_2_py.astype("float64")[:, np.newaxis]
self.assertTrue(np.isclose(result_2, result_2_py).all())
result_3_py = A.dot(states_array_version, dims_to_omit=[0], return_numpy=True)
self.assertTrue(np.isclose(result_3, result_3_py).all())
def test_dot_function_d(self):
""" CONTINUOUS states and outcomes, but also a third hidden state factor """
array_path = os.path.join(os.getcwd(), "tests/data/dot_d.mat")
mat_contents = loadmat(file_name=array_path)
A = mat_contents["A"]
obs = mat_contents["o"]
states = mat_contents["s"]
states_array_version = np.empty(states.shape[1], dtype=object)
for i in range(states.shape[1]):
states_array_version[i] = states[0][i][0]
result_1 = mat_contents["result1"]
result_2 = mat_contents["result2"]
result_3 = mat_contents["result3"]
A = Categorical(values=A)
result_1_py = A.dot(obs, return_numpy=True)
self.assertTrue(np.isclose(result_1, result_1_py).all())
result_2_py = A.dot(states_array_version, return_numpy=True)
result_2_py = result_2_py.astype("float64")[:, np.newaxis]
self.assertTrue(np.isclose(result_2, result_2_py).all())
result_3_py = A.dot(states_array_version, dims_to_omit=[0], return_numpy=True)
self.assertTrue(np.isclose(result_3, result_3_py).all())
def test_dot_function_e(self):
""" CONTINUOUS states and outcomes, but add a final (fourth) hidden state factor """
array_path = os.path.join(os.getcwd(), "tests/data/dot_e.mat")
mat_contents = loadmat(file_name=array_path)
A = mat_contents["A"]
obs = mat_contents["o"]
states = mat_contents["s"]
        states_array_version = np.empty(states.shape[1], dtype=object)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`k2.py` - Main mission routines
---------------------------------------
Implements several routines specific to the `K2` mission.
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from . import sysrem
from .utils import *
from ...config import EVEREST_SRC, EVEREST_DAT, EVEREST_DEV, MAST_ROOT, \
EVEREST_MAJOR_MINOR
from ...utils import DataContainer, sort_like, AP_COLLAPSED_PIXEL, \
AP_SATURATED_PIXEL
from ...mathutils import SavGol, Interpolate, Scatter, Downbin
try:
import pyfits
except ImportError:
try:
import astropy.io.fits as pyfits
except ImportError:
raise Exception('Please install the `pyfits` package.')
import matplotlib.pyplot as pl
from matplotlib.ticker import ScalarFormatter, MaxNLocator
import k2plr as kplr
kplr_client = kplr.API()
from k2plr.config import KPLR_ROOT
import numpy as np
import george
from tempfile import NamedTemporaryFile
import random
import os
import sys
import shutil
import time
import logging
log = logging.getLogger(__name__)
__all__ = ['Setup', 'Season', 'Breakpoints', 'GetData', 'GetNeighbors',
'Statistics', 'TargetDirectory', 'HasShortCadence', 'DVSFile',
'InjectionStatistics', 'HDUCards', 'CSVFile', 'FITSFile', 'FITSUrl',
'CDPP', 'GetTargetCBVs', 'FitCBVs', 'PlanetStatistics',
'StatsToCSV']
def Setup():
'''
Called when the code is installed. Sets up directories and downloads
the K2 catalog.
'''
if not os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'cbv')):
os.makedirs(os.path.join(EVEREST_DAT, 'k2', 'cbv'))
GetK2Stars(clobber=False)
def Season(EPIC, **kwargs):
'''
Returns the campaign number for a given EPIC target.
'''
return Campaign(EPIC, **kwargs)
def Breakpoints(EPIC, season=None, cadence='lc', **kwargs):
'''
Returns the location of the breakpoints for a given target.
:param int EPIC: The EPIC ID number
:param str cadence: The light curve cadence. Default `lc`
.. note :: The number corresponding to a given breakpoint is the number \
of cadences *since the beginning of the campaign*.
'''
# Get the campaign number
if season is None:
campaign = Season(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s."
% campaign)
else:
campaign = season
# Select LC or SC
if cadence == 'lc':
breakpoints = {
0: [665], # OK
1: [2210], # OK
2: [2042], # OK
3: [2140], # OK
4: [520, 2153], # OK
5: [1774], # OK
6: [2143], # OK
7: [1192, 2319], # OK
8: [1950], # OK
91: [],
92: [],
101: [], # NO DATA
102: [], # NO BREAKPOINT
111: [],
112: [],
12: [1900], # OK
13: [2157], # OK
14: [1950], # OK
15: [2150], # OK
16: [1945], # OK
17: [1640], # OK
18: [] # Short campaign
}
elif cadence == 'sc':
breakpoints = {
0: np.array([3753, 11259, 15012, 18765, 60048, # OK
63801, 67554, 71307, 75060, 78815,
82566, 86319, 90072, 93825, 97578,
101331, 105084, 108837]),
1: np.array([8044, 12066, 16088, 20135, 24132, 28154, # OK
32176, 36198, 40220, 44242, 48264, 52286,
56308, 60330, 64352, 68374, 72396, 76418,
80440, 84462, 88509, 92506, 96528, 100550,
104572, 108594, 112616, 116638]),
2: np.array(np.linspace(0, 115680, 31)[1:-1], dtype=int), # OK
3: np.array([3316, 6772, 10158, 13694, 16930, 20316, # OK
23702, 27088, 30474, 33860, 37246, 40632,
44018, 47404, 50790, 54176, 57562, 60948,
64334, 67720, 71106, 74492, 77878, 81264,
84650, 88036, 91422, 94808, 98194]),
4: np.array(np.linspace(0, 101580, 31)[1:-1], dtype=int), # OK
5: np.array([3663, 7326, 10989, 14652, 18315, 21978, # OK
25641, 29304, 32967, 36630, 40293, 43956,
47619, 51282, 54945, 58608, 62271, 65934,
69597, 73260, 76923, 80646, 84249, 87912,
91575, 95238, 98901, 102564, 106227]),
6: np.array(np.linspace(0, 115890, 31)[1:-1], dtype=int), # OK
7: np.array(np.linspace(0, 121290, 31)[1:-1], dtype=int), # OK
# Unclear
8: np.array(np.linspace(0, 115590, 31)[1:-1], dtype=int),
91: [],
92: [],
101: [],
102: [],
111: [],
112: [],
12: [],
13: [],
14: [],
15: [],
16: [],
17: [],
18: []
}
else:
raise ValueError("Invalid value for the cadence.")
# Return
if campaign in breakpoints:
return breakpoints[campaign]
else:
return None
def CDPP(flux, mask=[], cadence='lc'):
'''
Compute the proxy 6-hr CDPP metric.
:param array_like flux: The flux array to compute the CDPP for
:param array_like mask: The indices to be masked
:param str cadence: The light curve cadence. Default `lc`
'''
# 13 cadences is 6.5 hours
rmswin = 13
# Smooth the data on a 2 day timescale
svgwin = 49
# If short cadence, need to downbin
if cadence == 'sc':
newsize = len(flux) // 30
flux = Downbin(flux, newsize, operation='mean')
flux_savgol = SavGol(np.delete(flux, mask), win=svgwin)
if len(flux_savgol):
return Scatter(flux_savgol / np.nanmedian(flux_savgol),
remove_outliers=True, win=rmswin)
else:
return np.nan
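# Illustrative use on synthetic long-cadence data (the flux values are made up
# purely to show the call signature):
#   flux = 1.0 + 50e-6 * np.random.randn(4000)
#   proxy_cdpp = CDPP(flux, mask=[], cadence='lc')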
def GetData(EPIC, season=None, cadence='lc', clobber=False, delete_raw=False,
aperture_name='k2sff_15', saturated_aperture_name='k2sff_19',
max_pixels=75, download_only=False, saturation_tolerance=-0.1,
bad_bits=[1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 16, 17],
get_hires=True,
get_nearby=True, **kwargs):
'''
Returns a :py:obj:`DataContainer` instance with the
raw data for the target.
:param int EPIC: The EPIC ID number
:param int season: The observing season (campaign). Default :py:obj:`None`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param bool delete_raw: Delete the FITS TPF after processing it? \
Default :py:obj:`False`
:param str aperture_name: The name of the aperture to use. Select \
`custom` to call :py:func:`GetCustomAperture`. Default `k2sff_15`
:param str saturated_aperture_name: The name of the aperture to use if \
the target is saturated. Default `k2sff_19`
:param int max_pixels: Maximum number of pixels in the TPF. Default 75
:param bool download_only: Download raw TPF and return? Default \
:py:obj:`False`
:param float saturation_tolerance: Target is considered saturated \
if flux is within this fraction of the pixel well depth. \
Default -0.1
:param array_like bad_bits: Flagged :py:obj`QUALITY` bits to consider \
outliers when computing the model. \
Default `[1,2,3,4,5,6,7,8,9,11,12,13,14,16,17]`
:param bool get_hires: Download a high resolution image of the target? \
Default :py:obj:`True`
:param bool get_nearby: Retrieve location of nearby sources? \
Default :py:obj:`True`
'''
# Campaign no.
if season is None:
campaign = Season(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s."
% campaign)
else:
campaign = season
# Is there short cadence data available for this target?
# DEBUG: Disabling short cadence for now!
short_cadence = False #HasShortCadence(EPIC, season=campaign)
if cadence == 'sc' and not short_cadence:
raise ValueError("Short cadence data not available for this target.")
# Local file name
filename = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % campaign,
('%09d' % EPIC)[:4] + '00000', ('%09d' % EPIC)[4:],
'data.npz')
# Download?
if clobber or not os.path.exists(filename):
# Get the TPF
tpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files',
str(EPIC), 'ktwo%09d-c%02d_lpd-targ.fits.gz'
% (EPIC, campaign))
sc_tpf = os.path.join(KPLR_ROOT, 'data', 'k2', 'target_pixel_files',
str(EPIC), 'ktwo%09d-c%02d_spd-targ.fits.gz'
% (EPIC, campaign))
if clobber or not os.path.exists(tpf):
# DEBUG: Disabling short cadence for now!
kplr_client.k2_star(EPIC).get_target_pixel_files(fetch=True,
short_cadence=False)
with pyfits.open(tpf) as f:
qdata = f[1].data
# Get the TPF aperture
tpf_aperture = (f[2].data & 2) // 2
# Get the enlarged TPF aperture
tpf_big_aperture = np.array(tpf_aperture)
for i in range(tpf_big_aperture.shape[0]):
for j in range(tpf_big_aperture.shape[1]):
if f[2].data[i][j] == 1:
for n in [(i - 1, j), (i + 1, j),
(i, j - 1), (i, j + 1)]:
if n[0] >= 0 and n[0] < tpf_big_aperture.shape[0]:
if n[1] >= 0 and n[1] < \
tpf_big_aperture.shape[1]:
if tpf_aperture[n[0]][n[1]] == 1:
tpf_big_aperture[i][j] = 1
# Is there short cadence data?
if short_cadence:
with pyfits.open(sc_tpf) as f:
sc_qdata = f[1].data
# Get K2SFF apertures
try:
k2sff = kplr.K2SFF(EPIC, sci_campaign=campaign)
k2sff_apertures = k2sff.apertures
if delete_raw:
os.remove(k2sff._file)
except:
k2sff_apertures = [None for i in range(20)]
# Make a dict of all our apertures
# We're not getting K2SFF apertures 0-9 any more
apertures = {'tpf': tpf_aperture, 'tpf_big': tpf_big_aperture}
for i in range(10, 20):
apertures.update({'k2sff_%02d' % i: k2sff_apertures[i]})
# Get the header info
fitsheader = [pyfits.getheader(tpf, 0).cards,
pyfits.getheader(tpf, 1).cards,
pyfits.getheader(tpf, 2).cards]
if short_cadence:
sc_fitsheader = [pyfits.getheader(sc_tpf, 0).cards,
pyfits.getheader(sc_tpf, 1).cards,
pyfits.getheader(sc_tpf, 2).cards]
else:
sc_fitsheader = None
# Get a hi res image of the target
if get_hires:
hires = GetHiResImage(EPIC)
else:
hires = None
# Get nearby sources
if get_nearby:
nearby = GetSources(EPIC)
else:
nearby = []
# Delete?
if delete_raw:
os.remove(tpf)
if short_cadence:
os.remove(sc_tpf)
# Get the arrays
cadn = np.array(qdata.field('CADENCENO'), dtype='int32')
time = np.array(qdata.field('TIME'), dtype='float64')
fpix = np.array(qdata.field('FLUX'), dtype='float64')
fpix_err = np.array(qdata.field('FLUX_ERR'), dtype='float64')
qual = np.array(qdata.field('QUALITY'), dtype=int)
# Get rid of NaNs in the time array by interpolating
naninds = np.where(np.isnan(time))
time = Interpolate(np.arange(0, len(time)), naninds, time)
# Get the motion vectors (if available!)
pc1 = np.array(qdata.field('POS_CORR1'), dtype='float64')
pc2 = np.array(qdata.field('POS_CORR2'), dtype='float64')
if not np.all(np.isnan(pc1)) and not np.all(np.isnan(pc2)):
pc1 = Interpolate(time, np.where(np.isnan(pc1)), pc1)
pc2 = Interpolate(time, np.where(np.isnan(pc2)), pc2)
else:
pc1 = None
pc2 = None
# Do the same for short cadence
if short_cadence:
sc_cadn = np.array(sc_qdata.field('CADENCENO'), dtype='int32')
sc_time = np.array(sc_qdata.field('TIME'), dtype='float64')
sc_fpix = np.array(sc_qdata.field('FLUX'), dtype='float64')
sc_fpix_err = np.array(sc_qdata.field('FLUX_ERR'), dtype='float64')
sc_qual = np.array(sc_qdata.field('QUALITY'), dtype=int)
sc_naninds = np.where(np.isnan(sc_time))
sc_time = Interpolate(
np.arange(0, len(sc_time)), sc_naninds, sc_time)
sc_pc1 = np.array(sc_qdata.field('POS_CORR1'), dtype='float64')
sc_pc2 = np.array(sc_qdata.field('POS_CORR2'), dtype='float64')
if not np.all(np.isnan(sc_pc1)) and not np.all(np.isnan(sc_pc2)):
sc_pc1 = Interpolate(
sc_time, np.where(np.isnan(sc_pc1)), sc_pc1)
sc_pc2 = Interpolate(
sc_time, np.where(np.isnan(sc_pc2)), sc_pc2)
else:
sc_pc1 = None
sc_pc2 = None
else:
sc_cadn = None
sc_time = None
sc_fpix = None
sc_fpix_err = None
sc_qual = None
sc_pc1 = None
sc_pc2 = None
# Static pixel images for plotting
pixel_images = [fpix[0], fpix[len(fpix) // 2], fpix[len(fpix) - 1]]
# Atomically write to disk.
# http://stackoverflow.com/questions/2333872/
# atomic-writing-to-file-with-python
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
f = NamedTemporaryFile("wb", delete=False)
np.savez_compressed(f, cadn=cadn, time=time, fpix=fpix,
fpix_err=fpix_err,
qual=qual, apertures=apertures,
pc1=pc1, pc2=pc2, fitsheader=fitsheader,
pixel_images=pixel_images, nearby=nearby,
hires=hires,
sc_cadn=sc_cadn, sc_time=sc_time, sc_fpix=sc_fpix,
sc_fpix_err=sc_fpix_err, sc_qual=sc_qual,
sc_pc1=sc_pc1, sc_pc2=sc_pc2,
sc_fitsheader=sc_fitsheader)
f.flush()
os.fsync(f.fileno())
f.close()
shutil.move(f.name, filename)
if download_only:
return
# Load
data = np.load(filename)
apertures = data['apertures'][()]
pixel_images = data['pixel_images']
nearby = data['nearby']
hires = data['hires'][()]
if cadence == 'lc':
fitsheader = data['fitsheader']
cadn = data['cadn']
time = data['time']
fpix = data['fpix']
fpix_err = data['fpix_err']
qual = data['qual']
pc1 = data['pc1']
pc2 = data['pc2']
elif cadence == 'sc':
fitsheader = data['sc_fitsheader']
cadn = data['sc_cadn']
time = data['sc_time']
fpix = data['sc_fpix']
fpix_err = data['sc_fpix_err']
qual = data['sc_qual']
pc1 = data['sc_pc1']
pc2 = data['sc_pc2']
else:
raise ValueError("Invalid value for the cadence.")
# Select the "saturated aperture" to check if the star is saturated
# If it is, we will use this aperture instead
if saturated_aperture_name == 'custom':
saturated_aperture = GetCustomAperture(data)
else:
if saturated_aperture_name is None:
saturated_aperture_name = 'k2sff_19'
saturated_aperture = apertures[saturated_aperture_name]
if saturated_aperture is None:
log.error("Invalid aperture selected. Defaulting to `tpf_big`.")
saturated_aperture_name = 'tpf_big'
saturated_aperture = apertures[saturated_aperture_name]
# HACK: Some C05 K2SFF apertures don't match the target pixel file
# pixel grid size. This is likely because they're defined on the M67
# superstamp. For now, let's ignore these stars.
if saturated_aperture.shape != fpix.shape[1:]:
log.error("Aperture size mismatch!")
return None
# Compute the saturation flux and the 97.5th percentile
# flux in each pixel of the saturated aperture. We're going
# to compare these to decide if the star is saturated.
satflx = SaturationFlux(EPIC, campaign=campaign) * \
(1. + saturation_tolerance)
f97 = np.zeros((fpix.shape[1], fpix.shape[2]))
for i in range(fpix.shape[1]):
for j in range(fpix.shape[2]):
if saturated_aperture[i, j]:
# Let's remove NaNs...
tmp = np.delete(fpix[:, i, j], np.where(
np.isnan(fpix[:, i, j])))
# ... and really bad outliers...
if len(tmp):
f = SavGol(tmp)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
bad = np.where((f > med + 10. * MAD) |
(f < med - 10. * MAD))[0]
np.delete(tmp, bad)
# ... so we can compute the 97.5th percentile flux
i97 = int(0.975 * len(tmp))
tmp = tmp[np.argsort(tmp)[i97]]
f97[i, j] = tmp
# Check if any of the pixels are actually saturated
if np.nanmax(f97) <= satflx:
log.info("No saturated columns detected.")
saturated = False
else:
log.info("Saturated pixel(s) found. Switching to aperture `%s`." %
saturated_aperture_name)
aperture_name = saturated_aperture_name
saturated = True
# Now grab the aperture we'll actually use
if aperture_name == 'custom':
aperture = GetCustomAperture(data)
else:
if aperture_name is None:
aperture_name = 'k2sff_15'
aperture = apertures[aperture_name]
if aperture is None:
log.error("Invalid aperture selected. Defaulting to `tpf_big`.")
aperture_name = 'tpf_big'
aperture = apertures[aperture_name]
# HACK: Some C05 K2SFF apertures don't match the target pixel file
# pixel grid size. This is likely because they're defined on the M67
# superstamp. For now, let's ignore these stars.
if aperture.shape != fpix.shape[1:]:
log.error("Aperture size mismatch!")
return None
# Now we check if the aperture is too big. Can lead to memory errors...
# Treat saturated and unsaturated stars differently.
if saturated:
# Need to check if we have too many pixels *after* collapsing columns.
        # Sort the apertures in decreasing order of pixels, but keep the
        # aperture chosen by the user first.
aperture_names = np.array(list(apertures.keys()))
npix_per_aperture = np.array(
[np.sum(apertures[k]) for k in aperture_names])
aperture_names = aperture_names[np.argsort(npix_per_aperture)[::-1]]
aperture_names = np.append([aperture_name], np.delete(
aperture_names, np.argmax(aperture_names == aperture_name)))
# Loop through them. Pick the first one that satisfies
# the `max_pixels` constraint
for aperture_name in aperture_names:
aperture = apertures[aperture_name]
aperture[np.isnan(fpix[0])] = 0
ncol = 0
apcopy = np.array(aperture)
for j in range(apcopy.shape[1]):
if np.any(f97[:, j] > satflx):
apcopy[:, j] = 0
ncol += 1
if np.sum(apcopy) + ncol <= max_pixels:
break
if np.sum(apcopy) + ncol > max_pixels:
log.error(
"No apertures available with fewer than %d pixels. Aborting."
% max_pixels)
return None
# Now, finally, we collapse the saturated columns into single pixels
# and make the pixel array 2D
ncol = 0
fpixnew = []
ferrnew = []
# HACK: K2SFF sometimes clips the heads/tails of saturated columns
# That's really bad, since that's where all the information is. Let's
# artificially extend the aperture by two pixels at the top and bottom
# of each saturated column. This *could* increase contamination, but
# it's unlikely since the saturated target is by definition really
# bright
ext = 0
for j in range(aperture.shape[1]):
if np.any(f97[:, j] > satflx):
for i in range(aperture.shape[0]):
if (aperture[i, j] == 0) and \
(np.nanmedian(fpix[:, i, j]) > 0):
if (i + 2 < aperture.shape[0]) and \
aperture[i + 2, j] == 1:
aperture[i, j] = 2
ext += 1
elif (i + 1 < aperture.shape[0]) and \
aperture[i + 1, j] == 1:
aperture[i, j] = 2
ext += 1
elif (i - 1 >= 0) and aperture[i - 1, j] == 1:
aperture[i, j] = 2
ext += 1
elif (i - 2 >= 0) and aperture[i - 2, j] == 1:
aperture[i, j] = 2
ext += 1
if ext:
log.info("Extended saturated columns by %d pixel(s)." % ext)
for j in range(aperture.shape[1]):
if np.any(f97[:, j] > satflx):
marked = False
collapsed = np.zeros(len(fpix[:, 0, 0]))
collapsed_err2 = np.zeros(len(fpix[:, 0, 0]))
for i in range(aperture.shape[0]):
if aperture[i, j]:
if not marked:
aperture[i, j] = AP_COLLAPSED_PIXEL
marked = True
else:
aperture[i, j] = AP_SATURATED_PIXEL
collapsed += fpix[:, i, j]
collapsed_err2 += fpix_err[:, i, j] ** 2
if np.any(collapsed):
fpixnew.append(collapsed)
ferrnew.append(np.sqrt(collapsed_err2))
ncol += 1
else:
for i in range(aperture.shape[0]):
if aperture[i, j]:
fpixnew.append(fpix[:, i, j])
ferrnew.append(fpix_err[:, i, j])
fpix2D = np.array(fpixnew).T
fpix_err2D = np.array(ferrnew).T
log.info("Collapsed %d saturated column(s)." % ncol)
else:
# Check if there are too many pixels
if np.sum(aperture) > max_pixels:
# This case is simpler: we just pick the largest aperture
# that's less than or equal to `max_pixels`
keys = list(apertures.keys())
npix = np.array([np.sum(apertures[k]) for k in keys])
aperture_name = keys[np.argmax(npix * (npix <= max_pixels))]
aperture = apertures[aperture_name]
aperture[np.isnan(fpix[0])] = 0
if np.sum(aperture) > max_pixels:
log.error("No apertures available with fewer than " +
"%d pixels. Aborting." % max_pixels)
return None
log.warn(
"Selected aperture is too big. Proceeding with aperture " +
"`%s` instead." % aperture_name)
# Make the pixel flux array 2D
aperture[np.isnan(fpix[0])] = 0
ap = np.where(aperture & 1)
fpix2D = np.array([f[ap] for f in fpix], dtype='float64')
fpix_err2D = np.array([p[ap] for p in fpix_err], dtype='float64')
# Compute the background
binds = np.where(aperture ^ 1)
if RemoveBackground(EPIC, campaign=campaign) and (len(binds[0]) > 0):
bkg = np.nanmedian(np.array([f[binds]
for f in fpix], dtype='float64'), axis=1)
# Uncertainty of the median:
# http://davidmlane.com/hyperstat/A106993.html
bkg_err = 1.253 * np.nanmedian(np.array([e[binds] for e in fpix_err],
dtype='float64'), axis=1) \
/ np.sqrt(len(binds[0]))
bkg = bkg.reshape(-1, 1)
bkg_err = bkg_err.reshape(-1, 1)
else:
bkg = 0.
bkg_err = 0.
# Make everything 2D and remove the background
fpix = fpix2D - bkg
fpix_err = np.sqrt(fpix_err2D ** 2 + bkg_err ** 2)
flux = np.sum(fpix, axis=1)
ferr = np.sqrt(np.sum(fpix_err ** 2, axis=1))
# Get NaN data points
nanmask = np.where(np.isnan(flux) | (flux == 0))[0]
# Get flagged data points -- we won't train our model on them
badmask = []
for b in bad_bits:
badmask += list(np.where(qual & 2 ** (b - 1))[0])
# Flag >10 sigma outliers -- same thing.
tmpmask = np.array(list(set(np.concatenate([badmask, nanmask]))))
t = np.delete(time, tmpmask)
f = np.delete(flux, tmpmask)
f = SavGol(f)
med = np.nanmedian(f)
MAD = 1.4826 * np.nanmedian(np.abs(f - med))
bad = np.where((f > med + 10. * MAD) | (f < med - 10. * MAD))[0]
badmask.extend([np.argmax(time == t[i]) for i in bad])
# Campaign 2 hack: the first day or two are screwed up
if campaign == 2:
badmask.extend(np.where(time < 2061.5)[0])
# TODO: Fix time offsets in first half of
# Campaign 0. See note in everest 1.0 code
# Finalize the mask
badmask = np.array(sorted(list(set(badmask))))
# Interpolate the nans
fpix = Interpolate(time, nanmask, fpix)
fpix_err = Interpolate(time, nanmask, fpix_err)
# Return
data = DataContainer()
data.ID = EPIC
data.campaign = campaign
data.cadn = cadn
data.time = time
data.fpix = fpix
data.fpix_err = fpix_err
data.nanmask = nanmask
data.badmask = badmask
data.aperture = aperture
data.aperture_name = aperture_name
data.apertures = apertures
data.quality = qual
data.Xpos = pc1
data.Ypos = pc2
data.meta = fitsheader
data.mag = fitsheader[0]['KEPMAG'][1]
data.pixel_images = pixel_images
data.nearby = nearby
data.hires = hires
data.saturated = saturated
data.bkg = bkg
return data
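# A hypothetical call (the EPIC ID and campaign are placeholders; any target
# with a raw TPF on disk or on MAST works):
#   data = GetData(201367065, season=1, cadence='lc')
#   data.time, data.fpix, data.aperture   # inputs for the de-trending step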
def GetNeighbors(EPIC, season=None, model=None, neighbors=10,
mag_range=(11., 13.),
cdpp_range=None, aperture_name='k2sff_15',
cadence='lc', **kwargs):
'''
Return `neighbors` random bright stars on the same module as `EPIC`.
:param int EPIC: The EPIC ID number
:param str model: The :py:obj:`everest` model name. Only used when \
imposing CDPP bounds. Default :py:obj:`None`
:param int neighbors: Number of neighbors to return. Default 10
:param str aperture_name: The name of the aperture to use. Select \
`custom` to call \
:py:func:`GetCustomAperture`. Default `k2sff_15`
:param str cadence: The light curve cadence. Default `lc`
:param tuple mag_range: (`low`, `high`) values for the Kepler magnitude. \
Default (11, 13)
:param tuple cdpp_range: (`low`, `high`) values for the de-trended CDPP. \
Default :py:obj:`None`
'''
# Zero neighbors?
if neighbors == 0:
return []
# Get the IDs
# Campaign no.
if season is None:
campaign = Season(EPIC)
if hasattr(campaign, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s."
% campaign)
else:
campaign = season
epics, kepmags, channels, short_cadence = np.array(GetK2Stars()[
campaign]).T
short_cadence = np.array(short_cadence, dtype=bool)
epics = np.array(epics, dtype=int)
c = GetNeighboringChannels(Channel(EPIC, campaign=season))
# Manage kwargs
if aperture_name is None:
aperture_name = 'k2sff_15'
if mag_range is None:
mag_lo = -np.inf
mag_hi = np.inf
else:
mag_lo = mag_range[0]
mag_hi = mag_range[1]
# K2-specific tweak. The short cadence stars are preferentially
# really bright ones, so we won't get many neighbors if we
# stick to the default magnitude range! I'm
# therefore enforcing a lower magnitude cut-off of 8.
if cadence == 'sc':
mag_lo = 8.
if cdpp_range is None:
cdpp_lo = -np.inf
cdpp_hi = np.inf
else:
cdpp_lo = cdpp_range[0]
cdpp_hi = cdpp_range[1]
targets = []
# First look for nearby targets, then relax the constraint
# If still no targets, widen magnitude range
for n in range(3):
if n == 0:
nearby = True
elif n == 1:
nearby = False
elif n == 2:
mag_lo -= 1
mag_hi += 1
# Loop over all stars
for star, kp, channel, sc in zip(epics, kepmags, channels, short_cadence):
# Preliminary vetting
if not (((channel in c) if nearby else True) and (kp < mag_hi) \
and (kp > mag_lo) and (sc if cadence == 'sc' else True)):
continue
# Reject if self or if already in list
if (star == EPIC) or (star in targets):
continue
# Ensure raw light curve file exists
if not os.path.exists(
os.path.join(TargetDirectory(star, campaign), 'data.npz')):
continue
# Ensure crowding is OK. This is quite conservative, as we
# need to prevent potential astrophysical false positive
# contamination from crowded planet-hosting neighbors when
# doing neighboring PLD.
contam = False
data = np.load(os.path.join(
TargetDirectory(star, campaign), 'data.npz'))
aperture = data['apertures'][()][aperture_name]
# Check that the aperture exists!
if aperture is None:
continue
fpix = data['fpix']
for source in data['nearby'][()]:
# Ignore self
if source['ID'] == star:
continue
# Ignore really dim stars
if source['mag'] < kp - 5:
continue
# Compute source position
x = int(np.round(source['x'] - source['x0']))
y = int(np.round(source['y'] - source['y0']))
# If the source is within two pixels of the edge
# of the target aperture, reject the target
for j in [x - 2, x - 1, x, x + 1, x + 2]:
if j < 0:
# Outside the postage stamp
continue
for i in [y - 2, y - 1, y, y + 1, y + 2]:
if i < 0:
# Outside the postage stamp
continue
try:
if aperture[i][j]:
# Oh-oh!
contam = True
except IndexError:
# Out of bounds... carry on!
pass
if contam:
continue
# HACK: This happens for K2SFF M67 targets in C05.
# Let's skip them
if aperture.shape != fpix.shape[1:]:
continue
# Reject if the model is not present
if model is not None:
if not os.path.exists(os.path.join(
TargetDirectory(star, campaign), model + '.npz')):
continue
# Reject if CDPP out of range
if cdpp_range is not None:
cdpp = np.load(os.path.join(TargetDirectory(
star, campaign), model + '.npz'))['cdpp']
if (cdpp > cdpp_hi) or (cdpp < cdpp_lo):
continue
# Passed all the tests!
targets.append(star)
# Do we have enough? If so, return
if len(targets) == neighbors:
random.shuffle(targets)
return targets
# If we get to this point, we didn't find enough neighbors...
# Return what we have anyway.
return targets
def PlanetStatistics(model='nPLD', compare_to='k2sff', **kwargs):
'''
Computes and plots the CDPP statistics comparison between `model` and
`compare_to` for all known K2 planets.
:param str model: The :py:obj:`everest` model name
:param str compare_to: The :py:obj:`everest` model name or \
other K2 pipeline name
'''
# Load all planet hosts
f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables', 'planets.tsv')
epic, campaign, kp, _, _, _, _, _, _ = np.loadtxt(
f, unpack=True, skiprows=2)
epic = np.array(epic, dtype=int)
campaign = np.array(campaign, dtype=int)
cdpp = np.zeros(len(epic))
saturated = np.zeros(len(epic), dtype=int)
cdpp_1 = np.zeros(len(epic))
# Get the stats
for c in set(campaign):
# Everest model
f = os.path.join(EVEREST_SRC, 'missions', 'k2',
'tables', 'c%02d_%s.cdpp' % (int(c), model))
e0, _, _, c0, _, _, _, _, s0 = np.loadtxt(f, unpack=True, skiprows=2)
for i, e in enumerate(epic):
if e in e0:
j = np.argmax(e0 == e)
cdpp[i] = c0[j]
saturated[i] = s0[j]
# Comparison model
f = os.path.join(EVEREST_SRC, 'missions', 'k2', 'tables',
'c%02d_%s.cdpp' % (int(c), compare_to.lower()))
if not os.path.exists(f):
continue
if compare_to.lower() in ['everest1', 'k2sff', 'k2sc']:
e1, c1 = np.loadtxt(f, unpack=True, skiprows=2)
else:
e1, _, _, c1, _, _, _, _, _ = np.loadtxt(
f, unpack=True, skiprows=2)
for i, e in enumerate(epic):
if e in e1:
j = np.argmax(e1 == e)
cdpp_1[i] = c1[j]
sat = np.where(saturated == 1)
unsat = np.where(saturated == 0)
# Plot the equivalent of the Aigrain+16 figure
fig, ax = pl.subplots(1)
fig.canvas.set_window_title(
'K2 Planet Hosts: %s versus %s' % (model, compare_to))
x = kp
y = (cdpp - cdpp_1) / cdpp_1
ax.scatter(x[unsat], y[unsat], color='b', marker='.',
alpha=0.5, zorder=-1, picker=True)
ax.scatter(x[sat], y[sat], color='r', marker='.',
alpha=0.5, zorder=-1, picker=True)
ax.set_ylim(-1, 1)
ax.set_xlim(8, 18)
ax.axhline(0, color='gray', lw=2, zorder=-99, alpha=0.5)
ax.axhline(0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
ax.axhline(-0.5, color='gray', ls='--', lw=2, zorder=-99, alpha=0.5)
ax.set_title(r'K2 Planet Hosts', fontsize=18)
ax.set_ylabel(r'Relative CDPP', fontsize=18)
ax.set_xlabel('Kepler Magnitude', fontsize=18)
# Pickable points
Picker = StatsPicker([ax], [kp], [y], epic,
model=model, compare_to=compare_to)
fig.canvas.mpl_connect('pick_event', Picker)
# Show
pl.show()
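# A hypothetical call comparing the default nPLD model against K2SFF:
#   PlanetStatistics(model='nPLD', compare_to='k2sff')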
def ShortCadenceStatistics(campaign=None, clobber=False, model='nPLD',
plot=True, **kwargs):
'''
Computes and plots the CDPP statistics comparison between short cadence
and long cadence de-trended light curves
:param campaign: The campaign number or list of campaign numbers. \
Default is to plot all campaigns
:param bool clobber: Overwrite existing files? Default :py:obj:`False`
:param str model: The :py:obj:`everest` model name
:param bool plot: Default :py:obj:`True`
'''
# Check campaign
if campaign is None:
campaign = np.arange(9)
else:
campaign = np.atleast_1d(campaign)
# Update model name
model = '%s.sc' % model
# Compute the statistics
for camp in campaign:
sub = np.array(GetK2Campaign(
camp, cadence='sc', epics_only=True), dtype=int)
outfile = os.path.join(EVEREST_SRC, 'missions', 'k2',
'tables', 'c%02d_%s.cdpp' % (int(camp), model))
if clobber or not os.path.exists(outfile):
with open(outfile, 'w') as f:
print("EPIC Kp Raw CDPP " +
"Everest CDPP Saturated", file=f)
print("--------- ------ --------- " +
"------------ ---------", file=f)
all = GetK2Campaign(int(camp), cadence='sc')
                stars = np.array([s[0] for s in all], dtype=int)
import os
import math
import warnings
import numpy as np
import pandas as pd
import gmhazard_calc.constants as const
from gmhazard_calc.im import IM, IMType
from qcore import nhm
def calculate_rupture_rates(
nhm_df: pd.DataFrame,
rup_name: str = "rupture_name",
annual_rec_prob_name: str = "annual_rec_prob",
mag_name: str = "mag_name",
) -> pd.DataFrame:
"""Takes in a list of background ruptures and
calculates the rupture rates for the given magnitudes
The rupture rate calculation is based on the Gutenberg-Richter equation from OpenSHA.
It discretises the recurrance rate per magnitude instead of storing the probability of
rupture exceeding a certain magnitude
https://en.wikipedia.org/wiki/Gutenberg%E2%80%93Richter_law
https://github.com/opensha/opensha-core/blob/master/src/org/opensha/sha/magdist/GutenbergRichterMagFreqDist.java
Also includes the rupture magnitudes
"""
data = np.ndarray(
sum(nhm_df.n_mags),
dtype=[
(rup_name, str, 64),
(annual_rec_prob_name, np.float64),
(mag_name, np.float64),
],
)
# Make an array of fault bounds so the ith faults has
# the ruptures indexes[i]-indexes[i+1]-1 (inclusive)
indexes = np.cumsum(nhm_df.n_mags.values)
indexes = np.insert(indexes, 0, 0)
index_mask = np.zeros(len(data), dtype=bool)
warnings.filterwarnings(
"ignore", message="invalid value encountered in true_divide"
)
for i, line in nhm_df.iterrows():
index_mask[indexes[i] : indexes[i + 1]] = True
# Generate the magnitudes for each rupture
sample_mags = np.linspace(line.M_min, line.M_cutoff, line.n_mags)
for ii, iii in enumerate(range(indexes[i], indexes[i + 1])):
data[rup_name][iii] = create_ds_rupture_name(
line.source_lat,
line.source_lon,
line.source_depth,
sample_mags[ii],
line.tect_type,
)
# Calculate the cumulative rupture rate for each rupture
baseline = (
line.b
* math.log(10, 2.72)
/ (1 - 10 ** (-1 * line.b * (line.M_cutoff - line.M_min)))
)
f_m_mag = np.power(10, (-1 * line.b * (sample_mags - line.M_min))) * baseline
f_m_mag = np.append(f_m_mag, 0)
rup_prob = (f_m_mag[:-1] + f_m_mag[1:]) / 2 * 0.1
total_cumulative_rate = rup_prob * line.totCumRate
# normalise
total_cumulative_rate = (
line.totCumRate * total_cumulative_rate / np.sum(total_cumulative_rate)
)
data[mag_name][index_mask] = sample_mags
data[annual_rec_prob_name][index_mask] = total_cumulative_rate
index_mask[indexes[i] : indexes[i + 1]] = False
background_values = pd.DataFrame(data=data)
background_values.fillna(0, inplace=True)
return background_values
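# Self-contained sketch of the Gutenberg-Richter discretisation used above.
# The b-value, magnitude range and total rate are illustrative numbers, and
# np.log(10) is used where the code above approximates ln(10) with
# math.log(10, 2.72):
#   b, M_min, M_cutoff, n_mags, totCumRate = 1.0, 5.0, 7.2, 23, 0.01
#   mags = np.linspace(M_min, M_cutoff, n_mags)
#   baseline = b * np.log(10) / (1 - 10 ** (-b * (M_cutoff - M_min)))
#   f_m = np.append(np.power(10, -b * (mags - M_min)) * baseline, 0)
#   rates = (f_m[:-1] + f_m[1:]) / 2 * 0.1
#   rates = totCumRate * rates / np.sum(rates)   # rates now sum to totCumRate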
def convert_im_type(im_type: str):
"""Converts the IM type to the standard format,
will be redundant in the future"""
if im_type.startswith("SA"):
return "p" + im_type.replace("p", ".")
return im_type
def get_erf_name(erf_ffp: str) -> str:
"""Gets the erf name, required for rupture ids
Use this function for consistency, instead of doing it manual
"""
return os.path.basename(erf_ffp).split(".")[0]
def pandas_isin(array_1: np.ndarray, array_2: np.ndarray) -> np.ndarray:
"""This is the same as a np.isin,
however is significantly faster for large arrays
https://stackoverflow.com/questions/15939748/check-if-each-element-in-a-numpy-array-is-in-another-array
"""
return pd.Index(pd.unique(array_2)).get_indexer(array_1) >= 0
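# Example (illustrative):
#   pandas_isin(np.array([1, 2, 5]), np.array([2, 3]))
#   # -> array([False,  True, False])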
def get_min_max_values_for_im(im: IM):
"""Get minimum and maximum for the given im. Values for velocity are
    given in cm/s, acceleration in cm/s^2 and Ds in s
"""
if im.is_pSA():
assert im.period is not None, "No period provided for pSA, this is an error"
if im.period <= 0.5:
return 0.005, 10.0
elif 0.5 < im.period <= 1.0:
return 0.005, 7.5
elif 1.0 < im.period <= 3.0:
return 0.0005, 5.0
elif 3.0 < im.period <= 5.0:
return 0.0005, 4.0
elif 5.0 < im.period <= 10.0:
return 0.0005, 3.0
if im.im_type is IMType.PGA:
return 0.0001, 10.0
elif im.im_type is IMType.PGV:
return 1.0, 400.0
elif im.im_type is IMType.CAV:
return 0.0001 * 980, 20.0 * 980.0
elif im.im_type is IMType.AI:
return 0.01, 1000.0
elif im.im_type is IMType.Ds575 or im.im_type is IMType.Ds595:
return 1.0, 400.0
else:
print("Unknown IM, cannot generate a range of IM values. Exiting the program")
exit(1)
def get_im_values(im: IM, n_values: int = 100):
"""
Create an range of values for a given IM according to their min, max
as defined by get_min_max_values
Parameters
----------
im: IM
The IM Object to get im values for
n_values: int
Returns
-------
Array of IM values
"""
start, end = get_min_max_values_for_im(im)
im_values = np.logspace(
start=np.log(start), stop=np.log(end), num=n_values, base=np.e
)
return im_values
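# Usage sketch (illustrative): the returned values are log-spaced, so consecutive
# values have a constant ratio. For pSA at a 1.0 s period the 100 values span
# 0.005 to 7.5 (the bounds from get_min_max_values_for_im above).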
def closest_location(locations, lat, lon):
"""
    Find the position of the closest location in `locations`, a 2D np.array of (lat, lon).
"""
d = (
np.sin(np.radians(locations[:, 0] - lat) / 2.0) ** 2
+ np.cos( | np.radians(lat) | numpy.radians |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 27 14:04:56 2020
@author: geraldod
"""
from numpy import pi, sin, cos, argsort, sqrt, iscomplex, real
from numpy import array, diag, zeros, zeros_like, eye, ones, allclose, argmax, hstack, vstack, block
from scipy.linalg import eig, eigh, cholesky, inv, block_diag
from Gear import GearSet
# import Drivetrain
class model:
def __init__(self, dtrain):
self.drivetrain = dtrain # Drivetrain()
# self.x = 0
self.M = 0
self.K = 0
# self.F = 0
# self.n_DOF = 0
# self.f_n = 0
# self.mode_shape = 0
def modal_analysis(self):
eig_val, mode_shape = eig(self.K, self.M, right = True)
if(not any(iscomplex(eig_val))):
eig_val = real(eig_val)
else:
print('At least one complex eigenvalue detected during the calculation of the symmetric undamped eigenvalue problem.')
# lambda to omega_n:
omega_n = sqrt(eig_val)
# omega_n to Hz:
f_n = omega_n/(2.0*pi)
idx = argsort(f_n)
f_n = f_n[idx]
mode_shape = mode_shape[:, idx]
for i in range(len(f_n)):
j = argmax(abs(mode_shape[:, i]))
mode_shape[:, i] = mode_shape[:, i]/mode_shape[j, i]
return {
'f_n': f_n,
'mode_shape': mode_shape
}
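# Minimal usage sketch of modal_analysis (illustrative, with made-up numbers): it
# solves the generalised eigenvalue problem K.x = w^2 M.x, converts w to Hz and
# normalises each mode by its largest component. The hand-built instance below
# bypasses the Drivetrain dependency on purpose.
def _example_modal_analysis():
    two_dof = model.__new__(model)      # skip __init__, no Drivetrain needed
    two_dof.M = diag([1.0, 2.0])        # made-up inertias
    two_dof.K = array([[ 3.0, -1.0],
                       [-1.0,  1.0]])   # made-up stiffnesses
    res = two_dof.modal_analysis()
    return res['f_n'], res['mode_shape']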
###############################################################################
class torsional_2DOF(model):
def __init__(self, dtrain):
super().__init__(dtrain)
self.n_DOF = 2
self.M = self.__inertia_matrix()
self.K = self.__stiffness_matrix()
modA = self.modal_analysis()
self.f_n = modA['f_n']
self.mode_shape = modA['mode_shape']
def __inertia_matrix(self):
DT = self.drivetrain
J_R = DT.J_Rotor # [kg-m^2], Rotor inertia
J_G = DT.J_Gen # [kg-m^2], Generator inertia
U = DT.u[-1]
M = diag([J_R, J_G*U**2])
return M
def __stiffness_matrix(self):
DT = self.drivetrain
U = DT.u[-1]
k_LSS = DT.main_shaft.stiffness('torsional')
k_HSS = DT.stage[-1].output_shaft.stiffness('torsional')
k = (k_LSS*k_HSS*U**2)/(k_LSS + k_HSS*U**2)
K = k*array([[ 1.0, -1.0],
[-1.0, 1.0]])
return K
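# Note on the equivalent stiffness used above (illustrative): the LSS and HSS act as
# two torsional springs in series, with the HSS stiffness referred to the rotor side
# through the overall gear ratio u, i.e.
#   1/k_eq = 1/k_LSS + 1/(k_HSS*u^2)  =>  k_eq = k_LSS*k_HSS*u^2 / (k_LSS + k_HSS*u^2)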
###############################################################################
class Kahraman_94(model):
def __init__(self, dtrain):
super().__init__(dtrain)
# number of DOFs for each stage:
self.n_DOF = self.__calc_NDOF()
self.M = self.__inertia_matrix()
self.K = self.__stiffness_matrix()
modA = self.modal_analysis()
self.f_n = modA['f_n']
self.mode_shape = modA['mode_shape']
def __calc_NDOF(self):
stage = self.drivetrain.stage
Np = [0, 2]
for i in range(len(stage)):
Np.append(Np[-1] + sum([stage[i].N_p + 1 if(stage[i].configuration == 'parallel')
else stage[i].N_p + 2]))
return Np
def __inertia_matrix(self):
DT = self.drivetrain
N = self.n_DOF
M = zeros((N[-1], N[-1]))
M[0 , 0 ] = DT.J_Rotor # [kg-m^2], Rotor inertia
M[-1, -1] = DT.J_Gen # [kg-m^2], Generator inertia
i = 0
sub_range = slice(N[i], N[i + 1])
M[sub_range,
sub_range] += DT.main_shaft.inertia_matrix('torsional')
for i in range(DT.N_st):
sub_range = slice(N[i + 1] - 1, N[i + 2])
M[sub_range,
sub_range] += Kahraman_94.__stage_inertia_matrix(DT.stage[i])
return M
@staticmethod
def __stage_inertia_matrix(stage):
if(stage.configuration == 'parallel'):
J_p = stage.J_x[0]
J_w = stage.J_x[1]
M = diag([J_w, J_p, 0.0])
elif(stage.configuration == 'planetary'):
J_c = stage.carrier.J_x
J_s = stage.J_x[0]
J_p = stage.J_x[1]
d = [J_c]
[d.append(J_p) for i in range(stage.N_p)]
d.append(J_s)
d.append(0.0)
M = diag(d)
M[-2:, -2:] += stage.output_shaft.inertia_matrix('torsional')
return M
def __stiffness_matrix(self):
DT = self.drivetrain
N = self.n_DOF
K = zeros((N[-1], N[-1]))
i = 0
sub_range = slice(N[i], N[i + 1])
K[sub_range,
sub_range] += DT.main_shaft.stiffness_matrix('torsional')
for i in range(DT.N_st):
sub_range = slice(N[i + 1] - 1, N[i + 2])
K[sub_range,
sub_range] += Kahraman_94.__stage_stiffness_matrix(DT.stage[i])
return K
@staticmethod
def __stage_stiffness_matrix(stage):
if(stage.configuration == 'parallel'):
N = 3
K = zeros((N, N))
r_p = stage.d[0]*1.0e-3/2.0
r_w = stage.d[1]*1.0e-3/2.0
k = stage.k_mesh
K[0:2, 0:2] = k*array([[ r_w**2, r_p*r_w],
[r_p*r_w , r_p**2]])
elif(stage.configuration == 'planetary'):
N = stage.N_p + 3
K = zeros((N, N))
k_1 = stage.sub_set('planet-ring').k_mesh
k_2 = stage.sub_set('sun-planet').k_mesh
r_c = stage.a_w*1.0e-3
r_s = stage.d[0]*1.0e-3/2.0
r_p = stage.d[1]*1.0e-3/2.0
d = [stage.N_p*r_c*(k_1 + k_2)]
[d.append((k_1 + k_2)*r_p**2) for i in range(stage.N_p)]
d.append(stage.N_p*k_2*r_s**2)
d.append(0.0)
pla_lin = ones(stage.N_p + 1)*r_c*r_p*(k_1 - k_2)
pla_lin[-1] = -3.0*k_2*r_s*r_c
pla_col = ones(stage.N_p )*k_2*r_p*r_s
i = stage.N_p + 1
i1 = i + 1
K[0, 1:i1] = pla_lin
K[1:i, -2] = pla_col
K += K.T
K += diag(d)
K[-2:, -2:] += stage.output_shaft.stiffness_matrix('torsional')
return K
###############################################################################
class Lin_Parker_99(model):
def __init__(self, dtrain):
super().__init__(dtrain)
self.n_DOF = self.__calc_
class Lin_Parker_99_mod(model):
def __init__(self, dtrain):
super().__init__(dtrain)
# number of DOFs for each stage:
self.n_DOF = self.__calc_NDOF()
self.M = self.__inertia_matrix()
stiff = self.__stiffness_matrix()
self.K_b = stiff['K_b']
self.K_m = stiff['K_m']
self.K_Omega = stiff['K_Omega']
self.K = self.K_b + self.K_m
modA = self.modal_analysis()
self.f_n = modA['f_n']
self.mode_shape = modA['mode_shape']
def __calc_NDOF(self):
stage = self.drivetrain.stage
Np = [0, 6]
for i in range(len(stage)):
Np.append(Np[-1] + sum([(stage[i].N_p + 1)*3 if(stage[i].configuration == 'parallel')
else (stage[i].N_p + 2)*3]))
return Np
def __inertia_matrix(self):
DT = self.drivetrain
m_R = DT.m_Rotor
J_R = DT.J_Rotor
m_G = DT.m_Gen
J_G = DT.J_Gen
N = self.n_DOF
M = zeros((N[-1], N[-1]))
M[:3, :3 ] = diag([m_R, m_R, J_R]) # Rotor inertia matrix
M[-3:, -3:] = diag([m_G, m_G, J_G]) # Generator inertia matrix
i = 0
sub_range = slice(N[i], N[i + 1])
M[sub_range,
sub_range] += DT.main_shaft.inertia_matrix('Lin_Parker_99')*0
for i in range(DT.N_st):
sub_range = slice(N[i + 1] - 3, N[i + 2])
M[sub_range,
sub_range] += Lin_Parker_99.__stage_inertia_matrix(DT.stage[i])
return M
@staticmethod
def __stage_inertia_matrix(stage):
M_ = lambda m, J: diag([m, m, J])
if(stage.configuration == 'parallel'):
d = [M_(stage.mass[1], stage.J_x[1]), # wheel
M_(stage.mass[0], stage.J_x[0]), # pinion
M_( 0 , 0 )] # output shaft
elif(stage.configuration == 'planetary'):
m_p = stage.mass[1]
J_p = stage.J_x[1]
d = [M_(stage.carrier.mass, stage.carrier.J_x)] # carrier
[d.append(M_(m_p, J_p)) for i in range(stage.N_p)] # planet
d.append( M_(stage.mass[0], stage.J_x[0])) # sun
d.append( M_( 0, 0)) # output shaft
M = block_diag(*d)
M[-6:, -6:] += stage.output_shaft.inertia_matrix('Lin_Parker_99')*0
return M
def __stiffness_matrix(self):
DT = self.drivetrain
N = self.n_DOF
K_b = zeros((N[-1], N[-1]))
K_m = zeros_like(K_b)
K_Omega = zeros_like(K_b)
i = 0
sub_range = slice(N[i], N[i + 1])
K_b[sub_range,
sub_range] += DT.main_shaft.stiffness_matrix('Lin_Parker_99')*0
for i in range(DT.N_st):
stiff = Lin_Parker_99.__stage_stiffness_matrix(DT.stage[i])
sub_range = slice(N[i + 1] - 3, N[i + 2])
K_b[ sub_range, sub_range] += stiff['K_b']
K_m[ sub_range, sub_range] += stiff['K_m']
K_Omega[sub_range, sub_range] += stiff['K_Omega']
return {'K_b' : K_b,
'K_m' : K_m,
'K_Omega': K_Omega}
@staticmethod
def __stage_stiffness_matrix(stage):
# Bearing stiffness sub-matrix:
K_b_ = lambda x, y: diag([x, y, 0])
alpha_n = stage.alpha_n
psi = lambda i: (i - 1)*(2*pi/stage.N_p)
psi_s = lambda i: psi(i) - alpha_n
# psi_r = lambda i: psi(i) + alpha_n
# sun-sun mesh-stiffness matrix:
K_s1 = lambda k, i: k*array([[ sin(psi_s(i))**2, -cos(psi_s(i))*sin(psi_s(i)), -sin(psi_s(i))],
[-cos(psi_s(i))*sin(psi_s(i)) , cos(psi_s(i))**2 , cos(psi_s(i))],
[- sin(psi_s(i)) , cos(psi_s(i)) , 1 ]])
# sun-planet mesh-stiffness matrix:
K_s2 = lambda k, i: k*array([[ sin(psi_s(i))*sin(alpha_n), sin(psi_s(i))*cos(alpha_n), -sin(psi_s(i))],
[-cos(psi_s(i))*sin(alpha_n), -cos(psi_s(i))*cos(alpha_n), cos(psi_s(i))],
[- sin(alpha_n), - cos(alpha_n), 1 ]])
# planet-planet [?] mesh-stiffness matrix:
K_s3 = lambda k : k*array([[ sin(alpha_n)**2 , sin(alpha_n)*cos(alpha_n), -sin(alpha_n)],
[ sin(alpha_n)*cos(alpha_n), cos(alpha_n)**2 , -cos(alpha_n)],
[-sin(alpha_n) , -cos(alpha_n) , 1 ]])
# [?]
K_r3 = lambda k : k*array([[ sin(alpha_n)**2 , - | sin(alpha_n) | numpy.sin |
"""
Configuration and fixtures for pytest.
https://docs.pytest.org/en/latest/fixture.html#conftest-py-sharing-fixture-functions
"""
from typing import Tuple
from unittest.mock import MagicMock
# pylint: disable=redefined-outer-name
import numpy as np
import pytest
from pystork.activations import Relu, Tanh, Sigmoid
from pystork.costs.binary_classfication import BinaryClassificationCost
from pystork.initializers import RandomInitializer
from pystork.layer import Layer
from pystork.model import Model
@pytest.fixture
def layer() -> Layer:
"""
:return: a simple relu layer with configured parameters
"""
layer = Layer(units_number=2, inputs_number=2, activation_function=Relu())
layer.set_parameters(np.array([[1, 0], [0, 1]]), np.array([[0], [0]]))
return layer
@pytest.fixture
def forward_propagation_model() -> Model:
"""
a simple model with one hidden layer used for forward propagation
"""
hidden_layer = Layer(units_number=4, inputs_number=2, activation_function=Tanh())
output_layer = Layer(units_number=1, inputs_number=4, activation_function=Sigmoid())
model = Model(
layers=[hidden_layer, output_layer],
cost_function=BinaryClassificationCost(),
initializer=RandomInitializer(),
)
hidden_layer.W = np.array(
[
[-0.00416758, -0.00056267],
[-0.02136196, 0.01640271],
[-0.01793436, -0.00841747],
[0.00502881, -0.01245288],
]
)
hidden_layer.b = np.array([[1.74481176], [-0.7612069], [0.3190391], [-0.24937038]])
output_layer.W = np.array([-0.01057952, -0.00909008, 0.00551454, 0.02292208])
output_layer.b = | np.array([[-1.3]]) | numpy.array |
import numpy as np
import scipy.signal as sig
import matplotlib.pyplot as plt
import sys
import pprint as pp
import numpy.random as random
sys.path.append("../")
import custom_tools.fftplot as fftplot
import control as con
import control.matlab as ctrl
import custom_tools.handyfuncs as hf
K = 1
GOLz = con.tf(0.83155 * K, [1, -1, 0], 1)
plt.figure()
real, imag, freq = con.nyquist_plot(GOLz, omega=np.linspace(0, np.pi, 1000))
plt.title('Nyquist plot of GOL with K={}'.format(K))
plt.axis([-1.4, .5, -10, 10])
# Modified Nyquist Plot:
# A Nyquist Plot of -1/Gol will show the range of K for stability
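# The characteristic equation 1 + K*GOL = 0 gives K = -1/GOL, so the real-axis
# crossings of the -1/GOL locus mark the gains at the stability boundary, and the
# stable range of K can be read directly off this plot.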
plt.figure()
real, imag, freq = con.nyquist_plot(-1 / GOLz, omega= | np.linspace(0, np.pi, 1000) | numpy.linspace |
"""DEP WEPP cli editor. One "tile" at a time.
Usage:
python daily_climate_editor.py <xtile> <ytile> <tilesz>
<scenario> <YYYY> <mm> <dd>
Tiles start in the lower left corner and are 5x5 deg in size.
The development laptop has data for 3 March 2019, 23 May 2009, and 8 Jun 2009.
"""
try:
from zoneinfo import ZoneInfo # type: ignore
except ImportError:
from backports.zoneinfo import ZoneInfo # type: ignore
from collections import namedtuple
import datetime
import sys
import os
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from tqdm import tqdm
import numpy as np
from scipy.interpolate import NearestNDInterpolator
from osgeo import gdal
from pyiem import iemre
from pyiem.dep import SOUTH, WEST, NORTH, EAST, get_cli_fname
from pyiem.util import ncopen, logger, convert_value, utc
LOG = logger()
CENTRAL = ZoneInfo("America/Chicago")
UTC = datetime.timezone.utc
ST4PATH = "/mesonet/data/stage4"
# used for breakpoint logic
ZEROHOUR = datetime.datetime(2000, 1, 1, 0, 0)
# How many CPUs are we going to burn
CPUCOUNT = min([4, int(cpu_count() / 4)])
MEMORY = {"stamp": datetime.datetime.now()}
BOUNDS = namedtuple("Bounds", ["south", "north", "east", "west"])
def get_sts_ets_at_localhour(date, local_hour):
"""Return a Day Interval in UTC for the given date at CST/CDT hour."""
    # ZoneInfo is supposed to get this right at instantiation
sts = datetime.datetime(
date.year,
date.month,
date.day,
local_hour,
tzinfo=CENTRAL,
)
date2 = datetime.date(
date.year, date.month, date.day
) + datetime.timedelta(days=1)
ets = datetime.datetime(
date2.year,
date2.month,
date2.day,
local_hour,
tzinfo=CENTRAL,
)
return (
sts.replace(hour=local_hour).astimezone(UTC),
ets.replace(hour=local_hour).astimezone(UTC),
)
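# Usage sketch (illustrative): for 3 Mar 2019 with local_hour=1 (CST, UTC-6) this
# returns (2019-03-03 07:00 UTC, 2019-03-04 07:00 UTC); across a DST change the
# window is 23 or 25 hours long rather than 24.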
def iemre_bounds_check(name, val, lower, upper):
"""Make sure our data is within bounds, if not, exit!"""
if np.isnan(val).all():
LOG.warning("FATAL: iemre %s all NaN", name)
sys.exit(3)
minval = np.nanmin(val)
maxval = np.nanmax(val)
if minval < lower or maxval > upper:
LOG.warning(
"FATAL: iemre failure %s %.3f to %.3f [%.3f to %.3f]",
name,
minval,
maxval,
lower,
upper,
)
sys.exit(3)
return val
def load_iemre(nc, data, valid):
"""Use IEM Reanalysis for non-precip data
24km product is smoothed down to the 0.01 degree grid
"""
offset = iemre.daily_offset(valid)
lats = nc.variables["lat"][:]
lons = nc.variables["lon"][:]
lons, lats = np.meshgrid(lons, lats)
# Storage is W m-2, we want langleys per day
ncdata = (
nc.variables["rsds"][offset, :, :].filled(np.nan)
* 86400.0
/ 1000000.0
* 23.9
)
# Default to a value of 300 when this data is missing, for some reason
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["solar"][:] = iemre_bounds_check(
"rsds", nn(data["lon"], data["lat"]), 0, 1000
)
ncdata = convert_value(
nc.variables["high_tmpk"][offset, :, :].filled(np.nan), "degK", "degC"
)
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["high"][:] = iemre_bounds_check(
"high_tmpk", nn(data["lon"], data["lat"]), -60, 60
)
ncdata = convert_value(
nc.variables["low_tmpk"][offset, :, :].filled(np.nan), "degK", "degC"
)
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["low"][:] = iemre_bounds_check(
"low_tmpk", nn(data["lon"], data["lat"]), -60, 60
)
ncdata = convert_value(
nc.variables["avg_dwpk"][offset, :, :].filled(np.nan), "degK", "degC"
)
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["dwpt"][:] = iemre_bounds_check(
"avg_dwpk", nn(data["lon"], data["lat"]), -60, 60
)
# Wind is already in m/s, but could be masked
ncdata = nc.variables["wind_speed"][offset, :, :].filled(np.nan)
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["wind"][:] = iemre_bounds_check(
"wind_speed", nn(data["lon"], data["lat"]), 0, 30
)
def load_stage4(data, valid, xtile, ytile):
"""It sucks, but we need to load the stage IV data to give us something
to benchmark the MRMS data against, to account for two things:
1) Wind Farms
2) Over-estimates
"""
LOG.debug("called")
    # The stage4 files store precip in arrears, so compute 1 AM
one_am, tomorrow = get_sts_ets_at_localhour(valid, 1)
sts_tidx = iemre.hourly_offset(one_am)
ets_tidx = iemre.hourly_offset(tomorrow)
LOG.debug(
"stage4 sts_tidx:%s[%s] ets_tidx:%s[%s]",
sts_tidx,
one_am,
ets_tidx,
tomorrow,
)
with ncopen(f"{ST4PATH}/{valid.year}_stage4_hourly.nc", "r") as nc:
p01m = nc.variables["p01m"]
lats = nc.variables["lat"][:]
lons = nc.variables["lon"][:]
# crossing jan 1
if ets_tidx < sts_tidx:
LOG.debug("Exercise special stageIV logic for jan1!")
totals = np.sum(p01m[sts_tidx:, :, :], axis=0)
with ncopen(f"{ST4PATH}/{tomorrow.year}_stage4_hourly.nc") as nc2:
p01m = nc2.variables["p01m"]
totals += np.sum(p01m[:ets_tidx, :, :], axis=0)
else:
totals = np.sum(p01m[sts_tidx:ets_tidx, :, :], axis=0)
if np.ma.max(totals) > 0:
pass
else:
LOG.warning("No StageIV data found, aborting...")
sys.exit(3)
# set a small non-zero number to keep things non-zero
totals = np.where(totals > 0.001, totals, 0.001)
nn = NearestNDInterpolator(
(lons.flatten(), lats.flatten()), totals.flatten()
)
data["stage4"][:] = nn(data["lon"], data["lat"])
write_grid(data["stage4"], valid, xtile, ytile, "stage4")
LOG.debug("finished")
def qc_precip(data, valid, xtile, ytile):
"""Make some adjustments to the `precip` grid
Not very sophisticated here, if the hires precip grid is within 33% of
Stage IV, then we consider it good. If not, then we apply a multiplier to
bring it to near the stage IV value.
"""
hires_total = | np.sum(data["precip"], 2) | numpy.sum |
# ________
# /
# \ /
# \ /
# \/
import random
import textwrap
import emd_mean
import AdvEMDpy
import emd_basis
import emd_utils
import numpy as np
import pandas as pd
import cvxpy as cvx
import seaborn as sns
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from scipy.ndimage import gaussian_filter
from emd_utils import time_extension, Utility
from scipy.interpolate import CubicSpline
from emd_hilbert import Hilbert, hilbert_spectrum
from emd_preprocess import Preprocess
from emd_mean import Fluctuation
from AdvEMDpy import EMD
# alternate packages
from PyEMD import EMD as pyemd0215
import emd as emd040
sns.set(style='darkgrid')
pseudo_alg_time = np.linspace(0, 2 * np.pi, 1001)
pseudo_alg_time_series = np.sin(pseudo_alg_time) + np.sin(5 * pseudo_alg_time)
pseudo_utils = Utility(time=pseudo_alg_time, time_series=pseudo_alg_time_series)
# plot 0 - addition
fig = plt.figure(figsize=(9, 4))
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('First Iteration of Sifting Algorithm')
plt.plot(pseudo_alg_time, pseudo_alg_time_series, label=r'$h_{(1,0)}(t)$', zorder=1)
plt.scatter(pseudo_alg_time[pseudo_utils.max_bool_func_1st_order_fd()],
pseudo_alg_time_series[pseudo_utils.max_bool_func_1st_order_fd()],
c='r', label=r'$M(t_i)$', zorder=2)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) + 1, '--', c='r', label=r'$\tilde{h}_{(1,0)}^M(t)$', zorder=4)
plt.scatter(pseudo_alg_time[pseudo_utils.min_bool_func_1st_order_fd()],
pseudo_alg_time_series[pseudo_utils.min_bool_func_1st_order_fd()],
c='c', label=r'$m(t_j)$', zorder=3)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time) - 1, '--', c='c', label=r'$\tilde{h}_{(1,0)}^m(t)$', zorder=5)
plt.plot(pseudo_alg_time, np.sin(pseudo_alg_time), '--', c='purple', label=r'$\tilde{h}_{(1,0)}^{\mu}(t)$', zorder=5)
plt.yticks(ticks=[-2, -1, 0, 1, 2])
plt.xticks(ticks=[0, np.pi, 2 * np.pi],
labels=[r'0', r'$\pi$', r'$2\pi$'])
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.95, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/pseudo_algorithm.png')
plt.show()
knots = np.arange(12)
time = np.linspace(0, 11, 1101)
basis = emd_basis.Basis(time=time, time_series=time)
b_spline_basis = basis.cubic_b_spline(knots)
chsi_basis = basis.chsi_basis(knots)
# plot 1
plt.title('Non-Natural Cubic B-Spline Bases at Boundary')
plt.plot(time[500:], b_spline_basis[2, 500:].T, '--', label=r'$ B_{-3,4}(t) $')
plt.plot(time[500:], b_spline_basis[3, 500:].T, '--', label=r'$ B_{-2,4}(t) $')
plt.plot(time[500:], b_spline_basis[4, 500:].T, '--', label=r'$ B_{-1,4}(t) $')
plt.plot(time[500:], b_spline_basis[5, 500:].T, '--', label=r'$ B_{0,4}(t) $')
plt.plot(time[500:], b_spline_basis[6, 500:].T, '--', label=r'$ B_{1,4}(t) $')
plt.xticks([5, 6], [r'$ \tau_0 $', r'$ \tau_1 $'])
plt.xlim(4.4, 6.6)
plt.plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
plt.legend(loc='upper left')
plt.savefig('jss_figures/boundary_bases.png')
plt.show()
# plot 1a - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
knots_uniform = np.linspace(0, 2 * np.pi, 51)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs = emd.empirical_mode_decomposition(knots=knots_uniform, edge_effect='anti-symmetric', verbose=False)[0]
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Uniform Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Uniform Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Uniform Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots_uniform[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots_uniform)):
axs[i].plot(knots_uniform[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_uniform.png')
plt.show()
# plot 1b - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=1, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Statically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Statically Optimised Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Statically Optimised Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots)):
axs[i].plot(knots[j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_1.png')
plt.show()
# plot 1c - addition
knot_demonstrate_time = np.linspace(0, 2 * np.pi, 1001)
knot_demonstrate_time_series = np.sin(knot_demonstrate_time) + np.sin(5 * knot_demonstrate_time)
emd = EMD(time=knot_demonstrate_time, time_series=knot_demonstrate_time_series)
imfs, _, _, _, knots, _, _ = emd.empirical_mode_decomposition(edge_effect='anti-symmetric',
optimise_knots=2, verbose=False)
fig, axs = plt.subplots(3, 1)
fig.subplots_adjust(hspace=0.6)
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Time Series and Dynamically Optimised Knots')
axs[0].plot(knot_demonstrate_time, knot_demonstrate_time_series, Linewidth=2, zorder=100)
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].set_title('IMF 1 and Dynamically Knots')
axs[1].plot(knot_demonstrate_time, imfs[1, :], Linewidth=2, zorder=100)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[1].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[2].set_title('IMF 2 and Dynamically Knots')
axs[2].plot(knot_demonstrate_time, imfs[2, :], Linewidth=2, zorder=100)
axs[2].set_yticks(ticks=[-2, 0, 2])
axs[2].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[2].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[0].plot(knots[0][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[0].legend(loc='lower left')
axs[1].plot(knots[1][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
axs[2].plot(knots[2][0] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey', label='Knots')
for i in range(3):
for j in range(1, len(knots[i])):
axs[i].plot(knots[i][j] * np.ones(101), np.linspace(-2, 2, 101), '--', c='grey')
plt.savefig('jss_figures/knot_2.png')
plt.show()
# plot 1d - addition
window = 81
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Filtering Demonstration')
axs[1].set_title('Zoomed Region')
preprocess_time = pseudo_alg_time.copy()
np.random.seed(1)
random.seed(1)
preprocess_time_series = pseudo_alg_time_series + np.random.normal(0, 0.1, len(preprocess_time))
for i in random.sample(range(1000), 500):
preprocess_time_series[i] += np.random.normal(0, 1)
preprocess = Preprocess(time=preprocess_time, time_series=preprocess_time_series)
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[0].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[0].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12))
axs[0].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
label=textwrap.fill('Windsorize interpolation filter', 14))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[0].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple', label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.mean_filter(window_width=window)[1], label=textwrap.fill('Mean filter', 12))
axs[1].plot(preprocess_time, preprocess.median_filter(window_width=window)[1], label=textwrap.fill('Median filter', 13))
axs[1].plot(preprocess_time, preprocess.winsorize(window_width=window, a=0.8)[1], label=textwrap.fill('Windsorize filter', 12))
axs[1].plot(preprocess_time, preprocess.winsorize_interpolate(window_width=window, a=0.8)[1],
label=textwrap.fill('Windsorize interpolation filter', 14))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.90)[1], c='grey',
label=textwrap.fill('Quantile window', 12))
axs[1].plot(preprocess_time, preprocess.quantile_filter(window_width=window, q=0.10)[1], c='grey')
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.05, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_filter.png')
plt.show()
# plot 1e - addition
fig, axs = plt.subplots(2, 1)
fig.subplots_adjust(hspace=0.4)
figure_size = plt.gcf().get_size_inches()
factor = 0.8
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
axs[0].set_title('Preprocess Smoothing Demonstration')
axs[1].set_title('Zoomed Region')
axs[0].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[0].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[0].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[0].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
downsampled_and_decimated = preprocess.downsample()
axs[0].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
label=textwrap.fill('Downsampled & decimated', 11))
downsampled = preprocess.downsample(decimate=False)
axs[0].plot(downsampled[0], downsampled[1],
label=textwrap.fill('Downsampled', 13))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), -3 * np.ones(101), '--', c='black',
label=textwrap.fill('Zoomed region', 10))
axs[0].plot(np.linspace(0.85 * np.pi, 1.15 * np.pi, 101), 3 * np.ones(101), '--', c='black')
axs[0].plot(0.85 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].plot(1.15 * np.pi * np.ones(101), np.linspace(-3, 3, 101), '--', c='black')
axs[0].set_yticks(ticks=[-2, 0, 2])
axs[0].set_xticks(ticks=[0, np.pi, 2 * np.pi])
axs[0].set_xticklabels(labels=['0', r'$\pi$', r'$2\pi$'])
axs[1].plot(preprocess_time, preprocess_time_series, label='x(t)')
axs[1].plot(pseudo_alg_time, pseudo_alg_time_series, '--', c='purple',
label=textwrap.fill('Noiseless time series', 12))
axs[1].plot(preprocess_time, preprocess.hp()[1],
label=textwrap.fill('Hodrick-Prescott smoothing', 12))
axs[1].plot(preprocess_time, preprocess.hw(order=51)[1],
label=textwrap.fill('Henderson-Whittaker smoothing', 13))
axs[1].plot(downsampled_and_decimated[0], downsampled_and_decimated[1],
label=textwrap.fill('Downsampled & decimated', 13))
axs[1].plot(downsampled[0], downsampled[1],
label=textwrap.fill('Downsampled', 13))
axs[1].set_xlim(0.85 * np.pi, 1.15 * np.pi)
axs[1].set_ylim(-3, 3)
axs[1].set_yticks(ticks=[-2, 0, 2])
axs[1].set_xticks(ticks=[np.pi])
axs[1].set_xticklabels(labels=[r'$\pi$'])
box_0 = axs[0].get_position()
axs[0].set_position([box_0.x0 - 0.06, box_0.y0, box_0.width * 0.85, box_0.height])
axs[0].legend(loc='center left', bbox_to_anchor=(1, -0.15))
box_1 = axs[1].get_position()
axs[1].set_position([box_1.x0 - 0.06, box_1.y0, box_1.width * 0.85, box_1.height])
plt.savefig('jss_figures/preprocess_smooth.png')
plt.show()
# plot 2
fig, axs = plt.subplots(1, 2, sharey=True)
axs[0].set_title('Cubic B-Spline Bases')
axs[0].plot(time, b_spline_basis[2, :].T, '--', label='Basis 1')
axs[0].plot(time, b_spline_basis[3, :].T, '--', label='Basis 2')
axs[0].plot(time, b_spline_basis[4, :].T, '--', label='Basis 3')
axs[0].plot(time, b_spline_basis[5, :].T, '--', label='Basis 4')
axs[0].legend(loc='upper left')
axs[0].plot(5 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].plot(6 * np.ones(100), np.linspace(-0.2, 0.8, 100), 'k-')
axs[0].set_xticks([5, 6])
axs[0].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[0].set_xlim(4.5, 6.5)
axs[1].set_title('Cubic Hermite Spline Bases')
axs[1].plot(time, chsi_basis[10, :].T, '--')
axs[1].plot(time, chsi_basis[11, :].T, '--')
axs[1].plot(time, chsi_basis[12, :].T, '--')
axs[1].plot(time, chsi_basis[13, :].T, '--')
axs[1].plot(5 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].plot(6 * np.ones(100), np.linspace(-0.2, 1.2, 100), 'k-')
axs[1].set_xticks([5, 6])
axs[1].set_xticklabels([r'$ \tau_k $', r'$ \tau_{k+1} $'])
axs[1].set_xlim(4.5, 6.5)
plt.savefig('jss_figures/comparing_bases.png')
plt.show()
# plot 3
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_dash = maxima_y[-1] * np.ones_like(max_dash_time)
min_dash_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_dash = minima_y[-1] * np.ones_like(min_dash_time)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
max_discard = maxima_y[-1]
max_discard_time = minima_x[-1] - maxima_x[-1] + minima_x[-1]
max_discard_dash_time = np.linspace(max_discard_time - width, max_discard_time + width, 101)
max_discard_dash = max_discard * np.ones_like(max_discard_dash_time)
dash_2_time = np.linspace(minima_x[-1], max_discard_time, 101)
dash_2 = np.linspace(minima_y[-1], max_discard, 101)
end_point_time = time[-1]
end_point = time_series[-1]
time_reflect = np.linspace((5 - a) * np.pi, (5 + a) * np.pi, 101)
time_series_reflect = np.flip(np.cos(np.linspace((5 - 2.6 * a) * np.pi,
(5 - a) * np.pi, 101)) + np.cos(5 * np.linspace((5 - 2.6 * a) * np.pi,
(5 - a) * np.pi, 101)))
time_series_anti_reflect = time_series_reflect[0] - time_series_reflect
utils = emd_utils.Utility(time=time, time_series=time_series_anti_reflect)
anti_max_bool = utils.max_bool_func_1st_order_fd()
anti_max_point_time = time_reflect[anti_max_bool]
anti_max_point = time_series_anti_reflect[anti_max_bool]
utils = emd_utils.Utility(time=time, time_series=time_series_reflect)
no_anchor_max_time = time_reflect[utils.max_bool_func_1st_order_fd()]
no_anchor_max = time_series_reflect[utils.max_bool_func_1st_order_fd()]
point_1 = 5.4
length_distance = np.linspace(maxima_y[-1], minima_y[-1], 101)
length_distance_time = point_1 * np.pi * np.ones_like(length_distance)
length_time = np.linspace(point_1 * np.pi - width, point_1 * np.pi + width, 101)
length_top = maxima_y[-1] * np.ones_like(length_time)
length_bottom = minima_y[-1] * np.ones_like(length_time)
point_2 = 5.2
length_distance_2 = np.linspace(time_series[-1], minima_y[-1], 101)
length_distance_time_2 = point_2 * np.pi * np.ones_like(length_distance_2)
length_time_2 = np.linspace(point_2 * np.pi - width, point_2 * np.pi + width, 101)
length_top_2 = time_series[-1] * np.ones_like(length_time_2)
length_bottom_2 = minima_y[-1] * np.ones_like(length_time_2)
symmetry_axis_1_time = minima_x[-1] * np.ones(101)
symmetry_axis_2_time = time[-1] * np.ones(101)
symmetry_axis = np.linspace(-2, 2, 101)
end_time = np.linspace(time[-1] - width, time[-1] + width, 101)
end_signal = time_series[-1] * np.ones_like(end_time)
anti_symmetric_time = np.linspace(time[-1] - 0.5, time[-1] + 0.5, 101)
anti_symmetric_signal = time_series[-1] * np.ones_like(anti_symmetric_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, LineWidth=2, label='Signal')
plt.title('Symmetry Edge Effects Example')
plt.plot(time_reflect, time_series_reflect, 'g--', LineWidth=2, label=textwrap.fill('Symmetric signal', 10))
plt.plot(time_reflect[:51], time_series_anti_reflect[:51], '--', c='purple', LineWidth=2,
label=textwrap.fill('Anti-symmetric signal', 10))
plt.plot(max_dash_time, max_dash, 'k-')
plt.plot(min_dash_time, min_dash, 'k-')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(length_distance_time, length_distance, 'k--')
plt.plot(length_distance_time_2, length_distance_2, 'k--')
plt.plot(length_time, length_top, 'k-')
plt.plot(length_time, length_bottom, 'k-')
plt.plot(length_time_2, length_top_2, 'k-')
plt.plot(length_time_2, length_bottom_2, 'k-')
plt.plot(end_time, end_signal, 'k-')
plt.plot(symmetry_axis_1_time, symmetry_axis, 'r--', zorder=1)
plt.plot(anti_symmetric_time, anti_symmetric_signal, 'r--', zorder=1)
plt.plot(symmetry_axis_2_time, symmetry_axis, 'r--', label=textwrap.fill('Axes of symmetry', 10), zorder=1)
plt.text(5.1 * np.pi, -0.7, r'$\beta$L')
plt.text(5.34 * np.pi, -0.05, 'L')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(max_discard_time, max_discard, c='purple', zorder=4, label=textwrap.fill('Symmetric Discard maxima', 10))
plt.scatter(end_point_time, end_point, c='orange', zorder=4, label=textwrap.fill('Symmetric Anchor maxima', 10))
plt.scatter(anti_max_point_time, anti_max_point, c='green', zorder=4, label=textwrap.fill('Anti-Symmetric maxima', 10))
plt.scatter(no_anchor_max_time, no_anchor_max, c='gray', zorder=4, label=textwrap.fill('Symmetric maxima', 10))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_symmetry_anti.png')
plt.show()
# plot 4
a = 0.21
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
max_dash_1 = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
max_dash_2 = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_dash_time_1 = maxima_x[-1] * np.ones_like(max_dash_1)
max_dash_time_2 = maxima_x[-2] * np.ones_like(max_dash_1)
min_dash_1 = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101)
min_dash_2 = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
min_dash_time_1 = minima_x[-1] * np.ones_like(min_dash_1)
min_dash_time_2 = minima_x[-2] * np.ones_like(min_dash_1)
dash_1_time = np.linspace(maxima_x[-1], minima_x[-1], 101)
dash_1 = np.linspace(maxima_y[-1], minima_y[-1], 101)
dash_2_time = np.linspace(maxima_x[-1], minima_x[-2], 101)
dash_2 = np.linspace(maxima_y[-1], minima_y[-2], 101)
s1 = (minima_y[-2] - maxima_y[-1]) / (minima_x[-2] - maxima_x[-1])
slope_based_maximum_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
slope_based_maximum = minima_y[-1] + (slope_based_maximum_time - minima_x[-1]) * s1
max_dash_time_3 = slope_based_maximum_time * np.ones_like(max_dash_1)
max_dash_3 = np.linspace(slope_based_maximum - width, slope_based_maximum + width, 101)
dash_3_time = np.linspace(minima_x[-1], slope_based_maximum_time, 101)
dash_3 = np.linspace(minima_y[-1], slope_based_maximum, 101)
s2 = (minima_y[-1] - maxima_y[-1]) / (minima_x[-1] - maxima_x[-1])
slope_based_minimum_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
slope_based_minimum = slope_based_maximum - (slope_based_maximum_time - slope_based_minimum_time) * s2
min_dash_time_3 = slope_based_minimum_time * np.ones_like(min_dash_1)
min_dash_3 = np.linspace(slope_based_minimum - width, slope_based_minimum + width, 101)
dash_4_time = np.linspace(slope_based_maximum_time, slope_based_minimum_time)
dash_4 = np.linspace(slope_based_maximum, slope_based_minimum)
maxima_dash = np.linspace(2.5 - width, 2.5 + width, 101)
maxima_dash_time_1 = maxima_x[-2] * np.ones_like(maxima_dash)
maxima_dash_time_2 = maxima_x[-1] * np.ones_like(maxima_dash)
maxima_dash_time_3 = slope_based_maximum_time * np.ones_like(maxima_dash)
maxima_line_dash_time = np.linspace(maxima_x[-2], slope_based_maximum_time, 101)
maxima_line_dash = 2.5 * np.ones_like(maxima_line_dash_time)
minima_dash = np.linspace(-3.4 - width, -3.4 + width, 101)
minima_dash_time_1 = minima_x[-2] * np.ones_like(minima_dash)
minima_dash_time_2 = minima_x[-1] * np.ones_like(minima_dash)
minima_dash_time_3 = slope_based_minimum_time * np.ones_like(minima_dash)
minima_line_dash_time = np.linspace(minima_x[-2], slope_based_minimum_time, 101)
minima_line_dash = -3.4 * np.ones_like(minima_line_dash_time)
# slightly edit signal to make difference between slope-based method and improved slope-based method more clear
time_series[time >= minima_x[-1]] = 1.5 * (time_series[time >= minima_x[-1]] - time_series[time == minima_x[-1]]) + \
time_series[time == minima_x[-1]]
improved_slope_based_maximum_time = time[-1]
improved_slope_based_maximum = time_series[-1]
improved_slope_based_minimum_time = slope_based_minimum_time
improved_slope_based_minimum = improved_slope_based_maximum + s2 * (improved_slope_based_minimum_time -
improved_slope_based_maximum_time)
min_dash_4 = np.linspace(improved_slope_based_minimum - width, improved_slope_based_minimum + width, 101)
min_dash_time_4 = improved_slope_based_minimum_time * np.ones_like(min_dash_4)
dash_final_time = np.linspace(improved_slope_based_maximum_time, improved_slope_based_minimum_time, 101)
dash_final = np.linspace(improved_slope_based_maximum, improved_slope_based_minimum, 101)
ax = plt.subplot(111)
figure_size = plt.gcf().get_size_inches()
factor = 0.9
plt.gcf().set_size_inches((figure_size[0], factor * figure_size[1]))
plt.gcf().subplots_adjust(bottom=0.10)
plt.plot(time, time_series, LineWidth=2, label='Signal')
plt.title('Slope-Based Edge Effects Example')
plt.plot(max_dash_time_1, max_dash_1, 'k-')
plt.plot(max_dash_time_2, max_dash_2, 'k-')
plt.plot(max_dash_time_3, max_dash_3, 'k-')
plt.plot(min_dash_time_1, min_dash_1, 'k-')
plt.plot(min_dash_time_2, min_dash_2, 'k-')
plt.plot(min_dash_time_3, min_dash_3, 'k-')
plt.plot(min_dash_time_4, min_dash_4, 'k-')
plt.plot(maxima_dash_time_1, maxima_dash, 'k-')
plt.plot(maxima_dash_time_2, maxima_dash, 'k-')
plt.plot(maxima_dash_time_3, maxima_dash, 'k-')
plt.plot(minima_dash_time_1, minima_dash, 'k-')
plt.plot(minima_dash_time_2, minima_dash, 'k-')
plt.plot(minima_dash_time_3, minima_dash, 'k-')
plt.text(4.34 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.74 * np.pi, -3.2, r'$\Delta{t^{min}_{m}}$')
plt.text(4.12 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.50 * np.pi, 2, r'$\Delta{t^{max}_{M}}$')
plt.text(4.30 * np.pi, 0.35, r'$s_1$')
plt.text(4.43 * np.pi, -0.20, r'$s_2$')
plt.text(4.30 * np.pi + (minima_x[-1] - minima_x[-2]), 0.35 + (minima_y[-1] - minima_y[-2]), r'$s_1$')
plt.text(4.43 * np.pi + (slope_based_minimum_time - minima_x[-1]),
-0.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.text(4.50 * np.pi + (slope_based_minimum_time - minima_x[-1]),
1.20 + (slope_based_minimum - minima_y[-1]), r'$s_2$')
plt.plot(minima_line_dash_time, minima_line_dash, 'k--')
plt.plot(maxima_line_dash_time, maxima_line_dash, 'k--')
plt.plot(dash_1_time, dash_1, 'k--')
plt.plot(dash_2_time, dash_2, 'k--')
plt.plot(dash_3_time, dash_3, 'k--')
plt.plot(dash_4_time, dash_4, 'k--')
plt.plot(dash_final_time, dash_final, 'k--')
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.scatter(slope_based_maximum_time, slope_based_maximum, c='orange', zorder=4,
label=textwrap.fill('Slope-based maximum', 11))
plt.scatter(slope_based_minimum_time, slope_based_minimum, c='purple', zorder=4,
label=textwrap.fill('Slope-based minimum', 11))
plt.scatter(improved_slope_based_maximum_time, improved_slope_based_maximum, c='deeppink', zorder=4,
label=textwrap.fill('Improved slope-based maximum', 11))
plt.scatter(improved_slope_based_minimum_time, improved_slope_based_minimum, c='dodgerblue', zorder=4,
label=textwrap.fill('Improved slope-based minimum', 11))
plt.xlim(3.9 * np.pi, 5.5 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-3, -2, -1, 0, 1, 2), ('-3', '-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.85, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_slope_based.png')
plt.show()
# plot 5
a = 0.25
width = 0.2
time = np.linspace(0, (5 - a) * np.pi, 1001)
time_series = np.cos(time) + np.cos(5 * time)
utils = emd_utils.Utility(time=time, time_series=time_series)
max_bool = utils.max_bool_func_1st_order_fd()
maxima_x = time[max_bool]
maxima_y = time_series[max_bool]
min_bool = utils.min_bool_func_1st_order_fd()
minima_x = time[min_bool]
minima_y = time_series[min_bool]
A2 = np.abs(maxima_y[-2] - minima_y[-2]) / 2
A1 = np.abs(maxima_y[-1] - minima_y[-1]) / 2
P2 = 2 * np.abs(maxima_x[-2] - minima_x[-2])
P1 = 2 * np.abs(maxima_x[-1] - minima_x[-1])
Huang_time = (P1 / P2) * (time[time >= maxima_x[-2]] - time[time == maxima_x[-2]]) + maxima_x[-1]
Huang_wave = (A1 / A2) * (time_series[time >= maxima_x[-2]] - time_series[time == maxima_x[-2]]) + maxima_y[-1]
Coughlin_time = Huang_time
Coughlin_wave = A1 * np.cos(2 * np.pi * (1 / P1) * (Coughlin_time - Coughlin_time[0]))
Average_max_time = maxima_x[-1] + (maxima_x[-1] - maxima_x[-2])
Average_max = (maxima_y[-2] + maxima_y[-1]) / 2
Average_min_time = minima_x[-1] + (minima_x[-1] - minima_x[-2])
Average_min = (minima_y[-2] + minima_y[-1]) / 2
utils_Huang = emd_utils.Utility(time=time, time_series=Huang_wave)
Huang_max_bool = utils_Huang.max_bool_func_1st_order_fd()
Huang_min_bool = utils_Huang.min_bool_func_1st_order_fd()
utils_Coughlin = emd_utils.Utility(time=time, time_series=Coughlin_wave)
Coughlin_max_bool = utils_Coughlin.max_bool_func_1st_order_fd()
Coughlin_min_bool = utils_Coughlin.min_bool_func_1st_order_fd()
Huang_max_time = Huang_time[Huang_max_bool]
Huang_max = Huang_wave[Huang_max_bool]
Huang_min_time = Huang_time[Huang_min_bool]
Huang_min = Huang_wave[Huang_min_bool]
Coughlin_max_time = Coughlin_time[Coughlin_max_bool]
Coughlin_max = Coughlin_wave[Coughlin_max_bool]
Coughlin_min_time = Coughlin_time[Coughlin_min_bool]
Coughlin_min = Coughlin_wave[Coughlin_min_bool]
max_2_x_time = np.linspace(maxima_x[-2] - width, maxima_x[-2] + width, 101)
max_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
max_2_x = maxima_y[-2] * np.ones_like(max_2_x_time)
min_2_x_time = np.linspace(minima_x[-2] - width, minima_x[-2] + width, 101)
min_2_x_time_side = np.linspace(5.3 * np.pi - width, 5.3 * np.pi + width, 101)
min_2_x = minima_y[-2] * np.ones_like(min_2_x_time)
dash_max_min_2_x = np.linspace(minima_y[-2], maxima_y[-2], 101)
dash_max_min_2_x_time = 5.3 * np.pi * np.ones_like(dash_max_min_2_x)
max_2_y = np.linspace(maxima_y[-2] - width, maxima_y[-2] + width, 101)
max_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
max_2_y_time = maxima_x[-2] * np.ones_like(max_2_y)
min_2_y = np.linspace(minima_y[-2] - width, minima_y[-2] + width, 101)
min_2_y_side = np.linspace(-1.8 - width, -1.8 + width, 101)
min_2_y_time = minima_x[-2] * np.ones_like(min_2_y)
dash_max_min_2_y_time = np.linspace(minima_x[-2], maxima_x[-2], 101)
dash_max_min_2_y = -1.8 * np.ones_like(dash_max_min_2_y_time)
max_1_x_time = np.linspace(maxima_x[-1] - width, maxima_x[-1] + width, 101)
max_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
max_1_x = maxima_y[-1] * np.ones_like(max_1_x_time)
min_1_x_time = np.linspace(minima_x[-1] - width, minima_x[-1] + width, 101)
min_1_x_time_side = np.linspace(5.4 * np.pi - width, 5.4 * np.pi + width, 101)
min_1_x = minima_y[-1] * np.ones_like(min_1_x_time)
dash_max_min_1_x = np.linspace(minima_y[-1], maxima_y[-1], 101)
dash_max_min_1_x_time = 5.4 * np.pi * np.ones_like(dash_max_min_1_x)
max_1_y = np.linspace(maxima_y[-1] - width, maxima_y[-1] + width, 101)
max_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
max_1_y_time = maxima_x[-1] * np.ones_like(max_1_y)
min_1_y = np.linspace(minima_y[-1] - width, minima_y[-1] + width, 101)
min_1_y_side = np.linspace(-2.1 - width, -2.1 + width, 101)
min_1_y_time = minima_x[-1] * np.ones_like(min_1_y)
dash_max_min_1_y_time = np.linspace(minima_x[-1], maxima_x[-1], 101)
dash_max_min_1_y = -2.1 * np.ones_like(dash_max_min_1_y_time)
ax = plt.subplot(111)
plt.gcf().subplots_adjust(bottom=0.10)
plt.title('Characteristic Wave Effects Example')
plt.plot(time, time_series, LineWidth=2, label='Signal')
plt.scatter(Huang_max_time, Huang_max, c='magenta', zorder=4, label=textwrap.fill('Huang maximum', 10))
plt.scatter(Huang_min_time, Huang_min, c='lime', zorder=4, label=textwrap.fill('Huang minimum', 10))
plt.scatter(Coughlin_max_time, Coughlin_max, c='darkorange', zorder=4,
label=textwrap.fill('Coughlin maximum', 14))
plt.scatter(Coughlin_min_time, Coughlin_min, c='dodgerblue', zorder=4,
label=textwrap.fill('Coughlin minimum', 14))
plt.scatter(Average_max_time, Average_max, c='orangered', zorder=4,
label=textwrap.fill('Average maximum', 14))
plt.scatter(Average_min_time, Average_min, c='cyan', zorder=4,
label=textwrap.fill('Average minimum', 14))
plt.scatter(maxima_x, maxima_y, c='r', zorder=4, label='Maxima')
plt.scatter(minima_x, minima_y, c='b', zorder=4, label='Minima')
plt.plot(Huang_time, Huang_wave, '--', c='darkviolet', label=textwrap.fill('Huang Characteristic Wave', 14))
plt.plot(Coughlin_time, Coughlin_wave, '--', c='darkgreen', label=textwrap.fill('Coughlin Characteristic Wave', 14))
plt.plot(max_2_x_time, max_2_x, 'k-')
plt.plot(max_2_x_time_side, max_2_x, 'k-')
plt.plot(min_2_x_time, min_2_x, 'k-')
plt.plot(min_2_x_time_side, min_2_x, 'k-')
plt.plot(dash_max_min_2_x_time, dash_max_min_2_x, 'k--')
plt.text(5.16 * np.pi, 0.85, r'$2a_2$')
plt.plot(max_2_y_time, max_2_y, 'k-')
plt.plot(max_2_y_time, max_2_y_side, 'k-')
plt.plot(min_2_y_time, min_2_y, 'k-')
plt.plot(min_2_y_time, min_2_y_side, 'k-')
plt.plot(dash_max_min_2_y_time, dash_max_min_2_y, 'k--')
plt.text(4.08 * np.pi, -2.2, r'$\frac{p_2}{2}$')
plt.plot(max_1_x_time, max_1_x, 'k-')
plt.plot(max_1_x_time_side, max_1_x, 'k-')
plt.plot(min_1_x_time, min_1_x, 'k-')
plt.plot(min_1_x_time_side, min_1_x, 'k-')
plt.plot(dash_max_min_1_x_time, dash_max_min_1_x, 'k--')
plt.text(5.42 * np.pi, -0.1, r'$2a_1$')
plt.plot(max_1_y_time, max_1_y, 'k-')
plt.plot(max_1_y_time, max_1_y_side, 'k-')
plt.plot(min_1_y_time, min_1_y, 'k-')
plt.plot(min_1_y_time, min_1_y_side, 'k-')
plt.plot(dash_max_min_1_y_time, dash_max_min_1_y, 'k--')
plt.text(4.48 * np.pi, -2.5, r'$\frac{p_1}{2}$')
plt.xlim(3.9 * np.pi, 5.6 * np.pi)
plt.xticks((4 * np.pi, 5 * np.pi), (r'4$\pi$', r'5$\pi$'))
plt.yticks((-2, -1, 0, 1, 2), ('-2', '-1', '0', '1', '2'))
box_0 = ax.get_position()
ax.set_position([box_0.x0 - 0.05, box_0.y0, box_0.width * 0.84, box_0.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.savefig('jss_figures/edge_effects_characteristic_wave.png')
plt.show()
# plot 6
t = np.linspace(5, 95, 100)
signal_orig = np.cos(2 * np.pi * t / 50) + 0.6 * np.cos(2 * np.pi * t / 25) + 0.5 * np.sin(2 * np.pi * t / 200)
util_nn = emd_utils.Utility(time=t, time_series=signal_orig)
maxima = signal_orig[util_nn.max_bool_func_1st_order_fd()]
minima = signal_orig[util_nn.min_bool_func_1st_order_fd()]
cs_max = CubicSpline(t[util_nn.max_bool_func_1st_order_fd()], maxima)
cs_min = CubicSpline(t[util_nn.min_bool_func_1st_order_fd()], minima)
time = np.linspace(0, 5 * np.pi, 1001)
lsq_signal = np.cos(time) + np.cos(5 * time)
knots = np.linspace(0, 5 * np.pi, 101)
time_extended = time_extension(time)
time_series_extended = np.full_like(time_extended, np.nan)  # NaN outside the original support
time_series_extended[int(len(lsq_signal) - 1):int(2 * (len(lsq_signal) - 1) + 1)] = lsq_signal
neural_network_m = 200
neural_network_k = 100
# forward ->
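# Each column of P holds a window of k consecutive samples of the signal, plus a
# trailing 1 for the bias term, ending just before the corresponding target in t;
# weights fitted on (P, t) therefore learn to predict the next sample from the
# previous k, which is the basis of the neural-network edge extension.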
P = np.zeros((int(neural_network_k + 1), neural_network_m))
for col in range(neural_network_m):
P[:-1, col] = lsq_signal[(-(neural_network_m + neural_network_k - col)):(-(neural_network_m - col))]
P[-1, col] = 1 # for additive constant
t = lsq_signal[-neural_network_m:]
# test - top
seed_weights = | np.ones(neural_network_k) | numpy.ones |
import os
import sys
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import nms
from simpleAICV.detection.models.anchor import RetinaAnchors, FCOSPositions, Yolov3Anchors, YoloxAnchors, TTFNetPositions
__all__ = [
'RetinaDecoder',
'FCOSDecoder',
'Yolov4Decoder',
'Yolov5Decoder',
'CenterNetDecoder',
'TTFNetDecoder',
]
class DetNMSMethod:
def __init__(self, nms_type='python_nms', nms_threshold=0.5):
assert nms_type in ['torch_nms', 'python_nms',
'diou_python_nms'], 'wrong nms type!'
self.nms_type = nms_type
self.nms_threshold = nms_threshold
def __call__(self, sorted_bboxes, sorted_scores):
'''
sorted_bboxes:[anchor_nums,4],4:x_min,y_min,x_max,y_max
sorted_scores:[anchor_nums],classification predict scores
'''
if self.nms_type == 'torch_nms':
sorted_bboxes, sorted_scores = torch.tensor(sorted_bboxes).cpu(
).detach(), torch.tensor(sorted_scores).cpu().detach()
keep = nms(sorted_bboxes, sorted_scores, self.nms_threshold)
keep = keep.cpu().detach().numpy()
else:
sorted_bboxes_wh = sorted_bboxes[:, 2:4] - sorted_bboxes[:, 0:2]
sorted_bboxes_areas = sorted_bboxes_wh[:, 0] * sorted_bboxes_wh[:,
1]
sorted_bboxes_areas = np.maximum(sorted_bboxes_areas, 0)
indexes = np.array([i for i in range(sorted_scores.shape[0])],
dtype=np.int32)
keep = []
while indexes.shape[0] > 0:
keep_idx = indexes[0]
keep.append(keep_idx)
indexes = indexes[1:]
if len(indexes) == 0:
break
keep_box_area = sorted_bboxes_areas[keep_idx]
overlap_area_top_left = np.maximum(
sorted_bboxes[keep_idx, 0:2], sorted_bboxes[indexes, 0:2])
overlap_area_bot_right = np.minimum(
sorted_bboxes[keep_idx, 2:4], sorted_bboxes[indexes, 2:4])
overlap_area_sizes = np.maximum(
overlap_area_bot_right - overlap_area_top_left, 0)
overlap_area = overlap_area_sizes[:, 0] * overlap_area_sizes[:,
1]
# compute ious for top1 pred_bbox and the other pred_bboxes
union_area = keep_box_area + sorted_bboxes_areas[
indexes] - overlap_area
union_area = np.maximum(union_area, 1e-4)
ious = overlap_area / union_area
if self.nms_type == 'diou_python_nms':
enclose_area_top_left = np.minimum(
sorted_bboxes[keep_idx, 0:2], sorted_bboxes[indexes,
0:2])
enclose_area_bot_right = np.maximum(
sorted_bboxes[keep_idx, 2:4], sorted_bboxes[indexes,
2:4])
enclose_area_sizes = np.maximum(
enclose_area_bot_right - enclose_area_top_left, 0)
# c2:convex diagonal squared
c2 = ((enclose_area_sizes)**2).sum(axis=1)
c2 = np.maximum(c2, 1e-4)
# p2:center distance squared
keep_box_ctr = (sorted_bboxes[keep_idx, 2:4] +
sorted_bboxes[keep_idx, 0:2]) / 2
other_boxes_ctr = (sorted_bboxes[indexes, 2:4] +
sorted_bboxes[indexes, 0:2]) / 2
p2 = (keep_box_ctr - other_boxes_ctr)**2
p2 = p2.sum(axis=1)
ious = ious - p2 / c2
candidate_indexes = np.where(ious < self.nms_threshold)[0]
indexes = indexes[candidate_indexes]
keep = np.array(keep)
return keep
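# Illustrative usage sketch with made-up boxes: greedy NMS keeps the higher-scoring
# of two heavily overlapping boxes plus any non-overlapping box.
def _example_python_nms():
    nms = DetNMSMethod(nms_type='python_nms', nms_threshold=0.5)
    boxes = np.array([[0., 0., 10., 10.], [1., 1., 11., 11.], [50., 50., 60., 60.]])
    scores = np.array([0.9, 0.8, 0.7])  # assumed already sorted in descending order
    return nms(boxes, scores)  # expected: array([0, 2])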
class DecodeMethod:
def __init__(self,
max_object_num=100,
min_score_threshold=0.05,
topn=1000,
nms_type='python_nms',
nms_threshold=0.5):
self.max_object_num = max_object_num
self.min_score_threshold = min_score_threshold
self.topn = topn
self.nms_function = DetNMSMethod(nms_type=nms_type,
nms_threshold=nms_threshold)
def __call__(self, cls_scores, cls_classes, pred_bboxes):
batch_size = cls_scores.shape[0]
batch_scores = np.ones(
(batch_size, self.max_object_num), dtype=np.float32) * (-1)
batch_classes = np.ones(
(batch_size, self.max_object_num), dtype=np.float32) * (-1)
batch_bboxes = np.zeros((batch_size, self.max_object_num, 4),
dtype=np.float32)
for i, (per_image_scores, per_image_score_classes,
per_image_pred_bboxes) in enumerate(
zip(cls_scores, cls_classes, pred_bboxes)):
score_classes = per_image_score_classes[
per_image_scores > self.min_score_threshold].astype(np.float32)
bboxes = per_image_pred_bboxes[
per_image_scores > self.min_score_threshold].astype(np.float32)
scores = per_image_scores[
per_image_scores > self.min_score_threshold].astype(np.float32)
if scores.shape[0] != 0:
# descending sort
sorted_indexes = np.argsort(-scores)
sorted_scores = scores[sorted_indexes]
sorted_score_classes = score_classes[sorted_indexes]
sorted_bboxes = bboxes[sorted_indexes]
if self.topn < sorted_scores.shape[0]:
sorted_scores = sorted_scores[0:self.topn]
sorted_score_classes = sorted_score_classes[0:self.topn]
sorted_bboxes = sorted_bboxes[0:self.topn]
# nms
keep = self.nms_function(sorted_bboxes, sorted_scores)
keep_scores = sorted_scores[keep]
keep_classes = sorted_score_classes[keep]
keep_bboxes = sorted_bboxes[keep]
final_detection_num = min(self.max_object_num,
keep_scores.shape[0])
batch_scores[
i,
0:final_detection_num] = keep_scores[0:final_detection_num]
batch_classes[i, 0:final_detection_num] = keep_classes[
0:final_detection_num]
batch_bboxes[i, 0:final_detection_num, :] = keep_bboxes[
0:final_detection_num, :]
# batch_scores shape:[batch_size,max_object_num]
# batch_classes shape:[batch_size,max_object_num]
# batch_bboxes shape[batch_size,max_object_num,4]
return [batch_scores, batch_classes, batch_bboxes]
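# Hedged usage sketch of DecodeMethod (shapes follow the comments above; the
# array contents are placeholders):
#   decoder = DecodeMethod(max_object_num=100, min_score_threshold=0.05,
#                          topn=1000, nms_type='python_nms', nms_threshold=0.5)
#   cls_scores  : np.ndarray [batch_size, anchor_nums]   per-anchor best score
#   cls_classes : np.ndarray [batch_size, anchor_nums]   per-anchor best class id
#   pred_bboxes : np.ndarray [batch_size, anchor_nums, 4]
#   batch_scores, batch_classes, batch_bboxes = decoder(cls_scores, cls_classes,
#                                                       pred_bboxes)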
class RetinaDecoder:
def __init__(self,
areas=[[32, 32], [64, 64], [128, 128], [256, 256], [512,
512]],
ratios=[0.5, 1, 2],
scales=[2**0, 2**(1.0 / 3.0), 2**(2.0 / 3.0)],
strides=[8, 16, 32, 64, 128],
max_object_num=100,
min_score_threshold=0.05,
topn=1000,
nms_type='python_nms',
nms_threshold=0.5):
assert nms_type in ['torch_nms', 'python_nms',
'diou_python_nms'], 'wrong nms type!'
self.anchors = RetinaAnchors(areas=areas,
ratios=ratios,
scales=scales,
strides=strides)
self.decode_function = DecodeMethod(
max_object_num=max_object_num,
min_score_threshold=min_score_threshold,
topn=topn,
nms_type=nms_type,
nms_threshold=nms_threshold)
def __call__(self, preds):
cls_preds, reg_preds = preds
feature_size = [[
per_level_cls_pred.shape[2], per_level_cls_pred.shape[1]
] for per_level_cls_pred in cls_preds]
one_image_anchors = self.anchors(feature_size)
cls_preds = np.concatenate([
per_cls_pred.cpu().detach().numpy().reshape(
per_cls_pred.shape[0], -1, per_cls_pred.shape[-1])
for per_cls_pred in cls_preds
],
axis=1)
reg_preds = np.concatenate([
per_reg_pred.cpu().detach().numpy().reshape(
per_reg_pred.shape[0], -1, per_reg_pred.shape[-1])
for per_reg_pred in reg_preds
],
axis=1)
one_image_anchors = np.concatenate([
per_level_anchor.reshape(-1, per_level_anchor.shape[-1])
for per_level_anchor in one_image_anchors
],
axis=0)
batch_anchors = np.repeat(np.expand_dims(one_image_anchors, axis=0),
cls_preds.shape[0],
axis=0)
cls_classes = np.argmax(cls_preds, axis=2)
cls_scores = np.concatenate([
np.expand_dims(per_image_preds[np.arange(per_image_preds.shape[0]),
per_image_cls_classes],
axis=0)
for per_image_preds, per_image_cls_classes in zip(
cls_preds, cls_classes)
],
axis=0)
pred_bboxes = self.snap_txtytwth_to_x1y1x2y2(reg_preds, batch_anchors)
[batch_scores, batch_classes,
batch_bboxes] = self.decode_function(cls_scores, cls_classes,
pred_bboxes)
# batch_scores shape:[batch_size,max_object_num]
# batch_classes shape:[batch_size,max_object_num]
# batch_bboxes shape[batch_size,max_object_num,4]
return [batch_scores, batch_classes, batch_bboxes]
def snap_txtytwth_to_x1y1x2y2(self, reg_preds, anchors):
'''
snap reg heads to pred bboxes
reg_preds:[batch_size,anchor_nums,4],4:[tx,ty,tw,th]
anchors:[batch_size,anchor_nums,4],4:[x_min,y_min,x_max,y_max]
'''
anchors_wh = anchors[:, :, 2:4] - anchors[:, :, 0:2]
anchors_ctr = anchors[:, :, 0:2] + 0.5 * anchors_wh
pred_bboxes_wh = np.exp(reg_preds[:, :, 2:4]) * anchors_wh
pred_bboxes_ctr = reg_preds[:, :, :2] * anchors_wh + anchors_ctr
pred_bboxes_x_min_y_min = pred_bboxes_ctr - 0.5 * pred_bboxes_wh
pred_bboxes_x_max_y_max = pred_bboxes_ctr + 0.5 * pred_bboxes_wh
pred_bboxes = np.concatenate(
[pred_bboxes_x_min_y_min, pred_bboxes_x_max_y_max], axis=2)
pred_bboxes = pred_bboxes.astype(np.int32)
# pred bboxes shape:[batch,anchor_nums,4]
return pred_bboxes
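# Worked example of the snap above for a single anchor (numbers are
# illustrative only): anchor [0, 0, 100, 100] has wh = [100, 100] and
# ctr = [50, 50]; a reg head [tx, ty, tw, th] = [0.1, 0.1, 0.0, 0.0] gives
# pred_ctr = 0.1 * 100 + 50 = 60 per axis and pred_wh = exp(0) * 100 = 100,
# so the decoded box is [60 - 50, 60 - 50, 60 + 50, 60 + 50] = [10, 10, 110, 110].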
class FCOSDecoder:
def __init__(self,
strides=[8, 16, 32, 64, 128],
max_object_num=100,
min_score_threshold=0.05,
topn=1000,
nms_type='python_nms',
nms_threshold=0.6):
assert nms_type in ['torch_nms', 'python_nms',
'diou_python_nms'], 'wrong nms type!'
self.positions = FCOSPositions(strides=strides)
self.decode_function = DecodeMethod(
max_object_num=max_object_num,
min_score_threshold=min_score_threshold,
topn=topn,
nms_type=nms_type,
nms_threshold=nms_threshold)
def __call__(self, preds):
cls_preds, reg_preds, center_preds = preds
feature_size = [[
per_level_cls_pred.shape[2], per_level_cls_pred.shape[1]
] for per_level_cls_pred in cls_preds]
one_image_positions = self.positions(feature_size)
cls_preds = [
per_cls_pred.cpu().detach().numpy().reshape(
per_cls_pred.shape[0], -1, per_cls_pred.shape[-1])
for per_cls_pred in cls_preds
]
reg_preds = [
per_reg_pred.cpu().detach().numpy().reshape(
per_reg_pred.shape[0], -1, per_reg_pred.shape[-1])
for per_reg_pred in reg_preds
]
center_preds = [
per_center_pred.cpu().detach().numpy().reshape(
per_center_pred.shape[0], -1, per_center_pred.shape[-1])
for per_center_pred in center_preds
]
cls_preds = np.concatenate(cls_preds, axis=1)
reg_preds = np.concatenate(reg_preds, axis=1)
#!/usr/bin/python
# encoding: utf-8
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
import bisect
import warnings
from PIL import Image
import numpy as np
import string
import cv2
import os
import re
import pickle  # used by the Syn_800K cache read/write further below
sys.path.append('../')
from utils import str_filt
from utils.labelmaps import get_vocabulary, labels2strs
from IPython import embed
from pyfasttext import FastText
random.seed(0)
from utils import utils_deblur
from utils import utils_sisr as sr
from utils import utils_image as util
import imgaug.augmenters as iaa
from scipy import io as sio
scale = 0.90
kernel = utils_deblur.fspecial('gaussian', 15, 1.)
noise_level_img = 0.
def rand_crop(im):
w, h = im.size
p1 = (random.uniform(0, w*(1-scale)), random.uniform(0, h*(1-scale)))
p2 = (p1[0] + scale*w, p1[1] + scale*h)
return im.crop(p1 + p2)
def central_crop(im):
w, h = im.size
p1 = (((1-scale)*w/2), (1-scale)*h/2)
p2 = ((1+scale)*w/2, (1+scale)*h/2)
return im.crop(p1 + p2)
def buf2PIL(txn, key, type='RGB'):
imgbuf = txn.get(key)
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
im = Image.open(buf).convert(type)
return im
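# Hedged usage sketch (the lmdb path is a placeholder): buf2PIL expects an
# open lmdb transaction and a key in the same b'image-%09d' format used by
# lmdbDataset below.
#   env = lmdb.open('path/to/lmdb', readonly=True, lock=False)
#   with env.begin(write=False) as txn:
#       im = buf2PIL(txn, b'image-%09d' % 1, 'RGB')  # PIL.Image in RGB mode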
class lmdbDataset_realBadSet(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=100, test=False, rotate=False):
super(lmdbDataset_realBadSet, self).__init__()
# root should be the folder that contains the images
# anno_dir = os.path.join(root, "ANNOTATION")
self.imlist = os.listdir(root)
self.image_dir = root
# self.impath_list = []
# self.anno_list = []
print("collect images from:", root)
# mode = "train" if root.split("/")[-2] == "TRAIN" else "test"
self.nSamples = len(self.imlist)
print("Done, we have ", self.nSamples, "samples...")
self.voc_type = voc_type
self.max_len = max_len
self.test = test
def __len__(self):
return self.nSamples
def __getitem__(self, index):
idx = index % self.nSamples
imfile = self.imlist[index]
image_path = os.path.join(self.image_dir, imfile)
print("imfile:", imfile)
word = imfile.split("_")[1] if len(imfile.split("_")) > 1 else ""
if not os.path.isfile(image_path):
print("File not found for", image_path)
return self[index+1]
try:
img_HR = Image.open(image_path)
img_lr = img_HR.copy()
img_lr_np = np.array(img_lr).astype(np.uint8)
img_lry = cv2.cvtColor(img_lr_np, cv2.COLOR_RGB2YUV)[..., 0]
img_lry = Image.fromarray(img_lry)
img_HR_np = np.array(img_HR).astype(np.uint8)
img_HRy = cv2.cvtColor(img_HR_np, cv2.COLOR_RGB2YUV)[..., 0]
img_HRy = Image.fromarray(img_HRy)
if img_HR.size[0] < 2 or img_HR.size[1] < 2:
print("img_HR:", img_HR.size)
return self[(index + 1) % self.nSamples]
except ValueError:
print("File not found for", image_path)
return self[(index + 1) % self.nSamples]
# print("annos:", img_HR_np.shape, img_lr_np.shape)
# label_str = str_filt(word, self.voc_type)
return img_HR, img_lr, img_HRy, img_lry, imfile
class lmdbDataset(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=31, test=True):
super(lmdbDataset, self).__init__()
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get(b'num-samples'))
self.nSamples = nSamples
self.max_len = max_len
self.voc_type = voc_type
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
index += 1
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
try:
img = buf2PIL(txn, b'image_hr-%09d' % index, 'RGB')
except TypeError:
img = buf2PIL(txn, b'image-%09d' % index, 'RGB')
except IOError:
return self[index + 1]
if len(word) > self.max_len:
return self[index + 1]
label_str = str_filt(word, self.voc_type)
return img, label_str
def get_Syn_800K_with_words(mode, dataset_dir, lang_seq=False):
# if mode == 'train':
# image_dir = os.path.join(dataset_dir, 'image_9000/')
# gt_dir = os.path.join(dataset_dir, 'txt_9000/')
# ./ICPR_dataset/update_ICPR_text_train_part1_20180316/train_1000/
# else:
# image_dir = os.path.join(dataset_dir, 'image_1000/')
# gt_dir = os.path.join(dataset_dir, 'txt_1000/')
word2vec_mat = '../selected_smaller_dic.mat'
#mat_data = sio.loadmat(word2vec_mat)
#all_words = mat_data['selected_vocab']
#all_vecs = mat_data['selected_dict']
#w2v_dict = {}
#print('Building w2v dictionary...')
#for i in range(len(all_words)):
# w2v_dict[all_words[i][0][0]] = all_vecs[i]
#print('done')
mat_file = os.path.join(dataset_dir, 'gt.mat')
# print('mat_file:', mat_file)
mat_f = sio.loadmat(mat_file)
wordBBs = mat_f['wordBB'][0]
txt_annos = mat_f['txt'][0]
im_names = mat_f['imnames'][0]
sam_size = len(txt_annos)
# image_list = os.listdir(image_dir)
# image_list.sort()
im_infos = []
if mode == 'train':
cache_pkl = './data_cache/Syn_800K_training'
else:
cache_pkl = './data_cache/Syn_800K_testing'
if lang_seq:
cache_pkl += "_lang_seq"
cache_pkl += "_E2E.pkl"
if os.path.isfile(cache_pkl):
return pickle.load(open(cache_pkl, 'rb'))
pro_cnt = 0
im_range = (0, 200000) if mode == "train" else (200000, 205000)
for i in range(im_range[0], im_range[1]):
txts = txt_annos[i]
im_path = os.path.join(dataset_dir, im_names[i][0])
word_boxes = wordBBs[i]
pro_cnt += 1
if pro_cnt % 2000 == 0:
print('processed image:', str(pro_cnt) + '/' + str(im_range[1] - im_range[0]))
cnt = 0
# print('word_boxes:', word_boxes.shape)
im = cv2.imread(im_path)
if len(word_boxes.shape) < 3:
word_boxes = np.expand_dims(word_boxes, -1)
words = []
boxes = []
word_vecs = []
for txt in txts:
txtsp = txt.split('\n')
for line in txtsp:
line = line.replace('\n', '').replace('\r', '').replace('\t', '').split(' ')
# print('line:', line)
for w in line:
# w = w
if len(w) > 0:
gt_ind = np.transpose(np.array(word_boxes[:, :, cnt], dtype=np.int32), (1, 0)).reshape(8)
# print(imname, gt_ind, w)
cnt += 1
'''
cv2.line(im, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 3)
cv2.line(im, (box[2], box[3]), (box[4], box[5]), (0, 0, 255), 3)
cv2.line(im, (box[4], box[5]), (box[6], box[7]), (0, 0, 255), 3)
cv2.line(im, (box[6], box[7]), (box[0], box[1]), (0, 0, 255), 3)
cv2.putText(im, w, (box[0], box[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 122), 2)
'''
pt1 = (int(gt_ind[0]), int(gt_ind[1]))
pt2 = (int(gt_ind[2]), int(gt_ind[3]))
pt3 = (int(gt_ind[4]), int(gt_ind[5]))
pt4 = (int(gt_ind[6]), int(gt_ind[7]))
edge1 = np.sqrt((pt1[0] - pt2[0]) * (pt1[0] - pt2[0]) + (pt1[1] - pt2[1]) * (pt1[1] - pt2[1]))
edge2 = np.sqrt((pt2[0] - pt3[0]) * (pt2[0] - pt3[0]) + (pt2[1] - pt3[1]) * (pt2[1] - pt3[1]))
angle = 0
if edge1 > edge2:
width = edge1
height = edge2
if pt1[0] - pt2[0] != 0:
angle = -np.arctan(float(pt1[1] - pt2[1]) / float(pt1[0] - pt2[0])) / 3.1415926 * 180
else:
angle = 90.0
elif edge2 >= edge1:
width = edge2
height = edge1
# print pt2[0], pt3[0]
if pt2[0] - pt3[0] != 0:
angle = -np.arctan(float(pt2[1] - pt3[1]) / float(pt2[0] - pt3[0])) / 3.1415926 * 180
else:
angle = 90.0
if angle < -45.0:
angle = angle + 180
x_ctr = float(pt1[0] + pt3[0]) / 2 # pt1[0] + np.abs(float(pt1[0] - pt3[0])) / 2
y_ctr = float(pt1[1] + pt3[1]) / 2 # pt1[1] + np.abs(float(pt1[1] - pt3[1])) / 2
if height * width * (800 / float(im.shape[0])) < 16 * 32 and mode == "train":
continue
if x_ctr >= im.shape[1] or x_ctr < 0 or y_ctr >= im.shape[0] or y_ctr < 0:
continue
#com_num = re.compile('[0-9]+')
#com_prices = re.compile('[$¥€£]+')
#match_num = re.findall(com_num, w)
#match_prices = re.findall(com_prices, w)
# choices: original, prices, others
# 2 for English
if lang_seq:
w = ["1" for i in range(len(w))]
w = "".join(w)
words.append(w)
'''
w = w.lower()
if w in w2v_dict:
word_vecs.append(w2v_dict[w.lower()])
elif match_prices and match_num:
word_vecs.append(w2v_dict['price'])
elif match_num and not match_prices:
word_vecs.append(w2v_dict['ten'])
else:
print(im_path, w)
word_vecs.append(np.zeros(100, dtype=np.float32) + 1e-10)
'''
gt_ptx = gt_ind.reshape(-1, 2)
xmax = np.max(gt_ptx[:, 0])
xmin = np.min(gt_ptx[:, 0])
ymax = np.max(gt_ptx[:, 1])
ymin = np.min(gt_ptx[:, 1])
# return to width, height
boxes.append([xmin, ymin, xmax - xmin, ymax - ymin]) #x_ctr, y_ctr, width, height, angle, w
cls_num = 2
len_of_bboxes = len(boxes)
gt_boxes = np.zeros((len_of_bboxes, 4), dtype=np.int16)
gt_classes = np.zeros((len_of_bboxes), dtype=np.int32)
overlaps = np.zeros((len_of_bboxes, cls_num), dtype=np.float32) # text or non-text
seg_areas = np.zeros((len_of_bboxes), dtype=np.float32)
for idx in range(len(boxes)):
gt_classes[idx] = 1 # cls_text
overlaps[idx, 1] = 1.0 # prob
seg_areas[idx] = (boxes[idx][2]) * (boxes[idx][3])
gt_boxes[idx, :] = [boxes[idx][0], boxes[idx][1], boxes[idx][2], boxes[idx][3]] #, boxes[idx][4]
# print ("boxes_size:", gt_boxes.shape[0])
if gt_boxes.shape[0] > 0:
max_overlaps = overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = overlaps.argmax(axis=1)
else:
continue
im_info = {
'gt_classes': gt_classes,
'max_classes': max_classes,
'image': im_path,
'boxes': gt_boxes,
'flipped': False,
'gt_overlaps': overlaps,
'seg_areas': seg_areas,
'height': im.shape[0],
'width': im.shape[1],
'gt_words': words,
# 'gt_wordvec': np.array(word_vecs),
'max_overlaps': max_overlaps,
'rotated': True
}
im_infos.append(im_info)
f_save_pkl = open(cache_pkl, 'wb')
pickle.dump(im_infos, f_save_pkl)
f_save_pkl.close()
print("Save pickle done.")
return im_infos
class lmdbDataset_GlobalSR(Dataset):
def __init__(self, root=None, voc_type='upper', max_len=31, test=False, rotate=False):
super(lmdbDataset_GlobalSR, self).__init__()
if test:
mode = "test"
else:
mode = "train"
self.image_dataset = get_Syn_800K_with_words(mode, dataset_dir=root, lang_seq=False)
self.nSamples = len(self.image_dataset)
def __len__(self):
return self.nSamples
def __getitem__(self, index):
assert index <= len(self), 'index range error'
# index += 1
'''
txn = self.env.begin(write=False)
label_key = b'label-%09d' % index
word = str(txn.get(label_key).decode())
try:
img = buf2PIL(txn, b'image_hr-%09d' % index, 'RGB')
except TypeError:
img = buf2PIL(txn, b'image-%09d' % index, 'RGB')
except IOError or len(label) > self.max_len:
return self[index + 1]
label_str = str_filt(word, self.voc_type)
'''
image_info = self.image_dataset[index]
impath = image_info['image']
image_pil = Image.open(impath)
boxes = image_info['boxes']
gt_words = image_info['gt_words']
return image_pil, boxes, gt_words
def gauss_unsharp_mask(rgb, shp_kernel, shp_sigma, shp_gain):
LF = cv2.GaussianBlur(rgb, (shp_kernel, shp_kernel), shp_sigma)
HF = rgb - LF
RGB_peak = rgb + HF * shp_gain
RGB_noise_NR_shp = np.clip(RGB_peak, 0.0, 255.0)
return RGB_noise_NR_shp, LF
def add_shot_gauss_noise(rgb, shot_noise_mean, read_noise):
noise_var_map = shot_noise_mean * rgb + read_noise
noise_dev_map = np.sqrt(noise_var_map)
noise = np.random.normal(loc=0.0, scale = noise_dev_map, size=None)
if (rgb.mean() > 252.0):
noise_rgb = rgb
else:
noise_rgb = rgb + noise
noise_rgb = np.clip(noise_rgb, 0.0, 255.0)
return noise_rgb
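# Hedged sketch of how the two helpers above can be chained (the file name and
# parameter values are illustrative, not the ones used by degradation() below):
#   rgb = np.array(Image.open('example.png').convert('RGB'), dtype=np.float32)
#   sharpened, low_freq = gauss_unsharp_mask(rgb, shp_kernel=5, shp_sigma=1.0,
#                                            shp_gain=0.5)
#   noisy = add_shot_gauss_noise(sharpened, shot_noise_mean=0.01, read_noise=1.0)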
def degradation(src_img):
# RGB Image input
GT_RGB = np.array(src_img)
#
# Last modified on Thu Jan 31 10:27:11 PST 2002 by lindy
#
# $Header: /opt/cvs/python/packages/share1.5/mglutil/math/rmsdtest.py,v 1.4.12.1 2016/02/11 23:15:05 annao Exp $
#
"""Unit test for rmsd.py
Requirements for rmsd:
A. RMSDCalculator.__init__
0. should ..
B. RMSDCalculator.setRefCoords
0. should ..
C. RMSDCalculator.computeRMSD
1. should return known result with known input
2. raise ValueError for input of unlike dimensions
3. for two random sets of points, rmsd(x,y) == rmsd(y,x)
4. raise ValueError if the reference coords have not been set
D.
"""
from mglutil.math import rmsd
import unittest, math
import numpy
from numpy import random as RandomArray
class ComputedValues(unittest.TestCase):
decimals = 4 # decimal places to round to for float comparison
point_list_0 = numpy.zeros((5,3))
point_list_1 = numpy.ones( (5,3))
knowValues = ( (point_list_0, point_list_0, 0.0),
(point_list_1, point_list_1, 0.0),
(point_list_0, point_list_1, math.sqrt(3.0)),
(point_list_1, point_list_0, math.sqrt(3.0)))
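# Note on the known values above: each point of point_list_1 differs from the
# matching point of point_list_0 by (1, 1, 1), so every per-point squared
# distance is 1 + 1 + 1 = 3 and the RMSD is sqrt(3) in both directions.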
def test_computeRMSD_KnowValues(self):
"""1. should return known result with known input"""
for ref, input, known in self.knowValues:
self.assertEqual(known,
rmsd.RMSDCalculator(ref).computeRMSD(input))
def test_computeRMSD_RandomOffset(self):
"""5. offset point by random value returns offset*sqrt(3)"""
min = -10000.
max = 10000.
num_points = 20
dimension = 3
point_list_1 = RandomArray.uniform(min, max, (num_points, dimension))
delta = point_list_1[0][0]
point_list_2 = point_list_1 + delta
answer = rmsd.RMSDCalculator(point_list_1).computeRMSD(point_list_2)
self.assertEqual(
round(answer, self.decimals),
round(abs(delta)*math.sqrt(3.0), self.decimals))
def test_computeRMSD_Random(self):
"""3. for two random sets of points, rmsd(x,y) == rmsd(y,x)"""
min = -10000.
max = 10000.
num_points = 20
dimension = 3
point_list_1 = RandomArray.uniform(min, max, (num_points, dimension))
point_list_2 = RandomArray.uniform(min, max, (num_points, dimension))
self.assertEqual(
rmsd.RMSDCalculator(point_list_1).computeRMSD(point_list_2),
rmsd.RMSDCalculator(point_list_2).computeRMSD(point_list_1))
class InputValues(unittest.TestCase):
point_list_0 = numpy.zeros((3,3))
point_list_1 = numpy.ones((4, 3))
from collections.abc import Iterable
from collections import namedtuple
from difflib import get_close_matches
from numbers import Real
from io import StringIO
import itertools
import os
import re
import tempfile
from warnings import warn
import numpy as np
import h5py
import openmc.checkvalue as cv
from openmc.mixin import EqualityMixin
from openmc.stats import Discrete, Tabular
from . import HDF5_VERSION, HDF5_VERSION_MAJOR, endf
from .data import K_BOLTZMANN, ATOMIC_SYMBOL, EV_PER_MEV, NATURAL_ABUNDANCE
from .ace import Table, get_table, Library
from .angle_energy import AngleEnergy
from .function import Tabulated1D, Function1D
from .njoy import make_ace_thermal
from .thermal_angle_energy import (CoherentElasticAE, IncoherentElasticAE,
IncoherentElasticAEDiscrete,
IncoherentInelasticAEDiscrete,
IncoherentInelasticAE)
_THERMAL_NAMES = {
'c_Al27': ('al', 'al27', 'al-27'),
'c_Al_in_Sapphire': ('asap00',),
'c_Be': ('be', 'be-metal', 'be-met', 'be00'),
'c_BeO': ('beo',),
'c_Be_in_BeO': ('bebeo', 'be-beo', 'be-o', 'be/o', 'bbeo00'),
'c_Be_in_Be2C': ('bebe2c',),
'c_C6H6': ('benz', 'c6h6'),
'c_C_in_SiC': ('csic', 'c-sic'),
'c_Ca_in_CaH2': ('cah', 'cah00'),
'c_D_in_D2O': ('dd2o', 'd-d2o', 'hwtr', 'hw', 'dhw00'),
'c_D_in_D2O_ice': ('dice',),
'c_Fe56': ('fe', 'fe56', 'fe-56'),
'c_Graphite': ('graph', 'grph', 'gr', 'gr00'),
'c_Graphite_10p': ('grph10',),
'c_Graphite_30p': ('grph30',),
'c_H_in_CaH2': ('hcah2', 'hca00'),
'c_H_in_CH2': ('hch2', 'poly', 'pol', 'h-poly', 'pol00'),
'c_H_in_CH4_liquid': ('lch4', 'lmeth'),
'c_H_in_CH4_solid': ('sch4', 'smeth'),
'c_H_in_CH4_solid_phase_II': ('sch4p2',),
'c_H_in_H2O': ('hh2o', 'h-h2o', 'lwtr', 'lw', 'lw00'),
'c_H_in_H2O_solid': ('hice', 'h-ice', 'ice00'),
'c_H_in_C5O2H8': ('lucite', 'c5o2h8', 'h-luci'),
'c_H_in_Mesitylene': ('mesi00',),
'c_H_in_Toluene': ('tol00',),
'c_H_in_YH2': ('hyh2', 'h-yh2'),
'c_H_in_ZrH': ('hzrh', 'h-zrh', 'h-zr', 'h/zr', 'hzr', 'hzr00'),
'c_Mg24': ('mg', 'mg24', 'mg00'),
'c_O_in_Sapphire': ('osap00',),
'c_O_in_BeO': ('obeo', 'o-beo', 'o-be', 'o/be', 'obeo00'),
'c_O_in_D2O': ('od2o', 'o-d2o', 'ohw00'),
'c_O_in_H2O_ice': ('oice', 'o-ice'),
'c_O_in_UO2': ('ouo2', 'o-uo2', 'o2-u', 'o2/u', 'ouo200'),
'c_N_in_UN': ('n-un',),
'c_ortho_D': ('orthod', 'orthoD', 'dortho', 'od200'),
'c_ortho_H': ('orthoh', 'orthoH', 'hortho', 'oh200'),
'c_Si28': ('si00',),
'c_Si_in_SiC': ('sisic', 'si-sic'),
'c_SiO2_alpha': ('sio2', 'sio2a'),
'c_SiO2_beta': ('sio2b',),
'c_para_D': ('parad', 'paraD', 'dpara', 'pd200'),
'c_para_H': ('parah', 'paraH', 'hpara', 'ph200'),
'c_U_in_UN': ('u-un',),
'c_U_in_UO2': ('uuo2', 'u-uo2', 'u-o2', 'u/o2', 'uuo200'),
'c_Y_in_YH2': ('yyh2', 'y-yh2'),
'c_Zr_in_ZrH': ('zrzrh', 'zr-zrh', 'zr-h', 'zr/h')
}
def _temperature_str(T):
# round() normally returns an int when called with a single argument, but
# numpy floats overload rounding to return another float
return "{}K".format(int(round(T)))
def get_thermal_name(name):
"""Get proper S(a,b) table name, e.g. 'HH2O' -> 'c_H_in_H2O'
Parameters
----------
name : str
Name of an ACE thermal scattering table
Returns
-------
str
GND-format thermal scattering name
"""
if name in _THERMAL_NAMES:
return name
else:
for proper_name, names in _THERMAL_NAMES.items():
if name.lower() in names:
return proper_name
# Make an educated guess?? This actually works well for
# JEFF-3.2 which stupidly uses names like lw00.32t,
# lw01.32t, etc. for different temperatures
# First, construct a list of all the values/keys in the names
# dictionary
all_names = itertools.chain(_THERMAL_NAMES.keys(),
*_THERMAL_NAMES.values())
matches = get_close_matches(name, all_names, cutoff=0.5)
if matches:
# Figure out the key for the corresponding match
match = matches[0]
if match not in _THERMAL_NAMES:
for key, value_list in _THERMAL_NAMES.items():
if match in value_list:
match = key
break
warn('Thermal scattering material "{}" is not recognized. '
'Assigning a name of {}.'.format(name, match))
return match
else:
# OK, we give up. Just use the ACE name.
warn('Thermal scattering material "{0}" is not recognized. '
'Assigning a name of c_{0}.'.format(name))
return 'c_' + name
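# Example mappings implied by _THERMAL_NAMES (illustrative):
#   get_thermal_name('HH2O') -> 'c_H_in_H2O'   (matched via the 'hh2o' alias)
#   get_thermal_name('lwtr') -> 'c_H_in_H2O'
#   get_thermal_name('grph') -> 'c_Graphite'
# A name such as 'lw01' is not an exact alias and would likely be resolved by
# the close-match fallback above (with a warning).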
class CoherentElastic(Function1D):
r"""Coherent elastic scattering data from a crystalline material
The integrated cross section for coherent elastic scattering from a
powdered crystalline material may be represented as:
.. math::
\sigma(E,T) = \frac{1}{E} \sum\limits_{i=1}^{E_i < E} s_i(T)
where :math:`s_i(T)` is proportional the structure factor in [eV-b] at
the moderator temperature :math:`T` in Kelvin.
Parameters
----------
bragg_edges : Iterable of float
Bragg edge energies in eV
factors : Iterable of float
Partial sum of structure factors, :math:`\sum\limits_{i=1}^{E_i<E} s_i`
Attributes
----------
bragg_edges : Iterable of float
Bragg edge energies in eV
factors : Iterable of float
Partial sum of structure factors, :math:`\sum\limits_{i=1}^{E_i<E} s_i`
"""
def __init__(self, bragg_edges, factors):
self.bragg_edges = bragg_edges
self.factors = factors
def __call__(self, E):
idx = np.searchsorted(self.bragg_edges, E)
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 10
@author: jaehyuk
"""
import numpy as np
import scipy.stats as ss
import scipy.optimize as sopt
import scipy.integrate as spint
from . import normal
from . import bsm
import pyfeng as pf
'''
MC model class for Beta=1
'''
class ModelBsmMC:
beta = 1.0 # fixed (not used)
vov, rho = 0.0, 0.0
sigma, intr, divr = None, None, None
bsm_model = None
'''
You may define more members for MC: time step, etc
'''
def __init__(self, sigma, vov=0, rho=0.0, beta=1.0, intr=0, divr=0, time_steps=1_000, n_samples=10_000):
self.sigma = sigma
self.vov = vov
self.rho = rho
self.intr = intr
self.divr = divr
self.time_steps = time_steps
self.n_samples = n_samples
self.bsm_model = pf.Bsm(sigma, intr=intr, divr=divr)
def bsm_vol(self, strike, spot, texp=None, sigma=None):
'''
From the price from self.price() compute the implied vol
this is the opposite of bsm_vol in ModelHagan class
use bsm_model
'''
if sigma is None:
sigma = self.sigma
price = self.price(strike, spot, texp, sigma)
vol = self.bsm_model.impvol(price, strike, spot, texp)
return vol
def price(self, strike, spot, texp=None, sigma=None, cp=1, seed=None):
'''
Your MC routine goes here
Generate paths for vol and price first. Then get prices (vector) for all strikes
You may fix the random number seed
'''
if seed is not None:
np.random.seed(seed)
if sigma is None:
sigma = self.sigma
znorm_m = np.random.normal(size=(self.n_samples, ))
X1 = np.random.normal(loc=0., scale=1., size=(self.n_samples,))
# generate path for vol
n_intervals = np.linspace(0, texp, self.time_steps)
vol_path = sigma * np.exp(self.vov * znorm_m - 1/2 * (self.vov**2) * n_intervals[:, None])
div_fac = np.exp(-texp * self.divr)
disc_fac = np.exp(-texp * self.intr)
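# NOTE (hedged sketch, not the original code; the source is truncated at this
# point): a typical way to finish this MC routine is to (1) take the terminal
# vol of each path from vol_path, (2) simulate the terminal spot with a normal
# draw correlated with znorm_m through self.rho, and (3) return
# disc_fac * mean(max(cp * (spot_T - strike), 0)) for each strike. The name
# spot_T in this comment is a descriptive placeholder only.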
#! Python PyDoppler
import numpy as np
import imp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.signal import savgol_filter
import sys
plt.rcParams.update({'font.size': 12})
import os
plt.ion()
class spruit:
"""
A class to store and process data for Doppler tomography code
by <NAME>.
...
Methods
-------
foldspec()
Reads the data and stores in spruit object
sort(column, order='ascending')
Sort by `column`
"""
def __init__(self,force_install = False):
self.object = 'disc'
self.wave = 0.0
self.flux = 0.0
self.pha = 0.0
self.input_files = 0.0
self.input_phase = 0.0
self.trsp = 0.0
self.nbins = 20
self.normalised_flux = 0.0
self.normalised_wave = 0.0
self.base_dir = '.'
self.lam0 = 6562.83
self.delw = 80
self.list = 'phases.txt'
self.overs = 0.3
self.gama = 0.0
self.delta_phase = 0.001
self.verbose = True
###### Plotting parameters
self.psname='j0644' # Name of output plot file
self.output='pdf' # Can choose between: pdf, eps or png
self.data=False # If True then exit data will put in file *.txt
self.plot=True # Plot in Python window
self.plotlim=1.3 # Plot limits. 1 = close fit.
self.overs=0.4
####### Dop.in parameters
self.ih = 0
self.iw = 0
self.pb0 = 0.95
self.pb1 = 1.05
self.ns = 7
self.ac = 8e-4
self.nim = 150
self.al0 = 0.002
self.alf = 1.7
self.nal = 0
self.clim = 1.6
self.ipri = 2
self.norm = 1
self.wid = 10e5
self.af = 0.0
# %%%%%%%%%%%%%%%%%% Doppler Options %%%%%%%%%%%%%%%%%%
self.lobcol='white' # Stream color
self.module_path = os.path.dirname(os.path.realpath(__file__))
#### Copy Fortran files to local directory
if os.path.isfile("dop.f"):
#print("Fortran code exists")
if force_install:
print("-- Force_Install --")
os.system('cp '+self.module_path+'/fortran_code/* ./.')
count = 0
dst_file = './sample_script.py'
while os.path.exists(dst_file):
count += 1
dst_file = './%s-%d%s' % ('sample_script', count, '.py')
#print 'Renaming %s to %s' % (file, dst_file)
print("PyDoppler scipt -->",dst_file)
#os.rename(file, dst_file)
os.system('cp '+self.module_path+'/test_data/sample_script.py '+\
dst_file)
else:
print("-- Copying fortran code --")
os.system('cp '+self.module_path+'/fortran_code/* ./.')
count = 0
dst_file = './sample_script.py'
while os.path.exists(dst_file):
count += 1
dst_file = './%s-%d%s' % ('sample_script', count, '.py')
#print 'Renaming %s to %s' % (file, dst_file)
print("PyDoppler scipt -->",dst_file)
#os.rename(file, dst_file)
os.system('cp '+self.module_path+'/test_data/sample_script.py '+\
dst_file)
def Foldspec(self):
"""Foldspec. Prepares the spectra to be read by dopin.
*** Remember to prepare the keywords before running ***
Parameters
----------
None
Returns
-------
None
"""
try:
f = open(self.base_dir+'/'+self.list)
f.close()
except IOError:
print('Phase file - {} - is not accessible. Check "base_dir" and "list"'.format(self.base_dir+'/'+self.list))
inputs = np.loadtxt(self.base_dir+'/'+self.list,dtype={'names': ('files', 'phase'),'formats': ('S14', 'f4')})
# Check 1st spectrum and get wavelength to interpolate
#print()
#print(inputs['files'][0].astype('str'))
w1st = np.loadtxt(self.base_dir+'/'+inputs['files'][0].astype('str'),unpack=True)
if self.nbins==None:
self.nbins=int(1.5/np.abs(inputs['phase'][2]-inputs['phase'][1])) #By default
if self.verbose:
print ("Number of Bins:",self.nbins,np.abs(inputs['phase'][2]-inputs['phase'][1]))
wave,flux=[],[]
for z,i in enumerate(inputs):
w,f=np.loadtxt(self.base_dir+'/'+i['files'].astype('str'),unpack=True)
print (str(z+1).zfill(3)+' '+i['files'].astype('str')+' '+str(i['phase'])+' '+str(w.size))
if z == 0:
wo = w
wave.append(w),flux.append(f)
else:
wave.append(wo),flux.append(np.interp(wo,w,f))
delp=1.0/self.nbins
pha=np.arange(0,1,delp)
bin=np.arange(self.nbins+1)/float(self.nbins)
bin=np.concatenate((bin[:self.nbins]-1,bin))
wt=np.zeros(2*self.nbins)
trsp=np.zeros((2*self.nbins,len(wo)))
# Determine
dph = delp
for ph,il in zip(pha,np.arange(len(pha))):
ph0=ph-dph/2
ph1=ph+dph/2
for ib in np.arange(2*(self.nbins)):
r=bin[ib+1]
l=bin[ib]
if ph0 <= r and ph1> l:
wph=min([r,ph1])-max([r,ph1])
#print [r,ph1],[r,ph1],wph
wt[ib]=wt[ib]+wph
trsp[ib]=trsp[ib]+wph*flux[il]
wt[self.nbins:2*self.nbins]=wt[self.nbins:2*self.nbins]+wt[:self.nbins]
wt = wt[self.nbins:2*self.nbins]
trsp[self.nbins:2*self.nbins] = trsp[self.nbins:2*self.nbins] + trsp[:self.nbins]
trsp=trsp[self.nbins:2*self.nbins]
wt=wt/wt.sum()*float(self.nbins)
#print(wt)
self.wave = wave
self.flux = flux
self.pha = pha
self.input_files = inputs['files'].astype('str')
self.input_phase = inputs['phase']
self.trsp = trsp
def Dopin(self,poly_degree=2, continnum_band=False,
rebin=True,plot_median = False, rebin_wave= 0.,
xlim=None,two_orbits=True,vel_space=True,
verbose=False):
"""Normalises each spectrum to a user-defined continnum.
Optional, it plots a trail spectra
Parameters
----------
poly_degree : int, Optional
polynomial degree to fit the continuum. Default, 2.
continnum_band : array-like,
Define two wavelength bands (x1,x2) and (x3,x4)
to fit the continuum.
contiunnum_band = [x1,x2,x3,x4].
If False, an interactive plot will allow to select this four numbers
- Default, False
plot_median : bool,
Plots a median of the dataset above the trail spectra.
- Default, False
rebin_wave : float,
TBD
xlim : float,
TBD
two_orbits : float,
TBD
vel_space : float,
TBD
verbose : float,
TBD
Returns
-------
None.
"""
lam=self.lam0
cl=2.997e5
xaxis='vel'
cmaps = plt.cm.binary_r #cm.winter_r cm.Blues#cm.gist_stern
medi=17
line_lbl='K I'
if lam < min(self.wave[0]) or lam > max(self.wave[0]):
print('Error: input wavelength out of bounds.')
print('Must be between '+str(min(self.wave[0]))+' and '+str(max(self.wave[0]))+'.')
sys.exit()
ss=0
for i in np.arange(len(self.wave[0])-1):
if lam >= self.wave[0][i] and lam <= self.wave[0][i+1]:
ss=i
fig=plt.figure(num="Average Spec",figsize=(6.57,8.57))
plt.clf()
ax=fig.add_subplot(211)
avgspec=np.sum(self.flux,axis=0)
plt.plot(self.wave[0],avgspec/len(self.pha))
plt.draw()
if not continnum_band:
print( 'Choose 4 points to define continuum')
xor=[]
for i in np.arange(4):
xx=plt.ginput(1,timeout=-1)
xor.append(xx[0][0])
plt.axvline(x=xx[0][0],linestyle='--',color='k')
plt.draw()
else:
xor = continnum_band
lab1 = 'Cont Bands'
for i in np.arange(4):
if i != 0: lab1 = ''
plt.axvline(x=xor[i],linestyle='--',color='k',label=lab1)
plt.draw()
lop = ((self.wave[0]>xor[0]) * (self.wave[0]<xor[1])) + ((self.wave[0]>xor[2]) * (self.wave[0]<xor[3]))
yor=avgspec[lop]/len(self.pha)
plt.ylim(avgspec[lop].min()/len(self.pha)*0.8,avgspec.max()/len(self.pha)*1.1)
z = np.polyfit(self.wave[0][lop], yor, poly_degree)
pz = np.poly1d(z)
linfit = pz(self.wave[0])
plt.plot(self.wave[0],linfit,'r',label='Cont Fit')
lg = plt.legend(fontsize=14)
plt.xlim(xor[0]-10,xor[3]+10)
plt.xlabel(r'Wavelength / $\AA$')
plt.ylabel('Input flux')
ax=fig.add_subplot(212)
vell=((self.wave[0]/self.lam0)**2-1)*cl/(1+(self.wave[0]/self.lam0)**2)
plt.plot(vell,avgspec/len(self.pha)-linfit,'k')
plt.axhline(y=0,linestyle='--',color='k')
plt.axvline(x=-self.delw/self.lam0*cl,linestyle='-',color='DarkOrange')
plt.axvline(x= self.delw/self.lam0*cl,linestyle='-',
color='DarkOrange',label='DopMap limits')
lg = plt.legend(fontsize=14)
plt.xlim(-self.delw/self.lam0*cl*1.5,self.delw/self.lam0*cl*1.5)
qq = (np.abs(vell) < self.delw/self.lam0*cl*1.5)
plt.ylim(-0.05*np.max(avgspec[qq]/len(self.pha)-linfit[qq] -1.0),
np.max(avgspec[qq]/len(self.pha)-linfit[qq] -1.0)*1.1)
plt.xlabel('Velocity km/s')
plt.ylabel('Bkg subtracted Flux')
plt.draw()
plt.tight_layout()
######## Do individual fit on the blaze
for ct,flu in enumerate(self.flux):
#print(lop.sum)
if ct == 0 :
nufac=(1.0+self.gama/2.998e5) * np.sqrt(1.0-(self.gama/2.998e5)**2)
lop = (self.wave[0]/nufac > self.lam0 - self.delw) * \
(self.wave[0]/nufac < self.lam0 + self.delw)
self.normalised_wave = np.array(self.wave[0][lop]/nufac)
# Interpolate in velocity space
vell_temp=((self.normalised_wave/self.lam0)**2-1.0)*cl/(1.0 + \
(self.normalised_wave/self.lam0)**2)
self.vell = np.linspace(vell_temp[0],vell_temp[-1],vell_temp.size)
self.normalised_flux = np.zeros((len(self.flux),lop.sum()))
polmask = ((self.wave[0]/nufac>xor[0]) * (self.wave[0]/nufac<xor[1])) +\
((self.wave[0]/nufac>xor[2]) * (self.wave[0]/nufac<xor[3]))
z = np.polyfit(self.wave[0][polmask]/nufac,flu[polmask], 3)
pz = np.poly1d(z)
linfit = pz(self.normalised_wave)
self.normalised_flux[ct] = np.array(flu[lop]) - np.array(linfit)
self.normalised_flux[ct] = np.interp(self.vell,vell_temp,self.normalised_flux[ct])
if self.verbose:
print(">> Max/Min velocities in map: {} / {}".format(self.vell.min(),
self.vell.max()))
## JVHS 2019 August 6
## Add binning
phase = np.linspace(0,2,self.nbins*2+1,endpoint=True) - 1./(self.nbins)/2.
phase = np.concatenate((phase,[2.0+1./(self.nbins)/2.]))
phase_dec = phase - np.floor(phase)
#print(phase_dec)
#rebin_trail(waver, flux, input_phase, nbins, delp, rebin_wave=None):
trail,temp_phase = rebin_trail(self.vell, self.normalised_flux,
self.input_phase, self.nbins, self.delta_phase,
rebin_wave=None)
self.pha = self.input_phase
self.trsp = self.normalised_flux
#print(">> SHAPES = ",self.pha.shape,self.trsp.shape)
## Phases of individual spectra
#print("LAM_SIZE= {}, VELL_SIZE={}".format(self.normalised_wave.size,self.vell.size))
f=open('dopin','w')
f.write("{:8.0f}{:8.0f}{:13.2f}\n".format(self.pha.size,
self.vell.size,
self.lam0))
# f.write(str(len(self.flux))+" "+str(self.nbins)+" "+str(self.lam0)+'\n')
f.write("{:13.5f}{:8.0f}{:8.0f} {:}\n".format(self.gama*1e5,0,0,
self.base_dir+'/'+self.list))
ctr = 0
for pp in self.pha:
if ctr <5:
f.write("{:13.6f}".format(pp))
ctr +=1
else:
f.write("{:13.6f}\n".format(pp))
ctr=0
f.write("\n{:8.0f}\n".format(1))
ctr = 0
for pp in np.ones(self.pha.size)*self.delta_phase:
if ctr <5:
f.write("{:13.6f}".format(pp))
ctr +=1
else:
f.write("{:13.6f}\n".format(pp))
ctr=0
if ctr != 0: f.write("\n")
##
ctr = 0
for pp in self.vell:
#print('velo size:',len(vell))
if ctr <5:
f.write("{:13.5e}".format(pp*1e5))
ctr +=1
else:
f.write("{:13.5e}\n".format(pp*1e5))
ctr=0
if ctr != 0: f.write("\n")
ctr = 0
# Where we write the normalised flux
for pp in np.array(self.trsp.T).flatten():
if ctr <5:
f.write("{:13.5f}".format(pp))
ctr +=1
else:
f.write("{:13.5f}\n".format(pp))
ctr=0
if ctr != 0: f.write("\n")
f.close()
if xlim == None:
rr = np.ones(self.normalised_wave.size,dtype='bool')
else:
rr = (self.normalised_wave > xlim[0]) & (self.normalised_wave < xlim[1])
if rebin_wave == 0:
waver = self.normalised_wave[rr]
else:
dw = (self.normalised_wave[rr][1] - self.normalised_wave[rr][0]) *\
rebin_wave
print(dw , dw/rebin_wave)
waver = np.arange(self.normalised_wave[rr][0],
self.normalised_wave[rr][-1],dw )
"""
trail = np.zeros((waver.size,phase.size))
tots = trail.copy()
#print(phases.size)
for i in range(self.input_phase.size):
#print("spec phase = ",grid['phase'][i])
dist = phase_dec - (self.input_phase[i]+self.delta_phase/2.)
#print(dist)
dist[np.abs(dist)>1./self.nbins] = 0.
#print(dist/delpha)
dist[dist>0] = 0.0
#print(dist)
weights = np.abs(dist)/(1./self.nbins)
#print(weights)
#print('---------------')
dist = phase_dec - (self.input_phase[i]-self.delta_phase/2.)
#print(dist)
dist[np.abs(dist)>1./self.nbins] = 0.0
#print(dist)
dist[dist>0] = 0.0
#print(dist/delpha)
dist[np.abs(dist)>0] = 1.0 - (np.abs(dist[np.abs(dist)>0]))/(1./self.nbins)
weights += dist
#print(weights)
temp = trail.copy().T
for j in range(phase.size):
if rebin_wave == 0:
temp[j] = self.normalised_flux[i][rr] * weights[j]
temp[j] = self.normalised_flux[i][rr] * weights[j]
else:
temp[j] = np.interp(waver,wave[rr],
self.normalised_flux[i][rr]) * weights[j]
temp[j] = np.interp(waver,wave[rr],
self.normalised_flux[i][rr]) * weights[j]
trail+=temp.T
tots += weights
trail /= tots
"""
if plot_median:
si = 0
lo = 2
else:
si = 2
lo = 0
plt.figure('Trail',figsize=(6.57,8.57))
plt.clf()
if plot_median:
ax1 = plt.subplot2grid((6, 1), (0, 0), rowspan=2)
ax1.minorticks_on()
if rebin_wave ==0:
plt.plot(waver,np.nanmedian(self.normalised_flux,axis=0)[rr],
label='Median',color='#8e44ad')
else:
print(dw)
new_med = np.interp(waver,wave[rr],
np.nanmedian(self.normalised_flux,axis=0)[rr])
plt.plot(waver,np.nanmedian(self.normalised_flux,axis=0)[rr],
label='Median',color='k',alpha=1)
plt.plot(waver,new_med,
label='Median',color='#8e44ad',alpha=1)
plt.axhline(y=0,ls='--',color='r',alpha=0.7)
ax1.set_xticklabels([])
#plt.xlim(self.lam0 - self.delw, self.lam0 + self.delw)
plt.ylim(-0.05,np.nanmax(np.nanmedian(self.normalised_flux,
axis=0)[rr])*1.1)
### Print trail spectra
if limits == None:
limits=[np.nanmax(np.nanmedian(grid,axis=0)[rr])*0.35,
np.nanmax(np.nanmedian(grid,axis=0)[rr])*1.1]
ax2 = plt.subplot2grid((6, 1), (lo, 0), rowspan=4+si)
ax2.minorticks_on()
if vel_space:
x1_lim = (min(waver)-self.lam0)/self.lam0*2.998e5
x2_lim = (max(waver)-self.lam0)/self.lam0*2.998e5
else:
x1_lim = min(waver)
x2_lim = max(waver)
img = plt.imshow(trail.T,interpolation='nearest',
cmap=plt.cm.binary,
aspect='auto',origin='lower',
extent=(x1_lim,
x2_lim,phase[0],phase[-1]+1/self.nbins))#
#vmin=limits[0],vmax=limits[1])
if vel_space:
plt.xlim((self.lam0 - self.delw-self.lam0)/self.lam0*2.998e5,
(self.lam0 + self.delw-self.lam0)/self.lam0*2.998e5)
plt.xlabel('Velocity / km s$^{-1}$')
else:
plt.xlim(self.lam0 - self.delw, self.lam0 + self.delw)
plt.xlabel('Wavelength / $\AA$')
plt.axvline(x=self.lam0,ls='--',color='DarkOrange')
if two_orbits:
lim_two = 2
else:
lim_two = 1
plt.ylim(phase[0],lim_two+1/self.nbins/2.)
plt.ylabel('Orbital Phase')
plt.tight_layout(h_pad=0)
def Syncdop(self,nri=0.9,ndi=0.7):
'''
Runs the fortran code dopp, using the output files from dopin
Parameters
----------
None
Returns
-------
None.
'''
compile_flag = True
while compile_flag == True:
f=open('dop.in','w')
f.write("{} ih type of likelihood function (ih=1 for chi-squared)\n".format(self.ih))
f.write("{} iw iw=1 if error bars are to be read and used\n".format(self.iw))
f.write("{} {} pb0,pb1 range of phases to be ignored\n".format(self.pb0,self.pb1))
f.write("{} ns smearing width in default map\n".format(self.ns))
f.write("{:.1e} ac accuracy of convergence\n".format(self.ac))
f.write("{} nim max no of iterations\n".format(self.nim))
f.write("{} {} {} al0,alf,nal starting value, factor, max number of alfas\n".format(self.al0,self.alf,self.nal))
f.write("{} clim 'C-aim'\n".format(self.clim))
f.write("{} ipri printout control for standard output channel (ipr=2 for full)\n".format(self.ipri))
f.write("{} norm norm=1 for normalization to flat light curve\n".format(self.norm))
f.write("{:2.1e} {} wid,af width and amplitude central absorption fudge\n".format(self.wid,self.af))
f.write("end of parameter input file")
f.close()
f=open('dopin')
lines=f.readlines()
f.close()
# np == npp
npp,nvp=int(lines[0].split()[0]),int(lines[0].split()[1])
lines=[]
f=open('emap_ori.par')
lines=f.readlines()
f.close()
s=lines[0]
npm=int(s[s.find('npm=')+len('npm='):s.rfind(',nvpm')])
nvpm=int(s[s.find('nvpm=')+len('nvpm='):s.rfind(',nvm')])
nvm=int(s[s.find('nvm=')+len('nvm='):s.rfind(')')])
nvp = self.vell.size
print('nvp',nvp)
print(self.trsp.shape)
nv0=int(self.overs*nvp)
nv=max([nv0,int(min([1.5*nv0,npp/3.]))])
print('nv',nv,nv0)
if nv%2 == 1:
nv+=1
#nv=120
nd = npm * nvpm
nr = 0.8 * nv * nv
nt = (nvpm * npm) + (nv * nvpm * 3) + (2 * npm * nv)
prmsize = (0.9 * nv * nt) + (0.9 * nv * nt)
print ('Estimated Memory required ',int(8*prmsize/1e6),' Mbytes')
#print nv,nvm,np,npm,nvp,nvpm
print("np={}; nvpm={}, nvm={}".format(npp, nvp, nv))
print('ND',nd)
print('NR',nr)
if nv != nvm or npp != npm or nvp !=nvpm:
a1=' parameter (npm=%4d'% npp
a2=',nvpm=%4d'%nvp
a3=',nvm=%4d)'%nv
a1=a1+a2+a3
f=open('emap.par','w')
f.write(a1+'\n')
for i,lino in enumerate(lines[1:]):
#print(lino)
if i == 2:
tempo_str = ' parameter (nri={:.3f}*nvm*nt/nd,ndi={:.3f}*nvm*nt/nr)\n'.format(nri,ndi)
#aprint(tempo_str)
f.write(tempo_str)
elif lino !=3:
f.write(lino[:])
else:
f.write(lino[:]+')')
f.close()
if self.verbose:
print ('>> Computing MEM tomogram <<')
print ('----------------------------')
#os.system('gfortran -O -o dopp_input.txt dop.in dop.f clock.f')
os.system('make dop.out')
os.system('./dopp dopp.out')
fo=open('dop.log')
lines=fo.readlines()
fo.close()
#print(clim,rr)
if self.verbose: print ('----------------------------')
if lines[-1].split()[0] == 'projection':
nri = np.float(lines[-1].split()[-1])/np.float(lines[-2].split()[-1])
ndi = np.float(lines[-1].split()[-2])/np.float(lines[-2].split()[-2])
print('>> PROJECTION MATRIX TOO SMALL <<')
print('>> Recomputing with values from Spruit:')
print('>> ndi = {}, nri = {}'.format(ndi,nri))
else:
compile_flag=False
clim,rr=lines[-2].split()[-1],lines[-2].split()[-2]
if rr > clim:
print ('>> NOT CONVERGED: Specified reduced chi^2 not reached: {} > {}'.format(rr,clim))
sys.exit()
else:
if self.verbose:
print ('>> Successful Dopmap!')
def Dopmap(self,dopout = 'dop.out',cmaps = cm.Greys_r,
limits=None, colorbar=False, negative=False,remove_mean=False,
corrx=0,corry=0, smooth=False):
"""
Read output files from Henk Spruit's *.out and plot a Doppler map
Parameters
----------
dopout : str, Optional
Name of output file to be read. Default, dop.out
cmaps : cmap function,
Color scheme to use for Doppler map
- Default, cm.Greys_r
limits : array,
Normalised limits, e.g. [.8, 1.1] for colour display. If None,
automatic Limits will be generated
- Default, None
colorbar : bool,
Generates an interactive colorbar. (unstable...)
- Default, True
remove_mean : bool,
Remove an azimuthal mean of the map
- Default, False
corrx, corry : float, float
Pixel correction for center of removal of azimuthal mean map
smooth : bool,
Apply Gaussian filter to map.
- Default, False
Returns
-------
cbar : object,
Colorbar object for interactivity
data : 2D-array,
Data cube from Doppler map
"""
if self.verbose:
print(">> Reading {} file".format(dopout))
fro=open(dopout,'r')
lines=fro.readlines()
fro.close()
#READ ALL FILES
nph,nvp,nv,w0,aa=int(lines[0].split()[0]),int(lines[0].split()[1]),int(lines[0].split()[2]),float(lines[0].split()[3]),float(lines[0].split()[4])
gamma,abso,atm,dirin=float(lines[1].split()[0]),lines[1].split()[1],lines[1].split()[2],lines[1].split()[3]
new = ''.join(lines[2:len(lines)])
new = new.replace("E",'e')
war = ''.join(new.splitlines()).split()
#print(war)
if self.verbose:
print(">> Finished reading dop.out file")
pha=np.array(war[:nph]).astype(np.float)/2.0/np.pi
dum1=war[nph]
dpha=np.array(war[nph+1:nph+1+nph]).astype(np.float)/2.0/np.pi
last=nph+1+nph
vp=np.array(war[last:last+nvp]).astype(np.float)
dvp=vp[1]-vp[0]
vp=vp-dvp/2.0
last=last+nvp
dm=np.array(war[last:last+nvp*nph]).astype(np.float)
dm=dm.reshape(nvp,nph)
last=last+nvp*nph
#print(war[last])
ih,iw,pb0,pb1,ns,ac,al,clim,norm,wid,af=int(war[last]),int(war[last+1]),float(war[last+2]),float(war[last+3]),int(war[last+4]),float(war[last+5]),float(war[last+6]),float(war[last+7]),int(war[last+8]),float(war[last+9]),float(war[last+10])
nv,va,dd=int(war[last+11]),float(war[last+12]),war[last+13]
last=last+14
im=np.array(war[last:last+nv*nv]).astype(np.float)
im=im.reshape(nv,nv)
last=last+nv*nv
ndum,dum2,dum3=int(war[last]),war[last+1],war[last+2]
last=last+3
dmr=np.array(war[last:last+nvp*nph]).astype(np.float)
dmr=dmr.reshape(nvp,nph)
last=last+nvp*nph
ndum,dum4,dum2,dum3=int(war[last]),int(war[last+1]),war[last+2],war[last+3]
last=last+4
dpx=np.array(war[last:last+nv*nv]).astype(np.float)
dpx=dpx.reshape(nv,nv)
dpx = np.array(dpx)
vp = np.array(vp)/1e5
data = im
data[data == 0.0] = np.nan
new_data = (data - np.nanmin(data) )/np.nanmax(data)
#new_data = np.arcsinh(new_data)
if limits == None:
limits = [np.nanmax((new_data))*0.95,np.nanmax((new_data))*1.05]
if self.verbose:
print("Limits auto {:6.5f} {:6.5f}".format(np.nanmedian(data)*0.8,np.nanmedian(data)*1.2))
print("Limits user {:6.5f} {:6.5f}".format(limits[0],limits[1]))
print("Limits min={:6.5f}, max={:6.5f}".format(np.nanmin(data),np.nanmax(data)))
# Here comes the plotting
fig = plt.figure(num='Doppler Map',figsize=(8.57,8.57))
plt.clf()
ax = fig.add_subplot(111)
ax.minorticks_on()
ll = ~(np.isnan(data) )
#data[~ll] = np.nan
delvp = vp[1]-vp[0]
#print(">>> VP",min(vp),max(vp),delvp)
vpmin, vpmax = min(vp)-.5/delvp,max(vp)+.5/delvp,
if smooth:
interp_mode = 'gaussian'
else:
interp_mode = 'nearest'
if remove_mean:
rad_prof = radial_profile(data,[data[0].size/2-corrx,data[0].size/2-corry])
meano = create_profile(data,rad_prof,[data[0].size/2-corrx,data[0].size/2-corry])
qq = ~np.isnan(data - meano)
if negative:
if remove_mean:
#print data[ll].max(),meano[qq].max()
img = plt.imshow((data - meano)/(data - meano)[qq].max(),
interpolation=interp_mode, cmap=cmaps,aspect='equal',
origin='lower',extent=(vpmin, vpmax,vpmin, vpmax ),
vmin=limits[0],vmax=limits[1])
else:
img = plt.imshow(-(data)/data[ll].max(),
interpolation=interp_mode, cmap=cmaps,aspect='equal',
origin='lower',extent=(vpmin, vpmax,vpmin, vpmax),
vmin=-limits[1],vmax=-limits[0] )
else:
if remove_mean:
#print data[ll].max(),meano[qq].max()
img = plt.imshow((data - meano)/(data - meano)[qq].max(),
interpolation=interp_mode, cmap=cmaps,aspect='equal',
origin='lower',extent=(vpmin, vpmax,vpmin, vpmax),
vmin=limits[0],vmax=limits[1])
else:
#print(np.nanmin(data),np.nanmax(data))
#new_data = (data - np.nanmin(data) )/np.nanmax(data)
#new_data = data
#print(np.nanmedian(data),np.nanstd(data))
print("Limits min={:6.3f}, max={:6.3f}".format(np.nanmin(new_data),np.nanmax(new_data)))
img = plt.imshow(new_data,interpolation=interp_mode,
cmap=cmaps,aspect='equal',origin='lower',
extent=(vpmin, vpmax,vpmin, vpmax ),
vmin=limits[0],vmax=limits[1] )
axlimits=[min(vp), max(vp),min(vp), max(vp) ]
plt.axis(axlimits)
#plt.axvline(x=0.0,linestyle='--',color='white')
plt.xlabel('V$_x$ / km s$^{-1}$')
plt.ylabel('V$_y$ / km s$^{-1}$')
plt.tight_layout()
plt.show()
if colorbar:
cbar = plt.colorbar(format='%.1f',orientation='vertical',
fraction=0.046, pad=0.04)
cbar.set_label('Normalised Flux')
cbar.set_norm(MyNormalize(vmin=limits[0],vmax=limits[1],
stretch='log'))
cbar = DraggableColorbar(cbar,img)
cbar.connect()
else:
cbar=1
'''
if remove_mean:
#print data.size/2
rad_prof = radial_profile(data,[data[0].size/2,data[0].size/2])
mean = create_profile(data,rad_prof,[data[0].size/2,data[0].size/2])
ll = ~np.isnan(mean)
fig = plt.figure('Mean')
plt.clf()
fig.add_subplot(211)
plt.plot(rad_prof)
fig.add_subplot(212)
plt.show()
'''
return cbar,new_data
def Reco(self, cmaps=plt.cm.binary, limits=None, colorbar=True):
"""
Plot original and reconstructed trail spectra from Henk Spruit's *.out
Parameters
----------
cmaps : cmap function,
Color scheme to use for Doppler map
- Default, cm.Greys_r
limits : array,
Normalised limits, e.g. [.8, 1.1] for colour display. If None,
automatic Limits will be generated
- Default, None
colorbar : bool,
Generates an interactive colorbar. (unstable...)
- Default, True
Returns
-------
cbar : object,
Colorbar object for interactivity
data : 2D-array,
Data cube from reconstructed spectra
"""
fro=open('dop.out','r')
lines=fro.readlines()
fro.close()
#READ ALL FILES
nph,nvp,nv,w0,aa=int(lines[0].split()[0]),int(lines[0].split()[1]),int(lines[0].split()[2]),float(lines[0].split()[3]),float(lines[0].split()[4])
gamma,abso,atm,dirin=float(lines[1].split()[0]),lines[1].split()[1],lines[1].split()[2],lines[1].split()[3]
#print(">> Reading dop.out file")
#flag=0
#for i in np.arange(3,len(lines),1):
# if flag==0:
# temp=lines[i-1]+lines[i]
# flag=1
# else:
# temp=temp+lines[i]
# war=temp.split()
new = ''.join(lines[2:len(lines)])
new = new.replace("E",'e')
war = ''.join(new.splitlines()).split()
#print(war)
#print(">> Finished reading dop.out file")
pha=np.array(war[:nph]).astype(np.float)/2.0/np.pi
dum1=war[nph]
dpha=np.array(war[nph+1:nph+1+nph]).astype(np.float)/2.0/np.pi
last=nph+1+nph
vp=np.array(war[last:last+nvp]).astype(np.float)
dvp=vp[1]-vp[0]
vp=vp-dvp/2.0
last=last+nvp
dm=np.array(war[last:last+nvp*nph]).astype(np.float)
dm=dm.reshape(nvp,nph)
last=last+nvp*nph
#print(war[last])
ih,iw,pb0,pb1,ns,ac,al,clim,norm,wid,af=int(war[last]),int(war[last+1]),float(war[last+2]),float(war[last+3]),int(war[last+4]),float(war[last+5]),float(war[last+6]),float(war[last+7]),int(war[last+8]),float(war[last+9]),float(war[last+10])
nv,va,dd=int(war[last+11]),float(war[last+12]),war[last+13]
last=last+14
im=np.array(war[last:last+nv*nv]).astype(np.float)
im=im.reshape(nv,nv)
last=last+nv*nv
ndum,dum2,dum3=int(war[last]),war[last+1],war[last+2]
last=last+3
dmr=np.array(war[last:last+nvp*nph]).astype(np.float)
dmr=dmr.reshape(nvp,nph)
last=last+nvp*nph
ndum,dum4,dum2,dum3=int(war[last]),int(war[last+1]),war[last+2],war[last+3]
last=last+4
dpx=np.array(war[last:last+nv*nv]).astype(np.float)
dpx=dpx.reshape(nv,nv)
dpx = np.array(dpx)
vp = np.array(vp)/1e5
data = im
data[data <= 0.0] = np.nan
dpx[dpx <= 0.0] = np.nan
#dmr[dmr <= 0.0] = np.nan
#dm[dm <= 0.0] = np.nan
#print(pha)
#print(self.nbins)
trail_dm,phase = rebin_trail(vp, dm.T, pha, self.nbins, self.delta_phase,
rebin_wave=None)
trail_dmr,phase = rebin_trail(vp, dmr.T, pha, self.nbins, self.delta_phase,
rebin_wave=None)
delvp = vp[1]-vp[0]
x1_lim = min(vp)
x2_lim = max(vp)
#print(phase)
if limits == None:
limits = [np.median(dmr/np.nanmax(dmr))*0.8,
np.median(dmr/np.nanmax(dmr))*1.2]
# Now lets do the plotting
figor = plt.figure('Reconstruction',figsize=(10,8))
plt.clf()
ax1 = figor.add_subplot(121)
print(np.nanmax(trail_dm))
imgo = plt.imshow(trail_dm.T/np.nanmax(trail_dm),interpolation='nearest',
cmap=cmaps,aspect='auto',origin='upper',
extent=(x1_lim,x2_lim,phase[0],
phase[-1]+1/self.nbins),
vmin=limits[0], vmax=limits[1])
ax1.set_xlabel('Velocity / km s$^{-1}$')
ax1.set_ylabel('Orbital Phase')
if colorbar:
cbar2 = plt.colorbar(format='%.1e',orientation='vertical',
fraction=0.046, pad=0.04)
cbar2.set_label('Normalised Flux')
cbar2.set_norm(MyNormalize(vmin=np.median(dm/np.nanmax(dm))*0.8,
vmax=np.median(dm/np.nanmax(dm))*1.1,
stretch='linear'))
cbar2 = DraggableColorbar(cbar2,imgo)
cbar2.connect()
else:
cbar2=1
ax2 = figor.add_subplot(122)
print(np.nanmax(trail_dmr))
imgo = plt.imshow(trail_dmr.T/np.nanmax(trail_dmr),interpolation='nearest',
cmap=cmaps,aspect='auto',origin='upper',
extent=(x1_lim,x2_lim,phase[0],
phase[-1]+1/self.nbins),
vmin=limits[0], vmax=limits[1])
ax2.set_xlabel('Velocity / km s$^{-1}$')
ax2.set_yticklabels([])
plt.tight_layout(w_pad=0)
if colorbar:
cbar3 = plt.colorbar(format='%.1e',orientation='vertical',
fraction=0.046, pad=0.04)
cbar3.set_label('Normalised Flux')
cbar3.set_norm(MyNormalize(vmin=np.median(dmr/np.nanmax(dmr))*0.8,
vmax=np.median(dmr/np.nanmax(dmr))*1.1,
stretch='linear'))
cbar3 = DraggableColorbar(cbar3,imgo)
cbar3.connect()
else:
cbar3=1
return cbar2,cbar3,dmr,dm
def rebin_trail(waver, flux, input_phase, nbins, delp, rebin_wave=None):
"""
"""
phase = np.linspace(0,2,nbins*2+1,endpoint=True) - 1./(nbins)/2.
phase = np.concatenate((phase,[2.0+1./(nbins)/2.]))
phase_dec = phase - np.floor(phase)
trail = np.zeros((waver.size, phase.size))
from functools import partial
import numpy as np
import pytest
import nengo
import nengo.utils.numpy as npext
from nengo.connection import ConnectionSolverParam
from nengo.dists import Choice, UniformHypersphere
from nengo.exceptions import BuildError, ValidationError
from nengo.solvers import LstsqL2
from nengo.processes import Piecewise
from nengo.transforms import Dense, NoTransform
from nengo.utils.testing import signals_allclose
def test_args(AnyNeuronType, seed, rng):
N = 10
d1, d2 = 3, 2
with nengo.Network(seed=seed) as model:
model.config[nengo.Ensemble].neuron_type = AnyNeuronType()
A = nengo.Ensemble(N, dimensions=d1)
B = nengo.Ensemble(N, dimensions=d2)
nengo.Connection(
A,
B,
eval_points=rng.normal(size=(500, d1)),
synapse=0.01,
function=np.sin,
transform=rng.normal(size=(d2, d1)),
)
def test_node_to_neurons(Simulator, PositiveNeuronType, plt, seed, allclose):
N = 50
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = PositiveNeuronType()
a = nengo.Ensemble(N, dimensions=1)
inn = nengo.Node(output=np.sin)
inh = nengo.Node(Piecewise({0: 0, 0.5: 1}))
nengo.Connection(inn, a)
nengo.Connection(inh, a.neurons, transform=[[-5]] * N)
inn_p = nengo.Probe(inn, "output")
a_p = nengo.Probe(a, "decoded_output", synapse=0.1)
inh_p = nengo.Probe(inh, "output")
with Simulator(m) as sim:
sim.run(1.0)
t = sim.trange()
ideal = np.sin(t)
ideal[t >= 0.5] = 0
plt.plot(t, sim.data[inn_p], label="Input")
plt.plot(t, sim.data[a_p], label="Neuron approx, synapse=0.1")
plt.plot(t, sim.data[inh_p], label="Inhib signal")
plt.plot(t, ideal, label="Ideal output")
plt.legend(loc="best", fontsize="small")
assert allclose(sim.data[a_p][-10:], 0, atol=0.1, rtol=0.01)
def test_ensemble_to_neurons(Simulator, PositiveNeuronType, plt, seed, allclose):
with nengo.Network(seed=seed) as net:
net.config[nengo.Ensemble].neuron_type = PositiveNeuronType()
ens = nengo.Ensemble(40, dimensions=1)
inhibitor = nengo.Ensemble(40, dimensions=1)
stim = nengo.Node(output=np.sin)
inhibition = nengo.Node(Piecewise({0: 0, 0.5: 1}))
nengo.Connection(stim, ens)
nengo.Connection(inhibition, inhibitor)
nengo.Connection(
inhibitor, ens.neurons, transform=-10 * np.ones((ens.n_neurons, 1))
)
stim_p = nengo.Probe(stim, "output")
ens_p = nengo.Probe(ens, "decoded_output", synapse=0.05)
inhibitor_p = nengo.Probe(inhibitor, "decoded_output", synapse=0.05)
inhibition_p = nengo.Probe(inhibition, "output")
with Simulator(net) as sim:
sim.run(1.0)
t = sim.trange()
ideal = np.sin(t)
ideal[t >= 0.5] = 0
plt.plot(t, sim.data[stim_p], label="Input")
plt.plot(t, sim.data[ens_p], label="`ens` value, pstc=0.05")
plt.plot(t, sim.data[inhibitor_p], label="`inhibitor` value, pstc=0.05")
plt.plot(t, sim.data[inhibition_p], label="Inhibition signal")
plt.plot(t, ideal, label="Ideal output")
plt.legend(loc=0, prop={"size": 10})
assert allclose(sim.data[ens_p][-10:], 0, atol=0.1, rtol=0.01)
assert allclose(sim.data[inhibitor_p][-10:], 1, atol=0.1, rtol=0.01)
def test_node_to_ensemble(Simulator, NonDirectNeuronType, plt, seed, allclose):
N = 50
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = NonDirectNeuronType()
input_node = nengo.Node(output=lambda t: [np.sin(t * 3), np.cos(t * 3)])
a = nengo.Ensemble(N * 1, dimensions=1)
b = nengo.Ensemble(N * 1, dimensions=1)
c = nengo.Ensemble(N * 2, dimensions=2)
d = nengo.Ensemble(N, neuron_type=nengo.Direct(), dimensions=3)
nengo.Connection(input_node, a, function=lambda x: -x[0])
nengo.Connection(input_node[:1], b, function=lambda x: -x)
nengo.Connection(input_node, c, function=lambda x: -(x ** 2))
nengo.Connection(
input_node, d, function=lambda x: [-x[0], -(x[0] ** 2), -(x[1] ** 2)]
)
a_p = nengo.Probe(a, "decoded_output", synapse=0.01)
b_p = nengo.Probe(b, "decoded_output", synapse=0.01)
c_p = nengo.Probe(c, "decoded_output", synapse=0.01)
d_p = nengo.Probe(d, "decoded_output", synapse=0.01)
with Simulator(m) as sim:
sim.run(2.0)
t = sim.trange()
plt.plot(t, sim.data[a_p])
plt.plot(t, sim.data[b_p])
plt.plot(t, sim.data[c_p])
plt.plot(t, sim.data[d_p])
plt.legend(
[
"-sin",
"-sin",
"-(sin ** 2)",
"-(cos ** 2)",
"-sin",
"-(sin ** 2)",
"-(cos ** 2)",
],
loc="best",
fontsize="small",
)
assert allclose(sim.data[a_p][-10:], sim.data[d_p][-10:][:, 0], atol=0.1, rtol=0.01)
assert allclose(sim.data[b_p][-10:], sim.data[d_p][-10:][:, 0], atol=0.1, rtol=0.01)
assert allclose(
sim.data[c_p][-10:], sim.data[d_p][-10:][:, 1:3], atol=0.1, rtol=0.01
)
def test_neurons_to_ensemble(Simulator, PositiveNeuronType, plt, seed):
N = 20
m = nengo.Network(seed=seed)
with m:
m.config[nengo.Ensemble].neuron_type = PositiveNeuronType()
a = nengo.Ensemble(N * 2, dimensions=2)
b = nengo.Ensemble(N, dimensions=1)
c = nengo.Ensemble(N, dimensions=N * 2)
nengo.Connection(a.neurons, b, transform=-5 * np.ones((1, N * 2)))
import folium
import geopy.distance
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import butter,filtfilt,sosfilt
import os
import sys
import time
import serial
from pathlib import Path
Path(__file__).parents[1]
cwd = os.getcwd()
input_file_path = str(Path(__file__).parents[1]) + r'\Python\capturow_datalog.txt'
class datalog:
def __init__(self):
self.elap_milli = []
self.current_sample = 0
self.ODR = []
self.latitude = []
self.longitude = []
self.num_strokes = 0
self.aX = []
self.aX_avg = 0
self.aY = []
self.aY_avg = 0
self.aZ = []
self.aZ_avg = 0
#self.distance = 0
self.num_logs = 0
self.num_samples_per_log = 0
self.num_valid_gps_coords = 0
def count_logs():
# Count logs/accel samples in file
dlog.num_logs = sum(1 for odr in dlog.ODR)
dlog.num_samples_per_log = dlog.ODR[-1]
dlog.num_valid_gps_coords = sum(1 for lat in dlog.latitude)
def plot_coords(sample_interval):
m = folium.Map(location=[dlog.latitude[0], dlog.longitude[0]], max_zoom=19, zoom_start=19)
for i in range (dlog.num_valid_gps_coords):
if i % sample_interval == 0:
folium.Marker([dlog.latitude[i], dlog.longitude[i]], icon=folium.Icon(color='red', icon='times', prefix='fa')).add_to(m)
m.save('map.html')
def calc_total_distance():
tot_distance = 0
for i in range (dlog.num_valid_gps_coords):
if i > 0:
coords_1 = (dlog.latitude[i-1], dlog.longitude[i-1])
coords_2 = (dlog.latitude[i], dlog.longitude[i])
meter_distance = geopy.distance.distance(coords_1, coords_2).m
tot_distance += meter_distance
print('Total Distance = ' + str(round(tot_distance, 2)) + ' m')
def calc_max_speed(sample_interval):
max_speed = 0
for i in range (dlog.num_logs):
if ((i > 0) and (i % sample_interval == 0)):
coords_1 = (dlog.latitude[i - sample_interval], dlog.longitude[i - sample_interval])
coords_2 = (dlog.latitude[i], dlog.longitude[i])
distance = geopy.distance.distance(coords_1, coords_2).m
speed = distance / ((dlog.elap_milli[i] - (dlog.elap_milli[i - sample_interval])) / 1000)
if (speed > max_speed):
max_speed = speed
print('Max Speed = ' + str(round(max_speed, 2)) + ' m/s')
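    # Worked example of the split conversion below: at max_speed = 2.5 m/s,
    # 500 m takes 500 / 2.5 = 200 s, i.e. 200 / 60 = 3.33 min -> a 3 min 20 s split.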
if max_speed != 0:
factor = 500 / max_speed
min_sec = factor / 60
mins = math.modf(min_sec)[1]
frac_mins = math.modf(min_sec)[0]
secs = 60 * frac_mins
print('Max Speed = ' + str(round(mins,0)) + ' mins ' + str(round(secs, 2)) + ' secs/500m')
else:
print('Max Speed could not be determined or distance travelled = 0')
# def calc_avg_speed(sample_interval):
# avg_speed = 0
# for i in range (dlog.num_samples):
# if ((i > 0) and (i % sample_interval == 0)):
# coords_1 = (dlog.latitude[i-sample_interval], dlog.longitude[i-sample_interval])
# coords_2 = (dlog.latitude[i], dlog.longitude[i])
# distance = geopy.distance.distance(coords_1, coords_2).m
# speed = distance / (sample_interval)
# if (speed > max_speed):
# max_speed = speed
def plot_accel(plot_all_axes_sum = False, round_precision = 2, x_axis_step = 20, plot_avg = True, start_point = 0, end_point = 10000):
aX_rounded = []
aY_rounded = []
aZ_rounded = []
for i in range(start_point, end_point):
aX_rounded.append(round(float(dlog.aX[i]), round_precision))
aY_rounded.append(round(float(dlog.aY[i]), round_precision))
aZ_rounded.append(round(float(dlog.aZ[i]), round_precision))
x_points = np.arange(start_point, end_point, 1)
if plot_all_axes_sum:
plt.subplot(3,2,1)
plt.plot(x_points, aX_rounded)
plt.title('X-Axis Acceleration')
plt.subplot(3,2,2)
plt.plot(x_points, aY_rounded)
plt.title('Y-Axis Acceleration')
plt.subplot(3,2,3)
plt.plot(x_points, aZ_rounded)
plt.title('Z-Axis Acceleration')
sum_rounded = []
for i in range(0, len(x_points)):
sum_rounded.append((aX_rounded[i] + aY_rounded[i] + aZ_rounded[i]))
plt.subplot(3,2,4)
plt.plot(x_points, sum_rounded)
plt.title('X-Axis + Y-Axis + Z-Axis Acceleration')
sum_rounded = remove_offset(sum_rounded)
zero_mrkr = generate_zero_mrkr(length=len(x_points))
plt.subplot(3,2,5)
plt.plot(x_points, sum_rounded)
plt.plot(x_points, zero_mrkr, ':')
plt.title('All Axes Sum Gravity Calibrated-Out')
plt.xlabel('Sample')
plt.ylabel('Acceleration (2G normalized)')
plt.subplot(3,2,6)
#sum_rounded_lpf = lpf_data(data=sum_rounded, cutoff=1.67, sample_period=0.122)
#sum_rounded_lpf = lpf_data(data=sum_rounded, cutoff=0.835, sample_period=0.122)
sum_rounded_lpf = lpf_data(data=sum_rounded, cutoff=1.67, sample_period=1/(dlog.ODR[-1]))
plt.plot(x_points, sum_rounded_lpf)
plt.plot(x_points, zero_mrkr, ':')
plt.title('All Axes Sum Low-Pass Filtered')
plt.xlabel('Sample')
plt.ylabel('Acceleration (2G normalized)')
plt.show()
# [t, dummy_data] = gen_dummy_data(f=0.34, sample_period=0.122, start_time=0, end_time=60)
# plt.subplot(3,3,7)
# plt.plot(t, dummy_data)
# plt.title('Noisy Dummy Data')
# plt.xlabel('Time')
# plt.ylabel('Amplitude')
# #lpf_dummy_data = lpf_data(data=dummy_data, cutoff=1.67, sample_period=0.122)
# lpf_dummy_data = lpf_data(data=dummy_data, cutoff=0.5, sample_period=0.122)
# plt.subplot(3,3,8)
# plt.plot(t, lpf_dummy_data)
# plt.title('LPF Noisy Dummy Data')
# plt.xlabel('Time')
# plt.ylabel('Amplitude')
# plt.show()
else:
plt.subplot(3,1,1)
plt.plot(x_points, aX_rounded)
if plot_avg:
plt.plot(x_points, np.full(end_point - start_point, dlog.aX_avg))
plt.title('X-Axis Acceleration')
plt.xlabel('Sample')
plt.ylabel('Acceleration (2G normalized)')
plt.xticks(np.arange(start_point, end_point, step=x_axis_step))
plt.subplot(3,1,2)
plt.plot(x_points, aY_rounded)
if plot_avg:
plt.plot(x_points, np.full(end_point - start_point, dlog.aY_avg))
plt.title('Y-Axis Acceleration')
plt.xlabel('Sample')
plt.ylabel('Acceleration (2G normalized)')
plt.xticks(np.arange(start_point, end_point, step=x_axis_step))
plt.subplot(3,1,3)
plt.plot(x_points, aZ_rounded)
if plot_avg:
plt.plot(x_points, np.full(end_point - start_point, dlog.aZ_avg))
plt.title('Z-Axis Acceleration')
plt.xlabel('Sample')
plt.ylabel('Acceleration (2G normalized)')
plt.xticks(np.arange(start_point, end_point, step=x_axis_step))
plt.show()
def avg_accel_data():
x_sum = 0
y_sum = 0
z_sum = 0
for i in range(0, dlog.num_samples_per_log):
x_sum += float(dlog.aX[i])
y_sum += float(dlog.aY[i])
z_sum += float(dlog.aZ[i])
dlog.aX_avg = x_sum / dlog.num_samples_per_log
dlog.aY_avg = y_sum / dlog.num_samples_per_log
dlog.aZ_avg = z_sum / dlog.num_samples_per_log
def find_g_axis():
# Determine which axis/axes gravity is more dominant on
print('')
def remove_offset(data):
# Calibrate-out offset in accel data
sum = 0
for value in data:
sum += value
mean = sum / len(data)
for i, value in enumerate(data):
data[i] = value - mean
return data
def generate_zero_mrkr(length):
mrkr = np.zeros(length)
return mrkr
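# `lpf_data` is called in plot_accel() above but its definition is not part of
# this excerpt. The function below is only a hypothetical sketch, assuming a
# zero-phase Butterworth low-pass filter built from the scipy.signal imports at
# the top of this file; the actual implementation may differ.
def lpf_data(data, cutoff, sample_period, order=2):
    # Normalize the cutoff frequency by the Nyquist frequency
    fs = 1 / sample_period
    normal_cutoff = cutoff / (0.5 * fs)
    # Design the filter and apply it forwards and backwards (zero phase shift)
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    return filtfilt(b, a, data)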
def gen_dummy_data(f, sample_period, start_time, end_time):
# Generate dummy data for debug purposes.
# Sine wave function (of freq (Hz)) = A*Sin(2*pi*f*t), we will take A as 1
fs = 1/sample_period
    t = np.arange(start_time, end_time, 1/fs)
#!/usr/bin/env python
# Family size distribution of tags which were aligned to the reference genome
#
# Author: <NAME> & <NAME>, Johannes-Kepler University Linz (Austria)
# Contact: <EMAIL>
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS,
# a BAM file with tags of reads that overlap the regions of the reference genome and
# an optional BED file with chromosome, start and stop position of the regions as input.
# The program produces a plot which shows the distribution of family sizes of the tags from the input files and
# a tabular file with the data of the plot.
# USAGE: python FSD_regions.py --inputFile filenameSSCS --inputName1 filenameSSCS
# --bamFile DCSbamFile --rangesFile BEDfile --output_tabular outputfile_name_tabular
# --output_pdf outputfile_name_pdf
import argparse
import collections
import os.path
import re
import sys
import matplotlib.pyplot as plt
import numpy as np
import pysam
from matplotlib.backends.backend_pdf import PdfPages
plt.switch_backend('agg')
def readFileReferenceFree(file, delim):
with open(file, 'r') as dest_f:
        data_array = np.genfromtxt(dest_f, skip_header=0, delimiter=delim, comments='#', dtype=str)
# Runs a simple Metropolis-Hastings (ie MCMC) algorithm to simulate two
# jointly distributed random variables with probability density
# p(x,y)=exp(-(x^2+y^2)/s^2)/consNorm, where s>0 and consNorm is a
# normalization constant.
#
# Author: <NAME>, 2019.
# Website: hpaulkeeler.com
# Repository: github.com/hpaulkeeler/posts
import numpy as np; # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt # for plotting
from matplotlib import cm # for heatmap plotting
from mpl_toolkits import mplot3d # for 3-D plots
from scipy import integrate # for integrating
plt.close("all"); # close all previous plots
# Simulation window parameters
xMin = -1;
xMax = 1;
yMin = -1;
yMax = 1;
numbSim = 10 ** 5; # number of random variables simulated
numbSteps = 25; # number of steps for the Markov process
numbBins = 50; # number of bins for histogram
sigma = 2; # standard deviation for normal random steps
# probability density parameters
s = .5; # scale parameter for distribution to be simulated
def fun_lambda(x, y):
return np.exp(-(x ** 2 + y ** 2) / s ** 2);
# normalization constant
consNorm = integrate.dblquad(fun_lambda, xMin, xMax, lambda x: yMin, lambda y: yMax)[0];
def fun_p(x, y):
return (fun_lambda(x, y) / consNorm) * (x >= xMin) * (y >= yMin) * (x <= xMax) * (y <= yMax);
xRand = np.random.uniform(xMin, xMax, numbSim); # random initial values
yRand = np.random.uniform(yMin, yMax, numbSim); # random initial values
probCurrent = fun_p(xRand, yRand); # current transition probabilities
for jj in range(numbSteps):
zxRand = xRand + sigma * np.random.normal(0, 1, numbSim); # take a (normally distributed) random step
zyRand = yRand + sigma * np.random.normal(0, 1, numbSim); # take a (normally distributed) random step
# Conditional random step needs to be symmetric in x and y
    # For example: Z|x ~ N(x,1) (or Z = x + N(0,1)) with probability density
    # p(z|x)=exp(-(z-x)^2/2)/sqrt(2*pi)
probProposal = fun_p(zxRand, zyRand); # proposed probability
# acceptance rejection step
booleAccept = np.random.uniform(0, 1, numbSim) < probProposal / probCurrent;
    # update state of random walk/Markov chain
xRand[booleAccept] = zxRand[booleAccept];
yRand[booleAccept] = zyRand[booleAccept];
# update transition probabilities
probCurrent[booleAccept] = probProposal[booleAccept];
# for histogram, need to reshape as vectors
xRand = np.reshape(xRand, numbSim);
yRand = np.reshape(yRand, numbSim);
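# The rest of the original script is not shown here. As a hypothetical
# continuation, the simulated (x, y) samples can be binned into a 2-D histogram
# and plotted as an estimate of p(x,y); only modules already imported above are
# used, and the variable names below are illustrative.
p_Estimate, xxEdges, yyEdges = np.histogram2d(xRand, yRand, bins=numbBins, density=True);
xValues = (xxEdges[:-1] + xxEdges[1:]) / 2;  # bin centres
yValues = (yyEdges[:-1] + yyEdges[1:]) / 2;
X, Y = np.meshgrid(xValues, yValues, indexing="ij");  # align grid with histogram axes
fig = plt.figure();
ax = plt.axes(projection="3d");
ax.plot_surface(X, Y, p_Estimate, cmap=cm.coolwarm);
ax.set_xlabel("x"); ax.set_ylabel("y"); ax.set_zlabel("Estimated p(x,y)");
plt.show();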
'''Statistical tests for NDVars
Common Attributes
-----------------
The following attributes are always present. For ANOVA, they are lists with the
corresponding items for different effects.
t/f/... : NDVar
Map of the statistical parameter.
p_uncorrected : NDVar
Map of uncorrected p values.
p : NDVar | None
    Map of corrected p values (None if no correction was applied).
clusters : Dataset | None
Table of all the clusters found (None if no clusters were found, or if no
clustering was performed).
n_samples : None | int
The actual number of permutations. If ``samples = -1``, i.e. a complete set
    of permutations is performed, then ``n_samples`` indicates the actual
number of permutations that constitute the complete set.
'''
from datetime import datetime, timedelta
from functools import reduce, partial
from itertools import chain, repeat
from math import ceil
from multiprocessing import Process, Event, SimpleQueue
from multiprocessing.sharedctypes import RawArray
import logging
import operator
import os
import re
import socket
from time import time as current_time
from typing import Union
import numpy as np
import scipy.stats
from scipy import ndimage
from tqdm import trange
from .. import fmtxt, _info, _text
from ..fmtxt import FMText
from .._celltable import Celltable
from .._config import CONFIG
from .._data_obj import (
CategorialArg, CellArg, IndexArg, ModelArg, NDVarArg, VarArg,
Dataset, Var, Factor, Interaction, NestedEffect,
NDVar, Categorial, UTS,
ascategorial, asmodel, asndvar, asvar, assub,
cellname, combine, dataobj_repr)
from .._exceptions import OldVersionError, WrongDimension, ZeroVariance
from .._utils import LazyProperty, user_activity
from .._utils.numpy_utils import FULL_AXIS_SLICE
from . import opt, stats, vector
from .connectivity import Connectivity, find_peaks
from .connectivity_opt import merge_labels, tfce_increment
from .glm import _nd_anova
from .permutation import (
_resample_params, permute_order, permute_sign_flip, random_seeds,
rand_rotation_matrices)
from .t_contrast import TContrastRel
from .test import star, star_factor
__test__ = False
def check_for_vector_dim(y: NDVar) -> None:
for dim in y.dims:
if dim._connectivity_type == 'vector':
raise WrongDimension(f"{dim}: mass-univariate methods are not suitable for vectors. Consider using vector norm as test statistic, or using a testnd.Vector test function.")
def check_variance(x):
if x.ndim != 2:
x = x.reshape((len(x), -1))
if opt.has_zero_variance(x):
raise ZeroVariance("y contains data column with zero variance")
class NDTest:
"""Baseclass for testnd test results
Attributes
----------
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
"""
_state_common = ('y', 'match', 'sub', 'samples', 'tfce', 'pmin', '_cdist',
'tstart', 'tstop', '_dims')
_state_specific = ()
_statistic = None
_statistic_tail = 0
@property
def _attributes(self):
return self._state_common + self._state_specific
def __init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop):
self.y = y.name
self.match = dataobj_repr(match) if match else match
self.sub = sub
self.samples = samples
self.tfce = tfce
self.pmin = pmin
self._cdist = cdist
self.tstart = tstart
self.tstop = tstop
self._dims = y.dims[1:]
def __getstate__(self):
return {name: getattr(self, name, None) for name in self._attributes}
def __setstate__(self, state):
# backwards compatibility:
if 'Y' in state:
state['y'] = state.pop('Y')
if 'X' in state:
state['x'] = state.pop('X')
for k, v in state.items():
setattr(self, k, v)
# backwards compatibility:
if 'tstart' not in state:
cdist = self._first_cdist
self.tstart = cdist.tstart
self.tstop = cdist.tstop
if '_dims' not in state: # 0.17
if 't' in state:
self._dims = state['t'].dims
elif 'r' in state:
self._dims = state['r'].dims
elif 'f' in state:
self._dims = state['f'][0].dims
else:
raise RuntimeError("Error recovering old test results dims")
self._expand_state()
def __repr__(self):
args = self._repr_test_args()
if self.sub is not None:
if isinstance(self.sub, np.ndarray):
sub_repr = '<array>'
else:
sub_repr = repr(self.sub)
args.append(f'sub={sub_repr}')
if self._cdist:
args += self._repr_cdist()
else:
args.append('samples=0')
return f"<{self.__class__.__name__} {', '.join(args)}>"
def _repr_test_args(self):
"""List of strings describing parameters unique to the test
Will be joined with ``", ".join(repr_args)``
"""
raise NotImplementedError()
def _repr_cdist(self):
"""List of results (override for MultiEffectResult)"""
return (self._cdist._repr_test_args(self.pmin) +
self._cdist._repr_clusters())
def _expand_state(self):
"Override to create secondary results"
cdist = self._cdist
if cdist is None:
self.tfce_map = None
self.p = None
self._kind = None
else:
self.tfce_map = cdist.tfce_map
self.p = cdist.probability_map
self._kind = cdist.kind
def _desc_samples(self):
if self.samples == -1:
return f"a complete set of {self.n_samples} permutations"
elif self.samples is None:
return "no permutations"
else:
return f"{self.n_samples} random permutations"
def _desc_timewindow(self):
tstart = self._time_dim.tmin if self.tstart is None else self.tstart
tstop = self._time_dim.tstop if self.tstop is None else self.tstop
return f"{_text.ms(tstart)} - {_text.ms(tstop)} ms"
def _asfmtext(self):
p = self.p.min()
max_stat = self._max_statistic()
return FMText((fmtxt.eq(self._statistic, max_stat, 'max', stars=p), ', ', fmtxt.peq(p)))
def _default_plot_obj(self):
raise NotImplementedError
def _iter_cdists(self):
yield (None, self._cdist)
@property
def _first_cdist(self):
return self._cdist
def _plot_model(self):
"Determine x for plotting categories"
return None
def _plot_sub(self):
if isinstance(self.sub, str) and self.sub == "<unsaved array>":
raise RuntimeError("The sub parameter was not saved for previous "
"versions of Eelbrain. Please recompute this "
"result with the current version.")
return self.sub
def _assert_has_cdist(self):
if self._cdist is None:
raise RuntimeError("This method only applies to results of tests "
"with threshold-based clustering and tests with "
"a permutation distribution (samples > 0)")
def masked_parameter_map(self, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
return self._cdist.masked_parameter_map(pmin, **sub)
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
return self._cdist.cluster(cluster_id)
@LazyProperty
def clusters(self):
if self._cdist is None:
return None
else:
return self.find_clusters(None, True)
def find_clusters(self, pmin=None, maps=False, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
return self._cdist.clusters(pmin, maps, **sub)
def find_peaks(self):
"""Find peaks in a threshold-free cluster distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
return self._cdist.find_peaks()
def compute_probability_map(self, **sub):
"""Compute a probability map
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
return self._cdist.compute_probability_map(**sub)
def info_list(self, computation=True):
"List with information about the test"
out = fmtxt.List("Mass-univariate statistics:")
out.add_item(self._name())
dimnames = [dim.name for dim in self._dims]
dimlist = out.add_sublist(f"Over {_text.enumeration(dimnames)}")
if 'time' in dimnames:
dimlist.add_item(f"Time interval: {self._desc_timewindow()}.")
cdist = self._first_cdist
if cdist is None:
out.add_item("No inferential statistics")
return out
# inference
l = out.add_sublist("Inference:")
if cdist.kind == 'raw':
l.add_item("Based on maximum statistic")
elif cdist.kind == 'tfce':
l.add_item("Based on maximum statistic with threshold-"
"free cluster enhancement (Smith & Nichols, 2009)")
elif cdist.kind == 'cluster':
l.add_item("Based on maximum cluster mass statistic")
sl = l.add_sublist("Cluster criteria:")
for dim in dimnames:
if dim == 'time':
sl.add_item(f"Minimum cluster duration {_text.ms(cdist.criteria.get('mintime', 0))} ms")
elif dim == 'source':
sl.add_item(f"At least {cdist.criteria.get('minsource', 0)} contiguous sources.")
elif dim == 'sensor':
sl.add_item(f"At least {cdist.criteria.get('minsensor', 0)} contiguous sensors.")
else:
value = cdist.criteria.get(f'min{dim}', 0)
sl.add_item(f"Minimum number of contiguous elements in {dim}: {value}")
# n samples
l.add_item(f"In {self._desc_samples()}")
# computation
if computation:
out.add_item(cdist.info_list())
return out
@property
def _statistic_map(self):
return getattr(self, self._statistic)
def _max_statistic(self):
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(self._statistic_map, self.p, tail)
@staticmethod
def _max_statistic_from_map(stat_map: NDVar, p_map: NDVar, tail: int):
if tail == 0:
func = stat_map.extrema
elif tail == 1:
func = stat_map.max
else:
func = stat_map.min
if p_map:
mask = p_map <= .05 if p_map.min() <= .05 else None
else:
mask = None
return func() if mask is None else func(mask)
@property
def n_samples(self):
if self.samples == -1:
return self._first_cdist.samples
else:
return self.samples
@property
def _time_dim(self):
for dim in self._first_cdist.dims:
if isinstance(dim, UTS):
return dim
return None
class t_contrast_rel(NDTest):
"""Mass-univariate contrast based on t-values
Parameters
----------
y : NDVar
Dependent variable.
x : categorial
Model containing the cells which are compared with the contrast.
contrast : str
Contrast specification: see Notes.
match : Factor
Match cases for a repeated measures test.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value for a related samples t-test (with df =
len(match.cells) - 1).
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Notes
-----
A contrast specifies the steps to calculate a map based on *t*-values.
Contrast definitions can contain:
- Comparisons using ``>`` or ``<`` and data cells to compute *t*-maps.
For example, ``"cell1 > cell0"`` will compute a *t*-map of the comparison
if ``cell1`` and ``cell0``, being positive where ``cell1`` is greater than
``cell0`` and negative where ``cell0`` is greater than ``cell1``.
If the data is defined based on an interaction, cells are specified with
``|``, e.g. ``"a1 | b1 > a0 | b0"``. Cells can contain ``*`` to average
multiple cells. Thus, if the second factor in the model has cells ``b1``
and ``b0``, ``"a1 | * > a0 | *"`` would compare ``a1`` to ``a0``
while averaging ``b1`` and ``b0`` within ``a1`` and ``a0``.
- Unary numpy functions ``abs`` and ``negative``, e.g.
``"abs(cell1 > cell0)"``.
- Binary numpy functions ``subtract`` and ``add``, e.g.
``"add(a>b, a>c)"``.
- Numpy functions for multiple arrays ``min``, ``max`` and ``sum``,
e.g. ``min(a>d, b>d, c>d)``.
Cases with zero variance are set to t=0.
Examples
--------
To find cluster where both of two pairwise comparisons are reliable,
i.e. an intersection of two effects, one could use
``"min(a > c, b > c)"``.
To find a specific kind of interaction, where a is greater than b, and
this difference is greater than the difference between c and d, one
could use ``"(a > b) - abs(c > d)"``.
"""
_state_specific = ('x', 'contrast', 't', 'tail')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: CategorialArg,
contrast: str,
match: CategorialArg = None,
sub: CategorialArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
if match is None:
raise TypeError("The `match` parameter needs to be specified for repeated measures test t_contrast_rel")
ct = Celltable(y, x, match, sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
check_variance(ct.y.x)
# setup contrast
t_contrast = TContrastRel(contrast, ct.cells, ct.data_indexes)
# original data
tmap = t_contrast.map(ct.y.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
df = len(ct.match.cells) - 1
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(
ct.y, samples, threshold, tfce, tail, 't', "t-contrast",
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(len(ct.y), samples, unit=ct.match)
run_permutation(t_contrast, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = ('%'.join(ct.x.base_names) if isinstance(ct.x, Interaction) else
ct.x.name)
self.contrast = contrast
self.tail = tail
self.tmin = tmin
self.t = t
self._expand_state()
def _name(self):
if self.y:
return "T-Contrast: %s ~ %s" % (self.y, self.contrast)
else:
return "T-Contrast: %s" % self.contrast
def _plot_model(self):
return self.x
def _repr_test_args(self):
args = [repr(self.y), repr(self.x), repr(self.contrast)]
if self.tail:
args.append("tail=%r" % self.tail)
if self.match:
args.append('match=%r' % self.match)
return args
class corr(NDTest):
"""Mass-univariate correlation
Parameters
----------
y : NDVar
Dependent variable.
x : continuous
The continuous predictor variable.
norm : None | categorial
Categories in which to normalize (z-score) x.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an r-value equivalent to an
uncorrected p-value.
rmin : None | scalar
Threshold for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : None | categorial
When permuting data, only shuffle the cases within the categories
of match.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
r : NDVar
Map of correlation values (with threshold contours).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
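    Examples
    --------
    A hypothetical call, assuming a Dataset ``ds`` with an NDVar ``'meg'`` and
    a continuous predictor ``'rt'``:
    >>> res = corr('meg', 'rt', ds=ds, samples=1000)
    >>> res.masked_parameter_map(0.05)  # r-map masked at p <= .05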
"""
_state_specific = ('x', 'norm', 'n', 'df', 'r')
_statistic = 'r'
@user_activity
def __init__(
self,
y: NDVarArg,
x: VarArg,
norm: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
rmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: CategorialArg = None,
parc: str = None,
**criteria):
sub = assub(sub, ds)
y = asndvar(y, sub=sub, ds=ds, dtype=np.float64)
check_for_vector_dim(y)
if not y.has_case:
raise ValueError("Dependent variable needs case dimension")
x = asvar(x, sub=sub, ds=ds)
if norm is not None:
norm = ascategorial(norm, sub, ds)
if match is not None:
match = ascategorial(match, sub, ds)
name = "%s corr %s" % (y.name, x.name)
# Normalize by z-scoring the data for each subject
# normalization is done before the permutation b/c we are interested in
# the variance associated with each subject for the z-scoring.
y = y.copy()
if norm is not None:
for cell in norm.cells:
idx = (norm == cell)
y.x[idx] = scipy.stats.zscore(y.x[idx], None)
# subtract the mean from y and x so that this can be omitted during
# permutation
y -= y.summary('case')
x = x - x.mean()
n = len(y)
df = n - 2
rmap = stats.corr(y.x, x.x)
n_threshold_params = sum((pmin is not None, rmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, rmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.rtest_r(pmin, df)
elif rmin is not None:
threshold = abs(rmin)
else:
threshold = None
cdist = NDPermutationDistribution(
y, samples, threshold, tfce, 0, 'r', name,
tstart, tstop, criteria, parc)
cdist.add_original(rmap)
if cdist.do_permutation:
iterator = permute_order(n, samples, unit=match)
run_permutation(stats.corr, cdist, iterator, x.x)
# compile results
info = _info.for_stat_map('r', threshold)
r = NDVar(rmap, y.dims[1:], info, name)
# store attributes
NDTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist,
tstart, tstop)
self.x = x.name
self.norm = None if norm is None else norm.name
self.rmin = rmin
self.n = n
self.df = df
self.r = r
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
r = self.r
# uncorrected probability
pmap = stats.rtest_p(r.x, self.df)
info = _info.for_p_map()
p_uncorrected = NDVar(pmap, r.dims, info, 'p_uncorrected')
self.p_uncorrected = p_uncorrected
self.r_p = [[r, self.p]] if self.samples else None
def _name(self):
if self.y and self.x:
return "Correlation: %s ~ %s" % (self.y, self.x)
else:
return "Correlation"
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.norm:
args.append('norm=%r' % self.norm)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_parameter_map()
else:
return self.r
class NDDifferenceTest(NDTest):
difference = None
def _get_mask(self, p=0.05):
self._assert_has_cdist()
if not 1 >= p > 0:
raise ValueError(f"p={p}: needs to be between 1 and 0")
if p == 1:
if self._cdist.kind != 'cluster':
raise ValueError(f"p=1 is only a valid mask for threshold-based cluster tests")
mask = self._cdist.cluster_map == 0
else:
mask = self.p > p
return self._cdist.uncrop(mask, self.difference, True)
def masked_difference(self, p=0.05):
"""Difference map masked by significance
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.difference.mask(mask)
class NDMaskedC1Mixin:
def masked_c1(self, p=0.05):
"""``c1`` map masked by significance of the ``c1``-``c0`` difference
Parameters
----------
p : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
"""
mask = self._get_mask(p)
return self.c1_mean.mask(mask)
class ttest_1samp(NDDifferenceTest):
"""Mass-univariate one sample t-test
Parameters
----------
y : NDVar
Dependent variable.
popmean : scalar
Value to compare y against (default is 0).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
The difference value entering the test (``y`` if popmean is 0).
n : int
Number of cases.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Data points with zero variance are set to t=0.
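    Examples
    --------
    A hypothetical test against zero, assuming a Dataset ``ds`` with an NDVar
    ``'meg'``:
    >>> res = ttest_1samp('meg', ds=ds, samples=1000, pmin=0.05)
    >>> res.clusters  # table of clusters with p-values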
"""
_state_specific = ('popmean', 'tail', 'n', 'df', 't', 'difference')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
popmean: float = 0,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
check_for_vector_dim(ct.y)
n = len(ct.y)
df = n - 1
y = ct.y.summary()
tmap = stats.t_1samp(ct.y.x)
if popmean:
raise NotImplementedError("popmean != 0")
diff = y - popmean
if np.any(diff < 0):
diff.info['cmap'] = 'xpolar'
else:
diff = y
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
if popmean:
y_perm = ct.y - popmean
else:
y_perm = ct.y
n_samples, samples = _resample_params(len(y_perm), samples)
cdist = NDPermutationDistribution(
y_perm, n_samples, threshold, tfce, tail, 't', '1-Sample t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=ct.y.info)
t = NDVar(tmap, ct.y.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, ct.y, ct.match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.popmean = popmean
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.difference = diff
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
def _expand_state(self):
NDTest._expand_state(self)
t = self.t
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map(t.info)
p_uncorr = NDVar(pmap, t.dims, info, 'p')
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "One-Sample T-Test: %s" % self.y
else:
return "One-Sample T-Test"
def _repr_test_args(self):
args = [repr(self.y)]
if self.popmean:
args.append(repr(self.popmean))
if self.match:
args.append('match=%r' % self.match)
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
return self.masked_difference()
else:
return self.difference
def _independent_measures_args(y, x, c1, c0, match, ds, sub):
"Interpret parameters for independent measures tests (2 different argspecs)"
if isinstance(x, str):
x = ds.eval(x)
if isinstance(x, NDVar):
assert c1 is None
assert c0 is None
assert match is None
y1 = asndvar(y, sub, ds)
y0 = asndvar(x, sub, ds)
y = combine((y1, y0))
c1_name = y1.name
c0_name = y0.name
x_name = y0.name
else:
ct = Celltable(y, x, match, sub, cat=(c1, c0), ds=ds, coercion=asndvar, dtype=np.float64)
c1, c0 = ct.cat
c1_name = c1
c0_name = c0
x_name = ct.x.name
match = ct.match
y = ct.y
y1 = ct.data[c1]
y0 = ct.data[c0]
return y, y1, y0, c1, c0, match, x_name, c1_name, c0_name
class ttest_ind(NDDifferenceTest):
"""Mass-univariate independent samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
        Threshold for forming clusters: use a t-value equivalent to an
        uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Notes
-----
Cases with zero variance are set to t=0.
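    Examples
    --------
    A hypothetical between-subjects comparison, assuming a Dataset ``ds`` with
    an NDVar ``'meg'`` and a factor ``'group'`` with cells ``'patient'`` and
    ``'control'``:
    >>> res = ttest_ind('meg', 'group', 'patient', 'control', ds=ds, samples=1000)
    >>> res.masked_difference()  # difference map masked at p <= .05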
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n1', 'n0', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub)
check_for_vector_dim(y)
n1 = len(y1)
n = len(y)
n0 = n - n1
df = n - 2
groups = np.arange(n) < n1
groups.dtype = np.int8
tmap = stats.t_ind(y.x, groups)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
cdist = NDPermutationDistribution(y, samples, threshold, tfce, tail, 't', 'Independent Samples t-Test', tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_order(n, samples)
run_permutation(stats.t_ind, cdist, iterator, groups)
# store attributes
NDDifferenceTest.__init__(self, y, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n1 = n1
self.n0 = n0
self.df = df
self.tail = tail
info = _info.for_stat_map('t', threshold, tail=tail, old=y.info)
self.t = NDVar(tmap, y.dims[1:], info, 't')
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(self.t.x, self.df, self.tail)
info = _info.for_p_map(self.t.info)
p_uncorr = NDVar(pmap, self.t.dims, info, 'p')
self.p_uncorrected = p_uncorr
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Independent-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Independent-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
if self.c1 is None:
args = [f'{self.y!r} (n={self.n1})', f'{self.x!r} (n={self.n0})']
else:
args = [f'{self.y!r}', f'{self.x!r}', f'{self.c1!r} (n={self.n1})', f'{self.c0!r} (n={self.n0})']
if self.match:
            args.append(f'match={self.match!r}')
if self.tail:
args.append(f'tail={self.tail}')
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
def _related_measures_args(y, x, c1, c0, match, ds, sub):
"Interpret parameters for related measures tests (2 different argspecs)"
if isinstance(x, str):
if ds is None:
raise TypeError(f"x={x!r} specified as str without specifying ds")
x = ds.eval(x)
if isinstance(x, NDVar):
assert c1 is None
assert c0 is None
assert match is None
y1 = asndvar(y, sub, ds)
n = len(y1)
y0 = asndvar(x, sub, ds, n)
c1_name = y1.name
c0_name = y0.name
x_name = y0.name
elif match is None:
raise TypeError("The `match` argument needs to be specified for related measures tests")
else:
ct = Celltable(y, x, match, sub, cat=(c1, c0), ds=ds, coercion=asndvar,
dtype=np.float64)
c1, c0 = ct.cat
c1_name = c1
c0_name = c0
if not ct.all_within:
raise ValueError(f"conditions {c1!r} and {c0!r} do not have the same values on {dataobj_repr(ct.match)}")
n = len(ct.y) // 2
y1 = ct.y[:n]
y0 = ct.y[n:]
x_name = ct.x.name
match = ct.match
    return y1, y0, c1, c0, match, n, x_name, c1_name, c0_name
class ttest_rel(NDMaskedC1Mixin, NDDifferenceTest):
"""Mass-univariate related samples t-test
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
tail : 0 | 1 | -1
Which tail of the t-distribution to consider:
0: both (two-tailed, default);
1: upper tail (one-tailed);
-1: lower tail (one-tailed).
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use a t-value equivalent to an
uncorrected p-value.
tmin : scalar
Threshold for forming clusters as t-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : NDVar
Map of p-values uncorrected for multiple comparison.
t : NDVar
Map of t-values.
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
n : int
Number of cases.
Notes
-----
In the permutation cluster test, permutations are done within the
categories of ``match``.
Cases with zero variance are set to t=0.
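    Examples
    --------
    A hypothetical within-subject comparison, assuming a Dataset ``ds`` with an
    NDVar ``'meg'``, a factor ``'cond'`` with cells ``'a'`` and ``'b'``, and a
    ``'subject'`` factor:
    >>> res = ttest_rel('meg', 'cond', 'a', 'b', match='subject', ds=ds, samples=1000)
    >>> res.p.min()              # smallest corrected p-value
    >>> res.masked_difference()  # difference map masked at p <= .05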
"""
_state_specific = ('x', 'c1', 'c0', 'tail', 't', 'n', 'df', 'c1_mean',
'c0_mean')
_statistic = 't'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: CellArg = None,
c0: CellArg = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
tail: int = 0,
samples: int = 10000,
pmin: float = None,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
        y1, y0, c1, c0, match, n, x_name, c1_name, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub)
check_for_vector_dim(y1)
if n <= 2:
raise ValueError("Not enough observations for t-test (n=%i)" % n)
df = n - 1
diff = y1 - y0
tmap = stats.t_1samp(diff.x)
n_threshold_params = sum((pmin is not None, tmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
threshold = cdist = None
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, tmin and tfce can be specified")
else:
if pmin is not None:
threshold = stats.ttest_t(pmin, df, tail)
elif tmin is not None:
threshold = abs(tmin)
else:
threshold = None
n_samples, samples = _resample_params(len(diff), samples)
cdist = NDPermutationDistribution(
diff, n_samples, threshold, tfce, tail, 't', 'Related Samples t-Test',
tstart, tstop, criteria, parc, force_permutation)
cdist.add_original(tmap)
if cdist.do_permutation:
iterator = permute_sign_flip(n, samples)
run_permutation(opt.t_1samp_perm, cdist, iterator)
# NDVar map of t-values
info = _info.for_stat_map('t', threshold, tail=tail, old=y1.info)
t = NDVar(tmap, y1.dims[1:], info, 't')
# store attributes
NDDifferenceTest.__init__(self, y1, match, sub, samples, tfce, pmin, cdist, tstart, tstop)
self.x = x_name
self.c0 = c0
self.c1 = c1
self.n = n
self.df = df
self.tail = tail
self.t = t
self.tmin = tmin
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._expand_state()
def _expand_state(self):
NDTest._expand_state(self)
cdist = self._cdist
t = self.t
# difference
diff = self.c1_mean - self.c0_mean
if np.any(diff.x < 0):
diff.info['cmap'] = 'xpolar'
diff.name = 'difference'
self.difference = diff
# uncorrected p
pmap = stats.ttest_p(t.x, self.df, self.tail)
info = _info.for_p_map()
self.p_uncorrected = NDVar(pmap, t.dims, info, 'p')
# composites
if self.samples:
diff_p = self.masked_difference()
else:
diff_p = self.difference
self.all = [self.c1_mean, self.c0_mean, diff_p]
def _name(self):
if self.tail == 0:
comp = "%s == %s" % (self.c1, self.c0)
elif self.tail > 0:
comp = "%s > %s" % (self.c1, self.c0)
else:
comp = "%s < %s" % (self.c1, self.c0)
if self.y:
return "Related-Samples T-Test: %s ~ %s" % (self.y, comp)
else:
return "Related-Samples T-Test: %s" % comp
def _plot_model(self):
return self.x
def _plot_sub(self):
return "(%s).isin(%s)" % (self.x, (self.c1, self.c0))
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.c1 is not None:
args.extend((repr(self.c1), repr(self.c0), repr(self.match)))
args[-1] += " (n=%i)" % self.n
if self.tail:
args.append("tail=%i" % self.tail)
return args
def _default_plot_obj(self):
if self.samples:
diff = self.masked_difference()
else:
diff = self.difference
return [self.c1_mean, self.c0_mean, diff]
class MultiEffectNDTest(NDTest):
def _repr_test_args(self):
args = [repr(self.y), repr(self.x)]
if self.match is not None:
args.append('match=%r' % self.match)
return args
def _repr_cdist(self):
args = self._cdist[0]._repr_test_args(self.pmin)
for cdist in self._cdist:
effect_args = cdist._repr_clusters()
args.append("%r: %s" % (cdist.name, ', '.join(effect_args)))
return args
def _asfmtext(self):
table = fmtxt.Table('llll')
table.cells('Effect', fmtxt.symbol(self._statistic, 'max'), fmtxt.symbol('p'), 'sig')
table.midrule()
for i, effect in enumerate(self.effects):
table.cell(effect)
table.cell(fmtxt.stat(self._max_statistic(i)))
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
def _expand_state(self):
self.effects = tuple(e.name for e in self._effects)
# clusters
cdists = self._cdist
if cdists is None:
self._kind = None
else:
self.tfce_maps = [cdist.tfce_map for cdist in cdists]
self.p = [cdist.probability_map for cdist in cdists]
self._kind = cdists[0].kind
def _effect_index(self, effect: Union[int, str]):
if isinstance(effect, str):
return self.effects.index(effect)
else:
return effect
def _iter_cdists(self):
for cdist in self._cdist:
yield cdist.name.capitalize(), cdist
@property
def _first_cdist(self):
if self._cdist is None:
return None
else:
return self._cdist[0]
def _max_statistic(self, effect: Union[str, int]):
i = self._effect_index(effect)
stat_map = self._statistic_map[i]
tail = getattr(self, 'tail', self._statistic_tail)
return self._max_statistic_from_map(stat_map, self.p[i], tail)
def cluster(self, cluster_id, effect=0):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
effect : int | str
Index or name of the effect from which to retrieve a cluster
(default is the first effect).
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].cluster(cluster_id)
def compute_probability_map(self, effect=0, **sub):
"""Compute a probability map
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map
(default is the first effect).
Returns
-------
probability : NDVar
Map of p-values.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].compute_probability_map(**sub)
def masked_parameter_map(self, effect=0, pmin=0.05, **sub):
"""Create a copy of the parameter map masked by significance
Parameters
----------
effect : int | str
Index or name of the effect from which to use the parameter map.
pmin : scalar
Threshold p-value for masking (default 0.05). For threshold-based
cluster tests, ``pmin=1`` includes all clusters regardless of their
p-value.
Returns
-------
masked_map : NDVar
NDVar with data from the original parameter map wherever p <= pmin
and 0 everywhere else.
"""
self._assert_has_cdist()
i = self._effect_index(effect)
return self._cdist[i].masked_parameter_map(pmin, **sub)
def find_clusters(self, pmin=None, maps=False, effect=None, **sub):
"""Find significant regions or clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value. For threshold-based tests, all clusters with a
p-value smaller than ``pmin`` are included (default 1);
for other tests, find contiguous regions with ``p ≤ pmin`` (default
0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default ``False``).
effect : int | str
Index or name of the effect from which to find clusters (default is
all effects).
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
self._assert_has_cdist()
if effect is not None:
i = self._effect_index(effect)
return self._cdist[i].clusters(pmin, maps, **sub)
dss = []
info = {}
for cdist in self._cdist:
ds = cdist.clusters(pmin, maps, **sub)
ds[:, 'effect'] = cdist.name
if 'clusters' in ds.info:
info['%s clusters' % cdist.name] = ds.info.pop('clusters')
dss.append(ds)
out = combine(dss)
out.info.update(info)
return out
def find_peaks(self):
"""Find peaks in a TFCE distribution
Returns
-------
ds : Dataset
Dataset with information about the peaks.
"""
self._assert_has_cdist()
dss = []
for cdist in self._cdist:
ds = cdist.find_peaks()
ds[:, 'effect'] = cdist.name
dss.append(ds)
return combine(dss)
class anova(MultiEffectNDTest):
"""Mass-univariate ANOVA
Parameters
----------
y : NDVar
Dependent variable.
x : Model
Independent variables.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10,000).
pmin : None | scalar (0 < pmin < 1)
Threshold for forming clusters: use an f-value equivalent to an
uncorrected p-value.
fmin : scalar
Threshold for forming clusters as f-value.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (for ``tfce is True``, 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
match : categorial | False
When permuting data, only shuffle the cases within the categories
of match. By default, ``match`` is determined automatically based on
        the random effects structure of ``x``.
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
effects : tuple of str
Names of the tested effects, in the same order as in other attributes.
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
f : list of NDVar
Maps of F values.
p : list of NDVar | None
Maps of p-values corrected for multiple comparison (or None if no
correction was performed).
p_uncorrected : list of NDVar
Maps of p-values uncorrected for multiple comparison.
tfce_maps : list of NDVar | None
Maps of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
Examples
--------
For information on model specification see the univariate
:func:`~eelbrain.test.anova` examples.
"""
_state_specific = ('x', 'pmin', '_effects', '_dfs_denom', 'f')
_statistic = 'f'
_statistic_tail = 1
@user_activity
def __init__(
self,
y: NDVarArg,
x: ModelArg,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
pmin: float = None,
fmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
match: Union[CategorialArg, bool] = None,
parc: str = None,
force_permutation: bool = False,
**criteria):
x_arg = x
sub_arg = sub
sub = assub(sub, ds)
y = asndvar(y, sub, ds, dtype=np.float64)
check_for_vector_dim(y)
x = asmodel(x, sub, ds)
if match is None:
random_effects = [e for e in x.effects if e.random]
if not random_effects:
match = None
elif len(random_effects) > 1:
raise NotImplementedError(
"Automatic match parameter for model with more than one "
"random effect. Set match manually.")
else:
match = random_effects[0]
elif match is not False:
match = ascategorial(match, sub, ds)
lm = _nd_anova(x)
effects = lm.effects
dfs_denom = lm.dfs_denom
fmaps = lm.map(y.x)
n_threshold_params = sum((pmin is not None, fmin is not None, bool(tfce)))
if n_threshold_params == 0 and not samples:
cdists = None
thresholds = tuple(repeat(None, len(effects)))
elif n_threshold_params > 1:
raise ValueError("Only one of pmin, fmin and tfce can be specified")
else:
if pmin is not None:
thresholds = tuple(stats.ftest_f(pmin, e.df, df_den) for e, df_den in zip(effects, dfs_denom))
elif fmin is not None:
thresholds = tuple(repeat(abs(fmin), len(effects)))
else:
thresholds = tuple(repeat(None, len(effects)))
cdists = [
NDPermutationDistribution(
y, samples, thresh, tfce, 1, 'f', e.name,
tstart, tstop, criteria, parc, force_permutation)
for e, thresh in zip(effects, thresholds)]
# Find clusters in the actual data
do_permutation = 0
for cdist, fmap in zip(cdists, fmaps):
cdist.add_original(fmap)
do_permutation += cdist.do_permutation
if do_permutation:
iterator = permute_order(len(y), samples, unit=match)
run_permutation_me(lm, cdists, iterator)
# create ndvars
dims = y.dims[1:]
f = []
for e, fmap, df_den, f_threshold in zip(effects, fmaps, dfs_denom, thresholds):
info = _info.for_stat_map('f', f_threshold, tail=1, old=y.info)
f.append(NDVar(fmap, dims, info, e.name))
# store attributes
MultiEffectNDTest.__init__(self, y, match, sub_arg, samples, tfce, pmin,
cdists, tstart, tstop)
self.x = x_arg if isinstance(x_arg, str) else x.name
self._effects = effects
self._dfs_denom = dfs_denom
self.f = f
self._expand_state()
def _expand_state(self):
# backwards compatibility
if hasattr(self, 'effects'):
self._effects = self.effects
MultiEffectNDTest._expand_state(self)
# backwards compatibility
if hasattr(self, 'df_den'):
df_den_temp = {e.name: df for e, df in self.df_den.items()}
del self.df_den
self._dfs_denom = tuple(df_den_temp[e] for e in self.effects)
# f-maps with clusters
pmin = self.pmin or 0.05
if self.samples:
f_and_clusters = []
for e, fmap, df_den, cdist in zip(self._effects, self.f,
self._dfs_denom, self._cdist):
# create f-map with cluster threshold
f0 = stats.ftest_f(pmin, e.df, df_den)
info = _info.for_stat_map('f', f0)
f_ = NDVar(fmap.x, fmap.dims, info, e.name)
# add overlay with cluster
if cdist.probability_map is not None:
f_and_clusters.append([f_, cdist.probability_map])
else:
f_and_clusters.append([f_])
self.f_probability = f_and_clusters
# uncorrected probability
p_uncorr = []
for e, f, df_den in zip(self._effects, self.f, self._dfs_denom):
info = _info.for_p_map()
pmap = stats.ftest_p(f.x, e.df, df_den)
p_ = NDVar(pmap, f.dims, info, e.name)
p_uncorr.append(p_)
self.p_uncorrected = p_uncorr
def _name(self):
if self.y:
return "ANOVA: %s ~ %s" % (self.y, self.x)
else:
return "ANOVA: %s" % self.x
def _plot_model(self):
return '%'.join(e.name for e in self._effects if isinstance(e, Factor) or
(isinstance(e, NestedEffect) and isinstance(e.effect, Factor)))
def _plot_sub(self):
return super(anova, self)._plot_sub()
def _default_plot_obj(self):
if self.samples:
return [self.masked_parameter_map(e) for e in self.effects]
else:
return self._statistic_map
def table(self):
"""Table with effects and smallest p-value"""
table = fmtxt.Table('rlr' + ('' if self.p is None else 'rl'))
table.cells('#', 'Effect', 'f_max')
if self.p is not None:
table.cells('p', 'sig')
table.midrule()
for i in range(len(self.effects)):
table.cell(i)
table.cell(self.effects[i])
table.cell(fmtxt.stat(self.f[i].max()))
if self.p is not None:
pmin = self.p[i].min()
table.cell(fmtxt.p(pmin))
table.cell(star(pmin))
return table
class Vector(NDDifferenceTest):
"""Test a vector field for vectors with non-random direction
Parameters
----------
y : NDVar
Dependent variable (needs to include one vector dimension).
match : None | categorial
Combine data for these categories before testing.
sub : index
Perform test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (if ``tfce`` is ``True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
difference : NDVar
The vector field averaged across cases.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
Notes
-----
Vector tests are based on the Hotelling T-Square statistic. Computation of
the T-Square statistic relies on [1]_.
References
----------
.. [1] Kopp, J. (2008). Efficient numerical diagonalization of hermitian 3 x
3 matrices. International Journal of Modern Physics C, 19(3), 523-548.
`10.1142/S0129183108012303 <https://doi.org/10.1142/S0129183108012303>`_
"""
_state_specific = ('difference', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: Union[float, bool] = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
ct = Celltable(y, match=match, sub=sub, ds=ds, coercion=asndvar, dtype=np.float64)
n = len(ct.y)
cdist = NDPermutationDistribution(ct.y, samples, tmin, tfce, 1, 'norm', 'Vector test', tstart, tstop, criteria, parc, force_permutation)
v_dim = ct.y.dimnames[cdist._vector_ax + 1]
v_mean = ct.y.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(ct.y)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, ct.y, ct.match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self._v_dim = v_dim
self.n = n
self._expand_state()
def __setstate__(self, state):
if 'diff' in state:
state['difference'] = state.pop('diff')
NDTest.__setstate__(self, state)
@property
def _statistic(self):
return 'norm' if self.t2 is None else 't2'
def _name(self):
if self.y:
return f"Vector test: {self.y}"
else:
return "Vector test"
def _repr_test_args(self):
args = []
if self.y:
args.append(repr(self.y))
if self.match:
args.append(f'match={self.match!r}')
return args
@staticmethod
def _vector_perm(y, out, seed, use_norm):
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
rotation = rand_rotation_matrices(n_cases, seed)
if use_norm:
return vector.mean_norm_rotated(y, rotation, out)
else:
return vector.t2_stat_rotated(y, rotation, out)
@staticmethod
def _vector_t2_map(y):
dimnames = y.get_dimnames(first=('case', 'space'))
x = y.get_data(dimnames)
t2_map = stats.t2_1samp(x)
if y.ndim == 2:
return np.float64(t2_map)
else:
dims = y.get_dims(dimnames[2:])
return NDVar(t2_map, dims)
class VectorDifferenceIndependent(Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Combine cases with the same cell on ``x % match``.
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (if ``tfce`` is ``True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Total number of cases.
n1 : int
Number of cases in ``c1``.
n0 : int
Number of cases in ``c0``.
c1_mean : NDVar
Mean in the c1 condition.
c0_mean : NDVar
Mean in the c0 condition.
difference : NDVar
Difference between the mean in condition c1 and condition c0.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or None if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
"""
_state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
_statistic = 'norm'
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y, y1, y0, c1, c0, match, x_name, c1_name, c0_name = _independent_measures_args(y, x, c1, c0, match, ds, sub)
self.n1 = len(y1)
self.n0 = len(y0)
self.n = len(y)
cdist = NDPermutationDistribution(y, samples, tmin, tfce, 1, 'norm', 'Vector test (independent)', tstart, tstop, criteria, parc, force_permutation)
self._v_dim = v_dim = y.dimnames[cdist._vector_ax + 1]
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self.difference = self.c1_mean - self.c0_mean
self.difference.name = 'difference'
v_mean_norm = self.difference.norm(v_dim)
if not use_norm:
raise NotImplementedError("t2 statistic not implemented for VectorDifferenceIndependent")
else:
cdist.add_original(v_mean_norm.x if self.difference.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator, self.n1)
NDTest.__init__(self, y, match, sub, samples, tfce, None, cdist, tstart, tstop)
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (independent): {self.y}"
else:
return "Vector test (independent)"
@staticmethod
def _vector_perm(y, n1, out, seed, use_norm):
assert use_norm
n_cases, n_dims, n_tests = y.shape
assert n_dims == 3
# randomize directions
rotation = rand_rotation_matrices(n_cases, seed)
# randomize groups
cases = np.arange(n_cases)
np.random.shuffle(cases)
# group 1
mean_1 = np.zeros((n_dims, n_tests))
for case in cases[:n1]:
mean_1 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_1 /= n1
# group 0
mean_0 = np.zeros((n_dims, n_tests))
for case in cases[n1:]:
mean_0 += np.tensordot(rotation[case], y[case], ((1,), (0,)))
mean_0 /= (n_cases - n1)
# difference
mean_1 -= mean_0
norm = scipy.linalg.norm(mean_1, 2, axis=0)
if out is not None:
out[:] = norm
return norm
class VectorDifferenceRelated(NDMaskedC1Mixin, Vector):
"""Test difference between two vector fields for non-random direction
Parameters
----------
y : NDVar
Dependent variable.
x : categorial | NDVar
Model containing the cells which should be compared, or NDVar to which
``y`` should be compared. In the latter case, the next three parameters
are ignored.
c1 : str | tuple | None
Test condition (cell of ``x``). ``c1`` and ``c0`` can be omitted if
``x`` only contains two cells, in which case cells will be used in
alphabetical order.
c0 : str | tuple | None
Control condition (cell of ``x``).
match : categorial
Units within which measurements are related (e.g. 'subject' in a
within-subject comparison).
sub : index
Perform the test with a subset of the data.
ds : None | Dataset
If a Dataset is specified, all data-objects can be specified as
names of Dataset variables.
samples : int
Number of samples for permutation test (default 10000).
tmin : scalar
Threshold value for forming clusters.
tfce : bool | scalar
Use threshold-free cluster enhancement. Use a scalar to specify the
step of TFCE levels (if ``tfce`` is ``True``, a step of 0.1 is used).
tstart : scalar
Start of the time window for the permutation test (default is the
beginning of ``y``).
tstop : scalar
Stop of the time window for the permutation test (default is the
end of ``y``).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation: bool
Conduct permutations regardless of whether there are any clusters.
norm : bool
Use the vector norm as univariate test statistic (instead of Hotelling’s
T-Square statistic).
mintime : scalar
Minimum duration for clusters (in seconds).
minsource : int
Minimum number of sources per cluster.
Attributes
----------
n : int
Number of cases.
c1_mean : NDVar
Mean in the ``c1`` condition.
c0_mean : NDVar
Mean in the ``c0`` condition.
difference : NDVar
Difference between the mean in condition ``c1`` and condition ``c0``.
t2 : NDVar | None
Hotelling T-Square map; ``None`` if the test used ``norm=True``.
p : NDVar | None
Map of p-values corrected for multiple comparison (or ``None`` if no
correction was performed).
tfce_map : NDVar | None
Map of the test statistic processed with the threshold-free cluster
enhancement algorithm (or None if no TFCE was performed).
clusters : None | Dataset
For cluster-based tests, a table of all clusters. Otherwise a table of
all significant regions (or ``None`` if permutations were omitted).
See also the :meth:`.find_clusters` method.
See Also
--------
Vector : One-sample vector test, notes on vector test implementation
"""
_state_specific = ('difference', 'c1_mean', 'c0_mean', 'n', '_v_dim', 't2')
@user_activity
def __init__(
self,
y: NDVarArg,
x: Union[CategorialArg, NDVarArg],
c1: str = None,
c0: str = None,
match: CategorialArg = None,
sub: IndexArg = None,
ds: Dataset = None,
samples: int = 10000,
tmin: float = None,
tfce: bool = False,
tstart: float = None,
tstop: float = None,
parc: str = None,
force_permutation: bool = False,
norm: bool = False,
**criteria):
use_norm = bool(norm)
y1, y0, c1, c0, match, n, x_name, c1_name, c0_name = _related_measures_args(y, x, c1, c0, match, ds, sub)
difference = y1 - y0
difference.name = 'difference'
n_samples, samples = _resample_params(n, samples)
cdist = NDPermutationDistribution(difference, n_samples, tmin, tfce, 1, 'norm', 'Vector test (related)', tstart, tstop, criteria, parc, force_permutation)
v_dim = difference.dimnames[cdist._vector_ax + 1]
v_mean = difference.mean('case')
v_mean_norm = v_mean.norm(v_dim)
if not use_norm:
t2_map = self._vector_t2_map(difference)
cdist.add_original(t2_map.x if v_mean.ndim > 1 else t2_map)
if v_mean.ndim == 1:
self.t2 = t2_map
else:
self.t2 = NDVar(t2_map, v_mean_norm.dims, _info.for_stat_map('t2'), 't2')
else:
cdist.add_original(v_mean_norm.x if v_mean.ndim > 1 else v_mean_norm)
self.t2 = None
if cdist.do_permutation:
iterator = random_seeds(n_samples)
vector_perm = partial(self._vector_perm, use_norm=use_norm)
run_permutation(vector_perm, cdist, iterator)
# store attributes
NDTest.__init__(self, difference, match, sub, samples, tfce, None, cdist, tstart, tstop)
self.difference = v_mean
self.c1_mean = y1.mean('case', name=cellname(c1_name))
self.c0_mean = y0.mean('case', name=cellname(c0_name))
self._v_dim = v_dim
self.n = n
self._expand_state()
def _name(self):
if self.y:
return f"Vector test (related): {self.y}"
else:
return "Vector test (related)"
def flatten(array, connectivity):
"""Reshape SPM buffer array to 2-dimensional map for connectivity processing
Parameters
----------
array : ndarray
N-dimensional array (with non-adjacent dimension at first position).
connectivity : Connectivity
N-dimensional connectivity.
Returns
-------
flat_array : ndarray
The input array reshaped if necessary, making sure that input and output
arrays share the same underlying data buffer.
"""
if array.ndim == 2 or not connectivity.custom:
return array
else:
out = array.reshape((array.shape[0], -1))
assert out.base is array
return out
def flatten_1d(array):
if array.ndim == 1:
return array
else:
out = array.ravel()
assert out.base is array
return out
def label_clusters(stat_map, threshold, tail, connectivity, criteria):
"""Label clusters
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
Returns
-------
cmap : np.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
cmap = np.empty(stat_map.shape, np.uint32)
bin_buff = np.empty(stat_map.shape, np.bool_)
cmap_flat = flatten(cmap, connectivity)
if tail == 0:
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
else:
int_buff = int_buff_flat = None
cids = _label_clusters(stat_map, threshold, tail, connectivity, criteria,
cmap, cmap_flat, bin_buff, int_buff, int_buff_flat)
return cmap, cids
def _label_clusters(stat_map, threshold, tail, conn, criteria, cmap, cmap_flat,
bin_buff, int_buff, int_buff_flat):
"""Find clusters on a statistical parameter map
Parameters
----------
stat_map : array
Statistical parameter map (non-adjacent dimension on the first
axis).
cmap : array of int
Buffer for the cluster id map (will be modified).
Returns
-------
cluster_ids : np.ndarray of uint32
Identifiers of the clusters that survive the minimum duration
criterion.
"""
# compute clusters
if tail >= 0:
bin_map_above = np.greater(stat_map, threshold, bin_buff)
cids = _label_clusters_binary(bin_map_above, cmap, cmap_flat, conn,
criteria)
if tail <= 0:
bin_map_below = np.less(stat_map, -threshold, bin_buff)
if tail < 0:
cids = _label_clusters_binary(bin_map_below, cmap, cmap_flat, conn,
criteria)
else:
cids_l = _label_clusters_binary(bin_map_below, int_buff,
int_buff_flat, conn, criteria)
x = cmap.max()
int_buff[bin_map_below] += x
cids_l += x
cmap += int_buff
cids = np.concatenate((cids, cids_l))
return cids
def label_clusters_binary(bin_map, connectivity, criteria=None):
"""Label clusters in a boolean map
Parameters
----------
bin_map : numpy.ndarray
Binary map.
connectivity : Connectivity
Connectivity corresponding to ``bin_map``.
criteria : dict
Cluster criteria.
Returns
-------
cmap : numpy.ndarray of uint32
Array with clusters labelled as integers.
cluster_ids : numpy.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
cmap = np.empty(bin_map.shape, np.uint32)
cmap_flat = flatten(cmap, connectivity)
cids = _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria)
return cmap, cids
def _label_clusters_binary(bin_map, cmap, cmap_flat, connectivity, criteria):
"""Label clusters in a binary array
Parameters
----------
bin_map : np.ndarray
Binary map of where the parameter map exceeds the threshold for a
cluster (non-adjacent dimension on the first axis).
cmap : np.ndarray
Array in which to label the clusters.
cmap_flat : np.ndarray
Flat copy of cmap (ndim=2, only used when all_adjacent==False)
connectivity : Connectivity
Connectivity.
criteria : None | list
Cluster size criteria, list of (axes, v) tuples: collapse over axes
and apply v as the minimum extent.
Returns
-------
cluster_ids : np.ndarray of uint32
Sorted identifiers of the clusters that survive the selection criteria.
"""
# find clusters
n = ndimage.label(bin_map, connectivity.struct, cmap)
if n <= 1:
# in older versions, n is 1 even when no cluster is found
if n == 0 or cmap.max() == 0:
return np.array((), np.uint32)
else:
cids = np.array((1,), np.uint32)
elif connectivity.custom:
cids = merge_labels(cmap_flat, n, *connectivity.custom[0])
else:
cids = np.arange(1, n + 1, 1, np.uint32)
# apply minimum cluster size criteria
if criteria and cids.size:
for axes, v in criteria:
cids = np.setdiff1d(cids,
[i for i in cids if np.count_nonzero(np.equal(cmap, i).any(axes)) < v],
True)
if cids.size == 0:
break
return cids
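# Illustrative sketch (not part of the original module): for grid data without
# custom connectivity, the labelling step above reduces to scipy.ndimage.label.
def _demo_label_clusters_binary():
    bin_map = np.array([[1, 1, 0, 0, 1],
                        [0, 1, 0, 0, 1],
                        [0, 0, 0, 0, 0]], bool)
    struct = ndimage.generate_binary_structure(2, 1)  # 4-connectivity in 2-D
    cmap, n = ndimage.label(bin_map, struct)
    cids = np.arange(1, n + 1, 1, np.uint32)
    return cmap, cids  # two clusters, labelled 1 and 2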
def tfce(stat_map, tail, connectivity, dh=0.1):
tfce_im = np.empty(stat_map.shape, np.float64)
tfce_im_1d = flatten_1d(tfce_im)
bin_buff = np.empty(stat_map.shape, np.bool_)
int_buff = np.empty(stat_map.shape, np.uint32)
int_buff_flat = flatten(int_buff, connectivity)
int_buff_1d = flatten_1d(int_buff)
return _tfce(stat_map, tail, connectivity, tfce_im, tfce_im_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh)
def _tfce(stat_map, tail, conn, out, out_1d, bin_buff, int_buff,
int_buff_flat, int_buff_1d, dh=0.1, e=0.5, h=2.0):
"Threshold-free cluster enhancement"
out.fill(0)
# determine slices
if tail == 0:
hs = chain(np.arange(-dh, stat_map.min(), -dh),
np.arange(dh, stat_map.max(), dh))
elif tail < 0:
hs = np.arange(-dh, stat_map.min(), -dh)
else:
hs = np.arange(dh, stat_map.max(), dh)
# label clusters in slices at different heights
# fill each cluster with total section value
# each point's value is the vertical sum
for h_ in hs:
if h_ > 0:
np.greater_equal(stat_map, h_, bin_buff)
h_factor = h_ ** h
else:
np.less_equal(stat_map, h_, bin_buff)
h_factor = (-h_) ** h
c_ids = _label_clusters_binary(bin_buff, int_buff, int_buff_flat, conn, None)
tfce_increment(c_ids, int_buff_1d, out_1d, e, h_factor)
return out
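# Illustrative sketch (standalone reference for the positive tail only, not the
# module's buffered implementation): TFCE steps through thresholds h in increments
# of dh and, at each step, adds extent**e * h**h_exp to every point of each
# supra-threshold cluster; per-step increments are summed as in the loop above.
def _demo_tfce_1d(stat_map, dh=0.1, e=0.5, h_exp=2.0):
    # stat_map: 1-D array of (positive-tail) statistic values
    out = np.zeros(stat_map.shape, np.float64)
    for h_ in np.arange(dh, stat_map.max(), dh):
        labels, n = ndimage.label(stat_map >= h_)
        for cid in range(1, n + 1):
            idx = labels == cid
            out[idx] += idx.sum() ** e * h_ ** h_exp
    return out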
class StatMapProcessor:
def __init__(self, tail, max_axes, parc):
"""Reduce a statistical map to the relevant maximum statistic"""
self.tail = tail
self.max_axes = max_axes
self.parc = parc
def max_stat(self, stat_map):
if self.tail == 0:
v = np.abs(stat_map, stat_map).max(self.max_axes)
elif self.tail > 0:
v = stat_map.max(self.max_axes)
else:
v = -stat_map.min(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
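# Illustrative sketch (not part of the original module): reducing one permutation's
# statistic map to a single maximum statistic, as done for "raw" distributions.
# Note that max_stat takes the absolute value of the map in place.
def _demo_max_stat():
    proc = StatMapProcessor(tail=0, max_axes=None, parc=None)
    stat_map = np.array([0.5, -3.2, 1.1])
    return proc.max_stat(stat_map)  # 3.2: two-tailed, so the largest |value|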
class TFCEProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, dh):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.dh = dh
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool_)
self._int_buff = np.empty(shape, np.uint32)
self._tfce_im = np.empty(shape, np.float64)
self._tfce_im_1d = flatten_1d(self._tfce_im)
self._int_buff_flat = flatten(self._int_buff, connectivity)
self._int_buff_1d = flatten_1d(self._int_buff)
def max_stat(self, stat_map):
v = _tfce(
stat_map, self.tail, self.connectivity, self._tfce_im, self._tfce_im_1d,
self._bin_buff, self._int_buff, self._int_buff_flat, self._int_buff_1d,
self.dh,
).max(self.max_axes)
if self.parc is None:
return v
else:
return [v[idx].max() for idx in self.parc]
class ClusterProcessor(StatMapProcessor):
def __init__(self, tail, max_axes, parc, shape, connectivity, threshold,
criteria):
StatMapProcessor.__init__(self, tail, max_axes, parc)
self.shape = shape
self.connectivity = connectivity
self.threshold = threshold
self.criteria = criteria
# Pre-allocate memory buffers used for cluster processing
self._bin_buff = np.empty(shape, np.bool_)
self._cmap = np.empty(shape, np.uint32)
self._cmap_flat = flatten(self._cmap, connectivity)
if tail == 0:
self._int_buff = np.empty(shape, np.uint32)
self._int_buff_flat = flatten(self._int_buff, connectivity)
else:
self._int_buff = self._int_buff_flat = None
def max_stat(self, stat_map, threshold=None):
if threshold is None:
threshold = self.threshold
cmap = self._cmap
cids = _label_clusters(stat_map, threshold, self.tail, self.connectivity,
self.criteria, cmap, self._cmap_flat,
self._bin_buff, self._int_buff,
self._int_buff_flat)
if self.parc is not None:
v = []
for idx in self.parc:
clusters_v = ndimage.sum(stat_map[idx], cmap[idx], cids)
if len(clusters_v):
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
v.append(clusters_v.max())
else:
v.append(0)
return v
elif len(cids):
clusters_v = ndimage.sum(stat_map, cmap, cids)
if self.tail <= 0:
np.abs(clusters_v, clusters_v)
return clusters_v.max()
else:
return 0
def get_map_processor(kind, *args):
if kind == 'tfce':
return TFCEProcessor(*args)
elif kind == 'cluster':
return ClusterProcessor(*args)
elif kind == 'raw':
return StatMapProcessor(*args)
else:
raise ValueError("kind=%s" % repr(kind))
class NDPermutationDistribution:
"""Accumulate information on a cluster statistic.
Parameters
----------
y : NDVar
Dependent variable.
samples : int
Number of permutations.
threshold : scalar > 0
Cluster-forming threshold for threshold-based clustering.
tfce : bool | scalar
Threshold-free cluster enhancement.
tail : 1 | 0 | -1
Which tail(s) of the distribution to consider. 0 is two-tailed,
whereas 1 only considers positive values and -1 only considers
negative values.
meas : str
Label for the parameter measurement (e.g., 't' for t-values).
name : None | str
Name for the comparison.
tstart, tstop : None | scalar
Restrict the time window for finding clusters (None: use the whole
epoch).
criteria : dict
Dictionary with threshold criteria for cluster size: 'mintime'
(seconds) and 'minsource' (n_sources).
parc : str
Collect permutation statistics for all regions of the parcellation of
this dimension. For threshold-based test, the regions are disconnected.
force_permutation : bool
Conduct permutations regardless of whether there are any clusters.
Notes
-----
Use of the NDPermutationDistribution proceeds in 3 steps:
- initialize the NDPermutationDistribution object: ``cdist = NDPermutationDistribution(...)``
- use a copy of y cropped to the time window of interest:
``y = cdist.y_perm``
- add the actual statistical map with ``cdist.add_original(pmap)``
- if any clusters are found (``if cdist.n_clusters``):
- proceed to add statistical maps from permuted data with
``cdist.add_perm(pmap)``.
Permutation data shape: case, [vector, ][non-adjacent, ] ...
internal shape: [non-adjacent, ] ...
"""
tfce_warning = None
def __init__(self, y, samples, threshold, tfce=False, tail=0, meas='?', name=None,
tstart=None, tstop=None, criteria={}, parc=None, force_permutation=False):
assert y.has_case
assert parc is None or isinstance(parc, str)
if tfce and threshold:
raise RuntimeError(f"threshold={threshold!r}, tfce={tfce!r}: mutually exclusive parameters")
elif tfce:
if tfce is not True:
tfce = abs(tfce)
kind = 'tfce'
elif threshold:
threshold = float(threshold)
kind = 'cluster'
assert threshold > 0
else:
kind = 'raw'
# vector: will be removed for stat_map
vector = [d._connectivity_type == 'vector' for d in y.dims[1:]]
has_vector_ax = any(vector)
if has_vector_ax:
vector_ax = vector.index(True)
else:
vector_ax = None
# prepare temporal cropping
if (tstart is None) and (tstop is None):
y_perm = y
self._crop_for_permutation = False
self._crop_idx = None
else:
t_ax = y.get_axis('time') - 1
y_perm = y.sub(time=(tstart, tstop))
# for stat-maps
if vector_ax is not None and vector_ax < t_ax:
t_ax -= 1
t_slice = y.time._array_index(slice(tstart, tstop))
self._crop_for_permutation = True
self._crop_idx = FULL_AXIS_SLICE * t_ax + (t_slice,)
dims = list(y_perm.dims[1:])
if has_vector_ax:
del dims[vector_ax]
# custom connectivity: move non-adjacent connectivity to first axis
custom = [d._connectivity_type == 'custom' for d in dims]
n_custom = sum(custom)
if n_custom > 1:
raise NotImplementedError("More than one axis with custom connectivity")
nad_ax = None if n_custom == 0 else custom.index(True)
if nad_ax:
swapped_dims = list(dims)
swapped_dims[0], swapped_dims[nad_ax] = dims[nad_ax], dims[0]
else:
swapped_dims = dims
connectivity = Connectivity(swapped_dims, parc)
assert connectivity.vector is None
# cluster map properties
ndim = len(dims)
# prepare cluster minimum size criteria
if criteria:
criteria_ = []
for k, v in criteria.items():
m = re.match(r'min(\w+)', k)
if m:
dimname = m.group(1)
if not y.has_dim(dimname):
raise TypeError(
"%r is an invalid keyword argument for this testnd "
"function (no dimension named %r)" % (k, dimname))
ax = y.get_axis(dimname) - 1
if dimname == 'time':
v = int(ceil(v / y.time.tstep))
else:
raise TypeError("%r is an invalid keyword argument for this testnd function" % (k,))
if nad_ax:
if ax == 0:
ax = nad_ax
elif ax == nad_ax:
ax = 0
axes = tuple(i for i in range(ndim) if i != ax)
criteria_.append((axes, v))
if kind != 'cluster':
# here so that invalid keywords raise explicitly
err = ("Can not use cluster size criteria when doing "
"threshold free cluster evaluation")
raise ValueError(err)
else:
criteria_ = None
# prepare distribution
samples = int(samples)
if parc:
for parc_ax, parc_dim in enumerate(swapped_dims):
if parc_dim.name == parc:
break
else:
raise ValueError("parc=%r (no dimension named %r)" % (parc, parc))
if parc_dim._connectivity_type == 'none':
parc_indexes = np.arange(len(parc_dim))
elif kind == 'tfce':
raise NotImplementedError(
f"TFCE for parc={parc!r} ({parc_dim.__class__.__name__} dimension)")
elif parc_dim._connectivity_type == 'custom':
if not hasattr(parc_dim, 'parc'):
raise NotImplementedError(f"parc={parc!r}: dimension has no parcellation")
parc_indexes = tuple(np.flatnonzero(parc_dim.parc == cell) for
cell in parc_dim.parc.cells)
parc_dim = Categorial(parc, parc_dim.parc.cells)
else:
raise NotImplementedError(f"parc={parc!r}")
dist_shape = (samples, len(parc_dim))
dist_dims = ('case', parc_dim)
max_axes = tuple(chain(range(parc_ax), range(parc_ax + 1, ndim)))
else:
dist_shape = (samples,)
dist_dims = None
max_axes = None
parc_indexes = None
# arguments for the map processor
shape = tuple(map(len, swapped_dims))
if kind == 'raw':
map_args = (kind, tail, max_axes, parc_indexes)
elif kind == 'tfce':
dh = 0.1 if tfce is True else tfce
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, dh)
else:
map_args = (kind, tail, max_axes, parc_indexes, shape, connectivity, threshold, criteria_)
self.kind = kind
self.y_perm = y_perm
self.dims = tuple(dims) # external stat map dims (cropped time)
self.shape = shape # internal stat map shape
self._connectivity = connectivity
self.samples = samples
self.dist_shape = dist_shape
self._dist_dims = dist_dims
self._max_axes = max_axes
self.dist = None
self.threshold = threshold
self.tfce = tfce
self.tail = tail
self._nad_ax = nad_ax
self._vector_ax = vector_ax
self.tstart = tstart
self.tstop = tstop
self.parc = parc
self.meas = meas
self.name = name
self._criteria = criteria_
self.criteria = criteria
self.map_args = map_args
self.has_original = False
self.do_permutation = False
self.dt_perm = None
self._finalized = False
self._init_time = current_time()
self._host = socket.gethostname()
self.force_permutation = force_permutation
from .. import __version__
self._version = __version__
def _crop(self, im):
"Crop an original stat_map"
if self._crop_for_permutation:
return im[self._crop_idx]
else:
return im
def uncrop(
self,
ndvar: NDVar, # NDVar to uncrop
to: NDVar, # NDVar that has the target time dimensions
default: float = 0, # value to fill in uncropped area
):
if self.tstart is None and self.tstop is None:
return ndvar
target_time = to.get_dim('time')
t_ax = ndvar.get_axis('time')
dims = list(ndvar.dims)
dims[t_ax] = target_time
shape = list(ndvar.shape)
shape[t_ax] = len(target_time)
t_slice = target_time._array_index(slice(self.tstart, self.tstop))
x = np.empty(shape, ndvar.x.dtype)
x.fill(default)
x[FULL_AXIS_SLICE * t_ax + (t_slice,)] = ndvar.x
return NDVar(x, dims, ndvar.info, ndvar.name)
def add_original(self, stat_map):
"""Add the original statistical parameter map.
Parameters
----------
stat_map : array
Parameter map of the statistic of interest (uncropped).
"""
if self.has_original:
raise RuntimeError("Original pmap already added")
logger = logging.getLogger(__name__)
logger.debug("Adding original parameter map...")
# crop/reshape stat_map
stat_map = self._crop(stat_map)
if self._nad_ax:
stat_map = stat_map.swapaxes(0, self._nad_ax)
# process map
if self.kind == 'tfce':
dh = 0.1 if self.tfce is True else self.tfce
self.tfce_warning = max(stat_map.max(), -stat_map.min()) < dh
cmap = tfce(stat_map, self.tail, self._connectivity, dh)
cids = None
n_clusters = cmap.max() > 0
elif self.kind == 'cluster':
cmap, cids = label_clusters(stat_map, self.threshold, self.tail,
self._connectivity, self._criteria)
n_clusters = len(cids)
# clean original cluster map
idx = np.in1d(cmap, cids, invert=True).reshape(self.shape)
cmap[idx] = 0
else:
cmap = stat_map
cids = None
n_clusters = True
self._t0 = current_time()
self._original_cluster_map = cmap
self._cids = cids
self.n_clusters = n_clusters
self.has_original = True
self.dt_original = self._t0 - self._init_time
self._original_param_map = stat_map
if self.force_permutation or (self.samples and n_clusters):
self._create_dist()
self.do_permutation = True
else:
self.dist_array = None
self.finalize()
def _create_dist(self):
"Create the distribution container"
if CONFIG['n_workers']:
n = reduce(operator.mul, self.dist_shape)
dist_array = RawArray('d', n)
dist = np.frombuffer(dist_array, np.float64, n)
dist.shape = self.dist_shape
else:
dist_array = None
dist = np.zeros(self.dist_shape)
self.dist_array = dist_array
self.dist = dist
def _aggregate_dist(self, **sub):
"""Aggregate permutation distribution to one value per permutation
Parameters
----------
[dimname] : index
Limit the data for the distribution.
Returns
-------
dist : array, shape = (samples,)
Maximum value for each permutation in the given region.
"""
dist = self.dist
if sub:
if self._dist_dims is None:
raise TypeError("NDPermutationDistribution does not have parcellation")
dist_ = NDVar(dist, self._dist_dims)
dist_sub = dist_.sub(**sub)
dist = dist_sub.x
if dist.ndim > 1:
axes = tuple(range(1, dist.ndim))
dist = dist.max(axes)
return dist
def __repr__(self):
items = []
if self.has_original:
dt = timedelta(seconds=round(self.dt_original))
items.append("%i clusters (%s)" % (self.n_clusters, dt))
if self.samples > 0 and self.n_clusters > 0:
if self.dt_perm is not None:
dt = timedelta(seconds=round(self.dt_perm))
items.append("%i permutations (%s)" % (self.samples, dt))
else:
items.append("no data")
return "<NDPermutationDistribution: %s>" % ', '.join(items)
def __getstate__(self):
if not self._finalized:
raise RuntimeError("Cannot pickle cluster distribution before all "
"permutations have been added.")
state = {
name: getattr(self, name) for name in (
'name', 'meas', '_version', '_host', '_init_time',
# settings ...
'kind', 'threshold', 'tfce', 'tail', 'criteria', 'samples', 'tstart', 'tstop', 'parc',
# data properties ...
'dims', 'shape', '_nad_ax', '_vector_ax', '_criteria', '_connectivity',
# results ...
'dt_original', 'dt_perm', 'n_clusters', '_dist_dims', 'dist', '_original_param_map', '_original_cluster_map', '_cids',
)}
state['version'] = 3
return state
def __setstate__(self, state):
# backwards compatibility
version = state.pop('version', 0)
if version == 0:
if '_connectivity_src' in state:
del state['_connectivity_src']
del state['_connectivity_dst']
if '_connectivity' in state:
del state['_connectivity']
if 'N' in state:
state['samples'] = state.pop('N')
if '_version' not in state:
state['_version'] = '< 0.11'
if '_host' not in state:
state['_host'] = 'unknown'
if '_init_time' not in state:
state['_init_time'] = None
if 'parc' not in state:
if state['_dist_dims'] is None:
state['parc'] = None
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
elif isinstance(state['parc'], tuple):
if len(state['parc']) == 0:
state['parc'] = None
elif len(state['parc']) == 1:
state['parc'] = state['parc'][0]
else:
raise OldVersionError("This pickled file is from a previous version of Eelbrain and is not compatible anymore. Please recompute this test.")
nad_ax = state['_nad_ax']
state['dims'] = dims = state['dims'][1:]
state['_connectivity'] = Connectivity(
(dims[nad_ax],) + dims[:nad_ax] + dims[nad_ax + 1:],
state['parc'])
if version < 2:
state['_vector_ax'] = None
if version < 3:
state['tfce'] = state['kind'] == 'tfce'
for k, v in state.items():
setattr(self, k, v)
self.has_original = True
self.finalize()
def _repr_test_args(self, pmin):
"Argument representation for TestResult repr"
args = ['samples=%r' % self.samples]
if pmin is not None:
args.append(f"pmin={pmin!r}")
elif self.kind == 'tfce':
arg = f"tfce={self.tfce!r}"
if self.tfce_warning:
arg = f"{arg} [WARNING: The TFCE step is larger than the largest value in the data]"
args.append(arg)
if self.tstart is not None:
args.append(f"tstart={self.tstart!r}")
if self.tstop is not None:
args.append(f"tstop={self.tstop!r}")
for k, v in self.criteria.items():
args.append(f"{k}={v!r}")
return args
def _repr_clusters(self):
info = []
if self.kind == 'cluster':
if self.n_clusters == 0:
info.append("no clusters")
else:
info.append("%i clusters" % self.n_clusters)
if self.n_clusters and self.samples:
info.append(f"{fmtxt.peq(self.probability_map.min())}")
return info
def _package_ndvar(self, x, info=None, external_shape=False):
"Generate NDVar from map with internal shape"
if not self.dims:
if isinstance(x, np.ndarray):
return x.item()
return x
if not external_shape and self._nad_ax:
x = x.swapaxes(0, self._nad_ax)
if info is None:
info = {}
return NDVar(x, self.dims, info, self.name)
def finalize(self):
"Package results and delete temporary data"
if self.dt_perm is None:
self.dt_perm = current_time() - self._t0
# original parameter map
param_contours = {}
if self.kind == 'cluster':
if self.tail >= 0:
param_contours[self.threshold] = (0.7, 0.7, 0)
if self.tail <= 0:
param_contours[-self.threshold] = (0.7, 0, 0.7)
info = _info.for_stat_map(self.meas, contours=param_contours)
self.parameter_map = self._package_ndvar(self._original_param_map, info)
# TFCE map
if self.kind == 'tfce':
self.tfce_map = self._package_ndvar(self._original_cluster_map)
else:
self.tfce_map = None
# cluster map
if self.kind == 'cluster':
self.cluster_map = self._package_ndvar(self._original_cluster_map)
else:
self.cluster_map = None
self._finalized = True
def data_for_permutation(self, raw=True):
"""Retrieve data flattened for permutation
Parameters
----------
raw : bool
Return a RawArray and a shape tuple instead of a numpy array.
"""
# get data in the right shape
x = self.y_perm.x
if self._vector_ax:
x = np.moveaxis(x, self._vector_ax + 1, 1)
if self._nad_ax is not None:
dst = 1
src = 1 + self._nad_ax
if self._vector_ax is not None:
dst += 1
if self._vector_ax > self._nad_ax:
src += 1
if dst != src:
x = x.swapaxes(dst, src)
# flat y shape
ndims = 1 + (self._vector_ax is not None)
n_flat = 1 if x.ndim == ndims else reduce(operator.mul, x.shape[ndims:])
y_flat_shape = x.shape[:ndims] + (n_flat,)
if not raw:
return x.reshape(y_flat_shape)
n = reduce(operator.mul, y_flat_shape)
ra = RawArray('d', n)
ra[:] = x.ravel() # OPT: don't copy data
return ra, y_flat_shape, x.shape[ndims:]
def _cluster_properties(self, cluster_map, cids):
"""Create a Dataset with cluster properties
Parameters
----------
cluster_map : NDVar
NDVar in which clusters are marked by bearing the same number.
cids : array_like of int
Numbers specifying the clusters (must occur in cluster_map) which
should be analyzed.
Returns
-------
cluster_properties : Dataset
Cluster properties. Which properties are included depends on the
dimensions.
"""
ndim = cluster_map.ndim
n_clusters = len(cids)
# setup compression
compression = []
for ax, dim in enumerate(cluster_map.dims):
extents = np.empty((n_clusters, len(dim)), dtype=np.bool_)
axes = tuple(i for i in range(ndim) if i != ax)
compression.append((ax, dim, axes, extents))
# find extents for all clusters
c_mask = np.empty(cluster_map.shape, np.bool_)
for i, cid in enumerate(cids):
np.equal(cluster_map, cid, c_mask)
for ax, dim, axes, extents in compression:
np.any(c_mask, axes, extents[i])
# prepare Dataset
ds = Dataset()
ds['id'] = Var(cids)
for ax, dim, axes, extents in compression:
properties = dim._cluster_properties(extents)
if properties is not None:
ds.update(properties)
return ds
def cluster(self, cluster_id):
"""Retrieve a specific cluster as NDVar
Parameters
----------
cluster_id : int
Cluster id.
Returns
-------
cluster : NDVar
NDVar of the cluster, 0 outside the cluster.
Notes
-----
Clusters only have stable ids for thresholded cluster distributions.
"""
if self.kind != 'cluster':
raise RuntimeError(
f'Only cluster-based tests have clusters with stable ids, this '
f'is a {self.kind} distribution. Use the .find_clusters() '
f'method instead with maps=True.')
elif cluster_id not in self._cids:
raise ValueError(f'No cluster with id {cluster_id!r}')
out = self.parameter_map * (self.cluster_map == cluster_id)
properties = self._cluster_properties(self.cluster_map, (cluster_id,))
for k in properties:
out.info[k] = properties[0, k]
return out
def clusters(self, pmin=None, maps=True, **sub):
"""Find significant clusters
Parameters
----------
pmin : None | scalar, 1 >= p >= 0
Threshold p-value for clusters (for thresholded cluster tests the
default is 1, for others 0.05).
maps : bool
Include in the output a map of every cluster (can be memory
intensive if there are large statistical maps and/or many
clusters; default True).
[dimname] : index
Limit the data for the distribution.
Returns
-------
ds : Dataset
Dataset with information about the clusters.
"""
if pmin is None:
if self.samples > 0 and self.kind != 'cluster':
pmin = 0.05
elif self.samples == 0:
msg = ("Can not determine p values in distribution without "
"permutations.")
if self.kind == 'cluster':
msg += " Find clusters with pmin=None."
raise RuntimeError(msg)
if sub:
param_map = self.parameter_map.sub(**sub)
else:
param_map = self.parameter_map
if self.kind == 'cluster':
if sub:
cluster_map = self.cluster_map.sub(**sub)
cids = np.setdiff1d(cluster_map.x, [0])
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
=============
desitarget.io
=============
Functions for reading, writing and manipulating files related to targeting.
"""
from __future__ import (absolute_import, division)
#
import numpy as np
import fitsio
from fitsio import FITS
import os
import re
from . import __version__ as desitarget_version
import numpy.lib.recfunctions as rfn
import healpy as hp
from glob import glob, iglob
from time import time
from desiutil import depend
from desitarget.geomask import hp_in_box, box_area, is_in_box
from desitarget.geomask import hp_in_cap, cap_area, is_in_cap
from desitarget.geomask import is_in_hp, nside2nside, pixarea2nside
from desitarget.targets import main_cmx_or_sv
# ADM set up the DESI default logger
from desiutil.log import get_logger
log = get_logger()
# ADM this is a lookup dictionary to map RELEASE to a simpler "North" or "South".
# ADM photometric system. This will expand with the definition of RELEASE in the
# ADM Data Model (e.g. https://desi.lbl.gov/trac/wiki/DecamLegacy/DR4sched).
# ADM 7999 were the dr8a test reductions, for which only 'S' surveys were processed.
releasedict = {3000: 'S', 4000: 'N', 5000: 'S', 6000: 'N', 7000: 'S', 7999: 'S',
8000: 'S', 8001: 'N', 9000: 'S', 9001: 'N'}
# ADM This is an empty array of most of the TS data model columns and
# ADM dtypes. Note that other columns are added in read_tractor and
# ADM from the "addedcols" data models below.
basetsdatamodel = np.array([], dtype=[
('RELEASE', '>i2'), ('BRICKID', '>i4'), ('BRICKNAME', 'S8'),
('OBJID', '>i4'), ('TYPE', 'S4'), ('RA', '>f8'), ('RA_IVAR', '>f4'),
('DEC', '>f8'), ('DEC_IVAR', '>f4'), ('DCHISQ', '>f4', (5,)), ('EBV', '>f4'),
('FLUX_G', '>f4'), ('FLUX_R', '>f4'), ('FLUX_Z', '>f4'),
('FLUX_IVAR_G', '>f4'), ('FLUX_IVAR_R', '>f4'), ('FLUX_IVAR_Z', '>f4'),
('MW_TRANSMISSION_G', '>f4'), ('MW_TRANSMISSION_R', '>f4'), ('MW_TRANSMISSION_Z', '>f4'),
('FRACFLUX_G', '>f4'), ('FRACFLUX_R', '>f4'), ('FRACFLUX_Z', '>f4'),
('FRACMASKED_G', '>f4'), ('FRACMASKED_R', '>f4'), ('FRACMASKED_Z', '>f4'),
('FRACIN_G', '>f4'), ('FRACIN_R', '>f4'), ('FRACIN_Z', '>f4'),
('NOBS_G', '>i2'), ('NOBS_R', '>i2'), ('NOBS_Z', '>i2'),
('PSFDEPTH_G', '>f4'), ('PSFDEPTH_R', '>f4'), ('PSFDEPTH_Z', '>f4'),
('GALDEPTH_G', '>f4'), ('GALDEPTH_R', '>f4'), ('GALDEPTH_Z', '>f4'),
('FLUX_W1', '>f4'), ('FLUX_W2', '>f4'), ('FLUX_W3', '>f4'), ('FLUX_W4', '>f4'),
('FLUX_IVAR_W1', '>f4'), ('FLUX_IVAR_W2', '>f4'),
('FLUX_IVAR_W3', '>f4'), ('FLUX_IVAR_W4', '>f4'),
('MW_TRANSMISSION_W1', '>f4'), ('MW_TRANSMISSION_W2', '>f4'),
('MW_TRANSMISSION_W3', '>f4'), ('MW_TRANSMISSION_W4', '>f4'),
('ALLMASK_G', '>i2'), ('ALLMASK_R', '>i2'), ('ALLMASK_Z', '>i2'),
('FIBERFLUX_G', '>f4'), ('FIBERFLUX_R', '>f4'), ('FIBERFLUX_Z', '>f4'),
('FIBERTOTFLUX_G', '>f4'), ('FIBERTOTFLUX_R', '>f4'), ('FIBERTOTFLUX_Z', '>f4'),
('REF_EPOCH', '>f4'), ('WISEMASK_W1', '|u1'), ('WISEMASK_W2', '|u1'),
('MASKBITS', '>i2')
])
# ADM columns that are new for the DR9 data model.
dr9addedcols = np.array([], dtype=[
('LC_FLUX_W1', '>f4', (13,)), ('LC_FLUX_W2', '>f4', (13,)),
('LC_FLUX_IVAR_W1', '>f4', (13,)), ('LC_FLUX_IVAR_W2', '>f4', (13,)),
('LC_NOBS_W1', '>i2', (13,)), ('LC_NOBS_W2', '>i2', (13,)),
('LC_MJD_W1', '>f8', (13,)), ('LC_MJD_W2', '>f8', (13,)),
('SHAPE_R', '>f4'), ('SHAPE_E1', '>f4'), ('SHAPE_E2', '>f4'),
('SHAPE_R_IVAR', '>f4'), ('SHAPE_E1_IVAR', '>f4'), ('SHAPE_E2_IVAR', '>f4'),
('SERSIC', '>f4'), ('SERSIC_IVAR', '>f4')
])
# ADM columns that were deprecated in the DR8 data model.
dr8addedcols = np.array([], dtype=[
('FRACDEV', '>f4'), ('FRACDEV_IVAR', '>f4'),
('SHAPEDEV_R', '>f4'), ('SHAPEDEV_E1', '>f4'), ('SHAPEDEV_E2', '>f4'),
('SHAPEDEV_R_IVAR', '>f4'), ('SHAPEDEV_E1_IVAR', '>f4'), ('SHAPEDEV_E2_IVAR', '>f4'),
('SHAPEEXP_R', '>f4'), ('SHAPEEXP_E1', '>f4'), ('SHAPEEXP_E2', '>f4'),
('SHAPEEXP_R_IVAR', '>f4'), ('SHAPEEXP_E1_IVAR', '>f4'), ('SHAPEEXP_E2_IVAR', '>f4'),
])
def desitarget_nside():
"""Default HEALPix Nside for all target selection algorithms."""
nside = 64
return nside
def desitarget_resolve_dec():
"""Default Dec cut to separate targets in BASS/MzLS from DECaLS."""
dec = 32.375
return dec
def add_photsys(indata):
"""Add the PHOTSYS column to a sweeps-style array.
Parameters
----------
indata : :class:`~numpy.ndarray`
Numpy structured array to which to add PHOTSYS column.
Returns
-------
:class:`~numpy.ndarray`
Input array with PHOTSYS added (and set using RELEASE).
Notes
-----
- The PHOTSYS column is only added if the RELEASE column
is available in the passed `indata`.
"""
# ADM only add the PHOTSYS column if RELEASE exists.
if 'RELEASE' in indata.dtype.names:
# ADM add PHOTSYS to the data model.
# ADM the fitsio check is a hack for the v0.9 to v1.0 transition
# ADM (v1.0 now converts all byte strings to unicode strings).
from distutils.version import LooseVersion
if LooseVersion(fitsio.__version__) >= LooseVersion('1'):
pdt = [('PHOTSYS', '<U1')]
else:
pdt = [('PHOTSYS', '|S1')]
dt = indata.dtype.descr + pdt
# ADM create a new numpy array with the fields from the new data model...
nrows = len(indata)
outdata = np.empty(nrows, dtype=dt)
# ADM ...and populate them with the passed columns of data.
for col in indata.dtype.names:
outdata[col] = indata[col]
# ADM add the PHOTSYS column.
photsys = release_to_photsys(indata["RELEASE"])
outdata['PHOTSYS'] = photsys
else:
outdata = indata
return outdata
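# Illustrative sketch (hypothetical helper, not part of desitarget): the generic
# numpy pattern used by add_photsys above to append one column to a structured array.
def _demo_add_column(indata, name='PHOTSYS', dtype='<U1', fill='S'):
    dt = indata.dtype.descr + [(name, dtype)]
    outdata = np.empty(len(indata), dtype=dt)
    for col in indata.dtype.names:
        outdata[col] = indata[col]
    outdata[name] = fill
    return outdata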
def read_tractor(filename, header=False, columns=None):
"""Read a tractor catalogue or sweeps file.
Parameters
----------
filename : :class:`str`
File name of one Tractor or sweeps file.
header : :class:`bool`, optional
If ``True``, return (data, header) instead of just data.
columns: :class:`list`, optional
Specify the desired Tractor catalog columns to read; defaults to
desitarget.io.tsdatamodel.dtype.names + most of the columns in
desitarget.gaiamatch.gaiadatamodel.dtype.names, where
tsdatamodel is, e.g., basetsdatamodel + dr9addedcols.
Returns
-------
:class:`~numpy.ndarray`
Array with the tractor schema, uppercase field names.
"""
check_fitsio_version()
# ADM read in the file information. Due to fitsio header bugs
# ADM near v1.0.0, make absolutely sure the user wants the header.
if header:
indata, hdr = fitsio.read(filename, upper=True, header=True,
columns=columns)
else:
indata = fitsio.read(filename, upper=True, columns=columns)
# ADM form the final data model in a manner that maintains
# ADM backwards-compatability with DR8.
if "FRACDEV" in indata.dtype.names:
tsdatamodel = np.array(
[], dtype=basetsdatamodel.dtype.descr + dr8addedcols.dtype.descr)
else:
tsdatamodel = np.array(
[], dtype=basetsdatamodel.dtype.descr + dr9addedcols.dtype.descr)
# ADM the full data model including Gaia columns.
from desitarget.gaiamatch import gaiadatamodel
from desitarget.gaiamatch import pop_gaia_coords, pop_gaia_columns
gaiadatamodel = pop_gaia_coords(gaiadatamodel)
# ADM special handling of the pre-DR7 Data Model.
for gaiacol in ['GAIA_PHOT_BP_RP_EXCESS_FACTOR',
'GAIA_ASTROMETRIC_SIGMA5D_MAX',
'GAIA_ASTROMETRIC_PARAMS_SOLVED', 'REF_CAT']:
if gaiacol not in indata.dtype.names:
gaiadatamodel = pop_gaia_columns(gaiadatamodel, [gaiacol])
dt = tsdatamodel.dtype.descr + gaiadatamodel.dtype.descr
dtnames = tsdatamodel.dtype.names + gaiadatamodel.dtype.names
# ADM limit to just passed columns.
if columns is not None:
dt = [d for d, name in zip(dt, dtnames) if name in columns]
# ADM set-up the output array.
nrows = len(indata)
data = np.zeros(nrows, dtype=dt)
import pickle
import unittest
import numpy as np
import hnswlib
def get_dist(metric, pt1, pt2):
if metric == 'l2':
return np.sum((pt1-pt2)**2)
elif metric == 'ip':
return 1. - np.sum(np.multiply(pt1, pt2))
elif metric == 'cosine':
return 1. - np.sum(np.multiply(pt1, pt2)) / (np.sum(pt1**2) * np.sum(pt2**2))**.5
def brute_force_distances(metric, items, query_items, k):
dists = np.zeros((query_items.shape[0], items.shape[0]))
for ii in range(items.shape[0]):
for jj in range(query_items.shape[0]):
dists[jj,ii] = get_dist(metric, items[ii, :], query_items[jj, :])
labels = np.argsort(dists, axis=1) # equivalent, but faster: np.argpartition(dists, range(k), axis=1)
dists = np.sort(dists, axis=1) # equivalent, but faster: np.partition(dists, range(k), axis=1)
return labels[:, :k], dists[:, :k]
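# Illustrative sketch (not part of the original test): exact k-NN reference results
# from the brute-force helper above, against which approximate hnswlib results are checked.
def _demo_brute_force(num_items=200, num_queries=10, dim=16, k=5):
    items = np.float32(np.random.random((num_items, dim)))
    queries = np.float32(np.random.random((num_queries, dim)))
    labels, dists = brute_force_distances('l2', items, queries, k)
    assert labels.shape == (num_queries, k) and dists.shape == (num_queries, k)
    return labels, dists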
def check_ann_results(self, metric, items, query_items, k, ann_l, ann_d, err_thresh=0, total_thresh=0, dists_thresh=0):
brute_l, brute_d = brute_force_distances(metric, items, query_items, k)
err_total = 0
for jj in range(query_items.shape[0]):
err = np.sum(np.isin(brute_l[jj, :], ann_l[jj, :], invert=True))
##
# \brief Test copula mle fit with weighted samples
from __future__ import print_function, division
import unittest
import numpy as np
from scipy.stats import norm
import seaborn as sns
from six import iteritems
import os
import pandas as pd
# starvine imports
from starvine.bvcopula.pc_base import PairCopula
from starvine.bvcopula.copula_factory import Copula
from starvine.mvar.mv_plot import matrixPairPlot
#
pwd_ = os.getcwd()
dataDir = pwd_ + "/tests/data/"
np.random.seed(123)
class TestWeightedReg(unittest.TestCase):
def testWgtCopula(self):
"""!
@brief Test ability to construct copula
given samples with unequal weights.
Compose two bivariate gauss dists, one with
positive and one with negative depencence.
Sample from dists.
Assign large sample weights to positive gauss
and low sample weights to neg gauss.
Combine weighted samples into a single "X" shaped distribution.
Refit weighted samples and ensure positive depencence
"""
np.random.seed(123)
# construct gaussian margins; mu={0, 0}, sd={1.0, 2}
# marg1 = Uvm("gauss")(1e-3, 1.)
marg1 = norm(loc=1e-3, scale=1.0)
# marg2 = Uvm("gauss")(1e-3, 2.)
marg2 = norm(loc=1e-3, scale=2.0)
# construct gaussian copula positive dep
cop1 = Copula("gauss")
cop1.fittedParams = [0.7]
# construct gaussian copula neg dep
cop2 = Copula("gauss")
cop2.fittedParams = [-0.7]
# draw 4000 samples from each model
n = 4000
x1, y1 = cop1.sampleScale(marg1, marg2, n)
x2, y2 = cop2.sampleScale(marg1, marg2, n)
# assign weights to each gauss sample group
cop1_wgts = np.ones(n) * 0.95
cop2_wgts = np.ones(n) * 0.05
# combine both gauss models into dbl gauss model
x = np.append(x1, x2)
y = np.append(y1, y2)
wgts = np.append(cop1_wgts, cop2_wgts)
# plot
data = pd.DataFrame([x, y]).T
matrixPairPlot(data, weights=wgts, savefig='x_gauss_original.png')
# fit copula to weighted data
copModel = PairCopula(x, y, wgts)
copModel.copulaTournament()
# verify that a positive dep copula was produced with a
# dep parameter of slightly less than 0.7
x_wt, y_wt = copModel.copulaModel.sampleScale(marg1, marg2, n)
self.assertTrue(copModel.copulaModel.kTau() > 0.)
self.assertTrue((copModel.copulaModel.fittedParams[0] > 0.)
& (copModel.copulaModel.fittedParams[0] < 0.7))
# plot
data = pd.DataFrame([x_wt, y_wt]).T
matrixPairPlot(data, savefig='x_gauss_weighted_fit.png')
def testWgtResampledCopula(self):
"""!
@brief Test ability to construct copula
given samples with unequal weights using a resampling strat
"""
np.random.seed(123)
# construct gaussian margins; mu={0, 0}, sd={1.0, 2}
# marg1 = Uvm("gauss")(1e-3, 1.)
marg1 = norm(loc=1e-3, scale=1.0)
# marg2 = Uvm("gauss")(1e-3, 2.)
marg2 = norm(loc=1e-3, scale=2.0)
# construct gaussian copula positive dep
cop1 = Copula("gauss")
cop1.fittedParams = [0.7]
# construct gaussian copula neg dep
cop2 = Copula("gauss")
cop2.fittedParams = [-0.7]
# draw 1000 samples from each model
n = 1000
x1, y1 = cop1.sampleScale(marg1, marg2, n)
x2, y2 = cop2.sampleScale(marg1, marg2, n)
# assign weights to each gauss sample group
cop1_wgts = np.ones(n) * 0.95
cop2_wgts = np.ones(n) * 0.05
# combine both gauss models into dbl gauss model
x = np.append(x1, x2)
        y = np.append(y1, y2)
'''
Basic sound functions
'''
# pylint: disable=C0103, R0912, R0914
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import spectrogram as ss_spec
def tdt50k():
'''
TDT "50k" sample rate
'''
return 48828.125
def amp2level(amp):
'''
Convert an amplitude multiplier to level in dB.
E.g. a multiplier of 10 is +20dB
'''
return 20*np.log10(amp)
def level2amp(dB):
'''
Convert a dB difference to amplitude.
E.g. a difference of +10dB is a multiplier of 3.16
'''
return 10**(dB/20)
def rms2dBSPL(rms_Pa):
'''
Convert a sound RMS in Pascals to dB SPL.
E.g. 1 Pa RMS = 94dB
'''
return amp2level(rms_Pa)+94
def dBSPL2rms(dBSPL):
'''
Convert dBSPL to RMS in Pascals
E.g. 94dB = 1 Pa RMS
'''
return level2amp(dBSPL-94)
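# Quick sanity sketch (added for illustration, not in the original module):
# the conversions above should round-trip, e.g. a x10 amplitude is +20 dB and
# 1 Pa RMS corresponds to 94 dB SPL.
def _example_level_conversions():
    assert abs(amp2level(10.0) - 20.0) < 1e-9
    assert abs(level2amp(20.0) - 10.0) < 1e-9
    assert abs(rms2dBSPL(1.0) - 94.0) < 1e-9
    assert abs(dBSPL2rms(94.0) - 1.0) < 1e-9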
def puretone(fs, n_samples, freq=440, level_dB='max', phase=0):
'''
Generate a pure tone
'''
t = np.arange(n_samples) * 1/fs
if level_dB == 'max':
mult = 1
else:
mult = np.sqrt(2) * dBSPL2rms(level_dB)
return np.sin(2*np.pi*freq*t + phase) * mult
def freq_sweep(fs, n_samples, f_min=200, f_max=1000, method='log', level_dB='max', phase=0):
'''
Generate a frequency sweep
'''
t = np.arange(n_samples)/fs
if level_dB == 'max':
mult = 1
else:
mult = np.sqrt(2) * dBSPL2rms(level_dB)
if method.lower().startswith('li'):
c = (f_max-f_min)/(n_samples/fs)
return np.sin(2*np.pi*(f_min*t + c/2*(t**2)) + phase) * mult
# else: # method == 'log'
k = (f_max/f_min)**(fs/n_samples)
return np.sin(2*np.pi*f_min*(k**t-1)/np.log(k) + phase) * mult
def whitenoise(n_samples, method='uniform', level_dB='max'):
'''
Generate white noise -- NB method='normal', level_dB='max' will
scale the range depending on the random numbers produced, so won't
give a consistent sound level
'''
if method.lower()[0] == 'u': # 'uniform'
if level_dB == 'max':
mult = 1
else:
mult = np.sqrt(3) * dBSPL2rms(level_dB)
return ((2*np.random.random((n_samples)))-1) * mult
# elif method.lower()[0] == 'n': # 'normal'
rand = np.random.randn((n_samples))
if level_dB == 'max':
return rand / np.max(np.abs(rand))
# else:
return np.random.randn((n_samples)) * dBSPL2rms(level_dB)
def cosramp_on(n_samples, ramp_samples=None):
'''
Ramp on - total length n_samples, ramp length ramp_samples
'''
if ramp_samples is None:
ramp_samples = n_samples
t = np.minimum(np.arange(n_samples), ramp_samples)
return np.sin(np.pi/2/ramp_samples*t)
def cosramp_off(n_samples, ramp_samples=None):
'''
Ramp off - total length n_samples, ramp length ramp_samples
'''
if ramp_samples is None:
ramp_samples = n_samples
return cosramp_on(n_samples, ramp_samples)[::-1]
def cosramp_onoff(n_samples, ramp_samples):
'''
Ramp on and off - total length n_samples, ramp lengths ramp_samples
'''
r = cosramp_on(n_samples, ramp_samples)
return r * r[::-1]
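# Illustrative sketch (not part of the original module): combine puretone and
# cosramp_onoff to build a 100 ms, 1 kHz tone with 5 ms cosine on/off ramps at
# the TDT sample rate. Duration, frequency and ramp length are example values.
def _example_ramped_tone():
    fs = tdt50k()
    n_samples = int(0.1 * fs)              # 100 ms
    ramp_samples = int(round(0.005 * fs))  # 5 ms ramps
    return puretone(fs, n_samples, freq=1000) * cosramp_onoff(n_samples, ramp_samples)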
def make_diotic(snd, n_channels=2):
'''
Duplicate sound to 2 or more channels
'''
return np.tile(snd.ravel(), (n_channels, 1))
def apply_ild(snd, ild_dB=10):
'''
Apply an ILD to a mono sound, and return stereo sound
'''
left = snd
right = snd * level2amp(ild_dB)
return np.stack((left, right), axis=0)
def apply_itd(fs, snd, itd_us=100):
'''
Apply an ITD to a mono sound, and return stereo sound
'''
    # convert the ITD from microseconds to samples: (itd_us * 1e-6) s * fs samples/s
    shift_samples = int(abs(itd_us) * 1e-6 * fs)
leading = np.concatenate((snd, np.zeros(shift_samples)))
lagging = np.concatenate((np.zeros(shift_samples), snd))
if itd_us < 0:
return np.stack((leading, lagging), axis=0)
# else: itd_us >= 0
return np.stack((lagging, leading), axis=0)
def ild_stimulus(fs, len_s, f0, ild_dB, level_dB='max'):
'''
Make a pure-tone ILD stimulus
'''
    n_samples = int(len_s*fs)
ramplen_ms = 5
snd_mono = puretone(fs, n_samples, f0, level_dB=level_dB) * \
cosramp_onoff(n_samples, ramp_samples=np.round(ramplen_ms/1000*fs))
return apply_ild(snd_mono, ild_dB=ild_dB)
def itd_stimulus(fs, len_s, f0, itd_us, level_dB='max'):
'''
    Make a pure-tone ITD stimulus
'''
    n_samples = int(len_s*fs)
ramplen_ms = 5
snd_mono = puretone(fs, n_samples, f0, level_dB=level_dB) * \
cosramp_onoff(n_samples, ramp_samples=np.round(ramplen_ms/1000*fs))
return apply_itd(fs, snd_mono, itd_us=itd_us)
def binaural_beats(f_s, n_samples, f_l=520, f_r=530, level_dB='max'):
'''
Binaural beat stimulus
'''
return np.stack((puretone(f_s, n_samples, f_l, level_dB=level_dB),
puretone(f_s, n_samples, f_r, level_dB=level_dB)), axis=0)
def spectrogram(*args, **kwargs):
'''
This is just to help find the scipy spectrogram function
'''
return ss_spec(*args, **kwargs)
def show_spectrogram(*args, **kwargs):
'''
Show spectrogram using pyplot
'''
f, t, s = ss_spec(*args, **kwargs)
_, ax = plt.subplots(figsize=(10, 6))
    # extent values beyond np.min(t) reconstructed from the spectrogram outputs (t, f)
    ax.imshow(s, origin='lower', extent=[np.min(t), np.max(t), np.min(f), np.max(f)])
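# Usage sketch (illustrative only, not from the original file): show the
# spectrogram of 1 s of uniform white noise at the TDT sample rate. The
# nperseg value is an assumed, reasonable choice.
def _example_show_spectrogram():
    fs = tdt50k()
    snd = whitenoise(int(fs), method='uniform')
    show_spectrogram(snd, fs=fs, nperseg=1024)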
import copy
import numpy as np
from scipy import ndimage
import gnomonic_projection as gp
import spherical_coordinates as sc
import polygon
from logger import Logger
log = Logger(__name__)
log.logger.propagate = False
"""
Implements the icosahedron projection and its stitching with the gnomonic projection (forward and reverse).
Reference:
[1]: https://mathworld.wolfram.com/GnomonicProjection.html
"""
def get_icosahedron_parameters(triangle_index, padding_size=0.0):
"""
    Get the parameters of the icosahedron's tangent face.
    Get the tangent point theta and phi, known as theta_0 and phi_0.
    The ERP image origin is the top-left corner.
    :return: the tangent face's tangent point and the locations of its 3 vertices.
"""
# reference: https://en.wikipedia.org/wiki/Regular_icosahedron
radius_circumscribed = np.sin(2 * np.pi / 5.0)
radius_inscribed = np.sqrt(3) / 12.0 * (3 + np.sqrt(5))
radius_midradius = np.cos(np.pi / 5.0)
# the tangent point
theta_0 = None
phi_0 = None
# the 3 points of tangent triangle in spherical coordinate
triangle_point_00_theta = None
triangle_point_00_phi = None
triangle_point_01_theta = None
triangle_point_01_phi = None
triangle_point_02_theta = None
triangle_point_02_phi = None
# triangles' row/col range in the erp image
# erp_image_row_start = None
# erp_image_row_stop = None
# erp_image_col_start = None
# erp_image_col_stop = None
theta_step = 2.0 * np.pi / 5.0
# 1) the up 5 triangles
if 0 <= triangle_index <= 4:
# tangent point of inscribed spheric
theta_0 = - np.pi + theta_step / 2.0 + triangle_index * theta_step
phi_0 = np.pi / 2 - np.arccos(radius_inscribed / radius_circumscribed)
# the tangent triangle points coordinate in tangent image
triangle_point_00_theta = -np.pi + triangle_index * theta_step
triangle_point_00_phi = np.arctan(0.5)
triangle_point_01_theta = -np.pi + np.pi * 2.0 / 5.0 / 2.0 + triangle_index * theta_step
triangle_point_01_phi = np.pi / 2.0
triangle_point_02_theta = -np.pi + (triangle_index + 1) * theta_step
triangle_point_02_phi = np.arctan(0.5)
# # availied area of ERP image
# erp_image_row_start = 0
# erp_image_row_stop = (np.pi / 2 - np.arctan(0.5)) / np.pi
# erp_image_col_start = 1.0 / 5.0 * triangle_index_temp
# erp_image_col_stop = 1.0 / 5.0 * (triangle_index_temp + 1)
# 2) the middle 10 triangles
# 2-0) middle-up triangles
if 5 <= triangle_index <= 9:
triangle_index_temp = triangle_index - 5
# tangent point of inscribed spheric
theta_0 = - np.pi + theta_step / 2.0 + triangle_index_temp * theta_step
phi_0 = np.pi / 2.0 - np.arccos(radius_inscribed / radius_circumscribed) - 2 * np.arccos(radius_inscribed / radius_midradius)
# the tangent triangle points coordinate in tangent image
triangle_point_00_theta = -np.pi + triangle_index_temp * theta_step
triangle_point_00_phi = np.arctan(0.5)
triangle_point_01_theta = -np.pi + (triangle_index_temp + 1) * theta_step
triangle_point_01_phi = np.arctan(0.5)
triangle_point_02_theta = -np.pi + theta_step / 2.0 + triangle_index_temp * theta_step
triangle_point_02_phi = -np.arctan(0.5)
# # availied area of ERP image
# erp_image_row_start = (np.arccos(radius_inscribed / radius_circumscribed) + np.arccos(radius_inscribed / radius_midradius)) / np.pi
# erp_image_row_stop = (np.pi / 2.0 + np.arctan(0.5)) / np.pi
# erp_image_col_start = 1 / 5.0 * triangle_index_temp
# erp_image_col_stop = 1 / 5.0 * (triangle_index_temp + 1)
# 2-1) the middle-down triangles
if 10 <= triangle_index <= 14:
triangle_index_temp = triangle_index - 10
# tangent point of inscribed spheric
theta_0 = - np.pi + triangle_index_temp * theta_step
phi_0 = -(np.pi / 2.0 - np.arccos(radius_inscribed / radius_circumscribed) - 2 * np.arccos(radius_inscribed / radius_midradius))
# the tangent triangle points coordinate in tangent image
triangle_point_00_phi = -np.arctan(0.5)
triangle_point_00_theta = - np.pi - theta_step / 2.0 + triangle_index_temp * theta_step
if triangle_index_temp == 10:
# cross the ERP image boundary
triangle_point_00_theta = triangle_point_00_theta + 2 * np.pi
triangle_point_01_theta = -np.pi + triangle_index_temp * theta_step
triangle_point_01_phi = np.arctan(0.5)
triangle_point_02_theta = - np.pi + theta_step / 2.0 + triangle_index_temp * theta_step
triangle_point_02_phi = -np.arctan(0.5)
# # availied area of ERP image
# erp_image_row_start = (np.pi / 2.0 - np.arctan(0.5)) / np.pi
# erp_image_row_stop = (np.pi - np.arccos(radius_inscribed / radius_circumscribed) - np.arccos(radius_inscribed / radius_midradius)) / np.pi
# erp_image_col_start = 1.0 / 5.0 * triangle_index_temp - 1.0 / 5.0 / 2.0
# erp_image_col_stop = 1.0 / 5.0 * triangle_index_temp + 1.0 / 5.0 / 2.0
# 3) the down 5 triangles
if 15 <= triangle_index <= 19:
triangle_index_temp = triangle_index - 15
# tangent point of inscribed spheric
theta_0 = - np.pi + triangle_index_temp * theta_step
phi_0 = - (np.pi / 2 - np.arccos(radius_inscribed / radius_circumscribed))
# the tangent triangle points coordinate in tangent image
triangle_point_00_theta = - np.pi - theta_step / 2.0 + triangle_index_temp * theta_step
triangle_point_00_phi = -np.arctan(0.5)
triangle_point_01_theta = - np.pi + theta_step / 2.0 + triangle_index_temp * theta_step
# cross the ERP image boundary
if triangle_index_temp == 15:
triangle_point_01_theta = triangle_point_01_theta + 2 * np.pi
triangle_point_01_phi = -np.arctan(0.5)
triangle_point_02_theta = - np.pi + triangle_index_temp * theta_step
triangle_point_02_phi = -np.pi / 2.0
# # spherical coordinate (0,0) is in the center of ERP image
# erp_image_row_start = (np.pi / 2.0 + np.arctan(0.5)) / np.pi
# erp_image_row_stop = 1.0
# erp_image_col_start = 1.0 / 5.0 * triangle_index_temp - 1.0 / 5.0 / 2.0
# erp_image_col_stop = 1.0 / 5.0 * triangle_index_temp + 1.0 / 5.0 / 2.0
tangent_point = [theta_0, phi_0]
# the 3 vertices in tangent image's gnomonic coordinate
triangle_points_tangent = []
triangle_points_tangent.append(gp.gnomonic_projection(triangle_point_00_theta, triangle_point_00_phi, theta_0, phi_0))
triangle_points_tangent.append(gp.gnomonic_projection(triangle_point_01_theta, triangle_point_01_phi, theta_0, phi_0))
triangle_points_tangent.append(gp.gnomonic_projection(triangle_point_02_theta, triangle_point_02_phi, theta_0, phi_0))
    # padding the tangent image
triangle_points_tangent_no_pading = copy.deepcopy(triangle_points_tangent) # Needed for NN blending
triangle_points_tangent_pading = polygon.enlarge_polygon(triangle_points_tangent, padding_size)
# if padding_size != 0.0:
triangle_points_tangent = copy.deepcopy(triangle_points_tangent_pading)
# the points in spherical location
triangle_points_sph = []
for index in range(3):
tri_pading_x, tri_pading_y = triangle_points_tangent_pading[index]
triangle_point_theta, triangle_point_phi = gp.reverse_gnomonic_projection(tri_pading_x, tri_pading_y, theta_0, phi_0)
triangle_points_sph.append([triangle_point_theta, triangle_point_phi])
# compute bounding box of the face in spherical coordinate
availied_sph_area = []
availied_sph_area = np.array(copy.deepcopy(triangle_points_sph))
triangle_points_tangent_pading = np.array(triangle_points_tangent_pading)
point_insert_x = np.sort(triangle_points_tangent_pading[:, 0])[1]
point_insert_y = np.sort(triangle_points_tangent_pading[:, 1])[1]
availied_sph_area = np.append(availied_sph_area, [gp.reverse_gnomonic_projection(point_insert_x, point_insert_y, theta_0, phi_0)], axis=0)
# the bounding box of the face with spherical coordinate
    availied_ERP_area_sph = []  # [min_longitude, max_longitude, min_latitude, max_latitude]
if 0 <= triangle_index <= 4:
if padding_size > 0.0:
availied_ERP_area_sph.append(-np.pi)
availied_ERP_area_sph.append(np.pi)
else:
availied_ERP_area_sph.append(np.amin(availied_sph_area[:, 0]))
            availied_ERP_area_sph.append(np.amax(availied_sph_area[:, 0]))
import json
import pdb
from pytorch_lightning.trainer import optimizers
import six
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from model.pt_vl_transformer_encoder import encoder, pre_process_layer
from utils.pl_metric import LitAUROC
from utils.scheduler import GradualWarmupScheduler
class ErnieVilConfig(object):
"""
configuration for ernie-vil
"""
def __init__(self, config_path):
self._config_dict = self._parse(config_path)
def _parse(self, config_path):
try:
with open(config_path) as json_file:
config_dict = json.load(json_file)
except Exception:
raise IOError("Error in parsing Ernie model config file '%s'" %
config_path)
else:
return config_dict
def __getitem__(self, key):
return self._config_dict[key]
def print_config(self):
"""
print configuration value
"""
for arg, value in sorted(six.iteritems(self._config_dict)):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
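# Usage sketch (illustrative, not part of the original training code):
# ErnieVilConfig simply wraps a JSON dict, so any JSON file with the expected
# keys can be loaded. The two keys written below are a made-up minimal subset.
def _example_load_config():
    import os, tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as f:
        json.dump({'hidden_size': 768, 'num_hidden_layers': 12}, f)
        path = f.name
    cfg = ErnieVilConfig(path)
    cfg.print_config()
    os.remove(path)
    return cfg['hidden_size']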
class ErnieVilModel(nn.Module):
"""
main class for ERNIE-ViL model
"""
def __init__(self,
config,
predict_feature=False,
predict_class=True,
use_attr=False,
use_soft_label=True,
fusion_method="mul",
fusion_dropout=0.1,
):
super().__init__()
self.fusion_method = fusion_method
self.fusion_dropout = fusion_dropout
hidden_act_map = {
'gelu': F.gelu,
}
self._emb_size = config['hidden_size']
self._n_layer = config['num_hidden_layers']
self._n_head = config['num_attention_heads']
self._v_feat_size = 2048
self._v_head = config['v_num_attention_heads']
self._v_emb_size = config['v_hidden_size']
self._v_inter_hid = config['v_intermediate_size']
self._co_head = config['co_num_attention_heads']
self._co_emb_size = config['co_hidden_size']
self._co_inter_hid = config['co_intermediate_size']
self._voc_size = config['vocab_size']
self._class_size = config['class_size']
self._class_attr_size = config['class_attr_size']
self._max_position_seq_len = config['max_position_embeddings']
self._sent_types = config['sent_type_vocab_size']
self._task_types = config['task_type_vocab_size']
self._hidden_act = hidden_act_map[config['hidden_act']]
self._prepostprocess_dropout = config['hidden_dropout_prob']
self._attention_dropout = config['attention_probs_dropout_prob']
self._v_biattention_id = config['v_biattention_id']
self._t_biattention_id = config['t_biattention_id']
self._predict_feature = predict_feature
self._predict_class = predict_class
self._use_attr = use_attr
self._use_soft_label = use_soft_label
self._word_emb_name = "word_embedding"
self._pos_emb_name = "pos_embedding"
self._sent_emb_name = "sent_embedding"
self._image_emb_name = "image_embedding"
self._loc_emb_name = "loc_embedding"
self._dtype = "float32"
self._emb_dtype = "float32"
self._build_model()
def load_paddle_weight(self, w_dict):
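        # Map native torch parameter names to the Paddle checkpoint naming
        # convention (e.g. nn.Linear 'weight'/'bias' -> 'w_0'/'b_0') so that
        # exported Paddle weights can be matched to this module by name.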
translate_name = {
nn.Embedding: {
'weight': '',
},
nn.Linear: {
'weight': 'w_0',
'bias': 'b_0',
}
}
new_state_dict = {}
for mn, md in self.named_children():
if hasattr(md, 'load_paddle_weight'):
print('v' * 10)
md.load_paddle_weight(w_dict, "")
print('^' * 10)
else:
# Native pytorch nn modules
for n, p in md.named_parameters():
blocks = [
translate_name[type(md)][sn]
if type(md) in translate_name
else sn
for sn in n.split('.')
]
new_p_name = '.'.join(blocks)
pd_full_name = '.'.join([mn, new_p_name]) if new_p_name else mn
pt_full_name = f"{mn}.{n}"
if pd_full_name in w_dict:
new_state_dict[pt_full_name] = torch.tensor(w_dict[pd_full_name])
if 'weight' in pt_full_name and isinstance(md, nn.Linear):
new_state_dict[pt_full_name] = new_state_dict[pt_full_name].T
print(f'matched: {pd_full_name} -> {pt_full_name}', new_state_dict[pt_full_name].shape)
else:
print('not match: ', pd_full_name)
import pdb; pdb.set_trace()
mismatchs = self.load_state_dict(new_state_dict, strict=False)
def _build_model(self, ):
self.word_embedding = nn.Embedding(self._voc_size, self._emb_size)
self.pos_embedding = nn.Embedding(self._max_position_seq_len, self._emb_size)
self.sent_embedding = nn.Embedding(self._sent_types, self._emb_size)
self.pre_encoder = pre_process_layer(
'nd',
self._emb_size,
dropout_rate=self._prepostprocess_dropout,
postfix='pre_encoder',
)
self.image_emb = nn.Linear(self._v_feat_size, self._v_emb_size, bias=True)
self.image_loc = nn.Linear(5, self._v_emb_size, bias=True)
self.vl_pre_encoder = pre_process_layer(
'nd',
self._v_emb_size,
dropout_rate=self._prepostprocess_dropout,
postfix='vl_pre_encoder',
)
self.encoder = encoder(
n_layer=self._n_layer,
n_head=self._n_head,
d_key=self._emb_size // self._n_head,
d_value=self._emb_size // self._n_head,
d_model=self._emb_size,
d_inner_hid=self._emb_size * 4,
v_head=self._v_head,
v_key=self._v_emb_size // self._v_head,
v_value=self._v_emb_size // self._v_head,
v_model=self._v_emb_size,
v_inner_hid=self._v_inter_hid,
co_head=self._co_head,
co_key=self._co_emb_size // self._co_head,
co_value=self._co_emb_size // self._co_head,
co_model=self._co_emb_size,
co_inner_hid=self._co_inter_hid,
prepostprocess_dropout=self._prepostprocess_dropout,
attention_dropout=self._attention_dropout,
relu_dropout=0,
hidden_act=self._hidden_act,
preprocess_cmd="",
postprocess_cmd="dan",
v_biattention_id=self._v_biattention_id,
t_biattention_id=self._t_biattention_id,
name='encoder',
)
self.pooled_fc_text = nn.Linear(self._emb_size, self._co_emb_size, bias=True)
self.pooled_fc_image = nn.Linear(self._v_emb_size, self._co_emb_size, bias=True)
self.emb_fuse_dropout = nn.Dropout(self.fusion_dropout)
def get_pooled_output(self, _enc_out, _enc_vl_out):
text_cls_feat = _enc_out[:, 0, :]
text_cls_feat = self.pooled_fc_text(text_cls_feat)
text_cls_feat = torch.relu(text_cls_feat)
image_cls_feat = _enc_vl_out[:, 0, :]
image_cls_feat = self.pooled_fc_image(image_cls_feat)
image_cls_feat = torch.relu(image_cls_feat)
return text_cls_feat, image_cls_feat
def get_match_score(self, text, image, mode="mul"):
if mode == "sum":
emb_fuse = text + image
elif mode == "mul":
emb_fuse = text * image
else:
raise ValueError(f"current mode {mode} is not supported")
emb_fuse = self.emb_fuse_dropout(emb_fuse)
return emb_fuse
def forward(self,
src_ids,
position_ids,
sentence_ids,
task_ids,
input_mask,
image_embeddings,
image_loc,
input_image_mask,
pooled_output=False,
match_score=False,):
emb_out = self.word_embedding(src_ids)
position_emb_out = self.pos_embedding(position_ids)
sent_emb_out = self.sent_embedding(sentence_ids)
emb_out = emb_out + position_emb_out
emb_out = emb_out_0 = emb_out + sent_emb_out
emb_out = emb_out_1 = self.pre_encoder(emb_out)
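        # Build an additive self-attention mask: valid-to-valid token pairs map
        # to 0, every pair involving padding maps to -10000 before the softmax.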
self_attn_mask = torch.matmul(input_mask, input_mask.permute([0, 2, 1]))
self_attn_mask = (self_attn_mask - 1.0) * 10000.0
n_head_self_attn_mask = torch.stack([self_attn_mask] * self._n_head, dim=1)
n_head_self_attn_mask = n_head_self_attn_mask.detach()
image_embeddings = self.image_emb(image_embeddings)
loc_emb_out = self.image_loc(image_loc)
emb_vl_out = image_embeddings + loc_emb_out
emb_vl_out = self.vl_pre_encoder(emb_vl_out)
self_attn_image_mask = torch.matmul(
input_image_mask,
input_image_mask.permute([0, 2, 1])
)
self_attn_image_mask = (self_attn_image_mask - 1.0) * 10000.0
n_head_self_attn_image_mask = torch.stack([self_attn_image_mask] * self._v_head, dim=1)
n_head_self_attn_image_mask = n_head_self_attn_image_mask.detach()
self_attn_vl_mask = torch.matmul(
input_image_mask,
input_mask.permute([0, 2, 1])
)
self_attn_vl_mask = (self_attn_vl_mask - 1.0) * 10000.0
n_head_self_attn_vl_mask = torch.stack([self_attn_vl_mask] * self._co_head, dim=1)
n_head_self_attn_vl_mask = n_head_self_attn_vl_mask.detach()
enc_out, enc_vl_out = self.encoder(
emb_out,
emb_vl_out,
n_head_self_attn_mask,
n_head_self_attn_image_mask,
n_head_self_attn_vl_mask,
)
if match_score:
h_cls, h_img = self.get_pooled_output(enc_out, enc_vl_out)
emb_fuse = self.get_match_score(h_cls, h_img, mode=self.fusion_method)
return emb_fuse
elif pooled_output:
            return self.get_pooled_output(enc_out, enc_vl_out)
else:
return enc_out, enc_vl_out
class LitErnieVil(pl.LightningModule):
def __init__(self, args, fusion_method="mul", fusion_dropout=0.1, cls_head='linear'):
super().__init__()
hparams = vars(args)
hparams.update({
"fusion_method": fusion_method,
"fusion_dropout": fusion_dropout,
"cls_head": cls_head,
})
self.hparams = hparams
self.args = args
self.train_accuracy = pl.metrics.classification.Accuracy()
self.val_accuracy = pl.metrics.classification.Accuracy()
self.val_auroc = LitAUROC()
self.ernie_config = ErnieVilConfig(args.ernie_config_path)
self.ernie_vil = ErnieVilModel(
self.ernie_config,
fusion_dropout=fusion_dropout,
fusion_method=fusion_method,
)
if cls_head == 'linear':
self.fc = nn.Sequential(
# nn.Linear(self.ernie_vil._co_emb_size, self.ernie_vil._co_emb_size * 2),
nn.Linear(self.ernie_vil._co_emb_size, 2, bias=True),
)
torch.nn.init.normal_(self.fc[0].weight, mean=0.0, std=0.02)
# torch.nn.init.xavier_normal_(self.fc[0].weight)
# torch.nn.init.constant_(self.fc[0].bias, 0.0)
elif cls_head == 'mlm':
self.fc = nn.Sequential(
nn.Linear(self.ernie_vil._co_emb_size, self.ernie_vil._co_emb_size),
nn.GELU(),
nn.LayerNorm(self.ernie_vil._co_emb_size),
nn.Dropout(fusion_dropout),
nn.Linear(self.ernie_vil._co_emb_size, 2),
)
else:
raise ValueError(f'cls_head: {cls_head} is not supported!')
def load_paddle_weight(self, npz_path):
w_dict = np.load(npz_path)
self.ernie_vil.load_paddle_weight(w_dict)
def forward(self,
src_ids,
position_ids,
sentence_ids,
task_ids,
input_mask,
image_embeddings,
image_loc,
input_image_mask):
emb_fuse = self.ernie_vil(
src_ids,
position_ids,
sentence_ids,
task_ids,
input_mask,
image_embeddings,
image_loc,
input_image_mask,
match_score=True,
)
cls_logit = self.fc(emb_fuse)
return cls_logit
def training_step(self, batch, batch_idx):
(src_ids, src_pos, src_seg, src_task, src_masks,
image_embeddings, image_loc, image_mask, labels, batch_anno_ids,
_, _, _) = batch
logits = self.forward(
src_ids,
src_pos,
src_seg,
src_task,
src_masks,
image_embeddings,
image_loc,
image_mask
)
if labels.ndim == 2 and labels.dtype == torch.long:
labels = torch.squeeze(labels, dim=-1)
loss = F.cross_entropy(logits, labels).mean()
self.log('train_loss', loss)
self.log('train_acc_step', self.train_accuracy(logits, labels))
if hasattr(self, "scheduler"):
lr = torch.tensor(self.scheduler.get_last_lr()[0])
self.log('lr', lr, prog_bar=True)
return loss
def validation_step(self, batch, batch_idx):
(src_ids, src_pos, src_seg, src_task, src_masks,
image_embeddings, image_loc, image_mask, labels, batch_anno_ids,
_, _, _) = batch
logits = self.forward(
src_ids,
src_pos,
src_seg,
src_task,
src_masks,
image_embeddings,
image_loc,
image_mask
)
pred = F.softmax(logits, dim=-1)
self.val_auroc(pred[..., 1], labels)
self.val_accuracy(pred, labels)
return {
'predict': pred,
'anno_idx': batch_anno_ids
}
def validation_epoch_end(self, validation_step_outputs):
self.log('val_auroc_epoch', self.val_auroc.compute())
self.log('val_aucc_epoch', self.val_accuracy.compute())
def test_step(self, batch, batch_idx):
return self.validation_step(batch, batch_idx)
def test_epoch_end(self, test_outs):
pass
def configure_optimizers(self):
optimizer = torch.optim.AdamW(
self.parameters(),
lr=self.args.learning_rate,
weight_decay=self.args.weight_decay,
)
T_0 = self.args.num_train_steps
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
optimizer, T_0, T_mult=1, eta_min=1e-8)
scheduler_warmup = GradualWarmupScheduler(
optimizer,
multiplier=1,
total_epoch=self.args.warmup_steps,
after_scheduler=scheduler
)
self.scheduler = scheduler_warmup
return [optimizer], [{
'scheduler': scheduler_warmup,
'interval': 'step',
}]
def test_ernie_vil():
    w_dict = np.load('/home/ron_zhu/Disk2/ernie/ernie-vil-base-vcr.npz')
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os, sys
# add python path of PadleDetection to sys.path
parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 4)))
if parent_path not in sys.path:
sys.path.append(parent_path)
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import base
import ppdet.modeling.ops as ops
from ppdet.modeling.tests.test_base import LayerTest
def make_rois(h, w, rois_num, output_size):
rois = np.zeros((0, 4)).astype('float32')
for roi_num in rois_num:
roi = np.zeros((roi_num, 4)).astype('float32')
roi[:, 0] = np.random.randint(0, h - output_size[0], size=roi_num)
roi[:, 1] = np.random.randint(0, w - output_size[1], size=roi_num)
roi[:, 2] = np.random.randint(roi[:, 0] + output_size[0], h)
roi[:, 3] = np.random.randint(roi[:, 1] + output_size[1], w)
rois = np.vstack((rois, roi))
return rois
def softmax(x):
# clip to shiftx, otherwise, when calc loss with
# log(exp(shiftx)), may get log(0)=INF
shiftx = (x - np.max(x)).clip(-64.)
exps = np.exp(shiftx)
return exps / np.sum(exps)
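# Illustrative sketch (not part of the original tests): make_rois draws
# sum(rois_num) boxes of at least output_size inside an h x w map, as the
# test cases below do. The sizes here are example values.
def _example_make_rois():
    rois = make_rois(20, 20, [4, 6], (7, 7))
    return rois.shape  # (10, 4): one 4-coordinate box per ROI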
class TestCollectFpnProposals(LayerTest):
def test_collect_fpn_proposals(self):
multi_bboxes_np = []
multi_scores_np = []
rois_num_per_level_np = []
for i in range(4):
bboxes_np = np.random.rand(5, 4).astype('float32')
scores_np = np.random.rand(5, 1).astype('float32')
rois_num = np.array([2, 3]).astype('int32')
multi_bboxes_np.append(bboxes_np)
multi_scores_np.append(scores_np)
rois_num_per_level_np.append(rois_num)
with self.static_graph():
multi_bboxes = []
multi_scores = []
rois_num_per_level = []
for i in range(4):
bboxes = paddle.static.data(
name='rois' + str(i),
shape=[5, 4],
dtype='float32',
lod_level=1)
scores = paddle.static.data(
name='scores' + str(i),
shape=[5, 1],
dtype='float32',
lod_level=1)
rois_num = paddle.static.data(
name='rois_num' + str(i), shape=[None], dtype='int32')
multi_bboxes.append(bboxes)
multi_scores.append(scores)
rois_num_per_level.append(rois_num)
fpn_rois, rois_num = ops.collect_fpn_proposals(
multi_bboxes,
multi_scores,
2,
5,
10,
rois_num_per_level=rois_num_per_level)
feed = {}
for i in range(4):
feed['rois' + str(i)] = multi_bboxes_np[i]
feed['scores' + str(i)] = multi_scores_np[i]
feed['rois_num' + str(i)] = rois_num_per_level_np[i]
fpn_rois_stat, rois_num_stat = self.get_static_graph_result(
feed=feed, fetch_list=[fpn_rois, rois_num], with_lod=True)
fpn_rois_stat = np.array(fpn_rois_stat)
rois_num_stat = np.array(rois_num_stat)
with self.dynamic_graph():
multi_bboxes_dy = []
multi_scores_dy = []
rois_num_per_level_dy = []
for i in range(4):
bboxes_dy = base.to_variable(multi_bboxes_np[i])
scores_dy = base.to_variable(multi_scores_np[i])
rois_num_dy = base.to_variable(rois_num_per_level_np[i])
multi_bboxes_dy.append(bboxes_dy)
multi_scores_dy.append(scores_dy)
rois_num_per_level_dy.append(rois_num_dy)
fpn_rois_dy, rois_num_dy = ops.collect_fpn_proposals(
multi_bboxes_dy,
multi_scores_dy,
2,
5,
10,
rois_num_per_level=rois_num_per_level_dy)
fpn_rois_dy = fpn_rois_dy.numpy()
rois_num_dy = rois_num_dy.numpy()
self.assertTrue(np.array_equal(fpn_rois_stat, fpn_rois_dy))
self.assertTrue(np.array_equal(rois_num_stat, rois_num_dy))
def test_collect_fpn_proposals_error(self):
def generate_input(bbox_type, score_type, name):
multi_bboxes = []
multi_scores = []
for i in range(4):
bboxes = paddle.static.data(
name='rois' + name + str(i),
shape=[10, 4],
dtype=bbox_type,
lod_level=1)
scores = paddle.static.data(
name='scores' + name + str(i),
shape=[10, 1],
dtype=score_type,
lod_level=1)
multi_bboxes.append(bboxes)
multi_scores.append(scores)
return multi_bboxes, multi_scores
with self.static_graph():
bbox1 = paddle.static.data(
name='rois', shape=[5, 10, 4], dtype='float32', lod_level=1)
score1 = paddle.static.data(
name='scores', shape=[5, 10, 1], dtype='float32', lod_level=1)
bbox2, score2 = generate_input('int32', 'float32', '2')
self.assertRaises(
TypeError,
ops.collect_fpn_proposals,
multi_rois=bbox1,
multi_scores=score1,
min_level=2,
max_level=5,
post_nms_top_n=2000)
self.assertRaises(
TypeError,
ops.collect_fpn_proposals,
multi_rois=bbox2,
multi_scores=score2,
min_level=2,
max_level=5,
post_nms_top_n=2000)
paddle.disable_static()
class TestDistributeFpnProposals(LayerTest):
def test_distribute_fpn_proposals(self):
rois_np = np.random.rand(10, 4).astype('float32')
rois_num_np = np.array([4, 6]).astype('int32')
with self.static_graph():
rois = paddle.static.data(
name='rois', shape=[10, 4], dtype='float32')
rois_num = paddle.static.data(
name='rois_num', shape=[None], dtype='int32')
multi_rois, restore_ind, rois_num_per_level = ops.distribute_fpn_proposals(
fpn_rois=rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224,
rois_num=rois_num)
fetch_list = multi_rois + [restore_ind] + rois_num_per_level
output_stat = self.get_static_graph_result(
feed={'rois': rois_np,
'rois_num': rois_num_np},
fetch_list=fetch_list,
with_lod=True)
output_stat_np = []
for output in output_stat:
output_np = np.array(output)
if len(output_np) > 0:
output_stat_np.append(output_np)
with self.dynamic_graph():
rois_dy = base.to_variable(rois_np)
rois_num_dy = base.to_variable(rois_num_np)
multi_rois_dy, restore_ind_dy, rois_num_per_level_dy = ops.distribute_fpn_proposals(
fpn_rois=rois_dy,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224,
rois_num=rois_num_dy)
output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy
output_dy_np = []
for output in output_dy:
output_np = output.numpy()
if len(output_np) > 0:
output_dy_np.append(output_np)
for res_stat, res_dy in zip(output_stat_np, output_dy_np):
self.assertTrue(np.array_equal(res_stat, res_dy))
def test_distribute_fpn_proposals_error(self):
with self.static_graph():
fpn_rois = paddle.static.data(
name='data_error', shape=[10, 4], dtype='int32', lod_level=1)
self.assertRaises(
TypeError,
ops.distribute_fpn_proposals,
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
paddle.disable_static()
class TestROIAlign(LayerTest):
def test_roi_align(self):
b, c, h, w = 2, 12, 20, 20
inputs_np = np.random.rand(b, c, h, w).astype('float32')
rois_num = [4, 6]
output_size = (7, 7)
rois_np = make_rois(h, w, rois_num, output_size)
rois_num_np = np.array(rois_num).astype('int32')
with self.static_graph():
inputs = paddle.static.data(
name='inputs', shape=[b, c, h, w], dtype='float32')
rois = paddle.static.data(
name='rois', shape=[10, 4], dtype='float32')
rois_num = paddle.static.data(
name='rois_num', shape=[None], dtype='int32')
output = ops.roi_align(
input=inputs,
rois=rois,
output_size=output_size,
rois_num=rois_num)
output_np, = self.get_static_graph_result(
feed={
'inputs': inputs_np,
'rois': rois_np,
'rois_num': rois_num_np
},
fetch_list=output,
with_lod=False)
with self.dynamic_graph():
inputs_dy = base.to_variable(inputs_np)
rois_dy = base.to_variable(rois_np)
rois_num_dy = base.to_variable(rois_num_np)
output_dy = ops.roi_align(
input=inputs_dy,
rois=rois_dy,
output_size=output_size,
rois_num=rois_num_dy)
output_dy_np = output_dy.numpy()
self.assertTrue(np.array_equal(output_np, output_dy_np))
def test_roi_align_error(self):
with self.static_graph():
inputs = paddle.static.data(
name='inputs', shape=[2, 12, 20, 20], dtype='float32')
rois = paddle.static.data(
name='data_error', shape=[10, 4], dtype='int32', lod_level=1)
self.assertRaises(
TypeError,
ops.roi_align,
input=inputs,
rois=rois,
output_size=(7, 7))
paddle.disable_static()
class TestROIPool(LayerTest):
def test_roi_pool(self):
b, c, h, w = 2, 12, 20, 20
inputs_np = np.random.rand(b, c, h, w).astype('float32')
rois_num = [4, 6]
output_size = (7, 7)
rois_np = make_rois(h, w, rois_num, output_size)
rois_num_np = np.array(rois_num).astype('int32')
with self.static_graph():
inputs = paddle.static.data(
name='inputs', shape=[b, c, h, w], dtype='float32')
rois = paddle.static.data(
name='rois', shape=[10, 4], dtype='float32')
rois_num = paddle.static.data(
name='rois_num', shape=[None], dtype='int32')
output, _ = ops.roi_pool(
input=inputs,
rois=rois,
output_size=output_size,
rois_num=rois_num)
output_np, = self.get_static_graph_result(
feed={
'inputs': inputs_np,
'rois': rois_np,
'rois_num': rois_num_np
},
fetch_list=[output],
with_lod=False)
with self.dynamic_graph():
inputs_dy = base.to_variable(inputs_np)
rois_dy = base.to_variable(rois_np)
rois_num_dy = base.to_variable(rois_num_np)
output_dy, _ = ops.roi_pool(
input=inputs_dy,
rois=rois_dy,
output_size=output_size,
rois_num=rois_num_dy)
output_dy_np = output_dy.numpy()
self.assertTrue(np.array_equal(output_np, output_dy_np))
def test_roi_pool_error(self):
with self.static_graph():
inputs = paddle.static.data(
name='inputs', shape=[2, 12, 20, 20], dtype='float32')
rois = paddle.static.data(
name='data_error', shape=[10, 4], dtype='int32', lod_level=1)
self.assertRaises(
TypeError,
ops.roi_pool,
input=inputs,
rois=rois,
output_size=(7, 7))
paddle.disable_static()
class TestIoUSimilarity(LayerTest):
def test_iou_similarity(self):
b, c, h, w = 2, 12, 20, 20
inputs_np = np.random.rand(b, c, h, w).astype('float32')
output_size = (7, 7)
x_np = make_rois(h, w, [20], output_size)
y_np = make_rois(h, w, [10], output_size)
with self.static_graph():
x = paddle.static.data(name='x', shape=[20, 4], dtype='float32')
y = paddle.static.data(name='y', shape=[10, 4], dtype='float32')
iou = ops.iou_similarity(x=x, y=y)
iou_np, = self.get_static_graph_result(
feed={
'x': x_np,
'y': y_np,
}, fetch_list=[iou], with_lod=False)
with self.dynamic_graph():
x_dy = base.to_variable(x_np)
y_dy = base.to_variable(y_np)
iou_dy = ops.iou_similarity(x=x_dy, y=y_dy)
iou_dy_np = iou_dy.numpy()
self.assertTrue(np.array_equal(iou_np, iou_dy_np))
class TestBipartiteMatch(LayerTest):
def test_bipartite_match(self):
distance = np.random.random((20, 10)).astype('float32')
with self.static_graph():
x = paddle.static.data(name='x', shape=[20, 10], dtype='float32')
match_indices, match_dist = ops.bipartite_match(
x, match_type='per_prediction', dist_threshold=0.5)
match_indices_np, match_dist_np = self.get_static_graph_result(
feed={'x': distance, },
fetch_list=[match_indices, match_dist],
with_lod=False)
with self.dynamic_graph():
x_dy = base.to_variable(distance)
match_indices_dy, match_dist_dy = ops.bipartite_match(
x_dy, match_type='per_prediction', dist_threshold=0.5)
match_indices_dy_np = match_indices_dy.numpy()
match_dist_dy_np = match_dist_dy.numpy()
self.assertTrue(np.array_equal(match_indices_np, match_indices_dy_np))
self.assertTrue(np.array_equal(match_dist_np, match_dist_dy_np))
class TestYoloBox(LayerTest):
def test_yolo_box(self):
# x shape [N C H W], C=K * (5 + class_num), class_num=10, K=2
np_x = np.random.random([1, 30, 7, 7]).astype('float32')
np_origin_shape = np.array([[608, 608]], dtype='int32')
class_num = 10
conf_thresh = 0.01
downsample_ratio = 32
scale_x_y = 1.2
# static
with self.static_graph():
# x shape [N C H W], C=K * (5 + class_num), class_num=10, K=2
x = paddle.static.data(
name='x', shape=[1, 30, 7, 7], dtype='float32')
origin_shape = paddle.static.data(
name='origin_shape', shape=[1, 2], dtype='int32')
boxes, scores = ops.yolo_box(
x,
origin_shape, [10, 13, 30, 13],
class_num,
conf_thresh,
downsample_ratio,
scale_x_y=scale_x_y)
boxes_np, scores_np = self.get_static_graph_result(
feed={
'x': np_x,
'origin_shape': np_origin_shape,
},
fetch_list=[boxes, scores],
with_lod=False)
# dygraph
with self.dynamic_graph():
x_dy = fluid.layers.assign(np_x)
origin_shape_dy = fluid.layers.assign(np_origin_shape)
boxes_dy, scores_dy = ops.yolo_box(
x_dy,
origin_shape_dy, [10, 13, 30, 13],
10,
0.01,
32,
scale_x_y=scale_x_y)
boxes_dy_np = boxes_dy.numpy()
scores_dy_np = scores_dy.numpy()
self.assertTrue(np.array_equal(boxes_np, boxes_dy_np))
self.assertTrue(np.array_equal(scores_np, scores_dy_np))
def test_yolo_box_error(self):
with self.static_graph():
# x shape [N C H W], C=K * (5 + class_num), class_num=10, K=2
x = paddle.static.data(
name='x', shape=[1, 30, 7, 7], dtype='float32')
origin_shape = paddle.static.data(
name='origin_shape', shape=[1, 2], dtype='int32')
self.assertRaises(
TypeError,
ops.yolo_box,
x,
origin_shape, [10, 13, 30, 13],
10.123,
0.01,
32,
scale_x_y=1.2)
paddle.disable_static()
class TestPriorBox(LayerTest):
def test_prior_box(self):
input_np = np.random.rand(2, 10, 32, 32).astype('float32')
image_np = np.random.rand(2, 10, 40, 40).astype('float32')
min_sizes = [2, 4]
with self.static_graph():
input = paddle.static.data(
name='input', shape=[2, 10, 32, 32], dtype='float32')
image = paddle.static.data(
name='image', shape=[2, 10, 40, 40], dtype='float32')
box, var = ops.prior_box(
input=input,
image=image,
min_sizes=min_sizes,
clip=True,
flip=True)
box_np, var_np = self.get_static_graph_result(
feed={
'input': input_np,
'image': image_np,
},
fetch_list=[box, var],
with_lod=False)
with self.dynamic_graph():
inputs_dy = base.to_variable(input_np)
image_dy = base.to_variable(image_np)
box_dy, var_dy = ops.prior_box(
input=inputs_dy,
image=image_dy,
min_sizes=min_sizes,
clip=True,
flip=True)
box_dy_np = box_dy.numpy()
var_dy_np = var_dy.numpy()
self.assertTrue(np.array_equal(box_np, box_dy_np))
        self.assertTrue(np.array_equal(var_np, var_dy_np))
import os
import sys
import obspy
import scipy
import pyasdf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.fftpack import next_fast_len
from obspy.signal.filter import bandpass
from seisgo import noise, stacking,utils
import pygmt as gmt
from obspy import UTCDateTime
def plot_eventsequence(cat,figsize=(12,4),ytype='magnitude',figname=None,
yrange=None,save=False,stem=True):
if isinstance(cat,obspy.core.event.catalog.Catalog):
cat=pd.DataFrame(utils.qml2list(cat))
elif isinstance(cat,list):
cat=pd.DataFrame(cat)
#All magnitudes greater than or equal to the limit will be plotted
plt.figure(figsize=figsize)
plt.title(ytype+" vs. time")
plt.xlabel("Date (UTC)")
plt.ylabel(ytype)
if yrange is not None:
ymin,ymax=yrange
if ytype.lower()=="magnitude":
cat2=cat[(cat.magnitude>=yrange[0]) & (cat.magnitude<=yrange[1]) ]
elif ytype.lower()=="depth":
cat2=cat[(cat.depth>=yrange[0]) & (cat.depth<=yrange[1]) ]
else:
cat2=cat
if ytype.lower()=="magnitude":
ymin=np.min(cat2.magnitude)*0.9
ymax=np.max(cat2.magnitude)*1.1
elif ytype.lower()=="depth":
ymin=np.min(cat2.depth)*0.9
ymax=np.max(cat2.depth)*1.1
t=[]
for i in range(len(cat2)):
tTime=obspy.UTCDateTime(cat2.iloc[i]["datetime"])
t.append(tTime.datetime)
if stem:
if ytype.lower()=="magnitude":
markerline, stemlines, baseline=plt.stem(t,cat2.magnitude,linefmt='k-',markerfmt="o",
bottom=ymin)
elif ytype.lower()=="depth":
markerline, stemlines, baseline=plt.stem(t,cat2.depth,linefmt='k-',markerfmt="o",
bottom=ymin)
markerline.set_markerfacecolor('r')
markerline.set_markeredgecolor('r')
else:
if ytype.lower()=="magnitude":
plt.scatter(t,cat2.magnitude,5,'k')
elif ytype.lower()=="depth":
plt.scatter(t,cat2.depth,cat2.magnitude,'k')
#
plt.grid(axis="both")
plt.ylim([ymin,ymax])
if save:
if figname is not None:
plt.savefig(figname)
else:
plt.savefig(ytype+"_vs_time.png")
else:
plt.show()
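# Usage sketch (illustrative, not part of the original module): plot_eventsequence
# accepts either an ObsPy Catalog or a list of event dicts with 'datetime',
# 'magnitude' and 'depth' keys; the two events below are made up.
def _example_plot_eventsequence():
    events = [
        {'datetime': '2020-01-01T00:00:00', 'magnitude': 4.5, 'depth': 10.0},
        {'datetime': '2020-01-02T12:00:00', 'magnitude': 5.1, 'depth': 15.0},
    ]
    plot_eventsequence(events, ytype='magnitude', stem=True)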
def plot_stations(lon,lat,region,markersize="c0.2c",title="station map",style="fancy",figname=None,
format='png',distance=None,projection="M5i", xshift="6i",frame="af"):
"""
    lon, lat: could be lists of vectors containing multiple sets of stations. The number of sets must be the same
    as the length of the markersize list.
    markersize: a list specifying the plotting symbol and size for each station set.
region: [minlon,maxlon,minlat,maxlat] for map view
"""
nsta=len(lon)
if isinstance(markersize,str):
markersize=[markersize]*nsta
fig = gmt.Figure()
gmt.config(MAP_FRAME_TYPE=style)
for i in range(nsta):
if i==0:
fig.coast(region=region, resolution="f",projection=projection, rivers='rivers',
water="cyan",frame=frame,land="white",
borders=["1/0.5p,gray,2/1p,gray"])
fig.basemap(frame='+t"'+title+'"')
fig.plot(
x=lon[i],
y=lat[i],
style=markersize[i],
color="red",
)
if figname is None:
figname='stationmap.'+format
fig.savefig(figname)
print('plot was saved to: '+figname)
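# Usage sketch (illustrative coordinates only, not from the original module):
# plot two station sets with different symbols over an example region.
def _example_plot_stations():
    lon = [[-122.3, -121.8], [-123.1]]
    lat = [[47.6, 47.2], [48.0]]
    plot_stations(lon, lat, region=[-124, -121, 46, 49],
                  markersize=["c0.2c", "t0.25c"], title="example stations")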
##plot power spectral density
def plot_psd(data,dt,labels=None,xrange=None,cmap='jet',normalize=True,figsize=(13,5),\
save=False,figname=None,tick_inc=None):
"""
    Plot the power spectral density of the data array.
    =PARAMETERS=
    data: 2-D array containing the data. The data to be plotted should be on axis 1 (the second dimension).
    dt: sampling interval in time.
labels: row labels of the data, default is None.
cmap: colormap, default is 'jet'
time_format: format to show time marks, default is: '%Y-%m-%dT%H'
normalize: whether normalize the PSD in plotting, default is True
figsize: figure size, default: (13,5)
"""
data=np.array(data)
if data.ndim > 2:
raise ValueError('only plot 1-d arrya or 2d matrix for now. the input data has a dimention of %d'%(data.ndim))
f,psd=utils.psd(data,1/dt)
f=f[1:]
plt.figure(figsize=figsize)
ax=plt.subplot(111)
if data.ndim==2:
nwin=data.shape[0]
if tick_inc is None:
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
psdN=np.ndarray((psd.shape[0],psd.shape[1]-1))
for i in range(psd.shape[0]):
if normalize: psdN[i,:]=psd[i,1:]/np.max(np.abs(psd[i,1:]))
else: psdN[i,:]=psd[i,1:]
plt.imshow(psdN,aspect='auto',extent=[f.min(),f.max(),psdN.shape[0],0],cmap=cmap)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
if labels is not None: ax.set_yticklabels(labels[0:nwin:tick_inc])
if normalize: plt.colorbar(label='normalized PSD')
else: plt.colorbar(label='PSD')
else:
if normalize: psdN=psd[1:]/np.max(np.abs(psd[1:]))
        else: psdN=psd[1:]
plt.plot(f,psdN)
if xrange is None:plt.xlim([f[1],f[-1]])
else:
plt.xlim(xrange)
plt.xscale('log')
plt.xlabel('frequency (Hz)')
plt.title('PSD')
if save:
if figname is not None:
plt.savefig(figname)
else:
plt.savefig("PSD.png")
else:
plt.show()
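# Usage sketch (synthetic data, not part of the original module): plot the
# PSDs of five random traces sampled at 100 Hz.
def _example_plot_psd():
    data = np.random.randn(5, 2000)
    plot_psd(data, dt=0.01, labels=['trace%d' % i for i in range(5)])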
#############################################################################
############### PLOTTING RAW SEISMIC WAVEFORMS ##########################
#############################################################################
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_waveform(sfile,net,sta,freqmin,freqmax,save=False,figdir=None,format='pdf'):
'''
display the downloaded waveform for station A
PARAMETERS:
-----------------------
    sfile: file containing all waveform data for a time chunk in ASDF format
net,sta,comp: network, station name and component
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
USAGE:
-----------------------
plot_waveform('temp.h5','CI','BLC',0.01,0.5)
'''
# open pyasdf file to read
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
sta_list = ds.waveforms.list()
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# check whether station exists
tsta = net+'.'+sta
if tsta not in sta_list:
raise ValueError('no data for %s in %s'%(tsta,sfile))
tcomp = ds.waveforms[tsta].get_waveform_tags()
ncomp = len(tcomp)
if ncomp==0:
print('no data found for the specified net.sta.')
return None
tr = ds.waveforms[tsta][tcomp[0]]
dt = tr[0].stats.delta
npts = tr[0].stats.npts
tt = np.arange(0,npts)*dt
if ncomp == 1:
data = tr[0].data
data = bandpass(data,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
fig=plt.figure(figsize=(9,3))
plt.plot(tt,data,'k-',linewidth=1)
plt.title('T\u2080:%s %s.%s.%s @%5.3f-%5.2f Hz' % (tr[0].stats.starttime,net,sta,tcomp[0].split('_')[0].upper(),freqmin,freqmax))
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.tight_layout()
plt.show()
else:
data = np.zeros(shape=(ncomp,npts),dtype=np.float32)
for ii in range(ncomp):
data[ii] = ds.waveforms[tsta][tcomp[ii]][0].data
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
fig=plt.figure(figsize=(9,6))
for c in range(ncomp):
if c==0:
plt.subplot(ncomp,1,1)
plt.plot(tt,data[0],'k-',linewidth=1)
plt.title('T\u2080:%s %s.%s @%5.3f-%5.2f Hz' % (tr[0].stats.starttime,net,sta,freqmin,freqmax))
plt.legend([tcomp[0].split('_')[0].upper()],loc='upper left')
plt.xlabel('Time [s]')
else:
plt.subplot(ncomp,1,c+1)
plt.plot(tt,data[c],'k-',linewidth=1)
plt.legend([tcomp[c].split('_')[0].upper()],loc='upper left')
plt.xlabel('Time [s]')
fig.tight_layout()
if save:
if not os.path.isdir(figdir):os.mkdir(figdir)
sfilebase=sfile.split('/')[-1]
outfname = figdir+'/{0:s}_{1:s}.{2:s}'.format(sfilebase.split('.')[0],net,sta)
fig.savefig(outfname+'.'+format, format=format, dpi=300)
plt.close()
else:
fig.show()
#############################################################################
###############PLOTTING XCORR RESULTS AS THE OUTPUT OF SEISGO ##########################
#############################################################################
def plot_xcorr_substack(sfile,freqmin,freqmax,lag=None,comp='ZZ',
save=True,figdir=None):
'''
    display the 2D matrix of the cross-correlation functions for a certain time chunk.
    PARAMETERS:
    --------------------------
    sfile: cross-correlation functions output by the SeisGo workflow
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_xcorr_substack('temp.h5',0.1,1,100,True,'./')
Note: IMPORTANT!!!! this script only works for cross-correlation with sub-stacks being set to True in S1.
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
spairs = ds.auxiliary_data.list()
path_lists = ds.auxiliary_data[spairs[0]].list()
flag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['substack']
dt = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['dt']
maxlag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# only works for cross-correlation with substacks generated
if not flag:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# lags for display
if not lag:lag=maxlag
lag0=np.min([1.0*lag,maxlag])
    if lag>maxlag:raise ValueError('lag exceeds maxlag!')
# t is the time labels for plotting
if lag>=5:
tstep=int(int(lag)/5)
t1=np.arange(-int(lag),0,step=tstep)
t2=np.arange(0,int(lag+0.5*tstep),step=tstep)
t=np.concatenate((t1,t2))
else:
tstep=lag/5
t1=np.arange(-lag,0,step=tstep)
t2=np.arange(0,lag+0.5*tstep,step=tstep)
t=np.concatenate((t1,t2))
indx1 = int((maxlag-lag0)/dt)
indx2 = indx1+2*int(lag0/dt)+1
for spair in spairs:
ttr = spair.split('_')
net1,sta1 = ttr[0].split('.')
net2,sta2 = ttr[1].split('.')
path_lists = ds.auxiliary_data[spair].list()
for ipath in path_lists:
chan1,chan2 = ipath.split('_')
cc_comp=chan1[-1]+chan2[-1]
if cc_comp == comp or comp=='all' or comp=='ALL':
try:
dist = ds.auxiliary_data[spair][ipath].parameters['dist']
ngood= ds.auxiliary_data[spair][ipath].parameters['ngood']
ttime= ds.auxiliary_data[spair][ipath].parameters['time']
except Exception:
print('continue! something wrong with %s %s'%(spair,ipath))
continue
# cc matrix
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
data = ds.auxiliary_data[spair][ipath].data[:,indx1:indx2]
# print(data.shape)
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
if nwin==0 or len(ngood)==1: print('continue! no enough substacks!');continue
tmarks = []
data_normalizd=data
# load cc for each station-pair
for ii in range(nwin):
data[ii] = bandpass(data[ii],freqmin,freqmax,1/dt,corners=4, zerophase=True)
data[ii] = data[ii]-np.mean(data[ii])
amax[ii] = np.max(np.abs(data[ii]))
data_normalizd[ii] = data[ii]/amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
tmarks.append(obspy.UTCDateTime(ttime[ii]).strftime('%Y-%m-%dT%H:%M:%S'))
dstack_mean=np.mean(data,axis=0)
dstack_robust=stacking.robust_stack(data)[0]
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig = plt.figure(figsize=(10,6))
ax = fig.add_subplot(5,1,(1,3))
ax.matshow(data_normalizd,cmap='seismic',extent=[-lag0,lag0,nwin,0],aspect='auto')
ax.plot((0,0),(nwin,0),'k-')
ax.set_title('%s.%s.%s %s.%s.%s dist:%5.2fkm' % (net1,sta1,chan1,net2,sta2,chan2,dist))
ax.set_xlabel('time [s]')
ax.set_xticks(t)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
# ax.set_yticklabels(np.arange(0,nwin,step=tick_inc))
ax.set_yticklabels(tmarks[0:nwin:tick_inc])
ax.set_xlim([-lag,lag])
ax.xaxis.set_ticks_position('bottom')
ax1 = fig.add_subplot(5,1,(4,5))
ax1.set_title('stack at %4.2f-%4.2f Hz'%(freqmin,freqmax))
tstack=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tstack)>len(dstack_mean):tstack=tstack[:-1]
ax1.plot(tstack,dstack_mean,'b-',linewidth=1,label='mean')
ax1.plot(tstack,dstack_robust,'r-',linewidth=1,label='robust')
ax1.set_xlabel('time [s]')
ax1.set_xticks(t)
ax1.set_xlim([-lag,lag])
ylim=ax1.get_ylim()
ax1.plot((0,0),ylim,'k-')
ax1.set_ylim(ylim)
ax1.legend(loc='upper right')
ax1.grid()
# ax2 = fig.add_subplot(414)
# ax2.plot(amax/min(amax),'r-')
# ax2.plot(ngood,'b-')
# ax2.set_xlabel('waveform number')
# ax2.set_xticks(np.arange(0,nwin,step=tick_inc))
# ax2.set_xticklabels(tmarks[0:nwin:tick_inc])
# #for tick in ax[2].get_xticklabels():
# # tick.set_rotation(30)
# ax2.legend(['relative amp','ngood'],loc='upper right')
fig.tight_layout()
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+\
'/{0:s}.{1:s}.{2:s}_{3:s}.{4:s}.{5:s}_{6:s}-{7:s}Hz.png'.format(net1,sta1,\
chan1,net2,\
sta2,chan2,
str(freqmin),str(freqmax))
fig.savefig(outfname, format='png', dpi=400)
print('saved to: '+outfname)
plt.close()
else:
fig.show()
def plot_corrfile(sfile,freqmin,freqmax,lag=None,comp='ZZ',
save=True,figname=None,format='png',figdir=None):
'''
    display the 2D matrix of the cross-correlation functions for a certain time chunk.
    PARAMETERS:
    --------------------------
    sfile: cross-correlation functions output by the SeisGo workflow
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_corrfile('temp.h5',0.1,1,100,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
corrdict=noise.extract_corrdata(sfile,comp=comp)
clist=list(corrdict.keys())
for c in clist:
corr=corrdict[c]
if comp in list(corr.keys()):
corr[comp].plot(freqmin=freqmin,freqmax=freqmax,lag=lag,save=save,figdir=figdir,
figname=figname,format=format)
def plot_corrdata(corr,freqmin=None,freqmax=None,lag=None,save=False,figdir=None,figsize=(10,8)):
'''
    display the 2D matrix of the cross-correlation functions for a certain time chunk.
PARAMETERS:
--------------------------
corr: : class:`~seisgo.types.CorrData`
CorrData object containing the correlation functions and the metadata.
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
--------------------------
plot_corrdata(corr,0.1,1,100,save=True,figdir='./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
netstachan1 = corr.net[0]+'.'+corr.sta[0]+'.'+corr.loc[0]+'.'+corr.chan[0]
netstachan2 = corr.net[1]+'.'+corr.sta[1]+'.'+corr.loc[1]+'.'+corr.chan[1]
dt,maxlag,dist,ngood,ttime,substack = [corr.dt,corr.lag,corr.dist,corr.ngood,corr.time,corr.substack]
# lags for display
if not lag:lag=maxlag
    if lag>maxlag:raise ValueError('lag exceeds maxlag!')
lag0=np.min([1.0*lag,maxlag])
# t is the time labels for plotting
if lag>=5:
tstep=int(int(lag)/5)
t1=np.arange(-int(lag),0,step=tstep);t2=np.arange(0,int(lag+0.5*tstep),step=tstep)
t=np.concatenate((t1,t2))
else:
tstep=lag/5
t1=np.arange(-lag,0,step=tstep);t2=np.arange(0,lag+0.5*tstep,step=tstep)
t=np.concatenate((t1,t2))
indx1 = int((maxlag-lag0)/dt);indx2 = indx1+2*int(lag0/dt)+1
# cc matrix
if substack:
data = corr.data[:,indx1:indx2]
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
# print(data.shape)
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
if nwin==0 or len(ngood)==1:
print('continue! no enough trace to plot!')
return
tmarks = []
data_normalizd=data
# load cc for each station-pair
for ii in range(nwin):
if freqmin is not None and freqmax is not None:
data[ii] = bandpass(data[ii],freqmin,freqmax,1/dt,corners=4, zerophase=True)
data[ii] = data[ii]-np.mean(data[ii])
amax[ii] = np.max(np.abs(data[ii]))
data_normalizd[ii] = data[ii]/amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
tmarks.append(obspy.UTCDateTime(ttime[ii]).strftime('%Y-%m-%dT%H:%M:%S'))
dstack_mean=np.mean(data,axis=0)
# dstack_robust=stack.robust_stack(data)[0]
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(6,1,(1,4))
ax.matshow(data_normalizd,cmap='seismic',extent=[-lag0,lag0,nwin,0],aspect='auto')
ax.plot((0,0),(nwin,0),'k-')
if freqmin is not None and freqmax is not None:
ax.set_title('%s-%s : dist : %5.2f km : %4.2f-%4.2f Hz' % (netstachan1,netstachan2,
dist,freqmin,freqmax))
else:
ax.set_title('%s-%s : dist : %5.2f km : unfiltered' % (netstachan1,netstachan2,dist))
ax.set_xlabel('time [s]')
ax.set_xticks(t)
ax.set_yticks(np.arange(0,nwin,step=tick_inc))
ax.set_yticklabels(tmarks[0:nwin:tick_inc])
ax.set_xlim([-lag,lag])
ax.xaxis.set_ticks_position('bottom')
ax1 = fig.add_subplot(6,1,(5,6))
if freqmin is not None and freqmax is not None:
ax1.set_title('stack at %4.2f-%4.2f Hz'%(freqmin,freqmax))
else:
ax1.set_title('stack: unfiltered')
tstack=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tstack)>len(dstack_mean):tstack=tstack[:-1]
ax1.plot(tstack,dstack_mean,'b-',linewidth=1,label='mean')
# ax1.plot(tstack,dstack_robust,'r-',linewidth=1,label='robust')
ax1.set_xlabel('time [s]')
ax1.set_xticks(t)
ax1.set_xlim([-lag,lag])
ylim=ax1.get_ylim()
ax1.plot((0,0),ylim,'k-')
ax1.set_ylim(ylim)
ax1.legend(loc='upper right')
ax1.grid()
fig.tight_layout()
else: #only one trace available
data = corr.data[indx1:indx2]
# load cc for each station-pair
if freqmin is not None and freqmax is not None:
data = bandpass(data,freqmin,freqmax,1/dt,corners=4, zerophase=True)
data = data-np.mean(data)
amax = np.max(np.abs(data))
data /= amax
timestamp = obspy.UTCDateTime(ttime)
tmarks=obspy.UTCDateTime(ttime).strftime('%Y-%m-%dT%H:%M:%S')
tx=np.arange(-lag0,lag0+0.5*dt,dt)
if len(tx)>len(data):tx=tx[:-1]
plt.figure(figsize=figsize)
ax=plt.gca()
plt.plot(tx,data,'k-',linewidth=1)
if freqmin is not None and freqmax is not None:
plt.title('%s-%s : dist : %5.2f km : %4.2f-%4.2f Hz' % (netstachan1,netstachan2,
dist,freqmin,freqmax))
else:
plt.title('%s-%s : dist : %5.2f km : unfiltered' % (netstachan1,netstachan2,dist))
plt.xlabel('time [s]')
plt.xticks(t)
ylim=ax.get_ylim()
plt.plot((0,0),ylim,'k-')
plt.ylim(ylim)
plt.xlim([-lag,lag])
ax.grid()
# save figure or just show
if save:
        if figdir==None:figdir = '.'  # plot_corrdata has no sfile argument; default to the current directory
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+\
'/{0:s}_{1:s}_{2:s}-{3:s}Hz.png'.format(netstachan1,netstachan2,
str(freqmin),str(freqmax))
plt.savefig(outfname, format='png', dpi=300)
print('saved to: '+outfname)
plt.close()
else:
plt.show()
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_xcorr_substack_spect(sfile,freqmin,freqmax,lag=None,save=True,figdir='./'):
'''
    display the amplitude spectrum of the cross-correlation functions for a time chunk.
    PARAMETERS:
    -----------------------
    sfile: cross-correlation functions output by S1
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
USAGE:
-----------------------
plot_xcorr_substack_spect('temp.h5',0.1,1,200,True,'./')
Note: IMPORTANT!!!! this script only works for cross-correlations with sub-stacks generated in S1.
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
spairs = ds.auxiliary_data.list()
path_lists = ds.auxiliary_data[spairs[0]].list()
flag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['substack']
dt = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['dt']
maxlag = ds.auxiliary_data[spairs[0]][path_lists[0]].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
# only works for cross-correlation with substacks generated
if not flag:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
nfft = int(next_fast_len(indx2-indx1))
freq = scipy.fftpack.fftfreq(nfft,d=dt)[:nfft//2]
for spair in spairs:
ttr = spair.split('_')
net1,sta1 = ttr[0].split('.')
net2,sta2 = ttr[1].split('.')
for ipath in path_lists:
chan1,chan2 = ipath.split('_')
try:
dist = ds.auxiliary_data[spair][ipath].parameters['dist']
ngood= ds.auxiliary_data[spair][ipath].parameters['ngood']
ttime= ds.auxiliary_data[spair][ipath].parameters['time']
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
except Exception:
print('continue! something wrong with %s %s'%(spair,ipath))
continue
# cc matrix
data = ds.auxiliary_data[spair][ipath].data[:,indx1:indx2]
nwin = data.shape[0]
amax = np.zeros(nwin,dtype=np.float32)
spec = np.zeros(shape=(nwin,nfft//2),dtype=np.complex64)
if nwin==0 or len(ngood)==1: print('continue! not enough substacks!');continue
# load cc for each station-pair
for ii in range(nwin):
spec[ii] = scipy.fftpack.fft(data[ii],nfft,axis=0)[:nfft//2]
spec[ii] /= np.max(np.abs(spec[ii]),axis=0)
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = max(data[ii])
data[ii] /= amax[ii]
timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# plotting
if nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig,ax = plt.subplots(3,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s.%s.%s %s.%s.%s dist:%5.2f km' % (net1,sta1,chan1,net2,sta2,chan2,dist))
ax[0].set_xlabel('time [s]')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:-1:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].matshow(np.abs(spec),cmap='seismic',extent=[freq[0],freq[-1],nwin,0],aspect='auto')
ax[1].set_xlabel('freq [Hz]')
ax[1].set_ylabel('amplitudes')
ax[1].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[1].xaxis.set_ticks_position('bottom')
ax[2].plot(amax/min(amax),'r-')
ax[2].plot(ngood,'b-')
ax[2].set_xlabel('waveform number')
#ax[1].set_xticks(np.arange(0,nwin,int(nwin/5)))
ax[2].legend(['relative amp','ngood'],loc='upper right')
fig.tight_layout()
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}.{1:s}.{2:s}_{3:s}.{4:s}.{5:s}.pdf'.format(net1,sta1,chan1,net2,sta2,chan2)
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
#############################################################################
###############PLOTTING THE POST-STACKING XCORR FUNCTIONS AS OUTPUT OF S2 STEP IN NOISEPY ##########################
#############################################################################
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_substack_all(sfile,freqmin,freqmax,comp,lag=None,save=False,figdir=None):
'''
display the 2D matrix of the cross-correlation functions stacked for all time windows.
PARAMETERS:
---------------------
sfile: cross-correlation functions output by S2
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
comp: cross component of the targeted cc functions
USAGE:
----------------------
plot_substack_all('temp.h5',0.1,1,'ZZ',50,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
paths = comp
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
dtype_lists = ds.auxiliary_data.list()
dt = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dt']
dist = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dist']
maxlag = ds.auxiliary_data[dtype_lists[0]][paths].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
if len(dtype_lists)==1:
raise ValueError('Abort! seems no substacks have been done')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
# other parameters to keep
nwin = len(dtype_lists)-1
data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
ngood= np.zeros(nwin,dtype=np.int16)
ttime= np.zeros(nwin,dtype=int)
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
amax = np.zeros(nwin,dtype=np.float32)
for ii,itype in enumerate(dtype_lists[2:]):
timestamp[ii] = obspy.UTCDateTime(float(itype[1:]))
try:
ngood[ii] = ds.auxiliary_data[itype][paths].parameters['ngood']
ttime[ii] = ds.auxiliary_data[itype][paths].parameters['time']
#timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# cc matrix
data[ii] = ds.auxiliary_data[itype][paths].data[indx1:indx2]
data[ii] = bandpass(data[ii],freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = np.max(data[ii])
data[ii] /= amax[ii]
except Exception as e:
print(e);continue
if len(ngood)==1:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# plotting
if nwin>100:
tick_inc = int(nwin/10)
elif nwin>10:
tick_inc = int(nwin/5)
else:
tick_inc = 2
fig,ax = plt.subplots(2,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s dist:%5.2f km filtered at %4.2f-%4.2fHz' % (sfile.split('/')[-1],dist,freqmin,freqmax))
ax[0].set_xlabel('time [s]')
ax[0].set_ylabel('waveforms')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].plot(amax/max(amax),'r-')
ax[1].plot(ngood,'b-')
ax[1].set_xlabel('waveform number')
ax[1].set_xticks(np.arange(0,nwin,nwin//5))
ax[1].legend(['relative amp','ngood'],loc='upper right')
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}_{1:4.2f}_{2:4.2f}Hz.pdf'.format(sfile.split('/')[-1],freqmin,freqmax)
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
'''
Inherited and modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_substack_all_spect(sfile,freqmin,freqmax,comp,lag=None,save=False,figdir=None):
'''
display the amplitude spectrum of the cross-correlation functions stacked for all time windows.
PARAMETERS:
-----------------------
sfile: cross-correlation functions output by S2
freqmin: min frequency to be filtered
freqmax: max frequency to be filtered
lag: time ranges for display
comp: cross component of the targeted cc functions
USAGE:
-----------------------
plot_substack_all_spect('temp.h5',0.1,1,'ZZ',50,True,'./')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
paths = comp
try:
ds = pyasdf.ASDFDataSet(sfile,mode='r')
# extract common variables
dtype_lists = ds.auxiliary_data.list()
dt = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dt']
dist = ds.auxiliary_data[dtype_lists[0]][paths].parameters['dist']
maxlag = ds.auxiliary_data[dtype_lists[0]][paths].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfile);sys.exit()
if len(dtype_lists)==1:
raise ValueError('Abort! seems no substacks have been done')
# lags for display
if not lag:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=int(2*int(lag)/4))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
nfft = int(next_fast_len(indx2-indx1))
freq = scipy.fftpack.fftfreq(nfft,d=dt)[:nfft//2]
# other parameters to keep
nwin = len(dtype_lists)-1
data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
spec = np.zeros(shape=(nwin,nfft//2),dtype=np.complex64)
ngood= np.zeros(nwin,dtype=np.int16)
ttime= np.zeros(nwin,dtype=int)
timestamp = np.empty(ttime.size,dtype='datetime64[s]')
amax = np.zeros(nwin,dtype=np.float32)
for ii,itype in enumerate(dtype_lists[1:]):
timestamp[ii] = obspy.UTCDateTime(float(itype[1:]))
try:
ngood[ii] = ds.auxiliary_data[itype][paths].parameters['ngood']
ttime[ii] = ds.auxiliary_data[itype][paths].parameters['time']
#timestamp[ii] = obspy.UTCDateTime(ttime[ii])
# cc matrix
tdata = ds.auxiliary_data[itype][paths].data[indx1:indx2]
spec[ii] = scipy.fftpack.fft(tdata,nfft,axis=0)[:nfft//2]
spec[ii] /= np.max(np.abs(spec[ii]))
data[ii] = bandpass(tdata,freqmin,freqmax,int(1/dt),corners=4, zerophase=True)
amax[ii] = np.max(data[ii])
data[ii] /= amax[ii]
except Exception as e:
print(e);continue
if len(ngood)==1:
raise ValueError('seems no substacks have been done! not suitable for this plotting function')
# plotting
tick_inc = 50
fig,ax = plt.subplots(3,sharex=False)
ax[0].matshow(data,cmap='seismic',extent=[-lag,lag,nwin,0],aspect='auto')
ax[0].set_title('%s dist:%5.2f km' % (sfile.split('/')[-1],dist))
ax[0].set_xlabel('time [s]')
ax[0].set_ylabel('waveforms')
ax[0].set_xticks(t)
ax[0].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[0].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[0].xaxis.set_ticks_position('bottom')
ax[1].matshow(np.abs(spec),cmap='seismic',extent=[freq[0],freq[-1],nwin,0],aspect='auto')
ax[1].set_xlabel('freq [Hz]')
ax[1].set_ylabel('amplitudes')
ax[1].set_yticks(np.arange(0,nwin,step=tick_inc))
ax[1].set_yticklabels(timestamp[0:nwin:tick_inc])
ax[1].xaxis.set_ticks_position('bottom')
ax[2].plot(amax/max(amax),'r-')
ax[2].plot(ngood,'b-')
ax[2].set_xlabel('waveform number')
ax[2].set_xticks(np.arange(0,nwin,nwin//15))
ax[2].legend(['relative amp','ngood'],loc='upper right')
# save figure or just show
if save:
if figdir==None:figdir = sfile.split('.')[0]
if not os.path.isdir(figdir):os.mkdir(figdir)
outfname = figdir+'/{0:s}.pdf'.format(sfile.split('/')[-1])
fig.savefig(outfname, format='pdf', dpi=400)
plt.close()
else:
fig.show()
'''
Modified from the plotting functions in the plotting_module of NoisePy (https://github.com/mdenolle/NoisePy).
Credits should be given to the development team for NoisePy (<NAME> and <NAME>).
'''
def plot_xcorr_moveout_heatmap(sfiles,sta,dtype,freq,comp,dist_inc,lag=None,save=False,\
figsize=None,format='png',figdir=None):
'''
display the moveout (2D matrix) of the cross-correlation functions stacked for all time chunks.
PARAMETERS:
---------------------
sfiles: list of cross-correlation functions output by S2
sta: station name as the virtual source.
dtype: datatype either 'Allstack_pws' or 'Allstack_linear'
freq: frequency band(s) for filtering, given as [freqmin, freqmax] or a list of such pairs
comp: cross component
dist_inc: distance bins to stack over
lag: lag times for displaying
save: set True to save the figures (in pdf format)
figdir: desired directory to save the figure (if not provided, save to default dir)
USAGE:
----------------------
plot_xcorr_moveout_heatmap(['temp.h5'],'sta','Allstack_pws',[0.1,0.2],'ZZ',1,lag=200,save=True,figdir='./temp')
'''
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
if not isinstance(freq[0],list):freq=[freq]
freq=np.array(freq)
figlabels=['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)','(i)']
if freq.shape[0]>9:
raise ValueError('freq includes more than 9 (maximum allowed for now) elements!')
elif freq.shape[0]==9:
subplot=[3,3]
figsize0=[14,7.5]
elif freq.shape[0] >=7 and freq.shape[0] <=8:
subplot=[2,4]
figsize0=[18,10]
elif freq.shape[0] >=5 and freq.shape[0] <=6:
subplot=[2,3]
figsize0=[14,7.5]
elif freq.shape[0] ==4:
subplot=[2,2]
figsize0=[10,6]
else:
subplot=[1,freq.shape[0]]
if freq.shape[0]==3:
figsize0=[13,3]
elif freq.shape[0]==2:
figsize0=[8,3]
else:
figsize0=[4,3]
if figsize is None:figsize=figsize0
path = comp
receiver = sta+'.h5'
stack_method = dtype.split('_')[-1]
# extract common variables
try:
ds = pyasdf.ASDFDataSet(sfiles[0],mpi=False,mode='r')
dt = ds.auxiliary_data[dtype][path].parameters['dt']
maxlag= ds.auxiliary_data[dtype][path].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfiles[0]);sys.exit()
# lags for display
if lag is None:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
t = np.arange(-int(lag),int(lag)+dt,step=(int(2*int(lag)/4)))
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
# cc matrix
nwin = len(sfiles)
data0 = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
dist = np.zeros(nwin,dtype=np.float32)
ngood= np.zeros(nwin,dtype=np.int16)
# load cc and parameter matrix
for ii in range(len(sfiles)):
sfile = sfiles[ii]
treceiver = sfile.split('_')[-1]
ds = pyasdf.ASDFDataSet(sfile,mpi=False,mode='r')
try:
# load data to variables
dist[ii] = ds.auxiliary_data[dtype][path].parameters['dist']
ngood[ii]= ds.auxiliary_data[dtype][path].parameters['ngood']
tdata = ds.auxiliary_data[dtype][path].data[indx1:indx2]
if treceiver == receiver: tdata=np.flip(tdata,axis=0)
except Exception:
print("continue! cannot read %s "%sfile);continue
data0[ii] = tdata
ntrace = int(np.round(np.max(dist)+0.51)/dist_inc)
fig=plt.figure(figsize=figsize)
for f in range(len(freq)):
freqmin=freq[f][0]
freqmax=freq[f][1]
data = np.zeros(shape=(nwin,indx2-indx1),dtype=np.float32)
for i2 in range(data0.shape[0]):
data[i2]=bandpass(data0[i2],freqmin,freqmax,1/dt,corners=4, zerophase=True)
# average cc
ndata = np.zeros(shape=(ntrace,indx2-indx1),dtype=np.float32)
ndist = np.zeros(ntrace,dtype=np.float32)
for td in range(ndata.shape[0]):
tindx = np.where((dist>=td*dist_inc)&(dist<(td+1)*dist_inc))[0]
if len(tindx):
ndata[td] = np.mean(data[tindx],axis=0)
ndist[td] = (td+0.5)*dist_inc
# normalize waveforms
indx = np.where(ndist>0)[0]
ndata = ndata[indx]
ndist = ndist[indx]
for ii in range(ndata.shape[0]):
# print(ii,np.max(np.abs(ndata[ii])))
ndata[ii] /= np.max(np.abs(ndata[ii]))
# plotting figures
ax=fig.add_subplot(subplot[0],subplot[1],f+1)
ax.matshow(ndata,cmap='seismic',extent=[-lag,lag,ndist[-1],ndist[0]],aspect='auto')
ax.set_title('%s %s stack %s %5.3f-%5.2f Hz'%(figlabels[f],sta,stack_method,freqmin,freqmax))
ax.set_xlabel('time [s]')
ax.set_ylabel('distance [km]')
ax.set_xticks(t)
ax.xaxis.set_ticks_position('bottom')
#ax.text(np.ones(len(ndist))*(lag-5),dist[ndist],ngood[ndist],fontsize=8)
plt.tight_layout()
# save figure or show
if save:
outfname = figdir+'/moveout_'+sta+'_heatmap_'+str(stack_method)+'_'+str(dist_inc)+'kmbin_'+comp+'.'+format
plt.savefig(outfname, format=format, dpi=300)
plt.close()
else:
plt.show()
#test functions
def plot_xcorr_moveout_wiggle(sfiles,sta,dtype,freq,ccomp=None,scale=1.0,lag=None,\
ylim=None,save=False,figsize=None,figdir=None,format='png',minsnr=None):
'''
display the moveout waveforms of the cross-correlation functions stacked for all time chunks.
PARAMETERS:
---------------------
sfiles: list of cross-correlation functions output by S2
sta: source station name
dtype: datatype either 'Allstack0pws' or 'Allstack0linear'
freq: frequency band(s) for filtering, given as [freqmin, freqmax] or a list of such pairs
ccomp: x-correlation component names, could be a string or a list of strings.
scale: plot the waveforms with scaled amplitudes
lag: lag times for displaying
save: set True to save the figures (in pdf format)
figdir: desired directory to save the figure (if not provided, save to default dir)
minsnr: minimum SNR as a QC criterion, the SNR is computed as max(abs(trace))/mean(abs(trace)),
without signal and noise windows.
USAGE:
----------------------
plot_xcorr_moveout_wiggle(['temp.h5'],'sta','Allstack0pws',[0.1,0.2],ccomp='ZZ',lag=200,save=True,figdir='./temp')
'''
if not isinstance(freq[0],list):freq=[freq]
freq=np.array(freq)
figlabels=['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)','(i)']
if freq.shape[0]>9:
raise ValueError('freq includes more than 9 (maximum allowed for now) elements!')
elif freq.shape[0]==9:
subplot=[3,3]
figsize0=[14,7.5]
elif freq.shape[0] >=7 and freq.shape[0] <=8:
subplot=[2,4]
figsize0=[18,10]
elif freq.shape[0] >=5 and freq.shape[0] <=6:
subplot=[2,3]
figsize0=[14,7.5]
elif freq.shape[0] ==4:
subplot=[2,2]
figsize0=[10,6]
else:
subplot=[1,freq.shape[0]]
if freq.shape[0]==3:
figsize0=[13,3]
elif freq.shape[0]==2:
figsize0=[8,3]
else:
figsize0=[4,3]
if figsize is None:figsize=figsize0
#
qc=False
if minsnr is not None:
qc=True
# open data for read
if save:
if figdir==None:print('no path selected! save figures in the default path')
receiver = sta+'.h5'
stack_method = dtype.split('_')[-1]
if isinstance(ccomp,str):ccomp=[ccomp]
# extract common variables
try:
ds = pyasdf.ASDFDataSet(sfiles[0],mpi=False,mode='r')
complist=ds.auxiliary_data[dtype].list()
dt = ds.auxiliary_data[dtype][complist[0]].parameters['dt']
maxlag= ds.auxiliary_data[dtype][complist[0]].parameters['maxlag']
except Exception:
print("exit! cannot open %s to read"%sfiles[0]);sys.exit()
if ccomp is None:ccomp=complist
# lags for display
if lag is None:lag=maxlag
if lag>maxlag:raise ValueError('lag exceeds maxlag!')
tt = np.arange(-lag,lag+dt,dt)
indx0= int(maxlag/dt) #zero time index
indx1 = int((maxlag-lag)/dt)
indx2 = indx1+2*int(lag/dt)+1
# load cc and parameter matrix
for ic in range(len(ccomp)):
comp = ccomp[ic]
data0 = np.zeros(shape=(len(sfiles),indx2-indx1),dtype=np.float32)
dist = np.zeros(len(sfiles),dtype=np.float32)
snrneg = np.zeros(len(sfiles),dtype=np.float32)
snrpos = np.zeros(len(sfiles),dtype=np.float32)
iflip = np.zeros(len(sfiles),dtype=np.int16)
for ii in range(len(sfiles)):
sfile = sfiles[ii]
iflip[ii] = 0
treceiver = sfile.split('_')[-1]
if treceiver == receiver:
iflip[ii] = 1
ds = pyasdf.ASDFDataSet(sfile,mpi=False,mode='r')
try:
# load data to variables
dist[ii] = ds.auxiliary_data[dtype][comp].parameters['dist']
ngood= ds.auxiliary_data[dtype][comp].parameters['ngood']
data0[ii] = ds.auxiliary_data[dtype][comp].data[indx1:indx2]
if qc:
#get the pseudo-SNR: maximum absolute amplitude/mean absolute amplitude.
dneg=ds.auxiliary_data[dtype][comp].data[indx1:indx0-1]
dpos=ds.auxiliary_data[dtype][comp].data[indx0+1:indx2]
snrneg[ii]=np.max(np.abs(dneg))/np.mean(np.abs(dneg))
snrpos[ii]=np.max(np.abs(dpos))/np.mean(np.abs(dpos))
# print([snrneg,snrpos])
except Exception as e:
print("continue! error working on %s "%sfile);
print(e)
continue
mdist = np.max(dist)
#!/usr/bin/env python
## Copyright 2002 by PyMMLib Development Group, http://pymmlib.sourceforge.net/
## This code is part of the PyMMLib distribution and governed by
## its license. Please see the LICENSE_pymmlib file that should have been
## included as part of this package.
"""Symmetry operations as functions on vectors or arrays.
"""
import numpy
## 64 unique rotation matricies
Rot_Z_mY_X = numpy.array([[ 0.0, 0.0, 1.0], [ 0.0,-1.0, 0.0], [ 1.0, 0.0, 0.0]], float)
Rot_Y_mX_mZ = numpy.array([[ 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_XmY_X_mZ = numpy.array([[ 1.0,-1.0, 0.0], [ 1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mX_Y_mZ = numpy.array([[-1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_X_mZ_Y = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0], [ 0.0, 1.0, 0.0]], float)
Rot_Y_mXY_Z = numpy.array([[ 0.0, 1.0, 0.0], [-1.0, 1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_Y_mX_Z = numpy.array([[ 0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_XmY_X_Z = numpy.array([[ 1.0,-1.0, 0.0], [ 1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mX_mXY_mZ = numpy.array([[-1.0, 0.0, 0.0], [-1.0, 1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_Y_Z_X = numpy.array([[ 0.0, 1.0, 0.0], [ 0.0, 0.0, 1.0], [ 1.0, 0.0, 0.0]], float)
Rot_mY_mZ_X = numpy.array([[ 0.0,-1.0, 0.0], [ 0.0, 0.0,-1.0], [ 1.0, 0.0, 0.0]], float)
Rot_X_Z_mY = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0], [ 0.0,-1.0, 0.0]], float)
Rot_XmY_mY_Z = numpy.array([[ 1.0,-1.0, 0.0], [ 0.0,-1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_Y_X_mZ = numpy.array([[ 0.0, 1.0, 0.0], [ 1.0, 0.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_Y_mZ_X = numpy.array([[ 0.0, 1.0, 0.0], [ 0.0, 0.0,-1.0], [ 1.0, 0.0, 0.0]], float)
Rot_mXY_Y_Z = numpy.array([[-1.0, 1.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mX_mY_mZ = numpy.array([[-1.0, 0.0, 0.0], [ 0.0,-1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_X_Y_mZ = numpy.array([[ 1.0, 0.0, 0.0], [ 0.0, 1.0, 0.0], [ 0.0, 0.0,-1.0]], float)
Rot_mXY_mX_Z = numpy.array([[-1.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [ 0.0, 0.0, 1.0]], float)
Rot_mZ_mY_mX = numpy.array([[ 0.0, 0.0,-1.0], [ 0.0,-1.0, 0.0], [-1.0, 0.0, 0.0]], float)
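# A minimal usage sketch (the coordinate values below are arbitrary): each matrix
# maps a coordinate triple by an ordinary matrix product.
def _demo_apply_symmetry_rotation():
    xyz = numpy.array([0.25, 0.50, 0.75], float)
    return numpy.dot(Rot_Z_mY_X, xyz)  # maps (x, y, z) to (z, -y, x)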
"""
Generate a synthetic set of microsomes with different membrane bound proteins
Input: - Data set parameters:
+ Number of tomograms (one microsome each)
+ Tomogram size
+ Resolution (pixel size)
+ SNR range
+ Missing wedge (semi-angle in degrees or input file)
+ binning factor for segmentation an particle picking
- Microsome parameters:
+ Membrane thickness (in nm)
+ Density model for each protein type inserted
+ Averaged number of particles per microsome and per model
+ Model for protein insertion, available: CSRV, SRPV, 2CCSRV
Output: - Tomograms generated with one microsome each:
+ Full resolution
+ Binned counterpart
- A STAR file pairing the originals and the binned tomograms
"""
################# Package import
import os
import sys
import time
import math
import numpy as np
import scipy as sp
import multiprocessing as mp
from pyorg import disperse_io, sub, spatial
from pyorg.globals import *
###### Global variables
__author__ = '<NAME>'
########################################################################################
# PARAMETERS
########################################################################################
ROOT_PATH = ''
# Output directory
out_dir = ROOT_PATH + ''
out_stem = ''
# Tomogram settings
tm_nt = 10 # tomograms
tm_size = (800, 800, 400) # pixels
tm_res = 0.262 # nm/pixel
tm_snr_rg = (0.01, 0.05)
tm_wedge = 30 # semi-angle in degrees or input file
tm_bin = 2
# Microsome parameters
mc_mbt = 5 # nm
mc_mbs = 1.5 # nm
mc_ip_min_dst = 15 # nm
# By default 1st model has clusters randomly distributed of radius mc_1st_crad and 2nd clusters of size 2*mc_3rd_crad,
# 3rd is CSRV distributed, 4th particles are 2CCSRV placed at an averaged distance of mc_4th_dst,
mc_1st_crad = 50 # nm
mc_c_jump_prob = 0.1
mc_4th_dst = 20 # nm
mc_in_models = ('', '', '', '', '', '', '', '')
mc_avg_nparts = (20, 20, 20, 50, 50, 50, 30, 30)
mc_3sg_nparts = 5
mc_zh = 0
########################################################################################
# ADDITIONAL ROUTINES
########################################################################################
def gen_mask_msome(shape, rad1, rad2):
"""
Generates a microsome mask
:param shape: 3-tuple for the output tomogram
:param rad1: radius 1 for the microsome
:param rad2: radius 2 for the microsome
:return: the generated microme
"""
dx, dy, dz = float(shape[0]), float(shape[1]), float(shape[2])
dx2, dy2, dz2 = math.floor(.5 * dx), math.floor(.5 * dy), math.floor(.5 * dz)
x_l, y_l, z_l = -dx2, -dy2, -dz2
x_h, y_h, z_h = -dx2 + dx, -dy2 + dy, -dz2 + dz
X, Y, Z = np.meshgrid(np.arange(x_l, x_h), np.arange(y_l, y_h), np.arange(z_l, z_h), indexing='xy')
R = X*X + Y*Y + Z*Z
return (R >= (rad1*rad1)) & (R <= (rad2*rad2))
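# A minimal usage sketch (shape and radii are arbitrary example values): build a
# spherical-shell mask and count how many voxels fall between the two radii.
def _demo_gen_mask_msome():
    mask = gen_mask_msome((64, 64, 32), rad1=10, rad2=14)
    return mask.sum()  # number of voxels inside the shell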
def add_dmb_msome(tomo, rad, res, mb_t, mb_s):
"""
Add a double Gaussian layered membrane of a microsome to a tomogram
:param tomo: tomogram where the membrane is added
:param rad: microsome radius
:param res: tomograms resolution (nm/px)
:param mb_t: membrane thickness in nm
:param mb_s: Gaussian sigma for each layer in nm
:return: None
"""
# Input parsing
t_v, s_v, rad_v = .5 * (mb_t / res), mb_s / res, rad / res
rad1, rad2 = rad_v - t_v, rad_v + t_v
g_cte = 2 * s_v * s_v
s_v_2 = .5 * s_v
g_cte_2 = 2 * s_v_2 * s_v_2
# Getting membrane gray intensity from input tomogram
tomo_bin = tomo > 0
tomo_vals = tomo[tomo_bin]
tomo_mn = tomo_vals.mean()
# Generating the bilayer
dx, dy, dz = float(tomo.shape[0]), float(tomo.shape[1]), float(tomo.shape[2])
dx2, dy2, dz2 = math.floor(.5 * dx), math.floor(.5 * dy), math.floor(.5 * dz)
x_l, y_l, z_l = -dx2, -dy2, -dz2
x_h, y_h, z_h = -dx2 + dx, -dy2 + dy, -dz2 + dz
X, Y, Z = np.meshgrid(np.arange(x_l, x_h), np.arange(y_l, y_h), np.arange(z_l, z_h), indexing='xy')
R = np.sqrt(X * X + Y * Y + Z * Z)
G_u = tomo_mn * np.exp(-(R-rad1)**2 / g_cte)
G_l = tomo_mn * np.exp(-(R-rad2)**2 / g_cte)
# Creating the softmaks for the model structure
BW = sp.ndimage.morphology.distance_transform_edt(np.invert(tomo_bin))
"""
Build Training dataset give orderlist and polynomial type
"""
import numpy as np
from .poly import Hermite, Plain, Legendre
def build_xy(order_list, poly, x, y, xdev=[], dydx=[]):
'''
build large X, Y for linear regression
'''
X = expand_x(order_list, x, poly)
Y = np.array(y).flatten()
if len(xdev) != 0:
Xdev = expand_dev_xy(order_list, xdev, poly)
Dydx = np.array(dydx).flatten()
X = np.vstack((X, Xdev))
Y = np.append(Y, Dydx)
return X, Y
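# A minimal usage sketch (the order list and data values are made up, and it assumes
# the Plain basis behaves as it is used elsewhere in this module): expand two
# variables with a constant term plus one linear term per variable.
def _demo_build_xy():
    order_list = [(0, 0), (1, 0), (0, 1)]
    x = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
    y = np.array([1.0, 2.0, 3.0])
    X, Y = build_xy(order_list, 'Plain', x, y)
    return X.shape, Y.shape  # expected ((3, 3), (3,))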
def expand_dev_single(order_list, _x, Poly):
nvar = len(_x)
norder = len(order_list)
no = len(order_list[0])
_XX = np.empty([nvar, norder], dtype=float)
xx = np.empty((norder,), dtype=float)
for i in range(nvar):
xx = np.empty((norder,), dtype=float)
for j in range(norder):
order = order_list[j]
_xx = 0
if order[i] != 0:
_xx = 1
for k in range(no):
o = order[k]
if o != 0:
if k != i:
_xx *= Poly(order=o).evaluate(_x[k])
else:
_xx *= Poly(order=o).der(m=1).evaluate(_x[k])
xx[j] = _xx
_XX[i, :] = xx
return _XX
def expand_x(order_list, x, polytype):
'''
expand x according to orderlist
'''
if polytype == 'Herm':
Poly = Hermite
elif polytype == 'Plain':
Poly = Plain
elif polytype == 'Legd':
Poly = Legendre
else:
raise ValueError("%s is not available" % polytype)
ndata = np.shape(x)[0]
nvar = np.shape(x)[1]
norder = len(order_list)
no = len(order_list[0])
X = np.empty((ndata, norder), dtype=float) # initialize the input matrix X
xx = np.ones((ndata, ), dtype=float)
for i in range(norder):
order = order_list[i]
xx = np.ones((ndata, ), dtype=float)
for j in range(no):
o = order[j]
xx *= Poly(order=o).evaluate(x[:, j])
X[:, i] = xx
return X
def expand_dev_xy(order_list, xdev, polytype):
if polytype == 'Herm':
Poly = Hermite
elif polytype == 'Plain':
Poly = Plain
elif polytype == 'Legd':
Poly = Legendre
else:
raise ValueError("%s is not available" % polytype)
nx = np.shape(xdev)[0]
nvar = np.shape(xdev)[1]
Xfull = [expand_dev_single(order_list, _x, Poly) for _x in xdev]
Xfull = np.concatenate(Xfull, axis=0)
# -*- coding: utf-8 -*-
""" Variation of Information
Variation of Information (*VI*) [Meilla2007]_ is an information theoretic criterion
for comparing two partitions. It is based on the classic notions of entropy and mutual information.
In a nutshell, VI measures the amount of information that is lost or gained in changing from
clustering :math:`A` to clustering :math:`B`. VI is a true metric, is always non-negative and symmetric.
The following formula is used to compute the VI between two groups:
.. math::
VI(A, B) = [H(A) - I(A, B)] + [H(B) - I(A, B)]
Where :math:`H` denotes the entropy computed for each partition separately,
and :math:`I` the mutual information between clusterings :math:`A` and :math:`B`.
The resulting distance score can be adjusted to bound it between :math:`[0, 1]` as follows:
.. math::
VI^{*}(A,B) = \\frac{1}{\\log{n}}VI(A, B)
|
-----
.. [Meilla2007] <NAME>. (2007). Comparing clusterings—an information based distance. Journal of multivariate analysis, 98(5), 873-895.
.. [Dimitriadis2009] <NAME>., <NAME>., <NAME>, Y., & <NAME>. (2009). Characterizing dynamic functional connectivity across sleep stages from EEG. Brain topography, 22(2), 119-133.
.. [Dimitriadis2012] <NAME>., <NAME>., <NAME>., & <NAME>. (2012). An EEG study of brain connectivity dynamics at the resting state. Nonlinear Dynamics-Psychology and Life Sciences, 16(1), 5.
"""
# Author: <NAME> <avra<EMAIL>>
# Author: <NAME> <<EMAIL>>
import numpy as np
from dyconnmap.ts.entropy import entropy
def variation_information(indices_a: np.ndarray, indices_b: np.ndarray) -> float:
""" Variation of Information
Parameters
----------
indices_a : array-like, shape(n_samples)
Symbolic time series.
indices_b : array-like, shape(n_samples)
Symbolic time series.
Returns
-------
vi : float
Variation of information.
nvi : float
Variation of information normalized by log(n).
"""
n1 = len(indices_a)
n2 = len(indices_b)
if n1 != n2:
pass
entropy1 = entropy(indices_a)
entropy2 = entropy(indices_b)
MI, _ = __mi(indices_a, -entropy1, indices_b, -entropy2)
entropy1 = -entropy1
entropy2 = -entropy2
VI_value = entropy1 + entropy2 - 2 * MI
NVI = VI_value / np.log(n1)
return VI_value, NVI
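# A minimal usage sketch (the symbol sequences are toy data): compare two labelings
# of six samples; the function returns both the raw VI and its log(n)-normalized value.
def _demo_variation_information():
    a = np.array([0, 0, 0, 1, 1, 1])
    b = np.array([0, 0, 1, 1, 2, 2])
    return variation_information(a, b)  # (vi, normalized vi)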
def __unique_symbols(indices):
"""
"""
N = len(indices)
unique, counts = np.unique(indices, return_counts=True)
len_counts = len(counts)
U = np.zeros((len_counts, N))
indices = indices.flatten()
for i in range(len_counts):
tmp = np.where(indices == unique[i])
U[i, tmp[0]] = 1
return U
def __mi(indices_a, entropy_a, indices_b, entropy_b):
"""
"""
N = len(indices_a)
Ua = __unique_symbols(indices_a)
Ub = __unique_symbols(indices_b)
Sab = Ua.dot(Ub.T) / np.float32(N)
Sa = np.diag(Ua.dot(Ua.T) / np.float32(N))
Sb = np.diag(Ub.dot(Ub.T) / np.float32(N))
# Add dummy dimension (needed for following computations).
Sa = np.expand_dims(Sa, axis=1)
"""
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from generators.common import Generator
import cv2
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path as osp
from collections import OrderedDict
from itertools import repeat
from utils.bbox_transform import forward_convert
from utils.bbox_transform import backward_convert
from utils.bbox_transform import get_best_begin_point
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is catched and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
"""
Parse the classes file given by csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):
continue
x1 = _parse(x1, float, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, float, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, float, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, float, 'line {}: malformed y2: {{}}'.format(line))
x3 = _parse(x3, float, 'line {}: malformed x3: {{}}'.format(line))
y3 = _parse(y3, float, 'line {}: malformed y3: {{}}'.format(line))
x4 = _parse(x4, float, 'line {}: malformed x4: {{}}'.format(line))
y4 = _parse(y4, float, 'line {}: malformed y4: {{}}'.format(line))
# check if the current class name is correctly present
if detect_text:
if class_name == '###':
continue
else:
class_name = 'text'
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,\''),
None)
return result
def _read_annotations(csv_reader, classes):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, class_name = row[:6]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),
None)
return result
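# A minimal usage sketch (file names and the class name are made up): parse two
# axis-aligned annotation rows from an in-memory CSV with the reader above; the
# second row carries an image path only, i.e. an image without annotations.
def _demo_read_annotations():
    import io
    rows = "img_001.jpg,10,20,110,220,car\nimg_002.jpg,,,,,\n"
    reader = csv.reader(io.StringIO(rows), delimiter=',')
    return _read_annotations(reader, classes={'car': 0})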
def _open_for_csv(path):
"""
Open a file with flags suitable for csv.reader.
This is different for python2 it means with mode 'rb', for python3 this means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def count_elements(seq) -> dict:
"""Tally elements from `seq`."""
hist = {}
for i in seq:
hist[i] = hist.get(i, 0) + 1
return hist
class CSVGenerator(Generator):
"""
Generate data for a custom CSV dataset.
See https://github.com/fizyr/keras-retinanet#csv-datasets for more information.
"""
def __init__(
self,
csv_data_file,
csv_class_file,
base_dir=None,
detect_quadrangle=False,
detect_text=False,
**kwargs
):
"""
Initialize a CSV data generator.
Args
csv_data_file: Path to the CSV annotations file.
csv_class_file: Path to the CSV classes file.
detect_text: if do text detection
base_dir: Directory w.r.t. where the files are to be searched (defaults to the directory containing the csv_data_file).
"""
self.image_names = []
self.image_data = {}
self.base_dir = base_dir
self.detect_quadrangle = detect_quadrangle
self.detect_text = detect_text
# Take base_dir from annotations file if not explicitly specified.
if self.base_dir is None:
if osp.exists(csv_data_file):
self.base_dir = ''
else:
self.base_dir = osp.dirname(csv_data_file)
# parse the provided class file
try:
with _open_for_csv(csv_class_file) as file:
# class_name --> class_id
self.classes = _read_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None)
self.labels = {}
# class_id --> class_name
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, x1, y1, x2, y2, x3, y3, x4, y4, class_name
try:
with _open_for_csv(csv_data_file) as file:
# {'img_path1':[{'x1':xx,'y1':xx,'x2':xx,'y2':xx,'x3':xx,'y3':xx,'x4':xx,'y4':xx, 'class':xx}...],...}
if self.detect_quadrangle:
self.image_data = _read_quadrangle_annotations(csv.reader(file, delimiter=','), self.classes,
self.detect_text)
else:
self.image_data = _read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None)
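# Balance the training list before handing it to the generator: the block below
# counts annotations per class, and any class with fewer than 5000 instances has
# its image paths repeated (oversampled) so rare classes appear more often per epoch.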
cls=list(self.classes.keys())
o=dict()
y=dict()
s=[]
for cl in cls:
b=0
p=list()
for k,v in self.image_data.items():
for vv in v:
if vv['class']==cl:
b+=1
p.append(k)
o[cl]=b
y[cl]=p
for k,v in y.items():
y[k]=set(v)
if (o[k])<5000:
new_list=[]
new_list.extend(
repeat(list(y[k]),np.ceil((5000/o[k])).astype(int))
)
y[k]=np.array(new_list).reshape(-1)
s.append(count_elements(y[k]))
a={}
for ss in s:
for sss in ss.keys():
if sss in a.keys():
a[sss]=max(a[sss],ss[sss])
else:
a[sss]=ss[sss]
image_list=[]
for k,v in a.items():
image_list.extend(repeat(k,v))
self.image_names = image_list
# self.image_names = list(self.image_data.keys())
super(CSVGenerator, self).__init__(detect_text=detect_text, detect_quadrangle=detect_quadrangle, **kwargs)
def size(self):
"""
Size of the dataset.
"""
return len(self.image_names)
def num_classes(self):
"""
Number of classes in the dataset.
"""
return max(self.classes.values()) + 1
def has_label(self, label):
"""
Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
"""
Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
"""
Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
"""
Map label to name.
"""
return self.labels[label]
def image_path(self, image_index):
"""
Returns the image path for image_index.
"""
return osp.join(self.base_dir, self.image_names[image_index])
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
# PIL is fast for metadata
image = Image.open(self.image_path(image_index))
return float(image.width) / float(image.height)
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
image = cv2.imread(self.image_path(image_index))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
path = self.image_names[image_index]
annotations = {'labels': np.empty((0,), dtype=np.int32),
'bboxes': np.empty((0, 4), dtype=np.float32)}
import numpy as np
import time as tm
import re
import warnings
from .. import Utils
from discretize import TreeMesh
def read_GOCAD_ts(tsfile):
"""
Read GOCAD triangulated surface (*.ts) file
INPUT:
tsfile: Triangulated surface
OUTPUT:
vrts : Array of vertices in XYZ coordinates [n x 3]
trgl : Array of index for triangles [m x 3]. The order of the vertices
is important and describes the normal
n = cross( (P2 - P1 ) , (P3 - P1) )
Author: @fourndo
.. note::
Remove all attributes from the GoCAD surface before exporting it!
"""
fid = open(tsfile)
line = fid.readline()
# Skip all the lines until the vertices
VRTX, TRGL = [], []
while "END" not in line:
while "VRTX" not in line:
line = fid.readline()
if "END\n" in line:
return VRTX, TRGL
vrtx = []
# Run down all the vertices and save in array
while np.any(["VRTX" in line, "PVRTX" in line]):
l_input = re.split(r"[\s*]", line)
temp = np.array(l_input[2:5])
vrtx.append(temp.astype(float))
# Read next line
line = fid.readline()
VRTX += [np.asarray(vrtx)]
# Skip lines to the triangles
while "TRGL" not in line:
line = fid.readline()
# Run down the list of triangles
trgl = []
# Run down all the vertices and save in array
while "TRGL" in line:
l_input = re.split(r"[\s*]", line)
temp = np.array(l_input[1:4])
trgl.append(temp.astype(int))
# Read next line
line = fid.readline()
TRGL += [np.asarray(trgl)]
return VRTX, TRGL
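# A minimal usage sketch (it assumes the file provides at least one surface with one
# triangle): recover the normal of the first triangle using the ordering convention
# n = cross(P2 - P1, P3 - P1) stated in the docstring. TRGL indices are 1-based.
def _demo_triangle_normal(VRTX, TRGL):
    verts, faces = VRTX[0], TRGL[0]
    p1, p2, p3 = verts[faces[0] - 1]
    return np.cross(p2 - p1, p3 - p1)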
def read_GOCAD_pl(plFile):
"""
Read GOCAD polyline file (*.pl)
INPUT:
plFile: Polyline object
OUTPUT:
vrts : List Array of vertices in XYZ coordinates [n x 3]
segs : List Array of index for segments [m x 3]. The order of the vertices
is important and describes the normal
n = cross( (P2 - P1 ) , (P3 - P1) )
Author: @fourndo
.. note::
Remove all attributes from the GoCAD surface before exporting it!
"""
fid = open(plFile)
line = fid.readline()
# Skip all the lines until the vertices
VRTX, SEGS = [], []
while "END" not in line:
while "VRTX" not in line:
line = fid.readline()
vrtx = []
# Run down all the vertices and save in array
while np.any(["VRTX" in line, "PVRTX" in line]):
l_input = re.split(r"[\s*]", line)
temp = np.array(l_input[2:5])
vrtx.append(temp.astype(float))
# Read next line
line = fid.readline()
VRTX += [np.asarray(vrtx)]
# Skip lines to the triangles
while "SEG" not in line:
line = fid.readline()
# Run down the list of triangles
segs = []
# Run down all the vertices and save in array
while "SEG" in line:
l_input = re.split(r"[\s*]", line)
temp = np.array(l_input[1:3])
segs.append(temp.astype(int))
# Read next line
line = fid.readline()
SEGS += [np.asarray(segs)]
return VRTX, SEGS
def surface2inds(vrtx, trgl, mesh, boundaries=True, internal=True):
""""
Function to read gocad polystructure file and output indexes of
mesh with in the structure.
"""
import vtk
import vtk.util.numpy_support as npsup
# Adjust the index
trgl = trgl - 1
# Make vtk pts
ptsvtk = vtk.vtkPoints()
ptsvtk.SetData(npsup.numpy_to_vtk(vrtx, deep=1))
# Make the polygon connection
polys = vtk.vtkCellArray()
for face in trgl:
poly = vtk.vtkPolygon()
poly.GetPointIds().SetNumberOfIds(len(face))
for nrv, vert in enumerate(face):
poly.GetPointIds().SetId(nrv, vert)
polys.InsertNextCell(poly)
# Make the polydata, structure of connections and vrtx
polyData = vtk.vtkPolyData()
polyData.SetPoints(ptsvtk)
polyData.SetPolys(polys)
# Make implicit func
ImpDistFunc = vtk.vtkImplicitPolyDataDistance()
ImpDistFunc.SetInput(polyData)
# Convert the mesh
vtkMesh = vtk.vtkRectilinearGrid()
vtkMesh.SetDimensions(mesh.nNx, mesh.nNy, mesh.nNz)
vtkMesh.SetXCoordinates(npsup.numpy_to_vtk(mesh.vectorNx, deep=1))
vtkMesh.SetYCoordinates(npsup.numpy_to_vtk(mesh.vectorNy, deep=1))
vtkMesh.SetZCoordinates(npsup.numpy_to_vtk(mesh.vectorNz, deep=1))
# Add indexes
vtkInd = npsup.numpy_to_vtk(np.arange(mesh.nC), deep=1)
vtkInd.SetName("Index")
vtkMesh.GetCellData().AddArray(vtkInd)
extractImpDistRectGridFilt = vtk.vtkExtractGeometry() # Object constructor
extractImpDistRectGridFilt.SetImplicitFunction(ImpDistFunc) #
extractImpDistRectGridFilt.SetInputData(vtkMesh)
if boundaries is True:
extractImpDistRectGridFilt.ExtractBoundaryCellsOn()
else:
extractImpDistRectGridFilt.ExtractBoundaryCellsOff()
if internal is True:
extractImpDistRectGridFilt.ExtractInsideOn()
else:
extractImpDistRectGridFilt.ExtractInsideOff()
print("Extracting indices from grid...")
# Executing the pipe
extractImpDistRectGridFilt.Update()
# Get index inside
insideGrid = extractImpDistRectGridFilt.GetOutput()
insideGrid = npsup.vtk_to_numpy(insideGrid.GetCellData().GetArray("Index"))
# Return the indexes inside
return insideGrid
def download(url, folder=".", overwrite=False, verbose=True):
"""
Function to download all files stored in a cloud directory
:param str url: url or list of urls for the file(s) to be downloaded ("https://...")
:param str folder: folder to where the directory is created and files downloaded (default is the current directory)
:param bool overwrite: overwrite if a file with the specified name already exists
:param bool verbose: print out progress
"""
# Download from cloud
import urllib
import os
import sys
def rename_path(downloadpath):
splitfullpath = downloadpath.split(os.path.sep)
# grab just the filename
fname = splitfullpath[-1]
fnamesplit = fname.split(".")
newname = fnamesplit[0]
# check if we have already re-numbered
newnamesplit = newname.split("(")
# add (num) to the end of the filename
if len(newnamesplit) == 1:
num = 1
else:
num = int(newnamesplit[-1][:-1])
num += 1
newname = "{}({}).{}".format(newnamesplit[0], num, fnamesplit[-1])
return os.path.sep.join(splitfullpath[:-1] + newnamesplit[:-1] + [newname])
# grab the correct url retriever
urlretrieve = urllib.request.urlretrieve
# ensure we are working with absolute paths and home directories dealt with
folder = os.path.abspath(os.path.expanduser(folder))
# make the directory if it doesn't currently exist
if not os.path.exists(folder):
os.makedirs(folder)
if isinstance(url, str):
filenames = [url.split("/")[-1]]
elif isinstance(url, list):
filenames = [u.split("/")[-1] for u in url]
downloadpath = [os.path.sep.join([folder, f]) for f in filenames]
# check if the directory already exists
for i, download in enumerate(downloadpath):
if os.path.exists(download):
if overwrite is True:
if verbose is True:
print(f"overwriting {download}")
elif overwrite is False:
while os.path.exists(download):
download = rename_path(download)
if verbose is True:
print(f"file already exists, new file is called {download}")
downloadpath[i] = download
# download files
urllist = url if isinstance(url, list) else [url]
for u, f in zip(urllist, downloadpath):
print(f"Downloading {u}")
urlretrieve(u, f)
print(" saved to: " + f)
print("Download completed!")
return downloadpath if isinstance(url, list) else downloadpath[0]
def readUBCmagneticsObservations(obs_file):
"""
Read and write UBC mag file format
INPUT:
:param fileName, path to the UBC obs mag file
OUTPUT:
:param survey
:param M, magnetization orentiaton (MI, MD)
"""
from geoapps.simpegPF.PF import BaseMag
fid = open(obs_file)
# First line has the inclination,declination and amplitude of B0
line = fid.readline()
B = np.array(line.split(), dtype=float)
# Second line has the magnetization orientation and a flag
line = fid.readline()
M = np.array(line.split(), dtype=float)
# Third line has the number of rows
line = fid.readline()
ndat = int(line.strip())
# Pre-allocate space for obsx, obsy, obsz, data, uncert
line = fid.readline()
temp = np.array(line.split(), dtype=float)
d = np.zeros(ndat, dtype=float)
wd = np.zeros(ndat, dtype=float)
locXYZ = np.zeros((ndat, 3), dtype=float)
ii = 0
while ii < ndat:
temp = np.array(line.split(), dtype=float)
if len(temp) > 0:
locXYZ[ii, :] = temp[:3]
if len(temp) > 3:
d[ii] = temp[3]
if len(temp) == 5:
wd[ii] = temp[4]
ii += 1
line = fid.readline()
rxLoc = BaseMag.RxObs(locXYZ)
srcField = BaseMag.SrcField([rxLoc], param=(B[2], B[0], B[1]))
survey = BaseMag.LinearSurvey(srcField)
survey.dobs = d
survey.std = wd
return survey, M
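# Illustrative sketch of the UBC-MAG3D observation file parsed above (all numbers
# are made up): line 1 holds inclination, declination and amplitude of B0, line 2
# the magnetization orientation plus a flag, line 3 the number of rows, followed
# by one "x y z data uncertainty" row per observation.
#   60.00 25.00 50000.00
#   60.00 25.00 1.00
#   1
#   500.0 250.0 10.0 12.3 1.0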
def writeUBCmagneticsObservations(filename, survey, d):
"""
writeUBCobs(filename,B,M,rxLoc,d,wd)
Function writing an observation file in UBC-MAG3D format.
INPUT
filename : Name of out file including directory
survey
flag : dobs | dpred
OUTPUT
Obsfile
Created on Dec, 27th 2015
@author: dominiquef
"""
B = survey.srcField.param
rxLoc = survey.srcField.rxList[0].locs
wd = survey.std
if d.ndim == 2:
d = d[0]
data = np.c_[rxLoc, d, wd]
head = (
"{:6.2f} {:6.2f} {:6.2f}\n".format(B[1], B[2], B[0])
+ "{:6.2f} {:6.2f} {:6.2f}\n".format(B[1], B[2], 1)
+ "%i" % len(d)
)
np.savetxt(
filename, data, fmt="%e", delimiter=" ", newline="\n", header=head, comments=""
)
def readUBCgravityObservations(obs_file):
"""
Read UBC grav file format
INPUT:
:param fileName, path to the UBC obs grav file
OUTPUT:
:param survey
"""
from geoapps.simpegPF.PF import BaseGrav
fid = open(obs_file)
# First line has the number of rows
line = fid.readline()
ndat = int(line.split()[0])
# Pre-allocate space for obsx, obsy, obsz, data, uncert
line = fid.readline()
d = np.zeros(ndat, dtype=float)
wd = np.zeros(ndat, dtype=float)
locXYZ = np.zeros((ndat, 3), dtype=float)
ii = 0
while ii < ndat:
temp = np.array(line.split(), dtype=float)
if len(temp) > 0:
locXYZ[ii, :] = temp[:3]
d[ii] = temp[3]
wd[ii] = temp[4]
ii += 1
line = fid.readline()
rxLoc = BaseGrav.RxObs(locXYZ)
srcField = BaseGrav.SrcField([rxLoc])
survey = BaseGrav.LinearSurvey(srcField)
survey.dobs = -d
survey.std = wd
return survey
def writeUBCgravityObservations(filename, survey, d):
"""
Write UBC grav file format
INPUT:
:param: fileName, path to the UBC obs grav file
:param: survey Gravity object
:param: data array
"""
rxLoc = survey.srcField.rxList[0].locs
wd = survey.std
data = np.c_[rxLoc, -d, wd]
head = "%i" % len(d)
np.savetxt(
filename, data, fmt="%e", delimiter=" ", newline="\n", header=head, comments=""
)
def writeVectorUBC(mesh, fileName, model):
"""
Writes a vector model associated with a SimPEG TensorMesh
to a UBC-GIF format model file.
:param string fileName: File to write to
:param numpy.ndarray model: The model
"""
if model.ndim == 1:
# Catch the standard case that the model is (3*nC,1) instead of (nC,3)
model = model.reshape((-1, 3), order="F")
modelMatTR = np.zeros_like(model)
if isinstance(mesh, TreeMesh):
ubc_order = mesh._ubc_order
for ii in range(3):
modelMatTR[:, ii] = model[ubc_order, ii]
else:
for ii in range(3):
# Reshape model to a matrix
modelMat = mesh.r(model[:, ii], "CC", "CC", "M")
# Transpose the axes
modelMatT = modelMat.transpose((2, 0, 1))
# Flip z to positive down
modelMatTR[:, ii] = Utils.mkvc(modelMatT[::-1, :, :])
# Flip Z for UBC file format
modelMatTR[:, 2] *= -1
np.savetxt(fileName, modelMatTR)
def readVectorUBC(mesh, fileName):
"""
Read a vector model associated with a SimPEG TensorMesh
to a UBC-GIF format model file.
:param string fileName: File to write to
:param numpy.ndarray model: The model
"""
# f = open(fileName, 'r')
model = np.loadtxt(fileName)
vModel = np.zeros((mesh.nC, 3))
# f.close()
if isinstance(mesh, TreeMesh):
ubc_order = mesh._ubc_order
for ii in range(3):
vModel[ubc_order, ii] = model[:, ii]
else:
for ii in range(3):
comp = np.reshape(model[:, ii], (mesh.nCz, mesh.nCx, mesh.nCy), order="F")
comp = comp[::-1, :, :]
comp = np.transpose(comp, (1, 2, 0))
import numpy as np
import pandas as pd
import skfuzzy as fuzz
from skfuzzy import control as ctrl
## Criando as variáveis do problema
# Antecedentes
design = ctrl.Antecedent(np.arange(1, 6), 'design')
# -*- coding: utf-8 -*-
import numpy as np
from . import fast_dawson
import torch
from torch import Tensor
from typing import Tuple
class Param_Container:
"""
args:
_vol_rest: the rest voltage of a neuron
_vol_th: the fire threshold of a neuron
_t_ref: the refractory time of a neuoron after it fired
_conductance: the conductance of a neuron's membrane
_ratio: num Excitation neurons : num Inhibition neurons
degree: from Balanced network, the in-degree of synaptic connection follows Poisson Distribution
with mean and variance K
"""
def __init__(self):
self.ratio = 0.0
self.L = 0.05
self.t_ref = 5.0
self.vol_th = 20.0
self.vol_rest = 0.0
self.eps = 1e-5
self.special_factor = 4 / np.sqrt(2 * np.pi * self.L) * 0.8862269251743827
self.correction_factor = 2 / np.sqrt(2 * self.L)
self.cut_off = 10.0
self.ignore_t_ref = True
self.degree = 100
def get_degree(self):
return self.degree
def set_degree(self, degree):
self.degree = degree
def get_ratio(self):
return self.ratio
def set_ratio(self, ratio):
self.ratio = ratio
def get_t_ref(self):
return self.t_ref
def set_t_ref(self, t_ref):
self.t_ref = t_ref
def get_vol_th(self):
return self.vol_th
def set_vol_th(self, vol_th):
self.vol_th = vol_th
def get_vol_rest(self):
return self.vol_rest
def set_vol_rest(self, vol_rest):
self.vol_rest = vol_rest
def get_conductance(self):
return self.L
def set_conductance(self, conductance):
self.L = conductance
self.correction_factor = 2 / np.sqrt(2 * self.L)
self.special_factor = 4 / np.sqrt(2 * np.pi * self.L) * 0.8862269251743827
def get_special_factor(self):
return self.special_factor
def set_special_factor(self, factor):
self.special_factor = factor
def set_ignore_t_ref(self, flag: bool = True):
self.ignore_t_ref = flag
def is_ignore_t_ref(self):
return self.ignore_t_ref
def get_eps(self):
return self.eps
def set_eps(self, eps):
self.eps = eps
def get_cut_off(self):
return self.cut_off
def set_cut_off(self, cut_off):
self.cut_off = cut_off
def reset_params(self):
self.ratio = 0.0
self.L = 0.05
self.t_ref = 5.0
self.vol_th = 20.0
self.vol_rest = 0.0
self.eps = 1e-5
self.special_factor = 4 / np.sqrt(2 * np.pi * self.L) * 0.8862269251743827
self.correction_factor = 2 / np.sqrt(2 * self.L)
self.cut_off = 10.0
self.ignore_t_ref = True
self.degree = 100
def print_params(self):
print("Voltage threshold:", self.get_vol_th())
print("Voltage rest:", self.get_vol_rest())
print("Refractory time:", self.get_t_ref())
print("Membrane conductance:", self.get_conductance())
print("E-I ratio:", self.get_ratio())
print("eps: ", self.get_eps())
print("cut_off:", self.get_cut_off())
print("degree:", self.get_degree())
class Mnn_Core_Func(Param_Container):
def __init__(self):
super(Mnn_Core_Func, self).__init__()
self.Dawson1 = fast_dawson.Dawson1()
self.Dawson2 = fast_dawson.Dawson2()
# compute the up and low bound of integral
def compute_bound(self, ubar, sbar):
indx0 = sbar > 0
with np.errstate(all="raise"):
ub = (self.vol_th * self.L - ubar) / (np.sqrt(self.L) * sbar + ~indx0)
lb = (self.vol_rest * self.L - ubar) / (sbar * np.sqrt(self.L) + ~indx0)
return ub, lb, indx0
def forward_fast_mean(self, ubar, sbar):
'''Calculates the mean output firing rate given the mean & std of input firing rate'''
# Divide input domain to several regions
indx0 = sbar > 0
indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)
indx2 = indx0 & indx1
mean_out = np.zeros(ubar.shape)
# Region 0 is approx zero for sufficiently large cut_off
# Region 1 is calculate normally
ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))
lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))
temp_mean = 2 / self.L * (self.Dawson1.int_fast(ub) - self.Dawson1.int_fast(lb))
mean_out[indx2] = 1 / (temp_mean + self.t_ref)
# Region 2 is calculated with analytical limit as sbar --> 0
indx3 = np.logical_and(~indx0, ubar <= self.vol_th * self.L)
indx4 = np.logical_and(~indx0, ubar > self.vol_th * self.L)
mean_out[indx3] = 0.0
mean_out[indx4] = 1 / (self.t_ref - 1 / self.L * np.log(1 - 1 / ubar[indx4]))
return mean_out
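# Usage sketch (the input values are arbitrary):
#   core = Mnn_Core_Func()
#   u_a = core.forward_fast_mean(np.array([1.5, 0.5]), np.array([1.0, 0.0]))
# The first entry is handled by the Dawson-integral branch; the second has
# sbar == 0 with sub-threshold mean input, so its output rate is exactly zero.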
def backward_fast_mean(self, ubar, sbar, u_a):
indx0 = sbar > 0
indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)
indx2 = indx0 & indx1
grad_uu = np.zeros(ubar.shape) # Fano factor
# Region 0 is approx zero for sufficiently large cut_off
# Region 1 is calculate normally
ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))
lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))
delta_g = self.Dawson1.dawson1(ub) - self.Dawson1.dawson1(lb)
grad_uu[indx2] = u_a[indx2] * u_a[indx2] / sbar[indx2] * delta_g * 2 / self.L / np.sqrt(self.L)
# Region 2 is calculated with analytical limit as sbar --> 0
indx6 = np.logical_and(~indx0, ubar <= 1)
indx4 = np.logical_and(~indx0, ubar > 1)
grad_uu[indx6] = 0.0
grad_uu[indx4] = self.vol_th * u_a[indx4] * u_a[indx4] / ubar[indx4] / (ubar[indx4] - self.vol_th * self.L)
# ---------------
grad_us = np.zeros(ubar.shape)
temp = self.Dawson1.dawson1(ub) * ub - self.Dawson1.dawson1(lb) * lb
grad_us[indx2] = u_a[indx2] * u_a[indx2] / sbar[indx2] * temp * 2 / self.L
return grad_uu, grad_us
def forward_fast_std(self, ubar, sbar, u_a):
'''Calculates the std of output firing rate given the mean & std of input firing rate'''
# Divide input domain to several regions
indx0 = sbar > 0
indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)
indx2 = indx0 & indx1
fano_factor = np.zeros(ubar.shape) # Fano factor
# Region 0 is approx zero for sufficiently large cut_off
# Region 1 is calculate normally
ub = (self.vol_th * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))
lb = (self.vol_rest * self.L - ubar[indx2]) / (sbar[indx2] * np.sqrt(self.L))
# cached mean used
varT = 8 / self.L / self.L * (self.Dawson2.int_fast(ub) - self.Dawson2.int_fast(lb))
fano_factor[indx2] = varT * u_a[indx2] * u_a[indx2]
# Region 2 is calculated with analytical limit as sbar --> 0
fano_factor[~indx0] = (ubar[~indx0] < 1) + 0.0
std_out = np.sqrt(fano_factor * u_a)
return std_out
def backward_fast_std(self, ubar, sbar, u_a, s_a):
'''Calculates the gradient of the std of the firing rate with respect to the mean & std of input firing rate'''
# Divide input domain to several regions
indx0 = sbar > 0
        indx1 = (self.vol_th * self.L - ubar) < (self.cut_off * np.sqrt(self.L) * sbar)
# Utility Functions
# Authors: <NAME>
# Edited by: <NAME>
'''
Used by the user to define channels that are hard coded for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
stringtest = patient.find('seiz')
if stringtest == -1:
stringtest = patient.find('sz')
if stringtest == -1:
stringtest = patient.find('aw')
if stringtest == -1:
stringtest = patient.find('aslp')
if stringtest == -1:
stringtest = patient.find('_')
if stringtest == -1:
print("Not sz, seiz, aslp, or aw! Please add additional naming possibilities, or tell data gatherers to rename datasets.")
else:
pat_id = patient[0:stringtest]
seiz_id = patient[stringtest:]
# remove any underscores
pat_id = re.sub('_', '', pat_id)
seiz_id = re.sub('_', '', seiz_id)
return pat_id, seiz_id
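# --- Usage sketch (added; the example names below are illustrative only) ---
# splitpatient() splits a combined recording name into a patient id and a
# seizure/state id at the first known marker ('seiz', 'sz', 'aw', 'aslp' or '_'),
# stripping any underscores afterwards.
def _demo_splitpatient():
    print(splitpatient('pt1sz2'))        # expected: ('pt1', 'sz2')
    print(splitpatient('la01_ictal2'))   # expected: ('la01', 'ictal2')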
def returnindices(pat_id, seiz_id=None):
included_indices, onsetelecs, clinresult = returnnihindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnlaindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnummcindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returnjhuindices(
pat_id, seiz_id)
if included_indices.size == 0:
included_indices, onsetelecs, clinresult = returntngindices(
pat_id, seiz_id)
return included_indices, onsetelecs, clinresult
def returntngindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'id001ac':
# included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
# np.arange(56,77), np.arange(78,80)))
included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
elif pat_id == 'id002cj':
# included_indices = np.array(np.arange(0,184))
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
30, 31, 32, 33, 34, 35, 36, 37, 38,
45, 46, 47, 48, 49, 50, 51, 52, 53,
60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
106, 107, 108, 115, 116, 117, 118, 119,
120, 121, 122, 123, 129, 130, 131, 132, 133,
134, 135, 136, 137,
# np.arange(143, 156)
143, 144, 145, 146, 147,
148, 149, 150, 151, 157, 158, 159, 160, 161,
162, 163, 164, 165, 171, 172, 173, 174, 175,
176, 177, 178, 179, 180, 181, 182])
elif pat_id == 'id003cm':
included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
np.arange(40,50), np.arange(55,69), np.arange(70,79)))
elif pat_id == 'id004cv':
# removed OC'10, SC'5, CC'14/15
included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
np.arange(40,59), np.arange(60,110)))
elif pat_id == 'id005et':
included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
np.arange(52,62), np.arange(62,87)))
elif pat_id == 'id006fb':
included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
np.arange(115,123)))
elif pat_id == 'id008gc':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
elif pat_id == 'id009il':
included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
elif pat_id == 'id010js':
included_indices = np.concatenate((np.arange(0,14),
np.arange(15,29), np.arange(30,42), np.arange(43,52),
np.arange(53,65), np.arange(66,75), np.arange(76,80),
np.arange(81,85), np.arange(86,94), np.arange(95,98),
np.arange(99,111),
np.arange(112,124)))
elif pat_id == 'id011ml':
included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
np.arange(69,82), np.arange(82,125)))
elif pat_id == 'id012pc':
included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
np.arange(18,28), np.arange(31,41), np.arange(44,56),
np.arange(57,69), np.arange(70,82), np.arange(83,96),
np.arange(97,153)))
elif pat_id == 'id013pg':
included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
76, 77, 78])
elif pat_id == 'id014rb':
included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
160, 161, 162, 163, 164])
elif pat_id == 'id015sf':
included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
np.arange(78,121)))
return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
        included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
                                           np.arange(20, 25), np.arange(26, 37),
                                           np.arange(42, 44), np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt5':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt6':
# [1:36 42:43 46 52:56 58:71 73:95]
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 46), np.arange(51, 56), np.arange(57, 71), np.arange(72, 95)))
onsetelecs = set(['LA1', 'LA2', 'LA3', 'LA4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2', 'LPH3', 'LPH4'])
resectelecs = set(['LALT1', 'LALT2', 'LALT3', 'LALT4', 'LALT5', 'LALT6',
'LAST1', 'LAST2', 'LAST3', 'LAST4',
'LA1', 'LA2', 'LA3', 'LA4', 'LPST4',
'LAH1', 'LAH2', 'LAH3', 'LAH4',
'LPH1', 'LPH2'])
clinresult = 2
elif pat_id == 'pt7':
# [1:17 19:35 37:38 41:62 67:109]
included_indices = np.concatenate((np.arange(0, 17), np.arange(18, 35),
np.arange(36, 38), np.arange(40, 62), np.arange(66, 109)))
onsetelecs = set(['MFP1', 'LFP3',
'PT2', 'PT3', 'PT4', 'PT5',
'MT2', 'MT3',
'AT3', 'AT4',
'G29', 'G30', 'G39', 'G40', 'G45', 'G46'])
resectelecs = set(['G28', 'G29', 'G30', 'G36', 'G37', 'G38', 'G39',
'G41', 'G44', 'G45', 'G46',
'LFP1', 'LFP2', 'LSF3', 'LSF4'])
clinresult = 3
elif pat_id == 'pt8':
# [1:19 21 23 30:37 39:40 43:64 71:76]
        included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 21),
                                           np.arange(22, 23), np.arange(29, 37),
                                           np.arange(38, 40), np.arange(42, 64),
                                           np.arange(70, 76)))
onsetelecs = set(['G19', 'G23', 'G29', 'G30', 'G31',
'TO6', 'TO5',
'MST3', 'MST4',
'O8', 'O9'])
resectelecs = set(['G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'MST2', 'MST3', 'MST4', 'PST2', 'PST3', 'PST4'])
clinresult = 1
elif pat_id == 'pt10':
# [1:3 5:19 21:35 48:69]
included_indices = np.concatenate((np.arange(0, 3), np.arange(4, 19),
np.arange(20, 35), np.arange(47, 69)))
onsetelecs = set(['TT1', 'TT2', 'TT4', 'TT6',
'MST1',
'AST2'])
resectelecs = set(['G3', 'G4', 'G5', 'G6', 'G11', 'G12', 'G13', 'G14',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6', 'AST1', 'AST2', 'AST3', 'AST4'])
clinresult = 2
elif pat_id == 'pt11':
# [1:19 21:35 37 39 40 43:74 76:81 83:84]
        included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 35),
                                           np.arange(36, 37), np.arange(38, 40),
                                           np.arange(42, 74), np.arange(75, 81),
                                           np.arange(82, 84)))
onsetelecs = set(['RG29', 'RG30', 'RG31', 'RG37', 'RG38', 'RG39',
'RG44', 'RG45'])
resectelecs = set(['RG4', 'RG5', 'RG6', 'RG7', 'RG12',
'RG13', 'RG14', 'RG15',
'RG21', 'RG22', 'RG23', 'RG29', 'RG30',
'RG31', 'RG37', 'RG38', 'RG39', 'RG45', 'RG46', 'RG47'])
clinresult = 1
elif pat_id == 'pt12':
# [1:15 17:33 38:39 42:61]
included_indices = np.concatenate((np.arange(0, 15), np.arange(16, 33),
np.arange(37, 39), np.arange(41, 61)))
onsetelecs = set(['AST1', 'AST2',
'TT2', 'TT3', 'TT4', 'TT5'])
resectelecs = set(['G19', 'G20', 'G21', 'G22', 'G23', 'G27', 'G28', 'G29', 'G30', 'G31',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 2
elif pat_id == 'pt13':
# [1:36 39:40 43:66 69:74 77 79:94 96:103 105:130]
        included_indices = np.concatenate((np.arange(0, 36), np.arange(38, 40),
                                           np.arange(42, 66), np.arange(68, 74),
                                           np.arange(76, 77), np.arange(78, 94),
                                           np.arange(95, 103), np.arange(104, 130)))
onsetelecs = set(['G1', 'G2', 'G9', 'G10', 'G17', 'G18'])
resectelecs = set(['G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11',
'G17', 'G18', 'G19',
'AP2', 'AP3', 'AP4'])
clinresult = 1
elif pat_id == 'pt14':
# [1:19 21:37 41:42 45:61 68:78]
        included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 10),
                                           np.arange(11, 17), np.arange(18, 19),
                                           np.arange(20, 37), np.arange(40, 42),
                                           np.arange(44, 61), np.arange(67, 78)))
onsetelecs = set(['MST1', 'MST2',
'TT1', 'TT2', 'TT3',
'AST1', 'AST2'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'AST1', 'AST2',
'MST1', 'MST2', 'PST1'])
clinresult = 4
elif pat_id == 'pt15':
# [2:7 9:30 32:36 41:42 45:47 49:66 69 71:85];
        included_indices = np.concatenate((np.arange(1, 7), np.arange(8, 30),
                                           np.arange(31, 36), np.arange(40, 42),
                                           np.arange(44, 47), np.arange(48, 66),
                                           np.arange(68, 69), np.arange(70, 85)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4',
'MST1', 'MST2', 'AST1', 'AST2', 'AST3'])
resectelecs = set(['G2', 'G3', 'G4', 'G5', 'G10', 'G11', 'G12', 'G13',
'TT1', 'TT2', 'TT3', 'TT4', 'TT5',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt16':
# [1:19 21:37 42:43 46:53]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 53)))
onsetelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST3', 'MST4',
'G26', 'G27', 'G28', 'G18', 'G19', 'G20', 'OF4'])
resectelecs = set(['G18', 'G19', 'G20', 'G26', 'G27', 'G28',
'G29', 'G30', 'TT1', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'
])
clinresult = 1
elif pat_id == 'pt17':
# [1:19 21:37 42:43 46:51]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 51)))
onsetelecs = set(['TT1', 'TT2'])
resectelecs = set(['G27', 'G28', 'G29', 'G30',
'TT', 'TT2', 'TT3', 'TT4', 'TT5', 'TT6',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
return included_indices, onsetelecs, clinresult
def returnlaindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
spreadelecs = None
if pat_id == 'la01':
# [1 3 7:8 11:13 17:19 22:26 32 34:35 37 42 50:55 58 ...
# 62:65 70:72 77:81 84:97 100:102 105:107 110:114 120:121 130:131];
# onset_electrodes = {'Y''1', 'X''4', ...
# 'T''5', 'T''6', 'O''1', 'O''2', 'B1', 'B2',...% rare onsets
# }
        included_indices = np.concatenate((np.arange(0, 3), np.arange(6, 8), np.arange(10, 13),
                                           np.arange(16, 19), np.arange(21, 26), np.arange(31, 32),
                                           np.arange(33, 35), np.arange(36, 37), np.arange(41, 42),
                                           np.arange(49, 55), np.arange(57, 58), np.arange(61, 65),
                                           np.arange(69, 72), np.arange(76, 81), np.arange(83, 97),
                                           np.arange(99, 102), np.arange(104, 107), np.arange(109, 114),
                                           np.arange(119, 121), np.arange(129, 131)))
onsetelecs = ["X'4", "T'5", "T'6", "O'1", "O'2", "B1", "B2"]
spreadelecs = ["P1", "P2", 'P6', "X1", "X8", "X9", "E'2", "E'3"
"T'1"]
if seiz_id == 'inter2':
            included_indices = np.concatenate((np.arange(0, 1), np.arange(7, 16), np.arange(21, 28),
                                               np.arange(33, 36), np.arange(39, 40), np.arange(42, 44), np.arange(46, 50),
                                               np.arange(56, 58), np.arange(62, 65), np.arange(66, 68), np.arange(69, 75),
np.arange(76, 83), np.arange(85, 89), np.arange(96, 103),
np.arange(106, 109), np.arange(111, 115), np.arange(116, 117),
np.arange(119, 123), np.arange(126, 127), np.arange(130, 134),
np.arange(136, 137), np.arange(138, 144), np.arange(146, 153)))
if seiz_id == 'ictal2':
            included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19), np.arange(20, 33),
                                               np.arange(34, 37), np.arange(38, 40), np.arange(42, 98),
                                               np.arange(107, 136), np.arange(138, 158)))
onsetelecs = ["Y'1"]
clinresult = 1
elif pat_id == 'la02':
# [1:4 7 9 11:12 15:18 21:28 30:34 47 50:62 64:67 ...
# 70:73 79:87 90 95:99]
        included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 7), np.arange(8, 9),
                                           np.arange(10, 12), np.arange(14, 18), np.arange(20, 28),
                                           np.arange(29, 34), np.arange(46, 47), np.arange(49, 62),
                                           np.arange(63, 67), np.arange(69, 73), np.arange(78, 87),
                                           np.arange(89, 90), np.arange(94, 99)))
onsetelecs = ["L'2", "L'3", "L'4"]
clinresult = 1
elif pat_id == 'la03':
# [1:3 6:33 36:68 77:163]
included_indices = np.concatenate((np.arange(0, 3), np.arange(5, 33),
np.arange(35, 68), np.arange(76, 163)))
onsetelecs = ["L7"]
clinresult = 2
elif pat_id == 'la04':
# [1:4 9:13 15:17 22 24:32 44:47 52:58 60 63:64 ...
# 67:70 72:74 77:84 88:91 94:96 98:101 109:111 114:116 121 123:129];
        included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 13),
                                           np.arange(14, 17), np.arange(21, 22), np.arange(23, 32),
np.arange(43, 47), np.arange(51, 58), np.arange(59, 60),
np.arange(62, 64), np.arange(66, 70), np.arange(71, 74),
np.arange(76, 84), np.arange(87, 91), np.arange(93, 96),
np.arange(97, 101), np.arange(108, 111), np.arange(113, 116),
np.arange(120, 121), np.arange(122, 129)))
# FIRST ABLATION WAS A FAILURE
onsetelecs = ["L'4", "G'1", # 2ND RESECTION REMOVED ALL OF M' ELECTRODES
"M'1", "M'2", "M'3", "M'4", "M'5", "M'6", "M'7",
"M'8", "M'9", "M'10", "M'11", "M'12", "M'13", "M'14", "M'15", "M'16"]
clinresult = 2
elif pat_id == 'la05':
# [2:4 7:15 21:39 42:82 85:89 96:101 103:114 116:121 ...
# 126:145 147:152 154:157 160:161 165:180 182:191];
        included_indices = np.concatenate((np.arange(1, 4), np.arange(6, 15),
                                           np.arange(20, 39), np.arange(41, 82), np.arange(84, 89),
np.arange(95, 101), np.arange(102, 114), np.arange(115, 121),
np.arange(125, 145), np.arange(146, 152), np.arange(153, 157),
np.arange(159, 161), np.arange(164, 180), np.arange(181, 191)))
onsetelecs = ["T'1", "T'2", "D'1", "D'2"]
clinresult = 1
elif pat_id == 'la06':
# [1:4 7:12 14:17 19 21:33 37 46:47 50:58 61:62 70:73 77:82 ...
# 84:102 104:112 114:119];
        included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
                                           np.arange(13, 17), np.arange(18, 19), np.arange(20, 33),
np.arange(36, 37), np.arange(45, 47), np.arange(49, 58),
np.arange(60, 62), np.arange(69, 73), np.arange(76, 82),
np.arange(83, 102), np.arange(103, 112), np.arange(113, 119)))
onsetelecs = ["Q'3", "Q'4", "R'3", "R'4"]
clinresult = 2
elif pat_id == 'la07':
# [1:18 22:23 25 34:37 44 48:51 54:55 57:69 65:66 68:78 ...
# 82:83 85:93 96:107 114:120];
        included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 18), np.arange(21, 23),
                                           np.arange(24, 25), np.arange(33, 37), np.arange(43, 44),
np.arange(47, 51), np.arange(53, 55), np.arange(56, 69),
np.arange(64, 66), np.arange(67, 78), np.arange(81, 83),
np.arange(84, 93), np.arange(95, 107), np.arange(113, 120)))
onsetelecs = ["T'1", "T'3", "R'8", "R'9"]
clinresult = 1
elif pat_id == 'la08':
# [1:2 8:13 15:19 22 25 27:30 34:35 46:48 50:57 ...
# 65:68 70:72 76:78 80:84 87:93 100:102 105:108 110:117 123:127 130:131 133:137 ...
# 140:146]
        included_indices = np.concatenate((np.arange(0, 2), np.arange(7, 13),
                                           np.arange(14, 19), np.arange(21, 22), np.arange(24, 25),
np.arange(26, 30), np.arange(33, 35), np.arange(45, 48),
np.arange(49, 57), np.arange(64, 68), np.arange(69, 72),
np.arange(75, 78), np.arange(79, 84), np.arange(86, 93),
np.arange(99, 102), np.arange(104, 108), np.arange(109, 117),
np.arange(122, 127), np.arange(129, 131), np.arange(132, 137),
np.arange(139, 146)))
onsetelecs = ["Q2"]
clinresult = 2
elif pat_id == 'la09':
# [3:4 7:17 21:28 33:38 42:47 51:56 58:62 64:69 ...
# 73:80 82:84 88:92 95:103 107:121 123 126:146 150:161 164:169 179:181 ...
# 183:185 187:191]
# 2/7/18 - got rid of F10 = looking at edf was super noisy
        included_indices = np.concatenate((np.arange(2, 3), np.arange(6, 17),
                                           np.arange(20, 28), np.arange(32, 38), np.arange(41, 47),
                                           np.arange(50, 56), np.arange(57, 62), np.arange(63, 66), np.arange(67, 69),
np.arange(72, 80), np.arange(81, 84), np.arange(87, 92),
np.arange(94, 103), np.arange(106, 121), np.arange(122, 123),
np.arange(125, 146), np.arange(149, 161), np.arange(163, 169),
np.arange(178, 181), np.arange(182, 185), np.arange(186, 191)))
onsetelecs = ["X'1", "X'2", "X'3", "X'4", "U'1", "U'2"]
if seiz_id == 'ictal2':
included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
np.arange(20, 39), np.arange(41, 189)))
onsetelecs = ["P'1", "P'2"]
clinresult = 2
elif pat_id == 'la10':
# [1:4 7:13 17:19 23:32 36:37 46:47 50 54:59 62:66 68:79 82:96 ...
# 99:106 108:113 117:127 135:159 163:169 172:173 176:179 181:185];
        included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 13),
                                           np.arange(16, 19), np.arange(22, 32), np.arange(35, 37),
np.arange(45, 47), np.arange(49, 50), np.arange(53, 59),
np.arange(61, 66), np.arange(67, 79), np.arange(81, 96),
np.arange(98, 106), np.arange(107, 113), np.arange(116, 127),
np.arange(134, 159), np.arange(162, 169), np.arange(171, 173),
np.arange(175, 179), np.arange(180, 185)))
onsetelecs = ["S1", "S2", "R2", "R3"]
clinresult = 2
elif pat_id == 'la11':
# [3:4 7:16 22:30 33:39 42 44:49 53:62 64:87 91:100 ...
# 102:117 120:127 131:140 142:191];
        included_indices = np.concatenate((np.arange(2, 4), np.arange(6, 16),
                                           np.arange(21, 30), np.arange(32, 39), np.arange(41, 42), np.arange(43, 49),
                                           np.arange(52, 62), np.arange(63, 87), np.arange(90, 100), np.arange(101, 117),
np.arange(119, 127), np.arange(130, 140), np.arange(141, 191)))
onsetelecs = ["D6", "Z10"]
clinresult = 2
elif pat_id == 'la12':
        included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 15),
                                           np.arange(19, 23), np.arange(24, 31), np.arange(34, 36), np.arange(42, 44), np.arange(47, 48),
                                           np.arange(49, 59), np.arange(61, 66), np.arange(68, 86), np.arange(87, 90),
                                           np.arange(91, 100), np.arange(101, 119), np.arange(121, 129), np.arange(131, 134),
np.arange(136, 150), np.arange(153, 154), np.arange(156, 161),
np.arange(167, 178), np.arange(187, 191)))
onsetelecs = ["S1", "S2", "R2", "R3"]
clinresult = 3
elif pat_id == 'la13':
# [1:4 7:12 23:33 36:37 44:45 48:70 72:93]
        included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 12),
                                           np.arange(22, 33), np.arange(35, 37), np.arange(43, 45),
np.arange(47, 70), np.arange(71, 93)))
onsetelecs = ["Y13", "Y14"]
clinresult = 2
elif pat_id == 'la15':
# included_channels = [1:4 9:12 15:19 21:27 30:34 36:38 43:57 62:66 ...
# 68:71 76:85 89:106 108:112 114:115 118:124 127:132 135:158 ...
# 161:169 171:186]
        included_indices = np.concatenate((np.arange(0, 4), np.arange(8, 12),
                                           np.arange(14, 19), np.arange(20, 27), np.arange(29, 34),
np.arange(35, 38), np.arange(42, 57), np.arange(61, 66),
np.arange(67, 71), np.arange(75, 85), np.arange(88, 106),
np.arange(107, 112), np.arange(113, 115), np.arange(117, 124),
np.arange(126, 132), np.arange(134, 158), np.arange(160, 169), np.arange(170, 186)))
if seiz_id == 'ictal':
            included_indices = np.concatenate((np.arange(0, 4), np.arange(6, 19),
                                               np.arange(20, 39), np.arange(41, 95), np.arange(96, 112),
np.arange(113, 132), np.arange(134, 187)))
onsetelecs = ["R1", "R2", "R3"]
clinresult = 4
elif pat_id == 'la16':
# [1:3 10:16 23:24 28 31:35 37:39 42:44 46:47 ...
# 49:54 58:62 64:65 68:70 76:89 93:98 100:101 105:124 126 128:130 ...
# 132:134 136:140 142:144 149:156 158:163 165:166 168:170 173:181
# 183:189];
        included_indices = np.concatenate((np.arange(0, 3), np.arange(9, 16),
                                           np.arange(22, 24), np.arange(27, 28), np.arange(30, 35),
np.arange(36, 39), np.arange(41, 44), np.arange(45, 47),
np.arange(48, 54), np.arange(57, 62), np.arange(63, 65),
                                           np.arange(67, 70), np.arange(75, 89), np.arange(92, 98),
                                           np.arange(99, 101), np.arange(104, 124), np.arange(125, 126),
                                           np.arange(127, 130), np.arange(131, 134), np.arange(135, 140),
                                           np.arange(141, 144), np.arange(148, 156), np.arange(157, 163),
                                           np.arange(164, 166), np.arange(167, 170), np.arange(172, 181),
                                           np.arange(182, 189)))
    return included_indices, onsetelecs, clinresult
# - * - coding: utf-8 - * -
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal
from ..signal import signal_smooth
from ..signal import signal_zerocrossings
def ecg_findpeaks(ecg_cleaned, sampling_rate=1000, method="neurokit", show=False):
"""Find R-peaks in an ECG signal.
Low-level function used by `ecg_peaks()` to identify R-peaks in an ECG signal using a different set of algorithms. See `ecg_peaks()` for details.
Parameters
----------
ecg_cleaned : list, array or Series
The cleaned ECG channel as returned by `ecg_clean()`.
sampling_rate : int
The sampling frequency of `ecg_signal` (in Hz, i.e., samples/second).
Defaults to 1000.
method : string
The algorithm to be used for R-peak detection. Can be one of 'neurokit' (default),
        'pantompkins1985', 'hamilton2002', 'christov2004', 'gamboa2008', 'elgendi2010', 'engzeemod2012', 'kalidas2017', 'martinez2003' or 'rodrigues2020'.
show : bool
If True, will return a plot to visualizing the thresholds used in the
algorithm. Useful for debugging.
Returns
-------
info : dict
A dictionary containing additional information, in this case the
samples at which R-peaks occur, accessible with the key "ECG_R_Peaks".
See Also
--------
ecg_clean, signal_fixpeaks, ecg_peaks, ecg_rate, ecg_process, ecg_plot
Examples
--------
>>> import neurokit2 as nk
>>>
>>> ecg = nk.ecg_simulate(duration=10, sampling_rate=1000)
>>> cleaned = nk.ecg_clean(ecg, sampling_rate=1000)
>>> info = nk.ecg_findpeaks(cleaned)
>>> nk.events_plot(info["ECG_R_Peaks"], cleaned)
>>>
>>> # Different methods
>>> neurokit = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="neurokit"), method="neurokit")
>>> pantompkins1985 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="pantompkins1985"), method="pantompkins1985")
>>> hamilton2002 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="hamilton2002"), method="hamilton2002")
>>> christov2004 = nk.ecg_findpeaks(cleaned, method="christov2004")
>>> gamboa2008 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="gamboa2008"), method="gamboa2008")
>>> elgendi2010 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="elgendi2010"), method="elgendi2010")
>>> engzeemod2012 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="engzeemod2012"), method="engzeemod2012")
>>> kalidas2017 = nk.ecg_findpeaks(nk.ecg_clean(ecg, method="kalidas2017"), method="kalidas2017")
>>> martinez2003 = nk.ecg_findpeaks(cleaned, method="martinez2003")
>>>
>>> # Visualize
>>> nk.events_plot([neurokit["ECG_R_Peaks"],
pantompkins1985["ECG_R_Peaks"],
hamilton2002["ECG_R_Peaks"],
christov2004["ECG_R_Peaks"],
gamboa2008["ECG_R_Peaks"],
elgendi2010["ECG_R_Peaks"],
engzeemod2012["ECG_R_Peaks"],
kalidas2017["ECG_R_Peaks"]],
martinez2003["ECG_R_Peaks"]], cleaned)
References
--------------
- <NAME>. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology. PhD ThesisUniversidade.
- <NAME>, <NAME>, <NAME>, and <NAME>. An open-source algorithm to detect onset of arterial blood pressure pulses. In Computers in Cardiology, 2003, pages 259–262, 2003.
- Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited, 2002.
- <NAME> and <NAME>. A Real-Time QRS Detection Algorithm. In: IEEE Transactions on Biomedical Engineering BME-32.3 (1985), pp. 230–236.
- <NAME>, A single scan algorithm for QRS detection and feature extraction, IEEE Comp. in Cardiology, vol. 6, pp. 37-42, 1979
- <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "Real Time Electrocardiogram Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
"""
# Try retrieving right column
if isinstance(ecg_cleaned, pd.DataFrame):
try:
ecg_cleaned = ecg_cleaned["ECG_Clean"]
        except (KeyError, NameError):
try:
ecg_cleaned = ecg_cleaned["ECG_Raw"]
            except (KeyError, NameError):
ecg_cleaned = ecg_cleaned["ECG"]
method = method.lower() # remove capitalised letters
# Run peak detection algorithm
if method in ["nk", "nk2", "neurokit", "neurokit2"]:
rpeaks = _ecg_findpeaks_neurokit(ecg_cleaned, sampling_rate,
show=show)
elif method in ["pantompkins", "pantompkins1985"]:
rpeaks = _ecg_findpeaks_pantompkins(ecg_cleaned, sampling_rate)
elif method in ["gamboa2008", "gamboa"]:
rpeaks = _ecg_findpeaks_gamboa(ecg_cleaned, sampling_rate)
elif method in ["ssf", "slopesumfunction", "zong", "zong2003"]:
rpeaks = _ecg_findpeaks_ssf(ecg_cleaned, sampling_rate)
elif method in ["hamilton", "hamilton2002"]:
rpeaks = _ecg_findpeaks_hamilton(ecg_cleaned, sampling_rate)
elif method in ["christov", "christov2004"]:
rpeaks = _ecg_findpeaks_christov(ecg_cleaned, sampling_rate)
elif method in ["engzee", "engzee2012", "engzeemod", "engzeemod2012"]:
rpeaks = _ecg_findpeaks_engzee(ecg_cleaned, sampling_rate)
elif method in ["elgendi", "elgendi2010"]:
rpeaks = _ecg_findpeaks_elgendi(ecg_cleaned, sampling_rate)
elif method in ["kalidas2017", "swt", "kalidas", "kalidastamil", "kalidastamil2017"]:
rpeaks = _ecg_findpeaks_kalidas(ecg_cleaned, sampling_rate)
elif method in ["martinez2003", "martinez"]:
rpeaks = _ecg_findpeaks_WT(ecg_cleaned, sampling_rate)
elif method in ["rodrigues2020", "rodrigues", "asi"]:
rpeaks = _ecg_findpeaks_rodrigues(ecg_cleaned, sampling_rate)
else:
raise ValueError("NeuroKit error: ecg_findpeaks(): 'method' should be "
"one of 'neurokit' or 'pamtompkins'.")
# Prepare output.
info = {"ECG_R_Peaks": rpeaks}
return info
# =============================================================================
# NeuroKit
# =============================================================================
def _ecg_findpeaks_neurokit(signal, sampling_rate=1000, smoothwindow=.1, avgwindow=.75,
gradthreshweight=1.5, minlenweight=0.4, mindelay=0.3,
show=False):
"""
All tune-able parameters are specified as keyword arguments. The `signal`
must be the highpass-filtered raw ECG with a lowcut of .5 Hz.
"""
if show is True:
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, sharex=True)
# Compute the ECG's gradient as well as the gradient threshold. Run with
# show=True in order to get an idea of the threshold.
grad = np.gradient(signal)
absgrad = np.abs(grad)
smooth_kernel = int(np.rint(smoothwindow * sampling_rate))
avg_kernel = int(np.rint(avgwindow * sampling_rate))
smoothgrad = signal_smooth(absgrad, kernel="boxcar", size=smooth_kernel)
avggrad = signal_smooth(smoothgrad, kernel="boxcar", size=avg_kernel)
gradthreshold = gradthreshweight * avggrad
mindelay = int(np.rint(sampling_rate * mindelay))
if show is True:
ax1.plot(signal)
ax2.plot(smoothgrad)
ax2.plot(gradthreshold)
# Identify start and end of QRS complexes.
qrs = smoothgrad > gradthreshold
beg_qrs = np.where(np.logical_and(np.logical_not(qrs[0:-1]), qrs[1:]))[0]
end_qrs = np.where(np.logical_and(qrs[0:-1], np.logical_not(qrs[1:])))[0]
# Throw out QRS-ends that precede first QRS-start.
end_qrs = end_qrs[end_qrs > beg_qrs[0]]
# Identify R-peaks within QRS (ignore QRS that are too short).
num_qrs = min(beg_qrs.size, end_qrs.size)
min_len = np.mean(end_qrs[:num_qrs] - beg_qrs[:num_qrs]) * minlenweight
peaks = [0]
for i in range(num_qrs):
beg = beg_qrs[i]
end = end_qrs[i]
len_qrs = end - beg
if len_qrs < min_len:
continue
if show is True:
ax2.axvspan(beg, end, facecolor="m", alpha=0.5)
# Find local maxima and their prominence within QRS.
data = signal[beg:end]
locmax, props = scipy.signal.find_peaks(data, prominence=(None, None))
if locmax.size > 0:
# Identify most prominent local maximum.
peak = beg + locmax[np.argmax(props["prominences"])]
# Enforce minimum delay between peaks.
if peak - peaks[-1] > mindelay:
peaks.append(peak)
peaks.pop(0)
if show is True:
ax1.scatter(peaks, signal[peaks], c="r")
peaks = np.asarray(peaks).astype(int) # Convert to int
return peaks
# =============================================================================
# Pan & Tompkins (1985)
# =============================================================================
def _ecg_findpeaks_pantompkins(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME> and <NAME>. A Real-Time QRS Detection Algorithm.
In: IEEE Transactions on Biomedical Engineering BME-32.3 (1985), pp. 230–236.
"""
diff = np.diff(signal)
squared = diff * diff
N = int(0.12 * sampling_rate)
mwa = _ecg_findpeaks_MWA(squared, N)
mwa[:int(0.2 * sampling_rate)] = 0
mwa_peaks = _ecg_findpeaks_peakdetect(mwa, sampling_rate)
mwa_peaks = np.array(mwa_peaks, dtype='int')
return mwa_peaks
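# --- Added sketch: _ecg_findpeaks_MWA and _ecg_findpeaks_peakdetect are called
# above but are not defined in this excerpt. One plausible minimal moving-window
# average consistent with how MWA is used (a causal window of N samples, shorter
# at the start) is sketched below; the actual helpers in the library may differ.
def _ecg_findpeaks_MWA_sketch(signal, window_size):
    mwa = np.zeros(len(signal))
    for i in range(len(signal)):
        if i == 0:
            mwa[i] = signal[i]
        else:
            start = max(0, i - window_size)
            mwa[i] = np.mean(signal[start:i])
    return mwa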
# =============================================================================
# Hamilton (2002)
# =============================================================================
def _ecg_findpeaks_hamilton(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- Hamilton, Open Source ECG Analysis Software Documentation, E.P.Limited, 2002.
"""
diff = abs(np.diff(signal))
b = np.ones(int(0.08 * sampling_rate))
b = b/int(0.08 * sampling_rate)
a = [1]
ma = scipy.signal.lfilter(b, a, diff)
ma[0:len(b) * 2] = 0
n_pks = []
n_pks_ave = 0.0
s_pks = []
s_pks_ave = 0.0
QRS = [0]
RR = []
RR_ave = 0.0
th = 0.0
i = 0
idx = []
peaks = []
for i in range(len(ma)):
if i > 0 and i < len(ma) - 1:
if ma[i-1] < ma[i] and ma[i + 1] < ma[i]:
peak = i
peaks.append(i)
if ma[peak] > th and (peak-QRS[-1]) > 0.3 * sampling_rate:
QRS.append(peak)
idx.append(i)
s_pks.append(ma[peak])
if len(n_pks) > 8:
s_pks.pop(0)
s_pks_ave = np.mean(s_pks)
if RR_ave != 0.0:
if QRS[-1]-QRS[-2] > 1.5 * RR_ave:
missed_peaks = peaks[idx[-2] + 1:idx[-1]]
for missed_peak in missed_peaks:
if missed_peak - peaks[idx[-2]] > int(0.360 * sampling_rate) and ma[missed_peak] > 0.5 * th:
QRS.append(missed_peak)
QRS.sort()
break
if len(QRS) > 2:
RR.append(QRS[-1]-QRS[-2])
if len(RR) > 8:
RR.pop(0)
RR_ave = int(np.mean(RR))
else:
n_pks.append(ma[peak])
if len(n_pks) > 8:
n_pks.pop(0)
n_pks_ave = np.mean(n_pks)
th = n_pks_ave + 0.45 * (s_pks_ave-n_pks_ave)
i += 1
QRS.pop(0)
QRS = np.array(QRS, dtype='int')
return QRS
# =============================================================================
# Slope Sum Function (SSF) - Zong et al. (2003)
# =============================================================================
def _ecg_findpeaks_ssf(signal, sampling_rate=1000, threshold=20, before=0.03, after=0.01):
"""
From https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L448
- <NAME>, <NAME>, <NAME>, and <NAME>. An open-source algorithm to detect onset of arterial blood pressure pulses. In Computers in
Cardiology, 2003, pages 259–262, 2003.
"""
    # TODO: Doesn't really seem to work
# convert to samples
winB = int(before * sampling_rate)
winA = int(after * sampling_rate)
Rset = set()
length = len(signal)
# diff
dx = np.diff(signal)
dx[dx >= 0] = 0
dx = dx ** 2
# detection
idx, = np.nonzero(dx > threshold)
idx0 = np.hstack(([0], idx))
didx = np.diff(idx0)
# search
sidx = idx[didx > 1]
for item in sidx:
a = item - winB
if a < 0:
a = 0
b = item + winA
if b > length:
continue
r = np.argmax(signal[a:b]) + a
Rset.add(r)
# output
rpeaks = list(Rset)
rpeaks.sort()
rpeaks = np.array(rpeaks, dtype='int')
return rpeaks
# =============================================================================
# Christov (2004)
# =============================================================================
def _ecg_findpeaks_christov(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME>, Real time electrocardiogram QRS detection using combined adaptive threshold, BioMedical Engineering OnLine 2004, vol. 3:28, 2004.
"""
total_taps = 0
b = np.ones(int(0.02 * sampling_rate))
b = b/int(0.02 * sampling_rate)
total_taps += len(b)
a = [1]
MA1 = scipy.signal.lfilter(b, a, signal)
b = np.ones(int(0.028 * sampling_rate))
b = b/int(0.028 * sampling_rate)
total_taps += len(b)
a = [1]
MA2 = scipy.signal.lfilter(b, a, MA1)
Y = []
for i in range(1, len(MA2)-1):
diff = abs(MA2[i + 1]-MA2[i-1])
Y.append(diff)
b = np.ones(int(0.040 * sampling_rate))
b = b/int(0.040 * sampling_rate)
total_taps += len(b)
a = [1]
MA3 = scipy.signal.lfilter(b, a, Y)
MA3[0:total_taps] = 0
ms50 = int(0.05 * sampling_rate)
ms200 = int(0.2 * sampling_rate)
ms1200 = int(1.2 * sampling_rate)
ms350 = int(0.35 * sampling_rate)
M = 0
newM5 = 0
M_list = []
MM = []
M_slope = np.linspace(1.0, 0.6, ms1200-ms200)
F = 0
F_list = []
R = 0
RR = []
Rm = 0
R_list = []
MFR = 0
MFR_list = []
QRS = []
for i in range(len(MA3)):
# M
if i < 5 * sampling_rate:
M = 0.6 * np.max(MA3[:i + 1])
MM.append(M)
if len(MM) > 5:
MM.pop(0)
elif QRS and i < QRS[-1] + ms200:
newM5 = 0.6 * np.max(MA3[QRS[-1]:i])
if newM5 > 1.5 * MM[-1]:
newM5 = 1.1 * MM[-1]
elif QRS and i == QRS[-1] + ms200:
if newM5 == 0:
newM5 = MM[-1]
MM.append(newM5)
if len(MM) > 5:
MM.pop(0)
M = np.mean(MM)
elif QRS and i > QRS[-1] + ms200 and i < QRS[-1] + ms1200:
M = np.mean(MM) * M_slope[i-(QRS[-1] + ms200)]
elif QRS and i > QRS[-1] + ms1200:
M = 0.6 * np.mean(MM)
# F
if i > ms350:
F_section = MA3[i-ms350:i]
max_latest = np.max(F_section[-ms50:])
max_earliest = np.max(F_section[:ms50])
F = F + ((max_latest-max_earliest)/150.0)
# R
if QRS and i < QRS[-1] + int((2.0/3.0 * Rm)):
R = 0
elif QRS and i > QRS[-1] + int((2.0/3.0 * Rm)) and i < QRS[-1] + Rm:
dec = (M-np.mean(MM))/1.4
R = 0 + dec
MFR = M + F + R
M_list.append(M)
F_list.append(F)
R_list.append(R)
MFR_list.append(MFR)
if not QRS and MA3[i] > MFR:
QRS.append(i)
elif QRS and i > QRS[-1] + ms200 and MA3[i] > MFR:
QRS.append(i)
if len(QRS) > 2:
RR.append(QRS[-1] - QRS[-2])
if len(RR) > 5:
RR.pop(0)
Rm = int(np.mean(RR))
QRS.pop(0)
QRS = np.array(QRS, dtype='int')
return QRS
# =============================================================================
# Gamboa (2008)
# =============================================================================
def _ecg_findpeaks_gamboa(signal, sampling_rate=1000, tol=0.002):
"""
From https://github.com/PIA-Group/BioSPPy/blob/e65da30f6379852ecb98f8e2e0c9b4b5175416c3/biosppy/signals/ecg.py#L834
- <NAME>. (2008). Multi-modal behavioral biometrics based on hci and electrophysiology. PhD ThesisUniversidade.
"""
# convert to samples
v_100ms = int(0.1 * sampling_rate)
v_300ms = int(0.3 * sampling_rate)
hist, edges = np.histogram(signal, 100, density=True)
TH = 0.01
F = np.cumsum(hist)
v0 = edges[np.nonzero(F > TH)[0][0]]
v1 = edges[np.nonzero(F < (1 - TH))[0][-1]]
nrm = max([abs(v0), abs(v1)])
norm_signal = signal / float(nrm)
d2 = np.diff(norm_signal, 2)
b = np.nonzero((np.diff(np.sign(np.diff(-d2)))) == -2)[0] + 2
b = np.intersect1d(b, np.nonzero(-d2 > tol)[0])
if len(b) < 3:
rpeaks = []
else:
b = b.astype('float')
rpeaks = []
previous = b[0]
for i in b[1:]:
if i - previous > v_300ms:
previous = i
rpeaks.append(np.argmax(signal[int(i):int(i + v_100ms)]) + i)
rpeaks = sorted(list(set(rpeaks)))
rpeaks = np.array(rpeaks, dtype='int')
return rpeaks
# =============================================================================
# Engzee Modified (2012)
# =============================================================================
def _ecg_findpeaks_engzee(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME>, A single scan algorithm for QRS detection and feature extraction, IEEE Comp. in Cardiology, vol. 6, pp. 37-42, 1979
- <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "Real Time Electrocardiogram Segmentation for Finger Based ECG Biometrics", BIOSIGNALS 2012, pp. 49-54, 2012.
"""
engzee_fake_delay = 0
diff = np.zeros(len(signal))
for i in range(4, len(diff)):
diff[i] = signal[i]-signal[i-4]
ci = [1, 4, 6, 4, 1]
low_pass = scipy.signal.lfilter(ci, 1, diff)
low_pass[:int(0.2 * sampling_rate)] = 0
ms200 = int(0.2 * sampling_rate)
ms1200 = int(1.2 * sampling_rate)
ms160 = int(0.16 * sampling_rate)
neg_threshold = int(0.01 * sampling_rate)
M = 0
M_list = []
neg_m = []
MM = []
M_slope = np.linspace(1.0, 0.6, ms1200-ms200)
QRS = []
r_peaks = []
counter = 0
thi_list = []
thi = False
thf_list = []
thf = False
for i in range(len(low_pass)):
# M
if i < 5 * sampling_rate:
M = 0.6 * np.max(low_pass[:i + 1])
MM.append(M)
if len(MM) > 5:
MM.pop(0)
elif QRS and i < QRS[-1] + ms200:
newM5 = 0.6 * np.max(low_pass[QRS[-1]:i])
if newM5 > 1.5 * MM[-1]:
newM5 = 1.1 * MM[-1]
elif QRS and i == QRS[-1] + ms200:
MM.append(newM5)
if len(MM) > 5:
MM.pop(0)
M = np.mean(MM)
elif QRS and i > QRS[-1] + ms200 and i < QRS[-1] + ms1200:
M = np.mean(MM) * M_slope[i-(QRS[-1] + ms200)]
elif QRS and i > QRS[-1] + ms1200:
M = 0.6 * np.mean(MM)
M_list.append(M)
neg_m.append(-M)
if not QRS and low_pass[i] > M:
QRS.append(i)
thi_list.append(i)
thi = True
elif QRS and i > QRS[-1] + ms200 and low_pass[i] > M:
QRS.append(i)
thi_list.append(i)
thi = True
if thi and i < thi_list[-1] + ms160:
if low_pass[i] < -M and low_pass[i-1] > -M:
# thf_list.append(i)
thf = True
if thf and low_pass[i] < -M:
thf_list.append(i)
counter += 1
elif low_pass[i] > -M and thf:
counter = 0
thi = False
thf = False
elif thi and i > thi_list[-1] + ms160:
counter = 0
thi = False
thf = False
if counter > neg_threshold:
unfiltered_section = signal[thi_list[-1] - int(0.01 * sampling_rate):i]
r_peaks.append(engzee_fake_delay + np.argmax(unfiltered_section) + thi_list[-1] - int(0.01 * sampling_rate))
counter = 0
thi = False
thf = False
r_peaks = np.array(r_peaks, dtype='int')
return r_peaks
# =============================================================================
# Stationary Wavelet Transform (SWT) - Kalidas and Tamil (2017)
# =============================================================================
def _ecg_findpeaks_kalidas(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME> and <NAME> (2017). Real-time QRS detector using Stationary Wavelet Transform for Automated ECG Analysis. In: 2017 IEEE 17th International Conference on Bioinformatics and Bioengineering (BIBE). Uses the Pan and Tompkins thresolding.
"""
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError("NeuroKit error: ecg_findpeaks(): the 'PyWavelets' "
"module is required for this method to run. ",
"Please install it first (`pip install PyWavelets`).")
swt_level = 3
padding = -1
for i in range(1000):
if (len(signal) + i) % 2 ** swt_level == 0:
padding = i
break
if padding > 0:
signal = np.pad(signal, (0, padding), 'edge')
elif padding == -1:
print("Padding greater than 1000 required\n")
swt_ecg = pywt.swt(signal, 'db3', level=swt_level)
swt_ecg = np.array(swt_ecg)
swt_ecg = swt_ecg[0, 1, :]
squared = swt_ecg * swt_ecg
f1 = 0.01/sampling_rate
f2 = 10/sampling_rate
b, a = scipy.signal.butter(3, [f1 * 2, f2 * 2], btype='bandpass')
filtered_squared = scipy.signal.lfilter(b, a, squared)
filt_peaks = _ecg_findpeaks_peakdetect(filtered_squared, sampling_rate)
filt_peaks = np.array(filt_peaks, dtype='int')
return filt_peaks
# =============================================================================
# Elgendi et al. (2010)
# =============================================================================
def _ecg_findpeaks_elgendi(signal, sampling_rate=1000):
"""
From https://github.com/berndporr/py-ecg-detectors/
- <NAME> & <NAME> & <NAME>. (2010). Frequency Bands Effects on QRS Detection. The 3rd International Conference on Bio-inspired Systems and Signal Processing (BIOSIGNALS2010). 428-431.
"""
window1 = int(0.12 * sampling_rate)
mwa_qrs = _ecg_findpeaks_MWA(abs(signal), window1)
window2 = int(0.6 * sampling_rate)
mwa_beat = _ecg_findpeaks_MWA(abs(signal), window2)
blocks = np.zeros(len(signal))
block_height = np.max(signal)
for i in range(len(mwa_qrs)):
if mwa_qrs[i] > mwa_beat[i]:
blocks[i] = block_height
else:
blocks[i] = 0
QRS = []
for i in range(1, len(blocks)):
if blocks[i-1] == 0 and blocks[i] == block_height:
start = i
elif blocks[i-1] == block_height and blocks[i] == 0:
end = i-1
if end-start > int(0.08 * sampling_rate):
detection = np.argmax(signal[start:end + 1]) + start
if QRS:
if detection-QRS[-1] > int(0.3 * sampling_rate):
QRS.append(detection)
else:
QRS.append(detection)
QRS = np.array(QRS, dtype='int')
return QRS
# =============================================================================
# Continuous Wavelet Transform (CWT) - Martinez et al. (2003)
# =============================================================================
#
def _ecg_findpeaks_WT(signal, sampling_rate=1000):
# Try loading pywt
try:
import pywt
except ImportError:
raise ImportError("NeuroKit error: ecg_delineator(): the 'PyWavelets' "
"module is required for this method to run. ",
"Please install it first (`pip install PyWavelets`).")
    # first derivative of the Gaussian signal
scales = np.array([1, 2, 4, 8, 16])
cwtmatr, freqs = pywt.cwt(signal, scales, 'gaus1', sampling_period=1.0/sampling_rate)
# For wt of scale 2^4
signal_4 = cwtmatr[4, :]
epsilon_4 = np.sqrt(np.mean(np.square(signal_4)))
peaks_4, _ = scipy.signal.find_peaks(np.abs(signal_4), height=epsilon_4)
# For wt of scale 2^3
signal_3 = cwtmatr[3, :]
epsilon_3 = np.sqrt(np.mean(np.square(signal_3)))
peaks_3, _ = scipy.signal.find_peaks(np.abs(signal_3), height=epsilon_3)
# Keep only peaks_3 that are nearest to peaks_4
peaks_3_keep = np.zeros_like(peaks_4)
for i in range(len(peaks_4)):
peaks_distance = abs(peaks_4[i] - peaks_3)
peaks_3_keep[i] = peaks_3[np.argmin(peaks_distance)]
# For wt of scale 2^2
signal_2 = cwtmatr[2, :]
epsilon_2 = np.sqrt(np.mean(np.square(signal_2)))
peaks_2, _ = scipy.signal.find_peaks(np.abs(signal_2), height=epsilon_2)
# Keep only peaks_2 that are nearest to peaks_3
peaks_2_keep = np.zeros_like(peaks_4)
for i in range(len(peaks_4)):
peaks_distance = abs(peaks_3_keep[i] - peaks_2)
        peaks_2_keep[i] = peaks_2[np.argmin(peaks_distance)]
#!/usr/local/bin/python
"""
Node objective functions
Algorithms implemented:
1. naive_greedy_parallel - In each iteration pick 1 node greedily.
2. naive_greedy_heuristic - Pick all k nodes at once greedily.
3. smart_greedy_parallel - In each iteration pick 1 node smart greedily.
"""
from __future__ import division
from markov_chain import MarkovChain
import copy
import random
import numpy as np
import networkx as nx
from multiprocessing import Pool
from multiprocessing import cpu_count
from itertools import combinations
import argparse
np.seterr(all="raise")
cores = cpu_count()
# ------------------------------------------------------------
# Naive Greedy algorithm
# ------------------------------------------------------------
def calculate_F(S):
F = np.float128(0.0)
V_minus_S = set(mc.G.nodes()) - set(S)
predecessors = []
for i in V_minus_S:
predecessors += mc.G.predecessors(i)
predecessors = set(predecessors)
for u in predecessors:
F_u = np.float128(0.0)
successors = mc.G[u]
# Calculate rho
rho = np.float128(0.0)
for v in successors:
if v in S:
rho += successors[v]['weight']
# Calculate F_u
x_dash = mc.G.node[u]['num_items'] * ( 1 - rho)
for v in successors:
if v not in S:
P_dash = mc.G.edge[u][v]['weight'] / (1 - rho)
F_u += P_dash * (1 - P_dash)
F_u = x_dash * F_u
if np.abs(F_u) < 1e-5:
F += 0
else:
F += F_u
return (S, F)
def naive_greedy_parallel(k):
pool = Pool(cores)
picked_set = []
for i in xrange(k):
candidate_nodes = set(mc.G.nodes()) - set(picked_set)
candidate_sets = [picked_set + [v] for v in candidate_nodes]
objective_values = pool.imap(calculate_F, candidate_sets)
objective_values = sorted(objective_values, key=lambda x: x[1])
picked_set = objective_values[0][0]
pool.close()
pool.join()
return calculate_F(picked_set)
def naive_greedy_heuristic(k):
pool = Pool(cores)
picked_set = []
candidate_nodes = [[x] for x in set(mc.G.nodes()) - set(picked_set)]
objective_values = pool.imap(calculate_F, candidate_nodes)
objective_values = sorted(objective_values, key=lambda x: x[1])
picked_set = [x[0][0] for x in objective_values[:k]]
pool.close()
pool.join()
return calculate_F(picked_set)
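# --- Added note (not part of the original script) ---
# calculate_F(S) implements the objective
#     F(S) = sum_u x'_u * sum_{v in succ(u), v not in S} P'(u, v) * (1 - P'(u, v)),
# where rho_u is the total transition weight from u into S,
# x'_u = num_items(u) * (1 - rho_u) and P'(u, v) = weight(u, v) / (1 - rho_u).
# A hedged usage sketch, assuming the module-level `mc` has been initialised
# elsewhere (e.g. in a __main__ block not shown here) with a graph that follows
# the networkx 1.x attribute API used above (G.node / G.edge):
#
#   best_set, best_F = naive_greedy_parallel(k=3)
#   heur_set, heur_F = naive_greedy_heuristic(k=3)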
# ------------------------------------------------------------
# Smart Greedy algorithm
# ------------------------------------------------------------
def calculate_smart_F(args):
v = args[0]
rho_dict = args[1]
B_dict = args[2]
picked_set = list(args[3]) + list([v])
    F = np.float128(0.0)
import numpy as np
from scipy import signal
def data_quality_check(array):
"""
Check whether the input signal contains NaN, print relevant information
input:
array: numpy array. The temporal signal with 1d or with multiple dimensions
return:
None or dict. Dict stores indicator of Nan/Abnormal values.
If there is no NaN in signal, return None, else return the index where NaN occurs
"""
print("*** Data Quality Check ***")
def check_nan(array):
return np.isnan(array).any()
def check_abnormal(array):
"""
Todo: Define what kinds of EEG data are abnormal in some kinds?
"""
return None
    if not check_nan(array) and not check_abnormal(array):
print("*** Data Quality Check Passed ***")
return None
else:
# return format: tuple(array([a,b,c,...]), array([j,k,l,...]), ...)
# Each element in tuple indicates the index list
dict = {}
if check_nan(array):
print("Nan Values appear in data")
print("The percentage of Nan Values is {val}%").format(val=sum(np.isnan(array)) / array.shape[0])
print("The location is at {tuple}").format(tuple=np.where(np.isnan(array)))
dict["nan"] = np.where(np.isnan(array))
if check_abnormal(array):
#Todo: add abnormal values' locations to dict.
pass
return dict
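# --- Usage sketch (added) ---
# data_quality_check() returns None when the signal passes the check and a dict
# of NaN locations otherwise. A small artificial example:
def _demo_data_quality_check():
    clean = np.zeros(10)
    dirty = np.array([0.1, np.nan, 0.3])
    print(data_quality_check(clean))   # expected: None (check passed)
    report = data_quality_check(dirty)
    print(report["nan"])               # indices of the NaN samples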
def savgol(array, window_size=5, polyorder=2):
"""
    Apply the Savitzky-Golay filter to smooth signals.
    We support both 1d array input and DEEG standard input (p, m, e).
    For the latter format, the filter is applied along the last dimension "e" and an array of the same shape is returned.
Input
-------
array: numpy array or list
window_size: int
the length of the filter window (i.e., the number of coefficients).
        window_size must be a positive odd integer.
default is 5.
polyorder : int
the order of the polynomial used to fit the samples.
        polyorder must be less than window_size.
default is 2.
Return
-------
y :ndarray
same shape as x.
The filtered data.
"""
def create_x(size, rank):
# creat weighting coefficients
x = []
for i in range(2 * size + 1):
m = i - size
row = [m ** j for j in range(rank)]
x.append(row)
x = np.mat(x)
return x
def SG(array, window_size, polyorder):
if window_size % 2 != 1:
raise ValueError("window_size must be odd")
if polyorder >= window_size:
raise ValueError("polyorder must be less than window_lengthd")
m = int((window_size - 1) / 2)
odata = np.array(array).tolist()
for i in range(m):
odata.insert(0, odata[0])
odata.insert(len(odata), odata[len(odata) - 1])
# creat X matrix
x = create_x(m, polyorder)
# calculate the weighting coefficients
b = (x * (x.T * x).I) * x.T
a0 = b[m]
a0 = a0.T
# calculate the filtered signal
filtered_signal = []
for i in range(len(array)):
y = [odata[i + j] for j in range(window_size)]
y1 = np.mat(y) * a0
y1 = float(y1)
filtered_signal.append(y1)
return np.array(filtered_signal)
if len(np.array(array).shape) == 1:
return SG(array, window_size, polyorder)
else:
return np.apply_along_axis(SG, axis=-1, arr=array, window_size=window_size, polyorder=polyorder)
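# --- Usage sketch (added) ---
# For reference, scipy ships an equivalent filter as scipy.signal.savgol_filter;
# away from the array edges (where the padding strategies differ) the two
# implementations should agree closely on a 1-d signal.
def _demo_savgol():
    x = np.sin(np.linspace(0, 4 * np.pi, 200)) + 0.1 * np.random.randn(200)
    smoothed = savgol(x, window_size=5, polyorder=2)
    reference = signal.savgol_filter(x, window_length=5, polyorder=2)
    print(np.max(np.abs(smoothed[5:-5] - reference[5:-5])))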
def filter(array, name="delta", order=5):
"""
A band-pass filter to suppress signal out of frequency band
input:
array: numpy array. The input temporal signal with 1d
name: str. Specify the filter type commonly used in EEG analysis. (delta, theta, alpha, beta, gamma)
order: int. The order of Butterworth filter. Default is 5
return:
ts: numpy array. Filtered signal in temporal domain
"""
if name == "delta":
band = [1,3]
elif name == "theta":
band = [4,7]
elif name == "alpha":
band = [8,13]
elif name == "beta":
band = [14,30]
elif name == "gamma":
band = [31,50]
else:
raise(Exception("Invalid filter name!"))
sos = signal.butter(order, band, 'bp', fs=1000, output='sos')
ts = signal.sosfilt(sos, array)
return ts
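# --- Usage sketch (added) ---
# Note that the Butterworth design above hard-codes fs=1000, so the input is
# assumed to be sampled at 1 kHz. Example isolating two bands from a toy signal:
def _demo_filter():
    fs = 1000
    t = np.arange(0, 2, 1.0 / fs)
    raw = np.sin(2 * np.pi * 10 * t) + np.sin(2 * np.pi * 40 * t)
    alpha = filter(raw, name="alpha")   # keeps the ~10 Hz component
    gamma = filter(raw, name="gamma")   # keeps the ~40 Hz component
    print(np.std(alpha), np.std(gamma))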
def band(array):
"""
Filter temporal signal by all the 5 filters commonly used in EEG analysis
input:
signal: numpy array. The input temporal signal. 1d or with multiple dimensions
return
ts_dict: dictionary. Keys are filter name and values are filtered signal in temporal domain.
"""
ts_dict = {}
if len(array.shape) < 2:
for name in ["delta","theta","alpha","beta","gamma"]:
ts_dict[name] = filter(array)
else:
for name in ["delta","theta","alpha","beta","gamma"]:
            ts_dict[name] = np.apply_along_axis(filter, -1, array, name)
    return ts_dict
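# --- Usage sketch (added) ---
# band() runs filter() for all five canonical EEG bands; for multi-dimensional
# input the filtering is applied along the last axis and shapes are preserved.
def _demo_band():
    eeg = np.random.randn(4, 2000)      # e.g. 4 channels, 2 s at the assumed 1 kHz rate
    bands = band(eeg)
    print(sorted(bands.keys()))         # ['alpha', 'beta', 'delta', 'gamma', 'theta']
    print(bands["theta"].shape)         # (4, 2000), same as the input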
from __future__ import print_function
from __future__ import division
__author__ = """Alex "<NAME>""" ## double-quotes will be silently removed, single quotes will be left, eg, O'Connor
import numpy as np
import itertools #to calculate all subsets
from copy import deepcopy
from math import atan, pi, cos, sin, sqrt, ceil
import time, sys, platform, os, StringIO, gc
from psychopy import visual, core
import random
#If you run this code stand-alone, it will do a demo of the basic stimulus it is designed to provide
#BEGIN helper functions from primes.py
def gcd(a,b):
"""Return greatest common divisor using Euclid's Algorithm."""
while b:
a, b = b, a % b
return a
def lcm(a,b):
"""Return lowest common multiple."""
return (a*b)/gcd(a,b)
def LCM(terms):
"Return lcm of a list of numbers."
return reduce(lambda a,b: lcm(a,b), terms)
#END helper functions from primes.py
def calcCondsPerNumTargets(numRings,numTargets):
#numRings is number of rings, each of which can have up to one target
#numTargets is list or array of numTarget conditions, e.g. 1,2,3 means the experiment includes 1, 2, and 3 targets
#Each target can be placed randomly in any of the rings.
#Want all possibilities to be covered equally often. That means each target number condition has to include all the combinations
# of places that number of targets can go.
    #So that some targetNum conditions don't have more trials than others, have to scale up each targetNum condition to the worst case.
#Actually it's worse than that. To make them fit evenly, have to use least common multiple
#3 rings choose 2 for targets, 3 rings choose 1 for target, have to have as many conditions as the maximum.
#To find maximum, determine length of each.
ringNums = np.arange(numRings)
numPossibilitiesEach = list()
for k in numTargets:
numPossibilitiesCouldPutKtargets = len( list(itertools.combinations(ringNums,k)) )
#print(numPossibilitiesCouldPutKtargets)
numPossibilitiesEach.append( numPossibilitiesCouldPutKtargets )
m = max( numPossibilitiesEach ) #because the worst case (number of targets) requires this many, have to have this many for all. Actually,
leastCommonMultiple = LCM( numPossibilitiesEach ) #to have equal number of trials per numtargets, would have to use this figure for each
#print('biggest=',m, ' Least common multiple=', leastCommonMultiple)
return leastCommonMultiple
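# --- Worked example (added) ---
# With numRings=3 and numTargets=[1, 2, 3], the numbers of ways to place the
# targets are C(3,1)=3, C(3,2)=3 and C(3,3)=1, so the least common multiple
# returned is 3:
#
#   calcCondsPerNumTargets(3, [1, 2, 3])   # -> 3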
def accelerateComputer(slowFast, process_priority, disable_gc):
# process_priority = 'normal' 'high' or 'realtime'
if slowFast:
if process_priority == 'normal':
pass
elif process_priority == 'high':
core.rush(True)
elif process_priority == 'realtime': # Only makes a diff compared to 'high' on Windows.
core.rush(True, realtime = True)
else:
print('Invalid process priority:',process_priority,"Process running at normal.")
process_priority = 'normal'
if disable_gc:
gc.disable()
if slowFast==0: #turn off the speed-up
if disable_gc:
gc.enable()
core.rush(False)
def openMyStimWindow(monitorSpec,widthPix,heightPix,bgColor,allowGUI,units,fullscr,scrn,waitBlank): #make it a function because have to do it several times, want to be sure is identical each time
myWin = visual.Window(monitor=monitorSpec,size=(widthPix,heightPix),allowGUI=allowGUI,units=units,color=bgColor,colorSpace='rgb',fullscr=fullscr,screen=scrn,waitBlanking=waitBlank) #Holcombe lab monitor
if myWin is None:
print('ERROR: Failed to open window in openMyStimWindow!')
core.quit()
return myWin
def constructRingsAsGratings(myWin,numRings,radii,ringRadialMaskEachRing,numObjects,patchAngle,colors,stimColorIdxsOrder,gratingTexPix,blobToCueEachRing,ppLog):
#Originally to construct a grating formed of the colors in order of stimColorIdxsOrder
antialiasGrating = True
autoLogging = False
texEachRing=list() #texture which will draw the ring of objects via openGL texture on grating
cueTexEachRing=list() #making a separate grating for the cue, wherein everything background color except the location of the cue
ringsRadial=list(); #after making the rings of object, put them in this list
cueRings=list() #after making grating for each cue, put it in this cue
stimColorIdxsOrder= stimColorIdxsOrder[::-1] #reverse order of indices, because grating texture is rendered in reverse order than is blobs version
radialMaskEachRing=[[0,0,0,1,1,] ,[0,0,0,0,0,0,1,1,],[0,0,0,0,0,0,0,0,0,0,1,1,]]
numUniquePatches= len( max(stimColorIdxsOrder,key=len) )
numCycles =(1.0*numObjects) / numUniquePatches
angleSegment = 360./(numUniquePatches*numCycles)
if gratingTexPix % numUniquePatches >0: #gratingTexPix contains numUniquePatches. numCycles will control how many total objects there are around circle
ppLog.warn('Warning: could not exactly render a '+str(numUniquePatches)+'-segment pattern radially, will be off by '+str( (gratingTexPix%numUniquePatches)*1.0 /gratingTexPix ) )
if numObjects % numUniquePatches >0:
        msg= 'Warning: numUniquePatches ('+str(numUniquePatches)+') does not divide numObjects evenly'; ppLog.warn(msg)
#create texture for red-green-blue-red-green-blue etc. radial grating
for i in range(numRings):
#myTex.append(np.zeros([gratingTexPix,gratingTexPix,3])+[1,-1,1])
texEachRing.append( np.zeros([gratingTexPix,gratingTexPix,3])+bgColor[0] ) #start with all channels in all locs = bgColor
cueTexEachRing.append( np.ones([gratingTexPix,gratingTexPix,3])*bgColor[0] )
if patchAngle > angleSegment:
msg='Error: patchAngle requested ('+str(patchAngle)+') bigger than maximum possible ('+str(angleSegment)+') numUniquePatches='+str(numUniquePatches)+' numCycles='+str(numCycles);
print(msg); ppLog.error(msg)
oneCycleAngle = 360./numCycles
segmentSizeTexture = angleSegment/oneCycleAngle *gratingTexPix #I call it segment because includes spaces in between, that I'll write over subsequently
patchSizeTexture = patchAngle/oneCycleAngle *gratingTexPix
patchSizeTexture = round(patchSizeTexture) #best is odd number, even space on either size
patchFlankSize = (segmentSizeTexture-patchSizeTexture)/2.
patchAngleActual = patchSizeTexture / gratingTexPix * oneCycleAngle
if abs(patchAngleActual - patchAngle) > .01:
msg = 'Desired patchAngle = '+str(patchAngle)+' but closest can get with '+str(gratingTexPix)+' gratingTexPix is '+str(patchAngleActual);
ppLog.warn(msg)
for colrI in range(numUniquePatches): #for that portion of texture, set color
start = colrI*segmentSizeTexture
end = start + segmentSizeTexture
        start = round(start) #don't round until after the addition, otherwise it can fall short
end = round(end)
ringColr=list();
for i in range(numRings):
ringColr.append(colors[ stimColorIdxsOrder[i][colrI] ])
for colorChannel in range(3):
for i in range(numRings):
texEachRing[i][:, start:end, colorChannel] = ringColr[i][colorChannel];
for cycle in range(int(round(numCycles))):
base = cycle*gratingTexPix/numCycles
for i in range(numRings):
                    cueTexEachRing[i][:, int(base+start/numCycles):int(base+end/numCycles), colorChannel] = ringColr[1][colorChannel]
#draw bgColor area (emptySizeEitherSideOfPatch) by overwriting first and last entries of segment
for i in range(numRings):
            texEachRing[i][:, int(start):int(start+patchFlankSize), :] = bgColor[0] #one flank
            texEachRing[i][:, int(end-1-patchFlankSize):int(end), :] = bgColor[0] #other flank
for cycle in range(int(round(numCycles))):
base = cycle*gratingTexPix/numCycles
for i in range(numRings):
                cueTexEachRing[i][:, int(base+start/numCycles):int(base+(start+patchFlankSize)/numCycles), :] = bgColor[0]
                cueTexEachRing[i][:, int(base+(end-1-patchFlankSize)/numCycles):int(base+end/numCycles), :] = bgColor[0]
#color the segment to be cued white. First, figure out cue segment len
segmentLen = gratingTexPix/numCycles*1/numUniquePatches
    WhiteCueSizeAdj=0 # adjust the white cue marker width. wingAdd 20110923
if numObjects==3:WhiteCueSizeAdj=110
elif numObjects==6:WhiteCueSizeAdj=25
elif numObjects==12:WhiteCueSizeAdj=-15
elif numObjects==2:WhiteCueSizeAdj=200
for i in range(numRings): #color cue position white
        if blobToCueEachRing[i] >=0: #-999 means don't cue anything
blobToCueCorrectForRingReversal = numObjects-1 - blobToCueEachRing[i] #grating seems to be laid out in opposite direction than blobs, this fixes postCueNumBlobsAway so positive is in direction of motion
if blobToCueCorrectForRingReversal==0 and numObjects==12: WhiteCueSizeAdj=0
            cueStartEntry = blobToCueCorrectForRingReversal*segmentLen+WhiteCueSizeAdj
            cueEndEntry = cueStartEntry + segmentLen-2*WhiteCueSizeAdj
            cueTexEachRing[i][:, int(cueStartEntry):int(cueEndEntry), :] = -1*bgColor[0] #-1*bgColor is what makes it white
blackGrains = round( .25*(cueEndEntry-cueStartEntry) )#number of "pixels" of texture at either end of cue sector to make black. Need to update this to reflect patchAngle
            cueTexEachRing[i][:, int(cueStartEntry):int(cueStartEntry+blackGrains), :] = bgColor[0] #this one doesn't seem to do anything?
            cueTexEachRing[i][:, int(cueEndEntry-1-blackGrains):int(cueEndEntry), :] = bgColor[0]
angRes = 100 #100 is default. I have not seen any effect. This is currently not printed to log file!
for i in range(numRings):
ringsRadial.append(visual.RadialStim(myWin, tex=texEachRing[i], color=[1,1,1],size=radii[i],#myTexInner is the actual colored pattern. radial grating used to make it an annulus
mask=ringRadialMaskEachRing[i], # this is a 1-D mask dictating the behaviour from the centre of the stimulus to the surround.
radialCycles=0, angularCycles=numObjects*1.0/numUniquePatches,
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging))
#the mask is radial and indicates that should show only .3-.4 as one moves radially, creating an annulus
#end preparation of colored rings
#draw cueing grating for tracking task. Have entire grating be empty except for one white sector
cueRings.append(visual.RadialStim(myWin, tex=cueTexEachRing[i], color=[1,1,1],size=radii[i], #cueTexInner is white. Only one sector of it shown by mask
mask = radialMaskEachRing[i], radialCycles=0, angularCycles=1, #only one cycle because no pattern actually repeats- trying to highlight only one sector
angularRes=angRes, interpolate=antialiasGrating, autoLog=autoLogging) )#depth doesn't seem to work, just always makes it invisible?
    currentlyCuedBlobEachRing = blobToCueEachRing #this means we don't have to redraw
return ringsRadial,cueRings,currentlyCuedBlobEachRing
######### End constructRingAsGrating ###########################################################
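# Worked example of the texture arithmetic above (illustrative numbers, not taken from the experiment):
# with numObjects=6, numUniquePatches=3 and gratingTexPix=1024,
#   numCycles          = 6/3          = 2.0
#   angleSegment       = 360/(3*2)    = 60 deg
#   oneCycleAngle      = 360/2        = 180 deg
#   segmentSizeTexture = 60/180*1024  ~ 341.3 texture columns per segment,
# and a patchAngle of 30 deg occupies round(30/180*1024) = 171 columns, leaving
# patchFlankSize ~ 85.2 background-colored columns on either side of each patch.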
#########################################
def constructThickThinWedgeRingsTargetAndCue(myWin,radius,radialMask,radialMaskTarget,cueRadialMask,visibleWedge,numObjects,patchAngleThick,patchAngleThin,bgColor,
thickWedgeColor,thinWedgeColor,targetAngleOffset,targetRadialOffset,gratingTexPix,cueColor,objToCue,ppLog):
#Construct a grating formed of the colors in order of stimColorIdxsOrder
#Also construct a similar cueRing grating with same colors, but one blob potentially highlighted.
#cueRing Has different spacing than ringRadial, not sure why, I think because calculations tend to be off as it's
#always one cycle.
#radialMask doesn't seem to eliminate very-central part, bizarre
antialiasGrating = False #Don't set this to true because in present context, it's like imposing a radial Gaussian ramp on each object
autoLogging = False
numCycles = numObjects
segmentAngle = 360./numCycles
#create texture for red-green-blue-red-green-blue etc. radial grating
#2-D texture which will draw the ring of objects via openGL texture on grating
    ringTex = np.zeros([gratingTexPix,gratingTexPix,3])
import operator
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tests.frame.common import zip_frames
def test_agg_transform(axis, float_frame):
other_axis = 1 if axis in {0, "index"} else 0
    with np.errstate(all="ignore"):
"""
This module contains methods used our implementation of the Asynchronously
Parallel Optimization Solver for finding Multiple Minima (APOSMM) method
described in detail in the paper
`https://doi.org/10.1007/s12532-017-0131-4 <https://doi.org/10.1007/s12532-017-0131-4>`_
"""
from __future__ import division
from __future__ import absolute_import
__all__ = ['aposmm_logic','initialize_APOSMM', 'decide_where_to_start_localopt', 'update_history_dist']
import sys, os, traceback
import numpy as np
# import scipy as sp
from scipy.spatial.distance import cdist
from mpi4py import MPI
from numpy.lib.recfunctions import merge_arrays
from math import log, gamma, pi, sqrt
from petsc4py import PETSc
import nlopt
def aposmm_logic(H,persis_info,gen_specs,_):
"""
APOSMM as a libEnsemble generation function. Coordinates multiple local
optimization runs, starting from points which do not have a better point
nearby them. This generation function produces/requires the following
fields in ``H``:
- ``'x' [n floats]``: Parameters being optimized over
- ``'x_on_cube' [n floats]``: Parameters scaled to the unit cube
- ``'f' [float]``: Objective function being minimized
- ``'local_pt' [bool]``: True if point from a local optimization run, false if it is a sample point
- ``'dist_to_unit_bounds' [float]``: Distance to domain boundary
- ``'dist_to_better_l' [float]``: Distance to closest better local optimization point
- ``'dist_to_better_s' [float]``: Distance to closest better sample optimization point
- ``'ind_of_better_l' [int]``: Index of point ``'dist_to_better_l``' away
- ``'ind_of_better_s' [int]``: Index of point ``'dist_to_better_s``' away
- ``'started_run' [bool]``: True if point has started a local optimization run
- ``'num_active_runs' [int]``: Counts number of non-terminated local runs the point is in
- ``'local_min' [float]``: True if point has been ruled a local minima
and optionally
- ``'priority' [float]``: Value quantifying a point's desirability
- ``'f_i' [float]``: Value of ith objective component (if calculated one at a time)
- ``'fvec' [m floats]``: All objective components (if calculated together)
- ``'obj_component' [int]``: Index corresponding to value in ``'f_i``'
- ``'pt_id' [int]``: Identify the point
When using libEnsemble to do individual objective component evaluations,
APOSMM will return ``gen_specs['components']`` copies of each point, but
each component=0 version of the point will only be considered when
- deciding where to start a run,
- best nearby point,
- storing the order of the points is the run
- storing the combined objective function value
- etc
Necessary quantities in ``gen_specs`` are:
- ``'lb' [n floats]``: Lower bound on search domain
- ``'ub' [n floats]``: Upper bound on search domain
- ``'initial_sample_size' [int]``: Number of uniformly sampled points that must be returned (with a non-nan value) before a local optimization run is started.
- ``'localopt_method' [str]``: Name of an NLopt or PETSc/TAO method
Optional ``gen_specs`` entries are:
- ``'sample_points' [int]``: The points to be sampled (in the original domain)
- ``'combine_component_func' [func]``: Function to combine objective components
- ``'components' [int]``: Number of objective components
- ``'dist_to_bound_multiple' [float in (0,1]]``: What fraction of the distance to the nearest boundary should the initial step size be in localopt runs
- ``'high_priority_to_best_localopt_runs': [bool]``: True if localopt runs with smallest observed function value are given priority
- ``'lhs_divisions' [int]``: Number of Latin hypercube sampling partitions (0 or 1 results in uniform sampling)
- ``'min_batch_size' [int]``: Lower bound on the number of points given every time APOSMM is called
- ``'mu' [float]``: Distance from the boundary that all localopt starting points must satisfy
- ``'nu' [float]``: Distance from identified minima that all starting points must satisfy
- ``'single_component_at_a_time' [bool]``: True if single objective components will be evaluated at a time
- ``'rk_const' [float]``:
And ``gen_specs`` convergence tolerances for NLopt and PETSc/TAO:
- ``'fatol' [float]``:
- ``'ftol_abs' [float]``:
- ``'ftol_rel' [float]``:
- ``'gatol' [float]``:
- ``'grtol' [float]``:
- ``'xtol_abs' [float]``:
- ``'xtol_rel' [float]``:
:Note:
``gen_specs['combine_component_func']`` must be defined when there are
multiple objective components.
:Note:
APOSMM critically uses ``persis_info`` to store information about
active runs, order of points in each run, etc. The allocation function
must ensure it's always given.
:See:
``libensemble/tests/regression_tests/test_branin_aposmm.py``
for basic APOSMM usage.
:See:
``libensemble/tests/regression_tests/test_chwirut_aposmm_one_residual_at_a_time.py``
for an example of APOSMM coordinating multiple local optimization runs
for an objective with more than one component.
"""
"""
Description of intermediate variables in aposmm_logic:
n: domain dimension
    c_flag: True if giving libEnsemble individual components of fvec to evaluate. (Note: if c_flag is True, APOSMM only uses the component=0 copy of each point when making decisions, as described above.)
n_s: the number of complete evaluations (not just component evaluations)
updated_inds: indices of H that have been updated (and so all their information must be sent back to libE manager to update)
O: new points to be sent back to the history
x_new: when re-running a local opt method to get the next point: stores the first new point requested by a local optimization method
pt_in_run: when re-running a local opt method to get the next point: counts function evaluations to know when a new point is given
total_pts_in_run: when re-running a local opt method to get the next point: total evaluations in run to be incremented
    starting_inds: indices where runs should be started.
active_runs: indices of active local optimization runs (currently saved to disk between calls to APOSMM)
sorted_run_inds: indices of the considered run (in the order they were requested by the localopt method)
x_opt: the reported minimum from a localopt run (disregarded unless exit_code isn't 0)
exit_code: 0 if a new localopt point has been found, otherwise it's the NLopt/POUNDERS code
samples_needed: counts the number of additional uniformly drawn samples needed
"""
n, n_s, c_flag, O, rk_const, lhs_divisions, mu, nu = initialize_APOSMM(H, gen_specs)
# np.savez('H'+str(len(H)),H=H,gen_specs=gen_specs,persis_info=persis_info)
if n_s < gen_specs['initial_sample_size']:
updated_inds = set()
else:
global x_new, pt_in_run, total_pts_in_run # Used to generate a next local opt point
updated_inds = update_history_dist(H, gen_specs, c_flag)
starting_inds = decide_where_to_start_localopt(H, n_s, rk_const, lhs_divisions, mu, nu)
updated_inds.update(starting_inds)
for ind in starting_inds:
# Find the run number
if not np.any(H['started_run']):
persis_info['active_runs'] = set()
persis_info['run_order'] = {}
persis_info['total_runs'] = 0
new_run_num = persis_info['total_runs']
H['started_run'][ind] = 1
H['num_active_runs'][ind] += 1
persis_info['run_order'][new_run_num] = [ind]
persis_info['active_runs'].update([new_run_num])
persis_info['total_runs'] +=1
inactive_runs = set()
# Find next point in any uncompleted runs using information stored in persis_info
for run in persis_info['active_runs']:
x_opt, exit_code, persis_info, sorted_run_inds = advance_localopt_method(H, gen_specs, c_flag, run, persis_info)
if np.isinf(x_new).all():
assert exit_code>0, "Exit code not zero, but no information in x_new.\n Local opt run " + str(run) + " after " + str(len(sorted_run_inds)) + " evaluations.\n Worker crashing!"
# No new point was added. Hopefully at a minimum
update_history_optimal(x_opt, H, sorted_run_inds)
inactive_runs.add(run)
updated_inds.update(sorted_run_inds)
else:
matching_ind = np.where(np.equal(x_new,O['x_on_cube']).all(1))[0]
if len(matching_ind) == 0:
persis_info = add_points_to_O(O, x_new, H, gen_specs, c_flag, persis_info, local_flag=1, sorted_run_inds=sorted_run_inds, run=run)
else:
assert len(matching_ind) == 1, "This point shouldn't have ended up in the O twice!"
persis_info['run_order'][run].append(O['sim_id'][matching_ind[0]])
for i in inactive_runs:
persis_info['active_runs'].remove(i)
persis_info['run_order'].pop(i) # Deletes any information about this run
if len(H) == 0:
samples_needed = gen_specs['initial_sample_size']
elif 'min_batch_size' in gen_specs:
samples_needed = gen_specs['min_batch_size'] - len(O)
else:
samples_needed = int(not bool(len(O))) # 1 if len(O)==0, 0 otherwise
if samples_needed > 0:
if 'sample_points' in gen_specs:
v = sum(H['local_pt'])
x_new = gen_specs['sample_points'][v:v+samples_needed]
on_cube = False # We assume the points are on the original domain, not unit cube
else:
x_new = persis_info['rand_stream'].uniform(0,1,(samples_needed,n))
on_cube = True
persis_info = add_points_to_O(O, x_new, H, gen_specs, c_flag, persis_info, on_cube=on_cube)
O = np.append(H[np.array(list(updated_inds),dtype=int)][[o[0] for o in gen_specs['out']]],O)
return O, persis_info
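# A minimal gen_specs sketch for aposmm_logic, assembled from the docstring above
# (the numeric values are illustrative assumptions, not libEnsemble defaults):
#   gen_specs_example = {'lb': np.array([-3., -2.]),
#                        'ub': np.array([ 3.,  2.]),
#                        'initial_sample_size': 100,
#                        'localopt_method': 'LN_BOBYQA',
#                        'xtol_rel': 1e-4,
#                        'min_batch_size': 20,
#                        'out': [('x', float, 2), ('x_on_cube', float, 2), ('priority', float),
#                                ('local_pt', bool), ('known_to_aposmm', bool)]}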
def add_points_to_O(O, pts, H, gen_specs, c_flag, persis_info, local_flag=0, sorted_run_inds=[], run=[], on_cube=True):
"""
Adds points to O, the numpy structured array to be sent back to the manager
"""
assert not local_flag or len(pts) == 1, "add_points_to_O does not support this functionality"
original_len_O = len(O)
len_H = len(H)
ub = gen_specs['ub']
lb = gen_specs['lb']
if c_flag:
m = gen_specs['components']
assert len_H % m == 0, "Number of points in len_H not congruent to 0 mod 'components'"
pt_ids = np.sort(np.tile(np.arange((len_H+original_len_O)/m,(len_H+original_len_O)/m + len(pts)),(1,m)))
pts = np.tile(pts,(m,1))
num_pts = len(pts)
O.resize(len(O)+num_pts,refcheck=False) # Adds (num_pts) rows of zeros to O
if on_cube:
O['x_on_cube'][-num_pts:] = pts
O['x'][-num_pts:] = pts*(ub-lb)+lb
else:
O['x_on_cube'][-num_pts:] = (pts-lb)/(ub-lb)
O['x'][-num_pts:] = pts
O['sim_id'][-num_pts:] = np.arange(len_H+original_len_O,len_H+original_len_O+num_pts)
O['local_pt'][-num_pts:] = local_flag
O['dist_to_unit_bounds'][-num_pts:] = np.inf
O['dist_to_better_l'][-num_pts:] = np.inf
O['dist_to_better_s'][-num_pts:] = np.inf
O['ind_of_better_l'][-num_pts:] = -1
O['ind_of_better_s'][-num_pts:] = -1
if c_flag:
O['obj_component'][-num_pts:] = np.tile(range(0,m),(1,num_pts//m))
O['pt_id'][-num_pts:] = pt_ids
if local_flag:
O['num_active_runs'][-num_pts] += 1
# O['priority'][-num_pts:] = 1
# O['priority'][-num_pts:] = np.random.uniform(0,1,num_pts)
if 'high_priority_to_best_localopt_runs' in gen_specs and gen_specs['high_priority_to_best_localopt_runs']:
O['priority'][-num_pts:] = -min(H['f'][persis_info['run_order'][run]]) # Give highest priority to run with lowest function value
else:
O['priority'][-num_pts:] = persis_info['rand_stream'].uniform(0,1,num_pts)
persis_info['run_order'][run].append(O[-num_pts]['sim_id'])
else:
if c_flag:
# p_tmp = np.sort(np.tile(np.random.uniform(0,1,num_pts/m),(m,1))) # If you want all "duplicate points" to have the same priority (meaning libEnsemble gives them all at once)
# p_tmp = np.random.uniform(0,1,num_pts)
p_tmp = persis_info['rand_stream'].uniform(0,1,num_pts)
else:
# p_tmp = np.random.uniform(0,1,num_pts)
# persis_info['rand_stream'].uniform(lb,ub,(1,n))
if 'high_priority_to_best_localopt_runs' in gen_specs and gen_specs['high_priority_to_best_localopt_runs']:
p_tmp = -np.inf*np.ones(num_pts)
else:
p_tmp = persis_info['rand_stream'].uniform(0,1,num_pts)
O['priority'][-num_pts:] = p_tmp
# O['priority'][-num_pts:] = 1
return persis_info
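# The scaling used above maps between the unit cube and the original domain:
#   x = x_on_cube*(ub-lb) + lb    and inversely    x_on_cube = (x-lb)/(ub-lb)
# e.g. with lb=np.array([-3., 0.]) and ub=np.array([3., 10.]), x_on_cube=[0.5, 0.1]
# corresponds to x=[0., 1.] (illustrative numbers only).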
def update_history_dist(H, gen_specs, c_flag):
"""
    Updates distances/indices after new points have been evaluated.
:See:
``/libensemble/alloc_funcs/start_persistent_local_opt_gens.py``
"""
n = len(H['x_on_cube'][0])
updated_inds = set()
new_inds = np.where(~H['known_to_aposmm'])[0]
if c_flag:
for v in np.unique(H['pt_id'][new_inds]):
inds = H['pt_id']==v
H['f'][inds] = np.inf
H['f'][np.where(inds)[0][0]] = gen_specs['combine_component_func'](H['f_i'][inds])
p = np.logical_and.reduce((H['returned'],H['obj_component']==0,~np.isnan(H['f'])))
else:
p = np.logical_and.reduce((H['returned'],~np.isnan(H['f'])))
for new_ind in new_inds:
# Loop over new returned points and update their distances
if p[new_ind]:
H['known_to_aposmm'][new_ind] = True # These points are now known to APOSMM
# Compute distance to boundary
H['dist_to_unit_bounds'][new_ind] = min(min(np.ones(n) - H['x_on_cube'][new_ind]),min(H['x_on_cube'][new_ind] - np.zeros(n)))
dist_to_all = cdist(H['x_on_cube'][[new_ind]], H['x_on_cube'][p], 'euclidean').flatten()
new_better_than = H['f'][new_ind] < H['f'][p]
# Update any other points if new_ind is closer and better
if H['local_pt'][new_ind]:
inds_of_p = np.logical_and(dist_to_all < H['dist_to_better_l'][p], new_better_than)
updates = np.where(p)[0][inds_of_p]
H['dist_to_better_l'][updates] = dist_to_all[inds_of_p]
H['ind_of_better_l'][updates] = new_ind
else:
inds_of_p = np.logical_and(dist_to_all < H['dist_to_better_s'][p], new_better_than)
updates = np.where(p)[0][inds_of_p]
H['dist_to_better_s'][updates] = dist_to_all[inds_of_p]
H['ind_of_better_s'][updates] = new_ind
updated_inds.update(updates)
# Since we allow equality when deciding better_than_new_l and
# better_than_new_s, we have to prevent new_ind from being its own
# better point.
better_than_new_l = np.logical_and.reduce((~new_better_than, H['local_pt'][p], H['sim_id'][p] != new_ind))
better_than_new_s = np.logical_and.reduce((~new_better_than, ~H['local_pt'][p], H['sim_id'][p] != new_ind))
# Who is closest to ind and better
if np.any(better_than_new_l):
ind = dist_to_all[better_than_new_l].argmin()
H['ind_of_better_l'][new_ind] = H['sim_id'][p][np.nonzero(better_than_new_l)[0][ind]]
H['dist_to_better_l'][new_ind] = dist_to_all[better_than_new_l][ind]
if np.any(better_than_new_s):
ind = dist_to_all[better_than_new_s].argmin()
H['ind_of_better_s'][new_ind] = H['sim_id'][p][np.nonzero(better_than_new_s)[0][ind]]
H['dist_to_better_s'][new_ind] = dist_to_all[better_than_new_s][ind]
# if not ignore_L8:
# r_k = calc_rk(len(H['x_on_cube'][0]), n_s, rk_const, lhs_divisions)
# H['worse_within_rk'][new_ind][p] = np.logical_and.reduce((H['f'][new_ind] <= H['f'][p], dist_to_all <= r_k))
# # Add trues if new point is 'worse_within_rk'
# inds_to_change = np.logical_and.reduce((H['dist_to_all'][p,new_ind] <= r_k, H['f'][new_ind] >= H['f'][p], H['sim_id'][p] != new_ind))
# H['worse_within_rk'][inds_to_change,new_ind] = True
# if not H['local_pt'][new_ind]:
# H['worse_within_rk'][H['dist_to_all'] > r_k] = False
updated_inds.update(new_inds)
return updated_inds
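# Note on the cdist call above: cdist(A, B, 'euclidean') with A of shape (1, n) and B of
# shape (m, n) returns a (1, m) array of pairwise distances, so flatten() yields one
# distance from the new point to every previously returned point, e.g.
#   cdist(np.array([[0., 0.]]), np.array([[3., 4.], [1., 0.]])).flatten()  ->  array([5., 1.])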
def update_history_optimal(x_opt, H, run_inds):
"""
    Updates the history after any point has been declared a local minimum
"""
opt_ind = np.where(np.logical_and(np.equal(x_opt,H['x_on_cube']).all(1),~np.isinf(H['f'])))[0]
assert len(opt_ind) == 1, "Why isn't there exactly one optimal point?"
assert opt_ind in run_inds, "Why isn't the run optimum a point in the run?"
H['local_min'][opt_ind] = 1
H['num_active_runs'][run_inds] -= 1
def advance_localopt_method(H, gen_specs, c_flag, run, persis_info):
"""
Moves a local optimization method one iteration forward. We currently do
this by feeding all past evaluations from a run to the method and then
storing the first new point generated
"""
global x_new, pt_in_run, total_pts_in_run # Used to generate a next local opt point
while 1:
sorted_run_inds = persis_info['run_order'][run]
assert all(H['returned'][sorted_run_inds])
x_new = np.ones((1,len(gen_specs['ub'])))*np.inf; pt_in_run = 0; total_pts_in_run = len(sorted_run_inds)
if gen_specs['localopt_method'] in ['LN_SBPLX', 'LN_BOBYQA', 'LN_NELDERMEAD', 'LD_MMA']:
if gen_specs['localopt_method'] in ['LD_MMA']:
fields_to_pass = ['x_on_cube','f','grad']
else:
fields_to_pass = ['x_on_cube','f']
try:
x_opt, exit_code = set_up_and_run_nlopt(H[fields_to_pass][sorted_run_inds], gen_specs)
except Exception as e:
exit_code = 0
print(e.__doc__)
print(e.args)
print("These are the points in the run that has failed:", H['x_on_cube'][sorted_run_inds])
_, _, tb = sys.exc_info()
traceback.print_tb(tb) # Fixed format
tb_info = traceback.extract_tb(tb)
filename, line, func, text = tb_info[-1]
print('An error occurred on line {} in statement {}'.format(line, text))
elif gen_specs['localopt_method'] in ['pounders']:
if c_flag:
Run_H_F = np.zeros(len(sorted_run_inds),dtype=[('fvec',float,gen_specs['components'])])
for i,ind in enumerate(sorted_run_inds):
for j in range(gen_specs['components']):
                        Run_H_F['fvec'][i][j] = H['f_i'][np.logical_and(H['pt_id']==H['pt_id'][ind], H['obj_component']==j)]
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import random
import numpy as np
from six.moves import xrange
import tensorflow as tf
from tensorflow.python.framework import random_seed
from tefla.core import rnn_cell
class RNN_CellTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
tf.set_random_seed(1)
def testAttentionCellFailures(self):
with self.assertRaisesRegexp(TypeError, "The parameter cell is not RNNCell."):
rnn_cell.AttentionCell(None, 0, None)
num_units = 8
with tf.Graph().as_default():
lstm_cell = rnn_cell.LSTMCell(num_units, None)
with self.assertRaisesRegexp(ValueError, "attn_length should be greater than zero, got 0"):
rnn_cell.AttentionCell(lstm_cell, 0, None)
with self.assertRaisesRegexp(ValueError, "attn_length should be greater than zero, got -1"):
rnn_cell.AttentionCell(lstm_cell, -1, True)
def testAttentionCellZeros(self):
num_units = 8
attn_length = 16
batch_size = 3
input_size = 4
with tf.Graph().as_default():
with self.test_session() as sess:
with tf.variable_scope("state_is_tuple"):
lstm_cell = rnn_cell.LSTMCell(num_units, None)
cell = rnn_cell.AttentionCell(lstm_cell, attn_length, None)
zeros = tf.zeros([batch_size, num_units], dtype=np.float32)
attn_state_zeros = tf.zeros([batch_size, attn_length * num_units], dtype=np.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
inputs = tf.zeros([batch_size, input_size], dtype=tf.float32)
output, state = cell(inputs, zero_state)
self.assertEquals(output.get_shape(), [batch_size, num_units])
self.assertEquals(len(state), 3)
self.assertEquals(len(state[0]), 2)
self.assertEquals(state[0][0].get_shape(), [batch_size, num_units])
self.assertEquals(state[0][1].get_shape(), [batch_size, num_units])
self.assertEquals(state[1].get_shape(), [batch_size, num_units])
self.assertEquals(state[2].get_shape(), [batch_size, attn_length * num_units])
tensors = [output] + list(state)
zero_result = sum([tf.reduce_sum(tf.abs(x)) for x in tensors])
sess.run(tf.global_variables_initializer())
self.assertTrue(sess.run(zero_result) < 1e-6)
def testAttentionCellValues(self):
num_units = 8
attn_length = 16
batch_size = 3
with tf.Graph().as_default():
with self.test_session() as sess:
with tf.variable_scope("state_is_tuple"):
lstm_cell = rnn_cell.LSTMCell(num_units, None)
cell = rnn_cell.AttentionCell(lstm_cell, attn_length, None)
zeros = tf.constant(
0.1 * np.ones([batch_size, num_units], dtype=np.float32), dtype=tf.float32)
attn_state_zeros = tf.constant(
0.1 * np.ones([batch_size, attn_length * num_units], dtype=np.float32),
dtype=tf.float32)
zero_state = ((zeros, zeros), zeros, attn_state_zeros)
inputs = tf.constant(
np.array([[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32),
dtype=tf.float32)
output, state = cell(inputs, zero_state)
concat_state = tf.concat([state[0][0], state[0][1], state[1], state[2]], 1)
sess.run(tf.global_variables_initializer())
output, state = sess.run([output, concat_state])
for i in range(1, batch_size):
self.assertTrue(float(np.linalg.norm((output[0, :] - output[i, :]))) > 1e-6)
self.assertTrue(float(np.linalg.norm((state[0, :] - state[i, :]))) > 1e-6)
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m_bad = tf.zeros([1, 4])
m_good = (tf.zeros([1, 2]), tf.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2, None) for _ in range(2)], state_is_tuple=True)(x, m_bad)
_, ml = rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2, None) for _ in range(2)], state_is_tuple=True)(x, m_good)
sess.run([tf.global_variables_initializer()])
res = sess.run(
ml, {
x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])
})
def testHighwayWrapper(self):
with self.test_session() as sess:
with tf.variable_scope("base_cell", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 3])
base_cell = rnn_cell.GRUCell(3, None, w_init=tf.constant_initializer(0.5))
g, m_new = base_cell(x, m)
with tf.variable_scope("hw_cell", initializer=tf.constant_initializer(0.5)):
hw_cell = rnn_cell.HighwayCell(
rnn_cell.GRUCell(3, None, w_init=tf.constant_initializer(0.5)),
None,
carry_bias_init=-100.0)
g_res, m_new_res = hw_cell(x, m)
sess.run([tf.global_variables_initializer()])
res = sess.run([g, g_res, m_new, m_new_res], {
x: np.array([[1., 1., 1.]]),
m: np.array([[0.1, 0.1, 0.1]])
})
# As carry_bias_init is very negative, the carry gate is 'open' and the
# transform gate is 'closed'. This means the output equals the input.
self.assertAllClose(res[1], res[0])
# States are left untouched
self.assertAllClose(res[2], res[3])
def testNASCell(self):
num_units = 6
batch_size = 3
expected_output = np.array([[0.576751, 0.576751, 0.576751, 0.576751, 0.576751, 0.576751],
[0.618936, 0.618936, 0.618936, 0.618936, 0.618936, 0.618936],
[0.627393, 0.627393, 0.627393, 0.627393, 0.627393, 0.627393]])
expected_state = np.array([[
0.71579772, 0.71579772, 0.71579772, 0.71579772, 0.71579772, 0.71579772, 0.57675087,
0.57675087, 0.57675087, 0.57675087, 0.57675087, 0.57675087
], [
0.78041625, 0.78041625, 0.78041625, 0.78041625, 0.78041625, 0.78041625, 0.6189357,
0.6189357, 0.61893570, 0.6189357, 0.6189357, 0.6189357
], [
0.79457647, 0.79457647, 0.79457647, 0.79457647, 0.79457653, 0.79457653, 0.62739348,
0.62739348, 0.62739348, 0.62739348, 0.62739348, 0.62739348
]])
with self.test_session() as sess:
with tf.variable_scope("nas_test", initializer=tf.constant_initializer(0.5)):
cell = rnn_cell.NASCell(num_units, None, w_init=tf.constant_initializer(0.5))
inputs = tf.constant(
np.array([[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32),
dtype=tf.float32)
state_value = tf.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=tf.float32)
init_state = rnn_cell.core_rnn_cell.LSTMStateTuple(state_value, state_value)
output, state = cell(inputs, init_state)
sess.run([tf.global_variables_initializer()])
res = sess.run([output, state])
      # This is a smoke test: only making sure the expected values do not change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
# There should be 2 states in the tuple.
self.assertEqual(len(res[1]), 2)
# Checking the shape of each state to be batch_size * num_units
new_c, new_h = res[1]
self.assertEqual(new_c.shape[0], batch_size)
self.assertEqual(new_c.shape[1], num_units)
self.assertEqual(new_h.shape[0], batch_size)
self.assertEqual(new_h.shape[1], num_units)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testNASCellProj(self):
num_units = 6
batch_size = 3
num_proj = 5
expected_output = np.array([[1.697418, 1.697418, 1.697418, 1.697418,
1.697418], [1.840037, 1.840037, 1.840037, 1.840037, 1.840037],
[1.873985, 1.873985, 1.873985, 1.873985, 1.873985]])
expected_state = np.array([[
0.69855207, 0.69855207, 0.69855207, 0.69855207, 0.69855207, 0.69855207, 1.69741797,
1.69741797, 1.69741797, 1.69741797, 1.69741797
], [
0.77073824, 0.77073824, 0.77073824, 0.77073824, 0.77073824, 0.77073824, 1.84003687,
1.84003687, 1.84003687, 1.84003687, 1.84003687
], [
0.78973997, 0.78973997, 0.78973997, 0.78973997, 0.78973997, 0.78973997, 1.87398517,
1.87398517, 1.87398517, 1.87398517, 1.87398517
]])
with self.test_session() as sess:
with tf.variable_scope("nas_proj_test", initializer=tf.constant_initializer(0.5)):
cell = rnn_cell.NASCell(
num_units, None, w_init=tf.constant_initializer(0.5), num_proj=num_proj)
inputs = tf.constant(
np.array([[1., 1., 1., 1.], [2., 2., 2., 2.], [3., 3., 3., 3.]], dtype=np.float32),
dtype=tf.float32)
state_value_c = tf.constant(
0.1 * np.ones((batch_size, num_units), dtype=np.float32), dtype=tf.float32)
state_value_h = tf.constant(
0.1 * np.ones((batch_size, num_proj), dtype=np.float32), dtype=tf.float32)
init_state = rnn_cell.core_rnn_cell.LSTMStateTuple(state_value_c, state_value_h)
output, state = cell(inputs, init_state)
sess.run([tf.global_variables_initializer()])
res = sess.run([output, state])
      # This is a smoke test: only making sure the expected values do not change.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], expected_output)
# There should be 2 states in the tuple.
self.assertEqual(len(res[1]), 2)
# Checking the shape of each state to be batch_size * num_units
new_c, new_h = res[1]
self.assertEqual(new_c.shape[0], batch_size)
self.assertEqual(new_c.shape[1], num_units)
self.assertEqual(new_h.shape[0], batch_size)
self.assertEqual(new_h.shape[1], num_proj)
self.assertAllClose(np.concatenate(res[1], axis=1), expected_state)
def testConv1DLSTMCell(self):
with self.test_session() as sess:
shape = [2, 1]
filter_size = [3]
num_features = 1
batch_size = 2
expected_state_c = np.array(
[[[1.80168676], [1.80168676]], [[2.91189098], [2.91189098]]], dtype=np.float32)
expected_state_h = np.array(
[[[0.83409756], [0.83409756]], [[0.94695842], [0.94695842]]], dtype=np.float32)
with tf.variable_scope("root", initializer=tf.constant_initializer(1.0 / 2.0)):
x = tf.placeholder(tf.float32, [None, None, 1])
cell = rnn_cell.Conv1DLSTMCell(
input_shape=shape,
kernel_shape=filter_size,
output_channels=num_features,
reuse=None,
w_init=tf.constant_initializer(1.0 / 2.0))
hidden = cell.zero_state(tf.shape(x)[0], tf.float32)
output, state = cell(x, hidden)
sess.run([tf.global_variables_initializer()])
res = sess.run(
[output, state], {
hidden[0].name: np.array([[[1.], [1.]], [[2.], [2.]]]),
x.name: np.array([[[1.], [1.]], [[2.], [2.]]]),
})
# This is a smoke test, making sure expected values are unchanged.
self.assertEqual(len(res), 2)
self.assertAllClose(res[0], res[1].h)
self.assertAllClose(res[1].c, expected_state_c)
self.assertAllClose(res[1].h, expected_state_h)
def test_without_residuals(self):
inputs = tf.constant(np.random.randn(1, 2), dtype=tf.float32)
state = (tf.constant(np.random.randn(1, 2), dtype=tf.float32),
tf.constant(np.random.randn(1, 2), dtype=tf.float32))
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
standard_cell = rnn_cell.MultiRNNCell(
[rnn_cell.GRUCell(2, None, w_init=tf.constant_initializer(0.5)) for _ in range(2)],
state_is_tuple=True)
res_standard = standard_cell(inputs, state, scope="standard")
test_cell = rnn_cell.ExtendedMultiRNNCell(
[rnn_cell.GRUCell(2, None, w_init=tf.constant_initializer(0.5)) for _ in range(2)])
res_test = test_cell(inputs, state, scope="test")
with self.test_session() as sess:
sess.run([tf.global_variables_initializer()])
res_standard_, res_test_, = sess.run([res_standard, res_test])
# Make sure it produces the same results as the standard cell
self.assertAllClose(res_standard_[0], res_test_[0])
self.assertAllClose(res_standard_[1][0], res_test_[1][0])
self.assertAllClose(res_standard_[1][1], res_test_[1][1])
def _test_with_residuals(self, inputs, **kwargs):
"""Runs the cell in a session"""
inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)
state = (tf.constant(np.random.randn(1, 2), dtype=tf.float32),
tf.constant(np.random.randn(1, 2), dtype=tf.float32))
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
test_cell = rnn_cell.ExtendedMultiRNNCell(
[rnn_cell.GRUCell(2, None, w_init=tf.constant_initializer(0.5)) for _ in range(2)],
residual_connections=True,
**kwargs)
res_test = test_cell(inputs, state, scope="test")
with self.test_session() as sess:
sess.run([tf.global_variables_initializer()])
return sess.run(res_test)
def _test_constant_shape(self, combiner):
"""Tests a residual combiner whose shape doesn't change
with depth"""
    inputs = np.random.randn(1, 2)
import os
import numpy as np
import cv2
from util import draw_flow
from flow_mode_detect import determine_flow_mode, Direction, Motion_pattern
import flow_vis
def downsample_flow(flow, n_grid_x, n_grid_y, smoothing=True):
h, w, _ = flow.shape
    if h < n_grid_y or w < n_grid_x:
        # TODO: handle flow fields smaller than the requested grid
        pass
linspace_x = np.linspace(0, w, num=n_grid_x+1, endpoint=True)
linspace_y = np.linspace(0, h, num=n_grid_y+1, endpoint=True)
grids_x = np.round(linspace_x).astype(int)
grids_y = np.round(linspace_y).astype(int)
if smoothing:
flow = cv2.GaussianBlur(flow, ksize=(3,3), sigmaX=3, sigmaY=3, borderType=cv2.BORDER_REPLICATE)
flow_vector = np.zeros((n_grid_y, n_grid_x, 2))
for i in range(n_grid_x):
x1, x2 = grids_x[i:i+2]
for j in range(n_grid_y):
y1, y2 = grids_y[j:j+2]
            flow_vector[j, i] = np.mean(flow[y1:y2, x1:x2, :], axis=(0, 1))  # row index = y grid, column index = x grid
return flow_vector
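# Usage sketch for downsample_flow (the shapes below are illustrative assumptions):
#   dense = np.random.randn(480, 640, 2).astype(np.float32)  # per-pixel flow, shape (H, W, 2)
#   coarse = downsample_flow(dense, n_grid_x=8, n_grid_y=8)   # block-averaged vectors, shape (8, 8, 2)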
def forward_flow(h, w, cx, cy, random=False):
grid = np.mgrid[0:h, 0:w]
grid[0] -= cy
grid[1] -= cx
# grid = grid.transpose(1,2,0)
flow = np.zeros((h, w, 2))
flow[:, :, 0] = grid[1] / int(h/100)
flow[:, :, 1] = grid[0] / int(w/100)
if random:
flow += np.random.random(flow.shape) * 20 - 10
return flow / 2
def yaw_flow(h, w, cx, cy, random=False):
grid = np.mgrid[0:h, 0:w]
grid[0] -= cy
grid[1] -= cx
# grid = grid.transpose(1,2,0)
flow = np.zeros((h, w, 2))
flow[:, :, 0] = grid[0] / int(h/100)
flow[:, :, 1] = -grid[1] / int(w/100)
if random:
flow += np.random.random(flow.shape) * 20 - 10
return flow
def uniform_flow(h, w, mx, my, random=False):
flow = np.zeros((h, w, 2))
flow[:, :, 0] = mx
flow[:, :, 1] = my
if random:
# # TODO independent gaussian random is not a good idea for flow
# flow = np.random.normal(loc=[mx, my], scale=[0.05*mx, 0.05*my], size=(h, w, 2))
min_mag = min(mx, my)
min_mag = max(min_mag, 10)
flow += np.random.random(flow.shape) * (0.5*min_mag) - 0.25*min_mag
return flow
if __name__ == '__main__':
cv2.destroyAllWindows()
h, w = 601, 971
h0, w0 = 256, 832
# h0, w0 = 61, 200
cx, cy = int(w/2), int(h/2)
if False:
mode_name = 'Ascending'
# flow = forward_flow(h, w, cx, cy, False)
flow = uniform_flow(h, w, mx=50, my=10, random=True)
# flow = yaw_flow(h, w, cx, cy, False)
# flow = yaw_flow(h, w, cx-50, cy-50, False)
# n_grid = 16
# step = (int(flow.shape[0] / n_grid), int(flow.shape[1] / n_grid))
step = 32
flow = cv2.resize(flow, (w0, h0))
arrows_only = draw_flow(np.ones((h0, w0, 3)) * 255, flow, step=step, color=(0, 0, 0), random=True)
# arrows_only = cv2.resize(arrows_only, (832, 256))
cv2.imshow('flow arrows', arrows_only)
flow_color = flow_vis.flow_to_color(flow, convert_to_bgr=False)
# flow_color = cv2.resize(flow_color, (832, 256))
cv2.imshow('flow color', flow_color)
key = cv2.waitKey(0)
if key == 32:
# if False:
# save this frame
save_file = 'flow_pattern_vector_' + mode_name + '.png'
# cv2.imwrite(save_file, img_arrows)
cv2.imwrite(save_file, arrows_only)
print(save_file, 'saved')
save_file = 'flow_pattern_color_' + mode_name + '.png'
cv2.imwrite(save_file, flow_color)
print('\t', save_file, 'saved')
exit(0)
else:
# Ubuntu
flow_dir = '/home/yu/datasets/KITTI/dataset/sequences/00/flow/'
img_dir = '/home/yu/datasets/KITTI/dataset/sequences/00/image_2_reshape/'
# XPS
# flow_dir = r'C:\D_Drive\datasets\kitti\dataset\sequences\00\flow'
# img_dir = r'C:\D_Drive\datasets\kitti\dataset\sequences\00\image_2_reshape'
flow_files = sorted([f for f in os.listdir(flow_dir) if 'fwd' in f])
img_files = sorted(os.listdir(img_dir))
# pattern_sum = np.zeros((4,4))
# pattern_cnt = 0
last_motion_mode = None
# stds = []
start = 98
end = 99
n_grid = 8
# for flow_filename, img_filename in zip(flow_files[100:250], img_files[101:251]):
# for flow_filename, img_filename in zip(flow_files[0:85], img_files[1:86]):
for flow_filename, img_filename in zip(flow_files[start:end], img_files[start+1:end+1]):
# for flow_filename, img_filename in zip(flow_files, img_files[1:]):
flow = np.load(os.path.join(flow_dir, flow_filename))
flow = flow[0].transpose(1,2,0)
# resize flow
# # h_new, w_new = int(flow.shape[0]/n_grid), int(flow.shape[1]/n_grid)
# # flow = cv2.resize(flow, (w_new, h_new), interpolation=cv2.INTER_NEAREST)
# flow = cv2.resize(flow, (n_grid, n_grid), interpolation=cv2.INTER_NEAREST)
flow_vector = downsample_flow(flow, n_grid, n_grid, smoothing=True)
motion_mode, retval = determine_flow_mode(flow_vector)
print(flow_filename, motion_mode, retval)
# pattern, trend = determine_flow_mode(flow)
# pattern_sum = pattern_sum + pattern
# pattern_cnt += 1
# flow = cv2.resize(flow, (w, h), interpolation=cv2.INTER_NEAREST)
step = (int(flow.shape[0]/n_grid), int(flow.shape[1]/n_grid))
# step = 32
# # draw on white background
# img_arrows = draw_flow(np.ones((flow.shape[0], flow.shape[1], 3))*255, flow, step=step, random=True)
# # draw on color images
img = cv2.imread(os.path.join(img_dir, img_filename))
# img_arrows = draw_flow(img, flow, step=step, color=(0,255,0), random=True)
flow_color = flow_vis.flow_to_color(flow * 2, convert_to_bgr=False)
            arrows_only = draw_flow(np.ones_like(img) * 255, flow, step=step, color=(0, 0, 0), random=True)
from collections import defaultdict
from pathlib import Path
from textwrap import dedent
import networkx as nx
import numpy as np
from Qiber3D import Render, Figure, IO
from Qiber3D import config, helper
class Segment:
"""
Class representing the small element in a network
:param point: ordered list of points forming the Segment
:type point: ndarray
:param radius: radii (same order as `point`)
:type radius: ndarray
:param segment_index: unique identifier
:type segment_index: int
:ivar sid: unique segment identifier
    :vartype sid: int
:ivar point: ordered points forming the Segment
:vartype point: ndarray
:ivar x: ordered list of x coordinates
:vartype x: ndarray
:ivar y: ordered list of y coordinates
:vartype y: ndarray
:ivar z: ordered list of z coordinates
:vartype z: ndarray
:ivar radius: radii in same order as `point` (also available as **r**)
:vartype radius: ndarray
:ivar average_radius: average radius
:vartype average_radius: float
:ivar cylinder_radius: radius if segment is interpreted as single cylinder
:vartype cylinder_radius: float
:ivar diameter: diameters in same order as `point` (also available as **d**)
:vartype diameter: ndarray
:ivar average_diameter: average diameters
:vartype average_diameter: float
:ivar start: start point coordinates
:vartype start: tuple
:ivar end: end point coordinates
:vartype end: tuple
:ivar vector: vectors between points
:vartype vector: ndarray
:ivar direction: vector pointing from `start` to `end`
:vartype direction: ndarray
:ivar length: length from `start` to `end`
:vartype length: float
:ivar volume: Segment volume modeled as truncated cones
:vartype volume: float
"""
def __init__(self, point, radius, segment_index):
self.sid = segment_index
if len(point) < 2:
raise ValueError
self.point = point
self.radius = radius
self.start = tuple(round(p, 4) for p in self.point[0])
self.end = tuple(round(p, 4) for p in self.point[-1])
self.vector = np.diff(self.point, axis=0)
self.direction = np.sum(self.vector, axis=0)
self.length = np.sum(np.linalg.norm(self.vector, axis=1))
self._color = config.render.color
self.volume = np.sum(np.linalg.norm(self.vector, axis=1) * np.pi / 3 * (
self.radius[1:] ** 2 + self.radius[:-1] ** 2 + (self.radius[1:]) * (self.radius[:-1])))
@property
def r(self):
return self.radius
@property
def diameter(self):
return self.radius * 2.0
@property
def d(self):
return self.diameter
@property
def average_diameter(self):
return np.average(self.diameter)
@property
def average_radius(self):
return np.average(self.radius)
@property
def cylinder_radius(self):
return np.sqrt((self.volume/self.length)/np.pi)
@property
def x(self):
return self.point[:, 0]
@property
def y(self):
return self.point[:, 1]
@property
def z(self):
return self.point[:, 2]
def __len__(self):
return len(self.point)
def __str__(self):
info = f"""\
Segment ID: {self.sid}
Number of parts: {len(self)}
Total length: {self.length:.2f}
Total volume: {self.volume:.2f}
Average radius: {self.average_radius:.2f}
Cylinder radius: {self.cylinder_radius:.2f}"""
return dedent(info)
def __repr__(self):
return f'Segment {self.sid} l={self.length:.2f}, V={self.volume:.2f}'
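# A small construction sketch for Segment (the coordinates and radii are made-up illustration values):
#   pts = np.array([[0., 0., 0.], [1., 0., 0.], [2., 0., 0.]])
#   seg = Segment(pts, np.array([0.5, 0.4, 0.3]), segment_index=0)
#   seg.length -> 2.0 ; seg.direction -> array([2., 0., 0.])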
class Fiber:
"""
Class representing the large elements in a network
:param network: overarching network
:type network: :class:`Network`
:param fiber_id: unique fiber identifier
:type fiber_id: int
:param segment_ids: list of segment identifier forming the **Fiber**
:type segment_ids: list
:ivar fid: unique fiber identifier
:vartype aid: int
:ivar segment: directory of :class:`Segment` forming the :class:`Fiber`
:vartype aid: dict
:ivar average_radius: average radius
:vartype average_radius: float
:ivar cylinder_radius: radius if segment is interpreted as single cylinder
:vartype cylinder_radius: float
:ivar average_diameter: average diameters
:vartype average_diameter: float
:ivar length: overall length
:vartype length: float
:ivar volume: overall volume modeled as truncated cones
:vartype volume: float
:ivar graph: :class:`Fiber` represented as networkx graph
:vartype graph: nx.Graph
"""
def __init__(self, network, fiber_id, segment_ids):
self.fid = fiber_id
self.sid_list = list(segment_ids)
self.segment = {sid: network.segment[sid] for sid in segment_ids}
self.graph = nx.Graph()
for segment in self.segment.values():
self.graph.add_edge(network.node_lookup[segment.start], network.node_lookup[segment.end],
length=segment.length, radius=segment.cylinder_radius,
volume=segment.volume, tree_max_length=-segment.length,
tree_max_volume=-segment.volume, sid=segment.sid)
@property
def volume(self):
return sum([segment.volume for segment in self.segment.values()])
@property
def length(self):
return sum([segment.length for segment in self.segment.values()])
@property
def cylinder_radius(self):
return np.sqrt((self.volume / self.length) / np.pi)
@property
def average_radius(self):
return np.average([segment.average_radius for segment in self.segment.values()])
@property
def average_diameter(self):
return 2.0 * np.average([segment.average_radius for segment in self.segment.values()])
def __len__(self):
return len(self.sid_list)
def __str__(self):
info = f"""\
Fiber ID: {self.fid}
Number of segments: {len(self)}
Total length: {self.length:.2f}
Total volume: {self.volume:.2f}
Average radius: {self.average_radius:.2f}
Cylinder radius: {self.cylinder_radius:.2f}"""
return dedent(info)
def __repr__(self):
return f'Fiber {self.fid} l={self.length:.2f}, V={self.volume:.2f}'
class Network:
"""
Class representing the complete network
:param data: metadata and segment data collection
:type data: dict
:ivar segment: directory of :class:`Segment` forming the :class:`Network`
    :vartype segment: dict
:ivar fiber: directory of :class:`Fiber` forming the :class:`Network`
    :vartype fiber: dict
:ivar average_radius: average radius
:vartype average_radius: float
:ivar cylinder_radius: radius if segment is interpreted as single cylinder
:vartype cylinder_radius: float
:ivar average_diameter: average diameters
:vartype average_diameter: float
:ivar length: overall length
:vartype length: float
:ivar volume: overall volume modeled as truncated cones
:vartype volume: float
:ivar number_of_fibers: fiber count
    :vartype number_of_fibers: int
:ivar vector: vectors between points
:vartype vector: ndarray
:ivar direction: vector pointing from `start` to `end`
:vartype direction: ndarray
:ivar bbox: bounding box corners
:vartype bbox: ndarray
:ivar bbox_volume: bounding box volume
    :vartype bbox_volume: float
:ivar center: bounding box center
:vartype center: ndarray
:ivar bbox_size: bounding box size
:vartype bbox_size: ndarray
"""
def __init__(self, data):
self.logger = helper.get_logger()
if isinstance(data['path'], Path):
self.input_file = data['path']
self.input_file_name = self.input_file.name
else:
if data['path'] is None:
self.input_file_name = 'memory'
self.input_file = None
else:
self.input_file_name = Path(data['path'])
self.input_file = self.input_file.name
self.name = data['name']
raw_segments = data['segments']
self.extractor_steps = None
self.extractor_data = None
self.segment = {}
points = set()
self.available_segments = list(raw_segments.keys())
self.available_segments.sort()
self.cross_point_dict = defaultdict(list)
for i in self.available_segments:
i = int(i)
try:
self.segment[i] = Segment(raw_segments[i]['points'], raw_segments[i]['radius'], i)
for point in self.segment[i].point:
self.cross_point_dict[(round(point[0], 4), round(point[1], 4), round(point[2], 4))].append(i)
points.add((point[0], point[1], point[2]))
except ValueError:
                self.logger.warning(f'Missing Segment {i}')
pass
self.cross_point_dict = {point: tuple(sids) for point, sids in self.cross_point_dict.items() if len(sids) > 1}
self.node_lookup = helper.LookUp()
node_id = 0
for segment in self.segment.values():
for key in ('start', 'end'):
point = getattr(segment, key)
if point not in self.node_lookup:
self.node_lookup[point] = node_id
node_id += 1
self.fiber = self.__cluster_segments()
self.separate_segments = [fiber.sid_list[0] for fiber in self.fiber.values() if len(fiber) == 1]
self.clustered_segments = [fiber.sid_list for fiber in self.fiber.values() if len(fiber) > 1]
self.point = np.array(list(points))
self.bbox = np.zeros((2, 3), dtype='f8')
self.bbox[0] = np.min(self.point, axis=0)
self.bbox[1] = np.max(self.point, axis=0)
self.center = self.bbox[0] + (self.bbox[1] - self.bbox[0]) / 2.0
self.bbox_size = self.bbox[1] - self.bbox[0]
self.bbox_volume = np.product(self.bbox_size)
self.vector = np.vstack([seg.vector for seg in self.segment.values()])
self.direction = np.array([seg.direction for seg in self.segment.values()])
self.spherical_vector = helper.convert_to_spherical(helper.remove_direction(self.vector))
self.spherical_direction = helper.convert_to_spherical(helper.remove_direction(self.direction))
self.render = Render(self)
self.figure = Figure(self)
pass
def save(self, out_path='.', overwrite=False, save_steps=False):
"""
Save network to file.
:param out_path: file or folder path where to save the network
:type out_path: str, Path
:param overwrite: allow file overwrite
:type overwrite: bool
:param save_steps: add extraction steps to the saved file
:type save_steps: bool
:return: path to saved file
:rtype: Path
"""
out_path = IO.export.binary(self, out_path=out_path, overwrite=overwrite, save_steps=save_steps)
if out_path is not None:
self.logger.info(f"Network saved to {out_path.absolute()}")
return out_path
def export(self, out_path='.', overwrite=False, mode=None, **kwargs):
"""
Export the network data. Available file types: :file:`.json`, :file:`.qiber`, :file:`.swc`, :file:`.xlsx`,
:file:`.csv`, :file:`.static`, :file:`.tif` or :file:`.mv3d`. For more details see :class:`Qiber3D.io.IO`.
:param out_path: file or folder path where to save the network
:type out_path: str, Path
        :param overwrite: allow file overwrite
        :type overwrite: bool
        :param mode: export format selector (see :class:`Qiber3D.io.IO`)
        :param kwargs: additional keyword arguments passed to the exporter
        :return: path to the exported file
        :rtype: Path
"""
out_path = IO.export(self, out_path, overwrite=overwrite, mode=mode, **kwargs)
if out_path is not None:
self.logger.info(f"Network exported to {out_path.absolute()}")
return out_path
@staticmethod
def load(path, **kwargs):
"""
Load a :class:`Network`. Available file types: :file:`.tif`, :file:`.nd2`, :file:`.json`, :file:`.qiber`,
:file:`.swc`, :file:`.ntr`, :file:`.csv`, or :file:`.mv3d`. For more details see :class:`Qiber3D.io.IO`.
:param path: file path to input file
:type path: str, Path
:return: :class:`Qiber3D.Network`
"""
return IO.load(path, **kwargs)
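    # Usage sketch (the file names below are assumptions, not shipped test data):
    #   net = Network.load('example_network.json')
    #   net.save('example_network.qiber')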
def __cluster_segments(self):
clusters = []
sid_connections = set(self.cross_point_dict.values())
work_list = set(self.available_segments)
while work_list:
i = work_list.pop()
new_cluster = {i}
connections_to_clean = True
while connections_to_clean:
connections_to_clean = []
for connection in sid_connections:
for part in connection:
if part in new_cluster:
new_cluster.update(connection)
connections_to_clean.append(connection)
break
for connection in connections_to_clean:
sid_connections.remove(connection)
clusters.append(new_cluster)
work_list = work_list.difference(new_cluster)
fibers = {}
for fid, cluster in enumerate(clusters):
fibers[fid] = Fiber(self, fid, cluster)
return fibers
@property
def volume(self):
return sum([segment.volume for segment in self.segment.values()])
@property
def raster_volume(self):
        raw_volume = np.sum(self.render.raster)
import os
from collections import defaultdict
import numpy as np
import xml.dom.minidom
import math
from datetime import datetime as dt
from datetime import timezone
from echopype.convert.utils.set_groups import SetGroups
from struct import unpack
from echopype._version import get_versions
ECHOPYPE_VERSION = get_versions()['version']
del get_versions
class ConvertAZFP:
"""Class for converting AZFP `.01A` files """
def __init__(self, _path='', _xml_path=''):
self.path = _path
self.xml_path = _xml_path
self.file_name = os.path.basename(self.path)
self.FILE_TYPE = 64770
self.HEADER_SIZE = 124
self.HEADER_FORMAT = ">HHHHIHHHHHHHHHHHHHHHHHHHHHHHHHHHHHBBBBHBBBBBBBBHHHHHHHHHHHHHHHHHHHH"
self.parameters = { # a dict container for various params
# FILE LOADING AND AVERAGING:
# WJ: choice of folder and file should be explicit in from Class Convert, so comment out the below
# 'proc_dir': 1, # 1 will prompt for an entire directory to process
# 0 will prompt to load individual files in a directory
# WJ: remove the hard-coded filenames and require user to specify as inputs
# 'data_file_name': "12022316.01A", # "" will prompt for hourly AZFP files to load
# "" will prompt for XML filename if no XML file exists in the directory
# 'xml_file_name': "12022310.XML",
'platform_name': "", # Name of the platform. Set with actual value
'platform_type': "", # Type of platform. Set with actual value
'platform_code_ICES': "", # Code for the platform. Set with actual value
# WJ: there's setter and getter for salinity and pressure, so comment out below for now
# 'salinity': 29.6, # Salinity in psu
# 'pressure': 60, # in dbars (~ depth of instrument in meters)
# can be approximate. Used in soundspeed and absorption calc
# 'hourly_avg_temp': 5, # Default value if no AZFP temperature is found.
# Used to calculate sound-speed and range
# we require users to explicitly set temp for calculating ss and range in mode class
# PLOTTING WJ: delete plot/channel/value_2_plot below since plotting is in viz module
# 'plot': 1, # Show an echogram plot for each channel
# 'channel': 1, # freq to plot #1-4, Default = 1
# 'value_2_plot': 2, # 1,2,3,4 = Counts, Sv, TS, Temperature/Tilts, default 2
# for Sv and Ts plotting only, values with counts < NoiseFloor will set to -150,
# can use individual values for each frequency, ex. "noise_floor: [10000,11000,11000,11500]"
# 'noise_floor': 10000, # Default = 10000 WJ: this should be in model module, have made a note there
# Instrument on the bottom looking up (range bins), 1 at surface looking down (depth bins).
# This changes the y dir on the echogram plots only.
# 'orientation': 1, # Default = 1 WJ: not used as echogram plotting is in viz module
# Use tilt corrected ranges for the echogram plots
# Will give a warning if the tilt magnitudes are unreasonable (>20 deg)
# 'use_tilt_corr': 0 # Default = 0 WJ: now an input flag for in AZFP model method calc_range
}
# Adds to self.parameters the contents of the xml file
self.loadAZFPxml()
# Initialize variables that'll be filled later
self.nc_path = None
self.unpacked_data = None
def loadAZFPxml(self):
"""Parses the AZFP XML file.
"""
def get_value_by_tag_name(tag_name, element=0):
"""Returns the value in an XML tag given the tag name and the number of occurrences."""
return px.getElementsByTagName(tag_name)[element].childNodes[0].data
# TODO: consider writing a ParamAZFPxml class for storing parameters
px = xml.dom.minidom.parse(self.xml_path)
self.parameters['num_freq'] = int(get_value_by_tag_name('NumFreq'))
self.parameters['serial_number'] = int(get_value_by_tag_name('SerialNumber'))
self.parameters['burst_interval'] = float(get_value_by_tag_name('BurstInterval'))
self.parameters['pings_per_burst'] = int(get_value_by_tag_name('PingsPerBurst'))
self.parameters['average_burst_pings'] = int(get_value_by_tag_name('AverageBurstPings'))
# Temperature coeff
self.parameters['ka'] = float(get_value_by_tag_name('ka'))
self.parameters['kb'] = float(get_value_by_tag_name('kb'))
self.parameters['kc'] = float(get_value_by_tag_name('kc'))
self.parameters['A'] = float(get_value_by_tag_name('A'))
self.parameters['B'] = float(get_value_by_tag_name('B'))
self.parameters['C'] = float(get_value_by_tag_name('C'))
# tilts
self.parameters['X_a'] = float(get_value_by_tag_name('X_a'))
self.parameters['X_b'] = float(get_value_by_tag_name('X_b'))
self.parameters['X_c'] = float(get_value_by_tag_name('X_c'))
self.parameters['X_d'] = float(get_value_by_tag_name('X_d'))
self.parameters['Y_a'] = float(get_value_by_tag_name('Y_a'))
self.parameters['Y_b'] = float(get_value_by_tag_name('Y_b'))
self.parameters['Y_c'] = float(get_value_by_tag_name('Y_c'))
self.parameters['Y_d'] = float(get_value_by_tag_name('Y_d'))
# Initializing fields for each transducer frequency
self.parameters['dig_rate'] = []
self.parameters['lock_out_index'] = []
self.parameters['gain'] = []
self.parameters['pulse_length'] = []
self.parameters['DS'] = []
self.parameters['EL'] = []
self.parameters['TVR'] = []
self.parameters['VTX'] = []
self.parameters['BP'] = []
self.parameters['range_samples'] = []
self.parameters['range_averaging_samples'] = []
# Get parameters for each transducer frequency
for freq_ch in range(self.parameters['num_freq']):
self.parameters['range_samples'].append(int(get_value_by_tag_name('RangeSamples', freq_ch)))
self.parameters['range_averaging_samples'].append(int(get_value_by_tag_name('RangeAveragingSamples', freq_ch)))
self.parameters['dig_rate'].append(float(get_value_by_tag_name('DigRate', freq_ch)))
self.parameters['lock_out_index'].append(float(get_value_by_tag_name('LockOutIndex', freq_ch)))
self.parameters['gain'].append(float(get_value_by_tag_name('Gain', freq_ch)))
self.parameters['pulse_length'].append(float(get_value_by_tag_name('PulseLen', freq_ch)))
self.parameters['DS'].append(float(get_value_by_tag_name('DS', freq_ch)))
self.parameters['EL'].append(float(get_value_by_tag_name('EL', freq_ch)))
self.parameters['TVR'].append(float(get_value_by_tag_name('TVR', freq_ch)))
self.parameters['VTX'].append(float(get_value_by_tag_name('VTX0', freq_ch)))
self.parameters['BP'].append(float(get_value_by_tag_name('BP', freq_ch)))
self.parameters['sensors_flag'] = float(get_value_by_tag_name('SensorsFlag'))
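        # In summary, the XML is expected to provide NumFreq, SerialNumber, BurstInterval,
        # PingsPerBurst, AverageBurstPings, the temperature coefficients (ka, kb, kc, A, B, C),
        # the tilt coefficients (X_a..X_d, Y_a..Y_d) and SensorsFlag, plus one RangeSamples,
        # RangeAveragingSamples, DigRate, LockOutIndex, Gain, PulseLen, DS, EL, TVR, VTX0 and BP
        # entry per transducer frequency.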
@staticmethod
def get_fields():
"""Returns the fields contained in each header of the raw file."""
_fields = (
('profile_flag', 'u2'),
('profile_number', 'u2'),
('serial_number', 'u2'),
('ping_status', 'u2'),
('burst_int', 'u4'),
('year', 'u2'), # Year
('month', 'u2'), # Month
('day', 'u2'), # Day
('hour', 'u2'), # Hour
('minute', 'u2'), # Minute
('second', 'u2'), # Second
('hundredths', 'u2'), # Hundredths of a second
('dig_rate', 'u2', 4), # Digitalization rate for each channel
('lockout_index', 'u2', 4), # Lockout index for each channel
('num_bins', 'u2', 4), # Number of bins for each channel
('range_samples_per_bin', 'u2', 4), # Range samples per bin for each channel
('ping_per_profile', 'u2'), # Number of pings per profile
            ('avg_pings', 'u2'),  # Flag indicating whether the pings are averaged in time
('num_acq_pings', 'u2'), # Pings acquired in the burst
('ping_period', 'u2'), # Ping period in seconds
('first_ping', 'u2'),
('last_ping', 'u2'),
            ('data_type', "u1", 4),  # Data type for each channel: 1=averaged data (5 bytes), 0=raw (2 bytes)
            ('data_error', 'u2'),  # Error number if an error occurred
('phase', 'u1'), # Phase number used to acquire this profile
('overrun', 'u1'), # 1 if an overrun occurred
('num_chan', 'u1'), # 1, 2, 3, or 4
('gain', 'u1', 4), # gain channel 1-4
('spare_chan', 'u1'), # spare channel
('pulse_length', 'u2', 4), # Pulse length chan 1-4 uS
('board_num', 'u2', 4), # The board the data came from channel 1-4
('frequency', 'u2', 4), # frequency for channel 1-4 in kHz
('sensor_flag', 'u2'), # Flag indicating if pressure sensor or temperature sensor is available
('ancillary', 'u2', 5), # Tilt-X, Y, Battery, Pressure, Temperature
('ad', 'u2', 2) # AD channel 6 and 7
)
return _fields
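    # Illustrative sketch (an assumption, not taken from this file): the field tuples returned
    # by get_fields can be mapped to a big-endian struct format string, which is presumably
    # how the class's HEADER_FORMAT and HEADER_SIZE constants are derived elsewhere.
    @staticmethod
    def _example_fields_to_struct_format():
        import struct
        type_map = {'u1': 'B', 'u2': 'H', 'u4': 'I'}
        fields = (('profile_flag', 'u2'), ('burst_int', 'u4'), ('dig_rate', 'u2', 4))
        fmt = '>' + ''.join(type_map[f[1]] * (f[2] if len(f) == 3 else 1) for f in fields)
        return fmt, struct.calcsize(fmt)  # ('>HIHHHH', 14 bytes)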
# TODO: move these setter and getter to the Convert class
"""Setters and getters for platform information"""
@property
def platform_name(self):
return self.parameters['platform_name']
@platform_name.setter
def platform_name(self, platform_name):
self.parameters['platform_name'] = platform_name
@property
def platform_type(self):
return self.parameters['platform_type']
@platform_type.setter
def platform_type(self, platform_type):
self.parameters['platform_type'] = platform_type
@property
def platform_code_ICES(self):
return self.parameters['platform_code_ICES']
@platform_code_ICES.setter
def platform_code_ICES(self, platform_code_ICES):
self.parameters['platform_code_ICES'] = platform_code_ICES
def _split_header(self, raw, header_unpacked, ping_num, unpacked_data, fields):
"""Splits the header information into a dictionary.
Parameters
----------
raw
open binary file
header_unpacked
output of struct unpack of raw file
ping_num
ping number
unpacked_data
current unpacked data
fields
fields to be unpacked for each ping, defined in ``get_fields``
Returns
-------
True or False depending on whether the unpacking was successful
"""
if header_unpacked[0] != self.FILE_TYPE: # first field should match hard-coded FILE_TYPE from manufacturer
check_eof = raw.read(1)
if check_eof:
print("Error: Unknown file type")
return False
header_byte_cnt = 0
        firmware_freq_len = 4  # fields with num_freq data still take 4 bytes; the extra bytes contain random numbers
field_w_freq = ('dig_rate', 'lockout_index', 'num_bins', 'range_samples_per_bin', # fields with num_freq data
'data_type', 'gain', 'pulse_length', 'board_num', 'frequency')
for field in fields:
if field[0] in field_w_freq: # fields with num_freq data
unpacked_data[field[0]].append(
header_unpacked[header_byte_cnt:header_byte_cnt + self.parameters['num_freq']])
# unpacked_data[ping_num][field[0]] = \
# header_unpacked[header_byte_cnt:header_byte_cnt + self.parameters['num_freq']]
header_byte_cnt += firmware_freq_len
elif len(field) == 3: # other longer fields ('ancillary' and 'ad')
unpacked_data[field[0]].append(header_unpacked[header_byte_cnt:header_byte_cnt + field[2]])
# unpacked_data[ping_num][field[0]] = \
# header_unpacked[header_byte_cnt:header_byte_cnt + field[2]]
header_byte_cnt += field[2]
else:
unpacked_data[field[0]].append(header_unpacked[header_byte_cnt])
# unpacked_data[ping_num][field[0]] = header_unpacked[header_byte_cnt]
header_byte_cnt += 1
return True
def _add_counts(self, raw, ping_num, unpacked_data):
"""Unpacks the echosounder raw data. Modifies unpacked_data in place.
Parameters
----------
raw
open binary file
ping_num
ping number
unpacked_data
current unpacked data
"""
vv_tmp = [[]] * unpacked_data['num_chan'][ping_num]
for freq_ch in range(unpacked_data['num_chan'][ping_num]):
counts_byte_size = unpacked_data['num_bins'][ping_num][freq_ch]
if unpacked_data['data_type'][ping_num][freq_ch]:
if unpacked_data['avg_pings'][ping_num]: # if pings are averaged over time
divisor = unpacked_data['ping_per_profile'][ping_num] * \
unpacked_data['range_samples_per_bin'][ping_num][freq_ch]
else:
divisor = unpacked_data['range_samples_per_bin'][ping_num][freq_ch]
ls = unpack(">" + "I" * counts_byte_size, raw.read(counts_byte_size * 4)) # Linear sum
lso = unpack(">" + "B" * counts_byte_size, raw.read(counts_byte_size * 1)) # linear sum overflow
v = (np.array(ls) + np.array(lso) * 4294967295) / divisor
v = (np.log10(v) - 2.5) * (8 * 65535) * self.parameters['DS'][freq_ch]
v[np.isinf(v)] = 0
vv_tmp[freq_ch] = v
else:
counts_chunk = raw.read(counts_byte_size * 2)
counts_unpacked = unpack(">" + "H" * counts_byte_size, counts_chunk)
vv_tmp[freq_ch] = counts_unpacked
unpacked_data['counts'].append(vv_tmp)
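    # Worked numeric sketch (hypothetical values) of the averaged-data branch above: the
    # 32-bit linear sum plus its overflow counter is divided by the number of samples
    # averaged, then converted with the per-channel DS coefficient.
    @staticmethod
    def _example_average_counts_conversion():
        import numpy as np
        ls = np.array([1.2e6, 3.4e6])        # hypothetical linear sums
        lso = np.array([0, 1])               # hypothetical linear-sum overflow counts
        divisor = 10 * 4                     # ping_per_profile * range_samples_per_bin
        DS = 0.02437                         # hypothetical DS coefficient for this channel
        v = (ls + lso * 4294967295) / divisor
        v = (np.log10(v) - 2.5) * (8 * 65535) * DS
        v[np.isinf(v)] = 0
        return v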
def _print_status(self, path, unpacked_data):
"""Prints message to console giving information about the raw file being parsed
Parameters
----------
path
path to the 01A file
unpacked_data
current unpacked data
"""
filename = os.path.basename(path)
timestamp = dt(unpacked_data['year'][0], unpacked_data['month'][0], unpacked_data['day'][0],
unpacked_data['hour'][0], unpacked_data['minute'][0],
int(unpacked_data['second'][0] + unpacked_data['hundredths'][0] / 100))
timestr = timestamp.strftime("%Y-%b-%d %H:%M:%S")
(pathstr, xml_name) = os.path.split(self.xml_path)
print(f"{dt.now().strftime('%H:%M:%S')} converting file {filename} with {xml_name}, "
f"time of first ping {timestr}")
def check_uniqueness(self):
"""Check for ping-by-ping consistency of sampling parameters and reduce if identical.
        The parameters checked here should be identical throughout all pings,
        so an error is raised if any of them is not.
"""
if not self.unpacked_data:
self.parse_raw()
field_w_freq = ('dig_rate', 'lockout_index', 'num_bins', 'range_samples_per_bin', # fields with num_freq data
'data_type', 'gain', 'pulse_length', 'board_num', 'frequency')
field_include = ('profile_flag', 'serial_number', # fields to reduce size if the same for all pings
'burst_int', 'ping_per_profile', 'avg_pings', 'ping_period',
'phase', 'num_chan', 'spare_chan')
for field in field_w_freq:
uniq = np.unique(self.unpacked_data[field], axis=0)
if uniq.shape[0] == 1:
self.unpacked_data[field] = uniq.squeeze()
else:
raise ValueError(f"Header value {field} is not constant for each ping")
for field in field_include:
uniq = np.unique(self.unpacked_data[field])
if uniq.shape[0] == 1:
self.unpacked_data[field] = uniq.squeeze()
else:
raise ValueError(f"Header value {field} is not constant for each ping")
def parse_raw(self):
"""Parses a raw AZFP file of the 01A file format"""
# Start of computation subfunctions
def compute_temp(counts):
"""Returns the temperature in celsius given from xml data and the counts from ancillary"""
v_in = 2.5 * (counts / 65535)
R = (self.parameters['ka'] + self.parameters['kb'] * v_in) / (self.parameters['kc'] - v_in)
T = 1 / (self.parameters['A'] + self.parameters['B'] * (math.log(R)) +
self.parameters['C'] * (math.log(R) ** 3)) - 273
return T
def compute_tilt(N, a, b, c, d):
return a + b * N + c * N**2 + d * N**3
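        # Worked sketch (hypothetical calibration coefficients): a tilt count of 32768
        # passed through the cubic polynomial above gives an angle of roughly 14.8 degrees.
        def _example_tilt():
            X_a, X_b, X_c, X_d = 0.0, 1.0e-3, -2.0e-8, 1.0e-13
            return compute_tilt(32768, X_a, X_b, X_c, X_d)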
with open(self.path, 'rb') as raw:
ping_num = 0
fields = self.get_fields()
unpacked_data = defaultdict(list)
eof = False
while not eof:
header_chunk = raw.read(self.HEADER_SIZE)
if header_chunk:
header_unpacked = unpack(self.HEADER_FORMAT, header_chunk)
# Reading will stop if the file contains an unexpected flag
if self._split_header(raw, header_unpacked, ping_num, unpacked_data, fields):
# Appends the actual 'data values' to unpacked_data
self._add_counts(raw, ping_num, unpacked_data)
if ping_num == 0:
# Display information about the file that was loaded in
self._print_status(self.file_name, unpacked_data)
                        # Compute temperature from unpacked_data[ii]['ancillary'][4]
unpacked_data['temperature'].append(compute_temp(unpacked_data['ancillary'][ping_num][4]))
                        # Compute x tilt from unpacked_data[ii]['ancillary'][0]
unpacked_data['tilt_x'].append(
compute_tilt(unpacked_data['ancillary'][ping_num][0],
self.parameters['X_a'], self.parameters['X_b'],
self.parameters['X_c'], self.parameters['X_d']))
                        # Compute y tilt from unpacked_data[ii]['ancillary'][1]
unpacked_data['tilt_y'].append(
compute_tilt(unpacked_data['ancillary'][ping_num][1],
self.parameters['Y_a'], self.parameters['Y_b'],
self.parameters['Y_c'], self.parameters['Y_d']))
# Compute cos tilt magnitude from tilt x and y values
unpacked_data['cos_tilt_mag'].append(
math.cos((math.sqrt(unpacked_data['tilt_x'][ping_num] ** 2 +
unpacked_data['tilt_y'][ping_num] ** 2)) * math.pi / 180))
else:
break
else:
# End of file
eof = True
ping_num += 1
self.unpacked_data = unpacked_data
def get_ping_time(self):
"""Returns the ping times"""
if not self.unpacked_data:
self.parse_raw()
ping_time = []
for ping_num, year in enumerate(self.unpacked_data['year']):
ping_time.append(dt(year,
self.unpacked_data['month'][ping_num],
self.unpacked_data['day'][ping_num],
self.unpacked_data['hour'][ping_num],
self.unpacked_data['minute'][ping_num],
int(self.unpacked_data['second'][ping_num] +
self.unpacked_data['hundredths'][ping_num] / 100)
).replace(tzinfo=timezone.utc).timestamp())
return ping_time
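    # Illustrative sketch (hypothetical header values): the per-ping date fields are combined
    # into a timezone-aware datetime and exported as a UTC epoch timestamp, exactly as done
    # per ping in get_ping_time above.
    @staticmethod
    def _example_ping_timestamp():
        from datetime import datetime, timezone
        year, month, day, hour, minute = 2019, 7, 1, 12, 30
        second, hundredths = 15, 50
        t = datetime(year, month, day, hour, minute, int(second + hundredths / 100))
        return t.replace(tzinfo=timezone.utc).timestamp()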
def raw2nc(self):
"""Save data from raw 01A format to netCDF4 .nc format
"""
# Subfunctions to set various dictionaries
def calc_Sv_offset(f, pulse_length):
"""Calculate a compensation for the effects of finite response
times of both the receiving and transmitting parts of the transducer.
The correction magnitude depends on the length of the transmitted pulse
and the response time (transmission and reception) of the transducer.
Called by ``_set_beam_dict()``
Parameters
----------
f
frequency in Hz
            pulse_length
                pulse length in microseconds (µs)
"""
if f > 38000:
if pulse_length == 300:
return 1.1
elif pulse_length == 500:
return 0.8
elif pulse_length == 700:
return 0.5
elif pulse_length == 900:
return 0.3
elif pulse_length == 1000:
return 0.3
else:
if pulse_length == 500:
return 1.1
elif pulse_length == 1000:
return 0.7
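        # Illustrative sketch (not from the original source): the same lookup expressed as
        # dictionaries, applied per channel. The 0.0 fallback for pulse lengths outside the
        # table is an assumption; calc_Sv_offset itself returns None in that case.
        def _example_sv_offsets():
            import numpy as np
            table_hi = {300: 1.1, 500: 0.8, 700: 0.5, 900: 0.3, 1000: 0.3}   # f > 38 kHz
            table_lo = {500: 1.1, 1000: 0.7}                                 # f <= 38 kHz
            freqs_hz = np.array([38000, 125000, 200000])                     # hypothetical
            pulse_us = np.array([1000, 500, 300])                            # hypothetical
            return np.array([(table_hi if f > 38000 else table_lo).get(p, 0.0)
                             for f, p in zip(freqs_hz, pulse_us)])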
def _set_toplevel_dict():
out_dict = dict(conventions='CF-1.7, SONAR-netCDF4-1.0, ACDD-1.3',
keywords='AZFP',
sonar_convention_authority='ICES',
sonar_convention_name='SONAR-netCDF4',
sonar_convention_version='1.0',
summary='',
title='')
return out_dict
def _set_env_dict():
out_dict = dict(temperature=self.unpacked_data['temperature'], # temperature measured at instrument
ping_time=ping_time)
return out_dict
def _set_platform_dict():
out_dict = dict(platform_name=self.parameters['platform_name'],
platform_type=self.parameters['platform_type'],
platform_code_ICES=self.parameters['platform_code_ICES'])
return out_dict
def _set_prov_dict():
attrs = ('conversion_software_name', 'conversion_software_version', 'conversion_time')
vals = ('echopype', ECHOPYPE_VERSION, dt.utcnow().isoformat(timespec='seconds') + 'Z') # use UTC time
return dict(zip(attrs, vals))
def _set_sonar_dict():
attrs = ('sonar_manufacturer', 'sonar_model', 'sonar_serial_number',
'sonar_software_name', 'sonar_software_version', 'sonar_type')
vals = ('ASL Environmental Sciences', 'Acoustic Zooplankton Fish Profiler',
self.unpacked_data['serial_number'], # should have only 1 value (identical for all pings)
'Based on AZFP Matlab Toolbox', '1.4', 'echosounder')
return dict(zip(attrs, vals))
def _set_beam_dict():
anc = np.array(self.unpacked_data['ancillary']) # convert to np array for easy slicing
dig_rate = self.unpacked_data['dig_rate'] # dim: freq
# Build variables in the output xarray Dataset
N = [] # for storing backscatter_r values for each frequency
            Sv_offset = np.zeros(freq.shape)
import numpy as np
import matplotlib.pyplot as plt
import torch
def plot_weight_graph(epochs, loss_lists, labels, name=''):
epochs_array = np.arange(epochs)
    ax = plt.axes(xlabel='epoch', ylabel='weight', xticks=np.arange(0, epochs, 10))
import pickle
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
from scipy.interpolate import InterpolatedUnivariateSpline
from astropy.convolution import Box1DKernel, Gaussian1DKernel, convolve, convolve_fft
from pysyd import functions
from pysyd import models
from pysyd import utils
from pysyd import plots
class Target:
def __init__(self, star, args):
"""
A pySYD pipeline target. Initialization stores all the relevant information and
checks/loads in data for the given target. pySYD no longer requires BOTH the time
series data and the power spectrum, but requires additional information via CLI if
the former is not provided i.e. cadence or nyquist frequency, the oversampling
factor (if relevant), etc.
Attributes
----------
star : int
the star ID
params : Dict[str,object]
the pipeline parameters
findex : Dict[str,object]
the parameters of the find excess routine
fitbg : Dict[str,object]
the parameters relevant for the background-fitting procedure
globe : Dict[str,object]
parameters relevant for estimating global asteroseismic parameters numax and dnu
verbose : bool
if true, turns on the verbose output
Parameters
----------
args : argparse.Namespace
the parsed and updated command line arguments
Methods
-------
TODO: Add methods
"""
self.name = star
self.params = args.params
self.findex = args.findex
self.fitbg = args.fitbg
self.globe = args.globe
self.verbose = args.verbose
self = utils.load_data(self, args)
def run_syd(self):
"""
Run the pySYD pipeline routines sequentially:
        1) the find excess module to identify any solar-like oscillations
2) estimates the stellar background contributions before estimating the
global asteroseismic parameters
Returns
-------
None
"""
# Run the find excess routine
if self.params[self.name]['excess']:
self = utils.get_findex(self)
self.find_excess()
# Run the global fitting routine
if utils.check_fitbg(self):
self = utils.get_fitbg(self)
self.fit_global()
if self.params['show']:
note=''
if self.verbose:
note+=' - displaying figures'
print(note)
plt.show(block=False)
input(' - press RETURN to exit')
if not self.verbose:
print('')
def find_excess(self):
"""
Automatically finds power excess due to solar-like oscillations using a
frequency-resolved, collapsed autocorrelation function (ACF).
Returns
-------
None
"""
# Make sure the binning is specified, otherwise it cannot run
if self.findex['binning'] is not None:
# Smooth the power in log-space
self.bin_freq, self.bin_pow, self.bin_pow_err = functions.bin_data(self.freq, self.pow, width=self.findex['binning'], log=True, mode=self.findex['mode'])
# Smooth the power in linear-space
self.smooth_freq, self.smooth_pow, self.smooth_pow_err = functions.bin_data(self.bin_freq, self.bin_pow, width=self.findex['smooth_width'])
if self.verbose:
print('------------------------------------------------------')
print('Running find_excess module:')
print('PS binned to %d datapoints' % len(self.smooth_freq))
# Interpolate and divide to get a crude background-corrected power spectrum
s = InterpolatedUnivariateSpline(self.smooth_freq, self.smooth_pow, k=1)
self.interp_pow = s(self.freq)
self.bgcorr_pow = self.pow/self.interp_pow
# Calculate collapsed ACF using different box (or bin) sizes
self.findex['results'][self.name] = {}
self.compare = []
for b in range(self.findex['n_trials']):
self.collapsed_acf(b)
# Select trial that resulted with the highest SNR detection
self.findex['results'][self.name]['best'] = self.compare.index(max(self.compare))+1
if self.verbose:
print('selecting model %d'%self.findex['results'][self.name]['best'])
utils.save_findex(self)
plots.plot_excess(self)
self.pickles.append('excess.pickle')
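    # Illustrative sketch (not part of pySYD itself): the collapsed ACF computed below relies
    # on the Wiener-Khinchin relation, i.e. the autocorrelation of each power-spectrum box is
    # obtained from an inverse-FFT/FFT pair. The input values here are synthetic.
    @staticmethod
    def _example_collapsed_acf_box():
        p = np.random.default_rng(0).normal(1.0, 0.1, 256)    # toy background-corrected power
        ft = np.fft.ifft(p)
        auto = np.real(np.fft.fft(ft * np.conj(ft)))          # circular autocorrelation
        return np.sum(np.absolute(auto - np.mean(auto)))      # per-box collapsed-ACF statistic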
def collapsed_acf(self, b, start=0, max_iterations=5000, max_snr=100.):
"""
Computes a collapsed autocorrelation function (ACF).
Parameters
----------
        b : int
            the trial number
        start : int
            index of the frequency array at which to start masking the frequency and power spectrum. Default value is `0`.
        max_iterations : int
            maximum number of iterations to try in the curve-fitting routine. Default value is `5000`.
        max_snr : float
            maximum SNR corresponding to the power excess. Default value is `100.0`.
Returns
-------
None
"""
constants = utils.Constants()
# Computes a collapsed ACF using different "box" (or bin) sizes
self.findex['results'][self.name][b+1] = {}
subset = np.ceil(self.boxes[b]/self.resolution)
steps = np.ceil((self.boxes[b]*self.findex['step'])/self.resolution)
cumsum = []
md = []
# Iterates through entire power spectrum using box width
while True:
if (start+subset) > len(self.freq):
break
p = self.bgcorr_pow[int(start):int(start+subset)]
auto = np.real(np.fft.fft(np.fft.ifft(p)*np.conj(np.fft.ifft(p))))
cumsum.append(np.sum(np.absolute(auto-np.mean(auto))))
md.append(np.mean(self.freq[int(start):int(start+subset)]))
start += steps
# subtract/normalize the summed ACF and take the max
md = np.array(md)
cumsum = np.array(cumsum)-min(cumsum)
csum = list(cumsum/max(cumsum))
# Pick the maximum value as an initial guess for numax
idx = csum.index(max(csum))
csum = np.array(csum)
self.findex['results'][self.name][b+1].update({'x':md,'y':csum,'maxx':md[idx],'maxy':csum[idx]})
# Fit Gaussian to get estimate value for numax
try:
            best_vars, _ = curve_fit(models.gaussian, md, csum, p0=[np.median(csum), 1.0-np.median(csum),
#!/usr/bin/env python
# Built-in imports
import math
import cmath
# General module imports
import numpy as np
# Own imports
import baxter_essentials.denavit_hartenberg as dh
class BaxterIPK:
"""
Calculate Baxter's Inverse Pose Kinematics for each limb and with the
desired degrees of freedom for the total joints.
:param TM_w0_tool: Transformation Matrix from W0
(origin of the workspace), to Baxter's Tool (end of the arm).
:param baxter_distances: list of baxter_distances from BaxterClass.
:param baxter_transformation_matrices: list of
baxter_transformation_matrices from BaxterClass.
    :param limb: arm to calculate ipk.
example: "left" or "right".
:param elbow_disposition: Elbow disposition for the mathematical
ipk solution.
example: "up", "down".
"""
def __init__(self, baxter_distances, baxter_transformation_matrices,
TM_w0_tool, limb, elbow_disposition):
self.TM_w0_tool = TM_w0_tool
self.baxter_distances = baxter_distances
self.baxter_transformation_matrices = baxter_transformation_matrices
self.calibrate_baxter_transformation_matrices()
self.limb = limb
self.elbow_disposition = elbow_disposition
def calibrate_baxter_transformation_matrices(self):
X_OFFSET = 0
Y_OFFSET = 0
Z_OFFSET = - 0.06012
calibration_matrix = np.array(
[[1, 0, 0, X_OFFSET],
[0, 1, 0, Y_OFFSET],
[0, 0, 1, Z_OFFSET],
[0, 0, 0, 1]]
)
# Add extra tool distance for Baxter's right limb (spoon)
self.baxter_transformation_matrices[1] = \
np.dot(self.baxter_transformation_matrices[1], calibration_matrix)
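    # Illustrative sketch (standalone, with an identity pose as a stand-in): right-multiplying
    # a homogeneous transform by a pure-translation matrix appends the offset in the tool
    # frame, which is what the calibration above does to the right-limb tool transform.
    @staticmethod
    def _example_tool_offset():
        TM_w0_tool = np.eye(4)            # hypothetical tool pose
        offset = np.eye(4)
        offset[2, 3] = -0.06012           # pure translation along the tool z axis
        return np.dot(TM_w0_tool, offset)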
def ipk(self):
"""
Main method to calculate the inverse pose kinematics.
:returns: list of joint-values to calculate fpk.
example: [value_limb_s0, value_limb_s1, value_limb_left_e0,
value_limb_left_e1, value_limb_left_w0, value_limb_left_w1,
value_limb_left_w2]
"""
# Simplified name for clarity in process (to avoid long initial name)
TM_list = self.baxter_transformation_matrices
# Transformation matrix from 0 to 6 (main Baxter-joints)
if self.limb == 'left':
TM_0_6 = np.dot(
np.dot(
np.linalg.inv(
                    np.dot(TM_list[0], TM_list[2])
import lib
import datetime
import torch
import argparse
import sys
import utils
import logging
from tensorboardX import SummaryWriter
from lib.model.mpnn import mp_sequential, mp_conv_residual, mp_conv_type, mp_conv_v2, global_pooling
import numpy as np
import time
import os
from utils.types import str2bool
from tqdm import tqdm
import statistics as st
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--chain_length',
type=int,
default=30,
                        help="The length of the generated chain-structured MRF")
parser.add_argument(
'--hop_cap',
type=int,
default=5,
help="The seed to generate parameter of budget factors")
parser.add_argument('--nfactors',
type=int,
default=8,
help="Number of higher order factors")
parser.add_argument('--hop_order',
type=int,
default=9,
help="Order of higher order factors")
parser.add_argument('--train_epoches',
type=int,
default=10,
help="training epoches")
parser.add_argument('--model_path',
type=str,
default='gnn',
help="Saved model path")
parser.add_argument('--model_name',
type=str,
default='mp_nn',
help="model name (PointNet, GNN)")
parser.add_argument('--neighbour',
type=int,
default=8,
help="number of neighbour in the graph")
parser.add_argument('--log_level',
type=str,
default='info',
help="log level")
parser.add_argument('--use_cuda',
type=str2bool,
default=True,
help="Use cuda or not")
parser.add_argument('--train_path',
type=str,
default="synthetic_data/raw_train.dat",
help="path of the training dataset")
parser.add_argument('--test_path',
type=str,
default="synthetic_data/raw_test.dat",
help="path of the testing dataset")
parser.add_argument('--train_size', type=int, default=90000,
help="size of training dataset")
parser.add_argument('--test_size', type=int, default=10000,
help="size of testing dataset")
parser.add_argument('--batch_size', type=int, default=32)
return parser.parse_args()
def generate_knn_table(n, k):
nn_idx = np.zeros([n, k]).astype(np.int64)
efeature = np.zeros([1, n, k]).astype(np.float32)
hk = k // 2
for i in range(n):
        arr = list(range(i-hk, i)) + list(range(i+1, i+hk+1))  # hk neighbours on each side, k in total
for idx, j in enumerate(arr):
if j < 0:
j = 0
if j >= n:
j = n - 1
nn_idx[i, idx] = j
efeature[0, i, idx] = i - j
nn_idx = torch.from_numpy(np.expand_dims(nn_idx, 0))
efeature = torch.from_numpy(np.expand_dims(efeature, 0))
return nn_idx, efeature
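# Illustrative usage sketch (not part of the original script): for a chain of length n with
# k neighbours per node, the returned tensors have shapes (1, n, k) for the neighbour indices
# and (1, 1, n, k) for the signed-offset edge features.
def _demo_generate_knn_table(n=30, k=8):
    nn_idx, efeature = generate_knn_table(n, k)
    assert nn_idx.shape == (1, n, k)
    assert efeature.shape == (1, 1, n, k)
    return nn_idx, efeature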
def worker_init_fn(idx):
t = int(time.time() * 1000.0) + idx
np.random.seed(((t & 0xff000000) >> 24) + ((t & 0x00ff0000) >> 8) +
((t & 0x0000ff00) << 8) + ((t & 0x000000ff) << 24))
def main():
args = parse_args()
subdir = f'raw_nn_{args.neighbour}_at_{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}'
utils.init_logger('./logs/', subdir, print_log=False)
logging.info(str(args))
writer = SummaryWriter(log_dir=f'./tf_logs/{subdir}')
nfeature_dim = 2
print(nfeature_dim)
if args.model_name == 'mp_nn':
model = mp_sequential(
mp_conv_v2(nfeature_dim,
64,
16,
extension=mp_conv_type.ORIG_WITH_NEIGHBOR),
mp_conv_residual(64, 64, 16), torch.nn.Conv2d(64, 128, 1),
torch.nn.BatchNorm2d(128), torch.nn.ReLU(inplace=True),
mp_conv_residual(128, 64, 16), torch.nn.Conv2d(128, 256, 1),
torch.nn.BatchNorm2d(256), torch.nn.ReLU(inplace=True),
mp_conv_residual(256, 64, 16), torch.nn.Conv2d(256, 128, 1),
torch.nn.BatchNorm2d(128), torch.nn.ReLU(inplace=True),
mp_conv_residual(128, 64, 16), torch.nn.Conv2d(128, 64, 1),
torch.nn.BatchNorm2d(64), torch.nn.ReLU(inplace=True),
mp_conv_residual(64, 64, 16), torch.nn.Conv2d(64, 2, 1))
emodel = torch.nn.Sequential(torch.nn.Conv2d(1, 64, 1),
torch.nn.ReLU(inplace=True),
torch.nn.Conv2d(64, 16, 1))
elif args.model_name == 'mp_nn_comp':
model = mp_sequential(
mp_conv_v2(nfeature_dim,
64,
16,
extension=mp_conv_type.ORIG_WITH_NEIGHBOR),
mp_conv_residual(64, 64, 16), torch.nn.Conv2d(64, 128, 1),
torch.nn.BatchNorm2d(128), torch.nn.ReLU(inplace=True),
mp_conv_residual(128, 64, 16), torch.nn.Conv2d(128, 256, 1),
torch.nn.BatchNorm2d(256), torch.nn.ReLU(inplace=True),
mp_conv_residual(256, 64, 16), mp_conv_residual(256, 64, 16),
mp_conv_residual(256, 64, 16), mp_conv_residual(256, 64, 16),
mp_conv_residual(256, 64, 16), torch.nn.Conv2d(256, 128, 1),
torch.nn.BatchNorm2d(128), torch.nn.ReLU(inplace=True),
mp_conv_residual(128, 64, 16), torch.nn.Conv2d(128, 64, 1),
torch.nn.BatchNorm2d(64), torch.nn.ReLU(inplace=True),
mp_conv_residual(64, 64, 16), torch.nn.Conv2d(64, 2, 1))
emodel = torch.nn.Sequential(torch.nn.Conv2d(1, 64, 1),
torch.nn.ReLU(inplace=True),
torch.nn.Conv2d(64, 16, 1))
elif args.model_name == 'simple_gnn':
model = mp_sequential(
mp_conv_v2(nfeature_dim,
64,
16,
extension=mp_conv_type.ORIG_WITH_NEIGHBOR),
mp_conv_residual(64, 64, 16), torch.nn.Conv2d(64, 2, 1))
emodel = torch.nn.Sequential(torch.nn.Conv2d(1, 64, 1),
torch.nn.ReLU(inplace=True),
torch.nn.Conv2d(64, 16, 1))
elif args.model_name == 'iid':
model = mp_sequential(torch.nn.Conv2d(nfeature_dim, 64, 1),
torch.nn.ReLU(True), torch.nn.Conv2d(64, 2, 1))
emodel = torch.nn.Sequential(torch.nn.Conv2d(1, 64, 1),
torch.nn.ReLU(inplace=True),
torch.nn.Conv2d(64, 16, 1))
logging.info('model {} created'.format(str(model)))
    np.random.seed(23456)
#!/usr/bin/env python
"""
dynspec.py
----------------------------------
Dynamic spectrum class
"""
from __future__ import (absolute_import, division,
print_function, unicode_literals)
import time
import os
from os.path import split
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as sc
from copy import deepcopy as cp
from scint_models import scint_acf_model, scint_sspec_model, tau_acf_model,\
dnu_acf_model, tau_sspec_model, dnu_sspec_model,\
fit_parabola, fit_log_parabola
from scint_utils import is_valid
from scipy.ndimage import map_coordinates
from scipy.interpolate import griddata, interp1d
from scipy.signal import convolve2d, medfilt, savgol_filter
from scipy.io import loadmat
class Dynspec:
def __init__(self, filename=None, dyn=None, verbose=True, process=True,
lamsteps=False):
""""
Initialise a dynamic spectrum object by either reading from file
or from existing object
"""
if filename:
self.load_file(filename, verbose=verbose, process=process,
lamsteps=lamsteps)
elif dyn:
self.load_dyn_obj(dyn, verbose=verbose, process=process,
lamsteps=lamsteps)
else:
print("Error: No dynamic spectrum file or object")
def __add__(self, other):
"""
        Defines dynamic spectra addition, which is concatenation in time,
with the gaps filled
"""
print("Now adding {} ...".format(other.name))
if self.freq != other.freq \
or self.bw != other.bw or self.df != other.df:
print("WARNING: Code does not yet account for different \
frequency properties")
# Set constant properties
bw = self.bw
df = self.df
freqs = self.freqs
freq = self.freq
nchan = self.nchan
dt = self.dt
# Calculate properties for the gap
timegap = round((other.mjd - self.mjd)*86400
- self.tobs, 1) # time between two dynspecs
extratimes = np.arange(self.dt/2, timegap, dt)
if timegap < dt:
extratimes = [0]
nextra = 0
else:
nextra = len(extratimes)
dyngap = np.zeros([np.shape(self.dyn)[0], nextra])
# Set changed properties
name = self.name.split('.')[0] + "+" + other.name.split('.')[0] \
+ ".dynspec"
header = self.header + other.header
# times = np.concatenate((self.times, self.times[-1] + extratimes,
# self.times[-1] + extratimes[-1] + other.times))
nsub = self.nsub + nextra + other.nsub
tobs = self.tobs + timegap + other.tobs
# Note: need to check "times" attribute for added dynspec
times = np.linspace(0, tobs, nsub)
mjd = np.min([self.mjd, other.mjd]) # mjd for earliest dynspec
newdyn = np.concatenate((self.dyn, dyngap, other.dyn), axis=1)
# Get new dynspec object with these properties
newdyn = BasicDyn(newdyn, name=name, header=header, times=times,
freqs=freqs, nchan=nchan, nsub=nsub, bw=bw,
df=df, freq=freq, tobs=tobs, dt=dt, mjd=mjd)
return Dynspec(dyn=newdyn, verbose=False, process=False)
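    # Minimal sketch (synthetic arrays) of the concatenation performed in __add__: the gap
    # between two observations is padded with zero-filled sub-integrations before joining
    # the two dynamic spectra along the time axis.
    @staticmethod
    def _example_concatenate_with_gap():
        dyn1, dyn2 = np.ones((4, 10)), 2*np.ones((4, 6))   # hypothetical dynamic spectra
        dt, timegap = 10.0, 35.0                           # sub-integration time and gap (s)
        nextra = len(np.arange(dt/2, timegap, dt))         # sub-integrations spanning the gap
        gap = np.zeros((dyn1.shape[0], nextra))
        return np.concatenate((dyn1, gap, dyn2), axis=1)   # shape (4, 10 + 3 + 6)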
def load_file(self, filename, verbose=True, process=True, lamsteps=False):
"""
Load a dynamic spectrum from psrflux-format file
"""
start = time.time()
# Import all data from filename
if verbose:
print("LOADING {0}...".format(filename))
head = []
with open(filename, "r") as file:
for line in file:
if line.startswith("#"):
headline = str.strip(line[1:])
head.append(headline)
if str.split(headline)[0] == 'MJD0:':
# MJD of start of obs
self.mjd = float(str.split(headline)[1])
self.name = os.path.basename(filename)
self.header = head
rawdata = np.loadtxt(filename).transpose() # read file
self.times = np.unique(rawdata[2]*60) # time since obs start (secs)
self.freqs = rawdata[3] # Observing frequency in MHz.
fluxes = rawdata[4] # fluxes
fluxerrs = rawdata[5] # flux errors
self.nchan = int(np.unique(rawdata[1])[-1]) # number of channels
self.bw = self.freqs[-1] - self.freqs[0] # obs bw
self.df = round(self.bw/self.nchan, 5) # channel bw
self.bw = round(self.bw + self.df, 2) # correct bw
self.nchan += 1 # correct nchan
self.nsub = int(np.unique(rawdata[0])[-1]) + 1
self.tobs = self.times[-1]+self.times[0] # initial estimate of tobs
self.dt = self.tobs/self.nsub
if self.dt > 1:
self.dt = round(self.dt)
else:
self.times = np.linspace(self.times[0], self.times[-1], self.nsub)
self.tobs = self.dt * self.nsub # recalculated tobs
# Now reshape flux arrays into a 2D matrix
self.freqs = np.unique(self.freqs)
self.freq = round(np.mean(self.freqs), 2)
fluxes = fluxes.reshape([self.nsub, self.nchan]).transpose()
fluxerrs = fluxerrs.reshape([self.nsub, self.nchan]).transpose()
if self.df < 0: # flip things
self.df = -self.df
self.bw = -self.bw
# Flip flux matricies since self.freqs is now in ascending order
fluxes = np.flip(fluxes, 0)
fluxerrs = np.flip(fluxerrs, 0)
# Finished reading, now setup dynamic spectrum
self.dyn = fluxes # initialise dynamic spectrum
self.lamsteps = lamsteps
if process:
self.default_processing(lamsteps=lamsteps) # do default processing
end = time.time()
if verbose:
print("...LOADED in {0} seconds\n".format(round(end-start, 2)))
self.info()
def load_dyn_obj(self, dyn, verbose=True, process=True, lamsteps=False):
"""
Load in a dynamic spectrum object of different type.
"""
start = time.time()
# Import all data from filename
if verbose:
print("LOADING DYNSPEC OBJECT {0}...".format(dyn.name))
self.name = dyn.name
self.header = dyn.header
self.times = dyn.times # time since obs start (secs)
self.freqs = dyn.freqs # Observing frequency in MHz.
self.nchan = dyn.nchan # number of channels
self.nsub = dyn.nsub
self.bw = dyn.bw # obs bw
self.df = dyn.df # channel bw
self.freq = dyn.freq
self.tobs = dyn.tobs # initial estimate of tobs
self.dt = dyn.dt
self.mjd = dyn.mjd
self.dyn = dyn.dyn
self.lamsteps = lamsteps
if process:
self.default_processing(lamsteps=lamsteps) # do default processing
end = time.time()
if verbose:
print("...LOADED in {0} seconds\n".format(round(end-start, 2)))
self.info()
def default_processing(self, lamsteps=False):
"""
Default processing of a Dynspec object
"""
self.trim_edges() # remove zeros on band edges
self.refill() # refill with linear interpolation
self.calc_acf() # calculate the ACF
if lamsteps:
self.scale_dyn()
self.calc_sspec(lamsteps=lamsteps) # Calculate secondary spectrum
def plot_dyn(self, lamsteps=False, input_dyn=None, filename=None,
input_x=None, input_y=None, trap=False, display=True):
"""
Plot the dynamic spectrum
"""
plt.figure(1, figsize=(12, 6))
if input_dyn is None:
if lamsteps:
if not hasattr(self, 'lamdyn'):
self.scale_dyn()
dyn = self.lamdyn
elif trap:
if not hasattr(self, 'trapdyn'):
self.scale_dyn(scale='trapezoid')
dyn = self.trapdyn
else:
dyn = self.dyn
else:
dyn = input_dyn
        medval = np.median(dyn[is_valid(dyn)*np.array(np.abs(dyn) > 0)])
        minval = np.min(dyn[is_valid(dyn)*np.array(np.abs(dyn) > 0)])
        # standard deviation
        std = np.std(dyn[is_valid(dyn)*np.array(np.abs(dyn) > 0)])
vmin = minval
vmax = medval+5*std
if input_dyn is None:
if lamsteps:
plt.pcolormesh(self.times/60, self.lam, dyn,
vmin=vmin, vmax=vmax)
plt.ylabel('Wavelength (m)')
else:
plt.pcolormesh(self.times/60, self.freqs, dyn,
vmin=vmin, vmax=vmax)
plt.ylabel('Frequency (MHz)')
plt.xlabel('Time (mins)')
# plt.colorbar() # arbitrary units
else:
plt.pcolormesh(input_x, input_y, dyn, vmin=vmin, vmax=vmax)
if filename is not None:
plt.savefig(filename, dpi=200, papertype='a4', bbox_inches='tight',
pad_inches=0.1)
plt.close()
elif input_dyn is None and display:
plt.show()
def plot_acf(self, contour=False, filename=None, input_acf=None,
input_t=None, input_f=None, fit=True, display=True):
"""
Plot the ACF
"""
if not hasattr(self, 'acf'):
self.calc_acf()
if not hasattr(self, 'tau') and input_acf is None and fit:
self.get_scint_params()
if input_acf is None:
arr = self.acf
tspan = self.tobs
fspan = self.bw
else:
arr = input_acf
tspan = max(input_t) - min(input_t)
fspan = max(input_f) - min(input_f)
arr = np.fft.ifftshift(arr)
wn = arr[0][0] - arr[0][1] # subtract the white noise spike
arr[0][0] = arr[0][0] - wn # Set the noise spike to zero for plotting
arr = np.fft.fftshift(arr)
t_delays = np.linspace(-tspan/60, tspan/60, np.shape(arr)[1])
f_shifts = np.linspace(-fspan, fspan, np.shape(arr)[0])
if input_acf is None: # Also plot scintillation scales axes
fig, ax1 = plt.subplots()
if contour:
im = ax1.contourf(t_delays, f_shifts, arr)
else:
im = ax1.pcolormesh(t_delays, f_shifts, arr)
ax1.set_ylabel('Frequency lag (MHz)')
ax1.set_xlabel('Time lag (mins)')
miny, maxy = ax1.get_ylim()
if fit:
ax2 = ax1.twinx()
ax2.set_ylim(miny/self.dnu, maxy/self.dnu)
ax2.set_ylabel('Frequency lag / (dnu_d = {0})'.
format(round(self.dnu, 2)))
ax3 = ax1.twiny()
minx, maxx = ax1.get_xlim()
ax3.set_xlim(minx/(self.tau/60), maxx/(self.tau/60))
ax3.set_xlabel('Time lag/(tau_d={0})'.format(round(
self.tau/60, 2)))
fig.colorbar(im, pad=0.15)
else: # just plot acf without scales
if contour:
plt.contourf(t_delays, f_shifts, arr)
else:
plt.pcolormesh(t_delays, f_shifts, arr)
plt.ylabel('Frequency lag (MHz)')
plt.xlabel('Time lag (mins)')
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif input_acf is None and display:
plt.show()
def plot_sspec(self, lamsteps=False, input_sspec=None, filename=None,
input_x=None, input_y=None, trap=False, prewhite=True,
plotarc=False, maxfdop=np.inf, delmax=None, ref_freq=1400,
cutmid=0, startbin=0, display=True):
"""
Plot the secondary spectrum
"""
if input_sspec is None:
if lamsteps:
if not hasattr(self, 'lamsspec'):
self.calc_sspec(lamsteps=lamsteps, prewhite=prewhite)
sspec = cp(self.lamsspec)
elif trap:
if not hasattr(self, 'trapsspec'):
self.calc_sspec(trap=trap, prewhite=prewhite)
sspec = cp(self.trapsspec)
else:
if not hasattr(self, 'sspec'):
self.calc_sspec(lamsteps=lamsteps, prewhite=prewhite)
sspec = cp(self.sspec)
xplot = cp(self.fdop)
else:
sspec = input_sspec
xplot = input_x
medval = np.median(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
# std = np.std(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
maxval = np.max(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
vmin = medval - 3
vmax = maxval - 3
# Get fdop plotting range
indicies = np.argwhere(np.abs(xplot) < maxfdop)
xplot = xplot[indicies].squeeze()
sspec = sspec[:, indicies].squeeze()
nr, nc = np.shape(sspec)
sspec[:, int(nc/2-np.floor(cutmid/2)):int(nc/2 +
np.ceil(cutmid/2))] = np.nan
sspec[:startbin, :] = np.nan
# Maximum value delay axis (us @ ref_freq)
delmax = np.max(self.tdel) if delmax is None else delmax
delmax = delmax*(ref_freq/self.freq)**2
ind = np.argmin(abs(self.tdel-delmax))
if input_sspec is None:
if lamsteps:
plt.pcolormesh(xplot, self.beta[:ind], sspec[:ind, :],
vmin=vmin, vmax=vmax)
plt.ylabel(r'$f_\lambda$ (m$^{-1}$)')
else:
plt.pcolormesh(xplot, self.tdel[:ind], sspec[:ind, :],
vmin=vmin, vmax=vmax)
plt.ylabel(r'$f_\nu$ ($\mu$s)')
plt.xlabel(r'$f_t$ (mHz)')
bottom, top = plt.ylim()
if plotarc:
if lamsteps:
eta = self.betaeta
else:
eta = self.eta
plt.plot(xplot, eta*np.power(xplot, 2),
'r--', alpha=0.5)
plt.ylim(bottom, top)
plt.colorbar()
else:
plt.pcolormesh(xplot, input_y, sspec, vmin=vmin, vmax=vmax)
plt.colorbar()
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif input_sspec is None and display:
plt.show()
def plot_all(self, dyn=1, sspec=3, acf=2, norm_sspec=4, colorbar=True,
lamsteps=False, filename=None, display=True):
"""
Plots multiple figures in one
"""
# Dynamic Spectrum
plt.subplot(2, 2, dyn)
self.plot_dyn(lamsteps=lamsteps)
plt.title("Dynamic Spectrum")
# Autocovariance Function
plt.subplot(2, 2, acf)
self.plot_acf(subplot=True)
plt.title("Autocovariance")
# Secondary Spectrum
plt.subplot(2, 2, sspec)
self.plot_sspec(lamsteps=lamsteps)
plt.title("Secondary Spectrum")
# Normalised Secondary Spectrum
plt.subplot(2, 2, norm_sspec)
self.norm_sspec(plot=True, scrunched=False, lamsteps=lamsteps,
plot_fit=False)
plt.title("Normalised fdop secondary spectrum")
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
def fit_arc(self, method='norm_sspec', asymm=False, plot=False,
delmax=None, numsteps=1e4, startbin=3, cutmid=3, lamsteps=True,
etamax=None, etamin=None, low_power_diff=-3,
high_power_diff=-1.5, ref_freq=1400, constraint=[0, np.inf],
nsmooth=5, filename=None, noise_error=True, display=True):
"""
Find the arc curvature with maximum power along it
constraint: Only search for peaks between constraint[0] and
constraint[1]
"""
if not hasattr(self, 'tdel'):
self.calc_sspec()
delmax = np.max(self.tdel) if delmax is None else delmax
delmax = delmax*(ref_freq/self.freq)**2 # adjust for frequency
if lamsteps:
if not hasattr(self, 'lamsspec'):
self.calc_sspec(lamsteps=lamsteps)
sspec = np.array(cp(self.lamsspec))
yaxis = cp(self.beta)
ind = np.argmin(abs(self.tdel-delmax))
ymax = self.beta[ind] # cut beta at equivalent value to delmax
else:
if not hasattr(self, 'sspec'):
self.calc_sspec()
sspec = np.array(cp(self.sspec))
yaxis = cp(self.tdel)
ymax = delmax
nr, nc = np.shape(sspec)
# Estimate noise in secondary spectrum
a = np.array(sspec[int(nr/2):,
int(nc/2 + np.ceil(cutmid/2)):].ravel())
b = np.array(sspec[int(nr/2):, 0:int(nc/2 -
np.floor(cutmid/2))].ravel())
noise = np.std(np.concatenate((a, b)))
# Adjust secondary spectrum
ind = np.argmin(abs(self.tdel-delmax))
sspec[0:startbin, :] = np.nan # mask first N delay bins
# mask middle N Doppler bins
sspec[:, int(nc/2 - np.floor(cutmid/2)):int(nc/2 +
np.ceil(cutmid/2))] = np.nan
sspec = sspec[0:ind, :] # cut at delmax
yaxis = yaxis[0:ind]
# noise of mean out to delmax
noise = np.sqrt(np.sum(np.power(noise, 2)))/len(yaxis[startbin:])
if etamax is None:
etamax = ymax/((self.fdop[1]-self.fdop[0])*cutmid)**2
if etamin is None:
etamin = (yaxis[1]-yaxis[0])*startbin/(max(self.fdop))**2
try:
len(etamin)
etamin_array = np.array(etamin).squeeze()
etamax_array = np.array(etamax).squeeze()
except TypeError:
# Force to be arrays for iteration
etamin_array = np.array([etamin])
etamax_array = np.array([etamax])
# At 1mHz for 1400MHz obs, the maximum arc terminates at delmax
max_sqrt_eta = np.sqrt(np.max(etamax_array))
min_sqrt_eta = np.sqrt(np.min(etamin_array))
# Create an array with equal steps in sqrt(curvature)
sqrt_eta_all = np.linspace(min_sqrt_eta, max_sqrt_eta, numsteps)
for iarc in range(0, len(etamin_array)):
if len(etamin_array) == 1:
etamin = etamin
etamax = etamax
else:
etamin = etamin_array.squeeze()[iarc]
etamax = etamax_array.squeeze()[iarc]
if not lamsteps:
c = 299792458.0 # m/s
beta_to_eta = c*1e6/((ref_freq*10**6)**2)
etamax = etamax/(self.freq/ref_freq)**2 # correct for freq
etamax = etamax*beta_to_eta
etamin = etamin/(self.freq/ref_freq)**2
etamin = etamin*beta_to_eta
constraint = constraint/(self.freq/ref_freq)**2
constraint = constraint*beta_to_eta
sqrt_eta = sqrt_eta_all[(sqrt_eta_all <= np.sqrt(etamax)) *
(sqrt_eta_all >= np.sqrt(etamin))]
numsteps_new = len(sqrt_eta)
# Define data
x = self.fdop
y = yaxis
z = sspec
# initiate arrays
sumpowL = []
sumpowR = []
etaArray = []
if method == 'gridmax':
for ii in range(0, len(sqrt_eta)):
ieta = sqrt_eta[ii]**2
etaArray.append(ieta)
ynew = ieta*np.power(x, 2) # tdel coordinates to sample
# convert to pixel coordinates
xpx = ((x-np.min(x))/(max(x) - np.min(x)))*np.shape(z)[1]
ynewpx = ((ynew-np.min(ynew)) /
(max(y) - np.min(ynew)))*np.shape(z)[0]
# left side
ind = np.where(x < 0) # find -ve doppler
ynewL = ynew[ind]
xnewpxL = xpx[ind]
ynewpxL = ynewpx[ind]
ind = np.where(ynewL < np.max(y)) # inds below tdel cutoff
xnewL = xnewpxL[ind]
ynewL = ynewpxL[ind]
xynewL = np.array([[ynewL[ii], xnewL[ii]] for ii in
range(0, len(xnewL))]).T
znewL = map_coordinates(z, xynewL, order=1, cval=np.nan)
sumpowL.append(np.mean(znewL[~np.isnan(znewL)]))
# Right side
ind = np.where(x > 0) # find +ve doppler
ynewR = ynew[ind]
xnewpxR = xpx[ind]
ynewpxR = ynewpx[ind]
ind = np.where(ynewR < np.max(y)) # inds below tdel cutoff
xnewR = xnewpxR[ind]
ynewR = ynewpxR[ind]
xynewR = np.array([[ynewR[ii], xnewR[ii]] for ii in
range(0, len(xnewR))]).T
znewR = map_coordinates(z, xynewR, order=1, cval=np.nan)
sumpowR.append(np.mean(znewR[~np.isnan(znewR)]))
# Total
sumpow = np.add(sumpowL, sumpowR)/2 # average
# Ignore nan sums and smooth
indicies = np.argwhere(is_valid(sumpow)).ravel()
etaArray = np.array(etaArray)[indicies]
sumpow = np.array(sumpow)[indicies]
sumpowL = np.array(sumpowL)[indicies]
sumpowR = np.array(sumpowR)[indicies]
sumpow_filt = savgol_filter(sumpow, nsmooth, 1)
sumpowL_filt = savgol_filter(sumpowL, nsmooth, 1)
sumpowR_filt = savgol_filter(sumpowR, nsmooth, 1)
indrange = np.argwhere((etaArray > constraint[0]) *
(etaArray < constraint[1]))
sumpow_inrange = sumpow_filt[indrange]
                sumpowL_inrange = sumpowL_filt[indrange]
                sumpowR_inrange = sumpowR_filt[indrange]
ind = np.argmin(np.abs(sumpow_filt - np.max(sumpow_inrange)))
indL = np.argmin(np.abs(sumpow_filt - np.max(sumpowL_inrange)))
indR = np.argmin(np.abs(sumpow_filt - np.max(sumpowR_inrange)))
eta = etaArray[ind]
etaL = etaArray[indL]
etaR = etaArray[indR]
# Now find eta and estimate error by fitting parabola
# Data from -3db on low curvature side to -1.5db on high side
max_power = sumpow_filt[ind]
power = max_power
ind1 = 1
while (power > max_power + low_power_diff and
ind + ind1 < len(sumpow_filt)-1): # -3db, or half power
ind1 += 1
power = sumpow_filt[ind - ind1]
power = max_power
ind2 = 1
while (power > max_power + high_power_diff and
ind + ind2 < len(sumpow_filt)-1): # -1db power
ind2 += 1
power = sumpow_filt[ind + ind2]
# Now select this region of data for fitting
xdata = etaArray[int(ind-ind1):int(ind+ind2)]
ydata = sumpow[int(ind-ind1):int(ind+ind2)]
# Do the fit
# yfit, eta, etaerr = fit_parabola(xdata, ydata)
yfit, eta, etaerr = fit_log_parabola(xdata, ydata)
if np.mean(np.gradient(np.diff(yfit))) > 0:
raise ValueError('Fit returned a forward parabola.')
eta = eta
if noise_error:
# Now get error from the noise in secondary spectra instead
etaerr2 = etaerr
power = max_power
ind1 = 1
while (power > max_power - noise and
ind + ind1 < len(sumpow_filt)-1): # -3db
ind1 += 1
power = sumpow_filt[ind - ind1]
power = max_power
ind2 = 1
while (power > max_power - noise and
ind + ind2 < len(sumpow_filt)-1): # -1db power
ind2 += 1
power = sumpow_filt[ind + ind2]
etaerr = np.ptp(etaArray[int(ind-ind1):int(ind+ind2)])/2
# Now plot
if plot and iarc == 0:
if asymm:
plt.subplot(2, 1, 1)
plt.plot(etaArray, sumpowL)
plt.plot(etaArray, sumpowL_filt)
bottom, top = plt.ylim()
plt.plot([etaL, etaL], [bottom, top])
plt.ylabel('mean power (db)')
plt.xscale('log')
plt.subplot(2, 1, 2)
plt.plot(etaArray, sumpowR)
plt.plot(etaArray, sumpowR_filt)
bottom, top = plt.ylim()
plt.plot([etaR, etaR], [bottom, top])
else:
plt.plot(etaArray, sumpow)
plt.plot(etaArray, sumpow_filt)
plt.plot(xdata, yfit)
bottom, top = plt.ylim()
plt.axvspan(xmin=eta-etaerr, xmax=eta+etaerr,
facecolor='C2', alpha=0.5)
if lamsteps:
plt.xlabel(r'Arc curvature, $\eta$ (${\rm m}^{-1}\,'
'{\rm mHz}^{-2}$)')
else:
plt.xlabel('eta (tdel)')
plt.ylabel('mean power (dB)')
plt.xscale('log')
elif plot:
plt.axvspan(xmin=eta-etaerr, xmax=eta+etaerr,
facecolor='C{0}'.format(str(int(3+iarc))),
alpha=0.3)
if plot and iarc == len(etamin_array) - 1:
if filename is not None:
plt.savefig(filename, figsize=(6, 6), dpi=150,
bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
elif method == 'norm_sspec':
# Get the normalised secondary spectrum, set for minimum eta as
# normalisation. Then calculate peak as
self.norm_sspec(eta=etamin, delmax=delmax, plot=False,
startbin=startbin, maxnormfac=1, cutmid=cutmid,
lamsteps=lamsteps, scrunched=True,
plot_fit=False, numsteps=numsteps_new)
norm_sspec = self.normsspecavg.squeeze()
etafrac_array = np.linspace(-1, 1, len(norm_sspec))
ind1 = np.argwhere(etafrac_array > 1/(2*len(norm_sspec)))
ind2 = np.argwhere(etafrac_array < -1/(2*len(norm_sspec)))
norm_sspec_avg = np.add(norm_sspec[ind1],
np.flip(norm_sspec[ind2], axis=0))/2
norm_sspec_avg = norm_sspec_avg.squeeze()
etafrac_array_avg = 1/etafrac_array[ind1].squeeze()
# Make sure is valid
filt_ind = is_valid(norm_sspec_avg)
norm_sspec_avg = np.flip(norm_sspec_avg[filt_ind], axis=0)
etafrac_array_avg = np.flip(etafrac_array_avg[filt_ind],
axis=0)
# Form eta array and cut at maximum
etaArray = etamin*etafrac_array_avg**2
ind = np.argwhere(etaArray < etamax)
etaArray = etaArray[ind].squeeze()
norm_sspec_avg = norm_sspec_avg[ind].squeeze()
# Smooth data
norm_sspec_avg_filt = \
savgol_filter(norm_sspec_avg, nsmooth, 1)
# search for peaks within constraint range
indrange = np.argwhere((etaArray > constraint[0]) *
(etaArray < constraint[1]))
sumpow_inrange = norm_sspec_avg_filt[indrange]
ind = np.argmin(np.abs(norm_sspec_avg_filt -
np.max(sumpow_inrange)))
# Now find eta and estimate error by fitting parabola
# Data from -3db on low curvature side to -1.5db on high side
max_power = norm_sspec_avg_filt[ind]
power = max_power
ind1 = 1
while (power > max_power + low_power_diff and
ind + ind1 < len(norm_sspec_avg_filt)-1): # -3db
ind1 += 1
power = norm_sspec_avg_filt[ind - ind1]
power = max_power
ind2 = 1
while (power > max_power + high_power_diff and
ind + ind2 < len(norm_sspec_avg_filt)-1): # -1db power
ind2 += 1
power = norm_sspec_avg_filt[ind + ind2]
# Now select this region of data for fitting
xdata = etaArray[int(ind-ind1):int(ind+ind2)]
ydata = norm_sspec_avg[int(ind-ind1):int(ind+ind2)]
# Do the fit
# yfit, eta, etaerr = fit_parabola(xdata, ydata)
yfit, eta, etaerr = fit_parabola(xdata, ydata)
if np.mean(np.gradient(np.diff(yfit))) > 0:
raise ValueError('Fit returned a forward parabola.')
eta = eta
if noise_error:
# Now get error from the noise in secondary spectra instead
etaerr2 = etaerr # error from parabola fit
power = max_power
ind1 = 1
while (power > max_power - noise and
ind + ind1 < len(norm_sspec_avg_filt)-1): # -3db
ind1 += 1
power = norm_sspec_avg_filt[ind - ind1]
power = max_power
ind2 = 1
while (power > max_power - noise and
# -1db power
ind + ind2 < len(norm_sspec_avg_filt)-1):
ind2 += 1
power = norm_sspec_avg_filt[ind + ind2]
etaerr = np.ptp(etaArray[int(ind-ind1):int(ind+ind2)])/2
if plot and iarc == 0:
plt.plot(etaArray, norm_sspec_avg)
plt.plot(etaArray, norm_sspec_avg_filt)
plt.plot(xdata, yfit)
plt.axvspan(xmin=eta-etaerr, xmax=eta+etaerr,
facecolor='C2', alpha=0.5)
plt.xscale('log')
if lamsteps:
plt.xlabel(r'Arc curvature, '
r'$\eta$ (${\rm m}^{-1}\,{\rm mHz}^{-2}$)')
else:
plt.xlabel('eta (tdel)')
plt.ylabel('Mean power (dB)')
elif plot:
plt.plot(xdata, yfit,
color='C{0}'.format(str(int(3+iarc))))
plt.axvspan(xmin=eta-etaerr, xmax=eta+etaerr,
facecolor='C{0}'.format(str(int(3+iarc))),
alpha=0.3)
if plot and iarc == len(etamin_array)-1:
if filename is not None:
plt.savefig(filename, figsize=(6, 6), dpi=150,
bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
else:
                raise ValueError('Unknown arc fitting method. Please choose '
                                 'from gridmax or norm_sspec')
if iarc == 0: # save primary
if lamsteps:
self.betaeta = eta
self.betaetaerr = etaerr
self.betaetaerr2 = etaerr2
else:
self.eta = eta
self.etaerr = etaerr
self.etaerr2 = etaerr2
def norm_sspec(self, eta=None, delmax=None, plot=False, startbin=1,
maxnormfac=2, cutmid=3, lamsteps=False, scrunched=True,
plot_fit=True, ref_freq=1400, numsteps=None, filename=None,
display=True, unscrunched=True, powerspec=True):
"""
Normalise fdop axis using arc curvature
"""
# Maximum value delay axis (us @ ref_freq)
delmax = np.max(self.tdel) if delmax is None else delmax
delmax = delmax*(ref_freq/self.freq)**2
if lamsteps:
if not hasattr(self, 'lamsspec'):
self.calc_sspec(lamsteps=lamsteps)
sspec = cp(self.lamsspec)
yaxis = cp(self.beta)
if not hasattr(self, 'betaeta') and eta is None:
self.fit_arc(lamsteps=lamsteps, delmax=delmax, plot=plot,
startbin=startbin)
else:
if not hasattr(self, 'sspec'):
self.calc_sspec()
sspec = cp(self.sspec)
yaxis = cp(self.tdel)
if not hasattr(self, 'eta') and eta is None:
self.fit_arc(lamsteps=lamsteps, delmax=delmax, plot=plot,
startbin=startbin)
if eta is None:
if lamsteps:
eta = self.betaeta
else:
eta = self.eta
else: # convert to beta
if not lamsteps:
c = 299792458.0 # m/s
beta_to_eta = c*1e6/((ref_freq*10**6)**2)
eta = eta/(self.freq/ref_freq)**2 # correct for frequency
eta = eta*beta_to_eta
medval = np.median(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
std = np.std(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
maxval = np.max(sspec[is_valid(sspec)*np.array(np.abs(sspec) > 0)])
vmin = medval - std
vmax = maxval - 3
ind = np.argmin(abs(self.tdel-delmax))
sspec = sspec[startbin:ind, :] # cut first N delay bins and at delmax
# sspec[0:startbin] = np.nan
nr, nc = np.shape(sspec)
# mask out centre bins
sspec[:, int(nc/2 - np.floor(cutmid/2)):int(nc/2 +
np.floor(cutmid/2))] = np.nan
tdel = yaxis[startbin:ind]
# tdel = yaxis[:ind]
fdop = self.fdop
maxfdop = maxnormfac*np.sqrt(tdel[-1]/eta) # Maximum fdop for plot
if maxfdop > max(fdop):
maxfdop = max(fdop)
# Number of fdop bins to use. Oversample by factor of 2
nfdop = 2*len(fdop[abs(fdop) <=
maxfdop]) if numsteps is None else numsteps
fdopnew = np.linspace(-maxnormfac, maxnormfac,
nfdop) # norm fdop
normSspec = []
isspectot = np.zeros(np.shape(fdopnew))
for ii in range(0, len(tdel)):
itdel = tdel[ii]
imaxfdop = maxnormfac*np.sqrt(itdel/eta)
ifdop = fdop[abs(fdop) <= imaxfdop]/np.sqrt(itdel/eta)
isspec = sspec[ii, abs(fdop) <= imaxfdop] # take the iith row
ind = np.argmin(abs(fdopnew))
normline = np.interp(fdopnew, ifdop, isspec)
normSspec.append(normline)
isspectot = np.add(isspectot, normline)
isspecavg = np.nanmean(normSspec, axis=0) # make average
powerspectrum = np.nanmean(normSspec, axis=1)
ind1 = np.argmin(abs(fdopnew-1)-2)
if isspecavg[ind1] < 0:
isspecavg = isspecavg + 2 # make 1 instead of -1
if plot:
# Plot delay-scrunched "power profile"
if scrunched:
plt.plot(fdopnew, isspecavg)
bottom, top = plt.ylim()
plt.xlabel("Normalised $f_t$")
plt.ylabel("Mean power (dB)")
if plot_fit:
plt.plot([1, 1], [bottom*0.9, top*1.1], 'r--', alpha=0.5)
plt.plot([-1, -1], [bottom*0.9, top*1.1], 'r--', alpha=0.5)
plt.ylim(bottom*0.9, top*1.1) # always plot from zero!
plt.xlim(-maxnormfac, maxnormfac)
if filename is not None:
filename_name = filename.split('.')[0]
filename_extension = filename.split('.')[1]
plt.savefig(filename_name + '_1d.' + filename_extension,
bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
# Plot 2D normalised secondary spectrum
if unscrunched:
plt.pcolormesh(fdopnew, tdel, normSspec, vmin=vmin, vmax=vmax)
if lamsteps:
plt.ylabel(r'$f_\lambda$ (m$^{-1}$)')
else:
plt.ylabel(r'$f_\nu$ ($\mu$s)')
bottom, top = plt.ylim()
plt.xlabel("Normalised $f_t$")
if plot_fit:
plt.plot([1, 1], [bottom, top], 'r--', alpha=0.5)
plt.plot([-1, -1], [bottom, top], 'r--', alpha=0.5)
plt.ylim(bottom, top)
plt.colorbar()
if filename is not None:
plt.savefig(filename, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
# plot power spectrum
if powerspec:
plt.loglog(np.sqrt(tdel), powerspectrum)
if lamsteps:
plt.xlabel(r'$f_\lambda^{1/2}$ (m$^{-1/2}$)')
else:
plt.xlabel(r'$f_\nu^{1/2}$ ($\mu$s$^{1/2}$)')
plt.ylabel("Mean power (dB)")
if filename is not None:
filename_name = filename.split('.')[0]
filename_extension = filename.split('.')[1]
plt.savefig(filename_name + '_power.' + filename_extension,
bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
self.normsspecavg = isspecavg
self.normsspec = np.array(normSspec).squeeze()
self.normsspec_tdel = tdel
return
def get_scint_params(self, method="acf1d", plot=False, alpha=5/3,
mcmc=False, display=True):
"""
Measure the scintillation timescale
Method:
acf1d - takes a 1D cut through the centre of the ACF for
sspec - measures timescale from the power spectrum
acf2d - uses an analytic approximation to the ACF including
phase gradient
"""
from lmfit import Minimizer, Parameters
import corner
if not hasattr(self, 'acf'):
self.calc_acf()
if not hasattr(self, 'sspec'):
self.calc_sspec()
if method == 'acf1d':
scint_model = scint_acf_model
ydata_f = self.acf[int(self.nchan):, int(self.nsub)]
xdata_f = self.df*np.linspace(0, len(ydata_f), len(ydata_f))
ydata_t = self.acf[int(self.nchan), int(self.nsub):]
xdata_t = self.dt*np.linspace(0, len(ydata_t), len(ydata_t))
elif method == 'sspec':
scint_model = scint_sspec_model
arr = cp(self.acf)
arr = np.fft.ifftshift(arr)
# sspec = np.fft.fft2(arr)
# concatenate x and y arrays
xdata = np.array(np.concatenate((xdata_t, xdata_f)))
ydata = np.array(np.concatenate((ydata_t, ydata_f)))
weights = np.ones(np.shape(ydata))
# Get initial parameter values
nt = len(xdata_t) # number of t-lag samples
# Estimate amp and white noise level
wn = min([ydata_f[0]-ydata_f[1], ydata_t[0]-ydata_t[1]])
amp = max([ydata_f[1], ydata_t[1]])
# Estimate tau for initial guess. Closest index to 1/e power
tau = xdata_t[np.argmin(abs(ydata_t - amp/np.e))]
# Estimate dnu for initial guess. Closest index to 1/2 power
dnu = xdata_f[np.argmin(abs(ydata_f - amp/2))]
# Define fit parameters
params = Parameters()
params.add('tau', value=tau, min=0.0, max=np.inf)
params.add('dnu', value=dnu, min=0.0, max=np.inf)
params.add('amp', value=amp, min=0.0, max=np.inf)
params.add('wn', value=wn, min=0.0, max=np.inf)
params.add('nt', value=nt, vary=False)
if alpha is None:
params.add('alpha', value=5/3, min=0, max=8)
else:
params.add('alpha', value=alpha, vary=False)
# Do fit
func = Minimizer(scint_model, params, fcn_args=(xdata, ydata, weights))
results = func.minimize()
if mcmc:
print('Doing mcmc posterior sample')
mcmc_results = func.emcee()
results = mcmc_results
self.tau = results.params['tau'].value
self.tauerr = results.params['tau'].stderr
self.dnu = results.params['dnu'].value
self.dnuerr = results.params['dnu'].stderr
self.talpha = results.params['alpha'].value
if alpha is None:
self.talphaerr = results.params['alpha'].stderr
if plot:
# get models:
if method == 'acf1d':
# Get tau model
tmodel_res = tau_acf_model(results.params, xdata_t, ydata_t,
weights[:nt])
tmodel = ydata_t - tmodel_res/weights[:nt]
# Get dnu model
fmodel_res = dnu_acf_model(results.params, xdata_f, ydata_f,
weights[nt:])
fmodel = ydata_f - fmodel_res/weights[nt:]
plt.subplot(2, 1, 1)
plt.plot(xdata_t, ydata_t)
plt.plot(xdata_t, tmodel)
plt.xlabel('Time lag (s)')
plt.subplot(2, 1, 2)
plt.plot(xdata_f, ydata_f)
plt.plot(xdata_f, fmodel)
plt.xlabel('Frequency lag (MHz)')
if display:
plt.show()
if mcmc:
corner.corner(mcmc_results.flatchain,
labels=mcmc_results.var_names,
truths=list(mcmc_results.params.
valuesdict().values()))
if display:
plt.show()
return
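    # Illustrative sketch (synthetic 1D ACF cut) of the initial-guess step used above: the
    # scintillation timescale guess is the time lag closest to the 1/e point of the ACF,
    # after skipping the white-noise spike at zero lag.
    @staticmethod
    def _example_tau_guess():
        dt = 10.0                                          # hypothetical sub-integration time (s)
        cut = np.exp(-np.arange(51) / 12.0)                # toy positive-lag ACF cut in time
        lags = dt * np.arange(51)
        amp = cut[1]                                       # skip the noise spike at zero lag
        return lags[np.argmin(np.abs(cut - amp / np.e))]   # ~1/e decorrelation time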
def cut_dyn(self, tcuts=0, fcuts=0, plot=False, filename=None,
lamsteps=False, maxfdop=np.inf, figsize=(8, 13), display=True):
"""
Cuts the dynamic spectrum into tcuts+1 segments in time and
fcuts+1 segments in frequency
"""
if filename is not None:
plt.ioff() # turn off interactive plotting
nchan = len(self.freqs) # re-define in case of trimming
nsub = len(self.times)
fnum = np.floor(nchan/(fcuts + 1))
tnum = np.floor(nsub/(tcuts + 1))
cutdyn = np.empty(shape=(fcuts+1, tcuts+1, int(fnum), int(tnum)))
# find the right fft lengths for rows and columns
nrfft = int(2**(np.ceil(np.log2(int(fnum)))+1)/2)
ncfft = int(2**(np.ceil(np.log2(int(tnum)))+1))
cutsspec = np.empty(shape=(fcuts+1, tcuts+1, nrfft, ncfft))
cutacf = np.empty(shape=(fcuts+1, tcuts+1, 2*int(fnum), 2*int(tnum)))
plotnum = 1
for ii in reversed(range(0, fcuts+1)): # plot from high to low
for jj in range(0, tcuts+1):
cutdyn[int(ii)][int(jj)][:][:] =\
self.dyn[int(ii*fnum):int((ii+1)*fnum),
int(jj*tnum):int((jj+1)*tnum)]
input_dyn_x = self.times[int(jj*tnum):int((jj+1)*tnum)]
input_dyn_y = self.freqs[int(ii*fnum):int((ii+1)*fnum)]
input_sspec_x, input_sspec_y, cutsspec[int(ii)][int(jj)][:][:]\
= self.calc_sspec(input_dyn=cutdyn[int(ii)][int(jj)][:][:],
lamsteps=lamsteps)
cutacf[int(ii)][int(jj)][:][:] \
= self.calc_acf(input_dyn=cutdyn[int(ii)][int(jj)][:][:])
if plot:
# Plot dynamic spectra
plt.figure(1, figsize=figsize)
plt.subplot(fcuts+1, tcuts+1, plotnum)
self.plot_dyn(input_dyn=cutdyn[int(ii)][int(jj)][:][:],
input_x=input_dyn_x/60, input_y=input_dyn_y)
plt.xlabel('t (mins)')
plt.ylabel('f (MHz)')
# Plot acf
plt.figure(2, figsize=figsize)
plt.subplot(fcuts+1, tcuts+1, plotnum)
self.plot_acf(input_acf=cutacf[int(ii)][int(jj)][:][:],
input_t=input_dyn_x,
input_f=input_dyn_y)
plt.xlabel('t lag (mins)')
                    plt.ylabel('f lag (MHz)')
# Plot secondary spectra
plt.figure(3, figsize=figsize)
plt.subplot(fcuts+1, tcuts+1, plotnum)
self.plot_sspec(input_sspec=cutsspec[int(ii)]
[int(jj)][:][:],
input_x=input_sspec_x,
input_y=input_sspec_y, lamsteps=lamsteps,
maxfdop=maxfdop)
plt.xlabel(r'$f_t$ (mHz)')
if lamsteps:
plt.ylabel(r'$f_\lambda$ (m$^{-1}$)')
else:
plt.ylabel(r'$f_\nu$ ($\mu$s)')
plotnum += 1
if plot:
plt.figure(1)
if filename is not None:
                filename_name, filename_extension = filename.rsplit('.', 1)
                plt.savefig(filename_name + '_dynspec.' + filename_extension,
                            dpi=150, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
plt.figure(2)
if filename is not None:
                plt.savefig(filename_name + '_acf.' + filename_extension,
                            dpi=150, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
plt.figure(3)
if filename is not None:
                plt.savefig(filename_name + '_sspec.' + filename_extension,
                            dpi=150, bbox_inches='tight', pad_inches=0.1)
plt.close()
elif display:
plt.show()
self.cutdyn = cutdyn
self.cutsspec = cutsspec
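    # Example usage (hypothetical instance name `dyn`; not from the original source):
    #
    #     dyn.trim_edges()                  # remove empty band edges first
    #     dyn.refill(linear=True)           # interpolate over missing values
    #     dyn.cut_dyn(tcuts=1, fcuts=1, plot=True, display=True)
    #     quarters = dyn.cutdyn             # shape: (fcuts+1, tcuts+1, fnum, tnum)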
def trim_edges(self):
"""
Find and remove the band edges
"""
rowsum = sum(abs(self.dyn[0][:]))
# Trim bottom
while rowsum == 0 or np.isnan(rowsum):
self.dyn = np.delete(self.dyn, (0), axis=0)
self.freqs = np.delete(self.freqs, (0))
rowsum = sum(abs(self.dyn[0][:]))
rowsum = sum(abs(self.dyn[-1][:]))
# Trim top
while rowsum == 0 or np.isnan(rowsum):
self.dyn = np.delete(self.dyn, (-1), axis=0)
self.freqs = np.delete(self.freqs, (-1))
rowsum = sum(abs(self.dyn[-1][:]))
        # Trim left
        colsum = sum(abs(self.dyn[:, 0]))
        while colsum == 0 or np.isnan(colsum):
            self.dyn = np.delete(self.dyn, (0), axis=1)
            self.times = np.delete(self.times, (0))
            colsum = sum(abs(self.dyn[:, 0]))
        colsum = sum(abs(self.dyn[:, -1]))
        # Trim right
        while colsum == 0 or np.isnan(colsum):
            self.dyn = np.delete(self.dyn, (-1), axis=1)
            self.times = np.delete(self.times, (-1))
            colsum = sum(abs(self.dyn[:, -1]))
self.nchan = len(self.freqs)
self.bw = round(max(self.freqs) - min(self.freqs) + self.df, 2)
self.freq = round(np.mean(self.freqs), 2)
self.nsub = len(self.times)
self.tobs = round(max(self.times) - min(self.times) + self.dt, 2)
self.mjd = self.mjd + self.times[0]/86400
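    # Minimal standalone sketch of the same edge-trimming idea on a plain 2-D array
    # (illustrative only; variable names are hypothetical):
    #
    #     import numpy as np
    #     dyn = np.random.rand(8, 16)
    #     dyn[0, :] = 0.0                                # dead channel at the band edge
    #     good_rows = ~np.all((dyn == 0) | np.isnan(dyn), axis=1)
    #     good_cols = ~np.all((dyn == 0) | np.isnan(dyn), axis=0)
    #     trimmed = dyn[good_rows][:, good_cols]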
def refill(self, linear=True, zeros=True):
"""
Replaces the nan values in array. Also replaces zeros by default
"""
if zeros:
self.dyn[self.dyn == 0] = np.nan
array = cp(self.dyn)
x = np.arange(0, array.shape[1])
y = np.arange(0, array.shape[0])
if linear: # do linear interpolation
# mask invalid values
array = np.ma.masked_invalid(array)
xx, yy = np.meshgrid(x, y)
# get only the valid values
x1 = xx[~array.mask]
y1 = yy[~array.mask]
newarr = np.ravel(array[~array.mask])
self.dyn = griddata((x1, y1), newarr, (xx, yy),
method='linear')
# Fill remainder with the mean
meanval = np.mean(self.dyn[is_valid(self.dyn)])
self.dyn[np.isnan(self.dyn)] = meanval
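    # Standalone sketch of the interpolation step used above (illustrative only):
    #
    #     import numpy as np
    #     from scipy.interpolate import griddata
    #     arr = np.random.rand(4, 6)
    #     arr[1, 2] = np.nan
    #     yy, xx = np.mgrid[0:arr.shape[0], 0:arr.shape[1]]
    #     mask = np.isnan(arr)
    #     filled = griddata((xx[~mask], yy[~mask]), arr[~mask],
    #                       (xx, yy), method='linear')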
def correct_band(self, frequency=True, time=False, lamsteps=False,
nsmooth=5):
"""
Correct for the bandpass
"""
if lamsteps:
if not self.lamsteps:
self.scale_dyn()
dyn = self.lamdyn
else:
dyn = self.dyn
dyn[np.isnan(dyn)] = 0
if frequency:
self.bandpass = np.mean(dyn, axis=1)
# Make sure there are no zeros
self.bandpass[self.bandpass == 0] = np.mean(self.bandpass)
if nsmooth is not None:
bandpass = savgol_filter(self.bandpass, nsmooth, 1)
else:
bandpass = self.bandpass
dyn = np.divide(dyn, np.reshape(bandpass,
[len(bandpass), 1]))
if time:
timestructure = np.mean(dyn, axis=0)
# Make sure there are no zeros
timestructure[timestructure == 0] = np.mean(timestructure)
if nsmooth is not None:
timestructure = savgol_filter(timestructure, nsmooth, 1)
dyn = np.divide(dyn, np.reshape(timestructure,
[1, len(timestructure)]))
if lamsteps:
self.lamdyn = dyn
else:
self.dyn = dyn
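    # Standalone sketch of the bandpass step (illustrative only): divide each channel
    # by its time-averaged value so the mean spectrum becomes flat.
    #
    #     import numpy as np
    #     dyn = np.random.rand(64, 128)                  # (nchan, nsub)
    #     bandpass = np.mean(dyn, axis=1)
    #     bandpass[bandpass == 0] = np.mean(bandpass)    # guard against dead channels
    #     flattened = dyn / bandpass[:, np.newaxis]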
def calc_sspec(self, prewhite=True, plot=False, lamsteps=False,
input_dyn=None, input_x=None, input_y=None, trap=False,
window='blackman', window_frac=0.1):
"""
Calculate secondary spectrum
"""
if input_dyn is None: # use self dynamic spectrum
if lamsteps:
if not self.lamsteps:
self.scale_dyn()
dyn = cp(self.lamdyn)
elif trap:
if not hasattr(self, 'trap'):
self.scale_dyn(scale='trapezoid')
dyn = cp(self.trapdyn)
else:
dyn = cp(self.dyn)
else:
            dyn = input_dyn  # use input dynamic spectrum
nf = np.shape(dyn)[0]
nt = np.shape(dyn)[1]
dyn = dyn - np.mean(dyn) # subtract mean
if window is not None:
# Window the dynamic spectrum
if window == 'hanning':
cw = np.hanning(np.floor(window_frac*nt))
sw = np.hanning(np.floor(window_frac*nf))
elif window == 'hamming':
cw = np.hamming(np.floor(window_frac*nt))
sw = np.hamming(np.floor(window_frac*nf))
elif window == 'blackman':
cw = np.blackman(np.floor(window_frac*nt))
sw = np.blackman(np.floor(window_frac*nf))
elif window == 'bartlett':
cw = np.bartlett(np.floor(window_frac*nt))
sw = np.bartlett(np.floor(window_frac*nf))
            else:
                raise ValueError('Unknown window "{}". Please add it!'.format(window))
chan_window = np.insert(cw, int(np.ceil(len(cw)/2)),
np.ones([nt-len(cw)]))
subint_window = np.insert(sw, int(np.ceil(len(sw)/2)),
np.ones([nf-len(sw)]))
dyn = np.multiply(chan_window, dyn)
dyn = np.transpose(np.multiply(subint_window,
np.transpose(dyn)))
# find the right fft lengths for rows and columns
nrfft = int(2**(np.ceil(np.log2(nf))+1))
ncfft = int(2**(np.ceil(np.log2(nt))+1))
dyn = dyn - np.mean(dyn) # subtract mean
if prewhite:
simpw = convolve2d([[1, -1], [-1, 1]], dyn, mode='valid')
else:
simpw = dyn
simf = np.fft.fft2(simpw, s=[nrfft, ncfft])
simf = np.real(np.multiply(simf, np.conj(simf))) # is real
sec = np.fft.fftshift(simf) # fftshift
sec = sec[int(nrfft/2):][:] # crop
td = np.array(list(range(0, int(nrfft/2))))
fd = np.array(list(range(int(-ncfft/2), int(ncfft/2))))
        fdop = np.reshape(np.multiply(fd, 1e3/(ncfft*self.dt)),
                          [1, ncfft])  # Doppler axis in mHz; the reshape shape is assumed
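    # Minimal standalone sketch of the secondary-spectrum core used above
    # (FFT -> power -> fftshift); illustrative only, without prewhitening or windowing:
    #
    #     import numpy as np
    #     dyn = np.random.rand(64, 128) - 0.5
    #     spec = np.fft.fft2(dyn, s=[128, 256])
    #     power = np.real(spec * np.conj(spec))
    #     sspec = np.fft.fftshift(power)[64:, :]         # keep non-negative delays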
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
"""Compute minibatch blobs for training a Fast R-CNN network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import numpy as np
import numpy.random as npr
import cv2
import sys
sys.path.insert(0, osp.join(osp.dirname(__file__), ".."))
from model.config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
from datasets.Liver_Kits import METType, abdominal_mask, raw_reader
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES),
size=num_images)
assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \
'num_images ({}) must divide BATCH_SIZE ({})'. \
format(num_images, cfg.TRAIN.BATCH_SIZE)
# Get the input image blob, formatted for caffe
if not cfg.MED_IMG:
im_blob, im_scales = _get_image_blob(roidb, random_scale_inds)
else:
im_blob, abdo_mask = _get_medical_image_blob(roidb)
im_scales = [1] # compatible with original version
blobs = {"data": im_blob, "abdo_mask": abdo_mask}
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
# gt boxes: (x1, y1, x2, y2, cls)
if cfg.TRAIN.USE_ALL_GT:
# Include all ground truth boxes
gt_inds = np.where(roidb[0]['gt_classes'] != 0)[0]
else:
        # For the COCO ground truth boxes, exclude the ones that are 'iscrowd'
        gt_inds = np.where((roidb[0]['gt_classes'] != 0) & np.all(
            roidb[0]['gt_overlaps'].toarray() > -1.0, axis=1))[0]
gt_boxes = np.empty((len(gt_inds), 5), dtype=np.float32)
gt_boxes[:, 0:4] = roidb[0]['boxes'][gt_inds, :] * im_scales[0]
gt_boxes[:, 4] = roidb[0]['gt_classes'][gt_inds]
blobs['gt_boxes'] = gt_boxes
blobs['im_info'] = np.array(
[im_blob.shape[1], im_blob.shape[2], im_scales[0]],
dtype=np.float32)
return blobs
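# Example call (hypothetical roidb entry; illustrative only):
#
#     roidb = [{'image': '/path/to/img.jpg', 'flipped': False,
#               'gt_classes': np.array([1]), 'boxes': np.array([[10, 10, 50, 60]]),
#               'gt_overlaps': None, 'height': 512, 'width': 512}]
#     blobs = get_minibatch(roidb, num_classes=2)
#     # blobs contains 'data', 'gt_boxes' and 'im_info'
#     # (plus 'abdo_mask' when cfg.MED_IMG is set)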
def _get_image_blob(roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
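# For reference, im_list_to_blob pads a list of images into one fixed-size array.
# A minimal numpy-only sketch of that idea (illustrative, not the utils.blob code):
#
#     import numpy as np
#     def simple_im_list_to_blob(ims):
#         max_h = max(im.shape[0] for im in ims)
#         max_w = max(im.shape[1] for im in ims)
#         blob = np.zeros((len(ims), max_h, max_w, 3), dtype=np.float32)
#         for i, im in enumerate(ims):
#             blob[i, :im.shape[0], :im.shape[1], :] = im
#         return blob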
def _get_medical_image_blob(roidb):
""" Builds an input blob from the medical image in the roidb
"""
num_images = len(roidb)
processed_ims = []
pre_ims = []
post_ims = []
abdo_masks = []
for i in range(num_images):
im = raw_reader(roidb[i]["image"], cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
if roidb[i]['flipped']:
im = im[:, ::-1]
processed_ims.append(im)
mask = abdominal_mask(im.copy())
abdo_masks.append(mask)
if cfg.THREE_SLICES:
# get pre-image
basename = osp.basename(roidb[i]["image"])
names = basename[:-4].split("_")
slice_num = int(names[-1])
if slice_num == 0:
pre_im = im
else:
slice_num -= 1
names[-1] = str(slice_num)
basename = "_".join(names) + ".raw"
pre_path = osp.join(osp.dirname(roidb[i]["image"]), basename)
pre_im = raw_reader(pre_path, cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
if roidb[i]['flipped']:
pre_im = pre_im[:, ::-1]
pre_ims.append(pre_im)
# get post-image
basename = osp.basename(roidb[i]["image"])
names = basename[:-4].split("_")
names[-1] = str(int(names[-1]) + 1)
basename = "_".join(names) + ".raw"
post_path = osp.join(osp.dirname(roidb[i]["image"]), basename)
try:
post_im = raw_reader(post_path, cfg.MET_TYPE, [roidb[i]["height"], roidb[i]["width"]])
if roidb[i]['flipped']:
post_im = post_im[:, ::-1]
except FileNotFoundError:
post_im = im
post_ims.append(post_im)
num_images = len(processed_ims)
blob = np.zeros((num_images, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MAX_SIZE, 3), dtype=np.float32)
    abdo_mask = np.zeros((num_images, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MAX_SIZE), dtype=bool)
if cfg.THREE_SLICES:
for i in range(num_images):
blob[i,:,:,0] = pre_ims[i]
blob[i,:,:,1] = processed_ims[i]
blob[i,:,:,2] = post_ims[i]
abdo_mask[i,:,:] = abdo_masks[i]
else:
for i in range(num_images):
blob[i,:,:,0] = processed_ims[i]
blob[i,:,:,1] = processed_ims[i]
blob[i,:,:,2] = processed_ims[i]
abdo_mask[i,:,:] = abdo_masks[i]
if cfg.USE_WIDTH_LEVEL:
win, wind2, lev = cfg.WIDTH, cfg.WIDTH / 2, cfg.LEVEL
blob = (np.clip(blob, lev - wind2, lev + wind2) - (lev - wind2)) / 2**16 * win
else:
blob /= cfg.MED_IMG_UPPER
        blob = np.clip(blob, -1., 1.)
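# Standalone sketch of CT-style window/level normalisation related to the branch above
# (illustrative only; WIDTH/LEVEL values are hypothetical, and this common variant
# scales to [0, 1], whereas the code above applies its own cfg-driven scaling):
#
#     import numpy as np
#     def window_level(img, width=400.0, level=50.0):
#         lo, hi = level - width / 2, level + width / 2
#         clipped = np.clip(img, lo, hi)
#         return (clipped - lo) / width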
# Copyright 2017 Regents of the University of California
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time, copy, collections, math, json
import numpy as np
import scipy as sp
import matplotlib
from matplotlib import pyplot as plt
import llops as yp
# Custom scale bar object
from matplotlib_scalebar.scalebar import ScaleBar
# Libwallerlab imports
from llops import display
from llops import Roi
class StopAndStareAcquisition():
# Initialization
def __init__(self, hardware_controller_list, system_metadata,
illumination_type='bf',
illumination_sequence=None,
frame_spacing_mm=1,
object_size_mm=(0.5, 0.5),
reuse_illumination_sequence=True,
max_exposure_time_s=2,
exposure_time_pad_s=0.0,
velocity_mm_s=None,
exposure_time_s=None,
debug=False,
trigger_mode='software',
motion_acceleration_mm_s_2=1e3,
flip_pathway=False,
acquisition_timeout_s=3,
illumination_na_pad=0.03,
illumination_color={'w': 127},
settle_time_s=0):
# Parse options
self.illumination_type = illumination_type
self.settle_time_s = settle_time_s
self.object_size_mm = object_size_mm
self.frame_spacing_mm = frame_spacing_mm
self.flip_pathway = flip_pathway
self.exposure_time_pad_s = exposure_time_pad_s
self.debug = debug
self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2
self.velocity_mm_s = velocity_mm_s
self.max_exposure_time_s = max_exposure_time_s
self.illumination_na_pad = illumination_na_pad
self.illumination_color = illumination_color
self.acquisition_timeout_s = acquisition_timeout_s
# Define controller objects, which act as hardware interfaces.
# These should be in an ordered dictionary because the order which they
# are initialized matters when using a mix of hardware and software triggering.
self.hardware_controller_list = collections.OrderedDict()
# First add hardware triggered elements so they perform their set-up before we trigger software elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'hardware':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Then, add software triggered elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'software':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Check to be sure a sequence acquisition is not running
assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!'
# Store metadata object
self.metadata = system_metadata
# Ensure we have all necessary metadata for basic acquisition
assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.'
assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.'
assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.'
# Update effective pixel size (for scale bar)
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag)
# Trigger Constants
self.TRIG_MODE_EVERY_PATTERN = 1
self.TRIG_MODE_ITERATION = -1
self.TRIG_MODE_START = -2
# Frame state time sequence, will default to a sequence of one exposure time per frame if left as None
self.time_sequence_s = None
self.exposure_time_s = None
self.hardware_sequence_timing = None
        # Turn off fast sequencing for illumination by default since this is only available with certain LED arrays
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].use_fast_sequence = False
# print(type(self.))
self.metadata.type = 'stop and stare'
assert 'illumination' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable light source'
assert 'position' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable positioning device'
# Generate motion pathway
self.hardware_controller_list['position'].state_sequence = self.genStopAndStarePathwayRaster(
self.object_size_mm, self.frame_spacing_mm)
# Generate illumination sequence
        illumination_pattern_sequence = [self.illumination_type] * \
            len(self.hardware_controller_list['position'].state_sequence)
        self.hardware_controller_list['illumination'].state_sequence = self.genMultiContrastSequence(
            illumination_pattern_sequence)
# Tell device not to use feedback
self.hardware_controller_list['illumination'].trigger_wait_flag = False
self.hardware_controller_list['illumination'].command('trs.0.500.0')
self.hardware_controller_list['illumination'].command('trs.1.500.0')
self.hardware_controller_list['position'].goToPosition((0,0))
self.hardware_controller_list['position'].command('ENCODER X 1')
self.hardware_controller_list['position'].command('ENCODER Y 1')
self.hardware_controller_list['position'].command('ENCW X 100')
self.hardware_controller_list['position'].command('ENCW Y 100')
def acquire(self, exposure_time_ms=50):
# Allocate memory for frames
if self.hardware_controller_list['camera'].isSequenceRunning():
self.hardware_controller_list['camera'].sequenceStop()
self.hardware_controller_list['camera'].setBufferSizeMb(
20 * len(self.hardware_controller_list['position'].state_sequence))
# Set camera exposure
self.hardware_controller_list['camera'].setExposure(exposure_time_ms / 1e3)
self.hardware_controller_list['camera'].setTriggerMode('hardware')
self.hardware_controller_list['camera'].runSequence()
self.hardware_controller_list['illumination'].bf()
        # Snap one image to ensure all acquisitions are started
self.hardware_controller_list['camera'].snap()
# generate frame_list
t0 = time.time()
frames_acquired = 0
frame_list = []
for frame in yp.display.progressBar(self.hardware_controller_list['position'].state_sequence, name='Frames Acquired'):
pos = frame['states']
x = pos[0][0]['value']['x']
y = pos[0][0]['value']['y']
self.hardware_controller_list['position'].goToPosition((x, y), blocking=True)
time.sleep(self.settle_time_s)
frame_list.append(self.hardware_controller_list['camera'].snap())
frames_acquired += 1
# print('Acquired %d of %d frames' % (frames_acquired, len(self.hardware_controller_list['position'].state_sequence)))
t_acq_sns = time.time() - t0
print("Acquisition took %.4f seconds" % (t_acq_sns))
# Create dataset
from htdeblur.mddataset import MotionDeblurDataset
dataset = MotionDeblurDataset()
# Assign acquisition time
self.metadata.acquisition_time_s = t_acq_sns
# Apply simple geometric transformations
        frame_list = np.asarray(frame_list)  # stack frames so the transforms below operate on a 3-D array
        if self.metadata.camera.transpose:
            frame_list = frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
frame_list = np.flip(frame_list, 2)
if self.metadata.camera.flip_y:
frame_list = np.flip(frame_list, 1)
# Assign
dataset.frame_list = [frame for frame in frame_list]
# Set frame state list
self.n_frames = len(self.hardware_controller_list['position'].state_sequence)
frame_state_list = []
for frame_index in range(self.n_frames):
single_frame_state_list = {}
# Loop over hardware controllers and record their state sequences
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index]
# Record time_sequence_s
single_frame_state_list['time_sequence_s'] = [0]
# Add to list of all frames
frame_state_list.append(single_frame_state_list)
dataset.metadata = self.metadata
dataset.type = 'stop_and_stare'
dataset.frame_state_list = frame_state_list
return dataset
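    # Example session (hypothetical controller/metadata objects; illustrative only):
    #
    #     acq = StopAndStareAcquisition(hardware_controller_list=[camera, stage, led_array],
    #                                   system_metadata=metadata,
    #                                   frame_spacing_mm=(0.5, 0.5),
    #                                   object_size_mm=(2.0, 2.0))
    #     dataset = acq.acquire(exposure_time_ms=50)
    #     # dataset.frame_list / dataset.frame_state_list hold the frames and hardware states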
def genStopAndStarePathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=1, include_minor_axis=False):
# Determine major axis
if major_axis is None:
major_axis = np.argmax(np.asarray(object_size_mm))
if object_size_mm[0] == object_size_mm[1]:
major_axis = 1
        # Determine number of measurements
        measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm)
                                    ).astype(int)  # two components in x and y
# Determine slightly smaller frame spacing for optimal coverage of object
frame_spacing_mm = (object_size_mm[0] / measurement_count[0], object_size_mm[1] / measurement_count[1])
# Error checking
assert np.any(measurement_count > 1), "image_size must be smaller than object_size!"
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
# This variable will be populated by the loop below
raster_segments = np.zeros((measurement_count[0] * 2, 2))
# Generate raster points
raster_end_point_list = []
pathway = []
linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning
for row in np.arange(measurement_count[0]):
if row % 2 == 0:
for index, col in enumerate(range(measurement_count[1])):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
else:
for index, col in enumerate(reversed(range(measurement_count[1]))):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
linear_segment_index += 1
# make the center the mean of the pathway
path_means = []
for path in pathway:
path_mean = ((path['y_start']), (path['x_start']))
path_means.append(path_mean)
# mean = np.sum(np.asarray(path_means), axis=1) / len(path_means)
mean = np.sum(np.asarray(path_means), axis=0) / len(path_means)
for path in pathway:
path['x_start'] -= mean[1]
path['x_end'] -= mean[1]
path['y_start'] -= mean[0]
path['y_end'] -= mean[0]
# return pathway
state_sequence = []
for path in pathway:
# Store common information about this frame
common_state_dict = {}
common_state_dict['frame_time'] = self.hardware_controller_list['camera'].getExposure()
common_state_dict['led_update_rate_us'] = None
common_state_dict['linear_segment_index'] = None
common_state_dict['frame_distance'] = 0
common_state_dict['exposure_distance'] = 0
common_state_dict['velocity'] = self.velocity_mm_s
common_state_dict['acceleration'] = self.motion_acceleration_mm_s_2
common_state_dict['n_blur_positions_exposure'] = 1
common_state_dict['position_delta_x_mm'] = 0
common_state_dict['position_delta_y_mm'] = 0
path_dict = {'value': {'time_index' : 0,
'x': path['x_start'],
'y': path['y_start']}}
state_sequence.append({'states' : [[path_dict]], 'common' : common_state_dict})
return(state_sequence)
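    # Standalone sketch of the serpentine ("boustrophedon") raster used above
    # (illustrative only):
    #
    #     import numpy as np
    #     rows, cols, pitch = 3, 4, 0.5
    #     points = []
    #     for r in range(rows):
    #         cs = range(cols) if r % 2 == 0 else reversed(range(cols))
    #         points += [(c * pitch, r * pitch) for c in cs]
    #     points = np.asarray(points)
    #     points -= np.mean(points, axis=0)              # centre the pathway on its mean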
def plotPathway(self):
sequence_list = self.hardware_controller_list['position'].state_sequence
point_list_start = []
point_list_end = []
for sequence in sequence_list:
start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y'])
end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y'])
point_list_start.append(start_pos)
point_list_end.append(end_pos)
point_list_start = np.asarray(point_list_start)
point_list_end = np.asarray(point_list_end)
plt.figure()
for index in range(len(point_list_start)):
plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b')
plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r')
plt.plot([point_list_start[index, 0], point_list_end[index, 0]],
[point_list_start[index, 1], point_list_end[index, 1]], c='y')
plt.xlabel('Position X (mm)')
plt.ylabel('Position Y (mm)')
        plt.title('Pathway (blue = start, red = end)')
plt.gca().invert_yaxis()
def genMultiContrastSequence(self, illumination_pattern_sequence, n_acquisitions=1,
darkfield_annulus_width_na=0.1):
led_list = np.arange(self.metadata.illumination.state_list.design.shape[0])
bf_mask = self.metadata.illumination.state_list.design[:, 0] ** 2 \
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (
self.metadata.objective.na + self.illumination_na_pad) ** 2
led_list_bf = led_list[bf_mask]
led_list_df = led_list[~bf_mask]
led_list_an = led_list[~bf_mask & (self.metadata.illumination.state_list.design[:, 0] ** 2
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (self.metadata.objective.na + darkfield_annulus_width_na) ** 2)]
illumination_sequence = []
self.pattern_type_list = []
pattern_dict = {'dpc.top': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] > 0]),
'dpc.bottom': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] < 0]),
                        'dpc.left': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] > 0]),
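# Standalone sketch of the NA-based LED selection used above: an LED counts as
# "brightfield" if its (NA_x, NA_y) position lies inside the (padded) objective NA
# (illustrative only; values are hypothetical):
#
#     import numpy as np
#     led_na = np.random.uniform(-1, 1, size=(100, 2))   # (NA_x, NA_y) per LED
#     objective_na, pad = 0.25, 0.03
#     bf_mask = led_na[:, 0]**2 + led_na[:, 1]**2 < (objective_na + pad)**2
#     led_list_bf = np.arange(100)[bf_mask]
#     led_list_df = np.arange(100)[~bf_mask]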
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 5 17:56:11 2021
v0.8 28Oct21
@author: <NAME>
CHANGE POINT DETECTOR
This module takes a time series and returns the underlying linear trend and changepoints. It uses EVT as described in https://www.robots.ox.ac.uk/~sjrob/Pubs/LeeRoberts_EVT.pdf
INSTRUCTIONS:
1. from ChangePointDetector import ChangePointDetector
2. Prepare your time series as data plus pandas dates
3. Create the necessary Kalman representation by creating a "session" object by calling the ChangePoint class, e.g.:
Session=ChangePointDetector.ChangePointDetectorSession(data,dates).
- 'SeasonalityPeriods' is an optional input, e.g 12 = calendar month seasonality
- 'ForecastPeriods' is another optional input, indicating how many periods to forecast. Default = 3
4. Determine the changepoints by running the ChangePointDetectorFunction on your "session", e.g.
Results=Session.ChangePointDetectorFunction()
5. This will return a "Results" object that contains the following:
- ChangePoints. This is a list of 0s and 1s the length of the data, where 1s represent changepoints
- Prediction. This is the Kalman smoothed actuals, plus a 3 period forecast. Note no forecast will be made if there is a changepoint in the last 3 dates
- PredictionVariance
    - ExtendedDates. These are the original dates plus 3 extra for the forecast (if a forecast has been made)
- Trend. This is the linear change factor
- TrendVariance. Variance of the trend
"""
import numpy as np
from numpy.linalg import inv
from statsmodels.tsa.statespace.mlemodel import MLEModel
# import math
from math import sqrt, log, exp, pi
from scipy.stats.distributions import chi2
from dateutil.relativedelta import relativedelta
class ModuleResults:
def __init__(self,Trend,TrendVariance,ChangePoints,Prediction,PredictionVariance,ExtendedDates):
self.Trend=Trend
self.TrendVariance=TrendVariance
self.ChangePoints=ChangePoints
self.Prediction=Prediction
self.ExtendedDates=ExtendedDates
self.PredictionVariance=PredictionVariance
class TimeSeriesData:
def __init__(self,data,dates):
self.data=data
self.dates=dates
#Determine periodicity
Delta=(dates[1]-dates[0])
if Delta in range(27,32):
Period = "months"
elif Delta ==1:
Period = 'days'
elif Delta ==7:
Period = 'weeks'
else:
Period ="months"
self.Period=Period
class SeasonalStateArrays:
def __init__(self,SeasonalityPeriods):
if SeasonalityPeriods ==0:
A=np.diag(np.ones(1))
H=np.ones(1)
elif SeasonalityPeriods ==1:
A=np.array([[1,1],[0,1]])
H=np.array([1,0])
else:
A=np.diag(np.ones(SeasonalityPeriods+1))
B=np.zeros((SeasonalityPeriods+1,1))
            C = -1*np.ones(SeasonalityPeriods+2)
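# Example usage, following the module docstring (variable contents are hypothetical):
#
#     import pandas as pd
#     from ChangePointDetector import ChangePointDetector
#     dates = pd.date_range('2020-01-31', periods=24, freq='M')
#     data = range(24)
#     Session = ChangePointDetector.ChangePointDetectorSession(data, dates,
#                                                              SeasonalityPeriods=12)
#     Results = Session.ChangePointDetectorFunction()
#     print(Results.ChangePoints, Results.Trend)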
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import unittest
from copy import deepcopy
from typing import List
from unittest.mock import Mock, patch
import numpy as np
import reagent.core.types as rlt
import torch
from reagent.preprocessing import transforms
from reagent.preprocessing.types import InputColumn
class TestTransforms(unittest.TestCase):
def setUp(self):
# add custom compare function for torch.Tensor
self.addTypeEqualityFunc(torch.Tensor, TestTransforms.are_torch_tensor_equal)
@staticmethod
def are_torch_tensor_equal(tensor_0, tensor_1, msg=None):
if torch.all(tensor_0 == tensor_1):
return True
raise TestTransforms.failureException("non-equal pytorch tensors found", msg)
def assertTorchTensorEqual(self, tensor_0, tensor_1, msg=None):
self.assertIsInstance(
tensor_0, torch.Tensor, "first argument is not a torch.Tensor"
)
self.assertIsInstance(
tensor_1, torch.Tensor, "second argument is not a torch.Tensor"
)
self.assertEqual(tensor_0, tensor_1, msg=msg)
def assertDictComparatorEqual(self, a, b, cmp):
"""
assertDictEqual() compares args with ==. This allows caller to override
comparator via cmp argument.
"""
self.assertIsInstance(a, dict, "First argument is not a dictionary")
self.assertIsInstance(b, dict, "Second argument is not a dictionary")
self.assertSequenceEqual(a.keys(), b.keys())
for key in a.keys():
self.assertTrue(cmp(a[key], b[key]), msg=f"Different at key {key}")
def assertDictOfTensorEqual(self, a, b):
"""
Helper method to compare dicts with values of type Tensor.
Cannot use assertDictEqual when values are of type Tensor since
tensor1 == tensor2 results in a tensor of bools. Use this instead.
"""
def _tensor_cmp(a, b):
return torch.all(a == b)
self.assertDictComparatorEqual(a, b, _tensor_cmp)
def test_Compose(self):
t1, t2 = Mock(return_value=2), Mock(return_value=3)
compose = transforms.Compose(t1, t2)
data = 1
out = compose(data)
t1.assert_called_with(1)
t2.assert_called_with(2)
self.assertEqual(out, 3)
def test_ValuePresence(self):
vp = transforms.ValuePresence()
d1 = {"a": 1, "a_presence": 0, "b": 2}
d2 = {"a_presence": 0, "b": 2}
o1 = vp(d1)
o2 = vp(d2)
self.assertEqual(o1, {"a": (1, 0), "b": 2})
self.assertEqual(o2, {"a_presence": 0, "b": 2})
def test_MaskByPresence(self):
keys = ["a", "b"]
mbp = transforms.MaskByPresence(keys)
data = {
"a": (torch.tensor(1), torch.tensor(0)),
"b": (torch.tensor(3), torch.tensor(1)),
}
expected = {"a": torch.tensor(0), "b": torch.tensor(3)}
out = mbp(data)
self.assertEqual(out["a"], expected["a"])
self.assertEqual(out["b"], expected["b"])
with self.assertRaisesRegex(Exception, "Not valid value"):
data2 = {
"a": torch.tensor(1),
"b": (torch.tensor(3), torch.tensor(1)),
}
out = mbp(data2)
with self.assertRaisesRegex(Exception, "Unmatching value shape"):
data3 = {
"a": (torch.tensor(1), torch.tensor([0, 2])),
"b": (torch.tensor(3), torch.tensor(1)),
}
out = mbp(data3)
def test_StackDenseFixedSizeArray(self):
# happy path: value is type Tensor; check cast to float
value = torch.eye(4).to(dtype=torch.int) # start as int
data = {"a": value}
out = transforms.StackDenseFixedSizeArray(data.keys(), size=4)(data)
expected = {"a": value.to(dtype=torch.float)}
self.assertDictOfTensorEqual(out, expected)
self.assertTrue(out["a"].dtype == torch.float, msg="dtype != float")
# happy path: value is list w/ elements type Tuple[Tensor, Tensor]
presence = torch.tensor([[1, 1, 1], [1, 1, 1]])
data = {
"a": [
(torch.tensor([[0, 0, 0], [1, 1, 1]]), presence),
(torch.tensor([[2, 2, 2], [3, 3, 3]]), presence),
],
"b": [
(torch.tensor([[3, 3, 3], [2, 2, 2]]), presence),
(torch.tensor([[1, 1, 1], [0, 0, 0]]), presence),
],
}
out = transforms.StackDenseFixedSizeArray(data.keys(), size=3)(data)
expected = {
"a": torch.tile(torch.arange(4).view(-1, 1).to(dtype=torch.float), (1, 3)),
"b": torch.tile(
torch.arange(4).flip(dims=(0,)).view(-1, 1).to(dtype=torch.float),
(1, 3),
),
}
self.assertDictOfTensorEqual(out, expected)
# raise for tensor wrong shape
with self.assertRaisesRegex(ValueError, "Wrong shape"):
sdf = transforms.StackDenseFixedSizeArray(["a"], size=3)
sdf({"a": torch.ones(2)})
# raise for tensor wrong ndim
with self.assertRaisesRegex(ValueError, "Wrong shape"):
sdf = transforms.StackDenseFixedSizeArray(["a"], size=2)
sdf({"a": torch.zeros(2, 2, 2)})
def test_Lambda(self):
lam = transforms.Lambda(keys=["a", "b", "c"], fn=lambda x: x + 1)
data = {"a": 1, "b": 2, "c": 3, "d": 4}
out = lam(data)
self.assertEqual(out, {"a": 2, "b": 3, "c": 4, "d": 4})
def test_SelectValuePresenceColumns(self):
        block = np.reshape(np.arange(16), (4, 4))  # (4, 4) target shape assumed
import numpy as np
import time
import subprocess
import sys, os
import copy
import matplotlib.pyplot as plt
class Evaluator(object):
# Each evaluator must contain two major functions: set_params(params) and evaluate(params)
def set_params(self, params):
# Set the params to the desired component
raise NotImplementedError
def evaluate(self, params):
# Execute and collect reward.
raise NotImplementedError
def visualize(self, final_params):
# visualize the learned final params.
pass
@property
def log(self):
return ""
class TestEvaluator(Evaluator):
# Each evaluator must contain two major functions: set_params(params) and evaluate(params)
def __init__(self):
self.x = 0
def evaluate(self, params):
# Execute and collect reward.
self.x = params[0]
return -(self.x - 1)** 2
def visualize(self, final_params):
xs = np.linspace(-3, 3, 100)
ys = -(xs - 1) ** 2
plt.plot(xs, ys)
plt.vlines(x=final_params[0], ymin=-15, ymax=3, label="Learned", colors="g")
plt.vlines(x=1, ymin=-15, ymax=3, label="Optima", colors="r", linestyles=":")
plt.legend()
plt.show()
@property
def log(self):
return "x = " + str(self.x)
class PDEvaluator(Evaluator):
# Each evaluator must contain two major functions: set_params(params) and evaluate(params)
def __init__(self):
self.xf = [10,0]
self.dt = 0.01
def simulation(self, kp, kd):
        x = np.zeros(2)
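    # A plausible continuation of simulation(), assuming a double-integrator plant
    # driven toward self.xf with a PD law (illustrative sketch only):
    #
    #     cost = 0.0
    #     for _ in range(1000):
    #         u = kp * (self.xf[0] - x[0]) + kd * (self.xf[1] - x[1])
    #         x[1] += u * self.dt                # velocity update
    #         x[0] += x[1] * self.dt             # position update
    #         cost += (self.xf[0] - x[0]) ** 2
    #     return -cost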