prompt | completion | api
---|---|---
string (lengths 19–879k) | string (lengths 3–53.8k) | string (lengths 8–59)
# Copyright <NAME> and <NAME> 2020
# Author: <NAME>
"""
Script for performing inference on new candidates. The criteria for new candidates are that the E isomer pi-pi*
value lies between 450-600 nm, that the separation between the E isomer n-pi* and Z isomer n-pi* is at least 15 nm,
and that the separation between the E isomer pi-pi* and Z isomer pi-pi* is greater than 40 nm.
"""
import gpflow
from gpflow.ci_utils import ci_niter
from gpflow.mean_functions import Constant
from gpflow.utilities import print_summary
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from data_utils import TaskDataLoader, featurise_mols
from kernels import Tanimoto
representation = 'fragprints'
task = 'e_iso_pi'
path = '../dataset/photoswitches.csv'
df = pd.read_csv('../dataset/purchasable_switch.csv')
candidate_list = df['SMILES'].to_list()
if __name__ == '__main__':
X_test = featurise_mols(candidate_list, representation)
data_loader_e_iso_pi = TaskDataLoader('e_iso_pi', path)
data_loader_z_iso_pi = TaskDataLoader('z_iso_pi', path)
data_loader_e_iso_n = TaskDataLoader('e_iso_n', path)
data_loader_z_iso_n = TaskDataLoader('z_iso_n', path)
smiles_list_e_iso_pi, y_e_iso_pi = data_loader_e_iso_pi.load_property_data()
smiles_list_z_iso_pi, y_z_iso_pi = data_loader_z_iso_pi.load_property_data()
smiles_list_e_iso_n, y_e_iso_n = data_loader_e_iso_n.load_property_data()
smiles_list_z_iso_n, y_z_iso_n = data_loader_z_iso_n.load_property_data()
y_e_iso_pi = y_e_iso_pi.reshape(-1, 1)
y_z_iso_pi = y_z_iso_pi.reshape(-1, 1)
y_e_iso_n = y_e_iso_n.reshape(-1, 1)
y_z_iso_n = y_z_iso_n.reshape(-1, 1)
X_e_iso_pi = featurise_mols(smiles_list_e_iso_pi, representation)
X_z_iso_pi = featurise_mols(smiles_list_z_iso_pi, representation)
X_e_iso_n = featurise_mols(smiles_list_e_iso_n, representation)
X_z_iso_n = featurise_mols(smiles_list_z_iso_n, representation)
output_dim = 4 # Number of outputs
rank = 1 # Rank of W
feature_dim = len(X_e_iso_pi[0, :])
tanimoto_active_dims = [i for i in range(feature_dim)] # active dims for Tanimoto base kernel.
if task == 'e_iso_pi':
X_train = X_e_iso_pi
y_train = y_e_iso_pi
elif task == 'z_iso_pi':
X_train = X_z_iso_pi
y_train = y_z_iso_pi
elif task == 'e_iso_n':
X_train = X_e_iso_n
y_train = y_e_iso_n
else:
X_train = X_z_iso_n
y_train = y_z_iso_n
if task == 'e_iso_pi':
# Augment the input with zeroes, ones, twos, threes to indicate the required output dimension
X_augmented = np.vstack((np.append(X_train, np.zeros((len(X_train), 1)), axis=1),
np.append(X_z_iso_pi, np.ones((len(X_z_iso_pi), 1)), axis=1),
np.append(X_e_iso_n, np.ones((len(X_e_iso_n), 1)) * 2, axis=1),
np.append(X_z_iso_n, np.ones((len(X_z_iso_n), 1)) * 3, axis=1)))
X_test = np.append(X_test, np.zeros((len(X_test), 1)), axis=1)
X_train = np.append(X_train, np.zeros((len(X_train), 1)), axis=1)
# Augment the Y data with zeroes, ones, twos and threes that specify a likelihood from the list of likelihoods
Y_augmented = np.vstack((np.hstack((y_train, np.zeros_like(y_train))),
np.hstack((y_z_iso_pi, np.ones_like(y_z_iso_pi))),
np.hstack((y_e_iso_n, np.ones_like(y_e_iso_n) * 2)),
np.hstack((y_z_iso_n, np.ones_like(y_z_iso_n) * 3))))
elif task == 'z_iso_pi':
# Augment the input with zeroes, ones, twos, threes to indicate the required output dimension
X_augmented = np.vstack((np.append(X_e_iso_pi, np.zeros((len(X_e_iso_pi), 1)), axis=1),
np.append(X_train, np.ones((len(X_train), 1)), axis=1),
np.append(X_e_iso_n, np.ones((len(X_e_iso_n), 1)) * 2, axis=1),
np.append(X_z_iso_n, np.ones((len(X_z_iso_n), 1)) * 3, axis=1)))
X_test = np.append(X_test, np.ones((len(X_test), 1)), axis=1)
X_train = np.append(X_train, np.ones((len(X_train), 1)), axis=1)
# Augment the Y data with zeroes, ones, twos and threes that specify a likelihood from the list of likelihoods
Y_augmented = np.vstack((np.hstack((y_e_iso_pi, np.zeros_like(y_e_iso_pi))),
np.hstack((y_train, np.ones_like(y_train))),
np.hstack((y_e_iso_n, np.ones_like(y_e_iso_n) * 2)),
np.hstack((y_z_iso_n, np.ones_like(y_z_iso_n) * 3))))
elif task == 'e_iso_n':
# Augment the input with zeroes, ones, twos, threes to indicate the required output dimension
X_augmented = np.vstack((np.append(X_e_iso_pi, np.zeros((len(X_e_iso_pi), 1)), axis=1),
np.append(X_z_iso_pi, np.ones((len(X_z_iso_pi), 1)), axis=1),
np.append(X_train, np.ones((len(X_train), 1)) * 2, axis=1),
np.append(X_z_iso_n, np.ones((len(X_z_iso_n), 1)) * 3, axis=1)))
X_test = np.append(X_test, np.ones((len(X_test), 1)) * 2, axis=1)
X_train = np.append(X_train, np.ones((len(X_train), 1)) * 2, axis=1)
# Augment the Y data with zeroes, ones, twos and threes that specify a likelihood from the list of likelihoods
Y_augmented = np.vstack((np.hstack((y_e_iso_pi, np.zeros_like(y_e_iso_pi))),
np.hstack((y_z_iso_pi, np.ones_like(y_z_iso_pi))),
np.hstack((y_train, np.ones_like(y_train) * 2)),
np.hstack((y_z_iso_n, np.ones_like(y_z_iso_n) * 3))))
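# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the augmentation
# above appends a task-index column to the inputs and pairs each target with
# the index of its likelihood, so a single multi-output GP can model all four
# properties. A toy example with made-up 2-feature data for two hypothetical
# tasks (numpy is already imported as np at the top of this script):
X_task0 = np.array([[1, 0], [0, 1]])      # hypothetical fingerprints, task 0
X_task1 = np.array([[1, 1]])              # hypothetical fingerprints, task 1
y_task0 = np.array([[450.0], [520.0]])    # made-up wavelengths in nm
y_task1 = np.array([[480.0]])
# Append the task index as the last input column ...
X_aug_demo = np.vstack((np.append(X_task0, np.zeros((len(X_task0), 1)), axis=1),
                        np.append(X_task1, np.ones((len(X_task1), 1)), axis=1)))
# ... and pair each target with the index of its likelihood.
Y_aug_demo = np.vstack((np.hstack((y_task0, np.zeros_like(y_task0))),
                        np.hstack((y_task1, np.ones_like(y_task1)))))
print(X_aug_demo.shape, Y_aug_demo.shape)  # (3, 3) (3, 2)
# ---------------------------------------------------------------------------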
# Copyright 2013-2016 The Salish Sea MEOPAR contributors
# and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Locates and concatenates datafiles needed for MOHID based on user input time range
"""
import os
from datetime import datetime, timedelta
from dateutil.parser import parse
import numpy as np
import errno
import time
import shlex
import subprocess
# NEMO input files directory
nemoinput = '/results2/SalishSea/nowcast-green.201806/'
# HRDPS input files directory
hdinput = '/results/forcing/atmospheric/GEM2.5/operational/'
# WW3 input files directory
wwinput = '/opp/wwatch3/nowcast/'
# Output filepath
outpath = '/results2/MIDOSS/forcing/SalishSeaCast/'
## Integer -> String
## consumes time in seconds and outputs a string that gives the time in HH:MM:SS format
def conv_time(time):
"""Give time in HH:MM:SS format.
:arg time: time elapsed in seconds
:type time: :py:class:`int`
:returns: time elapsed in HH:MM:SS format
:rtype: :py:class:`str`
"""
hours = int(time/3600)
mins = int((time - (hours*3600))/60)
secs = int((time - (3600 * hours) - (mins *60)))
return '{:02d}:{:02d}:{:02d}'.format(hours, mins, secs)
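# Quick illustration (not in the original script): 3725 seconds is 1 hour,
# 2 minutes and 5 seconds, so with the zero-padded format string above
# >>> conv_time(3725)
# '01:02:05'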
def generate_paths_NEMO(timestart, timeend, path, outpath):
"""Concatenate NEMO U, V, W and T files for MOHID.
:arg timestart: date from when to start concatenating
:type timestart: :py:class:`str`
:arg timeend: date at which to stop concatenating
:type timeend: :py:class:`str`
:arg path: path of input files
:type path: :py:class:`str`
:arg outpath: path for output files
:type outpath: :py:class:`str`
:returns: None
:rtype: :py:class:`NoneType`
"""
# generate list of dates from daterange given
daterange = [parse(t) for t in [timestart, timeend]]
U_files = []
V_files = []
W_files = []
T_files = []
# output folder name built from the date range; the end date is one day earlier than timeend because each day's dataset only runs until midnight
folder = parse(timestart).strftime('%d%b%y').lower() + '-' + (parse(timeend) - timedelta(days=1)).strftime('%d%b%y').lower()
# append all filename strings within daterange to lists
for day in range(np.diff(daterange)[0].days):
datestamp = daterange[0] + timedelta(days=day)
datestr1 = datestamp.strftime('%d%b%y').lower()
datestr2 = datestamp.strftime('%Y%m%d')
U_files.append(f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_grid_U.nc')
V_files.append(f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_grid_V.nc')
W_files.append(f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_grid_W.nc')
T_files.append(f'{path}{datestr1}/SalishSea_1h_{datestr2}_{datestr2}_grid_T.nc')
shell_U = 'ncrcat '
# concatenate all U parameter filename strings to create shell command
for file in U_files:
shell_U = shell_U + file + ' '
# concatenate output filename and directory to end of shell command
shell_U = shell_U + f'{outpath}nowcast-green/{folder}/' + 'U.nc'
# concatenate all V parameter filename strings to create shell command
shell_V = 'ncrcat '
for file in V_files:
shell_V = shell_V + file + ' '
# concatenate output filename and directory to end of shell command
shell_V = shell_V + f'{outpath}nowcast-green/{folder}/' + 'V.nc'
# concatenate all W parameter filename strings to create shell command
shell_W = 'ncrcat '
for file in W_files:
shell_W = shell_W + file + ' '
# concatenate output filename and directory to end of shell command
shell_W = shell_W + f'{outpath}nowcast-green/{folder}/' + 'W.nc'
# concatenate all T parameter filename strings to create shell command
shell_T = 'ncrcat '
for file in T_files:
shell_T = shell_T + file + ' '
# concatenate output filename and directory to end of shell command
shell_T = shell_T + f'{outpath}nowcast-green/{folder}/' + 'T.nc'
# create output directory
dirname = f'{outpath}nowcast-green/{folder}/'
if not os.path.exists(os.path.dirname(dirname)):
try:
os.makedirs(os.path.dirname(dirname))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
# run shell scripts to concatenate netcdf files
processes = []
for cmd in (shell_U, shell_V, shell_W, shell_T):
p = subprocess.Popen(shlex.split(cmd))
# make a list of the processes we have just launched
processes.append(p)
while processes:
for p in processes[:]:
# poll() returns None while the process is still running and the return
# code once it has finished, so remove finished processes from the list
if p.poll() is not None:
processes.remove(p)
# wait for 5 seconds before we check again
time.sleep(5)
return None
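# Illustrative call (not in the original script; the date strings are
# hypothetical, and any format accepted by dateutil.parser.parse should work):
# generate_paths_NEMO('1 may 2018', '31 may 2018', nemoinput, outpath)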
def generate_paths_HRDPS(timestart, timeend, path, outpath):
"""Concatenate HRDPS files for MOHID.
:arg timestart: date from when to start concatenating
:type timestart: :py:class:`str`
:arg timeend: date at which to stop concatenating
:type timeend: :py:class:`str`
:arg path: path of input files
:type path: :py:class:`str`
:arg outpath: path for output files
:type outpath: :py:class:`str`
:returns: None
:rtype: :py:class:`NoneType`
"""
# generate list of dates from daterange given
daterange = [parse(t) for t in [timestart, timeend]]
wind_files = []
# output folder name built from the date range; the end date is one day earlier than timeend because each day's dataset only runs until midnight
folder = parse(timestart).strftime('%d%b%y').lower() + '-' + (parse(timeend) - timedelta(days=1)).strftime('%d%b%y').lower()
# append all filename strings within daterange to list
for day in range(np.diff(daterange)[0].days):
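# For reference (illustrative, not in the original script): np.diff on two
# parsed dates yields a timedelta whose .days attribute is the day count, e.g.
# >>> np.diff([parse('1 may 2018'), parse('4 may 2018')])[0].days
# 3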
"""Functions for importing and analyzing traffic traces"""
from __future__ import division
import math
import collections
import time
import dateutil
import types
import numpy as np
from scipy.stats import chisquare
from icarus.tools import TruncatedZipfDist
__all__ = [
'frequencies',
'one_timers',
'trace_stats',
'zipf_fit',
'parse_url_list',
'parse_wikibench',
'parse_squid',
'parse_youtube_umass',
'parse_common_log_format'
]
def frequencies(data):
"""Extract frequencies from traces. Returns array of sorted frequencies
Parameters
----------
data : array-like
An array of generic data (i.e. URLs of web pages)
Returns
-------
frequencies : array of int
The frequencies of the data sorted in descending order
Notes
-----
This function does not return the mapping between data elements and their
frequencies, it only returns frequencies.
This function can be used to get frequencies to pass to the *zipf_fit*
function given a set of data, e.g. content request traces.
"""
return np.asarray(sorted(collections.Counter(data).values(), reverse=True))
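# Example (illustrative): repeated items are tallied and sorted in
# descending order of frequency.
# >>> frequencies(['a', 'b', 'a', 'c', 'a', 'b'])
# array([3, 2, 1])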
def one_timers(data):
"""Return fraction of contents requested only once (i.e., one-timers)
Parameters
----------
data : array-like
An array of generic data (i.e. URLs of web pages)
Returns
-------
one_timers : float
Fraction of content objects requested only once.
"""
n_items = 0
n_onetimers = 0
counter = collections.Counter(data)
for i in counter.values():
n_items += 1
if i == 1:
n_onetimers += 1
return n_onetimers / n_items
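# Example (illustrative): 'b' and 'c' are requested once out of 3 distinct
# items, so the one-timer fraction is 2/3.
# >>> one_timers(['a', 'a', 'b', 'c'])
# 0.6666666666666666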
def trace_stats(data):
"""Print full stats of a trace
Parameters
----------
data : array-like
An array of generic data (i.e. URLs of web pages)
Return
------
stats : dict
Metrics of the trace
"""
if isinstance(data, types.GeneratorType):
data = collections.deque(data)
freqs = frequencies(data)
alpha, p = zipf_fit(freqs)
n_reqs = len(data)
n_contents = len(freqs)
n_onetimers = len(freqs[freqs == 1])
return dict(n_contents=n_contents,
n_reqs=n_reqs,
n_onetimers=n_onetimers,
alpha=alpha,
p=p,
onetimers_contents_ratio=n_onetimers / n_contents,
onetimers_reqs_ratio=n_onetimers / n_reqs,
mean_reqs_per_content=n_reqs / n_contents
)
def zipf_fit(obs_freqs, need_sorting=False):
"""Returns the value of the Zipf's distribution alpha parameter that best
fits the data provided and the p-value of the fit test.
Parameters
----------
obs_freqs : array
The array of observed frequencies sorted in descending order
need_sorting : bool, optional
If True, indicates that obs_freqs is not sorted and this function will
sort it. If False, assume that the array is already sorted
Returns
-------
alpha : float
The alpha parameter of the best Zipf fit
p : float
The p-value of the test
Notes
-----
This function uses the method described in
http://stats.stackexchange.com/questions/6780/how-to-calculate-zipfs-law-coefficient-from-a-set-of-top-frequencies
"""
try:
from scipy.optimize import minimize_scalar
except ImportError:
raise ImportError("Cannot import scipy.optimize minimize_scalar. "
"You either don't have scipy install or you have a "
"version too old (required 0.12 onwards)")
obs_freqs = np.asarray(obs_freqs)
if need_sorting:
# Sort in descending order
obs_freqs = -np.sort(-obs_freqs)
n = len(obs_freqs)
def log_likelihood(alpha):
return np.sum(obs_freqs * (alpha * np.log(np.arange(1.0, n + 1)) + \
math.log(sum(1.0 / np.arange(1.0, n + 1) ** alpha))))
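# A minimal sketch (not verbatim from this module) of how the fit could be
# completed with the imports above: minimise the negative log-likelihood with
# scipy's minimize_scalar, then compute a chi-square goodness-of-fit p-value
# against the expected Zipf frequencies:
#
#     alpha = minimize_scalar(log_likelihood)['x']
#     if alpha <= 0:
#         return alpha, 0
#     exp_freqs = np.sum(obs_freqs) * TruncatedZipfDist(alpha, n).pdf
#     p = chisquare(obs_freqs, exp_freqs)[1]
#     return alpha, p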
import numpy as np
import sys
class ArffFeatures:
def __init__(self, filepath):
self.ID_ATTR = 'ID'
self.attrs = dict()
self.entities = list()
self.id_lookup = dict()
self._parse_arff_features(filepath)
self._create_id_lookup()
self._calculate_factors()
print('Number of entities in .arff file:', len(self.entities))
def _parse_arff_features(self, filepath):
self.attrs = dict()
self.entities = list()
f = open(filepath, 'r')
mode = 'meta_data'
for line in f:
if mode == 'meta_data':
if line.startswith('@attribute'):
self._parse_attr_def(line)
if line.startswith('@data'):
mode = 'data'
elif mode == 'data':
self._parse_entity(line)
def _parse_attr_def(self, line):
_, attr_name, attr_type = line.split()
self.attrs[len(self.attrs)] = (attr_name, attr_type)
def _parse_entity(self, line):
entity = dict()
values = line.split(',')
for i, value in enumerate(values):
entity[self.attrs[i][0]] = value
self.entities.append(entity)
return
def _create_id_lookup(self):
self.id_lookup = dict()
for entity in self.entities:
entity_id = int(entity[self.ID_ATTR])
self.id_lookup[entity_id] = entity
return
def _calculate_factors(self):
self.factors = dict()
for attr_id in self.attrs:
if (self.attrs[attr_id][0] != self.ID_ATTR) and (self.attrs[attr_id][1] in ('numeric', 'numerical')):
values = [float(entity[self.attrs[attr_id][0]])
for entity in self.entities]
mu = np.mean(values)
sig = np.std(values)
import numpy as np
import tensorflow as tf
from tasks import Task
class CopyTask(Task):
epsilon = 1e-2
def __init__(self, vector_size, min_seq, train_max_seq, n_copies):
self.vector_size = vector_size
self.min_seq = min_seq
self.train_max_seq = train_max_seq
self.n_copies = n_copies
self.max_seq_curriculum = self.min_seq + 1
self.max_copies = 5
self.x_shape = [None, None, self.vector_size]
self.y_shape = [None, None, self.vector_size]
self.mask = [None, None, self.vector_size]
# Used for curriculum training
self.state = 0
self.consecutive_thresh = 100
def update_training_state(self, cost):
if cost <= CopyTask.epsilon:
self.state += 1
else:
self.state = 0
def check_lesson_learned(self):
if self.state < self.consecutive_thresh:
return False
else:
return True
def next_lesson(self):
self.state = 0
# if self.max_seq_curriculum < self.train_max_seq:
# self.max_seq_curriculum += 1
# print("Increased max_seq to", self.max_seq_curriculum)
if self.n_copies < 5:
self.n_copies += 1
print("Increased n_copies to", self.n_copies)
else:
print("Done with the training!!!")
def update_state(self, cost):
self.update_training_state(cost)
if self.check_lesson_learned():
self.next_lesson()
def cost(self, outputs, correct_output, mask=None):
sigmoid_cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=outputs,
labels=correct_output)
return tf.reduce_mean(sigmoid_cross_entropy)
def generate_data(self, batch_size=16, train=True, cost=9999):
if train:
# Update curriculum training state
self.update_state(cost)
self.max_seq_curriculum = self.train_max_seq
# self.n_copies = self.max_copies
data_batch = CopyTask.generate_n_copies(batch_size, self.vector_size, self.min_seq,
self.max_seq_curriculum,
self.n_copies)
else:
data_batch = CopyTask.generate_n_copies(batch_size, self.vector_size, self.train_max_seq,
self.train_max_seq,
self.n_copies)
return data_batch
def display_output(self, prediction, data_batch, mask):
pass
def test(self, sess, output, pl, batch_size):
pass
@staticmethod
def generate_n_copies(batch_size, inp_vector_size, min_seq, max_seq, n_copies):
copies_list = [
CopyTask.generate_copy_pair(batch_size, inp_vector_size, min_seq, max_seq)
for _ in range(n_copies)]
output = np.concatenate([i[0] for i in copies_list], axis=2)
total_length = np.sum([i[1] for i in copies_list])
mask = np.ones((batch_size, total_length, inp_vector_size))
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.colorimetry.luminance` module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from colour.colorimetry import (
luminance_Newhall1943, intermediate_luminance_function_CIE1976,
luminance_CIE1976, luminance_ASTMD1535, luminance_Fairchild2010,
luminance_Fairchild2011)
from colour.colorimetry.luminance import luminance
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestLuminanceNewhall1943', 'TestLuminanceASTMD1535',
'TestIntermediateLuminanceFunctionCIE1976', 'TestLuminanceCIE1976',
'TestLuminanceFairchild2010', 'TestLuminanceFairchild2011', 'TestLuminance'
]
class TestLuminanceNewhall1943(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition unit tests methods.
"""
def test_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition.
"""
self.assertAlmostEqual(
luminance_Newhall1943(4.08244375), 12.550078816731881, places=7)
self.assertAlmostEqual(
luminance_Newhall1943(5.39132685), 23.481252371310738, places=7)
self.assertAlmostEqual(
luminance_Newhall1943(2.97619312), 6.4514266875601924, places=7)
def test_n_dimensional_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition n-dimensional arrays support.
"""
V = 4.08244375
Y = luminance_Newhall1943(V)
V = np.tile(V, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
V = np.reshape(V, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
V = np.reshape(V, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
def test_domain_range_scale_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition domain and range scale support.
"""
Y = luminance_Newhall1943(4.08244375)
d_r = (('reference', 1, 1), (1, 0.1, 0.01), (100, 10, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Newhall1943(4.08244375 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition nan support.
"""
luminance_Newhall1943(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceASTMD1535(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition unit tests methods.
"""
def test_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition.
"""
self.assertAlmostEqual(
luminance_ASTMD1535(4.08244375), 12.236342675366036, places=7)
self.assertAlmostEqual(
luminance_ASTMD1535(5.39132685), 22.893999867280378, places=7)
self.assertAlmostEqual(
luminance_ASTMD1535(2.97619312), 6.2902253509053132, places=7)
def test_n_dimensional_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition n-dimensional arrays support.
"""
V = 4.08244375
Y = luminance_ASTMD1535(V)
V = np.tile(V, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
V = np.reshape(V, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
V = np.reshape(V, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
def test_domain_range_scale_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition domain and range scale support.
"""
Y = luminance_ASTMD1535(4.08244375)
d_r = (('reference', 1, 1), (1, 0.1, 0.01), (100, 10, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_ASTMD1535(4.08244375 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition nan support.
"""
luminance_ASTMD1535(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestIntermediateLuminanceFunctionCIE1976(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition unit tests methods.
"""
def test_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition.
"""
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.495929964178047),
12.197225350000002,
places=7)
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.613072093530391),
23.042767810000004,
places=7)
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.394876333449113),
6.157200790000001,
places=7)
def test_n_dimensional_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition n-dimensional arrays
support.
"""
f_Y_Y_n = 0.495929964178047
Y = intermediate_luminance_function_CIE1976(f_Y_Y_n)
f_Y_Y_n = np.tile(f_Y_Y_n, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
def test_domain_range_scale_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition domain and range scale
support.
"""
Y = intermediate_luminance_function_CIE1976(41.527875844653451, 100)
for scale in ('reference', 1, 100):
with domain_range_scale(scale):
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(
41.527875844653451, 100),
Y,
decimal=7)
@ignore_numpy_errors
def test_nan_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition nan support.
"""
intermediate_luminance_function_CIE1976(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceCIE1976(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_CIE1976` definition
unit tests methods.
"""
def test_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition.
"""
self.assertAlmostEqual(
luminance_CIE1976(41.527875844653451),
12.197225350000002,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(55.116362849525402),
23.042767810000004,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(29.805654680097106), 6.157200790000001, places=7)
self.assertAlmostEqual(
luminance_CIE1976(56.480581732417676, 50),
12.197225349999998,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(47.317620274162735, 75),
12.197225350000002,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(42.519930728120940, 95),
12.197225350000005,
places=7)
def test_n_dimensional_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition n-dimensional arrays support.
"""
L_star = 41.527875844653451
Y = luminance_CIE1976(L_star)
L_star = np.tile(L_star, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
L_star = np.reshape(L_star, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
L_star = np.reshape(L_star, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
def test_domain_range_scale_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition domain and range scale support.
"""
Y = luminance_CIE1976(41.527875844653451, 100)
d_r = (('reference', 1), (1, 0.01), (100, 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_CIE1976(41.527875844653451 * factor, 100),
Y * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition nan support.
"""
luminance_CIE1976(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceFairchild2010(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition unit tests methods.
"""
def test_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition.
"""
self.assertAlmostEqual(
luminance_Fairchild2010(31.996390226262736),
0.12197225350000002,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(60.203153682783302),
0.23042767809999998,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(11.836517240976489),
0.06157200790000001,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(24.424283249379986, 2.75),
0.12197225350000002,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(100.019986327374240),
1008.00000024,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(100.019999997090270),
100799.92312466,
places=7)
def test_n_dimensional_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition n-dimensional arrays support.
"""
L_hdr = 31.996390226262736
Y = luminance_Fairchild2010(L_hdr)
L_hdr = np.tile(L_hdr, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
def test_domain_range_scale_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition domain and range scale support.
"""
Y = luminance_Fairchild2010(31.996390226262736)
d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Fairchild2010(31.996390226262736 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition nan support.
"""
luminance_Fairchild2010(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceFairchild2011(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition unit tests methods.
"""
def test_luminance_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition.
"""
self.assertAlmostEqual(
luminance_Fairchild2011(51.852958445912506),
0.12197225350000007,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(65.275207956353853),
0.23042767809999998,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(39.818935510715917),
0.061572007900000038,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(0.13268968410139345, 2.75),
0.12197225350000002,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(234.72925681957565),
1008.00000000,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(245.57059778237573),
100800.00000000,
places=7)
def test_n_dimensional_luminance_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition n-dimensional arrays support.
"""
L_hdr = 51.852958445912506
Y = luminance_Fairchild2011(L_hdr)
L_hdr = np.tile(L_hdr, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(
luminance_Fairchild2011(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(
luminance_Fairchild2011(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(
luminance_Fairchild2011(L_hdr), Y, decimal=7)
def test_domain_range_scale_luminance_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition domain and range scale support.
"""
Y = luminance_Fairchild2011(26.459509817572265)
d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Fairchild2011(26.459509817572265 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition nan support.
"""
luminance_Fairchild2011(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
from collections import namedtuple
import numpy as np
from matplotlib import pyplot as plt
plt.switch_backend('agg')
import matplotlib.cm as cm
import time
from read_starcd import write_tecplot
def f_maxwell(vx, vy, vz, T, n, ux, uy, uz, Rg):
"""Compute maxwell distribution function on cartesian velocity mesh
vx, vy, vz - 3d numpy arrays with x, y, z components of velocity mesh
in each node
T - float, temperature in K
n - float, numerical density
ux, uy, uz - floats, x,y,z components of equilibrium velocity
Rg - gas constant for specific gas
"""
return n * ((1. / (2. * np.pi * Rg * T)) ** (3. / 2.)) * (np.exp(-((vx - ux)**2 + (vy - uy)**2 + (vz - uz)**2) / (2. * Rg * T)))
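# Illustrative usage (not part of the original module): evaluate the Maxwell
# distribution on a small cartesian velocity mesh built with np.meshgrid and
# check that the quadrature recovers the prescribed density. All numbers here
# are made up; Rg corresponds to an argon-like gas.
def _f_maxwell_demo():
    v = np.linspace(-3000., 3000., 50)                   # m/s, toy velocity grid
    vx, vy, vz = np.meshgrid(v, v, v, indexing='ij')
    Rg = 8.3144598 / 40e-3                               # J / (kg * K)
    f = f_maxwell(vx, vy, vz, T=300., n=1e20, ux=0., uy=0., uz=0., Rg=Rg)
    hv = v[1] - v[0]
    print('recovered density ~', (hv ** 3) * np.sum(f))  # should be close to 1e20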
class GasParams:
Na = 6.02214129e+23 # Avogadro constant
kB = 1.381e-23 # Boltzmann constant, J / K
Ru = 8.3144598 # Universal gas constant
def __init__(self, Mol = 40e-3, Pr = 2. / 3., g = 5. / 3., d = 3418e-13):
self.Mol = Mol
self.Rg = self.Ru / self.Mol # J / (kg * K)
self.m = self.Mol / self.Na # kg
self.Pr = Pr
self.C = 144.4
self.T_0 = 273.11
self.mu_0 = 2.125e-05
self.mu_suth = lambda T: self.mu_0 * ((self.T_0 + self.C) / (T + self.C)) * ((T / self.T_0) ** (3. / 2.))
self.mu = lambda T: self.mu_suth(200.) * (T/200.)**0.734
self.g = g # specific heat ratio
self.d = d # diameter of molecule
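# Quick illustration of the defaults above (argon-like gas):
# >>> gp = GasParams()
# >>> round(gp.Rg, 2)   # Ru / Mol = 8.3144598 / 40e-3
# 207.86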
class Problem:
def __init__(self, bc_type_list = None, bc_data = None, f_init = None):
# list of boundary conditions' types
# according to the order in the starcd '.bnd' file
# list of strings
self.bc_type_list = bc_type_list
# data for b.c.: wall temperature, inlet n, u, T and so on.
# list of lists
self.bc_data = bc_data
# Function to set initial condition
self.f_init = f_init
def set_bc(gas_params, bc_type, bc_data, f, vx, vy, vz, vn):
"""Set boundary condition
"""
if (bc_type == 'sym-x'): # symmetry in x
return f[::-1, :, :]
elif (bc_type == 'sym-y'): # symmetry in y
return f[:, ::-1, :]
elif (bc_type == 'sym-z'): # symmetry in z
return f[:, :, ::-1]
elif (bc_type == 'sym'): # zero derivative
return f[:, :, :]
elif (bc_type == 'in'): # inlet
# unpack bc_data
n = bc_data[0]
ux = bc_data[1]
uy = bc_data[2]
uz = bc_data[3]
T = bc_data[4]
return f_maxwell(vx, vy, vz, T, n, ux, uy, uz, gas_params.Rg)
elif (bc_type == 'out'): # outlet
# unpack bc_data
n = bc_data[0]
ux = bc_data[1]
uy = bc_data[2]
uz = bc_data[3]
T = bc_data[4]
return f_maxwell(vx, vy, vz, T, n, ux, uy, uz, gas_params.Rg)
elif (bc_type == 'wall'): # wall
# unpack bc_data
T_w = bc_data[0]
hv = vx[1, 0, 0] - vx[0, 0, 0]
fmax = f_maxwell(vx, vy, vz, T_w, 1., 0., 0., 0., gas_params.Rg)
Ni = (hv**3) * np.sum(f * np.where(vn > 0, vn, 0.))
Nr = (hv**3) * np.sum(fmax * np.where(vn < 0, vn, 0.))
# TODO: replace np.sqrt(2 * np.pi / (gas_params.Rg * T_w))
# with discrete quadrature, as in the dissertation
n_wall = - Ni/ Nr
# n_wall = 2e+23 # temporary
return n_wall * fmax
def comp_macro_param_and_j(f, vx, vy, vz, gas_params):
Rg = gas_params.Rg
hv = vx[1, 0, 0] - vx[0, 0, 0]
n = (hv ** 3) * np.sum(f)
ux = (1. / n) * (hv ** 3) * np.sum(vx * f)
uy = (1. / n) * (hv ** 3) * np.sum(vy * f)
uz = (1. / n) * (hv ** 3) * np.sum(vz * f)
v2 = vx*vx + vy*vy + vz*vz
u2 = ux*ux + uy*uy + uz*uz
T = (1. / (3. * n * Rg)) * ((hv ** 3) * np.sum(v2 * f) - n * u2)
Vx = vx - ux
Vy = vy - uy
Vz = vz - uz
rho = gas_params.m * n
p = rho * Rg * T
cx = Vx / ((2. * Rg * T) ** (1. / 2.))
cy = Vy / ((2. * Rg * T) ** (1. / 2.))
cz = Vz / ((2. * Rg * T) ** (1. / 2.))
c2 = cx*cx + cy*cy + cz*cz
Sx = (1. / n) * (hv ** 3) * np.sum(cx * c2 * f)
Sy = (1. / n) * (hv ** 3) * np.sum(cy * c2 * f)
Sz = (1. / n) * (hv ** 3) * np.sum(cz * c2 * f)
import numpy as np
import random
import tensorflow as tf
from lwau import LWAU
from tensorflow.python.platform import flags
import os
from task_generator import TaskGenerator
FLAGS = flags.FLAGS
flags.DEFINE_integer('metatrain_iterations', 60000, 'number of metatraining iterations.')
# Training options
flags.DEFINE_integer('num_classes', 5, 'number of classes used in classification')
flags.DEFINE_integer('meta_batch_size', 4, 'number of tasks sampled per meta-training iteration')
flags.DEFINE_float('meta_lr', 0.001, 'the meta learning rate')
flags.DEFINE_float('update_lr', 0.01, 'the inner-update learning rate')
flags.DEFINE_integer('update_batch_size', 1, 'K for K-shot learning.')
flags.DEFINE_integer('num_updates', 5, 'number of inner update steps during training.')
flags.DEFINE_integer('num_train_tasks', 20, 'number of meta training tasks.')
flags.DEFINE_float('l2_alpha', 0.001, 'param of the l2_norm')
flags.DEFINE_float('l1_alpha', 0.001, 'param of the l1_norm')
flags.DEFINE_float('dropout_rate', 0, 'dropout_rate of the FC layer')
flags.DEFINE_integer('base_num_filters', 16, 'number of filters for conv nets.')
flags.DEFINE_integer('test_num_updates', 10, 'number of inner update steps during testing')
## Logging, saving, and testing options
flags.DEFINE_bool('log', True, 'if false, do not log summaries, for debugging code.')
flags.DEFINE_string('logdir', 'logs/miniimagenet1shot/', 'directory for summaries and checkpoints.')
flags.DEFINE_bool('resume', False, 'resume training if there is a model available')
flags.DEFINE_bool('train', True, 'True to train, False to test.')
flags.DEFINE_integer('test_iter', -1, 'iteration to load model (-1 for latest model)')
flags.DEFINE_bool('test_set', False, 'Set to true to test on the the test set, False for the validation set.')
flags.DEFINE_bool('data_aug', False, 'whether use the data augmentation.')
flags.DEFINE_string('backbone', 'Conv4', 'Conv4 or ResNet12 backone.')
if FLAGS.train:
NUM_TEST_POINTS = int(600/FLAGS.meta_batch_size)
else:
NUM_TEST_POINTS = 600
LEN_MODELS = 50
PRINT_INTERVAL = 50
TEST_PRINT_INTERVAL = PRINT_INTERVAL*6
def train(model, saver, sess, exp_string, task_generator, resume_itr=0):
print('Done initializing, starting training.')
print(exp_string)
prelosses, postlosses = [], []
models = {}
for itr in range(resume_itr, FLAGS.metatrain_iterations):
if FLAGS.backbone == 'Conv4':
feed_dict = {model.meta_lr: FLAGS.meta_lr}
else:
lr = FLAGS.meta_lr * 0.5 ** int(itr / 15000)
feed_dict = {model.meta_lr: lr}
inputa, labela, inputb, labelb = task_generator.get_data_n_tasks(FLAGS.meta_batch_size, train=True)
feed_dict[model.inputa] = inputa
feed_dict[model.labela] = labela
feed_dict[model.inputb] = inputb
feed_dict[model.labelb] = labelb
input_tensors = [model.metatrain_op]
input_tensors.extend([model.total_loss1, model.total_losses2[FLAGS.num_updates-1]])
input_tensors.extend([model.total_accuracy1, model.total_accuracies2[FLAGS.num_updates-1]])
result = sess.run(input_tensors, feed_dict)
prelosses.append(result[-2])
postlosses.append(result[-1])
if (itr!=0) and itr % PRINT_INTERVAL == 0:
print_str = 'Iteration ' + str(itr)
print_str += ': ' + str(np.mean(prelosses)) + ', ' + str(np.mean(postlosses))
print(print_str)
prelosses, postlosses = [], []
# periodically evaluate on the meta-validation set
if (itr!=0) and itr % TEST_PRINT_INTERVAL == 0:
metaval_accuracies = []
for _ in range(NUM_TEST_POINTS):
feed_dict = {}
inputa, labela, inputb, labelb = task_generator.get_data_n_tasks(FLAGS.meta_batch_size, train=False)
feed_dict[model.inputa] = inputa
feed_dict[model.labela] = labela
feed_dict[model.inputb] = inputb
feed_dict[model.labelb] = labelb
input_tensors = [[model.metaval_total_accuracy1] + model.metaval_total_accuracies2]
result = sess.run(input_tensors, feed_dict)
metaval_accuracies.append(result[0])
metaval_accuracies = np.array(metaval_accuracies)
means = np.mean(metaval_accuracies, 0)
stds = np.std(metaval_accuracies, 0)
ci95 = 1.96 * stds / np.sqrt(NUM_TEST_POINTS)
print('----------------------------------------', itr)
print('Mean validation accuracy:', means)
print('Validation accuracy stddev:', stds)
print('Validation accuracy 95% CI:', ci95)
print('----------------------------------------', )
val_postaccs = max(means)
model_name = FLAGS.logdir + '/' + exp_string + '/model' + str(itr)
if len(models) >= LEN_MODELS:
min_acc, min_model = min(zip(models.values(), models.keys()))
if val_postaccs > min_acc:
del models[min_model]
models[model_name] = val_postaccs
saver.save(sess, model_name)
# os.remove(min_model+'.meta')
os.remove(min_model + '.data-00000-of-00001')
os.remove(min_model + '.index')
os.remove(model_name + '.meta')
else:
pass
max_acc, max_model = max(zip(models.values(), models.keys()))
print(max_model, ':', max_acc)
else:
models[model_name] = val_postaccs
saver.save(sess, model_name)
os.remove(model_name + '.meta')
saver.save(sess, FLAGS.logdir + '/' + exp_string + '/model' + str(itr))
def test(model, sess, task_generator):
np.random.seed(1)
random.seed(1)
metaval_accuracies = []
max_acc = 0
print(NUM_TEST_POINTS)
for _ in range(NUM_TEST_POINTS):
feed_dict = {model.meta_lr : 0.0}
inputa, labela, inputb, labelb = task_generator.get_data_n_tasks(FLAGS.meta_batch_size, train=False)
feed_dict[model.inputa] = inputa
feed_dict[model.labela] = labela
feed_dict[model.inputb] = inputb
feed_dict[model.labelb] = labelb
result = sess.run([model.metaval_total_accuracy1] + model.metaval_total_accuracies2, feed_dict)
metaval_accuracies.append(result)
metaval_accuracies = np.array(metaval_accuracies)
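# A sketch (not from the original script) of how the collected accuracies
# could be summarised, mirroring the validation block in train() above:
#
#     means = np.mean(metaval_accuracies, 0)
#     stds = np.std(metaval_accuracies, 0)
#     ci95 = 1.96 * stds / np.sqrt(NUM_TEST_POINTS)
#     print('Mean test accuracy / stddev / 95% CI:', means, stds, ci95)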
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from matplotlib import cm, colors
from astropy.modeling import models, fitting
# Reading in all data files at once
import glob
path_normal ='/projects/p30137/ageller/testing/EBLSST/add_m5/output_files'
allFiles_normal = glob.glob(path_normal + "/*.csv")
path_fast = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/output_files'
allFiles_fast = glob.glob(path_fast + "/*.csv")
path_obsDist = '/projects/p30137/ageller/testing/EBLSST/add_m5/fast/old/obsDist/output_files'
allFiles_obsDist = glob.glob(path_obsDist + "/*.csv")
N_totalnormal_array = []
N_totalobservablenormal_array = []
N_totalrecoverablenormal_array = []
N_totalnormal_array_03 = []
N_totalobservablenormal_array_03 = []
N_totalrecoverablenormal_array_03 = []
N_totalnormal_array_1 = []
N_totalobservablenormal_array_1 = []
N_totalrecoverablenormal_array_1 = []
N_totalnormal_array_10 = []
N_totalobservablenormal_array_10 = []
N_totalrecoverablenormal_array_10 = []
N_totalnormal_array_30 = []
N_totalobservablenormal_array_30 = []
N_totalrecoverablenormal_array_30 = []
N_totalnormal_array_100 = []
N_totalobservablenormal_array_100 = []
N_totalrecoverablenormal_array_100 = []
N_totalnormal_array_1000 = []
N_totalobservablenormal_array_1000 = []
N_totalrecoverablenormal_array_1000 = []
N_totalnormal22_array = []
N_totalobservablenormal22_array = []
N_totalrecoverablenormal22_array = []
N_totalnormal22_array_03 = []
N_totalobservablenormal22_array_03 = []
N_totalrecoverablenormal22_array_03 = []
N_totalnormal22_array_1 = []
N_totalobservablenormal22_array_1 = []
N_totalrecoverablenormal22_array_1 = []
N_totalnormal22_array_10 = []
N_totalobservablenormal22_array_10 = []
N_totalrecoverablenormal22_array_10 = []
N_totalnormal22_array_30 = []
N_totalobservablenormal22_array_30 = []
N_totalrecoverablenormal22_array_30 = []
N_totalnormal22_array_100 = []
N_totalobservablenormal22_array_100 = []
N_totalrecoverablenormal22_array_100 = []
N_totalnormal22_array_1000 = []
N_totalobservablenormal22_array_1000 = []
N_totalrecoverablenormal22_array_1000 = []
N_totalnormal195_array = []
N_totalobservablenormal195_array = []
N_totalrecoverablenormal195_array = []
N_totalnormal195_array_03 = []
N_totalobservablenormal195_array_03 = []
N_totalrecoverablenormal195_array_03 = []
N_totalnormal195_array_1 = []
N_totalobservablenormal195_array_1 = []
N_totalrecoverablenormal195_array_1 = []
N_totalnormal195_array_10 = []
N_totalobservablenormal195_array_10 = []
N_totalrecoverablenormal195_array_10 = []
N_totalnormal195_array_30 = []
N_totalobservablenormal195_array_30 = []
N_totalrecoverablenormal195_array_30 = []
N_totalnormal195_array_100 = []
N_totalobservablenormal195_array_100 = []
N_totalrecoverablenormal195_array_100 = []
N_totalnormal195_array_1000 = []
N_totalobservablenormal195_array_1000 = []
N_totalrecoverablenormal195_array_1000 = []
N_totalfast_array = []
N_totalobservablefast_array = []
N_totalrecoverablefast_array = []
N_totalfast_array_03 = []
N_totalobservablefast_array_03 = []
N_totalrecoverablefast_array_03 = []
N_totalfast_array_1 = []
N_totalobservablefast_array_1 = []
N_totalrecoverablefast_array_1 = []
N_totalfast_array_10 = []
N_totalobservablefast_array_10 = []
N_totalrecoverablefast_array_10 = []
N_totalfast_array_30 = []
N_totalobservablefast_array_30 = []
N_totalrecoverablefast_array_30 = []
N_totalfast_array_100 = []
N_totalobservablefast_array_100 = []
N_totalrecoverablefast_array_100 = []
N_totalfast_array_1000 = []
N_totalobservablefast_array_1000 = []
N_totalrecoverablefast_array_1000 = []
N_totalfast22_array = []
N_totalobservablefast22_array = []
N_totalrecoverablefast22_array = []
N_totalfast22_array_03 = []
N_totalobservablefast22_array_03 = []
N_totalrecoverablefast22_array_03 = []
N_totalfast22_array_1 = []
N_totalobservablefast22_array_1 = []
N_totalrecoverablefast22_array_1 = []
N_totalfast22_array_10 = []
N_totalobservablefast22_array_10 = []
N_totalrecoverablefast22_array_10 = []
N_totalfast22_array_30 = []
N_totalobservablefast22_array_30 = []
N_totalrecoverablefast22_array_30 = []
N_totalfast22_array_100 = []
N_totalobservablefast22_array_100 = []
N_totalrecoverablefast22_array_100 = []
N_totalfast22_array_1000 = []
N_totalobservablefast22_array_1000 = []
N_totalrecoverablefast22_array_1000 = []
N_totalfast195_array = []
N_totalobservablefast195_array = []
N_totalrecoverablefast195_array = []
N_totalfast195_array_03 = []
N_totalobservablefast195_array_03 = []
N_totalrecoverablefast195_array_03 = []
N_totalfast195_array_1 = []
N_totalobservablefast195_array_1 = []
N_totalrecoverablefast195_array_1 = []
N_totalfast195_array_10 = []
N_totalobservablefast195_array_10 = []
N_totalrecoverablefast195_array_10 = []
N_totalfast195_array_30 = []
N_totalobservablefast195_array_30 = []
N_totalrecoverablefast195_array_30 = []
N_totalfast195_array_100 = []
N_totalobservablefast195_array_100 = []
N_totalrecoverablefast195_array_100 = []
N_totalfast195_array_1000 = []
N_totalobservablefast195_array_1000 = []
N_totalrecoverablefast195_array_1000 = []
N_totalobsDist_array = []
N_totalobservableobsDist_array = []
N_totalrecoverableobsDist_array = []
N_totalobsDist_array_03 = []
N_totalobservableobsDist_array_03 = []
N_totalrecoverableobsDist_array_03 = []
N_totalobsDist_array_1 = []
N_totalobservableobsDist_array_1 = []
N_totalrecoverableobsDist_array_1 = []
N_totalobsDist_array_10 = []
N_totalobservableobsDist_array_10 = []
N_totalrecoverableobsDist_array_10 = []
N_totalobsDist_array_30 = []
N_totalobservableobsDist_array_30 = []
N_totalrecoverableobsDist_array_30 = []
N_totalobsDist_array_100 = []
N_totalobservableobsDist_array_100 = []
N_totalrecoverableobsDist_array_100 = []
N_totalobsDist_array_1000 = []
N_totalobservableobsDist_array_1000 = []
N_totalrecoverableobsDist_array_1000 = []
N_totalobsDist22_array = []
N_totalobservableobsDist22_array = []
N_totalrecoverableobsDist22_array = []
N_totalobsDist22_array_03 = []
N_totalobservableobsDist22_array_03 = []
N_totalrecoverableobsDist22_array_03 = []
N_totalobsDist22_array_1 = []
N_totalobservableobsDist22_array_1 = []
N_totalrecoverableobsDist22_array_1 = []
N_totalobsDist22_array_10 = []
N_totalobservableobsDist22_array_10 = []
N_totalrecoverableobsDist22_array_10 = []
N_totalobsDist22_array_30 = []
N_totalobservableobsDist22_array_30 = []
N_totalrecoverableobsDist22_array_30 = []
N_totalobsDist22_array_100 = []
N_totalobservableobsDist22_array_100 = []
N_totalrecoverableobsDist22_array_100 = []
N_totalobsDist22_array_1000 = []
N_totalobservableobsDist22_array_1000 = []
N_totalrecoverableobsDist22_array_1000 = []
N_totalobsDist195_array = []
N_totalobservableobsDist195_array = []
N_totalrecoverableobsDist195_array = []
N_totalobsDist195_array_03 = []
N_totalobservableobsDist195_array_03 = []
N_totalrecoverableobsDist195_array_03 = []
N_totalobsDist195_array_1 = []
N_totalobservableobsDist195_array_1 = []
N_totalrecoverableobsDist195_array_1 = []
N_totalobsDist195_array_10 = []
N_totalobservableobsDist195_array_10 = []
N_totalrecoverableobsDist195_array_10 = []
N_totalobsDist195_array_30 = []
N_totalobservableobsDist195_array_30 = []
N_totalrecoverableobsDist195_array_30 = []
N_totalobsDist195_array_100 = []
N_totalobservableobsDist195_array_100 = []
N_totalrecoverableobsDist195_array_100 = []
N_totalobsDist195_array_1000 = []
N_totalobservableobsDist195_array_1000 = []
N_totalrecoverableobsDist195_array_1000 = []
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https:/sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
fbFit= fitRagfb()
mbins = np.arange(0,10, 0.1, dtype='float')
cutP = 0.10 #condition on recoverability/tolerance
for filenormal_ in sorted(allFiles_normal):
filename = filenormal_[60:]
fileid = filename.replace('output_file.csv', '')
print ("I'm starting " + fileid)
datnormal = pd.read_csv(filenormal_, sep = ',', header=2)
PeriodIn = datnormal['p'] # input period -- 'p' in data file
##########################################################
datnormal1 = pd.read_csv(filenormal_, sep = ',', header=0, nrows=1)
N_tri = datnormal1["NstarsTRILEGAL"][0]
#print("N_tri = ", N_tri)
Nall = len(PeriodIn)
m1hAll0, m1b = np.histogram(datnormal["m1"], bins=mbins)
import pickle
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from .utils import I, Z
from .utils import rx
class Layerwise:
def __init__(self, G):
self.graph = G
self.num_qubits = len(G.nodes)
self.edge_list = list(G.edges)
self.hamiltonian = self.get_maxcut_hmt()
self.maxcut = np.max(self.hamiltonian)
self.best_ansatz = self.plusxn()
self.best_params = None
self.exp_arr = None
self.npts = None
self.gmesh = None
self.bmesh = None
self.max_exps = None
self.depth = 0
def tensor_prod(self, u3, qubits):
if 0 in qubits:
ans = u3
else:
ans = I
for idx in range(1, self.num_qubits):
if idx in qubits:
ans = np.kron(ans, u3)
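# Illustration (not from the original class) of the kron construction used
# above, assuming Z and I imported from .utils are the usual 2x2 Pauli-Z and
# identity matrices: a Z acting on qubit 0 of a 2-qubit register is Z (x) I.
# >>> np.kron(Z, I).shape
# (4, 4)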
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import copy
import typing as tp
import numpy as np
import gym
import nevergrad as ng
from nevergrad.parametrization import parameter
from ..base import ExperimentFunction
# pylint: disable=unused-import,import-outside-toplevel
GUARANTEED_GYM_ENV_NAMES = [
"Copy-v0",
"RepeatCopy-v0",
"ReversedAddition-v0",
"ReversedAddition3-v0",
"DuplicatedInput-v0",
"Reverse-v0",
"CartPole-v0",
"CartPole-v1",
"MountainCar-v0",
"Acrobot-v1",
"Blackjack-v0",
"FrozenLake-v0",
"FrozenLake8x8-v0",
"CliffWalking-v0",
"NChain-v0",
"Roulette-v0",
"Taxi-v3",
"CubeCrash-v0",
"CubeCrashSparse-v0",
"CubeCrashScreenBecomesBlack-v0",
"MemorizeDigits-v0",
]
# We do not use "conformant" which is not consistent with the rest.
CONTROLLERS = [
"linear", # Simple linear controller.
"neural", # Simple neural controller.
"deep_neural", # Deeper neural controller.
"semideep_neural", # Deep, but not very deep.
"structured_neural", # Structured optimization of a neural net.
"memory_neural", # Uses a memory (i.e. recurrent net).
"deep_memory_neural",
"stackingmemory_neural", # Uses a memory and stacks a heuristic and the memory as inputs.
"deep_stackingmemory_neural",
"semideep_stackingmemory_neural",
"extrapolatestackingmemory_neural", # Same as stackingmemory_neural + suffix-based extrapolation.
"deep_extrapolatestackingmemory_neural",
"semideep_extrapolatestackingmemory_neural",
"semideep_memory_neural",
"multi_neural", # One neural net per time step.
"noisy_neural", # Do not start at 0 but at a random point.
"scrambled_neural", # Why not perturbating the order of variables ?
"noisy_scrambled_neural",
"stochastic_conformant", # Conformant planning, but still not deterministic.
]
NO_LENGTH = ["ANM", "Blackjack", "CliffWalking", "Cube", "Memorize", "ompiler", "llvm"]
# Environment used for CompilerGym: this class proposes a small ActionSpace.
class SmallActionSpaceLlvmEnv(gym.ActionWrapper):
"""A wrapper for the LLVM compiler environment that exposes a tiny subset of
the full discrete action space (the subset was hand pruned to contain a mix
of "good" and "bad" actions).
"""
action_space_subset = [
"-adce",
"-break-crit-edges",
"-constmerge",
"-correlated-propagation",
"-deadargelim",
"-dse",
"-early-cse-memssa",
"-functionattrs",
"-functionattrs",
"-globaldce",
"-globalopt",
"-gvn",
"-indvars",
"-inline",
"-instcombine",
"-ipsccp",
"-jump-threading",
"-lcssa",
"-licm",
"-loop-deletion",
"-loop-idiom",
"-loop-reduce",
"-loop-rotate",
"-loop-simplify",
"-loop-unroll",
"-loop-unswitch",
"-lower-expect",
"-loweratomic",
"-lowerinvoke",
"-lowerswitch",
"-mem2reg",
"-memcpyopt",
"-partial-inliner",
"-prune-eh",
"-reassociate",
"-sccp",
"-simplifycfg",
"-sink",
"-sroa",
"-strip",
"-strip-nondebug",
"-tailcallelim",
]
def __init__(self, env) -> None:
"""Creating a counterpart of a compiler gym environement with a reduced action space."""
super().__init__(env=env)
# Array for translating from this tiny action space to the action space of
# the wrapped environment.
self.true_action_indices = [self.action_space[f] for f in self.action_space_subset]
def action(self, action: tp.Union[int, tp.List[int]]):
if isinstance(action, int):
return self.true_action_indices[action]
else:
return [self.true_action_indices[a] for a in action]
class AutophaseNormalizedFeatures(gym.ObservationWrapper):
"""A wrapper for LLVM environments that use the Autophase observation space
to normalize and clip features to the range [0, 1].
"""
# The index of the "TotalInsts" feature of autophase.
TotalInsts_index = 51
def __init__(self, env: tp.Any):
"""Creating a counterpart of a compiler gym environement with an extended observation space."""
super().__init__(env=env)
assert env.observation_space_spec.id == "Autophase", "Requires autophase features"
# Adjust the bounds to reflect the normalized values.
self.observation_space = gym.spaces.Box(
low=np.full(self.observation_space.shape[0], 0, dtype=np.float32), # type: ignore
high=np.full(self.observation_space.shape[0], 1, dtype=np.float32), # type: ignore
dtype=np.float32,
)
def observation(self, observation):
if observation[self.TotalInsts_index] <= 0:
return np.zeros(observation.shape)
return np.clip(observation / observation[self.TotalInsts_index], 0, 1)
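# Small numeric illustration (made-up 3-feature vector in which the last
# entry plays the role of the TotalInsts feature):
# >>> np.clip(np.array([5., 10., 20.]) / 20., 0, 1)
# array([0.25, 0.5 , 1.  ])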
class ConcatActionsHistogram(gym.ObservationWrapper):
"""A wrapper that concatenates a histogram of previous actions to each
observation.
The actions histogram is concatenated to the end of the existing 1-D box
observation, expanding the space.
The actions histogram has bounds [0,inf]. If you specify a fixed episode
length `norm_to_episode_len`, each histogram update will be scaled by
1/norm_to_episode_len, so that `sum(observation) == 1` after episode_len
steps.
"""
def __init__(self, env: tp.Any, norm_to_episode_len: int = 0) -> None:
"""Creating a counterpart of a compiler gym environement with an extended observation space."""
super().__init__(env=env)
assert isinstance(
self.observation_space, gym.spaces.Box # type: ignore
), "Can only contatenate actions histogram to box shape"
assert isinstance(
self.action_space, gym.spaces.Discrete
), "Can only construct histograms from discrete spaces"
assert len(self.observation_space.shape) == 1, "Requires 1-D observation space" # type: ignore
self.increment = 1 / norm_to_episode_len if norm_to_episode_len else 1
# Reshape the observation space.
self.observation_space = gym.spaces.Box(
low=np.concatenate(
(
self.observation_space.low, # type: ignore
np.full(self.action_space.n, 0, dtype=np.float32),
)
),
high=np.concatenate(
(
self.observation_space.high, # type: ignore
np.full(
self.action_space.n,
1 if norm_to_episode_len else float("inf"),
dtype=np.float32,
),
)
),
dtype=np.float32,
)
self.histogram = np.zeros((self.action_space.n,))
def reset(self, *args, **kwargs):
self.histogram = np.zeros((self.action_space.n,))
return super().reset(*args, **kwargs)
def step(self, action: tp.Union[int, tp.List[int]]):
if not isinstance(action, tp.Iterable):
action = [action]
for a in action:
self.histogram[a] += self.increment
return super().step(action)
def observation(self, observation):
return np.concatenate((observation, self.histogram))
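# Small illustration (made-up numbers): with norm_to_episode_len=4 the
# increment is 1/4 = 0.25, so after the discrete actions [2, 0, 2, 1] the
# appended histogram sums to 1, exactly as step() above accumulates it:
# >>> hist = np.zeros(4)
# >>> for a in [2, 0, 2, 1]: hist[a] += 0.25
# >>> hist
# array([0.25, 0.25, 0.5 , 0.  ])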
# Class for direct optimization of CompilerGym problems.
# We have two variants: a limited (small action space) and a full version.
class CompilerGym(ExperimentFunction):
def __init__(self, compiler_gym_pb_index: int, limited_compiler_gym: tp.Optional[bool] = None):
"""Creating a compiler gym environement.
Parameters:
compiler_gym_pb_index: integer
which pb we are working on.
limited_compiler_gym: bool
whether we use a limited action space.
"""
try:
import compiler_gym # noqa
except ImportError as e:
raise ng.errors.UnsupportedExperiment(
"Please install compiler_gym for CompilerGym experiments"
) from e
# CompilerGym sends http requests that CircleCI does not like.
if os.environ.get("CIRCLECI", False):
raise ng.errors.UnsupportedExperiment("No HTTP request in CircleCI")
env = gym.make("llvm-ic-v0", observation_space="Autophase", reward_space="IrInstructionCountOz")
action_space_size = (
len(SmallActionSpaceLlvmEnv.action_space_subset) if limited_compiler_gym else env.action_space.n
)
self.num_episode_steps = 45 if limited_compiler_gym else 50
parametrization = (
ng.p.Array(shape=(self.num_episode_steps,))
.set_bounds(0, action_space_size - 1)
.set_integer_casting()
).set_name("direct" + str(compiler_gym_pb_index))
self.uris = list(env.datasets["benchmark://cbench-v1"].benchmark_uris())
self.compilergym_index = compiler_gym_pb_index
env.reset(benchmark=self.uris[self.compilergym_index])
self.limited_compiler_gym = limited_compiler_gym
super().__init__(self.eval_actions_as_list, parametrization=parametrization)
def make_env(self) -> gym.Env:
"""Convenience function to create the environment that we'll use."""
        # Use the time-limited wrapper to fix the length of episodes.
if self.limited_compiler_gym:
import compiler_gym
env = gym.wrappers.TimeLimit(
env=SmallActionSpaceLlvmEnv(env=gym.make("llvm-v0", reward_space="IrInstructionCountOz")),
max_episode_steps=self.num_episode_steps,
)
env.unwrapped.benchmark = "cBench-v1/qsort"
env.action_space.n = len(SmallActionSpaceLlvmEnv.action_space_subset)
else:
env = gym.make("llvm-ic-v0", reward_space="IrInstructionCountOz")
assert env.action_space.n > len(SmallActionSpaceLlvmEnv.action_space_subset)
return env
# @lru_cache(maxsize=1024) # function is deterministic so we can cache results
def eval_actions(self, actions: tp.Tuple[int, ...]) -> float:
"""Create an environment, run the sequence of actions in order, and return the
negative cumulative reward. Intermediate observations/rewards are discarded.
This is the function that we want to minimize.
"""
with self.make_env() as env:
env.reset(benchmark=self.uris[self.compilergym_index])
_, _, _, _ = env.step(actions)
return -env.episode_reward
def eval_actions_as_list(self, actions: tp.List[int]):
"""Wrapper around eval_actions() that records the return value for later analysis."""
reward = self.eval_actions(tuple(actions[i] for i in range(len(actions))))
return reward
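# Illustrative sketch (an assumption, not used by the benchmark suite): since CompilerGym
# above is a standard ExperimentFunction, it can be minimized directly with any nevergrad
# optimizer; OnePlusOne and the tiny budget here are arbitrary choices for demonstration.
def _example_minimize_compiler_gym_sketch(budget: int = 20):
    func = CompilerGym(compiler_gym_pb_index=0, limited_compiler_gym=True)
    optimizer = ng.optimizers.OnePlusOne(parametrization=func.parametrization, budget=budget)
    recommendation = optimizer.minimize(func)
    # The recommendation holds the best action sequence found within the budget.
    return recommendation.value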
class GymMulti(ExperimentFunction):
"""Class for converting a gym environment, a controller style, and others into a black-box optimization benchmark."""
@staticmethod
def get_env_names() -> tp.List[str]:
import gym_anm # noqa
gym_env_names = []
for e in gym.envs.registry.all():
try:
assert "Kelly" not in str(e.id) # We should have another check than that.
assert "llvm" not in str(e.id) # We should have another check than that.
env = gym.make(e.id)
env.reset()
env.step(env.action_space.sample())
a1 = np.asarray(env.action_space.sample())
a2 = np.asarray(env.action_space.sample())
a3 = np.asarray(env.action_space.sample())
a1 = a1 + a2 + a3
if hasattr(a1, "size"):
try:
assert a1.size < 15000
except Exception: # pylint: disable=broad-except
assert a1.size() < 15000 # type: ignore
gym_env_names.append(e.id)
except Exception as exception: # pylint: disable=broad-except
print(f"{e.id} not included in full list becaue of {exception}.")
return gym_env_names
controllers = CONTROLLERS
ng_gym = [
"Copy-v0",
"RepeatCopy-v0",
"Reverse-v0",
"CartPole-v0",
"CartPole-v1",
"Acrobot-v1",
"FrozenLake-v0",
"FrozenLake8x8-v0",
"NChain-v0",
"Roulette-v0",
]
def wrap_env(self, input_env):
if self.limited_compiler_gym:
env = gym.wrappers.TimeLimit(
env=SmallActionSpaceLlvmEnv(input_env),
max_episode_steps=self.num_episode_steps,
)
env.unwrapped.benchmark = input_env.benchmark
env.action_space.n = len(SmallActionSpaceLlvmEnv.action_space_subset)
else:
env = gym.wrappers.TimeLimit(
env=input_env,
max_episode_steps=self.num_episode_steps,
)
return env
def observation_wrap(self, env):
env2 = AutophaseNormalizedFeatures(env)
env3 = ConcatActionsHistogram(env2)
return env3
def __init__(
self,
name: str = "gym_anm:ANM6Easy-v0",
control: str = "conformant",
neural_factor: tp.Optional[int] = 1,
randomized: bool = True,
compiler_gym_pb_index: tp.Optional[int] = None,
limited_compiler_gym: tp.Optional[bool] = None,
optimization_scale: int = 0,
greedy_bias: bool = False,
) -> None:
# limited_compiler_gym: bool or None.
# whether we work with the limited version
self.limited_compiler_gym = limited_compiler_gym
self.optimization_scale = optimization_scale
self.num_training_codes = 100 if limited_compiler_gym else 5000
self.uses_compiler_gym = "compiler" in name
self.stochastic_problem = "stoc" in name
self.greedy_bias = greedy_bias
if "conformant" in control or control == "linear":
assert neural_factor is None
if os.name == "nt":
raise ng.errors.UnsupportedExperiment("Windows is not supported")
if self.uses_compiler_gym: # Long special case for Compiler Gym.
# CompilerGym sends http requests that CircleCI does not like.
if os.environ.get("CIRCLECI", False):
raise ng.errors.UnsupportedExperiment("No HTTP request in CircleCI")
assert limited_compiler_gym is not None
self.num_episode_steps = 45 if limited_compiler_gym else 50
import compiler_gym
env = gym.make("llvm-v0", observation_space="Autophase", reward_space="IrInstructionCountOz")
env = self.observation_wrap(self.wrap_env(env))
self.uris = list(env.datasets["benchmark://cbench-v1"].benchmark_uris())
# For training, in the "stochastic" case, we use Csmith.
from itertools import islice
self.csmith = list(
islice(env.datasets["generator://csmith-v0"].benchmark_uris(), self.num_training_codes)
)
if self.stochastic_problem:
assert (
compiler_gym_pb_index is None
), "compiler_gym_pb_index should not be defined in the stochastic case."
self.compilergym_index = None
# In training, we randomly draw in csmith (but we are allowed to use 100x more budget :-) ).
o = env.reset(benchmark=np.random.choice(self.csmith))
else:
assert compiler_gym_pb_index is not None
self.compilergym_index = compiler_gym_pb_index
o = env.reset(benchmark=self.uris[self.compilergym_index])
# env.require_dataset("cBench-v1")
# env.unwrapped.benchmark = "benchmark://cBench-v1/qsort"
else: # Here we are not in CompilerGym anymore.
assert limited_compiler_gym is None
assert (
compiler_gym_pb_index is None
), "compiler_gym_pb_index should not be defined if not CompilerGym."
env = gym.make(name if "LANM" not in name else "gym_anm:ANM6Easy-v0")
o = env.reset()
self.env = env
# Build various attributes.
self.name = (
(name if not self.uses_compiler_gym else name + str(env))
+ "__"
+ control
+ "__"
+ str(neural_factor)
)
if randomized:
self.name += "_unseeded"
self.randomized = randomized
try:
self.num_time_steps = env._max_episode_steps # I know! This is a private variable.
        except AttributeError:  # Not all environments have a max number of episodes!
assert any(x in name for x in NO_LENGTH), name
if (
self.uses_compiler_gym and not self.limited_compiler_gym
): # The unlimited Gym uses 50 time steps.
self.num_time_steps = 50
elif self.uses_compiler_gym and self.limited_compiler_gym: # Other Compiler Gym: 45 time steps.
self.num_time_steps = 45
elif "LANM" not in name: # Most cases: let's say 100 time steps.
self.num_time_steps = 100
else: # LANM is a special case with 3000 time steps.
self.num_time_steps = 3000
self.gamma = 0.995 if "LANM" in name else 1.0
self.neural_factor = neural_factor
# Infer the action space.
if isinstance(env.action_space, gym.spaces.Discrete):
output_dim = env.action_space.n
output_shape = (output_dim,)
discrete = True
assert output_dim is not None, env.action_space.n
else: # Continuous action space
output_shape = env.action_space.shape
if output_shape is None:
output_shape = tuple(np.asarray(env.action_space.sample()).shape)
discrete = False
output_dim = np.prod(output_shape)
self.discrete = discrete
# Infer the observation space.
assert (
env.observation_space is not None or self.uses_compiler_gym or "llvm" in name
), "An observation space should be defined."
if self.uses_compiler_gym:
input_dim = 98 if self.limited_compiler_gym else 179
self.discrete_input = False
elif env.observation_space is not None and env.observation_space.dtype == int:
# Direct inference for corner cases:
# if "int" in str(type(o)):
input_dim = env.observation_space.n
assert input_dim is not None, env.observation_space.n
self.discrete_input = True
else:
input_dim = np.prod(env.observation_space.shape) if env.observation_space is not None else 0
if input_dim is None:
input_dim = np.prod(np.asarray(o).shape)
self.discrete_input = False
# Infer the action type.
a = env.action_space.sample()
self.action_type = type(a)
self.subaction_type = None
if hasattr(a, "__iter__"):
self.subaction_type = type(a[0])
# Prepare the policy shape.
if neural_factor is None:
assert (
control == "linear" or "conformant" in control
), f"{control} has neural_factor {neural_factor}"
neural_factor = 1
self.output_shape = output_shape
self.num_stacking = 1
self.memory_len = neural_factor * input_dim if "memory" in control else 0
self.extended_input_len = (input_dim + output_dim) * self.num_stacking if "stacking" in control else 0
input_dim = input_dim + self.memory_len + self.extended_input_len
self.extended_input = np.zeros(self.extended_input_len)
output_dim = output_dim + self.memory_len
self.input_dim = input_dim
self.output_dim = output_dim
self.num_neurons = 1 + ((neural_factor * (input_dim - self.extended_input_len)) // 7)
self.num_neurons = neural_factor * (input_dim - self.extended_input_len)
self.num_internal_layers = 1 if "semi" in control else 3
internal = self.num_internal_layers * (self.num_neurons ** 2) if "deep" in control else 0
unstructured_neural_size = (
output_dim * self.num_neurons + self.num_neurons * (input_dim + 1) + internal,
)
neural_size = unstructured_neural_size
if self.greedy_bias:
neural_size = (unstructured_neural_size[0] + 1,)
assert "multi" not in control
assert "structured" not in control
assert control in CONTROLLERS or control == "conformant", f"{control} not known as a form of control"
self.control = control
if "neural" in control:
self.first_size = self.num_neurons * (self.input_dim + 1)
self.second_size = self.num_neurons * self.output_dim
self.first_layer_shape = (self.input_dim + 1, self.num_neurons)
self.second_layer_shape = (self.num_neurons, self.output_dim)
shape_dict = {
"conformant": (self.num_time_steps,) + output_shape,
"stochastic_conformant": (self.num_time_steps,) + output_shape,
"linear": (input_dim + 1, output_dim),
"memory_neural": neural_size,
"neural": neural_size,
"deep_neural": neural_size,
"semideep_neural": neural_size,
"deep_memory_neural": neural_size,
"semideep_memory_neural": neural_size,
"deep_stackingmemory_neural": neural_size,
"stackingmemory_neural": neural_size,
"semideep_stackingmemory_neural": neural_size,
"deep_extrapolatestackingmemory_neural": neural_size,
"extrapolatestackingmemory_neural": neural_size,
"semideep_extrapolatestackingmemory_neural": neural_size,
"structured_neural": neural_size,
"multi_neural": (min(self.num_time_steps, 50),) + unstructured_neural_size,
"noisy_neural": neural_size,
"noisy_scrambled_neural": neural_size,
"scrambled_neural": neural_size,
}
shape = shape_dict[control]
assert all(
c in shape_dict for c in self.controllers
), f"{self.controllers} subset of {shape_dict.keys()}"
shape = tuple(map(int, shape))
self.policy_shape = shape if "structured" not in control else None
# Create the parametrization.
parametrization = parameter.Array(shape=shape).set_name("ng_default")
if "structured" in control and "neural" in control and "multi" not in control:
parametrization = parameter.Instrumentation( # type: ignore
parameter.Array(shape=tuple(map(int, self.first_layer_shape))),
parameter.Array(shape=tuple(map(int, self.second_layer_shape))),
).set_name("ng_struct")
elif "conformant" in control:
try:
if env.action_space.low is not None and env.action_space.high is not None:
low = np.repeat(np.expand_dims(env.action_space.low, 0), self.num_time_steps, axis=0)
high = np.repeat(np.expand_dims(env.action_space.high, 0), self.num_time_steps, axis=0)
init = 0.5 * (low + high)
parametrization = parameter.Array(init=init)
parametrization.set_bounds(low, high)
except AttributeError: # Not all env.action_space have a low and a high.
pass
if self.subaction_type == int:
parametrization.set_integer_casting()
parametrization.set_name("conformant")
# Now initializing.
super().__init__(self.gym_multi_function, parametrization=parametrization)
self.greedy_coefficient = 0.0
self.parametrization.function.deterministic = not self.uses_compiler_gym
self.archive: tp.List[tp.Any] = []
self.mean_loss = 0.0
self.num_losses = 0
def evaluation_function(self, *recommendations) -> float:
"""Averages multiple evaluations if necessary."""
x = recommendations[0].value
if not self.randomized:
assert not self.uses_compiler_gym
return self.gym_multi_function(x, limited_fidelity=False)
if not self.uses_compiler_gym:
# Pb_index >= 0 refers to the test set.
return (
np.sum(
[
self.gym_multi_function(x, limited_fidelity=False)
for compiler_gym_pb_index in range(23)
]
)
                / 23.0  # Not CompilerGym, but we keep the same constant of 23 for consistency.
)
assert self.uses_compiler_gym
rewards = [
np.log(
max(
1e-5,
-self.gym_multi_function(
x, limited_fidelity=False, compiler_gym_pb_index=compiler_gym_pb_index
),
)
)
for compiler_gym_pb_index in range(23)
]
return -np.exp(sum(rewards) / len(rewards))
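    # Note on the aggregation above: each per-benchmark score is floored at 1e-5,
    # log-transformed, averaged and exponentiated, so the returned value is (minus) the
    # geometric mean of the 23 rewards -- e.g. rewards of 0.5 and 2.0 average to 1.0,
    # which prevents one very good benchmark from dominating the evaluation.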
def forked_env(self):
assert "compiler" in self.name
env = self.env
forked = env.unwrapped.fork()
forked = self.wrap_env(forked)
# pylint: disable=W0201
assert hasattr(
env, "_elapsed_steps"
), f"{[hasattr(e, '_elapsed_steps') for e in [env, env.unwrapped, env.unwrapped.unwrapped, env.unwrapped.unwrapped.unwrapped]]}"
if env._elapsed_steps is not None:
forked._elapsed_steps = env._elapsed_steps
forked = self.observation_wrap(forked)
if hasattr(env, "histogram"):
forked.histogram = env.histogram.copy()
return forked
def discretize(self, a):
"""Transforms a logit into an int obtained through softmax."""
if self.greedy_bias:
a = np.asarray(a, dtype=np.float32)
for i, action in enumerate(range(len(a))):
if "compiler" in self.name:
tmp_env = self.forked_env()
else:
tmp_env = copy.deepcopy(self.env)
_, r, _, _ = tmp_env.step(action)
a[i] += self.greedy_coefficient * r
probabilities = np.exp(a - max(a))
probabilities = probabilities / sum(probabilities)
assert sum(probabilities) <= 1.0 + 1e-7, f"{probabilities} with greediness {self.greedy_coefficient}."
return int(list(np.random.multinomial(1, probabilities)).index(1))
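    # Example of the softmax sampling above (without the greedy bias): logits a = [0, 1, 2]
    # give probabilities exp(a - max(a)) / sum ~ [0.09, 0.24, 0.67], from which a single
    # action index is drawn with np.random.multinomial.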
def neural(self, x: np.ndarray, o: np.ndarray):
"""Applies a neural net parametrized by x to an observation o. Returns an action or logits of actions."""
if self.greedy_bias:
assert "multi" not in self.control
assert "structured" not in self.control
self.greedy_coefficient = x[-1:] # We have decided that we can not have two runs in parallel.
x = x[:-1]
o = o.ravel()
if "structured" not in self.name and self.optimization_scale != 0:
x = np.asarray((2 ** self.optimization_scale) * x, dtype=np.float32)
if self.control == "linear":
            # The linear case is simple.
output = np.matmul(o, x[1:, :])
output += x[0]
return output.reshape(self.output_shape), np.zeros(0)
if "structured" not in self.control:
# If not structured then we split into two matrices.
first_matrix = x[: self.first_size].reshape(self.first_layer_shape) / np.sqrt(len(o))
second_matrix = x[self.first_size : (self.first_size + self.second_size)].reshape(
self.second_layer_shape
) / np.sqrt(self.num_neurons)
else:
# In the structured case we should have two entries with the right shapes.
assert len(x) == 2
            first_matrix = np.asarray(x[0][0])
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 15:12:49 2016
@author: uzivatel
"""
import numpy as np
from copy import deepcopy
from .general import Energy
from .density import DensityGrid
class MO:
''' Class containing information about molecular orbitals.
name : string
Name of molecular orbital class
coeff : numpy array or real (dimension N_mo x N_AO_orient)
Usually N_mo=N_AO_orient. Matrix with expansion coefficients of molecular
orbital into AO basis.
nmo : integer
Number of molecular orbitals
energy : Energy class
Molecular orbital energies (``energy.value`` - vector with all energies)
        - Fock single-electron energies (units are handled by the Energy class).
occ : numpy.array of real (dimension Nmo_orbitals)
        Occupation numbers of the individual molecular orbitals. Values range from 2.0
        (fully occupied molecular orbital) to 0.0 (unoccupied MO). The sum of the
        occupation numbers equals the total number of electrons.
symm : list of string (dimension Nmo_orbitals)
Symmetry of molecular orbitals
densmat_grnd : numpy array of float (dimension Nao_orient x Nao_orient)
Total ground state electron matrix in atomic orbitals, defined as:\n
``M_mat[mu,nu]=Sum_{n}{occ_n*C_n,mu*C_n,nu}``.\n
Where `C_n,mu` are expansion coefficients of molecular orbital `n` into
atomic orbital `mu` (contribution of atomic orbital `mu` in molecular
orbital `n`).
init : logical
        Information on whether the molecular orbitals are initialized (i.e. whether some
        information is already loaded).
Functions
-----------
add_all :
Add molecular orbital including expansion coefficients into atomic
orbitals, energy of the molecular orbital, occupation number and symmetry
rotate :
Rotate the molecular orbitals and ground state electron density matrix
by specified angles in radians in positive direction.
rotate_1 :
        Inverse rotation to ``rotate``.
copy :
Create 1 to 1 copy of the molecular orbitals with all classes and types.
get_MO_norm :
        Calculate the norm of a specified molecular orbital.
normalize :
        Normalize the molecular orbitals (when loaded from a Gaussian calculation,
        rounding errors leave the orbitals with slightly different norms, on the
        order of ~0.998).
get_mo_grid :
Evaluate selected molecular orbital on the grid
get_grnd_densmat :
Calculate ground state electron density matrix
get_MO_type :
        Determine whether a molecular orbital is of sigma or pi type.
'''
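    # For reference, the ground-state density matrix described above can be assembled from
    # the stored quantities as np.einsum('n,ni,nj->ij', occ, coeff, coeff); this is only an
    # illustration -- the class provides get_grnd_densmat for that purpose.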
def __init__(self):
self.coeff=np.array([],dtype='f8')
self.nmo=0
self.energy=Energy(None)
self.occ=[]
self.symm=[]
self.name='Default'
self.init=False
self.densmat_grnd=None
def _add_coeffs(self,coeff):
if not self.init:
if np.ndim(coeff)==1:
                self.coeff=np.array([coeff],dtype='f8')
import numpy as np
import torch
from styletransfer.cut.models import create_model
from styletransfer.cut.options.test_options import TestOptions
def prepare(name="dual_simple", mode="FastCUT" , path = "styletransfer/cut/checkpoints"):
opt = TestOptions().parse() # get test options
    # hard-code some parameters for the test model
opt.name = name # "road_FastCUT" # NAME "road_CUT" #
opt.checkpoints_dir = path
opt.CUT_mode = mode # "FastCUT" # CUT
opt.phase = "test"
    opt.num_threads = 0   # test code only supports num_threads = 0
opt.batch_size = 1 # test code only supports batch_size = 1
opt.serial_batches = True # disable data shuffling; comment this line if results on randomly chosen images are needed.
opt.no_flip = True # no flip; comment this line if results on flipped images are needed.
opt.display_id = -1 # no visdom display; the test code saves the results to a HTML file.
opt.dataset_mode = "conditional"
opt.netG = "conditional_resnet_9"
opt.netD = "conditional"
opt.model = "conditional_cut"
opt.lambda_hist = 0
opt.lambda_edge = 0
opt.edge_warmup=0
model = create_model(opt) # create a model given opt.model and other options
model.setup(opt) # regular setup: load and print networks; create schedulers
model.parallelize()
return model
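# Illustrative usage sketch (assumptions: a checkpoint named "dual_simple" exists under the
# default checkpoints path, images are HxWx3 uint8 arrays in [0, 255], and the pair (a, b)
# is the style-conditioning code expected by the conditional generator).
def _example_style_transfer_sketch():
    model = prepare(name="dual_simple", mode="FastCUT")
    dummy = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
    return generate(model, dummy, a=0.5, b=0.5)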
def generate(model, img, a, b):
img = np.transpose(img, axes=[2, 0, 1])
img = img[np.newaxis, :]
img = 2 * ((img / 255) - 1)
data = {
"A": torch.Tensor(img),
"B": torch.Tensor(img),
"B_class": torch.Tensor([[a, b]]),
"A_paths": "None",
"B_paths": "None"
}
model.set_input(data) # unpack data from data loader
model.test() # run inference
    visuals = model.get_current_visuals()  # get image results
img = visuals['fake_B'].cpu().numpy()
from matplotlib import pyplot as plt
    img = np.transpose(img, axes=[0, 2, 3, 1])
import os
import ast
import glob
import numpy as np
import pandas as pd
from tqdm import tqdm
from itertools import chain
from astropy.io import ascii
import multiprocessing as mp
from astropy.stats import mad_std
from astropy.timeseries import LombScargle as lomb
from pysyd import __file__
from pysyd.plots import set_plot_params
from pysyd.models import *
#####################################################################
# HIGHER-LEVEL FUNCTIONALITY OF THE SOFTWARE
#
def get_info(args):
"""
Loads todo.txt, sets up file paths, loads in any available star information, saves the
relevant parameters for each of the two main routines and sets the plotting parameters.
Parameters
----------
args : argparse.Namespace
command-line arguments
parallel : bool
if pysyd will be running in parallel mode
CLI : bool, optional
if CLI is not being used (i.e. `False`), the modules draw default values from a different location
Returns
-------
args : argparse.Namespace
the updated command-line arguments
"""
# Get parameters for all modules
args = get_parameters(args)
# Get invidual/specific star info from csv file (if it exists)
args = get_csv_info(args)
if args.cli:
# Check the input variables
check_input_args(args)
args = get_command_line(args)
set_plot_params()
return args
def get_parameters(args):
"""
Basic function to call the individual functions that load and
save parameters for different modules.
Parameters
----------
args : argparse.Namespace
command-line arguments
Returns
-------
args : argparse.Namespace
the updated command-line arguments
"""
# Initialize main 'params' dictionary
args = get_main_params(args)
args = get_groups(args)
# Initialize parameters for the find excess routine
args = get_excess_params(args)
# Initialize parameters for the fit background routine
args = get_background_params(args)
# Initialize parameters relevant for estimating global parameters
args = get_global_params(args)
return args
def get_main_params(args, cli=False, stars=None, excess=True, background=True, globe=True,
verbose=False, command='run', parallel=False, show=False, testing=False,
save=True, kep_corr=False, of_actual=None, of_new=None, overwrite=True):
"""
Get the parameters for the find excess routine.
Parameters
----------
args : argparse.Namespace
the command line arguments
stars : List[str], optional
list of targets to process. If `None`, will read in from `info/todo.txt` (default).
verbose : bool, optional
turn on verbose output. Default is `False`.
show : bool, optional
show output figures. Default is `False`.
save : bool, optional
save all data products. Default is `True`.
kep_corr : bool, optional
use the module that corrects for known kepler artefacts. Default is `False`.
of_actual : int, optional
oversampling factor of input PS. Default value is `None`.
of_new : int, optional
oversampling factor of newly-computed PS. Default value is `None`.
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.params : Dict[str,object]
the parameters of higher-level functionality
"""
vars = ['stars', 'inpdir', 'outdir', 'cli', 'command', 'info', 'show', 'save', 'testing',
'overwrite', 'excess', 'background', 'global', 'verbose']
if args.cli:
vals = [args.stars, args.inpdir, args.outdir, args.cli, args.command, args.info,
args.show, args.save, args.testing, args.overwrite, args.excess, args.background,
args.globe, args.verbose]
else:
args.todo = os.path.join(os.path.abspath(os.getcwd()), 'info', 'todo.txt')
info = os.path.join(os.path.abspath(os.getcwd()), 'info', 'star_info.csv')
inpdir = os.path.join(os.path.abspath(os.getcwd()), 'data')
args.command, args.parallel, args.of_actual, args.of_new, args.kep_corr, args.verbose = command, parallel, of_actual, of_new, kep_corr, verbose
vals = [stars, inpdir, os.path.join(os.path.abspath(os.getcwd()), 'results'), cli, command,
info, show, save, testing, overwrite, excess, background, globe, verbose]
args.params = dict(zip(vars,vals))
# Open star list
if args.params['stars'] is None or args.params['stars'] == []:
with open(args.todo, "r") as f:
args.params['stars'] = [line.strip().split()[0] for line in f.readlines()]
# Set file paths and make directories if they don't yet exist
for star in args.params['stars']:
args.params[star] = {}
args.params[star]['path'] = os.path.join(args.params['outdir'], star)
if args.params['save'] and not os.path.exists(args.params[star]['path']):
os.makedirs(args.params[star]['path'])
args.params[star]['ech_mask'] = None
return args
#####################################################################
# Sets up star "groups" -> mostly for parallel processing
#
def get_groups(args):
"""
Sets up star groups to run in parallel based on the number of threads.
Parameters
----------
args : argparse.Namespace
command line arguments
parallel : bool
run pySYD in parallel
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.params['groups'] : ndarray
star groups to process (groups == number of threads)
    """
if args.parallel:
todo = np.array(args.params['stars'])
if args.n_threads == 0:
args.n_threads = mp.cpu_count()
if len(todo) < args.n_threads:
args.n_threads = len(todo)
# divide stars into groups set by number of cpus/nthreads available
digitized = np.digitize(np.arange(len(todo))%args.n_threads,np.arange(args.n_threads))
args.params['groups'] = np.array([todo[digitized == i] for i in range(1, args.n_threads+1)], dtype=object)
else:
args.params['groups'] = np.array(args.params['stars'])
return args
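# Example of the round-robin split above: 7 stars with n_threads=3 give
# digitized == [1, 2, 3, 1, 2, 3, 1], i.e. groups of sizes [3, 2, 2]
# (stars 0, 3, 6 / stars 1, 4 / stars 2, 5).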
#####################################################################
# Parameters relevant to (optionally) estimate numax
#
def get_excess_params(args, n_trials=3, step=0.25, binning=0.005, smooth_width=20.0,
mode='mean', lower_ex=1.0, upper_ex=8000., ask=False,):
"""
Get the parameters for the find excess routine.
Parameters
----------
args : argparse.Namespace
the command line arguments
ask : bool, optional
If `True`, it will ask which trial to use as the estimate for numax.
n_trials : int, optional
the number of trials. Default value is `3`.
step : float, optional
TODO: Write description. Default value is `0.25`.
binning : float, optional
logarithmic binning width. Default value is `0.005`.
mode : {'mean', 'median', 'gaussian'}
mode to use when binning
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.findex : Dict[str,object]
the parameters of the find excess routine
"""
vars = ['step', 'binning', 'mode', 'smooth_width', 'ask', 'n_trials', 'lower_ex', 'upper_ex', 'results']
if args.cli:
vals = [args.step, args.binning, args.mode, args.smooth_width, args.ask, args.n_trials, args.lower_ex, args.upper_ex, {}]
else:
vals = [step, binning, mode, smooth_width, ask, n_trials, lower_ex, upper_ex, {}]
args.excess = dict(zip(vars,vals))
return args
#####################################################################
# Parameters relevant to background-fitting
#
def get_background_params(args, ind_width=20.0, box_filter=1.0, n_rms=20, metric='bic', include=False,
mc_iter=1, samples=False, n_laws=None, fix_wn=False, basis='tau_sigma',
lower_bg=1.0, upper_bg=8000.,):
"""
Get the parameters for the background-fitting routine.
Parameters
----------
args : argparse.Namespace
the command line arguments
box_filter : float
the size of the 1D box smoothing filter (in muHz). Default value is `1.0`.
ind_width : float
the independent average smoothing width (in muHz). Default value is `20.0`.
n_rms : int
number of data points to estimate red noise contributions. Default value is `20`.
metric : str
which metric to use (i.e. bic or aic) for model selection. Default is `'bic'`.
include : bool
include metric values in verbose output. Default is `False`.
basis : str
which basis to use for background fitting, e.g. {a,b} parametrization. Default is `tau_sigma`.
n_laws : int
force number of Harvey-like components in background fit. Default value is `None`.
fix_wn : bool
fix the white noise level in the background fit. Default is `False`.
mc_iter : int
number of samples used to estimate uncertainty. Default value is `1`.
samples : bool
if true, will save the monte carlo samples to a csv. Default value is `False`.
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.fitbg : Dict[str,object]
the parameters relevant for the fit background routine
"""
vars = ['ind_width', 'box_filter', 'n_rms', 'n_laws', 'fix_wn', 'basis', 'metric', 'include',
'functions', 'mc_iter', 'samples', 'lower_bg', 'upper_bg', 'results']
if args.cli:
vals = [args.ind_width, args.box_filter, args.n_rms, args.n_laws, args.fix_wn, args.basis,
args.metric, args.include, get_dict(type='functions'), args.mc_iter, args.samples,
args.lower_bg, args.upper_bg, {}]
else:
vals = [ind_width, box_filter, n_rms, n_laws, fix_wn, basis, metric, include,
get_dict(type='functions'), mc_iter, samples, lower_bg, upper_bg, {}]
args.background = dict(zip(vars,vals))
return args
#####################################################################
# Features related to determining numax and dnu
#
def get_global_params(args, sm_par=None, lower_ps=None, upper_ps=None, width=1.0,
method='D', smooth_ps=2.5, threshold=1.0, n_peaks=5, cmap='binary',
clip_value=3.0, smooth_ech=None, interp_ech=False, lower_ech=None,
upper_ech=None, nox=50, noy=0, notching=False):
"""
Get the parameters relevant for finding global asteroseismic parameters numax and dnu.
Parameters
----------
args : argparse.Namespace
the command line arguments
sm_par : float
Gaussian filter width for determining smoothed numax (values are typically between 1-4)
method : str
method to determine dnu, choices are ~['M','A','D'] (default is `'D'`).
lower_ps : float
lower bound of power excess (in muHz). Default value is `None`.
upper_ps : float
upper bound of power excess (in muHz). Default value is `None`.
width : float
fractional width to use for power excess centerd on numax. Default value is `1.0`.
smooth_ps : float
        box filter [in muHz] for PS smoothing before calculating ACF. Default value is `2.5`.
threshold : float
fractional width of FWHM to use in ACF for later iterations. Default value is `1.0`.
n_peaks : int
the number of peaks to select. Default value is `5`.
lower_ech : float
lower bound of folded PS (in muHz) to 'whiten' mixed modes. Default value is `None`.
upper_ech : float
upper bound of folded PS (in muHz) to 'whiten' mixed modes. Default value is `None`.
clip_value : float
        the minimum frequency of the echelle plot. Default value is `3.0`.
smooth_ech : float
option to smooth the output of the echelle plot
interp_ech : bool
turns on the bilinear smoothing in echelle plot
nox : int
x-axis resolution on the echelle diagram. Default value is `50`. (NOT CURRENTLY IMPLEMENTED YET)
noy : int
        how many radial orders to plot on the echelle diagram. Default value is `0`. (NOT CURRENTLY IMPLEMENTED YET)
Returns
-------
args : argparse.Namespace
the updated command line arguments
args.globe : Dict[str,object]
the parameters relevant for determining the global parameters routine
"""
vars = ['sm_par', 'width', 'smooth_ps', 'threshold', 'n_peaks', 'method', 'cmap', 'clip_value',
'smooth_ech', 'interp_ech', 'nox', 'noy', 'notching', 'results']
if args.cli:
vals = [args.sm_par, args.width, args.smooth_ps, args.threshold, args.n_peaks, args.method, args.cmap,
args.clip_value, args.smooth_ech, args.interp_ech, args.nox, args.noy, args.notching, {}]
else:
        vals = [sm_par, width, smooth_ps, threshold, n_peaks, method, cmap, clip_value, smooth_ech,
                interp_ech, nox, noy, notching, {}]
args.globe = dict(zip(vars,vals))
return args
#####################################################################
# Can store different settings for individual stars
#
def get_csv_info(args, force=False, guess=None):
"""
Reads in any star information provided via args.info and is 'info/star_info.csv' by default.
** Please note that this is NOT required for pySYD to run successfully **
Parameters
----------
args : argparse.Namespace
the command line arguments
force : float
        if not false (i.e. non-zero), dnu will be forced to be equal to this value.
guess : float
estimate or guess for dnu
Returns
-------
args : argparse.Namespace
the updated command line arguments
"""
constants = Constants()
columns = get_dict(type='columns')['required']
# Open file if it exists
if os.path.exists(args.info):
df = pd.read_csv(args.info)
stars = [str(each) for each in df.stars.values.tolist()]
for i, star in enumerate(args.params['stars']):
args.params[star]['excess'] = args.params['excess']
args.params[star]['force'] = force
args.params[star]['guess'] = guess
if star in stars:
idx = stars.index(star)
# Update information from columns
for column in columns:
if not np.isnan(float(df.loc[idx,column])):
args.params[star][column] = float(df.loc[idx, column])
else:
args.params[star][column] = None
# Add estimate of numax if the column exists
if args.params[star]['numax'] is not None:
args.params[star]['excess'] = False
args.params[star]['dnu'] = 0.22*(args.params[star]['numax']**0.797)
elif args.params[star]['dnu'] is not None:
args.params[star]['force'] = True
args.params[star]['guess'] = args.params[star]['dnu']
# Otherwise estimate using other stellar parameters
else:
if args.params[star]['radius'] is not None and args.params[star]['logg'] is not None:
args.params[star]['mass'] = ((((args.params[star]['radius']*constants.r_sun)**(2.0))*10**(args.params[star]['logg'])/constants.G)/constants.m_sun)
args.params[star]['numax'] = constants.numax_sun*args.params[star]['mass']*(args.params[star]['radius']**(-2.0))*((args.params[star]['teff']/constants.teff_sun)**(-0.5))
args.params[star]['dnu'] = constants.dnu_sun*(args.params[star]['mass']**(0.5))*(args.params[star]['radius']**(-1.5))
# if target isn't in csv, still save basic parameters called througout the code
else:
for column in columns:
args.params[star][column] = None
# same if the file does not exist
else:
for star in args.stars:
args.params[star]['excess'] = args.params['excess']
args.params[star]['force'] = False
for column in columns:
args.params[star][column] = None
return args
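# Worked example of the scaling relation used above: numax = 100 muHz gives
# dnu ~ 0.22 * 100**0.797 ~ 8.6 muHz, and plugging in the solar numax (~3090 muHz)
# recovers roughly the solar dnu (~135 muHz), which is the sanity check behind it.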
#####################################################################
# If running from command line, checks input types and array lengths
#
def check_input_args(args, max_laws=3):
"""
Make sure that any command-line inputs are the proper lengths, types, etc.
Parameters
----------
args : argparse.Namespace
the command line arguments
max_laws : int
maximum number of resolvable Harvey components
    Raises
    ------
    AssertionError
        if any of the provided inputs have inconsistent lengths or invalid types
    """
checks={'lower_ps':args.lower_ps,'upper_ps':args.upper_ps,'lower_ech':args.lower_ech,
'upper_ech':args.upper_ech,'dnu':args.dnu,'numax':args.numax}
for check in checks:
if checks[check] is not None:
assert len(args.stars) == len(checks[check]), "The number of values provided for %s does not equal the number of stars"%check
if args.of_actual is not None:
assert isinstance(args.of_actual,int), "The oversampling factor for the input PS must be an integer"
if args.of_new is not None:
assert isinstance(args.of_new,int), "The new oversampling factor must be an integer"
if args.n_laws is not None:
assert args.n_laws <= max_laws, "We likely cannot resolve %d Harvey-like components for point sources. Please select a smaller number."%args.n_laws
def get_command_line(args, numax=None, dnu=None, lower_ps=None, upper_ps=None,
lower_ech=None, upper_ech=None):
"""
If certain CLI options are provided, it saves it to the appropriate star. This
is called after the csv is checked and therefore, this will override any duplicated
information provided there (if applicable).
Parameters
----------
args : argparse.Namespace
the command line arguments
args.lower_ps : float, optional
the lower frequency bound for numax (in muHz). Default is `None`.
args.upper_ps : float, optional
the upper frequency bound for numax (in muHz). Default is `None`.
args.numax : List[float], optional
the estimated numax (in muHz). Default is `None`.
args.dnu : List[float], optional
the estimated frequency spacing or dnu (in muHz). Default is `None`.
args.lower_ech : List[float], optional
the lower frequency for whitening the folded PS (in muHz). Default is `None`.
args.upper_ech : List[float], optional
the upper frequency for whitening the folded PS (in muHz). Default is `None`.
Returns
-------
args : argparse.Namespace
the updated command line arguments
"""
override = {
'lower_ps': args.lower_ps,
'upper_ps': args.upper_ps,
'numax': args.numax,
'dnu': args.dnu,
'lower_ech': args.lower_ech,
'upper_ech': args.upper_ech,
}
for i, star in enumerate(args.params['stars']):
for each in override:
if override[each] is not None:
# if numax is provided via CLI, findex is skipped
if each == 'numax':
args.params[star]['excess'] = False
args.params[star]['numax'] = override[each][i]
args.params[star]['dnu'] = 0.22*(args.params[star]['numax']**0.797)
# if dnu is provided via CLI, this value is used instead of the derived dnu
elif each == 'dnu':
args.params[star]['force'] = True
args.params[star]['guess'] = override[each][i]
else:
args.params[star][each] = override[each][i]
if args.params[star]['lower_ech'] is not None and args.params[star]['upper_ech'] is not None:
args.params[star]['ech_mask'] = [args.params[star]['lower_ech'],args.params[star]['upper_ech']]
else:
args.params[star]['ech_mask'] = None
return args
#####################################################################
# Data and information related to a processed star
#
def load_data(star, args):
"""
Loads both the light curve and power spectrum data in for a given star,
which will return `False` if unsuccessful and therefore, not run the rest
of the pipeline.
Parameters
----------
star : target.Target
the pySYD pipeline object
args : argparse.Namespace
command line arguments
Returns
-------
star : target.Target
the pySYD pipeline object
star.lc : bool
will return `True` if the light curve data was loaded in properly otherwise `False`
star.ps : bool
will return `True` if the power spectrum file was successfully loaded otherwise `False`
"""
if not star.params['cli']:
star.pickles=[]
# Now done at beginning to make sure it only does this once per star
if glob.glob(os.path.join(args.inpdir,'%s*'%str(star.name))) != []:
if star.verbose:
print('\n\n------------------------------------------------------')
print('Target: %s'%str(star.name))
print('------------------------------------------------------')
# Load light curve
args, star, note = load_time_series(args, star)
if star.verbose:
print(note)
# Load power spectrum
args, star, note = load_power_spectrum(args, star)
if star.verbose:
print(note)
return star
def load_file(path):
"""
    Load a light curve or a power spectrum from a basic two-column (N x 2) text file
and stores the data into the `x` (independent variable) and `y`
(dependent variable) arrays, where N is the length of the series.
Parameters
----------
path : str
the file path of the data file
Returns
-------
x : numpy.array
the independent variable i.e. the time or frequency array
y : numpy.array
the dependent variable, in this case either the flux or power array
"""
f = open(path, "r")
lines = f.readlines()
f.close()
# Set values
x = np.array([float(line.strip().split()[0]) for line in lines])
y = np.array([float(line.strip().split()[1]) for line in lines])
return x, y
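# Minimal sketch of the layout load_file expects: two whitespace-separated columns per line
# (time & flux for a light curve, frequency & power for a power spectrum). The file name
# below is made up purely for illustration.
def _example_load_file_sketch(path='example_LC.txt'):
    with open(path, 'w') as f:
        for t, y in zip([0.0, 0.0204, 0.0408], [1.0001, 0.9998, 1.0003]):
            f.write('%.6f %.6f\n' % (t, y))
    return load_file(path)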
def load_time_series(args, star, note=''):
"""
If available, star.lc is set to `True`, the time series data
is loaded in, and then it calculates the cadence and nyquist
freqency. If time series data is not provided, either the
cadence or nyquist frequency must be provided via CLI
Parameters
----------
star : target.Target
the pySYD pipeline object
args : argparse.Namespace
command line arguments
args.cadence : int
cadence of time series data (if known but data is not available)
args.nyquist : float
nyquist frequency of the provided power spectrum
note : str
optional suppressed verbose output
Returns
-------
star : target.Target
the pySYD pipeline object
star.lc : bool
will return `True` if the light curve data was loaded in properly otherwise `False`
star.time : numpy.array
time array in days
star.flux : numpy.array
relative or normalized flux array
"""
star.lc = False
star.nyquist = None
# Try loading the light curve
if os.path.exists(os.path.join(args.inpdir, '%s_LC.txt'%star.name)):
star.lc = True
star.time, star.flux = load_file(os.path.join(args.inpdir, '%s_LC.txt'%star.name))
star.time -= min(star.time)
star.cadence = int(round(np.nanmedian(np.diff(star.time)*24.0*60.0*60.0),0))
star.nyquist = 10**6./(2.0*star.cadence)
star.baseline = (max(star.time)-min(star.time))*24.*60.*60.
star.tau_upper = star.baseline/2.
note += '# LIGHT CURVE: %d lines of data read\n# Time series cadence: %d seconds'%(len(star.time),star.cadence)
return args, star, note
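# Worked example of the cadence/Nyquist computation above: Kepler long-cadence sampling
# (29.4244 min ~ 1765 s) gives nyquist = 10**6 / (2 * 1765) ~ 283 muHz, while a ~58.9 s
# short-cadence light curve gives ~8490 muHz.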
def load_power_spectrum(args, star, note='', long=10**6):
"""
    Loads in the power spectrum data for a given star,
which will return `False` if unsuccessful and therefore, not run the rest
of the pipeline.
Parameters
----------
star : target.Target
the pySYD pipeline object
args : argparse.Namespace
command line arguments
args.kep_corr : bool
if true, will run the module to mitigate the Kepler artefacts in the power spectrum. Default is `False`.
args.of_actual : int
the oversampling factor, if the power spectrum is already oversampled. Default is `1`, assuming a critically sampled PS.
args.of_new : float
the oversampling factor to use for the first iterations. Default is `5`.
note : str
optional suppressed verbose output
long : int
will display a warning if length of PS is longer than 10**6 lines
Returns
-------
star : target.Target
the pySYD pipeline object
star.ps : bool
will return `True` if the power spectrum file was successfully loaded otherwise `False`
star.frequency : numpy.array
frequency array in muHz
star.power : numpy.array
power spectral density array
"""
star.ps = False
# Try loading the power spectrum
if not os.path.exists(os.path.join(args.inpdir, '%s_PS.txt'%star.name)):
note += '# ERROR: %s/%s_PS.txt not found\n'%(args.inpdir, star.name)
else:
star.ps = True
star.frequency, star.power = load_file(os.path.join(args.inpdir, '%s_PS.txt'%star.name))
note += '# POWER SPECTRUM: %d lines of data read\n'%len(star.frequency)
if len(star.frequency) >= long:
note += '# WARNING: PS is large and will slow down the software'
star.resolution = star.frequency[1]-star.frequency[0]
if args.kep_corr:
note += '# **using Kepler artefact correction**\n'
star = remove_artefact(star)
if star.params[star.name]['ech_mask'] is not None:
note += '# **whitening the PS to remove mixed modes**\n'
star = whiten_mixed(star)
args, star, note = check_input_data(args, star, note)
return args, star, note
#####################################################################
# Relevant for Kepler (artefact) correction function
# -> this will save the seed for reproducibility purposes
#
def set_seed(star, lower=1, upper=10**7, size=1):
"""
For Kepler targets that require a correction via CLI (--kc), a random seed is generated
    from U~[1,10^7] and stored in star_info.csv for reproducible results in later runs.
Parameters
----------
star : target.Target
the pySYD pipeline object
lower : int
lower limit for random seed value. Default value is `1`.
upper : int
upper limit for random seed value. Default value is `10**7`.
size : int
number of seed values returned. Default value is `1`.
Returns
-------
star : target.Target
the pySYD pipeline object
"""
seed = list(np.random.randint(lower,high=upper,size=size))
df = pd.read_csv(star.params['info'])
stars = [str(each) for each in df.stars.values.tolist()]
idx = stars.index(star.name)
df.loc[idx,'seed'] = int(seed[0])
star.params[star.name]['seed'] = seed[0]
df.to_csv(star.params['info'],index=False)
return star
#####################################################################
# Routine to correct for 1/LC Kepler harmonics, as well as
# known high frequency artefacts and the low frequency artefacts
# (primarily in Q0-Q3 data)
#
def remove_artefact(star, lcp=1.0/(29.4244*60*1e-6), lf_lower=[240.0,500.0], lf_upper =[380.0,530.0],
hf_lower = [4530.0,5011.0,5097.0,5575.0,7020.0,7440.0,7864.0],
hf_upper = [4534.0,5020.0,5099.0,5585.0,7030.0,7450.0,7867.0],):
"""
Removes SC artefacts in Kepler power spectra by replacing them with noise (using linear interpolation)
following a chi-squared distribution.
Known artefacts are:
1) 1./LC harmonics
2) high frequency artefacts (>5000 muHz)
3) low frequency artefacts 250-400 muHz (mostly present in Q0 and Q3 data)
Parameters
----------
star : target.Target
the pySYD pipeline object
lcp : float
long cadence period in Msec
lf_lower : List[float]
lower limit of low frequency artefact
lf_upper : List[float]
upper limit of low frequency artefact
hf_lower : List[float]
lower limit of high frequency artefact
hf_upper : List[float]
upper limit of high frequency artefact
Returns
-------
star : target.Target
the pySYD pipeline object
"""
if star.params[star.name]['seed'] is None:
star = set_seed(star)
# LC period in Msec -> 1/LC ~muHz
artefact = (1.0+np.arange(14))*lcp
# Estimate white noise
white = np.mean(star.power[(star.frequency >= max(star.frequency)-100.0)&(star.frequency <= max(star.frequency)-50.0)])
np.random.seed(int(star.params[star.name]['seed']))
    # Routine 1: remove 1/LC artefacts by replacing the region +/- 5 resolution elements around each artefact
for i in range(len(artefact)):
if artefact[i] < np.max(star.frequency):
mask = np.ma.getmask(np.ma.masked_inside(star.frequency, artefact[i]-5.0*star.resolution, artefact[i]+5.0*star.resolution))
if np.sum(mask) != 0:
star.power[mask] = white*np.random.chisquare(2,np.sum(mask))/2.0
np.random.seed(int(star.params[star.name]['seed']))
# Routine 2: fix high frequency artefacts
for lower, upper in zip(hf_lower, hf_upper):
if lower < np.max(star.frequency):
mask = np.ma.getmask(np.ma.masked_inside(star.frequency, lower, upper))
if np.sum(mask) != 0:
star.power[mask] = white*np.random.chisquare(2,np.sum(mask))/2.0
np.random.seed(int(star.params[star.name]['seed']))
# Routine 3: remove wider, low frequency artefacts
for lower, upper in zip(lf_lower, lf_upper):
low = np.ma.getmask(np.ma.masked_outside(star.frequency, lower-20., lower))
upp = np.ma.getmask(np.ma.masked_outside(star.frequency, upper, upper+20.))
# Coeffs for linear fit
m, b = np.polyfit(star.frequency[~(low*upp)], star.power[~(low*upp)], 1)
mask = np.ma.getmask(np.ma.masked_inside(star.frequency, lower, upper))
# Fill artefact frequencies with noise
star.power[mask] = ((star.frequency[mask]*m)+b)*(np.random.chisquare(2, np.sum(mask))/2.0)
return star
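# Note on the noise replacement above: a chi-squared variable with 2 degrees of freedom has
# mean 2, so white * np.random.chisquare(2, n) / 2 scatters around the estimated white-noise
# level with the exponential statistics expected of raw power-spectrum bins.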
#####################################################################
# For subgiants with mixed modes, this will "whiten" these modes
# by adding noise (by drawing from a chi-squared distribution with 2
# dof) to properly estimate dnu.
#
def whiten_mixed(star):
"""
Generates random white noise in place of ell=1 for subgiants with mixed modes to better
constrain the characteristic frequency spacing.
Parameters
----------
star : target.Target
pySYD pipeline target
star.frequency : np.ndarray
the frequency of the power spectrum
star.power : np.ndarray
the power spectrum
"""
if star.params[star.name]['seed'] is None:
star = set_seed(star)
# Estimate white noise
if not star.globe['notching']:
white = np.mean(star.power[(star.frequency >= max(star.frequency)-100.0)&(star.frequency <= max(star.frequency)-50.0)])
else:
white = min(star.power[(star.frequency >= max(star.frequency)-100.0)&(star.frequency <= max(star.frequency)-50.0)])
# Take the provided dnu and "fold" the power spectrum
folded_freq = np.copy(star.frequency)%star.params[star.name]['guess']
mask = np.ma.getmask(np.ma.masked_inside(folded_freq, star.params[star.name]['ech_mask'][0], star.params[star.name]['ech_mask'][1]))
np.random.seed(int(star.params[star.name]['seed']))
# Makes sure the mask is not empty
if np.sum(mask) != 0:
if star.globe['notching']:
star.power[mask] = white
else:
star.power[mask] = white*np.random.chisquare(2,np.sum(mask))/2.0
# Typically if dnu is provided, it will assume you want to "force" that value
# so we need to adjust this back
star.params[star.name]['force'] = False
star.params[star.name]['guess'] = None
return star
def check_input_data(args, star, note):
"""
Checks the type(s) of input data and creates any additional, optional
arrays as well as critically-sampled power spectra (when applicable).
Parameters
----------
args : argparse.Namespace
command line arguments
star : target.Target
pySYD target object
note : str, optional
optional verbose output
Returns
-------
args : argparse.Namespace
updated command line arguments
star : target.Target
updated pySYD target object
note : str, optional
updated optional verbose output
"""
if star.lc:
args.of_actual = int(round((1./((max(star.time)-min(star.time))*0.0864))/(star.frequency[1]-star.frequency[0])))
star.freq_cs = np.array(star.frequency[args.of_actual-1::args.of_actual])
star.pow_cs = np.array(star.power[args.of_actual-1::args.of_actual])
if args.of_new is not None:
note += '# Computing new PS using oversampling of %d\n'%args.of_new
freq_os, pow_os = lomb(star.time, star.flux).autopower(method='fast', samples_per_peak=args.of_new, maximum_frequency=star.nyquist)
star.freq_os = freq_os*(10.**6/(24.*60.*60.))
star.pow_os = 4.*pow_os*np.var(star.flux*1e6)/(np.sum(pow_os)*(star.freq_os[1]-star.freq_os[0]))
else:
star.freq_os, star.pow_os = np.copy(star.frequency), np.copy(star.power)
else:
if args.of_actual is not None:
star.freq_cs = np.array(star.frequency[args.of_actual-1::args.of_actual])
star.pow_cs = np.array(star.power[args.of_actual-1::args.of_actual])
star.freq_os, star.pow_os = np.copy(star.frequency), np.copy(star.power)
else:
star.freq_cs, star.pow_cs = np.copy(star.frequency), np.copy(star.power)
star.freq_os, star.pow_os = np.copy(star.frequency), np.copy(star.power)
note += '# WARNING: using input PS with no additional information\n'
if args.mc_iter > 1:
note += '# **uncertainties may not be reliable unless using a critically-sampled PS**'
star.baseline = 1./((star.freq_cs[1]-star.freq_cs[0])*10**-6.)
star.tau_upper = star.baseline/2.
if args.of_actual is not None and args.of_actual != 1:
note += '# PS is oversampled by a factor of %d\n'%args.of_actual
else:
note += '# PS is critically-sampled\n'
note += '# PS resolution: %.6f muHz'%(star.freq_cs[1]-star.freq_cs[0])
return args, star, note
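# Worked example of the oversampling check above: a 90-day light curve has a natural
# resolution of 1 / (90 * 0.0864) ~ 0.129 muHz (0.0864 converts days to Msec); if the input
# power spectrum is sampled every ~0.0257 muHz, of_actual rounds to 5.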
#####################################################################
# Sets data up for first optional module
#
def get_estimates(star, max_trials=6):
"""
Parameters used with the first module, which is automated method to identify
power excess due to solar-like oscillations.
Parameters
----------
star : target.Target
pySYD target object
Returns
-------
star : target.Target
updated pySYD target object
"""
# If running the first module, mask out any unwanted frequency regions
star.frequency, star.power = np.copy(star.freq_os), np.copy(star.pow_os)
star.resolution = star.frequency[1]-star.frequency[0]
# mask out any unwanted frequencies
if star.params[star.name]['lower_ex'] is not None:
lower = star.params[star.name]['lower_ex']
else:
if star.excess['lower_ex'] is not None:
lower = star.excess['lower_ex']
else:
lower = min(star.frequency)
if star.params[star.name]['upper_ex'] is not None:
upper = star.params[star.name]['upper_ex']
else:
if star.excess['upper_ex'] is not None:
upper = star.excess['upper_ex']
else:
upper = max(star.frequency)
if star.nyquist is not None and star.nyquist < upper:
upper = star.nyquist
star.freq = star.frequency[(star.frequency >= lower)&(star.frequency <= upper)]
star.pow = star.power[(star.frequency >= lower)&(star.frequency <= upper)]
if star.excess['n_trials'] > max_trials:
star.excess['n_trials'] = max_trials
if (star.params[star.name]['numax'] is not None and star.params[star.name]['numax'] <= 500.) or (star.nyquist is not None and star.nyquist <= 300.):
star.boxes = np.logspace(np.log10(0.5), np.log10(25.), star.excess['n_trials'])
else:
star.boxes = np.logspace(np.log10(50.), np.log10(500.), star.excess['n_trials'])
return star
#####################################################################
# Checks if there's an estimate for numax (but no longer requires it)
# Still needs to be tested: no estimate of numax but global fit
#
def check_numax(star):
"""
Checks if there is a starting value for numax as pySYD needs this information to begin the
second module (whether be it from the first module, CLI or saved to info/star_info.csv).
Returns
-------
result : bool
will return `True` if there is prior value for numax otherwise `False`.
"""
# THIS MUST BE FIXED TOO
# Check if numax was provided as input
if star.params[star.name]['numax'] is None:
# If not, checks if findex was run
if not star.params['overwrite']:
dir = os.path.join(star.params[star.name]['path'],'estimates*')
else:
dir = os.path.join(star.params[star.name]['path'],'estimates.csv')
if glob.glob(dir) != []:
if not star.params['overwrite']:
list_of_files = glob.glob(os.path.join(star.params[star.name]['path'],'estimates*'))
file = max(list_of_files, key=os.path.getctime)
else:
file = os.path.join(star.params[star.name]['path'],'estimates.csv')
df = pd.read_csv(file)
for col in ['numax', 'dnu', 'snr']:
star.params[star.name][col] = df.loc[0, col]
# No estimate for numax provided and/or determined
else:
return False
return True
#####################################################################
# Sets data up for the derivation of asteroseismic parameters
#
def get_initial(star, lower_bg=1.0):
"""
Gets initial guesses for granulation components (i.e. timescales and amplitudes) using
solar scaling relations. This resets the power spectrum and has its own independent
filter (i.e. [lower,upper] mask) to use for this subroutine.
Parameters
----------
star : target.Target
pySYD target object
star.oversample : bool
if `True`, it will use an oversampled power spectrum for the first iteration or 'step'
minimum_freq : float
minimum frequency to use for the power spectrum if `None` is provided (via info/star_info.csv). Default = `10.0` muHz. Please note: this is typically sufficient for most stars but may affect evolved stars!
maximum_freq : float
maximum frequency to use for the power spectrum if `None` is provided (via info/star_info.csv). Default = `5000.0` muHz.
Returns
-------
star : target.Target
updated pySYD target object
"""
star.frequency, star.power = np.copy(star.freq_os), np.copy(star.pow_os)
star.resolution = star.frequency[1]-star.frequency[0]
if star.params[star.name]['lower_bg'] is not None:
lower = star.params[star.name]['lower_bg']
else:
lower = lower_bg
if star.params[star.name]['upper_bg'] is not None:
upper = star.params[star.name]['upper_bg']
else:
upper = max(star.frequency)
if star.nyquist is not None and star.nyquist < upper:
upper = star.nyquist
star.params[star.name]['bg_mask']=[lower,upper]
# Mask power spectrum for fitbg module
mask = np.ma.getmask(np.ma.masked_inside(star.frequency, star.params[star.name]['bg_mask'][0], star.params[star.name]['bg_mask'][1]))
star.frequency, star.power = np.copy(star.frequency[mask]), np.copy(star.power[mask])
star.random_pow = np.copy(star.power)
# Get other relevant initial conditions
star.i = 0
if star.params['background']:
star.background['results'][star.name] = {}
if star.params['global']:
star.globe['results'][star.name] = {}
star.globe['results'][star.name] = {'numax_smooth':[],'A_smooth':[],'numax_gauss':[],'A_gauss':[],'FWHM':[],'dnu':[]}
if star.params['testing']:
star.test='----------------------------------------------------\n\nTESTING INFORMATION:\n'
# Use scaling relations from sun to get starting points
star = solar_scaling(star)
return star
#####################################################################
# We use scaling relations to estimate initial guesses for
# several parameters
#
def solar_scaling(star, scaling='tau_sun_single', max_laws=3, times=1.5, scale=1.0):
"""
Uses scaling relations from the Sun to:
1) estimate the width of the region of oscillations using numax
2) guess starting values for granulation timescales
Parameters
----------
max_laws : int
the maximum number of resolvable Harvey-like components
"""
constants = Constants()
# Checks if there's an estimate for numax
# Use "excess" for different meaning now - i.e. is there a power excess
# as in, if it's (True by default, it will search for it but if it's False, it's saying there isn't any)
if check_numax(star):
star.exp_numax = star.params[star.name]['numax']
# Use scaling relations to estimate width of oscillation region to mask out of the background fit
width = constants.width_sun*(star.exp_numax/constants.numax_sun)
maxpower = [star.exp_numax-(width*star.globe['width']), star.exp_numax+(width*star.globe['width'])]
if star.params[star.name]['lower_ps'] is not None:
maxpower[0] = star.params[star.name]['lower_ps']
if star.params[star.name]['upper_ps'] is not None:
maxpower[1] = star.params[star.name]['upper_ps']
star.params[star.name]['ps_mask'] = [maxpower[0],maxpower[1]]
# Use scaling relation for granulation timescales from the sun to get starting points
scale = constants.numax_sun/star.exp_numax
# If not, uses entire power spectrum
else:
maxpower = [np.median(star.frequency), np.median(star.frequency)]
if star.params[star.name]['lower_ps'] is not None:
maxpower[0] = star.params[star.name]['lower_ps']
if star.params[star.name]['upper_ps'] is not None:
maxpower[1] = star.params[star.name]['upper_ps']
star.params[star.name]['ps_mask'] = [maxpower[0],maxpower[1]]
# Estimate granulation time scales
if scaling == 'tau_sun_single':
taus = np.array(constants.tau_sun_single)*scale
else:
taus = np.array(constants.tau_sun)*scale
taus = taus[taus <= star.baseline]
b = taus*10**-6.
mnu = (1.0/taus)*10**5.
star.b = b[mnu >= min(star.frequency)]
star.mnu = mnu[mnu >= min(star.frequency)]
if len(star.mnu)==0:
star.b = b[mnu >= 10.]
star.mnu = mnu[mnu >= 10.]
elif len(star.mnu) > max_laws:
star.b = b[mnu >= min(star.frequency)][-max_laws:]
star.mnu = mnu[mnu >= min(star.frequency)][-max_laws:]
else:
pass
# Save copies for plotting after the analysis
star.nlaws = len(star.mnu)
star.nlaws_orig = len(star.mnu)
star.mnu_orig = np.copy(star.mnu)
star.b_orig = np.copy(star.b)
return star
#####################################################################
# Save information
#
def save_file(star, formats=[">15.8f", ">18.10e"]):
"""
Saves the corrected power spectrum, which is computed by subtracting
the best-fit stellar background model from the power spectrum.
Parameters
----------
star : target.Target
the pySYD pipeline target
formats : List[str]
2x1 list of formats to save arrays as
star.params[star.name]['path'] : str
path to save the background-corrected power spectrum
star.frequency : ndarray
frequency array
    star.bg_corr : ndarray
background-subtracted power spectrum
"""
f_name = os.path.join(star.params[star.name]['path'],'bgcorr_ps.txt')
if not star.params['overwrite']:
f_name = get_next(star,'bgcorr_ps.txt')
with open(f_name, "w") as f:
for x, y in zip(star.frequency, star.bg_corr):
values = [x, y]
text = '{:{}}'*len(values) + '\n'
fmt = sum(zip(values, formats), ())
f.write(text.format(*fmt))
f.close()
if star.verbose:
print(' **background-corrected PS saved**')
def save_estimates(star):
"""
Save the results of the find excess routine into the save folder of the current star.
Parameters
----------
star : target.Target
pipeline target with the results of the `find_excess` routine
"""
best = star.excess['results'][star.name]['best']
variables = ['star', 'numax', 'dnu', 'snr']
results = [star.name, star.excess['results'][star.name][best]['numax'], star.excess['results'][star.name][best]['dnu'], star.excess['results'][star.name][best]['snr']]
save_path = os.path.join(star.params[star.name]['path'],'estimates.csv')
if not star.params['overwrite']:
save_path = get_next(star,'estimates.csv')
    ascii.write(np.array(results), save_path, names=variables)
import os
import cv2
import random
import numpy as np
import scipy.io as io
import torch
from torch.utils.data import Dataset
class CityscapesDataset(Dataset):
def __init__(self, root_data_path, im_path, period, num_im=3, crop_size=None, resize_size=None, aug=True):
self.dataset_dir = root_data_path
self.im_path = im_path
self.period = period
self.resize_size = resize_size
self.crop_size = crop_size
self.num_im = num_im
self.aug = aug
self.mean = np.array([123.675, 116.28, 103.53])
self.mean = np.expand_dims(np.expand_dims(self.mean, axis=1), axis=1)
self.std = np.array([58.395, 57.12, 57.375])
self.std = np.expand_dims(np.expand_dims(self.std, axis=1), axis=1)
self.get_list()
def get_list(self):
self.im_names = []
self.gt_names = []
file_path = os.path.join('/code/ST_Memory/data/Cityscapes/list', self.period + '.txt')
with open(file_path, 'r') as f:
lines = f.readlines()
for line in lines:
im_name, gt_name = line.split()
im_id = int(im_name.split('_')[2])
interval = 1
name_list = []
for i in range(self.num_im):
name = im_name.replace(
'{:06d}_leftImg8bit.png'.format(im_id),
'{:06d}_leftImg8bit.png'.format(im_id + (i - self.num_im // 2) * interval),
)
name_list.append(name)
self.im_names.append(name_list)
self.gt_names.append(gt_name)
def __len__(self):
return len(self.gt_names)
def __getitem__(self, idx):
im_list = []
for i in range(self.num_im):
im = cv2.imread(os.path.join(self.im_path, 'leftImg8bit_sequence', self.period, self.im_names[idx][i]))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im_list.append(im)
gt = cv2.imread(os.path.join(self.dataset_dir, 'gtFine', self.period, self.gt_names[idx]), 0)
if self.resize_size is not None:
im_list, gt = self.resize(im_list, gt)
if self.crop_size is not None:
im_list, gt = self.crop(im_list, gt)
if self.period == 'train' and self.aug:
im_list, gt = self.randomflip(im_list, gt)
h, w = gt.shape
im_list, gt = self.totentor(im_list, gt)
return im_list, gt
def resize(self, im_list, gt):
resize_h, resize_w = self.resize_size
for i in range(self.num_im):
im_list[i] = cv2.resize(im_list[i], (resize_w, resize_h), interpolation=cv2.INTER_LINEAR)
gt = cv2.resize(gt, (resize_w, resize_h), interpolation=cv2.INTER_NEAREST)
return im_list, gt
def crop(self, im_list, gt):
h, w = gt.shape
crop_h, crop_w = self.crop_size
start_h = np.random.randint(h - crop_h)
start_w = np.random.randint(w - crop_w)
for i in range(self.num_im):
im_list[i] = im_list[i][start_h:start_h+crop_h, start_w:start_w+crop_w, :]
gt = gt[start_h:start_h+crop_h, start_w:start_w+crop_w]
return im_list, gt
def randomflip(self, im_list, gt):
RANDOMFLIP = 0.5
if np.random.rand() < RANDOMFLIP:
for i in range(self.num_im):
im_list[i] = np.flip(im_list[i], axis=1)
gt = np.flip(gt, axis=1)
return im_list, gt
def totentor(self, im_list, gt):
for i in range(self.num_im):
im = im_list[i].transpose([2, 0, 1])
im = (im - self.mean) / self.std
im = torch.from_numpy(im.copy()).float()
im_list[i] = im
gt = torch.from_numpy(gt.copy()).long()
return im_list, gt
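# Illustrative usage (sketch; the paths and sizes below are hypothetical, not taken from this repo):
# dataset = CityscapesDataset(root_data_path='/data/cityscapes', im_path='/data/cityscapes',
#                             period='train', num_im=3, crop_size=(512, 1024))
# im_list, gt = dataset[0]   # num_im tensors of shape (3, H, W) plus one (H, W) label tensor
# loader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=True, num_workers=4)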
class CityscapesDataset_Image(Dataset):
def __init__(self, root_data_path, im_path, period, resize_size=None, aug=True):
self.dataset_dir = root_data_path
self.im_path = im_path
self.period = period
self.resize_size = resize_size
self.aug = aug
self.mean = np.array([123.675, 116.28, 103.53])
self.mean = np.expand_dims(np.expand_dims(self.mean, axis=1), axis=1)
self.std = np.array([58.395, 57.12, 57.375])
self.std = np.expand_dims(np.expand_dims(self.std, axis=1), axis=1)
self.get_list()
def get_list(self):
self.im_names = []
self.gt_names = []
file_path = os.path.join('/code/ST_Memory/data/Cityscapes/list', self.period + '.txt')
with open(file_path, 'r') as f:
lines = f.readlines()
for line in lines:
im_name, gt_name = line.split()
self.im_names.append(im_name)
self.gt_names.append(gt_name)
def __len__(self):
return len(self.gt_names)
def __getitem__(self, idx):
im = cv2.imread(os.path.join(self.im_path, 'leftImg8bit_sequence', self.period, self.im_names[idx]))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
gt = cv2.imread(os.path.join(self.dataset_dir, 'gtFine', self.period, self.gt_names[idx]), 0)
if self.period == 'train' and self.aug:
if self.resize_size is not None:
im, gt = self.resize(im, gt)
im, gt = self.randomflip(im, gt)
im, gt = self.totentor(im, gt)
return im, gt
def resize(self, im, gt):
resize_h, resize_w = self.resize_size
im = cv2.resize(im, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR)
gt = cv2.resize(gt, (resize_w, resize_h), interpolation=cv2.INTER_NEAREST)
return im, gt
def randomflip(self, im, gt):
RANDOMFLIP = 0.5
if np.random.rand() < RANDOMFLIP:
im = np.flip(im, axis=1)
gt = np.flip(gt, axis=1)
return im, gt
def totentor(self, im, gt):
im = im.transpose([2, 0, 1])
im = (im - self.mean) / self.std
im = torch.from_numpy(im.copy()).float()
gt = torch.from_numpy(gt.copy()).long()
return im, gt
class CityscapesDataset_Image_Aug(Dataset):
def __init__(self, root_data_path, im_path, period, crop_size=None, aug=True):
self.dataset_dir = root_data_path
self.im_path = im_path
self.period = period
self.crop_size = crop_size
self.aug = aug
self.mean = np.array([123.675, 116.28, 103.53])
self.mean = np.expand_dims(np.expand_dims(self.mean, axis=1), axis=1)
self.std = np.array([58.395, 57.12, 57.375])
self.std = np.expand_dims(np.expand_dims(self.std, axis=1), axis=1)
self.get_list()
def get_list(self):
self.im_names = []
self.gt_names = []
file_path = os.path.join('/code/ST_Memory/data/Cityscapes/list', self.period + '.txt')
with open(file_path, 'r') as f:
lines = f.readlines()
for line in lines:
im_name, gt_name = line.split()
self.im_names.append(im_name)
self.gt_names.append(gt_name)
def __len__(self):
return len(self.gt_names)
def __getitem__(self, idx):
im = cv2.imread(os.path.join(self.im_path, 'leftImg8bit_sequence', self.period, self.im_names[idx]))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
gt = cv2.imread(os.path.join(self.dataset_dir, 'gtFine', self.period, self.gt_names[idx]), 0)
if self.period == 'train' and self.aug:
if self.crop_size is not None:
im, gt = self.randomcrop(im, gt)
im, gt = self.randomflip(im, gt)
im, gt = self.totentor(im, gt)
return im, gt
def randomcrop(self, im, gt):
h, w = gt.shape
crop_h, crop_w = self.crop_size
h_start = random.randint(0, h - crop_h)
w_start = random.randint(0, w - crop_w)
im = im[h_start:h_start + crop_h, w_start:w_start + crop_w, :]
gt = gt[h_start:h_start + crop_h, w_start:w_start + crop_w]
return im, gt
def randomflip(self, im, gt):
RANDOMFLIP = 0.5
        if np.random.rand() < RANDOMFLIP:
            im = np.flip(im, axis=1)
            gt = np.flip(gt, axis=1)
        return im, gt
    def totentor(self, im, gt):
        im = im.transpose([2, 0, 1])
        im = (im - self.mean) / self.std
        im = torch.from_numpy(im.copy()).float()
        gt = torch.from_numpy(gt.copy()).long()
        return im, gt
# %%
"""
This module contains copies of the classes SOMToolBox_Parse and SomViz provided by the lecturers.
"""
import pandas as pd
import numpy as np
import gzip
from scipy.spatial import distance_matrix, distance
from ipywidgets import Layout, HBox, Box, widgets, interact
import plotly.graph_objects as go
class SOMToolBox_Parse:
def __init__(self, filename):
self.filename = filename
def read_weight_file(self, ):
df = pd.DataFrame()
if self.filename[-3:len(self.filename)] == '.gz':
with gzip.open(self.filename, 'rb') as file:
df, vec_dim, xdim, ydim = self._read_vector_file_to_df(df, file)
else:
with open(self.filename, 'rb') as file:
df, vec_dim, xdim, ydim = self._read_vector_file_to_df(df, file)
file.close()
return df.astype('float64'), vec_dim, xdim, ydim
def _read_vector_file_to_df(self, df, file):
xdim, ydim, vec_dim, position = 0, 0, 0, 0
for byte in file:
line = byte.decode('UTF-8')
if line.startswith('$'):
xdim, ydim, vec_dim = self._parse_vector_file_metadata(line, xdim, ydim, vec_dim)
if xdim > 0 and ydim > 0 and len(df.columns) == 0:
df = pd.DataFrame(index=range(0, ydim * xdim), columns=range(0, vec_dim))
else:
if len(df.columns) == 0 or vec_dim == 0:
raise ValueError('Weight file has no correct Dimensional information.')
position = self._parse_weight_file_data(line, position, vec_dim, df)
return df, vec_dim, xdim, ydim
def _parse_weight_file_data(self, line, position, vec_dim, df):
splitted = line.split(' ')
try:
df.values[position] = list(np.array(splitted[0:vec_dim]).astype(float))
position += 1
except:
raise ValueError('The input-vector file does not match its unit-dimension.')
return position
def _parse_vector_file_metadata(self, line, xdim, ydim, vec_dim):
splitted = line.split(' ')
if splitted[0] == '$XDIM':
xdim = int(splitted[1])
elif splitted[0] == '$YDIM':
ydim = int(splitted[1])
elif splitted[0] == '$VEC_DIM':
vec_dim = int(splitted[1])
return xdim, ydim, vec_dim
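# Illustrative usage (sketch; the weight-file name is hypothetical):
# parser = SOMToolBox_Parse('iris.wgt.gz')
# weights, vec_dim, xdim, ydim = parser.read_weight_file()
# viz = SomViz(weights.values, ydim, xdim)   # SomViz is defined below
# fig = viz.umatrix(title='U-matrix')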
# %%
class SomViz:
def __init__(self, weights, m, n):
self.weights = weights
self.m = m
self.n = n
def umatrix(self, som_map=None, color="Viridis", interp="best", title=""):
um = np.zeros((self.m * self.n, 1))
neuron_locs = list()
for i in range(self.m):
for j in range(self.n):
neuron_locs.append(np.array([i, j]))
neuron_distmat = distance_matrix(neuron_locs, neuron_locs)
for i in range(self.m * self.n):
neighbor_idxs = neuron_distmat[i] <= 1
neighbor_weights = self.weights[neighbor_idxs]
um[i] = distance_matrix(np.expand_dims(self.weights[i], 0), neighbor_weights).mean()
if som_map == None:
return self.plot(um.reshape(self.m, self.n), color=color, interp=interp, title=title)
else:
som_map.data[0].z = um.reshape(self.m, self.n)
def hithist(self, som_map=None, idata=[], color='RdBu', interp="best", title=""):
hist = [0] * self.n * self.m
for v in idata:
position = np.argmin(np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1)))
hist[position] += 1
if som_map == None:
return self.plot(np.array(hist).reshape(self.m, self.n), color=color, interp=interp, title=title)
else:
som_map.data[0].z = np.array(hist).reshape(self.m, self.n)
def component_plane(self, som_map=None, component=0, color="Viridis", interp="best", title=""):
if som_map == None:
return self.plot(self.weights[:, component].reshape(-1, self.n), color=color, interp=interp, title=title)
else:
som_map.data[0].z = self.weights[:, component].reshape(-1, self.n)
def sdh(self, som_map=None, idata=[], sdh_type=1, factor=1, draw=True, color="Cividis", interp="best", title=""):
import heapq
sdh_m = [0] * self.m * self.n
cs = 0
for i in range(0, factor): cs += factor - i
for vector in idata:
dist = np.sqrt(np.sum(np.power(self.weights - vector, 2), axis=1))
c = heapq.nsmallest(factor, range(len(dist)), key=dist.__getitem__)
if (sdh_type == 1):
for j in range(0, factor): sdh_m[c[j]] += (factor - j) / cs # normalized
if (sdh_type == 2):
for j in range(0, factor): sdh_m[c[j]] += 1.0 / dist[c[j]] # based on distance
if (sdh_type == 3):
dmin = min(dist)
for j in range(0, factor): sdh_m[c[j]] += 1.0 - (dist[c[j]] - dmin) / (max(dist) - dmin)
if som_map == None:
return self.plot(np.array(sdh_m).reshape(-1, self.n), color=color, interp=interp, title=title)
else:
som_map.data[0].z = np.array(sdh_m).reshape(-1, self.n)
def project_data(self, som_m=None, idata=[], title=""):
data_y = []
data_x = []
for v in idata:
position = np.argmin(np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1)))
x, y = position % self.n, position // self.n
data_x.extend([x])
data_y.extend([y])
if som_m != None: som_m.add_trace(
go.Scatter(x=data_x, y=data_y, mode="markers", marker_color='rgba(255, 255, 255, 0.8)', ))
def time_series(self, som_m=None, idata=[], wsize=50, title=""): # not tested
data_y = []
data_x = [i for i in range(0, len(idata))]
data_x2 = []
data_y2 = []
qmin = np.Inf
qmax = 0
step = 1
ps = []
for v in idata:
matrix = np.sqrt(np.sum(np.power(self.weights - v, 2), axis=1))
position = np.argmin(matrix)
qerror = matrix[position]
if qmin > qerror: qmin = qerror
if qmax < qerror: qmax = qerror
ps.append((position, qerror))
markerc = []
for v in ps:
data_y.extend([v[0]])
rez = v[1] / qmax
markerc.append('rgba(0, 0, 0, ' + str(rez) + ')')
x, y = v[0] % self.n, v[0] // self.n
if x == 0:
y = np.random.uniform(low=y, high=y + .1)
elif x == self.m - 1:
y = np.random.uniform(low=y - .1, high=y)
elif y == 0:
x = np.random.uniform(low=x, high=x + .1)
elif y == self.n - 1:
                x = np.random.uniform(low=x - .1, high=x)
# May 2018 xyz
import numpy as np
import numba
def Rx( x ):
# ref to my master notes 2015
# anticlockwise, x: radian
Rx = np.zeros((3,3))
Rx[0,0] = 1
Rx[1,1] = np.cos(x)
Rx[1,2] = np.sin(x)
Rx[2,1] = -np.sin(x)
Rx[2,2] = np.cos(x)
return Rx
def Ry( y ):
# anticlockwise, y: radian
Ry = np.zeros((3,3))
Ry[0,0] = np.cos(y)
Ry[0,2] = -np.sin(y)
Ry[1,1] = 1
Ry[2,0] = np.sin(y)
Ry[2,2] = np.cos(y)
return Ry
@numba.jit(nopython=True)
def Rz( z ):
# anticlockwise, z: radian
Rz = np.zeros((3,3))
Rz[0,0] = np.cos(z)
Rz[0,1] = np.sin(z)
Rz[1,0] = -np.sin(z)
Rz[1,1] = np.cos(z)
Rz[2,2] = 1
return Rz
def R1D( angle, axis ):
if axis == 'x':
return Rx(angle)
elif axis == 'y':
return Ry(angle)
elif axis == 'z':
return Rz(angle)
else:
raise NotImplementedError
def EulerRotate( angles, order ='zxy' ):
R = np.eye(3)
for i in range(3):
R_i = R1D(angles[i], order[i])
R = np.matmul( R_i, R )
return R
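def _example_euler_rotate():
    # Illustrative sketch (this helper is not part of the original module): compose a
    # rotation from Euler angles and apply it to a point cloud, mirroring the usage in
    # point_rotation_randomly below.
    points = np.random.rand(8, 3)
    R = EulerRotate([0.1, 0.2, 0.3], order='zxy')
    rotated = np.matmul(points, np.transpose(R))
    # A proper rotation preserves each point's distance to the origin.
    assert np.allclose(np.linalg.norm(points, axis=1), np.linalg.norm(rotated, axis=1))
    return rotated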
def point_rotation_randomly( points, rxyz_max=np.pi*np.array([0.1,0.1,0.1]) ):
# Input:
# points: (B, N, 3)
# rx/y/z: in radians
# Output:
# points: (B, N, 3)
batch_size = points.shape[0]
for b in range(batch_size):
rxyz = [ np.random.uniform(-r_max, r_max) for r_max in rxyz_max ]
R = EulerRotate( rxyz, 'xyz' )
points[b,:,:] = np.matmul( points[b,:,:], np.transpose(R) )
return points
def angle_with_x(direc, scope_id=0):
    if direc.shape[1] == 2:
        x = np.array([[1.0,0]])
    elif direc.shape[1] == 3:
        x = np.array([[1.0,0,0]])
x = np.tile(x, [direc.shape[0],1])
return angle_of_2lines(direc, x, scope_id)
def angle_of_2lines(line0, line1, scope_id=0):
'''
line0: [n,2/3]
line1: [n,2/3]
zero as ref
scope_id=0: [0,pi]
1: (-pi/2, pi/2]
angle: [n]
'''
assert line0.ndim == line1.ndim == 2
assert (line0.shape[0] == line1.shape[0]) or line0.shape[0]==1 or line1.shape[0]==1
assert line0.shape[1] == line1.shape[1] # 2 or 3
norm0 = np.linalg.norm(line0, axis=1, keepdims=True)
norm1 = np.linalg.norm(line1, axis=1, keepdims=True)
#assert norm0.min() > 1e-4 and norm1.min() > 1e-4 # return nan
line0 = line0 / norm0
line1 = line1 / norm1
angle = np.arccos( np.sum(line0 * line1, axis=1) )
if scope_id == 0:
pass
elif scope_id == 1:
# (-pi/2, pi/2]: offset=0.5, period=pi
angle = limit_period(angle, 0.5, np.pi)
else:
raise NotImplementedError
return angle
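def _example_angle_of_2lines():
    # Illustrative sketch (this helper is not part of the original module): the angle
    # between the x-axis and the y-axis is pi/2 in the default [0, pi] scope.
    line0 = np.array([[1.0, 0.0]])
    line1 = np.array([[0.0, 1.0]])
    angle = angle_of_2lines(line0, line1, scope_id=0)
    assert np.isclose(angle[0], np.pi / 2)
    return angle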
def limit_period(val, offset, period):
'''
[0, pi]: offset=0, period=pi
[-pi/2, pi/2]: offset=0.5, period=pi
[-pi, 0]: offset=1, period=pi
[0, pi/2]: offset=0, period=pi/2
[-pi/4, pi/4]: offset=0.5, period=pi/2
[-pi/2, 0]: offset=1, period=pi/2
'''
    return val - np.floor(val / period + offset) * period
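def _example_limit_period():
    # Illustrative sketch (this helper is not part of the original module): wrap 2.0 rad
    # into (-pi/2, pi/2] using offset=0.5 and period=pi, as described in the docstring.
    wrapped = limit_period(np.array([2.0]), 0.5, np.pi)
    assert np.isclose(wrapped[0], 2.0 - np.pi)   # ~ -1.14, which lies in (-pi/2, pi/2]
    return wrapped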
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision.datasets.folder
from PIL import Image, ImageFile
from torch.utils.data import TensorDataset
from torchvision import transforms
from torchvision.datasets import MNIST, ImageFolder, CIFAR10
from torchvision.transforms.functional import rotate
from wilds.datasets.camelyon17_dataset import Camelyon17Dataset
from wilds.datasets.fmow_dataset import FMoWDataset
ImageFile.LOAD_TRUNCATED_IMAGES = True
DATASETS = [
# Debug
"Debug28",
"Debug224",
# Small images
"VerticalLine",
"VHLine",
"FullColoredMNIST",
"ColoredMNIST",
"RotatedMNIST",
# Big images
"VLCS",
"PACS",
"OfficeHome",
"TerraIncognita",
"DomainNet",
"SVIRO",
# WILDS datasets
"WILDSCamelyon",
"WILDSFMoW",
]
def get_dataset_class(dataset_name):
"""Return the dataset class with the given name."""
if dataset_name not in globals():
raise NotImplementedError("Dataset not found: {}".format(dataset_name))
return globals()[dataset_name]
def num_environments(dataset_name):
return len(get_dataset_class(dataset_name).ENVIRONMENTS)
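def _example_dataset_lookup():
    # Illustrative sketch (this helper is not part of the original DomainBed code): look up
    # a dataset class by name and query how many environments it defines.
    cls = get_dataset_class("Debug28")
    n_envs = num_environments("Debug28")   # Debug28 defines environments ['0', '1', '2']
    assert n_envs == 3
    return cls, n_envs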
class MultipleDomainDataset:
N_STEPS = 5001 # Default, subclasses may override
CHECKPOINT_FREQ = 100 # Default, subclasses may override
N_WORKERS = 4 # Default, subclasses may override
ENVIRONMENTS = None # Subclasses should override
INPUT_SHAPE = None # Subclasses should override
def __getitem__(self, index):
return self.datasets[index]
def __len__(self):
return len(self.datasets)
class Debug(MultipleDomainDataset):
def __init__(self, root, test_envs, hparams):
super().__init__()
self.input_shape = self.INPUT_SHAPE
self.num_classes = 2
self.datasets = []
for _ in [0, 1, 2]:
self.datasets.append(
TensorDataset(
torch.randn(16, *self.INPUT_SHAPE),
torch.randint(0, self.num_classes, (16,))
)
)
class Debug28(Debug):
INPUT_SHAPE = (3, 28, 28)
ENVIRONMENTS = ['0', '1', '2']
class Debug224(Debug):
INPUT_SHAPE = (3, 224, 224)
ENVIRONMENTS = ['0', '1', '2']
class MultipleEnvironmentCIFAR10(MultipleDomainDataset):
def __init__(self, root, environments, dataset_transform, input_shape,
num_classes):
super().__init__()
if root is None:
raise ValueError('Data directory not specified!')
original_dataset_tr = CIFAR10(root, train=True, download=True)
original_dataset_te = CIFAR10(root, train=False, download=True)
original_images = np.concatenate((original_dataset_tr.data, original_dataset_te.data))
original_labels = np.concatenate((original_dataset_tr.targets, original_dataset_te.targets))
shuffle = torch.randperm(len(original_images))
original_images = original_images[shuffle]
original_labels = original_labels[shuffle]
self.datasets = []
for i in range(len(environments)):
self.datasets.append(dataset_transform(original_images, original_labels, environments[i]))
self.input_shape = input_shape
self.num_classes = num_classes
class MultipleEnvironmentMNIST(MultipleDomainDataset):
def __init__(self, root, environments, dataset_transform, input_shape,
num_classes):
super().__init__()
if root is None:
raise ValueError('Data directory not specified!')
self.colors = torch.FloatTensor(
[[0, 100, 0], [188, 143, 143], [255, 0, 0], [255, 215, 0], [0, 255, 0], [65, 105, 225], [0, 225, 225],
[0, 0, 255], [255, 20, 147], [160, 160, 160]])
self.random_colors = torch.randint(255, (10, 3)).float()
original_dataset_tr = MNIST(root, train=True, download=True)
original_dataset_te = MNIST(root, train=False, download=True)
original_images = torch.cat((original_dataset_tr.data,
original_dataset_te.data))
original_labels = torch.cat((original_dataset_tr.targets,
original_dataset_te.targets))
shuffle = torch.randperm(len(original_images))
original_images = original_images[shuffle]
original_labels = original_labels[shuffle]
self.datasets = []
self.environments = environments
for i in range(len(environments)):
images = original_images[i::len(environments)]
labels = original_labels[i::len(environments)]
self.datasets.append(dataset_transform(images, labels, environments[i]))
self.input_shape = input_shape
self.num_classes = num_classes
def __getitem__(self, index):
return self.datasets[index]
def __len__(self):
return len(self.datasets)
class VHLine(MultipleEnvironmentCIFAR10):
ENVIRONMENT_NAMES = [0, 1]
N_WORKERS = 0
N_STEPS = 10001
def __init__(self, root, test_envs, hparams):
self.domain_label = [0, 1]
# print("MY COMBINE:", MY_COMBINE)
self.input_shape = (3, 32, 32)
self.num_classes = 10
super(VHLine, self).__init__(root, self.domain_label, self.color_dataset, (3, 32, 32,), 10)
def color_dataset(self, images, labels, environment):
# Add a line to the last channel and vary its brightness during testing.
images = self.add_vhline(images, labels, b_scale=1, env=environment)
for i in range(5):
rand_indx = np.random.randint(0, images.shape[0])
self._plot(images[rand_indx])
x = torch.Tensor(images).permute(0, 3, 1, 2)
y = torch.Tensor(labels).view(-1).long()
return TensorDataset(x, y)
def add_vhline(self, images, labels, b_scale, env):
images = np.divide(images, 255.0)
if env == 1:
return images
def configurations(images, cond_indx, cls):
# To create the ten-valued spurious feature, we consider a vertical line passing through the middle of each channel,
# and also additionally the horizontal line through the first channel.
if cls == 0:
images[cond_indx, :, 16:17, 0] = np.add(images[cond_indx, :, 16:17, 0], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
images[cond_indx, :, 16:17, 1] = np.add(images[cond_indx, :, 16:17, 1], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
                images[cond_indx, :, 16:17, 2] = np.add(images[cond_indx, :, 16:17, 2], 0.5 + 0.5 * np.random.uniform(-b_scale, b_scale))
"""Use the pyDOE2 library to generate latin hypercube samples."""
import numpy as np
import warnings
try:
from pyDOE2 import lhs as lhs_pydoe
HAS_PYDOE2 = True
except ImportError:
HAS_PYDOE2 = False
try:
from scipy.stats.qmc import LatinHypercube as lhs_scipy
HAS_SCIPY_QMC = True
except ImportError:
HAS_SCIPY_QMC = False
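def _example_unit_lhs(n_dim, num_evaluations):
    # Illustrative sketch (this helper is not part of the original module): draw Latin
    # hypercube samples on the unit cube with whichever backend happens to be installed.
    if HAS_PYDOE2:
        return lhs_pydoe(n_dim, samples=num_evaluations)
    elif HAS_SCIPY_QMC:
        return lhs_scipy(d=n_dim).random(num_evaluations)
    raise ImportError("Neither pyDOE2 nor scipy.stats.qmc is available")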
def _format_inputs(xmins, xmaxs, n_dim, num_evaluations):
try:
len(xmins)
xmins_is_float = False
except TypeError:
xmins_is_float = True
try:
len(xmaxs)
xmaxs_is_float = False
except TypeError:
xmaxs_is_float = True
msg_nd1 = (
"Input n_dim=1 so input xmins and xmaxs should be "
"either a scalar or ndarray of shape (n_dim, num_evaluations)"
)
msg_nd2 = "Each entry of xmins must be a float or ndarray of shape num_evaluations"
msg_nd2b = "Input n_dim={0} so input xmins and xmaxs should have length {1}"
_zz = np.zeros(num_evaluations)
if n_dim == 1:
if xmins_is_float:
xmins = np.atleast_2d([xmins + _zz])
else:
assert np.shape(xmins) == (n_dim, num_evaluations), msg_nd1
xmins = np.atleast_2d(xmins)
if xmaxs_is_float:
xmaxs = np.atleast_2d([xmaxs + _zz])
else:
            assert np.shape(xmaxs) == (n_dim, num_evaluations), msg_nd1
            xmaxs = np.atleast_2d(xmaxs)
"""
test_standard.py - This module provides unit tests on the qoc.standard module.
"""
### qoc.standard.constants ###
def test_constants():
import numpy as np
from qoc.standard.constants import (get_creation_operator,
get_annihilation_operator)
big = 100
# Use the fact that (create)(annihilate) is the number operator
# to test the creation and annihilation operator methods.
for i in range(1, big):
analytic_number_operator = np.diag(np.arange(i))
generated_number_operator = np.matmul(get_creation_operator(i), get_annihilation_operator(i))
assert np.allclose(generated_number_operator, analytic_number_operator)
### qoc.standard.costs ###
# TODO: implement me
def test_controlarea():
pass
# TODO: implement me
def test_controlnorm():
pass
# TODO: implement me
def test_controlvariation():
pass
def test_forbiddensities():
import numpy as np
from qoc.standard import conjugate_transpose
from qoc.standard.costs.forbiddensities import ForbidDensities
system_eval_count = 11
state0 = np.array([[1], [0]])
density0 = np.matmul(state0, conjugate_transpose(state0))
forbid0_0 = np.array([[1], [0]])
density0_0 = np.matmul(forbid0_0, conjugate_transpose(forbid0_0))
forbid0_1 = np.divide(np.array([[1], [1]]), np.sqrt(2))
density0_1 = np.matmul(forbid0_1, conjugate_transpose(forbid0_1))
state1 = np.array([[0], [1]])
density1 = np.matmul(state1, conjugate_transpose(state1))
forbid1_0 = np.divide(np.array([[1], [1]]), np.sqrt(2))
density1_0 = np.matmul(forbid1_0, conjugate_transpose(forbid1_0))
forbid1_1 = np.divide(np.array([[1j], [1j]]), np.sqrt(2))
density1_1 = np.matmul(forbid1_1, conjugate_transpose(forbid1_1))
densities = np.stack((density0, density1,))
forbidden_densities0 = np.stack((density0_0, density0_1,))
forbidden_densities1 = np.stack((density1_0, density1_1,))
forbidden_densities = np.stack((forbidden_densities0, forbidden_densities1,))
fd = ForbidDensities(forbidden_densities, system_eval_count)
cost = fd.cost(None, densities, None)
expected_cost = 7 / 640
assert(np.allclose(cost, expected_cost,))
def test_forbidstates():
import numpy as np
from qoc.standard.costs.forbidstates import ForbidStates
system_eval_count = 11
state0 = np.array([[1], [0]])
forbid0_0 = np.array([[1], [0]])
forbid0_1 = np.divide(np.array([[1], [1]]), np.sqrt(2))
state1 = np.array([[0], [1]])
forbid1_0 = np.divide(np.array([[1], [1]]), np.sqrt(2))
forbid1_1 = np.divide(np.array([[1j], [1j]]), np.sqrt(2))
states = np.stack((state0, state1,))
forbidden_states0 = np.stack((forbid0_0, forbid0_1,))
forbidden_states1 = np.stack((forbid1_0, forbid1_1,))
forbidden_states = np.stack((forbidden_states0, forbidden_states1,))
fs = ForbidStates(forbidden_states, system_eval_count)
cost = fs.cost(None, states, None)
expected_cost = np.divide(5, 80)
assert(np.allclose(cost, expected_cost,))
def test_targetdensityinfidelity():
import numpy as np
from qoc.standard import conjugate_transpose
from qoc.standard.costs.targetdensityinfidelity import TargetDensityInfidelity
state0 = np.array([[0], [1]])
density0 = np.matmul(state0, conjugate_transpose(state0))
target_state0 = np.array([[1], [0]])
target_density0 = np.matmul(target_state0, conjugate_transpose(target_state0))
densities = np.stack((density0,), axis=0)
targets = np.stack((target_density0,), axis=0)
ti = TargetDensityInfidelity(targets)
cost = ti.cost(None, densities, None)
assert(np.allclose(cost, 1))
ti = TargetDensityInfidelity(densities)
cost = ti.cost(None, densities, None)
assert(np.allclose(cost, 0.5))
state0 = np.array([[1], [0]])
state1 = (np.array([[1j], [1]]) / np.sqrt(2))
density0 = np.matmul(state0, conjugate_transpose(state0))
density1 = np.matmul(state1, conjugate_transpose(state1))
target_state0 = np.array([[1j], [0]])
target_state1 = np.array([[1], [0]])
target_density0 = np.matmul(target_state0, conjugate_transpose(target_state0))
target_density1 = np.matmul(target_state1, conjugate_transpose(target_state1))
densities = np.stack((density0, density1,), axis=0)
targets = np.stack((target_density0, target_density1,), axis=0)
ti = TargetDensityInfidelity(targets)
cost = ti.cost(None, densities, None)
expected_cost = 0.625
assert(np.allclose(cost, expected_cost))
def test_targetdensityinfidelitytime():
import numpy as np
from qoc.standard import conjugate_transpose
from qoc.standard.costs.targetdensityinfidelitytime import TargetDensityInfidelityTime
system_eval_count = 11
state0 = np.array([[0], [1]])
density0 = np.matmul(state0, conjugate_transpose(state0))
target_state0 = np.array([[1], [0]])
target_density0 = np.matmul(target_state0, conjugate_transpose(target_state0))
densities = np.stack((density0,), axis=0)
targets = np.stack((target_density0,), axis=0)
ti = TargetDensityInfidelityTime(system_eval_count, targets)
cost = ti.cost(None, densities, None)
assert(np.allclose(cost, 0.1))
ti = TargetDensityInfidelityTime(system_eval_count, densities)
cost = ti.cost(None, densities, None)
assert(np.allclose(cost, 0.05))
state0 = np.array([[1], [0]])
state1 = (np.array([[1j], [1]]) / np.sqrt(2))
density0 = np.matmul(state0, conjugate_transpose(state0))
density1 = np.matmul(state1, conjugate_transpose(state1))
target_state0 = np.array([[1j], [0]])
target_state1 = np.array([[1], [0]])
target_density0 = np.matmul(target_state0, conjugate_transpose(target_state0))
target_density1 = np.matmul(target_state1, conjugate_transpose(target_state1))
densities = np.stack((density0, density1,), axis=0)
targets = np.stack((target_density0, target_density1,), axis=0)
ti = TargetDensityInfidelityTime(system_eval_count, targets)
cost = ti.cost(None, densities, None)
expected_cost = 0.0625
assert(np.allclose(cost, expected_cost))
def test_targetstateinfidelity():
import numpy as np
from qoc.standard.costs.targetstateinfidelity import TargetStateInfidelity
state0 = np.array([[0], [1]])
target0 = np.array([[1], [0]])
states = np.stack((state0,), axis=0)
targets = np.stack((target0,), axis=0)
ti = TargetStateInfidelity(targets)
cost = ti.cost(None, states, None)
assert(np.allclose(cost, 1))
ti = TargetStateInfidelity(states)
cost = ti.cost(None, states, None)
assert(np.allclose(cost, 0))
state0 = np.array([[1], [0]])
state1 = (np.array([[1j], [1]]) / np.sqrt(2))
    target0 = np.array([[1j], [0]])
"""
Ranking
=======
Metrics to use for ranking models.
"""
import numpy as np
import numpy.ma as ma
from typing import Tuple
from pytypes import typechecked
@typechecked
def check_arrays(y_true: np.ndarray, y_prob: np.ndarray) -> None :
# Make sure that inputs this conforms to our expectations
assert isinstance(y_true, np.ndarray), AssertionError(
'Expect y_true to be a {expected}. Got {actual}'
.format(expected=np.ndarray, actual=type(y_true))
)
assert isinstance(y_prob, np.ndarray), AssertionError(
'Expect y_prob to be a {expected}. Got {actual}'
.format(expected=np.ndarray, actual=type(y_prob))
)
assert y_true.shape == y_prob.shape, AssertionError(
'Shapes must match. Got y_true={true_shape}, y_prob={prob_shape}'
.format(true_shape=y_true.shape, prob_shape=y_prob.shape)
)
assert len(y_true.shape) == 2, AssertionError(
'Shapes should be of rank 2. Got {rank}'
.format(rank=len(y_true.shape))
)
uniques = np.unique(y_true)
assert len(uniques) <= 2, AssertionError(
'Expected labels: [0, 1]. Got: {uniques}'
.format(uniques=uniques)
)
@typechecked
def check_k(n_items: int, k: int) -> None:
# Make sure that inputs conform to our expectations
assert isinstance(k, int), AssertionError(
'Expect k to be a {expected}. Got {actual}'
.format(expected=int, actual=type(k))
)
assert 0 <= k <= n_items, AssertionError(
'Expect 0 <= k <= {n_items}. Got {k}'
.format(n_items=n_items, k=k)
)
@typechecked
def recall_at_k(y_true: np.ndarray, y_prob: np.ndarray, k: int) -> float:
"""
Calculates recall at k for binary classification ranking problems. Recall
at k measures the proportion of total relevant items that are found in the
top k (in ranked order by y_prob). If k=5, there are 6 total relevant documents,
and 3 of the top 5 items are relevant, the recall at k will be 0.5.
Samples where y_true is 0 for all labels are filtered out because there will be
0 true positives and false negatives.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
relevant or not. size=(n_samples, n_items)
y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
k (int): Number of items to evaluate for relevancy, in descending
sorted order by y_prob
Returns:
recall (float): The recall at k
Example:
>>> y_true = np.array([
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
])
>>> y_prob = np.array([
[0.4, 0.6, 0.3],
[0.1, 0.2, 0.9],
[0.9, 0.6, 0.3],
])
>>> recall_at_k(y_true, y_prob, 2)
0.6666666666666666
In the example above, each of the samples has 1 total relevant document.
For the first sample, there are 0 relevant documents in the top k for k=2,
because 0.3 is the 3rd value for y_prob in descending order. For the second
sample, there is 1 relevant document in the top k, because 0.2 is the 2nd
value for y_prob in descending order. For the third sample, there is 1
relevant document in the top k, because 0.9 is the 1st value for y_prob in
descending order. Averaging the values for all of these samples (0, 1, 1)
gives a value for recall at k of 2/3.
"""
check_arrays(y_true, y_prob)
check_k(y_true.shape[1], k)
# Filter out rows of all zeros
mask = y_true.sum(axis=1).astype(bool)
y_prob = y_prob[mask]
y_true = y_true[mask]
# Extract shape components
n_samples, n_items = y_true.shape
# List of locations indexing
y_prob_index_order = np.argsort(-y_prob)
rows = np.reshape(np.arange(n_samples), (-1, 1))
ranking = y_true[rows, y_prob_index_order]
# Calculate number true positives for numerator and number of relevant documents for denominator
num_tp = np.sum(ranking[:, :k], axis=1)
num_relevant = np.sum(ranking, axis=1)
# Calculate recall at k
recall = np.mean(num_tp / num_relevant)
return recall
@typechecked
def precision_at_k(y_true: np.ndarray, y_prob: np.ndarray, k: int) -> float:
"""
Calculates precision at k for binary classification ranking problems.
Precision at k measures the proportion of items in the top k (in ranked
order by y_prob) that are relevant (as defined by y_true). If k=5, and
3 of the top 5 items are relevant, the precision at k will be 0.6.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
relevant or not. size=(n_samples, n_items)
y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
k (int): Number of items to evaluate for relevancy, in descending
sorted order by y_prob
Returns:
precision_k (float): The precision at k
Example:
>>> y_true = np.array([
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
])
>>> y_prob = np.array([
[0.4, 0.6, 0.3],
[0.1, 0.2, 0.9],
[0.9, 0.6, 0.3],
])
>>> precision_at_k(y_true, y_prob, 2)
0.3333333333333333
For the first sample, there are 0 relevant documents in the top k for k=2,
because 0.3 is the 3rd value for y_prob in descending order. For the second
sample, there is 1 relevant document in the top k, because 0.2 is the 2nd
value for y_prob in descending order. For the third sample, there is 1
relevant document in the top k, because 0.9 is the 1st value for y_prob in
descending order. Because k=2, the values for precision of k for each sample
are 0, 1/2, and 1/2 respectively. Averaging these gives a value for precision
at k of 1/3.
"""
check_arrays(y_true, y_prob)
check_k(y_true.shape[1], k)
# Extract shape components
n_samples, n_items = y_true.shape
# List of locations indexing
y_prob_index_order = np.argsort(-y_prob)
rows = np.reshape(np.arange(n_samples), (-1, 1))
ranking = y_true[rows, y_prob_index_order]
# Calculate number of true positives for numerator
num_tp = np.sum(ranking[:, :k], axis=1)
# Calculate precision at k
precision = np.mean(num_tp / k)
return precision
@typechecked
def mean_reciprocal_rank(y_true: np.ndarray, y_prob: np.ndarray) -> np.ma.MaskedArray:
"""
Gets a positional score about how well you did at rank 1, rank 2,
etc. The resulting vector is of size (n_items,) but element 0 corresponds to
rank 1 not label 0.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
relevant or not. size=(n_samples, n_items)
y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
Returns:
mrr (~np.ma.array): The positional ranking score. This will be masked
for ranks where there were no relevant values. size=(n_items,)
"""
check_arrays(y_true, y_prob)
# Extract shape components
n_samples, n_items = y_true.shape
# Determine the ranking order
rank_true = np.flip(np.argsort(y_true, axis=1), axis=1)
rank_prob = np.flip(np.argsort(y_prob, axis=1), axis=1)
# Compute reciprocal ranks
reciprocal = 1.0 / (np.argsort(rank_prob, axis=1) + 1)
# Now order the reciprocal ranks by the true order
rows = np.reshape(np.arange(n_samples), (-1, 1))
cols = rank_true
ordered = reciprocal[rows, cols]
# Create a masked array of true labels only
ma = np.ma.array(ordered, mask=np.isclose(y_true[rows, cols], 0))
return ma.mean(axis=0)
@typechecked
def label_mean_reciprocal_rank(y_true: np.ndarray, y_prob: np.ndarray) -> np.ma.MaskedArray:
"""
Determines the average rank each label was placed across samples. Only labels that are
relevant in the true data set are considered in the calculation.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
            relevant or not. size=(n_samples, n_items)
        y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
Returns:
mrr (~np.ma.array): The positional ranking score. This will be masked
for ranks where there were no relevant values. size=(n_items,)
"""
check_arrays(y_true, y_prob)
rank_prob = np.flip(np.argsort(y_prob, axis=1), axis=1)
reciprocal = 1 / (np.argsort(rank_prob, axis=1) + 1)
ma = np.ma.array(reciprocal, mask=~y_true.astype(bool))
return ma.mean(axis=0)
@typechecked
def ndcg(y_true: np.ndarray, y_prob: np.ndarray, k=0) -> np.float64:
"""
A score for measuring the quality of a set of ranked results. The resulting score is between 0 and 1.0 -
results that are relevant and appear earlier in the result set are given a heavier weight, so the
higher the score, the more relevant your results are
The optional k param is recommended for data sets where the first few labels are almost always ranked first,
and hence skew the overall score. To compute this "NDCG after k" metric, we remove the top k (by predicted
probability) labels and compute NDCG as usual for the remaining labels.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
            relevant or not. size=(n_samples, n_items)
        y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
k (int): Optional, the top k classes to exclude
Returns:
ndcg (~np.float64): The normalized dcg score across all queries, excluding the top k
"""
# Get the sorted prob indices in descending order
rank_prob = np.flip(np.argsort(y_prob, axis=1), axis=1)
# Get the sorted true indices in descending order
rank_true = np.flip(np.argsort(y_true, axis=1), axis=1)
prob_samples, prob_items = y_prob.shape
true_samples, true_items = y_true.shape
# Compute DCG
# Order y_true and y_prob by y_prob order indices
prob_vals = y_prob[np.arange(prob_samples).reshape(prob_samples, 1), rank_prob]
true_vals = y_true[np.arange(true_samples).reshape(true_samples, 1), rank_prob]
# Remove the first k columns
prob_vals = prob_vals[:, k:]
true_vals = true_vals[:, k:]
rank_prob_k = np.flip(np.argsort(prob_vals, axis=1), axis=1)
n_samples, n_items = true_vals.shape
values = np.arange(n_samples).reshape(n_samples, 1)
# Construct the dcg numerator, which are the relevant items for each rank
dcg_numerator = true_vals[values, rank_prob_k]
# Construct the denominator, which is the log2 of the current rank + 1
position = np.arange(1, n_items + 1)
denominator = np.log2(np.tile(position, (n_samples, 1)) + 1.0)
dcg = np.sum(dcg_numerator / denominator, axis=1)
# Compute IDCG
rank_true_idcg = np.flip(np.argsort(true_vals, axis=1), axis=1)
idcg_true_samples, idcg_true_items = rank_true_idcg.shape
# Order y_true indices
idcg_true_vals = true_vals[np.arange(idcg_true_samples).reshape(idcg_true_samples, 1), rank_true_idcg]
rank_true_k = np.flip(np.argsort(idcg_true_vals, axis=1), axis=1)
idcg_numerator = idcg_true_vals[values, rank_true_k]
idcg = np.sum(idcg_numerator / denominator, axis=1)
with np.warnings.catch_warnings():
np.warnings.filterwarnings('ignore')
sample_ndcg = np.divide(dcg, idcg)
# ndcg may be NaN if idcg is 0; this happens when there are no relevant documents
# in this case, showing anything in any order should be considered correct
where_are_nans = np.isnan(sample_ndcg)
where_are_infs = np.isinf(sample_ndcg)
sample_ndcg[where_are_nans] = 1.0
sample_ndcg[where_are_infs] = 0.0
return np.mean(sample_ndcg, dtype=np.float64)
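def _example_ndcg():
    # Illustrative sketch (this helper is not part of the original module): a perfectly
    # ranked result set scores 1.0, since its DCG equals the ideal DCG.
    y_true = np.array([[1, 0, 0]])
    y_prob = np.array([[0.9, 0.5, 0.1]])
    assert np.isclose(ndcg(y_true, y_prob), 1.0)
    return ndcg(y_true, y_prob)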
@typechecked
def generate_y_pred_at_k(y_prob: np.ndarray, k: int) -> np.ndarray:
"""
Generates a matrix of binary predictions from a matrix of probabilities
by evaluating the top k items (in ranked order by y_prob) as true.
In the case where multiple probabilities for a sample are identical, the
behavior is undefined in terms of how the probabilities are ranked by argsort.
Args:
y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
k (int): Number of items to evaluate as true, in descending
sorted order by y_prob
Returns:
y_pred (~np.ndarray): A binary prediction that the given flag is
relevant. size=(n_samples, n_items)
Example:
>>> y_prob = np.array([
[0.4, 0.6, 0.3],
[0.1, 0.2, 0.9],
[0.9, 0.6, 0.3],
])
>>> generate_y_pred_at_k(y_prob, 2)
array([
[1, 1, 0],
[0, 1, 1],
[1, 1, 0]
])
For the first sample, the top 2 values for y_prob are 0.6 and 0.4, so y_pred
at those positions is 1. For the second sample, the top 2 values for y_prob
are 0.9 and 0.2, so y_pred at these positions is 1. For the third sample, the
top 2 values for y_prob are 0.9 and 0.6, so y_pred at these positions in 1.
"""
n_items = y_prob.shape[1]
index_array = np.argsort(y_prob, axis=1)
col_idx = np.arange(y_prob.shape[0]).reshape(-1, 1)
y_pred = np.zeros(np.shape(y_prob))
y_pred[col_idx, index_array[:, n_items-k:n_items]] = 1
return y_pred
@typechecked
def confusion_matrix_at_k(y_true: np.ndarray, y_prob: np.ndarray, k: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Generates binary predictions from probabilities by evaluating the top k items
(in ranked order by y_prob) as true. Uses these binary predictions along with
true flags to calculate the confusion matrix per label for binary
classification problems.
Args:
y_true (~np.ndarray): Flags (0, 1) which indicate whether a column is
relevant or not. size=(n_samples, n_items)
y_prob (~np.ndarray): The predicted probability that the given flag
is relevant. size=(n_samples, n_items)
k (int): Number of items to evaluate as true, in descending
sorted order by y_prob
Returns:
tn, fp, fn, tp (tuple of ~np.ndarrays): A tuple of ndarrays containing
the number of true negatives (tn), false positives (fp),
false negatives (fn), and true positives (tp) for each item. The
length of each ndarray is equal to n_items
Example:
>>> y_true = np.array([
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
])
>>> y_prob = np.array([
[0.4, 0.6, 0.3],
[0.1, 0.2, 0.9],
[0.9, 0.6, 0.3],
])
>>> y_pred = np.array([
[1, 1, 0],
[0, 1, 1],
[1, 1, 0]
])
>>> label_names = ['moved', 'hadAJob', 'farmIncome']
>>> confusion_matrix_at_k(y_true, y_prob, 2)
(
np.array([1, 0, 1]),
np.array([1, 2, 1]),
np.array([0, 0, 1]),
np.array([1, 1, 0])
)
In the example above, y_pred is not passed into the function, but is
generated by calling generate_y_pred_at_k with y_prob and k.
For the first item (moved), the first sample is a false positive, the
second is a true negative, and the third is a true positive.
For the second item (hadAJob), the first and third samples are false
positives, and the second is a true positive.
For the third item (farmIncome), the first item is a false negative, the
second is a false positive, and the third is a true positive.
"""
check_arrays(y_true, y_prob)
check_k(y_true.shape[1], k)
y_pred = generate_y_pred_at_k(y_prob, k)
tp = np.count_nonzero(y_pred * y_true, axis=0)
    tn = np.count_nonzero((y_pred - 1) * (y_true - 1), axis=0)
    fp = np.count_nonzero(y_pred * (y_true - 1), axis=0)
    fn = np.count_nonzero((y_pred - 1) * y_true, axis=0)
    return tn, fp, fn, tp
from collections import OrderedDict
import numpy as np
from robosuite_extra.env_base.sawyer import SawyerEnv
from robosuite.models.arenas import TableArena
from robosuite.models.objects import BoxObject, CylinderObject
from robosuite_extra.models.generated_objects import FullyFrictionalBoxObject
from robosuite_extra.models.tasks import UniformSelectiveSampler
from robosuite.utils.mjcf_utils import array_to_string
from robosuite_extra.push_env.push_task import PushTask
from robosuite_extra.utils import transform_utils as T
from robosuite_extra.controllers import SawyerEEFVelocityController
import copy
from collections import deque
class SawyerPush(SawyerEnv):
"""
This class corresponds to a Pushing task for the sawyer robot arm.
This task consists of pushing a rectangular puck from some initial position to a final goal.
The goal and initial positions are chosen randomly within some starting bounds
"""
def __init__(
self,
gripper_type="PushingGripper",
parameters_to_randomise=None,
randomise_initial_conditions=True,
table_full_size=(0.8, 1.6, 0.719),
table_friction=(1e-4, 5e-3, 1e-4),
use_camera_obs=False,
use_object_obs=True,
reward_shaping=True,
placement_initializer=None,
gripper_visualization=True,
use_indicator_object=False,
has_renderer=False,
has_offscreen_renderer=True,
render_collision_mesh=False,
render_visual_mesh=True,
control_freq=10,
horizon=80,
ignore_done=False,
camera_name="frontview",
camera_height=256,
camera_width=256,
camera_depth=False,
pid=True,
):
"""
Args:
gripper_type (str): type of gripper, used to instantiate
gripper models from gripper factory.
parameters_to_randomise [string,] : List of keys for parameters to randomise, None means all the available parameters are randomised
randomise_initial_conditions [bool,]: Whether or not to randomise the starting configuration of the task.
table_full_size (3-tuple): x, y, and z dimensions of the table.
table_friction (3-tuple): the three mujoco friction parameters for
the table.
use_camera_obs (bool): if True, every observation includes a
rendered image.
use_object_obs (bool): if True, include object (cube) information in
the observation.
reward_shaping (bool): if True, use dense rewards.
placement_initializer (ObjectPositionSampler instance): if provided, will
be used to place objects on every reset, else a UniformRandomSampler
is used by default.
gripper_visualization (bool): True if using gripper visualization.
Useful for teleoperation.
use_indicator_object (bool): if True, sets up an indicator object that
is useful for debugging.
has_renderer (bool): If true, render the simulation state in
a viewer instead of headless mode.
has_offscreen_renderer (bool): True if using off-screen rendering.
render_collision_mesh (bool): True if rendering collision meshes
in camera. False otherwise.
render_visual_mesh (bool): True if rendering visual meshes
in camera. False otherwise.
control_freq (float): how many control signals to receive
in every second. This sets the amount of simulation time
that passes between every action input.
horizon (int): Every episode lasts for exactly @horizon timesteps.
ignore_done (bool): True if never terminating the environment (ignore @horizon).
camera_name (str): name of camera to be rendered. Must be
set if @use_camera_obs is True.
camera_height (int): height of camera frame.
camera_width (int): width of camera frame.
camera_depth (bool): True if rendering RGB-D, and RGB otherwise.
pid (bool) : True if using a velocity PID controller for controlling the arm, false if using a
mujoco-implemented proportional controller.
"""
self.initialised = False
# settings for table
self.table_full_size = table_full_size
self.table_friction = table_friction
# whether to use ground-truth object states
self.use_object_obs = use_object_obs
# reward configuration
self.reward_shaping = reward_shaping
if (self.reward_shaping):
self.reward_range = [-np.inf, horizon * (0.1)]
else:
self.reward_range = [0, 1]
# Domain Randomisation Parameters
self.parameters_to_randomise = parameters_to_randomise
self.randomise_initial_conditions = randomise_initial_conditions
self.dynamics_parameters = OrderedDict()
self.default_dynamics_parameters = OrderedDict()
self.parameter_sampling_ranges = OrderedDict()
self.factors_for_param_randomisation = OrderedDict()
# object placement initializer
if placement_initializer:
self.placement_initializer = placement_initializer
else:
self.placement_initializer = UniformSelectiveSampler(
x_range=None,
y_range=None,
ensure_object_boundary_in_range=True,
z_rotation=None,
np_random=None
)
# Param for storing a specific goal and object starting positions
self.specific_goal_position = None
self.specific_gripper_position = None
self.gripper_pos_neutral = [0.44969246, 0.16029991, 1.00875409]
super().__init__(
gripper_type=gripper_type,
gripper_visualization=gripper_visualization,
use_indicator_object=use_indicator_object,
has_renderer=has_renderer,
has_offscreen_renderer=has_offscreen_renderer,
render_collision_mesh=render_collision_mesh,
render_visual_mesh=render_visual_mesh,
control_freq=control_freq,
horizon=horizon,
ignore_done=ignore_done,
use_camera_obs=use_camera_obs,
camera_name=camera_name,
camera_height=camera_height,
camera_width=camera_width,
camera_depth=camera_depth,
pid=pid,
)
self._set_default_dynamics_parameters(pid)
self._set_default_parameter_sampling_ranges()
self._set_dynamics_parameters(self.default_dynamics_parameters)
self._set_factors_for_param_randomisation(self.default_dynamics_parameters)
# Check that the parameters to randomise are within the allowed parameters
if (self.parameters_to_randomise is not None):
self._check_allowed_parameters(self.parameters_to_randomise)
# IK solver for placing the arm at desired locations during reset
self.IK_solver = SawyerEEFVelocityController()
self.placement_initializer.set_random_number_generator(self.np_random)
self.init_control_timestep = self.control_timestep
self.init_qpos = self.mujoco_robot.init_qpos
# Storing parameters for temporary switching
self.cached_parameters_to_randomise = None
self.cached_dynamics_parameters = None
self.initialised = True
self.reset()
def _set_dynamics_parameters(self, parameters):
self.dynamics_parameters = copy.deepcopy(parameters)
def _default_damping_params(self):
# return np.array([0.01566, 1.171, 0.4906, 0.1573, 1.293, 0.08688, 0.1942]) # -real world calibration
# return np.array([0.8824,2.3357,1.1729, 0.0 , 0.5894, 0.0 ,0.0082]) #- command calibration
return np.array([8.19520686e-01, 1.25425414e+00, 1.04222253e+00,
0.00000000e+00, 1.43146116e+00, 1.26807887e-01, 1.53680244e-01, ]) # - command calibration 2
def _default_armature_params(self):
return np.array([0.00000000e+00, 0.00000000e+00, 2.70022664e-02, 5.35581203e-02,
3.31204140e-01, 2.59623415e-01, 2.81964631e-01, ])
def _default_joint_friction_params(self):
return np.array([4.14390483e-03,
9.30938506e-02, 2.68656509e-02, 0.00000000e+00, 0.00000000e+00,
4.24867204e-04, 8.62040317e-04])
def _set_default_dynamics_parameters(self, use_pid):
"""
Setting the the default environment parameters.
"""
self.default_dynamics_parameters['joint_forces'] = np.zeros((7,))
self.default_dynamics_parameters['acceleration_forces'] = np.zeros((7,))
self.default_dynamics_parameters['eef_forces'] = np.zeros((6,))
self.default_dynamics_parameters['obj_forces'] = np.zeros((6,))
self.default_dynamics_parameters['eef_timedelay'] = np.asarray(0)
self.default_dynamics_parameters['obj_timedelay'] = np.asarray(0)
self.default_dynamics_parameters['timestep_parameter'] = np.asarray(0.0)
self.default_dynamics_parameters['pid_iteration_time'] = np.asarray(0.)
self.default_dynamics_parameters['mujoco_timestep'] = np.asarray(0.002)
self.default_dynamics_parameters['action_additive_noise'] = np.asarray(0.0)
self.default_dynamics_parameters['action_multiplicative_noise'] = np.asarray(0.0)
self.default_dynamics_parameters['action_systematic_noise'] = np.asarray(0.0)
self.default_dynamics_parameters['eef_obs_position_noise'] = np.asarray(0.0)
self.default_dynamics_parameters['eef_obs_velocity_noise'] = np.asarray(0.0)
self.default_dynamics_parameters['obj_obs_position_noise'] = np.asarray(0.0)
self.default_dynamics_parameters['obj_obs_velocity_noise'] = np.asarray(0.0)
self.default_dynamics_parameters['obj_angle_noise'] = np.asarray(0.0)
self.default_dynamics_parameters['obj_density'] = np.asarray(400)
self.default_dynamics_parameters['obj_size'] = np.array([0.0555 / 2, 0.0555 / 2, 0.03 / 2])
self.default_dynamics_parameters['obj_sliding_friction'] = np.asarray(0.4)
self.default_dynamics_parameters['obj_torsional_friction'] = np.asarray(0.01)
link_masses = np.zeros((7,))
for link_name, idx, body_node, mass_node, joint_node in self._robot_link_nodes_generator():
if (mass_node is not None):
dynamics_parameter_value = float(mass_node.get("mass"))
link_masses[idx] = dynamics_parameter_value
self.default_dynamics_parameters['link_masses'] = link_masses
self.default_dynamics_parameters['joint_dampings'] = self._default_damping_params()
self.default_dynamics_parameters['armatures'] = self._default_armature_params()
self.default_dynamics_parameters['joint_frictions'] = self._default_joint_friction_params()
if (use_pid):
gains = self.mujoco_robot.velocity_pid_gains
kps = np.array([gains['right_j{}'.format(actuator)]['p'] for actuator in range(7)])
kis = np.array([gains['right_j{}'.format(actuator)]['i'] for actuator in range(7)])
kds = np.array([gains['right_j{}'.format(actuator)]['d'] for actuator in range(7)])
#
self.default_dynamics_parameters['kps'] = kps
self.default_dynamics_parameters['kis'] = kis
self.default_dynamics_parameters['kds'] = kds
else:
kvs = np.zeros((7,))
for target_joint, jnt_idx, node in self._velocity_actuator_nodes_generator():
gains_value = float(node.get("kv"))
kvs[jnt_idx] = gains_value
self.default_dynamics_parameters['kvs'] = kvs
def _set_default_parameter_sampling_ranges(self):
"""
Returns the parameter ranges to draw samples from in the domain randomisation.
"""
parameter_ranges = {
'joint_forces': np.array([[0.,0.,0.,0.,0.,0.,0.], [1.5,1.5,1.5,1.5,1.5,1.5,1.5]]),#
'acceleration_forces': np.array([[0.,0.,0.,0.,0.,0.,0.], [0.05,0.05,0.05,0.05,0.05,0.05,0.05]]),#
'eef_forces': np.array([[0.,0.,0.,0.,0.,0.], [0.06 ,0.06,0.06,0.01,0.01,0.01,]]), #
'obj_forces': np.array([[0., 0., 0., 0., 0., 0., ], [0.0011, 0.0011, 0.0011, 0.0005, 0.0005, 0.0005, ]]),
'eef_timedelay': np.array([0, 1]),
'obj_timedelay': np.array([0,2]),
'timestep_parameter': np.array([0.0, 0.01]),
'pid_iteration_time': np.array([0., 0.04]),
'mujoco_timestep': np.array([0.001,0.002]),
'action_additive_noise': np.array([0.01, 0.1]),
'action_multiplicative_noise': np.array([0.005,0.02]),
'action_systematic_noise': np.array([-0.05, 0.05]),
'eef_obs_position_noise': np.array([0.0005, 0.001]),
'eef_obs_velocity_noise': np.array([0.0005, 0.001]),
'obj_obs_position_noise': np.array([0.0005, 0.001]),
'obj_obs_velocity_noise': np.array([0.0005, 0.0015]),
'obj_angle_noise': np.array([0.005, 0.05]),
'obj_density': np.array([100, 800]),
'obj_size': np.array([0.995, 1.005]),
'obj_sliding_friction': np.array([0.01, 0.8]),
'obj_torsional_friction': np.array([0.001, 0.3]),
'link_masses': np.array([0.98, 1.02]),
        'joint_dampings': np.array([0.5, 2.]),
#from classifier.sentiment.naiveBayes import BoWStruct, getVector
import numpy as np
from collections import OrderedDict
import nltk
from nltk.corpus import sentiwordnet as swn
from nltk.corpus import wordnet as wn
#nltk.download()
class SentimentFeatures:
"""
Deprecated
"""
# FeatureVector Length
FVLength = 6
#def __init__(self):
def buildStruct(self, dataset):
self.struct = BoWStruct(dataset)
return self.struct
def genVec(self, sentence):
return getVector(sentence, self.struct)
def genPresenceVec(self, sentence):
v = np.zeros(len(self.struct))
keysList = list(self.struct.keys())
for word in sentence.split():
if word in self.struct:
v[keysList.index(word)] = 1
return v
POSDict = {
'JJ':'a',
'JJR':'a',
'JJS':'a',
'NN':'n',
'NNS':'n',
'NNP':'n',
'NNPS':'n',
'RB':'av',
'RBR':'av',
'RBS':'av'
}
def genAdjVec(self, sentence):
v = np.zeros(len(self.adjStruct))
s_tokens = nltk.word_tokenize(sentence)
pos_tagged = nltk.pos_tag(s_tokens)
adjCount = 0
adjTotalScore = np.zeros(3)
advCount = 0
advTotalScore = np.zeros(3)
for (word, pos) in pos_tagged:
if pos in self.POSDict:
if self.POSDict[pos] == 'a':
if word in self.adjStruct:
v[self.adjKeyList.index(word)] = v[self.adjKeyList.index(word)] + 1.
np.add(adjTotalScore, self.wordScore(word, 'a'), adjTotalScore)
adjCount += 1
elif self.POSDict[pos] == 'av':
np.add(advTotalScore, self.wordScore(word, 'a'), advTotalScore)
advCount += 1
adjDiv = np.zeros(3)
advDiv = np.zeros(3)
if(adjCount != 0):
np.true_divide(adjTotalScore, adjCount, adjDiv)
if (advCount != 0):
np.true_divide(advTotalScore, advCount, advDiv)
        nums = np.append(adjDiv, advDiv)
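# Illustrative sketch, separate from the (deprecated) class above: the NLTK
# POS-tagging step that genAdjVec relies on, shown standalone. It assumes the
# 'punkt' and 'averaged_perceptron_tagger' NLTK data have been downloaded; the
# example sentence is made up.
def _demo_pos_filtering(sentence="The quick brown fox jumps gracefully"):
    pos_map = {'JJ': 'a', 'JJR': 'a', 'JJS': 'a', 'RB': 'av', 'RBR': 'av', 'RBS': 'av'}
    tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
    adjectives = [w for w, pos in tagged if pos_map.get(pos) == 'a']
    adverbs = [w for w, pos in tagged if pos_map.get(pos) == 'av']
    return adjectives, adverbs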
r"""Tests for parallel implementation of triangulations."""
import nose.tools as nt
import numpy as np
import os
import time
from cgal4py import _use_multiprocessing
from cgal4py import parallel, delaunay
from cgal4py.domain_decomp import GenericTree
from cgal4py.tests.test_cgal4py import make_points, make_test, MyTestCase
if _use_multiprocessing:
import multiprocessing as mp
from mpi4py import MPI
import ctypes
np.random.seed(10)
@nt.nottest
def lines_load_test(npts, ndim, periodic=False):
lines = [
"from cgal4py.tests.test_cgal4py import make_points",
"pts, le, re = make_points({}, {})".format(npts, ndim),
"load_dict = dict(pts=pts, left_edge=le, right_edge=re,",
" periodic={})".format(periodic)]
return lines
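# Quick usage sketch (not part of the test suite): the helper above only builds
# the source lines of a small load script, e.g. for 10 points in 2D:
def _demo_lines_load_test():
    print("\n".join(lines_load_test(10, 2, periodic=False)))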
class TestGetMPIType(MyTestCase):
def setup_param(self):
self._func = parallel._get_mpi_type
self.param_equal = [(MPI.INT, ['i'], {}),
(MPI.LONG, ['l'], {}),
(MPI.FLOAT, ['f'], {}),
(MPI.DOUBLE, ['d'], {})]
self.param_raises = [(ValueError, ['m'], {})]
class TestWriteMPIScript(MyTestCase):
def setup_param(self):
self._func = parallel.write_mpi_script
fname = 'test_mpi_script.py'
read_lines = lines_load_test(10, 2)
self.param_runs = [
((fname, read_lines, 'triangulate'), {}),
((fname, read_lines, 'triangulate'), dict(use_double=True)),
((fname, read_lines, 'triangulate'), dict(use_buffer=True)),
((fname, read_lines, 'triangulate'), dict(profile=True))]
self._fname = fname
self._read_lines = read_lines
def check_runs(self, args, kwargs):
self.func(*args, **kwargs)
assert(os.path.isfile(args[0]))
os.remove(args[0])
def test_overwrite(self):
self.func(self._fname, self._read_lines, 'volumes')
t0 = os.path.getmtime(self._fname)
time.sleep(1)
self.func(self._fname, self._read_lines, 'volumes', overwrite=False)
t1 = os.path.getmtime(self._fname)
nt.eq_(t0, t1)
time.sleep(1)
self.func(self._fname, self._read_lines, 'volumes', overwrite=True)
t2 = os.path.getmtime(self._fname)
nt.assert_not_equal(t1, t2)
os.remove(self._fname)
class TestParallelLeaf(MyTestCase):
def setup_param(self):
self._func = parallel.ParallelLeaf
self.param_runs = [
((0, 2), {}),
((0, 3), {}),
# ((0, 4), {}),
((0, 2), {'periodic':True}),
((0, 3), {'periodic':True}),
# ((0, 4), {'periodic':True}),
]
def check_runs(self, args, kwargs):
pts, tree = make_test(*args, **kwargs)
left_edges = np.vstack([leaf.left_edge for leaf in tree.leaves])
right_edges = np.vstack([leaf.right_edge for leaf in tree.leaves])
for leaf in tree.leaves:
pleaf = self._func(leaf, left_edges, right_edges)
def check_tessellate(self, args, kwargs):
pts, tree = make_test(*args, **kwargs)
left_edges = np.vstack([leaf.left_edge for leaf in tree.leaves])
right_edges = np.vstack([leaf.right_edge for leaf in tree.leaves])
leaf = tree.leaves[0]
pleaf = self._func(leaf, left_edges, right_edges)
pleaf.pts = pts[tree.idx[pleaf.idx], :]
pleaf.tessellate()
pleaf.tessellate(pts)
pleaf.tessellate(pts, tree.idx)
def check_exchange(self, args, kwargs):
pts, tree = make_test(*args, **kwargs)
left_edges = np.vstack([leaf.left_edge for leaf in tree.leaves])
right_edges = np.vstack([leaf.right_edge for leaf in tree.leaves])
leaf0 = tree.leaves[0]
leaf1 = tree.leaves[1]
pleaf0 = self._func(leaf0, left_edges, right_edges)
pleaf1 = self._func(leaf1, left_edges, right_edges)
pleaf0.tessellate(pts, tree.idx)
pleaf1.tessellate(pts, tree.idx)
out0 = pleaf0.outgoing_points()
out1 = pleaf1.outgoing_points()
pleaf1.incoming_points(0, out0[0][1], out0[1], out0[2], out0[3],
pts[out0[0][1], :])
pleaf1.incoming_points(1, out1[0][1], out1[1], out1[2], out1[3],
pts[out1[0][1], :])
pleaf0.incoming_points(0, out0[0][0], out0[1], out0[2], out0[3],
pts[out0[0][0], :])
pleaf0.incoming_points(1, out1[0][0], out1[1], out1[2], out1[3],
pts[out1[0][0], :])
if kwargs.get('periodic', True):
idx = pleaf1.idx
pos = pts[idx, :]
pleaf1.periodic_left[0] = True
pleaf1.periodic_right[0] = True
pleaf1.left_neighbors.append(0)
pos[0,0] = tree.left_edge[0]
pos[1,0] = tree.right_edge[0]
pleaf1.incoming_points(1, idx, pleaf1.neighbors,
pleaf1.left_edges, pleaf1.right_edges, pos)
pleaf1.incoming_points(0, idx, pleaf0.neighbors,
pleaf0.left_edges, pleaf0.right_edges, pos)
def test_tessellate_generator(self):
for args, kwargs in self.param_runs:
yield self.check_tessellate, args, kwargs
def test_exchange_generator(self):
for args, kwargs in self.param_runs:
yield self.check_exchange, args, kwargs
def test_serialize(self):
pts, tree = make_test(0, 2)
left_edges = np.vstack([leaf.left_edge for leaf in tree.leaves])
right_edges = np.vstack([leaf.right_edge for leaf in tree.leaves])
leaf = tree.leaves[0]
pleaf = self._func(leaf, left_edges, right_edges)
pleaf.tessellate(pts, tree.idx)
pleaf.serialize(store=True)
class TestDelaunayProcessMPI(MyTestCase):
def setup_param(self):
self._func = parallel.DelaunayProcessMPI
taskname1 = 'triangulate'
taskname2 = 'volumes'
ndim = 2
periodic = False
pts, tree = make_test(0, ndim, periodic=periodic)
le = tree.left_edge
re = tree.right_edge
self._pts = pts
self._tree = tree
# self._dummy3 = self._func(taskname2, pts, tree)
# self._dummy4 = self._func(taskname2, pts, tree, use_buffer=True)
# self._leaves = self._dummy1._leaves
        # TODO: use_buffer currently segfaults when run with coverage
self.param_runs = [
# Using C++ communications
# ((taskname1, pts), {}),
# ((taskname1, pts, left_edge=le, right_edge=re), {}),
# ((taskname1, pts), {'use_double':True}),
# ((taskname1, pts), {'limit_mem':True}),
# ((taskname2, pts), {}),
# ((taskname2, pts), {'limit_mem':True}),
# Using Python communications
((taskname1, pts), {'use_python':True}),
((taskname1, pts, tree), {'use_python':True}),
((taskname1, pts, GenericTree.from_tree(tree)),
{'use_python':True}),
((taskname1, pts, tree), {'use_python':True,
'use_double':True}),
# ((taskname1, pts, tree), {'use_python':True},
# 'use_buffer':True}),
((taskname1, pts, tree), {'use_python':True,
'limit_mem':True}),
((taskname2, pts, tree), {'use_python':True}),
# ((taskname2, pts, tree), {'use_python':True},
# 'use_buffer':True}),
((taskname2, pts, tree), {'use_python':True,
'limit_mem':True}),
]
self.param_raises = [
(ValueError, ('null', pts, tree), {})
]
def check_runs(self, args, kwargs):
x = self.func(*args, **kwargs)
x.run()
fname = x.output_filename()
if os.path.isfile(fname):
os.remove(fname)
def test_gather_leaf_arrays(self):
taskname1 = 'triangulate'
pts = self._pts
tree = self._tree
dummy1 = self.func(taskname1, pts, tree, use_python=True)
dummy2 = self.func(taskname1, pts, tree, use_python=True,
use_buffer=True)
leaves = tree.leaves
arr = {leaf.id: np.arange(5*(leaf.id+1)) for leaf in leaves}
dummy1.gather_leaf_arrays(arr)
dummy2.gather_leaf_arrays(arr)
if _use_multiprocessing:
class TestDelaunayProcessMulti(MyTestCase):
def setup_param(self):
self._func = parallel.DelaunayProcessMulti
self.param_runs = [
(('triangulate',), {}),
(('triangulate',), {'limit_mem':True}),
(('volumes',), {}),
(('triangulate',), {'limit_mem':True}),
]
def check_runs(self, args, kwargs):
(taskname,) = args
ndim = 2
periodic = False
pts, tree = make_test(0, ndim, periodic=periodic)
idxArray = mp.RawArray(ctypes.c_ulonglong, tree.idx.size)
ptsArray = mp.RawArray('d', pts.size)
memoryview(idxArray)[:] = tree.idx
memoryview(ptsArray)[:] = pts
left_edges = np.vstack([leaf.left_edge for leaf in tree.leaves])
right_edges = np.vstack([leaf.right_edge for leaf in tree.leaves])
leaves = tree.leaves
nproc = 2 # len(leaves)
count = [mp.Value('i',0),mp.Value('i',0),mp.Value('i',0)]
lock = mp.Condition()
queues = [mp.Queue() for _ in xrange(nproc+1)]
in_pipes = [None for _ in xrange(nproc)]
out_pipes = [None for _ in xrange(nproc)]
for i in range(nproc):
out_pipes[i],in_pipes[i] = mp.Pipe(True)
# Split leaves
task2leaves = [[] for _ in xrange(nproc)]
for leaf in leaves:
task = leaf.id % nproc
task2leaves[task].append(leaf)
# Dummy process
processes = []
for i in xrange(nproc):
processes.append(self._func(
taskname, i, task2leaves[i],
ptsArray, idxArray, left_edges, right_edges,
queues, lock, count, in_pipes[i], **kwargs))
# Perform setup on higher processes
for i in xrange(1, nproc):
P = processes[i]
P.tessellate_leaves()
P.outgoing_points()
for i in xrange(1, nproc):
count[0].value += 1
# Perform entire run on lowest process
P = processes[0]
P.run(test_in_serial=True)
# Do tear down on higher processes
for i in xrange(1, nproc):
P = processes[i]
P.incoming_points()
P.enqueue_result()
for l in range(len(task2leaves[i])):
x = P.receive_result(out_pipes[i])
# Clean up files
for i in xrange(nproc):
P = processes[i]
for leaf in P._leaves:
if kwargs.get('limit_mem', False):
leaf.remove_tess()
ffinal = leaf.tess_output_filename
if os.path.isfile(ffinal):
os.remove(ffinal)
class TestParallelDelaunay(MyTestCase):
def setup_param(self):
self._func = parallel.ParallelDelaunay
ndim_list = [2, 3] # , 4]
param_test = []
self._fprof = 'test_ParallelDelaunay.cProfile'
for ndim in ndim_list:
param_test += [
((0, ndim, 2), {}),
((100, ndim, 2), {'nleaves': 2}),
((100, ndim, 4), {'nleaves': 4}),
((100, ndim, 4), {'nleaves': 8}),
# ((1000, ndim, 2), {'nleaves': 2}),
# ((4*4*2, ndim, 4), {'leafsize': 8}),
# ((1e5, ndim, 8), {'nleaves': 8}),
# ((1e7, ndim, 10), {'nleaves': 10}),
]
self.param_returns = []
for args, kwargs in param_test:
pts, tree = make_test(args[0], args[1], **kwargs)
ans = delaunay.Delaunay(pts)
read_lines = lines_load_test(args[0], args[1])
for limit_mem in [False, True]:
if _use_multiprocessing:
self.param_returns += [
(ans, (pts, tree, args[2]),
{'use_mpi': False, 'limit_mem': limit_mem})
]
for profile in [False, self._fprof]:
self.param_returns += [
(ans, (pts, tree, args[2]),
{'use_mpi': True, 'limit_mem': limit_mem,
'profile': profile, 'use_python':True,
'use_buffer': False}),
(ans, (pts, tree, args[2]),
{'use_mpi': True, 'limit_mem': limit_mem,
'profile': profile, 'use_python':True,
'use_buffer': True}),
# (ans, (pts, tree, args[2]),
# {'use_mpi': True, 'limit_mem': limit_mem,
# 'profile': profile, 'use_python':False})
]
def check_returns(self, result, args, kwargs):
T_seri = result
T_para = self.func(*args, **kwargs)
ndim = args[0].shape[1]
c_seri, n_seri, inf_seri = T_seri.serialize(sort=True)
c_para, n_para, inf_para = T_para.serialize(sort=True)
try:
assert(np.all(c_seri == c_para))
            assert(np.all(n_seri == n_para))
# Copyright (c) 2003-2019 by <NAME>
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import numpy as np
import os
import coord
import time
import fitsio
import treecorr
from test_helper import assert_raises, do_pickle, timer, get_from_wiki, CaptureLog, clear_save
from test_helper import profile
def generate_shear_field(npos, nhalo, rng=None):
# We do something completely different here than we did for 2pt patch tests.
# A straight Gaussian field with a given power spectrum has no significant 3pt power,
# so it's not a great choice for simulating a field for 3pt tests.
# Instead we place N SIS "halos" randomly in the grid.
# Then we translate that to a shear field via FFT.
if rng is None:
rng = np.random.RandomState()
# Generate x,y values for the real-space field
x = rng.uniform(0,1000, size=npos)
y = rng.uniform(0,1000, size=npos)
nh = rng.poisson(nhalo)
# Fill the kappa values with SIS halo profiles.
xc = rng.uniform(0,1000, size=nh)
yc = rng.uniform(0,1000, size=nh)
scale = rng.uniform(20,50, size=nh)
mass = rng.uniform(0.01, 0.05, size=nh)
# Avoid making huge nhalo * nsource arrays. Loop in blocks of 64 halos
nblock = (nh-1) // 64 + 1
kappa = np.zeros_like(x)
gamma = np.zeros_like(x, dtype=complex)
for iblock in range(nblock):
i = iblock*64
j = (iblock+1)*64
dx = x[:,np.newaxis]-xc[np.newaxis,i:j]
dy = y[:,np.newaxis]-yc[np.newaxis,i:j]
dx[dx==0] = 1 # Avoid division by zero.
dy[dy==0] = 1
dx /= scale[i:j]
dy /= scale[i:j]
rsq = dx**2 + dy**2
r = rsq**0.5
k = mass[i:j] / r # "Mass" here is really just a dimensionless normalization propto mass.
kappa += np.sum(k, axis=1)
# gamma_t = kappa for SIS.
g = -k * (dx + 1j*dy)**2 / rsq
gamma += np.sum(g, axis=1)
return x, y, np.real(gamma), np.imag(gamma), kappa
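# Usage sketch (not one of the test cases below): draw a small field and check
# the returned shapes; the sizes here are made up for illustration only.
def _demo_generate_shear_field():
    rng = np.random.RandomState(0)
    x, y, g1, g2, kappa = generate_shear_field(npos=200, nhalo=20, rng=rng)
    assert x.shape == y.shape == g1.shape == g2.shape == kappa.shape == (200,)
    return np.mean(kappa), np.std(kappa)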
@timer
def test_kkk_jk():
# Test jackknife and other covariance estimates for kkk correlations.
# Note: This test takes a while!
# The main version I think is a pretty decent test of the code correctness.
# It shows that bootstrap in particular easily gets to within 50% of the right variance.
# Sometimes within 20%, but because of the randomness there, it varies a bit.
# Jackknife isn't much worse. Just a little below 50%. But still pretty good.
# Sample and Marked are not great for this test. I think they will work ok when the
# triangles of interest are mostly within single patches, but that's not the case we
# have here, and it would take a lot more points to get to that regime. So the
# accuracy tests for those two are pretty loose.
if __name__ == '__main__':
# This setup takes about 740 sec to run.
nhalo = 3000
nsource = 5000
npatch = 32
tol_factor = 1
elif False:
# This setup takes about 180 sec to run.
nhalo = 2000
nsource = 2000
npatch = 16
tol_factor = 2
elif False:
# This setup takes about 51 sec to run.
nhalo = 1000
nsource = 1000
npatch = 16
tol_factor = 3
else:
# This setup takes about 20 sec to run.
# So we use this one for regular unit test runs.
# It's pretty terrible in terms of testing the accuracy, but it works for code coverage.
# But whenever actually working on this part of the code, definitely need to switch
# to one of the above setups. Preferably run the name==main version to get a good
# test of the code correctness.
nhalo = 500
nsource = 500
npatch = 16
tol_factor = 4
file_name = 'data/test_kkk_jk_{}.npz'.format(nsource)
print(file_name)
if not os.path.isfile(file_name):
nruns = 1000
all_kkks = []
rng1 = np.random.RandomState()
for run in range(nruns):
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng1)
print(run,': ',np.mean(k),np.std(k))
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1)
kkk.process(cat)
print(kkk.ntri.ravel().tolist())
print(kkk.zeta.ravel().tolist())
all_kkks.append(kkk)
mean_kkk = np.mean([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
var_kkk = np.var([kkk.zeta.ravel() for kkk in all_kkks], axis=0)
np.savez(file_name, all_kkk=np.array([kkk.zeta.ravel() for kkk in all_kkks]),
mean_kkk=mean_kkk, var_kkk=var_kkk)
data = np.load(file_name)
mean_kkk = data['mean_kkk']
var_kkk = data['var_kkk']
print('mean = ',mean_kkk)
print('var = ',var_kkk)
rng = np.random.RandomState(12345)
x, y, _, _, k = generate_shear_field(nsource, nhalo, rng)
cat = treecorr.Catalog(x=x, y=y, k=k)
kkk = treecorr.KKKCorrelation(nbins=3, min_sep=30., max_sep=100.,
min_u=0.9, max_u=1.0, nubins=1,
min_v=0.0, max_v=0.1, nvbins=1, rng=rng)
kkk.process(cat)
print(kkk.ntri.ravel())
print(kkk.zeta.ravel())
print(kkk.varzeta.ravel())
kkkp = kkk.copy()
catp = treecorr.Catalog(x=x, y=y, k=k, npatch=npatch)
# Do the same thing with patches.
kkkp.process(catp)
print('with patches:')
print(kkkp.ntri.ravel())
print(kkkp.zeta.ravel())
print(kkkp.varzeta.ravel())
np.testing.assert_allclose(kkkp.ntri, kkk.ntri, rtol=0.05 * tol_factor)
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
np.testing.assert_allclose(kkkp.varzeta, kkk.varzeta, rtol=0.05 * tol_factor, atol=3.e-6)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.6 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.7 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.7*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.5 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Now as a cross correlation with all 3 using the same patch catalog.
print('with 3 patched catalogs:')
kkkp.process(catp, catp, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.5*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
# Repeat this test with different combinations of patch with non-patch catalogs:
# All the methods work best when the patches are used for all 3 catalogs. But there
# are probably cases where this kind of cross correlation with only some catalogs having
# patches could be desired. So this mostly just checks that the code runs properly.
# Patch on 1 only:
print('with patches on 1 only:')
kkkp.process(catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 2 only:
print('with patches on 2 only:')
kkkp.process(cat, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.diagonal(cov), var_kkk, rtol=0.9 * tol_factor)
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 3 only:
print('with patches on 3 only:')
kkkp.process(cat, cat, catp)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
# Patch on 1,2
print('with patches on 1,2:')
kkkp.process(catp, catp, cat)
print(kkkp.zeta.ravel())
np.testing.assert_allclose(kkkp.zeta, kkk.zeta, rtol=0.1 * tol_factor, atol=1e-3 * tol_factor)
print('jackknife:')
cov = kkkp.estimate_cov('jackknife')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.3*tol_factor)
print('sample:')
cov = kkkp.estimate_cov('sample')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('marked:')
cov = kkkp.estimate_cov('marked_bootstrap')
print(np.diagonal(cov))
print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_kkk), atol=0.8*tol_factor)
print('bootstrap:')
cov = kkkp.estimate_cov('bootstrap')
print(np.diagonal(cov))
    print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_kkk))))
try:
from keras import backend as K
import numpy as np
except ImportError as err:
exit(err)
def dice_coef(y_true, y_pred, smooth=1):
"""
From: https://github.com/keras-team/keras/issues/3611 and https://github.com/keras-team/keras/issues/3611#issuecomment-243108708
"""
intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
return K.mean((2. * intersection + smooth)/(union + smooth), axis=0)
def dice_coef_loss(y_true, y_pred):
"""
From: https://github.com/keras-team/keras/issues/3611 and https://github.com/keras-team/keras/issues/3611#issuecomment-243108708
"""
return 1 - dice_coef(y_true, y_pred)
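# Worked example (NumPy only, independent of the Keras backend): for one pair
# of 2x2 binary masks, Dice = (2*intersection + smooth) / (|A| + |B| + smooth)
# with smooth = 1, which is the same formula dice_coef applies per sample.
def _demo_dice_numpy():
    y_true = np.array([[1., 1.], [0., 0.]])
    y_pred = np.array([[1., 0.], [0., 0.]])
    intersection = np.sum(y_true * y_pred)          # 1
    total = np.sum(y_true) + np.sum(y_pred)         # 3
    return (2. * intersection + 1.) / (total + 1.)  # 0.75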
def soft_dice_loss(y_true, y_pred, epsilon=1e-6):
"""
From: https://gist.github.com/jeremyjordan/9ea3032a32909f71dd2ab35fe3bacc08
Soft dice loss calculation for arbitrary batch size, number of classes, and number of spatial dimensions.
Assumes the `channels_last` format.
# Arguments
y_true: b x X x Y( x Z...) x c One hot encoding of ground truth
y_pred: b x X x Y( x Z...) x c Network output, must sum to 1 over c channel (such as after softmax)
epsilon: Used for numerical stability to avoid divide by zero errors
# References
V-Net: Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation
https://arxiv.org/abs/1606.04797
More details on Dice loss formulation
https://mediatum.ub.tum.de/doc/1395260/1395260.pdf (page 72)
Adapted from https://github.com/Lasagne/Recipes/issues/99#issuecomment-347775022
"""
# Skip the batch and class axis for calculating Dice score
axes = tuple(range(1, len(y_pred.shape)-1))
numerator = 2. * np.sum(y_pred * y_true, axes)
    denominator = np.sum(np.square(y_pred) + np.square(y_true), axes)
    return 1 - np.mean((numerator + epsilon) / (denominator + epsilon))  # average over classes and batch
#!/usr/bin/env python
# coding: utf-8
from typing import Union
from dataclasses import dataclass
import itertools
import copy
import numpy as np
import geopandas as gpd
import pandas as pd
import shapely.affinity as affine
import shapely.geometry as geom
import shapely.wkt as wkt
from weave_units import WeaveUnit
from tile_units import Tileable
from tile_units import TileShape
@dataclass
class TileGrid:
tile:gpd.GeoSeries = None
to_tile:gpd.GeoSeries = None
grid_type:TileShape = None
extent:gpd.GeoSeries = None
centre:tuple[float] = None
points:gpd.GeoSeries = None
def __init__(self, tile:gpd.GeoSeries, to_tile:gpd.GeoSeries,
grid_type:TileShape = TileShape.RECTANGLE,
to_hex:bool = True) -> None:
self.grid_type = grid_type
self.tile = tile
if self.grid_type == TileShape.TRIANGLE:
self.tile, self.grid_type = self._modify_triangle_tile(to_hex)
self.to_tile = gpd.GeoSeries([to_tile.unary_union])
self.extent, self.centre = self._get_extent()
self.points = self._get_points()
def _get_extent(self) -> gpd.GeoSeries:
mrr = self.to_tile.geometry[0].minimum_rotated_rectangle
mrr_centre = geom.Point(mrr.centroid.coords[0])
mrr_corner = geom.Point(mrr.exterior.coords[0])
radius = mrr_centre.distance(mrr_corner)
return gpd.GeoSeries([mrr_centre.buffer(radius)]), mrr_centre
def _get_points(self) -> gpd.GeoSeries:
if self.grid_type in (TileShape.RECTANGLE, ):
pts = self._get_rect_centres()
elif self.grid_type in (TileShape.HEXAGON, TileShape.TRIHEX):
pts = self._get_hex_centres()
elif self.grid_type in (TileShape.TRIDIAMOND, ):
pts = self._get_diamond_centres()
tr = affine.translate # for efficiency here
tiles = [tr(self.tile.geometry[0], p[0], p[1])
for p in list(pts)]
tiles = [t for t in tiles if self.extent[0].intersects(t)]
return gpd.GeoSeries([t.centroid for t in tiles])
def _get_width_height_left_bottom(self,
gs:gpd.GeoSeries
) -> tuple[float]:
"""Returns width, height, left and bottom limits of a GeoSeries
Args:
gs (geopandas.GeoSeries): GeoSeries for which limits are required.
Returns:
tuple: four float values of width, height, left and bottom of gs.
"""
extent = gs.total_bounds
return extent[2] - extent[0], extent[3] - extent[1], extent[0], extent[1]
def _get_grid(self, ll: tuple[float], nums: tuple[int],
tdim: tuple[float]) -> np.ndarray:
"""Returns rectilinear grid of x,y coordinate pairs.
Args:
ll (tuple[float]): lower left corner coordinates of the grid as
(x, y).
nums (tuple[int]): grid extent as (number of columns, number
of rows).
tdim (tuple[float]): grid resolution as (column width, column
height)
Returns:
np.ndarray: a matrix of nums[0] * nums[1] rows and 2 columns,
each row
containing an x, y coordinate pair.
"""
return np.array(np.meshgrid(np.arange(nums[0]) * tdim[0] + ll[0],
np.arange(nums[1]) * tdim[1] + ll[1])
).reshape(2, nums[0] * nums[1]).transpose()
def _get_rect_centres(self) -> np.ndarray:
"""Returns a rectangular grid of translation vectors that will 'fill' to_tile_gs polygon with the tile_gs polygon (which should be rectangular).
Returns:
np.ndarray: A 2 column array each row being an x, y translation vector.
"""
tt_w, tt_h, tt_x0, tt_y0 = \
self._get_width_height_left_bottom(self.extent)
tile_w, tile_h, tile_x0, tile_y0 = \
self._get_width_height_left_bottom(self.tile)
# number of tiles in each direction
nx = int(np.ceil(tt_w / tile_w))
ny = int(np.ceil(tt_h / tile_h))
        # origin is inset from the lower left corner so the grid is centred
x0 = (tt_w - (nx * tile_w)) / 2 + tt_x0
y0 = (tt_h - (ny * tile_h)) / 2 + tt_y0
return self._get_grid((x0, y0), (nx + 1, ny + 1), (tile_w, tile_h))
def _get_hex_centres(self) -> np.ndarray:
"""Returns a hexagonal grid of translation vectors that will 'fill'
to_tile_gs with the tile_gs polygon (which should be hexagonal).
Returns:
np.ndarray: A 2 column array each row being an x, y translation vector.
"""
tt_w, tt_h, tt_x0, tt_y0 = \
self._get_width_height_left_bottom(self.extent)
tile_w, tile_h, tile_x0, tile_y0 = \
self._get_width_height_left_bottom(self.tile)
nx = int(np.ceil(tt_w / (tile_w * 3 / 2)))
ny = int(np.ceil(tt_h / tile_h))
# the effective width of two columns of hexagonal tiles is 3w/2
x0 = (tt_w - (nx * tile_w * 3 / 2)) / 2 + tt_x0
y0 = (tt_h - (ny * tile_h)) / 2 + tt_y0
# get two offset rectangular grids and combine them
g1 = self._get_grid((x0, y0 + tile_h / 4),
(nx + 1, ny + 1),
(tile_w * 3 / 2, tile_h))
g2 = self._get_grid((x0 + tile_w * 3 / 4, y0 - tile_h / 4),
(nx, ny),
(tile_w * 3 / 2, tile_h))
return np.append(g1, g2).reshape((g1.shape[0] + g2.shape[0], 2))
# Actually returns rhombus centres
#
# /\
# / \
# \ /
# \/
#
def _get_diamond_centres(self) -> np.ndarray:
tt_w, tt_h, tt_x0, tt_y0 = \
self._get_width_height_left_bottom(self.extent)
tile_w, tile_h, tile_x0, tile_y0 = \
self._get_width_height_left_bottom(self.tile)
nx = int(np.ceil(tt_w / tile_w))
        ny = int(np.ceil(tt_h / tile_h))
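# Standalone sketch of the meshgrid trick used by TileGrid._get_grid above:
# build an (nx*ny, 2) array of x,y grid coordinates from a lower-left corner,
# a grid extent and a cell size. The default argument values are made up.
def _demo_offset_grid(ll=(0.0, 0.0), nums=(3, 2), tdim=(10.0, 5.0)):
    return np.array(np.meshgrid(np.arange(nums[0]) * tdim[0] + ll[0],
                                np.arange(nums[1]) * tdim[1] + ll[1])
                    ).reshape(2, nums[0] * nums[1]).transpose()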
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2015-2021 UT-BATTELLE, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""The Perturbation Growth Test:
This tests the null hypothesis that the reference (n) and modified (m) model
ensembles represent the same atmospheric state after each physics parameterization
is applied within a single time-step using the two-sample (n and m) T-test for equal
averages at a 95% confidence level. Ensembles are generated by repeating the
simulation for many initial conditions, with each initial condition subject to
multiple perturbations.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import six
import os
import math
import argparse
# import logging
from pprint import pprint
from collections import OrderedDict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import stats
from netCDF4 import Dataset
import livvkit
from livvkit.util import elements as el
from livvkit.util import functions as fn
from evv4esm.utils import bib2html
# logger = logging.getLogger(__name__)
def parse_args(args=None):
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config',
type=fn.read_json,
default='test/pge_pc0101123.json',
help='A JSON config file containing a `pg` dictionary defining ' +
'the options.')
args = parser.parse_args(args)
    name = list(args.config.keys())[0]
config = args.config[name]
return name, config
def _instance2sub(instance_number, total_perturbations):
"""
Converts an instance number (ii) to initial condition index (ci) and
perturbation index (pi) subscripts
instances use 1-based indexes and vary according to this function:
ii = ci * len(PERTURBATIONS) + pi + 1
where both pi and ci use 0-based indexes.
"""
perturbation_index = (instance_number - 1) % total_perturbations
initial_condition = (instance_number - 1 - perturbation_index) // total_perturbations
return initial_condition, perturbation_index
def _sub2instance(initial_condition, perturbation_index, total_perturbations):
"""
Converts initial condition index (ci) and perturbation index (pi) subscripts
to an instance number (ii)
instances use 1-based indexes and vary according to this function:
ii = ci * len(PERTURBATIONS) + pi + 1
where both pi and ci use 0-based indexes.
"""
instance = initial_condition * total_perturbations + perturbation_index + 1
return instance
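# Round-trip sketch for the two index helpers above: for any 0-based
# (initial condition, perturbation) pair, converting to the 1-based instance
# number and back is the identity. The counts are illustrative only.
def _demo_instance_roundtrip(total_perturbations=3, total_conditions=4):
    for ci in range(total_conditions):
        for pi in range(total_perturbations):
            ii = _sub2instance(ci, pi, total_perturbations)
            assert _instance2sub(ii, total_perturbations) == (ci, pi)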
def rmse_writer(file_name, rmse, perturbation_names, perturbation_variables, init_file_template, model_name):
"""
Opens and writes a netcdf file for PGE curves
This function is here purely to avoid duplicate
codes so that it is easy to maintain code longterm
"""
with Dataset(file_name, 'w') as nc:
ninit, nprt_m1, nvars = rmse.shape
nc.createDimension('ninit', ninit)
nc.createDimension('nprt', nprt_m1 + 1)
nc.createDimension('nprt_m1', nprt_m1)
nc.createDimension('nvars', nvars)
nc_init_cond = nc.createVariable('init_cond_files', str, 'ninit')
nc_perturbation = nc.createVariable('perturbation_names', str, 'nprt')
nc_variables = nc.createVariable('perturbation_variables', str, 'nvars')
nc_rmse = nc.createVariable('rmse', 'f8', ('ninit', 'nprt_m1', 'nvars'))
# NOTE: Assignment to netcdf4 variable length string array can be done
# via numpy arrays, or in a for loop using integer indices.
# NOTE: Numpy arrays can't be created from a generator for some dumb reason,
# so protect with list
nc_perturbation[:] = np.array(list(perturbation_names))
nc_variables[:] = np.array(list(perturbation_variables))
nc_rmse[:] = rmse[:]
for icond in range(0, ninit):
# NOTE: Zero vs One based indexing
nc_init_cond[icond] = init_file_template.format(model_name, 'i', icond+1)
def variables_rmse(ifile_test, ifile_cntl, var_list, var_pefix=''):
"""
Compute RMSE difference between perturbation and control for a set of
variables
Args:
ifile_test: Path to a NetCDF dataset for a perturbed simulation
ifile_cntl: Path to a NetCDF dataset for the control simulation
var_list (list): List of all variables to analyze
var_pefix: Optional prefix (e.g., t_, qv_) to apply to the variable
returns:
rmse (pandas.DataFrame): A dataframe containing the RMSE and maximum
difference details between the perturbed and control simulation
"""
with Dataset(ifile_test) as ftest, Dataset(ifile_cntl) as fcntl:
lat = ftest.variables['lat']
lon = ftest.variables['lon']
rmse = pd.DataFrame(columns=('RMSE', 'max diff', 'i', 'j', 'control', 'test', 'lat', 'lon'), index=var_list)
# reshape for RMSE
dims = len(ftest.variables[var_pefix + var_list[0]].dimensions)
if dims == 3: # see if it is SE grid
nx, ny = ftest.variables[var_pefix + var_list[0]][0, ...].shape
nz = 1
else:
nx, ny, nz = ftest.variables[var_pefix + var_list[0]][0, ...].shape
for ivar, vvar in enumerate(var_list):
var = var_pefix + vvar
if var in ftest.variables:
vtest = ftest.variables[var.strip()][0, ...] # first dimension is time (=0)
vcntl = fcntl.variables[var.strip()][0, ...] # first dimension is time (=0)
                vrmse = math.sqrt(((vtest - vcntl)**2).mean()) / np.mean(vcntl)
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018 <NAME> GmbH
All rights reserved.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
@author: <NAME>
"""
import numpy as np
from prssm.utils.utils import retrieve_config
def init_inducing_inputs(config_inducing_inputs, P, D, **kwargs):
"""
Initialize GP inducing inputs
:param method: string, any of 'kmeans', 'random', 'randomXpermutedU'
:param P: number of inducing inputs
:param D: dimensionality of inducing input
:return: inducing inputs (P, D)
"""
method = retrieve_config(config_inducing_inputs, 'method',
"init_inducing_inputs: Keyword 'method' required. \
Can be 'kmeans', 'random', 'randomXpermutedU'")
# Cluster GP input data, cluster centers become inducing input
if method == 'kmeans':
assert 'X' in kwargs, 'Keyword argument X required: GP training input'
X = kwargs['X']
from sklearn.cluster import KMeans
m = KMeans(n_clusters=P, n_init=50, max_iter=500)
m.fit(X.copy())
Z = m.cluster_centers_.copy()
# Randomly draw inducing inputs i.i.d. from N(0,1)
if method == 'random':
noise = retrieve_config(config_inducing_inputs, 'noise',
"init_inducing_inputs: Keyword 'noise' required.")
Z = np.sqrt(noise) * np.random.randn(P, D)
if method == 'uniform':
low = retrieve_config(config_inducing_inputs, 'low',
"init_inducing_inputs: Keyword 'low' required.")
high = retrieve_config(config_inducing_inputs, 'high',
"init_inducing_inputs: Keyword 'high' required.")
Z = np.random.uniform(low=low, high=high, size=(P, D))
# Random inducing inputs on state and random selection of input sequence
if method == 'randomXpermutedU':
assert 'X' in kwargs, 'Keyword argument X required: GP training input'
assert 'dim_u' in kwargs, 'Keyword argument U required: input dim'
X = kwargs['X']
dim_u = kwargs['dim_u']
        U_ind = np.random.permutation(X[:, :dim_u])
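# Standalone sketch of the k-means initialisation path above (sklearn only);
# the data and sizes are made up for illustration.
def _demo_kmeans_inducing_inputs(P=20, D=3, n=200):
    from sklearn.cluster import KMeans
    X = np.random.randn(n, D)
    m = KMeans(n_clusters=P, n_init=10, max_iter=300)
    m.fit(X)
    return m.cluster_centers_.copy()  # shape (P, D)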
import warnings
from typing import Union
import numpy as np
import pandas as pd
import xarray as xr
from regime_switching.generate.base import CanRandomInstance, SeriesGenerator
from regime_switching.utils.rng import AnyRandomState
class ChainGenerator(SeriesGenerator):
"""Base object for chain generators."""
# __init__ defined
# generate() is an abstractmethod
@property
def states(self) -> xr.DataArray:
return self.params["states"]
class IndependentChainGenerator(ChainGenerator, CanRandomInstance):
"""Independent sampling chain.
Attributes
----------
params : xr.Dataset
'prob' and 'states'
random_state : np.random.Generator
Random generator.
"""
def __init__(
self,
params: xr.Dataset = None,
random_state: AnyRandomState = None,
prob=None,
states=None,
):
super().__init__(
params=params, random_state=random_state, prob=prob, states=states
)
@property
def prob(self) -> xr.DataArray:
"""States probability vector."""
return self.params["prob"].copy()
@classmethod
def check_params(cls, params: xr.Dataset) -> xr.Dataset:
"""Checks assumptions on 'prob' and corrects, if needed."""
p = params["prob"]
if (p < 0).any().item():
raise ValueError(f"Probability cannot be negative: {p}")
if not np.allclose(p.sum(), 1):
warnings.warn("Probabilities don't sum to one, rescaling.")
params["prob"] = p = p / p.sum()
return params
@classmethod
def create_params(cls, prob=None, states=None) -> xr.Dataset:
"""Creates dataset of parameters from keyword arguments."""
if prob is None:
if states is None:
raise ValueError(
"At least one of `params`, `prob`"
" or `states` must be given."
)
if isinstance(states, int):
states = range(states)
states = np.array(states)
# Assume `states` is array-like from now on
prob_per = 1.0 / len(states)
prob = xr.DataArray(
prob_per, coords={"states": states}, dims=["states"]
)
elif isinstance(prob, xr.DataArray):
if "states" not in prob.coords:
raise ValueError(
f"Expected 'states' coord, found: {prob.coords}"
)
# Ignore `states` as we already have the "states" coord.
else:
# prob is array-like
prob = np.array(prob)
if states is None:
states = range(len(prob))
elif isinstance(states, int):
states = range(states)
            states = np.array(states)
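# Standalone sketch of the probability check performed in check_params above:
# negative entries are rejected and a non-normalised vector is rescaled.
def _demo_check_prob(prob):
    p = np.asarray(prob, dtype=float)
    if (p < 0).any():
        raise ValueError(f"Probability cannot be negative: {p}")
    if not np.allclose(p.sum(), 1):
        warnings.warn("Probabilities don't sum to one, rescaling.")
        p = p / p.sum()
    return p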
import os
import pdb
import numpy as np
# pass in copy of objs?
def corrupt_graph(objs, triples, num_attrs, attrs, vocab, random_seed=None):
# either s,p,o, s_attrib, o_attrib
max_corruptable_elements = 5
# max num of objs, preds, attrs in vocab
max_objs = len(vocab['object_idx_to_name'])
max_preds = len(vocab['pred_idx_to_name'])
max_attrs = len(vocab['attribute_idx_to_name'])
# objs is all objects in batch: s/o index into this list
num_triples = len(triples)
s, p, o = np.split(triples, 3, axis=1)
num_triples = len(triples)
# object ids that index into model vocab
subj_objs = objs[s]
obj_objs = objs[o]
for n in range(0, num_triples):
# debug
        subj = np.array(vocab['object_idx_to_name'])
from mmab import *
import argparse
import numpy as np
import scipy.stats
import scipy.special
import plotly.graph_objs as go
def make_rgb_transparent(rgb, bg_rgb, alpha):
'''Returns an RGB vector of values with given transparency level and background.
This function is used for generating colors that are transparent with the background.
It has a similar functionality compared to alpha option in other libraries. The only
difference is that it returns the rgb values of the transparent color.
Args:
rgb: The list rgb values for the original color(s).
bg_rgb: The list of rgb values for the background(s).
alpha: A number between 0 and 1 indicating the transparency level.
Returns:
rgb values for the transparent (mixed) colors.
'''
return [alpha * c1 + (1 - alpha) * c2 for (c1, c2) in zip(rgb, bg_rgb)]
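# Quick usage sketch: mixing pure red with a white background at 30% opacity;
# each channel is alpha*foreground + (1-alpha)*background.
def _demo_make_rgb_transparent():
    rgb = make_rgb_transparent([255, 0, 0], [255, 255, 255], 0.3)
    return [round(c, 6) for c in rgb]   # approximately [255.0, 178.5, 178.5]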
def run_montecarlo(nsim, T_vals, k_vals, bern, sigma, beta_a, beta_b, save_results):
'''Implements monte carlo simulations for comparing regret of algorithms.
This function generates monte carlo instances that are used for comparing the regret
of the algorithms discussed in the paper and returns regret and number of pulls of
arms. The function has the capability of simulating for several values of time horizon
and number of arms. Please see the note on the shape of returns.
Args:
nsim: Number of simulations, i.e., monte carlo instances.
T_vals: The list of values for time horizon.
k_vals: The list of values for number of arms.
bern: A boolean indicating whether to use Bernoulli or gaussian rewards.
sigma: The standard deviation of noise (only used for gaussian rewards).
beta_a: Success parameter of the beta prior.
beta_b: Failure parameter of the beta prior.
save_results: A boolean indicating whether to save the regret and number of pulls
of various algorithms as .npy files.
Returns:
all_regret: A list of final (total) regret of algorithms. Each entry of the list is a
numpy array of size nsim * number of different settings (specified by the length of
T_vals and k_vals).
pulls: Number of pulls across all arms reported only for the last configuration
given in T_vals and k_vals.
'''
configs = len(T_vals)
all_regret_greedy = np.zeros((nsim, configs))
all_regret_ss_ucb = np.zeros((nsim, configs))
all_regret_ucbf = np.zeros((nsim, configs))
all_regret_ucb = np.zeros((nsim, configs))
all_regret_ss_greedy = np.zeros((nsim, configs))
all_regret_ts = np.zeros((nsim, configs))
all_regret_ss_ts = np.zeros((nsim, configs))
for j in range(configs):
k = k_vals[j]
T = T_vals[j]
## Regret Vectors
regret_greedy = np.zeros(nsim)
regret_ss_ucb = np.zeros(nsim)
regret_ucbf = np.zeros(nsim)
regret_ucb = np.zeros(nsim)
regret_ss_greedy = np.zeros(nsim)
regret_ts = np.zeros(nsim)
regret_ss_ts = np.zeros(nsim)
## Pulls Vectors
pulls_greedy = np.zeros((nsim, k))
pulls_ucbf = np.zeros((nsim, k))
pulls_ucb = np.zeros((nsim, k))
pulls_ss_ucb = np.zeros((nsim, k))
pulls_ss_greedy = np.zeros((nsim, k))
pulls_ts = np.zeros((nsim, k))
pulls_ss_ts = np.zeros((nsim, k))
if bern == 0:
greedy_sub_num_a = T**((beta_b+1)/3.0)
ucb_sub_a = T**(beta_b/2.0)
TS_sub_a = T**(beta_b/2.0)
else:
greedy_sub_num_a = T**(beta_b/2.0)
ucb_sub_a = T**(beta_b/2.0)
TS_sub_a = T**(beta_b/2.0)
for itr in range(nsim):
print('T=%d, k=%d, iteration = %d' % (T, k, itr))
means = np.random.beta(beta_a, beta_b, k)
## Sorted version of means.
vv = np.argsort(-means)
A = MMAB(T=T, k=k, means=means, sigma=sigma, binary=bern) # Create an instance
## Greedy
gr = A.greedy()
gr_r = gr[0]
regret_greedy[itr] = np.sum(gr_r)
## SS-Greedy
gr_sub = A.greedy(sub_arm=greedy_sub_num_a)
gr_sub_r = gr_sub[0]
regret_ss_greedy[itr] = np.sum(gr_sub_r)
## UCB
ucb = A.ucb()
ucb_r = ucb[0]
regret_ucb[itr] = np.sum(ucb_r)
## SS-UCB
ucbs = A.ssucb(sub_arm=ucb_sub_a)
ucbs_r = ucbs[0]
regret_ss_ucb[itr] = np.sum(ucbs_r)
## UCBF
ucbf = A.ucb_F(beta=beta_b)
ucbf_r = ucbf[0]
regret_ucbf[itr] = np.sum(ucbf_r)
## TS
ts = A.ts(beta_a=beta_a, beta_b=beta_b)
ts_r = ts[0]
regret_ts[itr] = np.sum(ts_r)
## SS-TS
ts_s = A.ts(beta_a=beta_a, beta_b=beta_b, sub_arm=TS_sub_a)
ts_s_r = ts_s[0]
regret_ss_ts[itr] = np.sum(ts_s_r)
if j == configs-1:
gr_np = gr[2]
pulls_greedy[itr, :] = gr_np[vv]
##
gr_sub_np = gr_sub[2]
pulls_ss_greedy[itr, :] = gr_sub_np[vv]
##
ucb_np = ucb[2]
pulls_ucb[itr, :] = ucb_np[vv]
##
ucbs_np = ucbs[2]
pulls_ss_ucb[itr, :] = ucbs_np[vv]
##
ucbf_np = ucbf[2]
pulls_ucbf[itr, :] = ucbf_np[vv]
##
ts_np = ts[2]
pulls_ts[itr, :] = ts_np[vv]
##
ts_s_np = ts_s[2]
pulls_ss_ts[itr, :] = ts_s_np[vv]
regret = np.array([regret_greedy,
regret_ss_greedy,
regret_ucb,
regret_ss_ucb,
regret_ucbf,
regret_ts,
regret_ss_ts])
pulls = np.array([pulls_greedy,
pulls_ss_greedy,
pulls_ucb,
pulls_ss_ucb,
pulls_ucbf,
pulls_ts,
pulls_ss_ts])
if save_results == 1:
if bern == 0:
h = 'Norm_regret_T_{:d}_k_{:d}_a_{:.1f}_b_{:,.1f}_nsim_{:d}'.format(
T_vals[j], k_vals[j], beta_a, beta_b, nsim)
h = h.replace('.', '_')
np.save(h + '.npy', regret)
h = 'Norm_pulls_T_{:d}_k_{:d}_a_{:.1f}_b_{:,.1f}_nsim_{:d}'.format(
T_vals[j], k_vals[j], beta_a, beta_b, nsim)
h = h.replace('.', '_')
np.save(h + '.npy', pulls)
else:
h = 'Bern_regret_T_{:d}_k_{:d}_a_{:.1f}_b_{:,.1f}_nsim_{:d}'.format(
T_vals[j], k_vals[j], beta_a, beta_b, nsim)
h = h.replace('.', '_')
np.save(h + '.npy', regret)
h = 'Bern_pulls_T_{:d}_k_{:d}_a_{:.1f}_b_{:,.1f}_nsim_{:d}'.format(
T_vals[j], k_vals[j], beta_a, beta_b, nsim)
h = h.replace('.', '_')
np.save(h + '.npy', pulls)
all_regret_greedy[:, j] = regret_greedy
all_regret_ss_greedy[:, j] = regret_ss_greedy
all_regret_ucb[:, j] = regret_ucb
all_regret_ss_ucb[:, j] = regret_ss_ucb
all_regret_ucbf[:, j] = regret_ucbf
all_regret_ts[:, j] = regret_ts
all_regret_ss_ts[:, j] = regret_ss_ts
all_regret = np.array([all_regret_greedy,
all_regret_ss_greedy,
all_regret_ucb,
all_regret_ss_ucb,
all_regret_ucbf,
all_regret_ts,
all_regret_ss_ts])
if save_results == 1:
if bern == 0:
h = 'Norm_all_regret_T_{:d}_k_{:d}_a_{:.1f}_b_{:,.1f}_nsim_{:d}'.format(
T_vals[j], k_vals[j], beta_a, beta_b, nsim)
h = h.replace('.', '_')
np.save(h + '.npy', list([all_regret, T_vals, k_vals, beta_a, beta_b, nsim]))
else:
h = 'Norm_all_regret_T_{:d}_k_{:d}_a_{:.1f}_b_{:,.1f}_nsim_{:d}'.format(
T_vals[j], k_vals[j], beta_a, beta_b, nsim)
h = h.replace('.', '_')
np.save(h + '.npy', list([all_regret, T_vals, k_vals, beta_a, beta_b, nsim]))
return all_regret, pulls
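# Usage sketch with deliberately tiny sizes so it finishes quickly; the real
# experiments use far larger nsim, T and k.
def _demo_run_montecarlo():
    return run_montecarlo(nsim=2, T_vals=[200], k_vals=[20], bern=1,
                          sigma=1.0, beta_a=1.0, beta_b=1.0, save_results=0)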
def plot_results(T_vals, k_vals, regret, pulls, bern, beta_a, beta_b, save_plots):
'''Generates regret and profile of pulls plot.
This function generates the boxplots of regret and also the pulls vs the quantile
of the (mean of) arms. The use of this function requires the plotly package.
Args:
T_vals: The list of values for time horizon.
k_vals: The list of values for number of arms.
regret: The list of final regret values for different configs defined in T_vals and
k_vals.
pulls: The list of pulls for different algorithms for the last config defined in
T_vals and k_vals.
bern: A boolean indicating whether to use Bernoulli or gaussian rewards.
beta_a: Success parameter of the beta prior.
beta_b: Failure parameter of the beta prior.
save_plots: A boolean indicating whether to save the plots as png files or not.
'''
num_divs = 10
z = max(np.floor(k_vals[-1] / num_divs), 1) + 1
vals = np.arange(0, k_vals[-1], int(z))
num_pts = regret[0].shape[1]
niter = regret[0].shape[0]
NUM_COLORS = 7
MARKERS = ['circle', 'square', 'diamond', 'cross', 'x', 'triangle', 'pentagon',
'hexagram', 'star']
legends = ['Greedy', 'SS-Greedy', 'UCB', 'SS-UCB', 'UCB-F', 'TS', 'SS-TS']
pts_inc = num_pts
color_vals = np.array([[0, 0, 0], [31, 119, 180], [255, 127, 14],
[44, 160, 44], [214, 39, 40], [148, 103, 189],
[227,119,194], [188,189,34], [23, 190, 207]])
color_alph = np.zeros((color_vals.shape[0], 3))
for i in range(color_vals.shape[0]):
color_alph[i,:] = make_rgb_transparent(color_vals[i,:], [255, 255, 255], 0.3)
colors=['rgb(0,0,0)', 'rgb(31, 119, 180)', 'rgb(255, 127, 14)',
'rgb(44, 160, 44)', 'rgb(214, 39, 40)', 'rgb(148, 103, 189)',
'rgb(227, 119, 194)', 'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
x_leg = []
for j in range(pts_inc):
f = niter * ['T={:d}, k={:d}'.format(
T_vals[j-pts_inc+num_pts], k_vals[j+num_pts-pts_inc])]
x_leg += f
fig = go.Figure()
for i in range(NUM_COLORS):
fig.add_trace(go.Box(
y=regret[i][:,num_pts-pts_inc:].transpose().flatten(),
x=x_leg,
name=legends[i],
fillcolor = 'rgb({:f},{:f},{:f})'.format(
color_alph[i, 0], color_alph[i, 1], color_alph[i, 2]),
marker=dict(
color=colors[i],
size=10,
opacity=1,
symbol = i
),
showlegend = False,
boxmean = True,
boxpoints = 'outliers',
))
fig.add_trace(go.Scatter(
y=[0.9*np.max(regret)],
x=[0.6],
name=legends[i],
mode='markers',
marker_symbol=i,
marker_size=16,
marker_color=colors[i]
))
fig.update_layout(
autosize = False,
yaxis=dict(
showgrid=True,
zeroline=True,
gridcolor='rgb(127, 127, 127)',
gridwidth=1,
zerolinecolor='rgb(0, 0, 0)',
zerolinewidth=3,
title = dict(
text = 'Regret',
font = dict(
family = 'sans-serif',
size = 35,
color = 'black'
),
),
),
boxmode='group',
width=1200,
height=1200,
font=dict(
family='sans-serif',
size=35,
color='black',
),
legend=dict(
x=0.8,
y=1,
traceorder='normal',
font=dict(
family='sans-serif',
size=35,
color='black'
),
bgcolor='white',
bordercolor='Black',
borderwidth=6,
),
xaxis=dict(
ticktext = [x_leg[1], x_leg[niter+1]],
tickvals = [0, 1],
tickmode = 'array',
tickfont_size = 30,
scaleanchor = 'y',
ticklen = 2,
),
margin=dict(l=120, r=50, t=20, b=20),
paper_bgcolor='rgb(255,255,255)',
plot_bgcolor='rgb(255,255,255)',
boxgap = 0.4,
boxgroupgap = 0.1,
)
fig.show()
if save_plots == 1:
if bern == 0:
h = 'Norm_regret_a_{:.1f}_b_{:,.1f}'.format(
beta_a, beta_b)
h = h.replace('.', '_')
h = h + '.png'
fig.write_image(h, scale = 1)
else:
h = 'Bern_regret_a_{:.1f}_b_{:,.1f}'.format(
beta_a, beta_b)
h = h.replace('.', '_')
h = h + '.png'
fig.write_image(h, scale = 1)
tickz = []
for j in range(num_divs):
if j == 0:
h = 'Top {:.0%} Arms'.format((j+1)/num_divs)
else:
h = '{:.0%}-{:.0%}'.format(j/num_divs, (j+1)/num_divs)
tickz.append(h)
h = int(np.floor(k_vals[-1]/num_divs))
pulls_avg = np.zeros((num_divs, NUM_COLORS))
pulls_std = np.zeros((num_divs, NUM_COLORS))
for i in range(NUM_COLORS):
for j in range(num_divs):
z = np.arange(j*h,(j+1)*h)
pulls_avg[j, i] = np.mean(pulls[i][:,z])
pulls_std[j, i] = np.std(np.mean(pulls[i][:,z], axis = 1))/np.sqrt(niter)
fig1 = go.Figure()
for i in range(NUM_COLORS):
fig1.add_trace(go.Scatter(
y = np.log(pulls_avg[:, i]),
x = np.arange(1,num_divs+1),
name = legends[i],
marker_symbol = i,
marker_size = 16,
marker_color = colors[i],
mode = 'lines + markers',
error_y = dict(
type='data',
                array=np.log(pulls_avg[:,i]+2*pulls_std[:,i])
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import FairMot._init_paths
import os
import os.path as osp
import cv2
import logging
import argparse
import motmetrics as mm
import numpy as np
import torch
from FairMot.lib.tracker.multitracker import JDETracker
from FairMot.lib.tracking_utils import visualization as vis
from utils.log import Log
logger = Log(__name__,__name__).getlog()
from FairMot.lib.tracking_utils.timer import Timer
from FairMot.lib.tracking_utils.evaluation import Evaluator
import FairMot.lib.datasets.dataset.jde as datasets
from FairMot.lib.tracking_utils.utils import mkdir_if_missing
from FairMot.lib.opts import opts
from utils.sort_by_point import sort_by_point
def write_results(filename, results, data_type):
if data_type == 'mot':
save_format = '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'
elif data_type == 'kitti':
save_format = '{frame} {id} pedestrian 0 0 -10 {x1} {y1} {x2} {y2} -10 -10 -10 -1000 -1000 -1000 -10\n'
else:
raise ValueError(data_type)
with open(filename, 'w') as f:
for frame_id, tlwhs, track_ids,ReID_feat in results:
if data_type == 'kitti':
frame_id -= 1
for tlwh, track_id in zip(tlwhs, track_ids):
if track_id < 0:
continue
x1, y1, w, h = tlwh
x2, y2 = x1 + w, y1 + h
line = save_format.format(frame=frame_id, id=track_id, x1=x1, y1=y1, x2=x2, y2=y2, w=w, h=h)
f.write(line)
logger.info('save results to {}'.format(filename))
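# Format sketch (hypothetical numbers): with data_type='mot' each detection row
# becomes a MOT-challenge style CSV line; this helper only formats one line and
# does not touch the tracker.
def _demo_mot_line(frame=1, track_id=2, tlwh=(100.0, 200.0, 50.0, 120.0)):
    x1, y1, w, h = tlwh
    return '{frame},{id},{x1},{y1},{w},{h},1,-1,-1,-1\n'.format(
        frame=frame, id=track_id, x1=x1, y1=y1, w=w, h=h)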
def eval_seq(opt, dataloader, data_type, result_filename, save_dir=None, show_image=True, Input_tracker= None,frame_rate=30):
if save_dir:
mkdir_if_missing(save_dir)
if Input_tracker == None:
tracker = JDETracker(opt, frame_rate=frame_rate) # What is JDE Tracker?
else:
tracker = Input_tracker
timer = Timer()
results = []
frame_id = 0
for path, img, img0 in dataloader:
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
online_targets = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
vertical = tlwh[2] / tlwh[3] > 1.6
if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
timer.toc()
# save results
results.append((frame_id + 1, online_tlwhs, online_ids))
if show_image or save_dir is not None:
online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
fps=1. / timer.average_time)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
frame_id += 1
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
# save results
write_results(result_filename, results, data_type)
return frame_id, timer.average_time, timer.calls
def Short_track_eval(opt, dataloader, data_type, result_filename,target_frame, reference_point, save_dir=None, show_image=True, Input_tracker= None,frame_rate=30):
if save_dir:
mkdir_if_missing(save_dir)
if Input_tracker == None:
tracker = JDETracker(opt, frame_rate=frame_rate) # What is JDE Tracker?
else:
tracker = Input_tracker
timer = Timer()
results = []
frame_id = 0
img0_array = []
for path, img, img0 in dataloader:
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
online_targets = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
online_ReID_features = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
ReID_feature = t.curr_feat
vertical = tlwh[2] / tlwh[3] > 1.6 # w / h > 1.6
if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_ReID_features.append(ReID_feature)
timer.toc()
# save results
results.append((frame_id + 1, online_tlwhs, online_ids, online_ReID_features))
img0_array.append(img0)
# if show_image or save_dir is not None:
# online_im = vis.plot_tracking(img0, online_tlwhs, online_ids, frame_id=frame_id,
# fps=1. / timer.average_time)
# if show_image:
# cv2.imshow('online_im', online_im)
# if save_dir is not None:
# cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
frame_id += 1
    for bias in [0, 2, -2]:  # the target is usually detectable in at least one of these frames
        input_result = results[target_frame + bias]
        if len(input_result[1]) == 0:  # the target frame may contain no detections at all
            target_id = None
            continue
        new_reference_point, target_id = sort_by_point(results[target_frame + bias], reference_point)
        if target_id is not None:
            break
    # If none of the three frames (before, target, after) yields a detection,
    # this action cannot be distinguished; give up.
    if target_id is None:
        # target does not exist
        return None
    # Crop out each sub-box of the target.
for r_index, result in enumerate(results):
img0 = img0_array[r_index]
I_h,I_w,_ = img0.shape
bboxes = result[1]
ids = result[2]
        for id_index, tid in enumerate(ids):
            if tid != target_id:
                continue
            box = bboxes[id_index]
            x1, y1, w, h = box
            intbox = tuple(map(int, (max(0, x1), max(0, y1), min(x1 + w, I_w), min(y1 + h, I_h))))
            # print(intbox)
            sub_img = img0[intbox[1]:intbox[3], intbox[0]:intbox[2]]
            cv2.imwrite(os.path.join(save_dir, '{}_{}.jpg'.format(r_index, tid)), sub_img)
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
# save results
write_results(result_filename, results, data_type)
return frame_id, timer.average_time, timer.calls
def Short_track(tracker, dataloader, opt):
    '''
    Track the given short clip with the provided tracker.
    '''
from utils.timer import show_memory_info
timer = Timer()
results = []
for frame_id, [path, img, img0] in enumerate(dataloader):
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
online_targets = tracker.update(blob, img0)
online_tlwhs = []
online_ids = []
online_ReID_features = []
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
ReID_feature = t.curr_feat
vertical = tlwh[2] / tlwh[3] > 1.6 # w / h > 1.6
if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
online_tlwhs.append(tlwh)
online_ids.append(tid)
online_ReID_features.append(ReID_feature)
timer.toc()
# save results
        # (relative frame index, bboxes, corresponding ids, ReID features, original image)
results.append((frame_id, online_tlwhs, online_ids, online_ReID_features,img0))
# show_memory_info('action _ {}, {}'.format(action_index, 'Short_track results.append'))
logger.debug('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
return results
def detect(opt, tracker, dataloader, dir_id, save_dir=None, show_image=True ):
if save_dir:
mkdir_if_missing(save_dir)
timer = Timer()
results = []
frame_id = 0
save_dir_subimgs = os.path.join(save_dir,'subimg')
os.makedirs(save_dir_subimgs,exist_ok=True)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter(os.path.join(save_dir,'out_put.avi'), fourcc, 20.0, (1920, 1080), True)
for path, img, img0 in dataloader:
if frame_id % 20 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(frame_id, 1. / max(1e-5, timer.average_time)))
# run tracking
timer.tic()
blob = torch.from_numpy(img).cuda().unsqueeze(0)
[dets,id_feature] = tracker.update_for_detection(blob, img0, save_dir, frame_id)
timer.toc()
# save results
results.append((frame_id + 1, dets, id_feature))
if show_image or save_dir is not None:
online_im = vis.plot_detections(img0, dets, save_dir , dir_id, frame_id)
if show_image:
cv2.imshow('online_im', online_im)
if save_dir is not None:
cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)), online_im)
frame_id += 1
# save results
# write_results(result_filename, results, data_type)
return frame_id, timer.average_time, timer.calls
def main(opt, data_root='/data/MOT16/train', det_root=None, seqs=('MOT16-05',), exp_name='demo',
save_images=False, save_videos=False, show_image=True):
logger.setLevel(logging.INFO)
result_root = os.path.join(data_root, '..', 'results', exp_name)
mkdir_if_missing(result_root)
data_type = 'mot'
# run tracking
accs = []
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
output_dir = os.path.join(data_root, '..', 'outputs', exp_name, seq) if save_images or save_videos else None
logger.info('start seq: {}'.format(seq))
dataloader = datasets.LoadImages(osp.join(data_root, seq, 'img1'), opt.img_size)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
meta_info = open(os.path.join(data_root, seq, 'seqinfo.ini')).read()
frame_rate = int(meta_info[meta_info.find('frameRate') + 10:meta_info.find('\nseqLength')])
nf, ta, tc = eval_seq(opt, dataloader, data_type, result_filename,
save_dir=output_dir, show_image=show_image, frame_rate=frame_rate)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
# eval
logger.info('Evaluate seq: {}'.format(seq))
evaluator = Evaluator(data_root, seq, data_type)
accs.append(evaluator.eval_file(result_filename))
if save_videos:
output_video_path = osp.join(output_dir, '{}.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir, output_video_path)
os.system(cmd_str)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
    avg_time = all_time / np.sum(timer_calls)
from pyrfuniverse.envs import RFUniverseBaseEnv
from pyrfuniverse.envs import RFUniverseGymGoalWrapper
import numpy as np
from gym import spaces
from gym.utils import seeding
import copy
class Robotiq85NailCardEnv(RFUniverseGymGoalWrapper):
def __init__(
self,
rotation_factor=5,
vertical_movement_factor=0.01,
gripper_movement_factor=100,
nail_movement_factor=0.01,
goal_baseline=0.05,
executable_file=None,
):
super().__init__(
executable_file=executable_file,
camera_channel_id=None,
rigidbody_channel_id="44fe03fb-0021-11ec-b7f2-18c04d443e7d",
articulation_channel_id="44fe03fc-0021-11ec-b7f2-18c04d443e7d",
game_object_channel_id="44fe03fa-0021-11ec-b7f2-18c04d443e7d",
)
self.rotation_factor = rotation_factor
self.vertical_movement_factor = vertical_movement_factor
self.gripper_movement_factor = gripper_movement_factor
self.nail_movement_factor = nail_movement_factor
self.bit_wise_factor = np.array([
self.rotation_factor,
self.vertical_movement_factor,
self.gripper_movement_factor,
self.nail_movement_factor
])
self.goal_baseline = goal_baseline
self.t = 0
self.goal = self._sample_goal()
self.action_space = spaces.Box(
low=-1, high=1, shape=(4,), dtype=np.float32
)
obs = self._get_obs()
self.observation_space = spaces.Dict({
'observation': spaces.Box(-np.inf, np.inf, shape=obs['observation'].shape, dtype=np.float32),
'desired_goal': spaces.Box(-np.inf, np.inf, shape=obs['desired_goal'].shape, dtype=np.float32),
'achieved_goal': spaces.Box(-np.inf, np.inf, shape=obs['achieved_goal'].shape, dtype=np.float32)
})
def step(self, a: np.ndarray):
action = a.copy()
action_ctrl = action * self.bit_wise_factor
curr_state = self._get_gripper_extra_param()
target_state = curr_state + action_ctrl
self._set_target_state(target_state)
self.t += 1
obs = self._get_obs()
done = False
info = {
'is_success': self._check_success(obs)
}
reward = self.compute_reward(obs['achieved_goal'], obs['desired_goal'], info)
return obs, reward, done, info
def reset(self):
super().reset()
self.env.reset()
self._reset_object()
self.t = 0
self.goal = self._sample_goal()
return self._get_obs()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def render(self, mode='human'):
self._step()
def compute_reward(self, achieved_goal, desired_goal, info):
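        # Sparse reward: 0 when the achieved height reaches the desired goal, otherwise -1.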
higher_distance = achieved_goal - desired_goal
return float(-1 * (higher_distance[0] < 0))
def _get_obs(self):
gripper_position = np.array(self.articulation_channel.data[0]['positions'][16])
gripper_velocity = np.array(self.articulation_channel.data[0]['velocities'][16])
gripper_extra_param = self._get_gripper_extra_param()
object_pos = np.array(self.rigidbody_channel.data[0]['position'])
object_rotation = np.array(self.rigidbody_channel.data[0]['rotation'])
object_velocity = np.array(self.rigidbody_channel.data[0]['velocity'])
        object_angular_vel = np.array(self.rigidbody_channel.data[0]['angular_vel'])
import base64
import datetime
import io
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from xlrd.xldate import xldate_as_datetime
from yattag import Doc
plt.rcParams.update({"figure.autolayout": True})
import matplotlib.gridspec as gridspec
import pandas as pd
import scipy.stats
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import logging
"""
TF_CPP_MIN_LOG_LEVEL:
Defaults to 0, so all logs are shown. Set TF_CPP_MIN_LOG_LEVEL to 1 to filter out INFO logs, 2 to additionally filter out WARNING, 3 to additionally filter out ERROR.
"""
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
from tensorflow import keras
class NNetwork(object):
def __init__(self, network_count=200, epochs=1000):
logging.getLogger().setLevel(logging.INFO)
self.xl_dateformat = r"%Y-%m-%dT%H:%M"
self.model = None
self.pretrained_networks = []
self.software_version = "2.0.1"
self.input_filename = None
self.today = str(datetime.date.today())
self.avg_time_elapsed = 0
self.predictors_scaler = MinMaxScaler(feature_range=(-1, 1))
self.targets_scaler = MinMaxScaler(feature_range=(-1, 1))
self.history = None
self.file = None
self.skipped_rows = []
self.ruleset = []
self.layer1_neurons = 12
self.network_count = network_count
self.epochs = epochs
self.predictors = None
self.targets = None
self.predictions = None
self.avg_case_results_am = None
self.avg_case_results_pm = None
self.worst_case_results_am = None
self.worst_case_results_pm = None
self.WB_bandwidth = None
        self.post_process_check = False  # Whether post-processing beats the raw ensemble; if False use raw results, if True use post-processed results
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(self.layer1_neurons, input_dim=5, activation="tanh")
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer, metrics=["mse"])
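    # Typical usage (a minimal sketch; the file and directory names are illustrative):
    #   net = NNetwork(network_count=200, epochs=1000)
    #   net.import_data_from_csv("input_data.csv")
    #   net.set_up_model()
    #   net.train_SWOT_network("trained_networks")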
def import_data_from_csv(self, filename):
"""
Imports data to the network by a comma-separated values (CSV) file.
Load data to a network that are stored in .csv file format.
The data loaded from this method can be used both for training reasons as
well as to make predictions.
:param filename: String containing the filename of the .csv file containing the input data (e.g "input_data.csv")
"""
df = pd.read_csv(filename)
self.file = df.copy()
global FRC_IN
global FRC_OUT
global WATTEMP
global COND
# Locate the fields used as inputs/predictors and outputs in the loaded file
# and split them
if "se1_frc" in self.file.columns:
FRC_IN = "se1_frc"
WATTEMP = "se1_wattemp"
COND = "se1_cond"
FRC_OUT = "se4_frc"
elif "ts_frc1" in self.file.columns:
FRC_IN = "ts_frc1"
WATTEMP = "ts_wattemp"
COND = "ts_cond"
FRC_OUT = "hh_frc1"
elif "ts_frc" in self.file.columns:
FRC_IN = "ts_frc"
WATTEMP = "ts_wattemp"
COND = "ts_cond"
FRC_OUT = "hh_frc"
# Standardize the DataFrame by specifying rules
# To add a new rule, call the method execute_rule with the parameters (description, affected_column, query)
self.execute_rule("Invalid tapstand FRC", FRC_IN, self.file[FRC_IN].isnull())
self.execute_rule("Invalid household FRC", FRC_OUT, self.file[FRC_OUT].isnull())
self.execute_rule(
"Invalid tapstand date/time",
"ts_datetime",
self.valid_dates(self.file["ts_datetime"]),
)
self.execute_rule(
"Invalid household date/time",
"hh_datetime",
self.valid_dates(self.file["hh_datetime"]),
)
self.skipped_rows = df.loc[df.index.difference(self.file.index)]
self.file.reset_index(drop=True, inplace=True) # fix dropped indices in pandas
# Locate the rows of the missing data
drop_threshold = 0.90 * len(self.file.loc[:, [FRC_IN]])
nan_rows_watt = self.file.loc[self.file[WATTEMP].isnull()]
if len(nan_rows_watt) < drop_threshold:
self.execute_rule(
"Missing Water Temperature Measurement",
WATTEMP,
self.file[WATTEMP].isnull(),
)
nan_rows_cond = self.file.loc[self.file[COND].isnull()]
if len(nan_rows_cond) < drop_threshold:
self.execute_rule("Missing EC Measurement", COND, self.file[COND].isnull())
self.skipped_rows = df.loc[df.index.difference(self.file.index)]
self.file.reset_index(drop=True, inplace=True)
start_date = self.file["ts_datetime"]
end_date = self.file["hh_datetime"]
durations = []
all_dates = []
collection_time = []
for i in range(len(start_date)):
try:
# excel type
start = float(start_date[i])
end = float(end_date[i])
start = xldate_as_datetime(start, datemode=0)
if start.hour > 12:
collection_time = np.append(collection_time, 1)
else:
collection_time = np.append(collection_time, 0)
end = xldate_as_datetime(end, datemode=0)
except ValueError:
# kobo type
start = start_date[i][:16].replace("/", "-")
end = end_date[i][:16].replace("/", "-")
start = datetime.datetime.strptime(start, self.xl_dateformat)
if start.hour > 12:
collection_time = np.append(collection_time, 1)
else:
collection_time = np.append(collection_time, 0)
end = datetime.datetime.strptime(end, self.xl_dateformat)
durations.append((end - start).total_seconds())
all_dates.append(datetime.datetime.strftime(start, self.xl_dateformat))
self.durations = durations
self.time_of_collection = collection_time
self.avg_time_elapsed = np.mean(durations)
# Extract the column of dates for all data and put them in YYYY-MM-DD format
self.file["formatted_date"] = all_dates
predictors = {
FRC_IN: self.file[FRC_IN],
"elapsed time": (np.array(self.durations) / 3600),
"time of collection (0=AM, 1=PM)": self.time_of_collection,
}
self.targets = self.file.loc[:, FRC_OUT]
self.var_names = [
"Tapstand FRC (mg/L)",
"Elapsed Time",
"time of collection (0=AM, 1=PM)",
]
self.predictors = pd.DataFrame(predictors)
if len(nan_rows_watt) < drop_threshold:
self.predictors[WATTEMP] = self.file[WATTEMP]
self.var_names.append("Water Temperature(" + r"$\degree$" + "C)")
self.median_wattemp = np.median(self.file[WATTEMP].dropna().to_numpy())
self.upper95_wattemp = np.percentile(
self.file[WATTEMP].dropna().to_numpy(), 95
)
if len(nan_rows_cond) < drop_threshold:
self.predictors[COND] = self.file[COND]
self.var_names.append("EC (" + r"$\mu$" + "s/cm)")
self.median_cond = np.median(self.file[COND].dropna().to_numpy())
self.upper95_cond = np.percentile(self.file[COND].dropna().to_numpy(), 95)
self.targets = self.targets.values.reshape(-1, 1)
self.datainputs = self.predictors
self.dataoutputs = self.targets
self.input_filename = filename
def set_up_model(self):
self.optimizer = keras.optimizers.Nadam(lr=0.01, beta_1=0.9, beta_2=0.999)
self.model = keras.models.Sequential()
self.model.add(
keras.layers.Dense(
self.layer1_neurons,
input_dim=len(self.datainputs.columns),
activation="tanh",
)
)
self.model.add(keras.layers.Dense(1, activation="linear"))
self.model.compile(loss="mse", optimizer=self.optimizer)
def train_SWOT_network(self, directory):
"""Train the set of 200 neural networks on SWOT data
Trains an ensemble of 200 neural networks on se1_frc, water temperature,
water conductivity."""
if not os.path.exists(directory):
os.makedirs(directory)
self.predictors_scaler = self.predictors_scaler.fit(self.predictors)
self.targets_scaler = self.targets_scaler.fit(self.targets)
x = self.predictors
t = self.targets
self.calibration_predictions = []
self.trained_models = {}
for i in range(self.network_count):
logging.info('Training Network ' + str(i))
model_out = self.train_network(x, t, directory)
self.trained_models.update({'model_' + str(i): model_out})
def train_network(self, x, t, directory):
"""
Trains a single Neural Network on imported data.
This method trains Neural Network on data that have previously been imported
to the network using the import_data_from_csv() method.
The network used is a Multilayer Perceptron (MLP). Input and Output data are
normalized using MinMax Normalization.
The input dataset is split in training and validation datasets, where 80% of the inputs
are the training dataset and 20% is the validation dataset.
The training history is stored in a variable called self.history (see keras documentation:
keras.model.history object)
Performance metrics are calculated and stored for evaluating the network performance.
"""
tf.keras.backend.clear_session()
early_stopping_monitor = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=10,
restore_best_weights=True)
x_norm = self.predictors_scaler.transform(x)
t_norm = self.targets_scaler.transform(t)
trained_model = keras.models.clone_model(self.model)
x_norm_train, x_norm_val, t_norm_train, t_norm_val = train_test_split(x_norm, t_norm, train_size=0.333,
shuffle=True)
new_weights = [np.random.uniform(-0.05, 0.05, w.shape) for w in trained_model.get_weights()]
trained_model.set_weights(new_weights)
trained_model.compile(loss='mse', optimizer=self.optimizer)
trained_model.fit(x_norm_train, t_norm_train, epochs=self.epochs, validation_data=(x_norm_val, t_norm_val),
callbacks=[early_stopping_monitor], verbose=0, batch_size=len(t_norm_train))
self.calibration_predictions.append(self.targets_scaler.inverse_transform(trained_model.predict(x_norm)))
return trained_model
def calibration_performance_evaluation(self, filename):
Y_true = np.array(self.targets)
Y_pred = np.array(self.calibration_predictions)
FRC_X = self.datainputs[FRC_IN].to_numpy()
capture_all = (
np.less_equal(Y_true, np.max(Y_pred, axis=0))
* np.greater_equal(Y_true, np.min(Y_pred, axis=0))
* 1
)
capture_90 = (
np.less_equal(Y_true, np.percentile(Y_pred, 95, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 5, axis=0))
* 1
)
capture_80 = (
np.less_equal(Y_true, np.percentile(Y_pred, 90, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 10, axis=0))
* 1
)
capture_70 = (
np.less_equal(Y_true, np.percentile(Y_pred, 85, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 15, axis=0))
* 1
)
capture_60 = (
np.less_equal(Y_true, np.percentile(Y_pred, 80, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 20, axis=0))
* 1
)
capture_50 = (
np.less_equal(Y_true, np.percentile(Y_pred, 75, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 25, axis=0))
* 1
)
capture_40 = (
np.less_equal(Y_true, np.percentile(Y_pred, 70, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 30, axis=0))
* 1
)
capture_30 = (
np.less_equal(Y_true, np.percentile(Y_pred, 65, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 35, axis=0))
* 1
)
capture_20 = (
np.less_equal(Y_true, np.percentile(Y_pred, 60, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 40, axis=0))
* 1
)
capture_10 = (
np.less_equal(Y_true, np.percentile(Y_pred, 55, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 45, axis=0))
* 1
)
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
length_20 = np.sum(np.less(Y_true, 0.2))
test_len = len(Y_true)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture = [
capture_10_sum / test_len,
capture_20_sum / test_len,
capture_30_sum / test_len,
capture_40_sum / test_len,
capture_50_sum / test_len,
capture_60_sum / test_len,
capture_70_sum / test_len,
capture_80_sum / test_len,
capture_90_sum / test_len,
capture_all_sum / test_len,
]
capture_20 = [
capture_10_20_sum / length_20,
capture_20_20_sum / length_20,
capture_30_20_sum / length_20,
capture_40_20_sum / length_20,
capture_50_20_sum / length_20,
capture_60_20_sum / length_20,
capture_70_20_sum / length_20,
capture_80_20_sum / length_20,
capture_90_20_sum / length_20,
capture_all_20_sum / length_20,
]
self.percent_capture_cal = capture_all_sum / test_len
self.percent_capture_02_cal = capture_all_20_sum / length_20
self.CI_reliability_cal = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
self.CI_reliability_02_cal = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
# Rank Histogram
rank = []
for a in range(0, len(Y_true)):
n_lower = np.sum(np.greater(Y_true[a], Y_pred[:, a]))
n_equal = np.sum(np.equal(Y_true[a], Y_pred[:, a]))
            deviate_rank = np.random.randint(0, n_equal + 1)  # inclusive upper bound; np.random.random_integers is deprecated
rank = np.append(rank, n_lower + deviate_rank)
rank_hist = np.histogram(rank, bins=self.network_count + 1)
delta = np.sum((rank_hist[0] - (test_len / ((self.network_count + 1)))) ** 2)
delta_0 = self.network_count * test_len / (self.network_count + 1)
self.delta_score_cal = delta / delta_0
c = self.network_count
alpha = np.zeros((test_len, (c + 1)))
beta = np.zeros((test_len, (c + 1)))
low_outlier = 0
high_outlier = 0
for a in range(0, test_len):
observation = Y_true[a]
forecast = np.sort(Y_pred[:, a])
for b in range(1, c):
if observation > forecast[b]:
alpha[a, b] = forecast[b] - forecast[b - 1]
beta[a, b] = 0
elif forecast[b] > observation > forecast[b - 1]:
alpha[a, b] = observation - forecast[b - 1]
beta[a, b] = forecast[b] - observation
else:
alpha[a, b] = 0
beta[a, b] = forecast[b] - forecast[b - 1]
# overwrite boundaries in case of outliers
if observation < forecast[0]:
beta[a, 0] = forecast[0] - observation
low_outlier += 1
if observation > forecast[c - 1]:
alpha[a, c] = observation - forecast[c - 1]
high_outlier += 1
alpha_bar = np.mean(alpha, axis=0)
beta_bar = np.mean(beta, axis=0)
g_bar = alpha_bar + beta_bar
o_bar = beta_bar / (alpha_bar + beta_bar)
if low_outlier > 0:
o_bar[0] = low_outlier / test_len
g_bar[0] = beta_bar[0] / o_bar[0]
else:
o_bar[0] = 0
g_bar[0] = 0
if high_outlier > 0:
o_bar[c] = high_outlier / test_len
g_bar[c] = alpha_bar[c] / o_bar[c]
else:
o_bar[c] = 0
g_bar[c] = 0
p_i = np.arange(0 / c, (c + 1) / c, 1 / c)
self.CRPS_cal = np.sum(
g_bar * ((1 - o_bar) * (p_i**2) + o_bar * ((1 - p_i) ** 2))
)
CI_x = [0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]
fig = plt.figure(figsize=(15, 10), dpi=100)
gridspec.GridSpec(2, 3)
plt.subplot2grid((2, 3), (0, 0), colspan=2, rowspan=2)
plt.axhline(0.2, c="k", ls="--", label="Point-of-consumption FRC = 0.2 mg/L")
plt.scatter(
FRC_X, Y_true, edgecolors="k", facecolors="None", s=20, label="Observed"
)
plt.scatter(
FRC_X,
np.median(Y_pred, axis=0),
facecolors="r",
edgecolors="None",
s=10,
label="Forecast Median",
)
plt.vlines(
FRC_X,
np.min(Y_pred, axis=0),
np.max(Y_pred, axis=0),
color="r",
label="Forecast Range",
)
plt.xlabel("Point-of-Distribution FRC (mg/L)")
plt.ylabel("Point-of-Consumption FRC (mg/L)")
plt.xlim([0, np.max(FRC_X)])
plt.legend(
bbox_to_anchor=(0.001, 0.999),
shadow=False,
labelspacing=0.1,
fontsize="small",
handletextpad=0.1,
loc="upper left",
)
ax1 = fig.axes[0]
ax1.set_title("(a)", y=0.88, x=0.05)
plt.subplot2grid((2, 3), (0, 2), colspan=1, rowspan=1)
plt.plot(CI_x, CI_x, c="k")
plt.scatter(CI_x, capture, label="All observations")
plt.scatter(CI_x, capture_20, label="Point-of-Consumption FRC below 0.2 mg/L")
plt.xlabel("Ensemble Confidence Interval")
plt.ylabel("Percent Capture")
plt.ylim([0, 1])
plt.xlim([0, 1])
plt.legend(
bbox_to_anchor=(0.001, 0.999),
shadow=False,
labelspacing=0.1,
fontsize="small",
handletextpad=0.1,
loc="upper left",
)
ax2 = fig.axes[1]
ax2.set_title("(b)", y=0.88, x=0.05)
plt.subplot2grid((2, 3), (1, 2), colspan=1, rowspan=1)
plt.hist(rank, bins=(self.network_count + 1), density=True)
plt.xlabel("Rank")
plt.ylabel("Probability")
ax3 = fig.axes[2]
ax3.set_title("(c)", y=0.88, x=0.05)
plt.savefig(
os.path.splitext(filename)[0] + "_Calibration_Diagnostic_Figs.png",
format="png",
bbox_inches="tight",
)
plt.close()
myStringIOBytes = io.BytesIO()
plt.savefig(myStringIOBytes, format="png", bbox_inches="tight")
myStringIOBytes.seek(0)
my_base_64_pngData = base64.b64encode(myStringIOBytes.read())
return my_base_64_pngData
def get_bw(self):
Y_true = np.array(self.targets)
Y_pred = np.array(self.calibration_predictions)[:, :, 0]
s2 = []
xt_yt = []
for a in range(0, len(Y_true)):
observation = Y_true[a]
forecast = np.sort(Y_pred[:, a])
s2 = np.append(s2, np.var(forecast))
xt_yt = np.append(xt_yt, (np.mean(forecast) - observation) ** 2)
WB_bw = np.mean(xt_yt) - (1 + 1 / self.network_count) * np.mean(s2)
return WB_bw
def post_process_performance_eval(self, bandwidth):
Y_true = np.squeeze(np.array(self.targets))
Y_pred = np.array(self.calibration_predictions)[:, :, 0]
test_len = len(Y_true)
min_CI = []
max_CI = []
CI_90_Lower = []
CI_90_Upper = []
CI_80_Lower = []
CI_80_Upper = []
CI_70_Lower = []
CI_70_Upper = []
CI_60_Lower = []
CI_60_Upper = []
CI_50_Lower = []
CI_50_Upper = []
CI_40_Lower = []
CI_40_Upper = []
CI_30_Lower = []
CI_30_Upper = []
CI_20_Lower = []
CI_20_Upper = []
CI_10_Lower = []
CI_10_Upper = []
CI_median = []
CRPS = []
Kernel_Risk = []
evaluation_range = np.arange(-10, 10.001, 0.001)
# compute CRPS as well as the confidence intervals of each ensemble forecast
for a in range(0, test_len):
scipy_kde = scipy.stats.gaussian_kde(Y_pred[:, a], bw_method=bandwidth)
scipy_pdf = scipy_kde.evaluate(evaluation_range) * 0.001
scipy_cdf = np.cumsum(scipy_pdf)
min_CI = np.append(
min_CI, evaluation_range[np.max(np.where(scipy_cdf == 0)[0])]
)
max_CI = np.append(max_CI, evaluation_range[np.argmax(scipy_cdf)])
CI_90_Lower = np.append(
CI_90_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.05)))]
)
CI_90_Upper = np.append(
CI_90_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.95)))]
)
CI_80_Lower = np.append(
CI_80_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.1)))]
)
CI_80_Upper = np.append(
CI_80_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.9)))]
)
CI_70_Lower = np.append(
CI_70_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.15)))]
)
CI_70_Upper = np.append(
CI_70_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.85)))]
)
CI_60_Lower = np.append(
CI_60_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.2)))]
)
CI_60_Upper = np.append(
CI_60_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.8)))]
)
CI_50_Lower = np.append(
CI_50_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.25)))]
)
CI_50_Upper = np.append(
CI_50_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.75)))]
)
CI_40_Lower = np.append(
CI_40_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.3)))]
)
CI_40_Upper = np.append(
CI_40_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.7)))]
)
CI_30_Lower = np.append(
CI_30_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.35)))]
)
CI_30_Upper = np.append(
CI_30_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.65)))]
)
CI_20_Lower = np.append(
CI_20_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.4)))]
)
CI_20_Upper = np.append(
CI_20_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.6)))]
)
CI_10_Lower = np.append(
CI_10_Lower, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.45)))]
)
CI_10_Upper = np.append(
CI_10_Upper, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.55)))]
)
CI_median = np.append(
CI_median, evaluation_range[np.argmin(np.abs((scipy_cdf - 0.50)))]
)
Kernel_Risk = np.append(Kernel_Risk, scipy_kde.integrate_box_1d(-10, 0.2))
Heaviside = (evaluation_range >= Y_true[a]).astype(int)
CRPS_dif = (scipy_cdf - Heaviside) ** 2
CRPS = np.append(CRPS, np.sum(CRPS_dif * 0.001))
mean_CRPS = np.mean(CRPS)
capture_all = (
np.less_equal(Y_true, max_CI) * np.greater_equal(Y_true, min_CI) * 1
)
capture_90 = (
np.less_equal(Y_true, CI_90_Upper)
* np.greater_equal(Y_true, CI_90_Lower)
* 1
)
capture_80 = (
np.less_equal(Y_true, CI_80_Upper)
* np.greater_equal(Y_true, CI_80_Lower)
* 1
)
capture_70 = (
np.less_equal(Y_true, CI_70_Upper)
* np.greater_equal(Y_true, CI_70_Lower)
* 1
)
capture_60 = (
np.less_equal(Y_true, CI_60_Upper)
* np.greater_equal(Y_true, CI_60_Lower)
* 1
)
capture_50 = (
np.less_equal(Y_true, CI_50_Upper)
* np.greater_equal(Y_true, CI_50_Lower)
* 1
)
capture_40 = (
np.less_equal(Y_true, CI_40_Upper)
* np.greater_equal(Y_true, CI_40_Lower)
* 1
)
capture_30 = (
np.less_equal(Y_true, CI_30_Upper)
* np.greater_equal(Y_true, CI_30_Lower)
* 1
)
capture_20 = (
np.less_equal(Y_true, CI_20_Upper)
* np.greater_equal(Y_true, CI_20_Lower)
* 1
)
capture_10 = (
np.less_equal(Y_true, CI_10_Upper)
* np.greater_equal(Y_true, CI_10_Lower)
* 1
)
length_20 = np.sum(np.less(Y_true, 0.2))
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture_sum_squares = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
capture_20_sum_squares = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
return (
mean_CRPS,
capture_sum_squares,
capture_20_sum_squares,
capture_all_sum / test_len,
capture_all_20_sum / length_20,
)
def post_process_cal(self):
self.WB_bandwidth = self.get_bw()
(
self.CRPS_post_cal,
self.CI_reliability_post_cal,
self.CI_reliability_02_post_cal,
self.percent_capture_post_cal,
self.percent_capture_02_post_cal,
) = self.post_process_performance_eval(self.WB_bandwidth)
CRPS_Skill = (self.CRPS_post_cal - self.CRPS_cal) / (0 - self.CRPS_cal)
CI_Skill = (self.CI_reliability_post_cal - self.CI_reliability_cal) / (
0 - self.CI_reliability_cal
)
CI_20_Skill = (self.CI_reliability_02_post_cal - self.CI_reliability_02_cal) / (
0 - self.CI_reliability_02_cal
)
PC_Skill = (self.percent_capture_post_cal - self.percent_capture_cal) / (
1 - self.percent_capture_cal
)
PC_20_Skill = (
self.percent_capture_02_post_cal - self.percent_capture_02_cal
) / (1 - self.percent_capture_02_cal)
Net_Score = CRPS_Skill + CI_Skill + CI_20_Skill + PC_Skill + PC_20_Skill
if Net_Score > 0:
self.post_process_check = True
else:
self.post_process_check = False
def full_performance_evaluation(self, directory):
x_norm = self.predictors_scaler.transform(self.predictors)
t_norm = self.targets_scaler.transform(self.targets)
base_model = self.model
        base_model.save(os.path.join(directory, "base_network.h5"))
x_cal_norm, x_test_norm, t_cal_norm, t_test_norm = train_test_split(
x_norm, t_norm, test_size=0.25, shuffle=False, random_state=10
)
self.verifying_observations = self.targets_scaler.inverse_transform(t_test_norm)
self.test_x_data = self.predictors_scaler.inverse_transform(x_test_norm)
early_stopping_monitor = keras.callbacks.EarlyStopping(
monitor="val_loss", min_delta=0, patience=10, restore_best_weights=True
)
self.verifying_predictions = []
for i in range(0, self.network_count):
tf.keras.backend.clear_session()
            self.model = keras.models.load_model(os.path.join(directory, "base_network.h5"))
x_norm_train, x_norm_val, t_norm_train, t_norm_val = train_test_split(
x_cal_norm,
t_cal_norm,
train_size=1 / 3,
shuffle=True,
random_state=i**2,
)
new_weights = [
np.random.uniform(-0.05, 0.05, w.shape)
for w in self.model.get_weights()
]
self.model.set_weights(new_weights)
self.model.fit(
x_norm_train,
t_norm_train,
epochs=self.epochs,
validation_data=(x_norm_val, t_norm_val),
callbacks=[early_stopping_monitor],
verbose=0,
batch_size=len(t_norm_train),
)
self.verifying_predictions.append(self.targets_scaler.inverse_transform(self.model.predict(x_test_norm)))
Y_true = np.array(self.verifying_observations)
Y_pred = np.array(self.verifying_predictions)
FRC_X = self.test_x_data[:, 0]
capture_all = (
np.less_equal(Y_true, np.max(Y_pred, axis=0))
* np.greater_equal(Y_true, np.min(Y_pred, axis=0))
* 1
)
capture_90 = (
np.less_equal(Y_true, np.percentile(Y_pred, 95, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 5, axis=0))
* 1
)
capture_80 = (
np.less_equal(Y_true, np.percentile(Y_pred, 90, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 10, axis=0))
* 1
)
capture_70 = (
np.less_equal(Y_true, np.percentile(Y_pred, 85, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 15, axis=0))
* 1
)
capture_60 = (
np.less_equal(Y_true, np.percentile(Y_pred, 80, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 20, axis=0))
* 1
)
capture_50 = (
np.less_equal(Y_true, np.percentile(Y_pred, 75, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 25, axis=0))
* 1
)
capture_40 = (
np.less_equal(Y_true, np.percentile(Y_pred, 70, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 30, axis=0))
* 1
)
capture_30 = (
np.less_equal(Y_true, np.percentile(Y_pred, 65, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 35, axis=0))
* 1
)
capture_20 = (
np.less_equal(Y_true, np.percentile(Y_pred, 60, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 40, axis=0))
* 1
)
capture_10 = (
np.less_equal(Y_true, np.percentile(Y_pred, 55, axis=0))
* np.greater_equal(Y_true, np.percentile(Y_pred, 45, axis=0))
* 1
)
capture_all_20 = capture_all * np.less(Y_true, 0.2)
capture_90_20 = capture_90 * np.less(Y_true, 0.2)
capture_80_20 = capture_80 * np.less(Y_true, 0.2)
capture_70_20 = capture_70 * np.less(Y_true, 0.2)
capture_60_20 = capture_60 * np.less(Y_true, 0.2)
capture_50_20 = capture_50 * np.less(Y_true, 0.2)
capture_40_20 = capture_40 * np.less(Y_true, 0.2)
capture_30_20 = capture_30 * np.less(Y_true, 0.2)
capture_20_20 = capture_20 * np.less(Y_true, 0.2)
capture_10_20 = capture_10 * np.less(Y_true, 0.2)
length_20 = np.sum(np.less(Y_true, 0.2))
test_len = len(Y_true)
capture_all_sum = np.sum(capture_all)
capture_90_sum = np.sum(capture_90)
capture_80_sum = np.sum(capture_80)
capture_70_sum = np.sum(capture_70)
capture_60_sum = np.sum(capture_60)
capture_50_sum = np.sum(capture_50)
capture_40_sum = np.sum(capture_40)
capture_30_sum = np.sum(capture_30)
capture_20_sum = np.sum(capture_20)
capture_10_sum = np.sum(capture_10)
capture_all_20_sum = np.sum(capture_all_20)
capture_90_20_sum = np.sum(capture_90_20)
capture_80_20_sum = np.sum(capture_80_20)
capture_70_20_sum = np.sum(capture_70_20)
capture_60_20_sum = np.sum(capture_60_20)
capture_50_20_sum = np.sum(capture_50_20)
capture_40_20_sum = np.sum(capture_40_20)
capture_30_20_sum = np.sum(capture_30_20)
capture_20_20_sum = np.sum(capture_20_20)
capture_10_20_sum = np.sum(capture_10_20)
capture = [
capture_10_sum / test_len,
capture_20_sum / test_len,
capture_30_sum / test_len,
capture_40_sum / test_len,
capture_50_sum / test_len,
capture_60_sum / test_len,
capture_70_sum / test_len,
capture_80_sum / test_len,
capture_90_sum / test_len,
capture_all_sum / test_len,
]
capture_20 = [
capture_10_20_sum / length_20,
capture_20_20_sum / length_20,
capture_30_20_sum / length_20,
capture_40_20_sum / length_20,
capture_50_20_sum / length_20,
capture_60_20_sum / length_20,
capture_70_20_sum / length_20,
capture_80_20_sum / length_20,
capture_90_20_sum / length_20,
capture_all_20_sum / length_20,
]
self.percent_capture_cal = capture_all_sum / test_len
self.percent_capture_02_cal = capture_all_20_sum / length_20
self.CI_reliability_cal = (
(0.1 - capture_10_sum / test_len) ** 2
+ (0.2 - capture_20_sum / test_len) ** 2
+ (0.3 - capture_30_sum / test_len) ** 2
+ (0.4 - capture_40_sum / test_len) ** 2
+ (0.5 - capture_50_sum / test_len) ** 2
+ (0.6 - capture_60_sum / test_len) ** 2
+ (0.7 - capture_70_sum / test_len) ** 2
+ (0.8 - capture_80_sum / test_len) ** 2
+ (0.9 - capture_90_sum / test_len) ** 2
+ (1 - capture_all_sum / test_len) ** 2
)
self.CI_reliability_02_cal = (
(0.1 - capture_10_20_sum / length_20) ** 2
+ (0.2 - capture_20_20_sum / length_20) ** 2
+ (0.3 - capture_30_20_sum / length_20) ** 2
+ (0.4 - capture_40_20_sum / length_20) ** 2
+ (0.5 - capture_50_20_sum / length_20) ** 2
+ (0.6 - capture_60_20_sum / length_20) ** 2
+ (0.7 - capture_70_20_sum / length_20) ** 2
+ (0.8 - capture_80_20_sum / length_20) ** 2
+ (0.9 - capture_90_20_sum / length_20) ** 2
+ (1 - capture_all_20_sum / length_20) ** 2
)
# Rank Histogram
rank = []
for a in range(0, len(Y_true)):
n_lower = np.sum(np.greater(Y_true[a], Y_pred[:, a]))
n_equal = np.sum(np.equal(Y_true[a], Y_pred[:, a]))
            deviate_rank = np.random.randint(0, n_equal + 1)  # inclusive upper bound; np.random.random_integers is deprecated
rank = np.append(rank, n_lower + deviate_rank)
rank_hist = np.histogram(rank, bins=self.network_count + 1)
delta = np.sum((rank_hist[0] - (test_len / ((self.network_count + 1)))) ** 2)
delta_0 = self.network_count * test_len / (self.network_count + 1)
self.delta_score_cal = delta / delta_0
CI_x = [0.10, 0.20, 0.30, 0.40, 0.50, 0.60, 0.70, 0.80, 0.90, 1.00]
fig = plt.figure(figsize=(15, 10), dpi=100)
gridspec.GridSpec(2, 3)
plt.subplot2grid((2, 3), (0, 0), colspan=2, rowspan=2)
plt.axhline(0.2, c="k", ls="--", label="Point-of-consumption FRC = 0.2 mg/L")
plt.scatter(
FRC_X, Y_true, edgecolors="k", facecolors="None", s=20, label="Observed"
)
plt.scatter(
FRC_X,
            np.median(Y_pred, axis=0),
import numpy as np
import pydart2 as pydart
import math
from fltk import *
from PyCommon.modules.GUI import hpSimpleViewer as hsv
from PyCommon.modules.Renderer import ysRenderer as yr
from Examples.speed_skating.make_skate_keyframe import make_keyframe
render_vector = []
render_vector_origin = []
push_force = []
push_force_origin = []
blade_force = []
blade_force_origin = []
rd_footCenter = []
ik_on = True
# ik_on = False
class MyWorld(pydart.World):
def __init__(self, ):
super(MyWorld, self).__init__(1.0 / 1200.0, '../data/skel/cart_pole_blade_3dof_with_ground.skel')
# pydart.World.__init__(self, 1.0 / 1000.0, '../data/skel/cart_pole_blade_3dof_with_ground.skel')
# pydart.World.__init__(self, 1.0 / 1000.0, '../data/skel/cart_pole_blade.skel')
# pydart.World.__init__(self, 1.0 / 2000.0, '../data/skel/cart_pole.skel')
self.force = None
self.force_r = None
self.force_l = None
self.force_hip_r = None
self.duration = 0
self.skeletons[0].body('ground').set_friction_coeff(0.02)
self.ground_height = self.skeletons[0].body(0).to_world((0., 0.025, 0.))[1]
skel = self.skeletons[2]
root_state = make_keyframe(skel)
state = root_state
self.state_list = []
for i in range(10):
self.state_list.append(state)
state = state.get_next()
state_num = len(self.state_list)
self.state_num = state_num
# print("state_num: ", state_num)
self.curr_state = self.state_list[0]
self.elapsedTime = 0.0
self.curr_state_index = 0
# print("backup angle: ", backup_q)
# print("cur angle: ", self.curr_state.angles)
if ik_on:
revise_pose(self.skeletons[3], self.state_list[0])
print('IK ON')
self.skeletons[3].set_positions(self.curr_state.angles)
self.rd_contact_forces = []
self.rd_contact_positions = []
# print("dof: ", skel.ndofs)
# nonholonomic constraint initial setting
_th = 5. * math.pi / 180.
self.nh0 = pydart.constraints.NonHolonomicContactConstraint(skel.body('h_blade_right'), np.array((0.0216+0.104, -0.0216-0.027, 0.)))
self.nh1 = pydart.constraints.NonHolonomicContactConstraint(skel.body('h_blade_right'), np.array((0.0216-0.104, -0.0216-0.027, 0.)))
self.nh2 = pydart.constraints.NonHolonomicContactConstraint(skel.body('h_blade_left'), np.array((0.0216+0.104, -0.0216-0.027, 0.)))
self.nh3 = pydart.constraints.NonHolonomicContactConstraint(skel.body('h_blade_left'), np.array((0.0216-0.104, -0.0216-0.027, 0.)))
self.nh1.set_violation_angle_ignore_threshold(_th)
self.nh1.set_length_for_violation_ignore(0.208)
self.nh3.set_violation_angle_ignore_threshold(_th)
self.nh3.set_length_for_violation_ignore(0.208)
self.nh0.add_to_world()
self.nh1.add_to_world()
self.nh2.add_to_world()
self.nh3.add_to_world()
self.step_count = 0
def step(self):
# print("self.curr_state: ", self.curr_state.name)
# if self.curr_state.name == "state1":
# self.force = np.array([50.0, 0.0, 0.0])
# # elif self.curr_state.name == "state2":
# # self.force = np.array([0.0, 0.0, -10.0])
# else:
# self.force = None
if self.force is not None:
self.skeletons[2].body('h_pelvis').add_ext_force(self.force)
self.skeletons[3].set_positions(self.curr_state.angles)
if self.curr_state.dt < self.time() - self.elapsedTime:
# print("change the state!!!", self.curr_state_index)
self.curr_state_index = self.curr_state_index + 1
self.curr_state_index = self.curr_state_index % self.state_num
self.elapsedTime = self.time()
self.curr_state = self.state_list[self.curr_state_index]
# print("state_", self.curr_state_index)
# print(self.curr_state.angles)
# ground_height = -0.98 + 0.0251
# ground_height = 0.
ground_height = self.skeletons[0].body("ground").to_world()[1] #0.0251
# print("ground h: ", ground_height)
# ground_height = -0.98 + 0.05
# ground_height = -0.98
# if ik_on:
# ik_res = copy.deepcopy(self.curr_state.angles)
# if self.curr_state.name != "state02":
# # print("ik solve!!-------------------")
# self.ik.update_target(ground_height)
# ik_res[6:] = self.ik.solve()
# # self.curr_state.angles = ik_res
# self.skeletons[3].set_positions(ik_res)
# HP QP solve
        lf_tangent_vec = np.array([1.0, 0.0, 0.0])
        rf_tangent_vec = np.array([1.0, 0.0, 0.0])
import scipy.ndimage as scnd
import scipy.optimize as sio
import numpy as np
import numba
import warnings
import stemtool as st
@numba.jit
def fit_nbed_disks(corr_image, disk_size, positions, diff_spots):
warnings.filterwarnings("ignore")
positions = np.asarray(positions, dtype=np.float64)
diff_spots = np.asarray(diff_spots, dtype=np.float64)
fitted_disk_list = np.zeros_like(positions)
disk_locations = np.zeros_like(positions)
for ii in range(int(np.shape(positions)[0])):
posx = positions[ii, 0]
posy = positions[ii, 1]
par = st.util.fit_gaussian2D_mask(corr_image, posx, posy, disk_size)
fitted_disk_list[ii, 0] = par[0]
fitted_disk_list[ii, 1] = par[1]
disk_locations = np.copy(fitted_disk_list)
disk_locations[:, 1] = 0 - disk_locations[:, 1]
center = disk_locations[
np.logical_and((diff_spots[:, 0] == 0), (diff_spots[:, 1] == 0)), :
]
cx = center[0, 0]
cy = center[0, 1]
disk_locations[:, 0:2] = disk_locations[:, 0:2] - np.asarray(
(cx, cy), dtype=np.float64
)
lcbed, _, _, _ = np.linalg.lstsq(diff_spots, disk_locations, rcond=None)
cy = (-1) * cy
return fitted_disk_list, np.asarray((cx, cy), dtype=np.float64), lcbed
def sobel_filter(image, med_filter=50):
ls_image, _ = st.util.sobel(st.util.image_logarizer(image))
ls_image[ls_image > (med_filter * np.median(ls_image))] = med_filter * np.median(
ls_image
)
ls_image[ls_image < (np.median(ls_image) / med_filter)] = (
np.median(ls_image) / med_filter
)
return ls_image
@numba.jit
def strain_and_disk(data4D, disk_size, pixel_list_xy, disk_list, ROI=1, med_factor=50):
warnings.filterwarnings("ignore")
if np.size(ROI) < 2:
ROI = np.ones((data4D.shape[2], data4D.shape[3]), dtype=bool)
# Calculate needed values
scan_size = np.asarray(data4D.shape)[2:4]
sy, sx = np.mgrid[0 : scan_size[0], 0 : scan_size[1]]
scan_positions = (np.asarray((np.ravel(sy), np.ravel(sx)))).astype(int)
cbed_size = np.asarray(data4D.shape)[0:2]
yy, xx = np.mgrid[0 : cbed_size[0], 0 : cbed_size[1]]
center_disk = (
st.util.make_circle(cbed_size, cbed_size[1] / 2, cbed_size[0] / 2, disk_size)
).astype(np.float64)
i_matrix = (np.eye(2)).astype(np.float64)
sobel_center_disk, _ = st.util.sobel(center_disk)
# Initialize matrices
e_xx = np.zeros(scan_size, dtype=np.float64)
e_xy = np.zeros(scan_size, dtype=np.float64)
e_th = np.zeros(scan_size, dtype=np.float64)
e_yy = np.zeros(scan_size, dtype=np.float64)
disk_x = np.zeros(scan_size, dtype=np.float64)
disk_y = np.zeros(scan_size, dtype=np.float64)
COM_x = np.zeros(scan_size, dtype=np.float64)
COM_y = np.zeros(scan_size, dtype=np.float64)
# Calculate for mean CBED if no reference
mean_cbed = np.mean(data4D, axis=(-1, -2), dtype=np.float64)
mean_ls_cbed, _ = st.util.sobel(st.util.image_logarizer(mean_cbed))
    mean_ls_cbed[
        mean_ls_cbed > med_factor * np.median(mean_ls_cbed)
    ] = med_factor * np.median(mean_ls_cbed)
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" General Utilities
"""
from operator import itemgetter
from typing import Any, Generator, KeysView, List, Set, Tuple, TYPE_CHECKING
import numpy
from fqe.bitstring import lexicographic_bitstring_generator
from fqe.bitstring import check_conserved_bits, count_bits
if TYPE_CHECKING:
#Avoid circular imports and only import for type-checking
from fqe import wavefunction
def alpha_beta_electrons(nele: int, m_s: int) -> Tuple[int, int]:
"""Given the total number of electrons and the z-spin quantum number, return
the number of alpha and beta electrons in the system.
Args:
nele (int) - number of electrons
m_s (int) - spin angular momentum on the z-axis
Return:
number of alpha electrons (int), number of beta electrons (int)
"""
if nele < 0:
raise ValueError('Cannot have negative electrons')
if nele < abs(m_s):
raise ValueError('Spin quantum number exceeds physical limits')
nalpha = int(nele + m_s) // 2
nbeta = nele - nalpha
return nalpha, nbeta
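# Examples: alpha_beta_electrons(4, 0) -> (2, 2); alpha_beta_electrons(5, 1) -> (3, 2)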
def reverse_bubble_list(arr: List[Any]) -> int:
"""Bubble Sort algorithm to arrange a list so that the lowest value is
stored in 0 and the highest value is stored in len(arr)-1. It is included
here in order to access the swap count.
Args:
arr (list) - object to be sorted
Returns:
arr (list) - sorted
swap_count (int) - number of permutations to achieve the sort
"""
larr = len(arr)
swap_count = 0
for i in range(larr):
swapped = False
for j in range(0, larr - i - 1):
if arr[j][0] < arr[j + 1][0]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
swapped = True
swap_count += 1
if not swapped:
break
return swap_count
def bubblesort(arr: List[Any]) -> int:
"""Bubble Sort algorithm to arrange a list so that the lowest value is
stored in 0 and the highest value is stored in len(arr)-1. It is included
here in order to access the swap count.
Args:
arr (list) - object to be sorted
Returns:
arr (list) - sorted
swap_count (int) - number of permutations to achieve the sort
"""
larr = len(arr)
swap_count = 0
for i in range(larr):
swapped = False
for j in range(0, larr - i - 1):
if arr[j] > arr[j + 1]:
arr[j], arr[j + 1] = arr[j + 1], arr[j]
swapped = True
swap_count += 1
if not swapped:
break
return swap_count
def configuration_key_union(*argv: KeysView[Tuple[int, int]]
) -> List[Tuple[int, int]]:
"""Given a list of configuration keys, build a list which is the union of
all configuration keys in the list
Args:
*args (list[(int, int)]) - any number of configuration key lists to be joined
Returns:
list[(int, int)] - a list of unique configuration keys found among all
the passed arguments
"""
keyunion: Set[Tuple[int, int]] = set()
for configs in argv:
keyunion.update(configs)
return list(keyunion)
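# Example: configuration_key_union([(2, 0)], [(2, 0), (4, 2)]) -> [(2, 0), (4, 2)] (order not guaranteed)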
def configuration_key_intersection(*argv: List[Tuple[int, int]]
) -> List[Tuple[int, int]]:
"""Return the intersection of the passed configuration key lists.
Args:
*args (list[(int, int)]) - any number of configuration key lists to be joined
Returns:
list [(int, int)] - a list of configuration keys found in every
configuration passed.
"""
keyinter = argv[0]
ref = []
for config in argv[1:]:
for key in config:
if key in keyinter:
ref.append(key)
keyinter = ref
return keyinter
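# Example: configuration_key_intersection([(2, 0), (4, 2)], [(2, 0)]) -> [(2, 0)]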
def init_bitstring_groundstate(occ_num: int) -> int:
"""Occupy the n lowest orbitals of a state in the bitstring representation
Args:
occ_num (integer) - number of orbitals to occupy
Returns:
(integer) - bitstring representation of the ground state
"""
return (1 << occ_num) - 1
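# Example: init_bitstring_groundstate(3) == 0b111 == 7 (three lowest orbitals occupied)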
def init_qubit_vacuum(nqubits: int) -> numpy.ndarray:
"""Build the ground state wavefunction for an nqubit system.
Args:
nqubits (integer) - The number of qubits in the qpu
Returns:
        numpy.array(dtype=numpy.complex128)
"""
ground_state = numpy.zeros(2**nqubits, dtype=numpy.complex128)
ground_state[0] = 1.0 + 0.0j
return ground_state
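# Example: init_qubit_vacuum(2) -> array([1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j])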
def ltlt_index_generator(dim: int
) -> Generator[Tuple[int, int, int, int], None, None]:
"""Generate index sets into a lower triangle, lower triangle matrix
Args:
dim (int) - the dimension of the array
Returns:
(int, int, int, int) - unique pointers into the compressed matrix
"""
lim = dim
for i in range(lim):
for j in range(i + 1):
for k in range(i + 1):
if k == i:
_ull = j + 1
else:
_ull = k + 1
for lst in range(_ull):
yield i, j, k, lst
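# Example: list(ltlt_index_generator(2)) yields the 6 unique index tuples
#   (0, 0, 0, 0), (1, 0, 0, 0), (1, 0, 1, 0), (1, 1, 0, 0), (1, 1, 1, 0), (1, 1, 1, 1)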
def invert_bitstring_with_mask(string: int, masklen: int) -> int:
"""Invert a bitstring with a mask.
Args:
string (bitstring) - the bitstring to invert
masklen (int) - the value to mask the inverted bitstring to
Returns:
(bitstring) - a bitstring inverted up to the masking length
"""
mask = (1 << masklen) - 1
return ~string & mask
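# Example: invert_bitstring_with_mask(0b0101, 4) == 0b1010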
def paritysort_int(arr: List[int]) -> Tuple[int, List[int]]:
"""Move all even numbers to the left and all odd numbers to the right
Args:
arr list[int] - a list of integers to be sorted
Returns:
arr [list] - mutated in place
swap_count (int) - number of exchanges needed to complete the sorting
"""
larr = len(arr)
parr = [[i % 2, i] for i in arr]
swap_count = 0
for i in range(larr):
swapped = False
for j in range(0, larr - i - 1):
if parr[j][0] > parr[j + 1][0]:
parr[j], parr[j + 1] = parr[j + 1], parr[j]
swapped = True
swap_count += 1
if not swapped:
break
for indx, val in enumerate(parr):
arr[indx] = val[1]
return swap_count, arr
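# Example: paritysort_int([3, 2, 1, 4]) returns (3, [2, 4, 3, 1]); evens are moved to the front in 3 swaps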
def paritysort_list(arr):
"""Move all even numbers to the left and all odd numbers to the right
Args:
arr list[int] - a list of integers to be sorted
Returns:
arr [list] - mutated in place
swap_count (int) - number of exchanges needed to complete the sorting
"""
larr = len(arr)
parr = [[i[0] % 2, i] for i in arr]
swap_count = 0
for i in range(larr):
swapped = False
for j in range(0, larr - i - 1):
if parr[j][0] > parr[j + 1][0]:
parr[j], parr[j + 1] = parr[j + 1], parr[j]
swapped = True
swap_count += 1
if not swapped:
break
for indx, val in enumerate(parr):
arr[indx] = list(val[1])
return swap_count, arr
def qubit_particle_number_sector(nqubits: int,
pnum: int) -> List[numpy.ndarray]:
"""Generate the basis vectors into the qubit basis representing all states
which have a definite particle number.
Args:
nqubits (int) - the number of qubits in the qpu
pnum (int) - the number of particles to build vectors into
Returns:
list[numpy.array(dtype=numpy.complex64)]
"""
    occ = numpy.array([0, 1], dtype=int)
    uno = numpy.array([1, 0], dtype=int)
"""Mapping functions that get values on a prescribed Cartesian coordinates grids from GTS output data files which are in flux coordinates.
"""
import Map_Mod_C as mmc
import numpy as np
from sdp.geometry import grid as sdpgrid  # aliased so the 'grid' argument of GTS_Loader does not shadow the module
import scipy.io.netcdf as nc
from scipy.interpolate import NearestNDInterpolator
from time import perf_counter as clock  # time.clock was removed in Python 3.8
class GTS_loader_Error(Exception):
"""Exception class for handling GTS loading errors
"""
def __init__(self,value):
self.value = value
def __str__(self):
return repr(self.value)
class GTS_Loader:
"""GTS Loading class
For each GTS run case, setup all the loading parameters, read out necessary data, and output to suited format.
"""
def __init__(self, grid, t0,dt,nt, fluc_file_path,eq_fname,prof_fname,gts_file_path, n_cross_section = 1, phi_fname_head = 'PHI.', den_fname_head = 'DEN.', n_boundary = 1001, amplification = 1):
"""Initialize Loading Parameters:
grid: sdp.geometry.Grid.Cartesian2D or Cartesian3D object, contains the output grid information.
t0: int; Starting time of the sampling series, in simulation record step counts.
dt: int; The interval between two sample points, in unit of simulation record step counts.
nt: int; The total number of time_steps.
n_cross_section: int; total cross-sections used for enlarging the ensemble
n_boundary: int; The total number of grid points resolving the plasma last closed flux surface. Normally not important.
fluc_file_path: string; directory where to store the output fluctuation files
eq_fname: string; filename of the equalibrium file, either absolute or relative path.
phi_fname_head: string; The header letters of the phi record file before the toroidal plane number, usually "PHI."
den_fname_head: string; The header letters of the density record file before the toroidal plane number, usually "DEN."
gts_file_path: string; the directory where the PHI data files are stored.
"""
self.grid = grid
        if isinstance(grid, sdpgrid.Cartesian2D):
self.dimension = 2
self.xmin,self.xmax,self.nx = grid.Rmin,grid.Rmax,grid.NR
self.ymin,self.ymax,self.ny = grid.Zmin,grid.Zmax,grid.NZ
self.zmin,self.zmax,self.nz = 0,0,1
        elif isinstance(grid, sdpgrid.Cartesian3D):
self.dimension = 3
self.xmin,self.xmax,self.nx = grid.Xmin,grid.Xmax,grid.NX
self.ymin,self.ymax,self.ny = grid.Ymin,grid.Ymax,grid.NY
self.zmin,self.zmax,self.nz = grid.Zmin,grid.Zmax,grid.NZ
else:
raise GTS_loader_Error('grid not valid. Right now GTS loader only support Cartesian2D or Cartesian3D grid.')
self.t0,self.dt,self.nt = t0,dt,nt
self.time_steps = self.t0 + np.arange(self.nt) *self.dt
self.n_cross_section = n_cross_section
self.fluc_file_path = fluc_file_path
self.eq_fname = eq_fname
self.prof_fname = prof_fname
self.phi_fname_head = phi_fname_head
self.den_fname_head = den_fname_head
self.gts_file_path = gts_file_path
self.n_boundary = n_boundary
        self.amplification = amplification
mmc.set_para_(Xmin=self.xmin,Xmax=self.xmax,NX=self.nx,
Ymin=self.ymin,Ymax=self.ymax,NY=self.ny,
Zmin=self.zmin,Zmax=self.zmax,NZ=self.nz,
NBOUNDARY=self.n_boundary,
TStart=self.t0,TStep=self.dt,NT=self.nt,
Fluc_Amplification=self.amplification,
FlucFilePath=self.fluc_file_path,
EqFileName=self.eq_fname,
NTFileName=self.prof_fname,
PHIFileNameStart=self.phi_fname_head,
DENFileNameStart = self.den_fname_head,
GTSDataDir=self.gts_file_path)
mmc.show_para_()
self.get_fluctuations_from_GTS()
if (self.dimension == 3):
self.dne_on_grid = self.ne0_on_grid[np.newaxis,np.newaxis,:,:,:] * (self.dne_ad_on_grid + self.nane_on_grid)
self.B_2d = self.Btol_2d
elif (self.dimension == 2):
self.ne_on_grid = self.ne0_on_grid * (1 + self.dne_ad_on_grid + self.nane_on_grid)
self.B_on_grid = self.Bt_on_grid
def show_para(self):
mmc.show_para_()
def get_fluctuations_from_GTS(self):
"""load fluctuations on grid using C_function
Create variables:
equilibrium quantities:
ne0_on_grid: double ndarray (1,ny,nx), equilibrium electron density.
Te0_on_grid: double ndarray (1,ny,nx), equilibrium electron temperature.
Bt_on_grid,Bp_on_grid,BR_on_grid,BZ_on_grid: double ndarray (1,ny,nx), equilibrium toroidal and poloidal magnetic field, and R,Z component of Bpol.
fluctuations:
dne_ad_on_grid: double ndarray (nt,nz,ny,nx), adiabatic electron density, calculated from fluctuating potential phi: dne_ad_on_grid/ne0_on_grid = e*phi/Te0_on_grid
nane_on_grid : double ndarray (nt,nz,ny,nx), non-adiabatic electron density normalized to local equilibrium density, read from file.
nate_on_grid : double ndarray (nt,nz,ny,nx), non-adiabatic electron temperature normalized to equilibrium temperature at a reference radius, read from file.
"""
t0 = clock()
if(self.dimension == 3):
x1d = self.grid.X1D
y1d = self.grid.Y1D
self.x2d = np.zeros((1,self.ny,self.nx))+ x1d[np.newaxis,np.newaxis,:]
self.y2d = np.zeros((1,self.ny,self.nx))+ y1d[np.newaxis,:,np.newaxis]
z2d = np.zeros((1,self.ny,self.nx))
x3d = self.grid.X3D
y3d = self.grid.Y3D
z3d = self.grid.Z3D
self.dne_ad_on_grid = np.zeros((self.n_cross_section,self.nt,self.nz,self.ny,self.nx))
self.nane_on_grid = np.zeros((self.n_cross_section,self.nt,self.nz,self.ny,self.nx))
self.nate_on_grid = np.zeros_like(self.nane_on_grid)
#Note that new equilibrium loading convention needs only 2D equilibrium data.
self.ne0_2d = np.zeros((1,self.ny,self.nx))
self.Te0_2d = np.zeros((1,self.ny,self.nx))
self.Btol_2d = np.zeros((1,self.ny,self.nx))
self.Bp_2d = np.zeros((1,self.ny,self.nx))
self.BR_2d = np.zeros((1,self.ny,self.nx))
self.BZ_2d = np.zeros((1,self.ny,self.nx))
mismatched_eq = np.zeros_like(self.x2d,dtype = 'int32')
fluc_2d = np.zeros((self.nt,1,self.ny,self.nx))
mmc.set_para_(Xmin=self.xmin,Xmax=self.xmax,NX=self.nx,
Ymin=self.ymin,Ymax=self.ymax,NY=self.ny,
Zmin=0,Zmax=0,NZ=1,
NBOUNDARY=self.n_boundary,
TStart=1,TStep=1,NT=1,
Fluc_Amplification=self.amplification,
FlucFilePath=self.fluc_file_path,
EqFileName=self.eq_fname,
NTFileName=self.prof_fname,
PHIFileNameStart=self.phi_fname_head,
DENFileNameStart = self.den_fname_head,
GTSDataDir=self.gts_file_path)
            #one separate 2D run to get all the equilibrium quantities
mmc.get_GTS_profiles_(self.x2d,self.y2d,z2d,self.ne0_2d,self.Te0_2d,self.Btol_2d,self.Bp_2d, self.BR_2d, self.BZ_2d, fluc_2d,fluc_2d,fluc_2d,mismatched_eq,0)
self._fill_mismatched_eq(mismatched_eq)
#calculate B_toroidal based on B_total and B_poloidal
self.BPhi_2d = np.sqrt(self.Btol_2d**2 - self.Bp_2d**2)
mmc.set_para_(Xmin=self.xmin,Xmax=self.xmax,NX=self.nx,
Ymin=self.ymin,Ymax=self.ymax,NY=self.ny,
Zmin=self.zmin,Zmax=self.zmax,NZ=self.nz,
NBOUNDARY=self.n_boundary,
TStart=self.t0,TStep=self.dt,NT=self.nt,
Fluc_Amplification=self.amplification,
FlucFilePath=self.fluc_file_path,
EqFileName=self.eq_fname,
NTFileName=self.prof_fname,
PHIFileNameStart=self.phi_fname_head,
DENFileNameStart = self.den_fname_head,
GTSDataDir=self.gts_file_path)
#temporary arrays to hold 3D equilibrium quantities.
self.ne0_on_grid = np.zeros_like(x3d)
self.Te0_on_grid = np.zeros_like(x3d)
self.Btol_on_grid = np.zeros_like(x3d)
self.Bp_on_grid = np.zeros_like(x3d)
self.BR_on_grid = np.zeros_like(x3d)
self.BZ_on_grid = np.zeros_like(x3d)
self.mismatch = np.zeros_like(x3d,dtype = 'int32')
self.total_cross_section = mmc.get_GTS_profiles_(x3d,y3d,z3d,self.ne0_on_grid,self.Te0_on_grid,self.Btol_on_grid,self.Bp_on_grid,self.BR_on_grid,self.BZ_on_grid, self.dne_ad_on_grid[0,...],self.nane_on_grid[0,...],self.nate_on_grid[0,...],self.mismatch, 0)
dcross = int(np.floor(self.total_cross_section / self.n_cross_section))
self.center_cross_sections = np.arange(self.n_cross_section) * dcross
for i in range(1,len(self.center_cross_sections)):
mmc.get_GTS_profiles_(x3d,y3d,z3d,self.ne0_on_grid,self.Te0_on_grid,self.Btol_on_grid,self.Bp_on_grid,self.BR_on_grid,self.BZ_on_grid, self.dne_ad_on_grid[i,...],self.nane_on_grid[i,...],self.nate_on_grid[i,...],self.mismatch,self.center_cross_sections[i])
self._fill_mismatched(self.mismatch)
elif(self.dimension == 2):
x1d = self.grid.R1D
y1d = self.grid.Z1D
x2d = np.zeros((1,self.ny,self.nx))+ x1d[np.newaxis,np.newaxis,:]
y2d = np.zeros((1,self.ny,self.nx))+ y1d[np.newaxis,:,np.newaxis]
z2d = np.zeros((1,self.ny,self.nx))
self.dne_ad_on_grid = np.zeros((self.n_cross_section,self.nt,1,self.ny,self.nx))
self.nane_on_grid = np.zeros((self.n_cross_section,self.nt,1,self.ny,self.nx))
self.nate_on_grid = np.zeros_like(self.nane_on_grid)
#Note that new equilibrium loading convention needs only 2D equilibrium data.
self.ne0_on_grid = np.zeros((1,self.ny,self.nx))
self.Te0_on_grid = np.zeros((1,self.ny,self.nx))
self.Bt_on_grid = np.zeros((1,self.ny,self.nx))
self.Bp_on_grid = np.zeros((1,self.ny,self.nx))
self.BR_on_grid = np.zeros((1,self.ny,self.nx))
self.BZ_on_grid = np.zeros((1,self.ny,self.nx))
self.mismatch = np.zeros_like(self.ne0_on_grid,dtype = 'int32')
self.total_cross_section = mmc.get_GTS_profiles_(x2d,y2d,z2d,self.ne0_on_grid,self.Te0_on_grid,self.Bt_on_grid,self.Bp_on_grid,self.BR_on_grid,self.BZ_on_grid, self.dne_ad_on_grid[0,...],self.nane_on_grid[0,...],self.nate_on_grid[0,...],self.mismatch, 0)
dcross = int(np.floor(self.total_cross_section / self.n_cross_section))
self.center_cross_sections = np.arange(self.n_cross_section) * dcross
for i in range(1,len(self.center_cross_sections)):
mmc.get_GTS_profiles_(x2d,y2d,z2d,self.ne0_on_grid,self.Te0_on_grid,self.Bt_on_grid,self.Bp_on_grid,self.BR_on_grid,self.BZ_on_grid, self.dne_ad_on_grid[i,...],self.nane_on_grid[i,...],self.nate_on_grid[i,...],self.mismatch,self.center_cross_sections[i])
t1 = clock()
self._fill_mismatched(self.mismatch)
t2 = clock()
print('Time used for interpolating mismatched points: {0}\nTotal time used:{1}'.format(t2-t1,t2-t0))
def _fill_mismatched(self,mismatch):
"""interpolate upon correctly matched values, to get values on mismatched points
"""
print('Start correcting mismatched points.')
correct_idx = (mismatch == 0)
mismatch_idx = (mismatch == 1)
if self.dimension == 3:
x_correct = self.grid.X3D[correct_idx]
y_correct = self.grid.Y3D[correct_idx]
z_correct = self.grid.Z3D[correct_idx]
xwant = self.grid.X3D[mismatch_idx]
ywant = self.grid.Y3D[mismatch_idx]
zwant = self.grid.Z3D[mismatch_idx]
points = np.array([z_correct,y_correct,x_correct]).T
points_want = np.array([zwant,ywant,xwant]).T
self.ne0_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.ne0_on_grid[correct_idx])(points_want)
self.Te0_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.Te0_on_grid[correct_idx])(points_want)
self.Btol_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.Btol_on_grid[correct_idx])(points_want)
self.Bp_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.Bp_on_grid[correct_idx])(points_want)
self.BR_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.BR_on_grid[correct_idx])(points_want)
self.BZ_on_grid[mismatch_idx] = NearestNDInterpolator(points,self.BZ_on_grid[correct_idx])(points_want)
for i in range(self.n_cross_section):
for j in range(self.nt):
self.dne_ad_on_grid[i,j][mismatch_idx] = NearestNDInterpolator(points,self.dne_ad_on_grid[i,j][correct_idx])(points_want)
self.nane_on_grid[i,j][mismatch_idx] = NearestNDInterpolator(points,self.nane_on_grid[i,j][correct_idx])(points_want)
self.nate_on_grid[i,j][mismatch_idx] = NearestNDInterpolator(points,self.nate_on_grid[i,j][correct_idx])(points_want)
print('Cross-section {0} finished.'.format(i))
else:
r_correct = self.grid.R2D[correct_idx[0,:,:]]
z_correct = self.grid.Z2D[correct_idx[0,:,:]]
rwant = self.grid.R2D[mismatch_idx[0,:,:]]
zwant = self.grid.Z2D[mismatch_idx[0,:,:]]
points = np.array([z_correct,r_correct]).T
            points_want = np.array([zwant,rwant]).T
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 29 18:33:36 2021
@author: peter
"""
from pathlib import Path
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from vsd_cancer.functions import stats_functions as statsf
import f.plotting_functions as pf
import matplotlib.cm
import matplotlib.gridspec as gridspec
import matplotlib as mpl
import scipy.ndimage as ndimage
def make_figures(initial_df, save_dir, figure_dir, filetype=".png", redo_stats=False):
figsave = Path(figure_dir, "ttx_figure")
if not figsave.is_dir():
figsave.mkdir()
plot_TTX_pre_post(save_dir, figsave, filetype, redo_stats)
plot_TTX_washout(save_dir, figsave, filetype, redo_stats)
plot_pre_post_ttx_traces(initial_df, save_dir, figsave, filetype)
def plot_pre_post_ttx_traces(initial_df, save_dir, figsave, filetype):
def get_most_active_traces(num_traces, df, trial_save, trial_string):
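        # Rank cells by the summed absolute amplitude of their detected events and
        # return the num_traces most active time courses, Gaussian-smoothed along time.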
tcs = np.load(Path(trial_save, f"{trial_string}_all_tcs.npy"))
event_dict = np.load(
Path(trial_save, f"{trial_string}_event_properties.npy"), allow_pickle=True
).item()
idx = 0
events = event_dict["events"][idx]
keep = [x for x in np.arange(tcs.shape[0])]
# sort by event amounts
sort_order = np.array(
[
np.sum(np.abs(events["event_props"][x][:, -1]))
if x in events.keys()
else 0
for x in range(tcs.shape[0])
]
)
tcs = tcs[keep, :]
sort_order = np.argsort(sort_order[keep])[::-1]
tcs = tcs[sort_order, :]
so = np.array(keep)[sort_order]
tcs = ndimage.gaussian_filter(tcs[:num_traces, ...], (0, 3))
so = so[:num_traces]
return tcs, so
df = pd.read_csv(initial_df)
ncells = 10
T = 0.2
trial_strings = [
"cancer_20201216_slip1_area2_long_acq_long_acq_blue_0.0296_green_0.0765_heated_to_37_1",
"cancer_20201216_slip1_area3_long_acq_long_acq_blue_0.0296_green_0.0765_heated_to_37_with_TTX_1",
]
tcs = []
for t in trial_strings:
print(df[df.trial_string == t].stage)
tcs.append(
get_most_active_traces(ncells, df, Path(save_dir, "ratio_stacks", t), t)[0]
)
fig, ax = plt.subplots(ncols=2)
ax[0].plot(np.arange(tcs[0].shape[1]) * T, tcs[0].T + np.arange(ncells) / 20, "k")
ax[1].sharey(ax[0])
ax[1].plot(np.arange(tcs[1].shape[1]) * T, tcs[1].T + np.arange(ncells) / 20, "k")
pf.plot_scalebar(ax[0], 0, 0.95, 100, 0.02)
ax[0].axis("off")
ax[1].axis("off")
fig.savefig(
Path(figsave, "example_traces", f"example_traces{filetype}"),
bbox_inches="tight",
dpi=300,
transparent=True,
)
def plot_TTX_pre_post(save_dir, figsave, filetype, redo_stats):
df = pd.read_csv(Path(save_dir, "all_events_df.csv"))
df["exp_stage"] = df.expt + "_" + df.stage
use = [
x
for x in np.unique(df["exp_stage"])
if "TTX" in x and "washout_washout" not in x
]
ttx = [1, 10]
log = [True, False]
only_neg = [True, False]
histtype = ["bar", "step"]
ttx = [1, 10]
log = [True]
only_neg = [False]
histtype = ["bar"]
for t in ttx:
for l in log:
for n in only_neg:
for h in histtype:
fig = plot_events_TTX(
df, use, TTX_level=t, log=l, only_neg=n, histtype=h
)
fig.savefig(
Path(
figsave,
"pre_post",
str(t),
f"TTX_{t}um_histograms_{h}_log_{l}_onlyneg_{n}{filetype}",
),
bbox_inches="tight",
dpi=300,
transparent=True,
)
df2 = pd.read_csv(Path(save_dir, "TTX_active_df_by_cell.csv"))
T = 0.2
df2["exp_stage"] = df2.expt + "_" + df2.stage
df2["day_slip"] = df2.day.astype(str) + "_" + df2.slip.astype(str)
df2["neg_event_rate"] = (df2["n_neg_events"]) / (df2["obs_length"] * T)
df2["neg_integ_rate"] = (
-1 * (df2["neg_integrated_events"]) / (df2["obs_length"] * T)
)
use2 = [x for x in np.unique(df2["exp_stage"]) if "washout" not in x]
plot_TTX_summary(
df2,
use2,
figsave,
filetype,
redo_stats=redo_stats,
key="neg_event_rate",
function=np.mean,
function_name="np.mean",
scale=3,
density=True,
)
plot_TTX_summary(
df2,
use2,
figsave,
filetype,
redo_stats=False,
key="neg_event_rate",
function=np.mean,
function_name="np.mean",
scale=3,
density=False,
)
# plot_TTX_summary(df2,use2,figsave,filetype,redo_stats = redo_stats,key = 'neg_integ_rate', function = np.mean,function_name = 'np.mean',scale = 3, density = True)
# plot_TTX_summary(df2,use2,figsave,filetype,redo_stats = False,key = 'neg_integ_rate', function = np.mean,function_name = 'np.mean',scale = 3, density = False)
def plot_TTX_washout(save_dir, figsave, filetype, redo_stats):
df = pd.read_csv(Path(save_dir, "all_events_df.csv"))
df["exp_stage"] = df.expt + "_" + df.stage
use = [x for x in np.unique(df["exp_stage"]) if "TTX" in x and "washout" in x]
log = [True, False]
only_neg = [True, False]
histtype = ["bar", "step"]
log = [True]
only_neg = [False]
histtype = ["bar"]
for l in log:
for n in only_neg:
for h in histtype:
fig = plot_events_TTX_washout(df, use, log=l, only_neg=n, histtype=h)
fig.savefig(
Path(
figsave,
"washout",
f"TTX_washout_histograms_{h}_log_{l}_onlyneg_{n}{filetype}",
),
bbox_inches="tight",
dpi=300,
transparent=True,
)
# now plot the mean and bootstrapped cis
df2 = pd.read_csv(Path(save_dir, "TTX_active_df_by_cell.csv"))
T = 0.2
df2["exp_stage"] = df2.expt + "_" + df2.stage
df2["neg_event_rate"] = (df2["n_neg_events"]) / (df2["obs_length"] * T)
df2["day_slip"] = df2.day.astype(str) + "_" + df2.slip.astype(str)
df2["neg_integ_rate"] = (
-1 * (df2["neg_integrated_events"]) / (df2["obs_length"] * T)
)
use2 = [x for x in np.unique(df2["exp_stage"]) if "washout" in x]
plot_washout_summary(
df2,
use2,
figsave,
filetype,
redo_stats=redo_stats,
key="neg_event_rate",
function=np.mean,
function_name="np.mean",
scale=3,
density=True,
)
plot_washout_summary(
df2,
use2,
figsave,
filetype,
redo_stats=False,
key="neg_event_rate",
function=np.mean,
function_name="np.mean",
scale=3,
density=False,
)
# plot_washout_summary(df2,use2,figsave,filetype,redo_stats = redo_stats,key = 'neg_integ_rate', function = np.mean,function_name = 'np.mean',scale = 3, density = True)
# plot_washout_summary(df2,use2,figsave,filetype,redo_stats = False,key = 'neg_integ_rate', function = np.mean,function_name = 'np.mean',scale = 3, density = False)
def plot_washout_summary(
df,
use,
figsave,
filetype,
redo_stats=True,
num_resamplings=10**6,
key="neg_event_rate",
function=np.mean,
function_name="np.mean",
scale=3,
density=True,
):
dfn = df.copy()
use_bool = np.array([np.any(x in use) for x in dfn.exp_stage])
dfn = dfn[use_bool]
pre = dfn[dfn.stage == "pre"][key].to_numpy()
post = dfn[dfn.stage == "post"][key].to_numpy()
wash = dfn[dfn.stage == "washout"][key].to_numpy()
ppre = dfn[dfn.stage == "pre"][[key, "day_slip"]]
ppost = dfn[dfn.stage == "post"][[key, "day_slip"]]
wwash = dfn[dfn.stage == "washout"][[key, "day_slip"]]
    bins = np.histogram(np.concatenate((pre, post, wash)) * 10**scale, bins=10)[1]
fig, axarr = plt.subplots(nrows=3)
c = 0.05
axarr[0].hist(
pre * 10**scale,
bins=bins,
log=True,
density=density,
label="pre TTX",
color=(c, c, c),
)
axarr[1].hist(
post * 10**scale,
bins=bins,
log=True,
density=density,
label="post 10 uM TTX",
color=(c, c, c),
)
axarr[2].hist(
wash * 10**scale,
bins=bins,
log=True,
density=density,
label="washout",
color=(c, c, c),
)
axarr[0].sharey(axarr[1])
axarr[2].sharey(axarr[1])
for idx, a in enumerate(axarr):
if not density:
a.set_ylim([0.6, 10**4.5])
a.set_yticks(10 ** np.arange(0, 4, 3))
a.legend(frameon=False, loc=(0.4, 0.4), fontsize=16)
pf.set_all_fontsize(a, 16)
if idx != 2:
a.set_xticklabels([])
if not density:
axarr[1].set_ylabel("Number of cells")
else:
axarr[1].set_ylabel("Proportion of cells")
if key == "neg_event_rate":
axarr[-1].set_xlabel("Negative event rate " + "(1000 cells$^{-1}$ s$^{-1}$)")
elif key == "neg_integ_rate":
axarr[-1].set_xlabel(
f"Integrated event rate per {10**scale} cells " + "(%$\cdot$s / s)"
)
else:
raise ValueError("wrong key")
fig.savefig(
Path(
figsave, "summary", f"TTX_washout_compare_density_{density}_{key}{filetype}"
),
bbox_inches="tight",
dpi=300,
transparent=True,
)
if redo_stats:
p_pre_post, _, f1 = statsf.bootstrap_test(
pre,
post,
function=function,
plot=True,
num_resamplings=num_resamplings,
names=["Pre TTX", "Post TTX"],
)
p_pre_wash, _, f2 = statsf.bootstrap_test_2sided(
wash,
pre,
function=function,
plot=True,
num_resamplings=num_resamplings,
names=["Pre TTX", "washout"],
)
p_wash_post, _, f3 = statsf.bootstrap_test(
wash,
post,
function=function,
plot=True,
num_resamplings=num_resamplings,
names=["Washout", "Post TTX"],
)
f1.savefig(
Path(
figsave, "summary", "bootstrap", f"bootstrap_pre_post_{key}{filetype}"
),
bbox_inches="tight",
dpi=300,
transparent=True,
)
f2.savefig(
Path(
figsave, "summary", "bootstrap", f"bootstrap_wash_pre_{key}{filetype}"
),
bbox_inches="tight",
dpi=300,
transparent=True,
)
f3.savefig(
Path(
figsave, "summary", "bootstrap", f"bootstrap_wash_post_{key}{filetype}"
),
bbox_inches="tight",
dpi=300,
transparent=True,
)
with open(
Path(figsave, "summary", f"statistical_test_results_washout_{key}.txt"), "w"
) as f:
f.write(f"{datetime.datetime.now()}\n")
f.write(
f"Testing significance of second less than first for function {function_name}\n"
)
f.write(f"N cells pre: {len(pre)}\n")
f.write(f"N cells post: {len(post)}\n")
f.write(f"N cells wash: {len(wash)}\n")
f.write(f'N slips pre: {len(np.unique(ppre["day_slip"]))}\n')
f.write(f'N slips post: {len(np.unique(ppost["day_slip"]))}\n')
f.write(f'N slips wash: {len(np.unique(wwash["day_slip"]))}\n')
f.write(f"Pre mean rate: {np.mean(pre)}\n")
f.write(f"Post mean rate: {np.mean(post)}\n")
f.write(f"Wash mean rate: {np.mean(wash)}\n")
f.write(f"Num resamples: {num_resamplings}\n")
f.write(f"p pre-post {p_pre_post}\n")
f.write(f"p pre-wash (2 sided) {p_pre_wash}\n")
f.write(f"p wash-post {p_wash_post}\n")
def plot_TTX_summary(
df,
use,
figsave,
filetype,
redo_stats=True,
num_resamplings=10**6,
key="neg_event_rate",
function=np.mean,
function_name="np.mean",
scale=3,
density=True,
):
dfn = df.copy()
use_bool = np.array([np.any(x in use) for x in dfn.exp_stage])
dfn = dfn[use_bool]
pre_10 = dfn[dfn.exp_stage == "TTX_10um_pre"][key].to_numpy()
post_10 = dfn[dfn.exp_stage == "TTX_10um_post"][key].to_numpy()
pre_1 = dfn[dfn.exp_stage == "TTX_1um_pre"][key].to_numpy()
post_1 = dfn[dfn.exp_stage == "TTX_1um_post"][key].to_numpy()
ppre_10 = dfn[dfn.exp_stage == "TTX_10um_pre"][[key, "day_slip"]]
ppost_10 = dfn[dfn.exp_stage == "TTX_10um_post"][[key, "day_slip"]]
ppre_1 = dfn[dfn.exp_stage == "TTX_1um_pre"][[key, "day_slip"]]
ppost_1 = dfn[dfn.exp_stage == "TTX_1um_post"][[key, "day_slip"]]
    bins_10 = np.histogram(np.concatenate((pre_10, post_10)) * 10**scale, bins=10)[1]
    bins_1 = np.histogram(np.concatenate((pre_1, post_1)) * 10**scale, bins=10)[1]
fig_10, axarr_10 = plt.subplots(nrows=2)
c = 0.05
axarr_10[0].hist(
pre_10 * 10**scale,
bins=bins_10,
log=True,
density=density,
label="pre TTX",
color=(c, c, c),
)
axarr_10[1].hist(
post_10 * 10**scale,
bins=bins_10,
log=True,
density=density,
label="post 10 uM TTX",
color=(c, c, c),
)
axarr_10[0].sharey(axarr_10[1])
for idx, a in enumerate(axarr_10):
if not density:
a.set_ylim([0.6, 10**4.5])
a.set_yticks(10 ** np.arange(0, 4, 3))
a.legend(frameon=False, loc=(0.4, 0.4), fontsize=16)
pf.set_all_fontsize(a, 16)
if idx != 1:
a.set_xticklabels([])
if not density:
axarr_10[1].set_ylabel("Number of cells")
else:
axarr_10[1].set_ylabel("Proportion of cells")
if key == "neg_event_rate":
axarr_10[-1].set_xlabel("Negative event rate " + "(1000 cells$^{-1}$ s$^{-1}$)")
elif key == "neg_integ_rate":
axarr_10[-1].set_xlabel(
f"Integrated event rate per {10**scale} cells " + "(%$\cdot$s / s)"
)
else:
raise ValueError("wrong key")
fig_10.savefig(
Path(figsave, "summary", f"TTX_10um_compare_density_{density}_{key}{filetype}"),
bbox_inches="tight",
dpi=300,
transparent=True,
)
if redo_stats:
p_pre_post_10, _, f1 = statsf.bootstrap_test(
pre_10,
post_10,
function=function,
plot=True,
num_resamplings=num_resamplings,
names=["Pre TTX", "Post 10 uM TTX"],
)
f1.savefig(
Path(figsave, "summary", "bootstrap", f"bootstrap_pre_10_{key}{filetype}"),
bbox_inches="tight",
dpi=300,
transparent=True,
)
with open(
Path(figsave, "summary", f"statistical_test_results_10uM_{key}.txt"), "w"
) as f:
f.write(f"{datetime.datetime.now()}\n")
f.write(
f"Testing significance of second less than first for function {function_name}\n"
)
f.write(f"N cells pre: {len(pre_10)}\n")
f.write(f"N cells post: {len(post_10)}\n")
f.write(f'N slips pre: {len(np.unique(ppre_10["day_slip"]))}\n')
f.write(f'N slips post: {len(np.unique(ppost_10["day_slip"]))}\n')
f.write(f"Pre mean rate: {np.mean(pre_10)}\n")
f.write(f"Post mean rate: {np.mean(post_10)}\n")
print("Hello")
f.write(f"Num resamples: {num_resamplings}\n")
f.write(f"p pre-post {p_pre_post_10}\n")
fig_1, axarr_1 = plt.subplots(nrows=2)
c = 0.05
axarr_1[0].hist(
pre_1 * 10**scale,
bins=bins_1,
log=True,
density=density,
label="pre TTX",
color=(c, c, c),
)
axarr_1[1].hist(
post_1 * 10**scale,
bins=bins_1,
log=True,
density=density,
label="post 1 uM TTX",
color=(c, c, c),
)
axarr_1[0].sharey(axarr_1[1])
for idx, a in enumerate(axarr_1):
if not density:
a.set_ylim([0.6, 10**4.5])
a.set_yticks(10 ** np.arange(0, 4, 3))
a.legend(frameon=False, loc=(0.4, 0.4), fontsize=16)
pf.set_all_fontsize(a, 16)
if idx != 1:
a.set_xticklabels([])
if not density:
axarr_1[1].set_ylabel("Number of cells")
else:
axarr_1[1].set_ylabel("Proportion of cells")
if key == "neg_event_rate":
axarr_1[-1].set_xlabel("Negative event rate " + "(1000 cells$^{-1}$ s$^{-1}$)")
elif key == "neg_integ_rate":
axarr_1[-1].set_xlabel(
f"Integrated event rate per {10**scale} cells " + "(%$\cdot$s / s)"
)
else:
raise ValueError("wrong key")
fig_1.savefig(
Path(figsave, "summary", f"TTX_1um_compare_density_{density}_{key}{filetype}"),
bbox_inches="tight",
dpi=300,
transparent=True,
)
if redo_stats:
p_pre_post_1, _, f1 = statsf.bootstrap_test(
pre_1,
post_1,
function=function,
plot=True,
num_resamplings=num_resamplings,
names=["Pre TTX", "Post 1 uM TTX"],
)
f1.savefig(
Path(figsave, "summary", "bootstrap", f"bootstrap_pre_1_{key}{filetype}"),
bbox_inches="tight",
dpi=300,
transparent=True,
)
with open(
Path(figsave, "summary", f"statistical_test_results_1uM_{key}.txt"), "w"
) as f:
f.write(f"{datetime.datetime.now()}\n")
f.write(
f"Testing significance of second less than first for function {function_name}\n"
)
f.write(f"N cells pre: {len(pre_1)}\n")
f.write(f"N cells post: {len(post_1)}\n")
f.write(f'N slips pre: {len(np.unique(ppre_1["day_slip"]))}\n')
f.write(f'N slips post: {len(np.unique(ppost_1["day_slip"]))}\n')
f.write(f"Pre mean rate: {np.mean(pre_1)}\n")
f.write(f"Post mean rate: {np.mean(post_1)}\n")
f.write(f"Num resamples: {num_resamplings}\n")
f.write(f"p pre-post {p_pre_post_1}\n")
def plot_events_TTX(
df,
use,
TTX_level=1,
log=True,
upper_lim=6.6,
lower_lim=0,
T=0.2,
nbins=20,
only_neg=True,
histtype="bar",
):
dfn = df.copy()
use = [x for x in use if f"{TTX_level}um" in x]
use_bool = np.array([np.any(x in use) for x in dfn.exp_stage])
dfn = dfn[use_bool]
too_big = np.abs(dfn.event_amplitude) > upper_lim / 100
too_small = np.abs(dfn.event_amplitude) < lower_lim / 100
dfn = dfn[np.logical_not(np.logical_or(too_big, too_small))]
if only_neg:
dfn = dfn[dfn["event_amplitude"] < 0]
length_bins = np.histogram(dfn["event_length"] * T, bins=nbins)[1]
if only_neg:
amp_bins = np.histogram(np.abs(dfn["event_amplitude"]) * 100, bins=nbins)[1]
else:
amp_bins = np.histogram(dfn["event_amplitude"] * 100, bins=nbins)[1]
neg = dfn[dfn.stage == "pre"]
pos = dfn[dfn.stage == "post"]
gs = gridspec.GridSpec(2, 2)
gs.update(wspace=0.3, hspace=0.3)
fig, axarr = plt.subplots(figsize=(8, 6))
ax0 = plt.subplot(gs[0])
if only_neg:
ax0.hist(
np.abs(neg["event_amplitude"]) * 100,
bins=amp_bins,
log=log,
label="Pre",
histtype=histtype,
)
ax0.hist(
np.abs(pos["event_amplitude"]) * 100,
bins=amp_bins,
log=log,
label=f"TTX {TTX_level}" + " $\mathrm{\mu}$M",
histtype=histtype,
)
else:
ax0.hist(
neg["event_amplitude"] * 100,
bins=amp_bins,
log=log,
label="Pre",
histtype=histtype,
)
ax0.hist(
pos["event_amplitude"] * 100,
bins=amp_bins,
log=log,
label=f"TTX {TTX_level}" + " $\mathrm{\mu}$M",
histtype=histtype,
)
ax0.set_xlabel("Absolute event amplitude (% $\Delta$R/R$_0$)")
ax0.set_ylabel("Observed Frequency")
ax0.legend(frameon=False)
ax1 = plt.subplot(gs[1])
ax1.hist(
np.abs(neg["event_length"]) * T,
bins=length_bins,
log=log,
label="Pre",
histtype=histtype,
)
ax1.hist(
np.abs(pos["event_length"]) * T,
bins=length_bins,
log=log,
label=f"TTX {TTX_level}" + " $\mathrm{\mu}$M",
histtype=histtype,
)
ax1.set_xlabel("Event duration (s)")
ax1.set_ylabel("Observed Frequency")
ax1.legend(frameon=False)
if log:
norm = mpl.colors.LogNorm()
else:
norm = None
ax2 = plt.subplot(gs[2])
if only_neg:
h = ax2.hist2d(
np.abs(neg["event_amplitude"]) * 100,
neg["event_length"] * T,
bins=(amp_bins, length_bins),
norm=norm,
)
else:
h = ax2.hist2d(
neg["event_amplitude"] * 100,
neg["event_length"] * T,
bins=(amp_bins, length_bins),
norm=norm,
)
plt.colorbar(h[3])
ax2.set_xlabel("Pre-TTX event amplitude (% $\Delta$R/R$_0$)")
ax2.set_ylabel("Event duration (s)")
ax3 = plt.subplot(gs[3])
if only_neg:
h2 = ax3.hist2d(
np.abs(pos["event_amplitude"]) * 100,
pos["event_length"] * T,
bins=(amp_bins, length_bins),
norm=norm,
)
else:
h2 = ax3.hist2d(
pos["event_amplitude"] * 100,
pos["event_length"] * T,
bins=(amp_bins, length_bins),
norm=norm,
)
plt.colorbar(h2[3])
ax3.set_xlabel("Post-TTX event amplitude (% $\Delta$R/R$_0$)")
ax3.set_ylabel("Event duration (s)")
# get number of events before/after TTX
thresh = -2
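    # count events whose amplitude falls in histogram bins below the -2% dR/R0 edge
    # (i.e. the large negative events written to num_bigneg_events.txt below)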
iid = np.argwhere(h[1] > thresh)[0][0]
n_events_pre = np.sum(h[0][:iid, :])
n_events_post = np.sum(h2[0][:iid, :])
with open(
Path(
"/home/peter/Dropbox/Papers/cancer/v2/ttx_figure", "num_bigneg_events.txt"
),
"w",
) as f:
f.write(f"{datetime.datetime.now()}\n")
f.write(f"Number events in bins up to edge at {h[1][iid]:.3f} %\n")
f.write(f"pre: {n_events_pre} \n")
f.write(f"post: {n_events_post} \n")
return fig
def plot_events_TTX_washout(
df,
use,
log=True,
upper_lim=6.6,
lower_lim=0,
T=0.2,
nbins=20,
only_neg=True,
histtype="bar",
):
dfn = df.copy()
use_bool = np.array([np.any(x in use) for x in dfn.exp_stage])
dfn = dfn[use_bool]
too_big = np.abs(dfn.event_amplitude) > upper_lim / 100
too_small = np.abs(dfn.event_amplitude) < lower_lim / 100
dfn = dfn[np.logical_not(np.logical_or(too_big, too_small))]
if only_neg:
dfn = dfn[dfn["event_amplitude"] < 0]
amp_bins = np.histogram(np.abs(dfn["event_amplitude"]) * 100, bins=nbins)[1]
else:
amp_bins = np.histogram(dfn["event_amplitude"] * 100, bins=nbins)[1]
length_bins = np.histogram(dfn["event_length"] * T, bins=nbins)[1]
neg = dfn[dfn.stage == "pre"]
pos = dfn[dfn.stage == "post"]
wash = dfn[dfn.stage == "washout"]
gs = gridspec.GridSpec(2, 3)
gs.update(wspace=0.3, hspace=0.3)
fig, axarr = plt.subplots(figsize=(8, 6))
ax0 = plt.subplot(gs[0])
if only_neg:
ax0.hist(
np.abs(neg["event_amplitude"]) * 100,
bins=amp_bins,
log=log,
label="Pre",
histtype=histtype,
)
ax0.hist(
np.abs(pos["event_amplitude"]) * 100,
bins=amp_bins,
log=log,
label="TTX 10 $\mathrm{\mu}$M",
histtype=histtype,
)
ax0.hist(
| np.abs(wash["event_amplitude"]) | numpy.abs |
import sys
sys.path.append('/home/ross/CytoPy')
from CytoPy.data.mongo_setup import global_init
from CytoPy.flow.gating import utilities
from CytoPy.tests import make_example_date
from sklearn.neighbors import KernelDensity
from scipy.signal import find_peaks
from itertools import combinations
import numpy as np
import pandas as pd
import unittest
global_init('test')
class TestCheckPeak(unittest.TestCase):
def test(self):
probs = np.array([0, 0, 0, 0.05, 0, 0, 2, 0, 0, 3, 0, 0, 0.05])
        peaks = np.where(np.array(probs)
#import numpy as np
from numpy import sqrt, zeros, pi, arctan, asarray, arange, delete, sum, multiply, sin, cos
def FT(k, exafs, rmin, rmax, dr):
con = sqrt(2 / pi)
nn = len(k)
rx = zeros(int((rmax - rmin) / dr), float)
exafs_re = zeros(nn, float)
# exafs_im = zeros(nn, float)
# transform_re = zeros(nn, float)
# transform_im = zeros(nn, float)
# fourier_re = zeros(int((rmax - rmin) / dr), float)
# fourier_im = zeros(int((rmax - rmin) / dr), float)
# sn = zeros(nn, float)
# cs = zeros(nn, float)
    exafs_re = asarray(exafs, float)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import numpy as np
import scipy.misc
import itertools
from math import pow
import seaborn as sns
import matplotlib.pyplot as plt
from utils import generate_vel_list, generate_vel_list_3d
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
class Data_Generator(object):
"""
    Generate two- and three-dimensional simulated trajectory data.
Randomly sample \mu from uniform distribution.
Velocity is fixed.
Place vector is generated from a Gaussian distribution.
"""
def __init__(self, num_interval=1000, min=0, max=1, shape="square", to_use_3D_map=False):
"""
        num_interval: number of lattice points per dimension; min/max: coordinate range;
        shape: environment shape ("square", "circle" or "triangle").
"""
self.num_interval = num_interval
self.min, self.max = min, max
self.interval_length = (self.max - self.min) / (self.num_interval - 1)
self.shape = shape
def generate(self, num_data, max_vel=3, min_vel=0, num_step=1, dtype=2, test=False, visualize=False, motion_type='continuous'):
if dtype == 1:
place_pair = self.generate_two_dim_multi_type1(num_data)
elif dtype == 2:
place_pair = self.generate_two_dim_multi_type2(num_data, max_vel, min_vel, num_step,
test=test, visualize=visualize, motion_type=motion_type)
else:
raise NotImplementedError
return place_pair
def generate_3d(self, num_data, max_vel=3, min_vel=0, num_step=1, dtype=2, test=False, visualize=False, motion_type='continuous'):
if dtype == 1:
place_pair = self.generate_three_dim_multi_type1(num_data)
elif dtype == 2:
place_pair = self.generate_three_dim_multi_type2(num_data, max_vel, min_vel, num_step,
test=test, visualize=visualize, motion_type=motion_type)
else:
raise NotImplementedError
return place_pair
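    # Example (illustrative): 2-step continuous trajectories in a square environment
    #   gen = Data_Generator(num_interval=40, shape="square")
    #   seq = gen.generate(10000, max_vel=3, num_step=2, dtype=2, motion_type='continuous')
    #   seq['seq'].shape -> (10000, 3, 2), seq['vel'].shape -> (10000, 2, 2)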
def generate_two_dim_multi_type1(self, num_data):
if self.shape == "square":
mu_before = np.random.random(size=[num_data, 2]) * (self.num_interval - 1)
mu_after = np.random.random(size=[num_data, 2]) * (self.num_interval - 1)
elif self.shape == "circle":
mu_seq = np.random.random(size=[num_data * 3, 2])
select_idx = np.where(np.sqrt((mu_seq[:, 0] - 0.5) ** 2 + (mu_seq[:, 1] - 0.5) ** 2) < 0.5)[0]
mu_seq = mu_seq[select_idx[:num_data * 2]]
mu_before = mu_seq[:num_data] * (self.num_interval - 1)
mu_after = mu_seq[num_data:] * (self.num_interval - 1)
elif self.shape == "triangle":
mu_seq = np.random.random(size=[int(num_data * 4.2), 2])
x, y = mu_seq[:, 0], mu_seq[:, 1]
select_idx = np.where((x + 2 * y > 1) * (-x + 2 * y < 1))[0]
mu_seq = mu_seq[select_idx[:num_data * 2]]
mu_before = mu_seq[:num_data] * (self.num_interval - 1)
mu_after = mu_seq[num_data:] * (self.num_interval - 1)
else:
raise NotImplementedError
vel = np.sqrt(np.sum((mu_after - mu_before) ** 2, axis=1)) * self.interval_length
place_pair = {'before': mu_before, 'after': mu_after, 'vel': vel}
assert len(mu_before) == num_data
return place_pair
def generate_three_dim_multi_type1(self, num_data):
if self.shape == "square":
mu_before = np.random.random(size=[num_data, 3]) * (self.num_interval - 1)
mu_after = np.random.random(size=[num_data, 3]) * (self.num_interval - 1)
else:
raise NotImplementedError
vel = np.sqrt(np.sum((mu_after - mu_before) ** 2, axis=1)) * self.interval_length
place_pair = {'before': mu_before, 'after': mu_after, 'vel': vel}
assert len(mu_before) == num_data
return place_pair
def generate_two_dim_multi_type2(self, num_data, max_vel, min_vel, num_step, motion_type, test=False, visualize=False):
"""sample discretized motions and corresponding place pairs"""
vel_idx = None
if not test and motion_type == 'discrete':
velocity = generate_vel_list(max_vel)
num_vel = len(velocity)
if pow(num_vel, num_step) < num_data:
vel_list = np.asarray(list(itertools.product(np.arange(num_vel), repeat=num_step)))
num_vel_list = len(vel_list)
div, rem = num_data // num_vel_list, num_data % num_vel_list
vel_idx = np.vstack((np.tile(vel_list, [div, 1]), vel_list[np.random.choice(num_vel_list, size=rem)]))
np.random.shuffle(vel_idx)
else:
vel_idx = np.random.choice(num_vel, size=[num_data, num_step])
vel_grid = np.take(velocity, vel_idx, axis=0)
vel = vel_grid * self.interval_length
vel_grid_cumsum = np.cumsum(vel_grid, axis=1)
mu_max = np.fmin(self.num_interval, np.min(self.num_interval - vel_grid_cumsum, axis=1))
mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))
mu_start = np.expand_dims(np.random.random(size=(num_data, 2)) * (mu_max - 1 - mu_min) + mu_min, axis=1)
# mu_start = np.random.sample(size=[num_data, 2])
# mu_start = np.expand_dims(np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)
mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)
elif not test:
if self.shape == "square":
num_data_sample = num_data
elif self.shape == "circle":
num_data_sample = int(num_data * 1.5)
elif self.shape == "triangle":
num_data_sample = int(num_data * 4)
else:
raise NotImplementedError
theta = np.random.random(size=(num_data_sample, num_step)) * 2 * np.pi - np.pi
length = np.sqrt(np.random.random(size=(num_data_sample, num_step))) * (max_vel - min_vel) + min_vel
x = length * np.cos(theta)
y = length * np.sin(theta)
vel_seq = np.concatenate((np.expand_dims(x, axis=-1), np.expand_dims(y, axis=-1)), axis=-1)
vel_seq_cumsum = np.cumsum(vel_seq, axis=1)
mu_max = np.fmin(self.num_interval - 1, np.min(self.num_interval - 1 - vel_seq_cumsum, axis=1))
mu_min = np.fmax(0, np.max(-vel_seq_cumsum, axis=1))
start = np.random.random(size=(num_data_sample, 2)) * (mu_max - mu_min) + mu_min
start = np.expand_dims(start, axis=1)
mu_seq = np.concatenate((start, start + vel_seq), axis=1)
vel = vel_seq * self.interval_length
if self.shape == "circle":
mu_seq_len = mu_seq * self.interval_length
select_idx = np.sqrt((mu_seq_len[:, :, 0] - 0.5) ** 2 + (mu_seq_len[:, :, 1] - 0.5) ** 2) > 0.5
select_idx = np.where(np.sum(select_idx, axis=1) == 0)[0]
mu_seq = mu_seq[select_idx[:num_data]]
vel = vel[select_idx[:num_data]]
elif self.shape == "triangle":
mu_seq_len = mu_seq * self.interval_length
x, y = mu_seq_len[:, :, 0], mu_seq_len[:, :, 1]
select_idx = (x + 2 * y > 1) * (-x + 2 * y < 1)
select_idx = np.where(np.sum(select_idx, axis=1) == num_step + 1)[0]
mu_seq = mu_seq[select_idx[:num_data]]
vel = vel[select_idx[:num_data]]
else:
velocity = generate_vel_list(max_vel, min_vel)
num_vel = len(velocity)
if visualize:
mu_start = np.reshape([20, 20], newshape=(1, 1, 2))
vel_pool = np.where((velocity[:, 0] >= -1) & (velocity[:, 1] >= -1))
vel_idx = np.random.choice(vel_pool[0], size=[num_data * 10, num_step])
vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)
mu_seq = np.concatenate((np.tile(mu_start, [num_data * 10, 1, 1]), vel_grid_cumsum + mu_start), axis=1)
mu_seq_new, vel_idx_new = [], []
for i in range(len(mu_seq)):
mu_seq_sub = mu_seq[i]
if len(np.unique(mu_seq_sub, axis=0)) == len(mu_seq_sub):
mu_seq_new.append(mu_seq[i])
vel_idx_new.append(vel_idx[i])
mu_seq, vel_idx = np.stack(mu_seq_new, axis=0), np.stack(vel_idx_new, axis=0)
mu_seq_rs = np.reshape(mu_seq, [-1, (num_step + 1) * 2])
select_idx = np.where(np.sum(mu_seq_rs >= self.num_interval, axis=1) == 0)[0][:num_data]
vel_idx = vel_idx[select_idx]
mu_seq = mu_seq[select_idx]
vel = np.take(velocity, vel_idx, axis=0) * self.interval_length
else:
vel_idx = np.random.choice(num_vel, size=[num_data * 20, num_step])
vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)
mu_max = np.fmin(self.num_interval, np.min(self.num_interval - vel_grid_cumsum, axis=1))
mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))
select_idx = np.where(np.sum(mu_max <= mu_min, axis=1) == 0)[0][:num_data]
vel_idx, vel_grid_cumsum = vel_idx[select_idx], vel_grid_cumsum[select_idx]
vel_grid = np.take(velocity, vel_idx, axis=0)
mu_max, mu_min = mu_max[select_idx], mu_min[select_idx]
mu_start = np.random.sample(size=[num_data, 2])
mu_start = np.expand_dims(np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)
mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)
vel = vel_grid * self.interval_length
# sns.distplot(vel, rug=True, hist=False)
# plt.show()
assert len(mu_seq) == num_data
place_seq = {'seq': mu_seq, 'vel': vel, 'vel_idx': vel_idx}
return place_seq
def generate_three_dim_multi_type2(self, num_data, max_vel, min_vel, num_step, motion_type, test=False, visualize=False):
"""sample discretized motions and corresponding place pairs"""
vel_idx = None
if not test and motion_type == 'discrete':
velocity = generate_vel_list_3d(max_vel)
num_vel = len(velocity)
if pow(num_vel, num_step) < num_data:
vel_list = np.asarray(list(itertools.product(np.arange(num_vel), repeat=num_step)))
num_vel_list = len(vel_list)
div, rem = num_data // num_vel_list, num_data % num_vel_list
vel_idx = np.vstack((np.tile(vel_list, [div, 1]), vel_list[np.random.choice(num_vel_list, size=rem)]))
np.random.shuffle(vel_idx)
else:
vel_idx = np.random.choice(num_vel, size=[num_data, num_step])
vel_grid = np.take(velocity, vel_idx, axis=0)
vel = vel_grid * self.interval_length
vel_grid_cumsum = np.cumsum(vel_grid, axis=1)
mu_max = np.fmin(self.num_interval, np.min(self.num_interval - vel_grid_cumsum, axis=1))
mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))
mu_start = np.expand_dims(np.random.random(size=(num_data, 2)) * (mu_max - 1 - mu_min) + mu_min, axis=1)
# mu_start = np.random.sample(size=[num_data, 2])
# mu_start = np.expand_dims(np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)
mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)
elif not test:
if self.shape == "square":
num_data_sample = num_data
else:
raise NotImplementedError
#theta = np.random.random(size=(num_data_sample, num_step)) * 2 * np.pi - np.pi
#length = np.sqrt(np.random.random(size=(num_data_sample, num_step))) * (max_vel - min_vel) + min_vel
#x = length * np.cos(theta)
#y = length * np.sin(theta)
#vel_seq = np.concatenate((np.expand_dims(x, axis=-1), np.expand_dims(y, axis=-1)), axis=-1)
x1 = np.random.standard_normal(size=(num_data_sample, num_step))
y1 = np.random.standard_normal(size=(num_data_sample, num_step))
z1 = np.random.standard_normal(size=(num_data_sample, num_step))
v = np.sqrt(x1**2 + y1 ** 2 + z1 ** 2)
length = np.cbrt(np.random.random(size=(num_data_sample, num_step))) * (max_vel - min_vel) + min_vel
x = length * x1 / v
y = length * y1 / v
z = length * z1 / v
vel_seq = np.concatenate((np.expand_dims(x, axis=-1), np.expand_dims(y, axis=-1), np.expand_dims(z, axis=-1)), axis=-1)
# from matplotlib import pyplot
# from mpl_toolkits.mplot3d import Axes3D
# fig = pyplot.figure()
# ax = Axes3D(fig)
# ax.scatter(x[:30000], y[0:30000], z[0:30000])
# pyplot.show()
vel_seq_cumsum = np.cumsum(vel_seq, axis=1)
mu_max = np.fmin(self.num_interval - 1, np.min(self.num_interval - 1 - vel_seq_cumsum, axis=1))
mu_min = np.fmax(0, np.max(-vel_seq_cumsum, axis=1))
start = np.random.random(size=(num_data_sample, 3)) * (mu_max - mu_min) + mu_min
start = np.expand_dims(start, axis=1)
mu_seq = np.concatenate((start, start + vel_seq), axis=1)
vel = vel_seq * self.interval_length
if self.shape == "circle":
mu_seq_len = mu_seq * self.interval_length
select_idx = np.sqrt((mu_seq_len[:, :, 0] - 0.5) ** 2 + (mu_seq_len[:, :, 1] - 0.5) ** 2) > 0.5
select_idx = np.where(np.sum(select_idx, axis=1) == 0)[0]
mu_seq = mu_seq[select_idx[:num_data]]
vel = vel[select_idx[:num_data]]
elif self.shape == "triangle":
mu_seq_len = mu_seq * self.interval_length
x, y = mu_seq_len[:, :, 0], mu_seq_len[:, :, 1]
select_idx = (x + 2 * y > 1) * (-x + 2 * y < 1)
select_idx = np.where(np.sum(select_idx, axis=1) == num_step + 1)[0]
mu_seq = mu_seq[select_idx[:num_data]]
vel = vel[select_idx[:num_data]]
else:
velocity = generate_vel_list_3d(max_vel, min_vel)
num_vel = len(velocity)
if visualize:
mu_start = np.reshape([20, 20, 20], newshape=(1, 1, 1, 3))
vel_pool = np.where((velocity[:, 0] >= -1) & (velocity[:, 1] >= -1) & (velocity[:, 2] >= -1))
vel_idx = np.random.choice(vel_pool[0], size=[num_data * 10, num_step])
vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)
mu_seq = np.concatenate((np.tile(mu_start, [num_data * 10, 1, 1, 1]), vel_grid_cumsum + mu_start), axis=1)
mu_seq_new, vel_idx_new = [], []
for i in range(len(mu_seq)):
mu_seq_sub = mu_seq[i]
if len(np.unique(mu_seq_sub, axis=0)) == len(mu_seq_sub):
mu_seq_new.append(mu_seq[i])
vel_idx_new.append(vel_idx[i])
                mu_seq, vel_idx = np.stack(mu_seq_new, axis=0), np.stack(vel_idx_new, axis=0)
from collections import namedtuple
import numpy as np
import torch
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
Accuracy = namedtuple('Accuracy', ['unweighted', 'weighted', 'worst', 'accuracy_classes'])
def compute_accuracy_tuple(y, labels):
labels = labels.ravel()
n_labels = len(np.unique(labels))
classes_probabilities = []
accuracy_classes = []
for cl in range(n_labels):
idx = labels == cl
classes_probabilities += [np.mean(idx)]
accuracy_classes += [np.mean((labels[idx] == y[idx])) if classes_probabilities[-1] else 0]
# This is also referred to as the "recall": p = n_true_positive / (n_false_negative + n_true_positive)
# ( We could also compute the "precision": p = n_true_positive / (n_false_positive + n_true_positive) )
accuracy_named_tuple = Accuracy(
        unweighted=np.dot(accuracy_classes, classes_probabilities),
import os
import sys
import numpy as np
import time
import matplotlib.pyplot as plt
import pandas as pd
from utils import *
def sliding_dot_product(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m:n]
def sliding_dot_product_stomp(q, t):
n = t.size
m = q.size
# Append t with n zeros
ta = np.append(t, np.zeros(n))
# Reverse Q
qr = np.flip(q, 0)
# Append qra
qra = np.append(qr, np.zeros(2 * n - m))
# Compute FFTs
qraf = np.fft.fft(qra)
taf = np.fft.fft(ta)
# Compute the inverse FFT to the element-wise multiplication of qraf and taf
qt = np.fft.ifft(np.multiply(qraf, taf))
return qt[m - 1:n]
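# Sanity check (illustrative; not part of the original module): these FFT-based products
# match a direct correlation. With q of length m and t of length n,
#   sliding_dot_product_stomp(q, t).real  ~= np.correlate(t, q, mode='valid')        # all n-m+1 window dot products
#   sliding_dot_product(q, t).real        ~= np.correlate(t, q, mode='valid')[1:]    # same values, starting at the 2nd window
# e.g. q, t = np.random.randn(8), np.random.randn(128)
#      assert np.allclose(sliding_dot_product_stomp(q, t).real, np.correlate(t, q, mode='valid'))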
def calculate_distance_profile(q, t, qt, a, sum_q, sum_q2, mean_t, sigma_t):
n = t.size
m = q.size
b = np.zeros(n - m)
dist = np.zeros(n - m)
for i in range(0, n - m):
b[i] = -2 * (qt[i].real - sum_q * mean_t[i]) / sigma_t[i]
dist[i] = a[i] + b[i] + sum_q2
return np.sqrt(np.abs(dist))
# The code below takes O(m) for each subsequence
# you should replace it for MASS
def compute_mean_std_for_query(Q):
# Compute Q stats -- O(n)
sumQ = np.sum(Q)
sumQ2 = np.sum(np.power(Q, 2))
return sumQ, sumQ2
def pre_compute_mean_std_for_TS(ta, m):
na = len(ta)
sum_t = np.zeros(na - m)
sum_t2 = np.zeros(na - m)
# Compute the stats for t
cumulative_sum_t = np.cumsum(ta)
cumulative_sum_t2 = np.cumsum(np.power(ta, 2))
for i in range(na - m):
sum_t[i] = cumulative_sum_t[i + m] - cumulative_sum_t[i]
sum_t2[i] = cumulative_sum_t2[i + m] - cumulative_sum_t2[i]
mean_t = np.divide(sum_t, m)
mean_t2 = np.divide(sum_t2, m)
mean_t_p2 = np.power(mean_t, 2)
sigma_t2 = np.subtract(mean_t2, mean_t_p2)
sigma_t = np.sqrt(sigma_t2)
return sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2
def pre_compute_mean_std_for_TS_stomp(ta, m):
na = len(ta)
# Compute the stats for t
cumulative_sum_t = np.cumsum(ta)
cumulative_sum_t2 = np.cumsum(np.power(ta, 2))
sum_t = (cumulative_sum_t[m - 1:na] - np.concatenate(([0], cumulative_sum_t[0:na - m])))
sum_t2 = (cumulative_sum_t2[m - 1:na] - np.concatenate(([0], cumulative_sum_t2[0:na - m])))
mean_t = np.divide(sum_t, m)
mean_t2 = np.divide(sum_t2, m)
mean_t_p2 = np.power(mean_t, 2)
sigma_t2 = np.subtract(mean_t2, mean_t_p2)
sigma_t = np.sqrt(sigma_t2)
return sum_t, sum_t2, mean_t, mean_t2, mean_t_p2, sigma_t, sigma_t2
# MUEEN’S ALGORITHM FOR SIMILARITY SEARCH (MASS)
def mass(Q, T, a, meanT, sigmaT):
# Z-Normalisation
if np.std(Q) != 0:
Q = (Q - np.mean(Q)) / np.std(Q)
QT = sliding_dot_product(Q, T)
sumQ, sumQ2 = compute_mean_std_for_query(Q)
return calculate_distance_profile(Q, T, QT, a, sumQ, sumQ2, meanT, sigmaT)
def element_wise_min(Pab, Iab, D, idx, ignore_trivial, m):
for i in range(0, len(D)):
if not ignore_trivial or (
np.abs(idx - i) > m / 2.0): # if it's a self-join, ignore trivial matches in [-m/2,m/2]
if D[i] < Pab[i]:
Pab[i] = D[i]
Iab[i] = idx
return Pab, Iab
def stamp(Ta, Tb, m):
"""
Compute the Matrix Profile between time-series Ta and Tb.
If Ta==Tb, the operation is a self-join and trivial matches are ignored.
:param Ta: time-series, np.array
:param Tb: time-series, np.array
:param m: subsequence length
:return: Matrix Profile, Nearest-Neighbor indexes
"""
nb = len(Tb)
na = len(Ta)
Pab = np.ones(na - m) * np.inf
Iab = np.zeros(na - m)
idxes = np.arange(nb - m + 1)
sumT, sumT2, meanT, meanT_2, meanTP2, sigmaT, sigmaT2 = pre_compute_mean_std_for_TS(Ta, m)
a = np.zeros(na - m)
for i in range(0, na - m):
a[i] = (sumT2[i] - 2 * sumT[i] * meanT[i] + m * meanTP2[i]) / sigmaT2[i]
ignore_trivial = np.atleast_1d(Ta == Tb).all()
for idx in idxes:
D = mass(Tb[idx: idx + m], Ta, a, meanT, sigmaT)
if (ignore_trivial):
# ignore trivial minimum and maximum
minIdx = int(np.maximum(idx - m / 2.0, 0))
maxIdx = int(np.minimum(idx + m / 2.0, len(D)))
D[minIdx:maxIdx:1] = np.inf
        Iab[Pab > D] = idx
Pab = np.minimum(Pab, D)
return Pab, Iab
def stomp(T, m):
"""
Compute the Matrix Profile with self join for T
:param T: time-series, np.array
:param Tb: time-series, np.array
:param m: subsequence length
:return: Matrix Profile, Nearest-Neighbor indexes
"""
epsilon = 1e-10
n = len(T)
seq_l = n - m
_, _, meanT, _, _, sigmaT, _ = pre_compute_mean_std_for_TS_stomp(T, m)
Pab = np.full(seq_l + 1, np.inf)
Iab = np.zeros(n - m + 1)
ignore_trivial = True
for idx in range(0, seq_l):
        # guard against zero standard deviation when z-normalizing the current window
Q_std = sigmaT[idx] if sigmaT[idx] > epsilon else epsilon
if idx == 0:
QT = sliding_dot_product_stomp(T[0:m], T).real
QT_first = np.copy(QT)
else:
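            # STOMP O(1)-per-window update: QT_new[i] = QT_prev[i-1] - T[i-1]*T[idx-1] + T[i+m-1]*T[idx+m-1]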
QT[1:] = QT[0:-1] - (T[0:seq_l] * T[idx - 1]) + (T[m:n] * T[idx + m - 1])
QT[0] = QT_first[idx]
# Calculate distance profile
D = (2 * (m - (QT - m * meanT * meanT[idx]) / (Q_std * sigmaT)))
D[D < epsilon] = 0
if (ignore_trivial):
# ignore trivial minimum and maximum
minIdx = int(np.maximum(idx - m / 2.0, 0))
maxIdx = int(np.minimum(idx + m / 2.0, len(D)))
D[minIdx:maxIdx:1] = np.inf
Iab[Pab > D] = idx
np.minimum(Pab, D, Pab)
np.sqrt(Pab, Pab)
return Pab, Iab
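# Minimal usage sketch (illustrative values; assumes only this module and numpy):
#   t = np.sin(np.linspace(0, 20 * np.pi, 1000))
#   t[500:520] += 2.0                       # inject an anomaly
#   mp, nn_idx = stomp(t, 50)               # self-join matrix profile with window length m=50
#   discord_idx = np.argmax(mp)             # largest profile value -> most anomalous window
#   motif_idx = np.argmin(mp)               # smallest profile value -> best-repeated window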
# Quick Test
# def test_stomp(Ta, m):
# start_time = time.time()
#
# Pab, Iab = stomp(Ta, m)
# print("--- %s seconds ---" % (time.time() - start_time))
# plot_motif(Ta, Pab, Iab, m)
# return Pab, Iab
# Quick Test
# def test_stamp(Ta, Tb, m):
# start_time = time.time()
#
# Pab, Iab = stamp(Ta, Tb, m)
# print("--- %s seconds ---" % (time.time() - start_time))
#
# plot_discord(Ta, Pab, Iab, m, )
# return Pab, Iab
def plot_motif(Ta, values, indexes, m):
from matplotlib import gridspec
plt.figure(figsize=(8, 4))
plt.subplot(211)
plt.plot(Ta, linestyle='--', alpha=0.5)
plt.xlim((0, len(Ta)))
print(np.argmax(values))
plt.plot(range(np.argmin(values), np.argmin(values) + m), Ta[np.argmin(values):np.argmin(values) + m], c='g',
label='Top Motif')
    plt.plot(range(np.argmax(values), np.argmax(values) + m), Ta[np.argmax(values):np.argmax(values) + m], c='r',
             label='Top Discord')
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tests for rollout function."""
from absl.testing import absltest
from absl.testing import parameterized
import mujoco
import numpy as np
import concurrent.futures
import threading
from mujoco import rollout
#--------------------------- models used for testing ---------------------------
TEST_XML = r"""
<mujoco>
<worldbody>
<light pos="0 0 2"/>
<geom type="plane" size="5 5 .1"/>
<body pos="0 0 .1">
<joint name="yaw" axis="0 0 1"/>
<joint name="pitch" axis="0 1 0"/>
<geom type="capsule" size=".02" fromto="0 0 0 1 0 0"/>
<geom type="box" pos="1 0 0" size=".1 .1 .1"/>
<site name="site" pos="1 0 0"/>
</body>
</worldbody>
<actuator>
<general joint="pitch" gainprm="100"/>
<general joint="yaw" dyntype="filter" dynprm="1" gainprm="100"/>
</actuator>
<sensor>
<accelerometer site="site"/>
</sensor>
</mujoco>
"""
TEST_XML_NO_SENSORS = r"""
<mujoco>
<worldbody>
<light pos="0 0 2"/>
<geom type="plane" size="5 5 .1"/>
<body pos="0 0 .1">
<joint name="yaw" axis="0 0 1"/>
<joint name="pitch" axis="0 1 0"/>
<geom type="capsule" size=".02" fromto="0 0 0 1 0 0"/>
<geom type="box" pos="1 0 0" size=".1 .1 .1"/>
<site name="site" pos="1 0 0"/>
</body>
</worldbody>
<actuator>
<general joint="pitch" gainprm="100"/>
<general joint="yaw" dyntype="filter" dynprm="1" gainprm="100"/>
</actuator>
</mujoco>
"""
TEST_XML_NO_ACTUATORS = r"""
<mujoco>
<worldbody>
<light pos="0 0 2"/>
<geom type="plane" size="5 5 .1"/>
<body pos="0 0 .1">
<joint name="yaw" axis="0 0 1"/>
<joint name="pitch" axis="0 1 0"/>
<geom type="capsule" size=".02" fromto="0 0 0 1 0 0"/>
<geom type="box" pos="1 0 0" size=".1 .1 .1"/>
<site name="site" pos="1 0 0"/>
</body>
</worldbody>
<sensor>
<accelerometer site="site"/>
</sensor>
</mujoco>
"""
TEST_XML_MOCAP = r"""
<mujoco>
<worldbody>
<body name="1" mocap="true">
</body>
<body name="2" mocap="true">
</body>
</worldbody>
<sensor>
<framepos objtype="xbody" objname="1"/>
<framequat objtype="xbody" objname="2"/>
</sensor>
</mujoco>
"""
TEST_XML_EMPTY = r"""
<mujoco>
</mujoco>
"""
ALL_MODELS = {'TEST_XML': TEST_XML,
'TEST_XML_NO_SENSORS': TEST_XML_NO_SENSORS,
'TEST_XML_NO_ACTUATORS': TEST_XML_NO_ACTUATORS,
'TEST_XML_EMPTY': TEST_XML_EMPTY}
#------------------------------- tests -----------------------------------------
class MuJoCoRolloutTest(parameterized.TestCase):
def setUp(self):
super().setUp()
np.random.seed(42)
#----------------------------- test basic operation
@parameterized.parameters(ALL_MODELS.keys())
def test_single_step(self, model_name):
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
initial_state = np.random.randn(model.nq + model.nv + model.na)
ctrl = np.random.randn(model.nu)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
mujoco.mj_resetData(model, data)
py_state, py_sensordata = step(model, data, initial_state, ctrl=ctrl)
np.testing.assert_array_equal(state, py_state)
np.testing.assert_array_equal(sensordata, py_sensordata)
@parameterized.parameters(ALL_MODELS.keys())
def test_single_rollout(self, model_name):
nstep = 3 # number of timesteps
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
initial_state = np.random.randn(model.nq + model.nv + model.na)
ctrl = np.random.randn(nstep, model.nu)
state, sensordata = rollout.rollout(model, data, initial_state, ctrl)
py_state, py_sensordata = single_rollout(model, data, initial_state,
ctrl=ctrl)
np.testing.assert_array_equal(state, np.asarray(py_state))
np.testing.assert_array_equal(sensordata, np.asarray(py_sensordata))
@parameterized.parameters(ALL_MODELS.keys())
def test_multi_step(self, model_name):
model = mujoco.MjModel.from_xml_string(ALL_MODELS[model_name])
data = mujoco.MjData(model)
nstate = 5 # number of initial states
    initial_state = np.random.randn(nstate, model.nq + model.nv + model.na)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
class SEIRSModel():
"""
A class to simulate the Deterministic SEIRS Model
===================================================
Params: beta Rate of transmission (exposure)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
beta_D Rate of transmission (exposure) for individuals with detected infections
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
psi_I Probability of positive test results for exposed individuals
q Probability of quarantined individuals interacting with others
initE Init number of exposed individuals
initI Init number of infectious individuals
                initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, initN, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, p=0,
beta_D=None, sigma_D=None, gamma_D=None, mu_D=None,
theta_E=0, theta_I=0, psi_E=0, psi_I=0, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beta
self.sigma = sigma
self.gamma = gamma
self.xi = xi
self.mu_I = mu_I
self.mu_0 = mu_0
self.nu = nu
self.p = p
# Testing-related parameters:
self.beta_D = beta_D if beta_D is not None else self.beta
self.sigma_D = sigma_D if sigma_D is not None else self.sigma
self.gamma_D = gamma_D if gamma_D is not None else self.gamma
self.mu_D = mu_D if mu_D is not None else self.mu_I
        self.theta_E = theta_E if theta_E is not None else 0
        self.theta_I = theta_I if theta_I is not None else 0
        self.psi_E = psi_E if psi_E is not None else 0
        self.psi_I = psi_I if psi_I is not None else 0
        self.q = q if q is not None else 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.N = numpy.array([int(initN)])
self.numE = numpy.array([int(initE)])
self.numI = numpy.array([int(initI)])
self.numD_E = numpy.array([int(initD_E)])
self.numD_I = numpy.array([int(initD_I)])
self.numR = numpy.array([int(initR)])
self.numF = numpy.array([int(initF)])
self.numS = numpy.array([self.N[-1] - self.numE[-1] - self.numI[-1] - self.numD_E[-1] - self.numD_I[-1] - self.numR[-1] - self.numF[-1]])
assert(self.numS[0] >= 0), "The specified initial population size N must be greater than or equal to the initial compartment counts."
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@staticmethod
def system_dfes(t, variables, beta, sigma, gamma, xi, mu_I, mu_0, nu,
beta_D, sigma_D, gamma_D, mu_D, theta_E, theta_I, psi_E, psi_I, q):
        S, E, I, D_E, D_I, R, F = variables    # variables is a list with compartment counts as elements
N = S + E + I + D_E + D_I + R
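        # SEIRS flow with a detected/quarantined branch:
        #   S -> E -> I -> R (and R -> S at rate xi); exposed and infectious individuals move to
        #   D_E and D_I upon positive tests, with their own rates (beta_D, sigma_D, gamma_D, mu_D).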
dS = - (beta*S*I)/N - q*(beta_D*S*D_I)/N + xi*R + nu*N - mu_0*S
dE = (beta*S*I)/N + q*(beta_D*S*D_I)/N - sigma*E - theta_E*psi_E*E - mu_0*E
dI = sigma*E - gamma*I - mu_I*I - theta_I*psi_I*I - mu_0*I
dDE = theta_E*psi_E*E - sigma_D*D_E - mu_0*D_E
dDI = theta_I*psi_I*I + sigma_D*D_E - gamma_D*D_I - mu_D*D_I - mu_0*D_I
dR = gamma*I + gamma_D*D_I - xi*R - mu_0*R
dF = mu_I*I + mu_D*D_I
return [dS, dE, dI, dDE, dDI, dR, dF]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_epoch(self, runtime, dt=0.1):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's timeseries
t_eval = numpy.arange(start=self.t, stop=self.t+runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t+runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else where the last sim left off)
init_cond = [self.numS[-1], self.numE[-1], self.numI[-1], self.numD_E[-1], self.numD_I[-1], self.numR[-1], self.numF[-1]]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differential eqns:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(lambda t, X: SEIRSModel.system_dfes(t, X, self.beta, self.sigma, self.gamma, self.xi, self.mu_I, self.mu_0, self.nu,
self.beta_D, self.sigma_D, self.gamma_D, self.mu_D, self.theta_E, self.theta_I, self.psi_E, self.psi_I, self.q
),
t_span=[self.t, self.tmax], y0=init_cond, t_eval=t_eval
)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tseries = numpy.append(self.tseries, solution['t'])
self.numS = numpy.append(self.numS, solution['y'][0])
self.numE = numpy.append(self.numE, solution['y'][1])
self.numI = numpy.append(self.numI, solution['y'][2])
self.numD_E = numpy.append(self.numD_E, solution['y'][3])
self.numD_I = numpy.append(self.numD_I, solution['y'][4])
self.numR = numpy.append(self.numR, solution['y'][5])
self.numF = numpy.append(self.numF, solution['y'][6])
self.t = self.tseries[-1]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, dt=0.1, checkpoints=None, verbose=False):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
paramNames = ['beta', 'sigma', 'gamma', 'xi', 'mu_I', 'mu_0', 'nu',
'beta_D', 'sigma_D', 'gamma_D', 'mu_D',
'theta_E', 'theta_I', 'psi_E', 'psi_I', 'q']
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for all checkpoints.
if(param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, numpy.ndarray))
or len(checkpoints[param])!=numCheckpoints):
checkpoints[param] = [getattr(self, param)]*numCheckpoints
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if(not checkpoints):
self.run_epoch(runtime=self.tmax, dt=dt)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints['t']):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime-self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if(verbose):
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
if(self.t < self.tmax):
self.run_epoch(runtime=self.tmax-self.t, dt=dt)
return True
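# Example (illustrative only; the parameter values below are hypothetical):
# time-varying transmission can be simulated by passing checkpoints to run(), e.g.
#   model.run(T=300, checkpoints={'t': [20, 100], 'beta': [0.147, 0.150]})
# Parameters omitted from the checkpoints dict keep their current values.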
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.N if plot_percentages else self.numF
Eseries = self.numE/self.N if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.N if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.N if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.N if plot_percentages else self.numD_I
Iseries = self.numI/self.N if plot_percentages else self.numI
Rseries = self.numR/self.N if plot_percentages else self.numR
Sseries = self.numS/self.N if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.N/100)]
dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.N/100)] / (self.N if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.N if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
if(combine_D and (any(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, label='$D_{all}$', zorder=6)
else:
if(any(D_Eseries) and plot_D_E=='line'):
ax.plot(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any(D_Iseries) and plot_D_I=='line'):
ax.plot(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any(Iseries) and plot_I=='line'):
ax.plot(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any(Sseries) and plot_S=='line'):
ax.plot(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any(Rseries) and plot_R=='line'):
ax.plot(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked',plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
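# Example usage of the deterministic model (a minimal sketch; the rate values
# below are hypothetical placeholders, not calibrated estimates):
#   model = SEIRSModel(initN=10000, beta=0.15, sigma=1/5.2, gamma=1/12.39, initI=10)
#   model.run(T=300)
#   model.figure_infections(vlines=[100], vline_labels=['intervention'])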
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interacting outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, gamma, xi=0, mu_I=0, mu_0=0, nu=0, beta_local=None, p=0,
Q=None, beta_D=None, sigma_D=None, gamma_D=None, mu_D=None, beta_D_local=None,
theta_E=0, theta_I=0, phi_E=0, phi_I=0, psi_E=1, psi_I=1, q=0,
initE=0, initI=10, initD_E=0, initD_I=0, initR=0, initF=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'gamma':gamma, 'xi':xi, 'mu_I':mu_I, 'mu_0':mu_0, 'nu':nu,
'beta_D':beta_D, 'sigma_D':sigma_D, 'gamma_D':gamma_D, 'mu_D':mu_D,
'beta_local':beta_local, 'beta_D_local':beta_D_local, 'p':p,'q':q,
'theta_E':theta_E, 'theta_I':theta_I, 'phi_E':phi_E, 'phi_I':phi_I, 'psi_E':psi_E, 'psi_I':psi_I }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (not counting vitality/re-susceptibility returns to the S state),
# so roughly numNodes*4 events are expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = numpy.zeros(5*self.numNodes)
self.numE = numpy.zeros(5*self.numNodes)
self.numI = numpy.zeros(5*self.numNodes)
self.numD_E = numpy.zeros(5*self.numNodes)
self.numD_I = numpy.zeros(5*self.numNodes)
self.numR = numpy.zeros(5*self.numNodes)
self.numF = numpy.zeros(5*self.numNodes)
self.numS = numpy.zeros(5*self.numNodes)
self.N = numpy.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of individuals with each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = self.numNodes - self.numE[0] - self.numI[0] - self.numD_E[0] - self.numD_I[0] - self.numR[0] - self.numF[0]
self.N[0] = self.numS[0] + self.numE[0] + self.numI[0] + self.numD_E[0] + self.numD_I[0] + self.numR[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = numpy.array([self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0]) + [self.I]*int(self.numI[0]) + [self.D_E]*int(self.numD_E[0]) + [self.D_I]*int(self.numD_I[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])).reshape((self.numNodes,1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = numpy.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoI': {'currentState':self.E, 'newState':self.I},
'ItoR': {'currentState':self.I, 'newState':self.R},
'ItoF': {'currentState':self.I, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'ItoDI': {'currentState':self.I, 'newState':self.D_I},
'DEtoDI': {'currentState':self.D_E, 'newState':self.D_I},
'DItoR': {'currentState':self.D_I, 'newState':self.R},
'DItoF': {'currentState':self.D_I, 'newState':self.F},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape((self.numNodes,1))}
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_I'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['N'][0] = self.nodeGroupData[groupName]['numS'][0] + self.nodeGroupData[groupName]['numE'][0] + self.nodeGroupData[groupName]['numI'][0] + self.nodeGroupData[groupName]['numD_E'][0] + self.nodeGroupData[groupName]['numD_I'][0] + self.nodeGroupData[groupName]['numR'][0]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
import time
updatestart = time.time()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = numpy.array(self.parameters['beta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.sigma = numpy.array(self.parameters['sigma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.gamma = numpy.array(self.parameters['gamma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.xi = numpy.array(self.parameters['xi']).reshape((self.numNodes, 1)) if isinstance(self.parameters['xi'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['xi'], shape=(self.numNodes,1))
self.mu_I = numpy.array(self.parameters['mu_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_I'], shape=(self.numNodes,1))
self.mu_0 = numpy.array(self.parameters['mu_0']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_0'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_0'], shape=(self.numNodes,1))
self.nu = numpy.array(self.parameters['nu']).reshape((self.numNodes, 1)) if isinstance(self.parameters['nu'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['nu'], shape=(self.numNodes,1))
self.p = numpy.array(self.parameters['p']).reshape((self.numNodes, 1)) if isinstance(self.parameters['p'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['p'], shape=(self.numNodes,1))
# Testing-related parameters:
self.beta_D = (numpy.array(self.parameters['beta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_D'], shape=(self.numNodes,1))) if self.parameters['beta_D'] is not None else self.beta
self.sigma_D = (numpy.array(self.parameters['sigma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma_D'], shape=(self.numNodes,1))) if self.parameters['sigma_D'] is not None else self.sigma
self.gamma_D = (numpy.array(self.parameters['gamma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D'], shape=(self.numNodes,1))) if self.parameters['gamma_D'] is not None else self.gamma
self.mu_D = (numpy.array(self.parameters['mu_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_D'], shape=(self.numNodes,1))) if self.parameters['mu_D'] is not None else self.mu_I
self.theta_E = numpy.array(self.parameters['theta_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_E'], shape=(self.numNodes,1))
self.theta_I = numpy.array(self.parameters['theta_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_I'], shape=(self.numNodes,1))
self.phi_E = numpy.array(self.parameters['phi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_E'], shape=(self.numNodes,1))
self.phi_I = numpy.array(self.parameters['phi_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_I'], shape=(self.numNodes,1))
self.psi_E = numpy.array(self.parameters['psi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['psi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_E'], shape=(self.numNodes,1))
self.psi_I = numpy.array(self.parameters['psi_I']).reshape((self.numNodes, 1)) if isinstance(self.parameters['psi_I'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['psi_I'], shape=(self.numNodes,1))
self.q = numpy.array(self.parameters['q']).reshape((self.numNodes, 1)) if isinstance(self.parameters['q'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['q'], shape=(self.numNodes,1))
#Local transmission parameters:
if(self.parameters['beta_local'] is not None):
if(isinstance(self.parameters['beta_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_local'], list)):
self.beta_local = numpy.array(self.parameters['beta_local'])
else: # is numpy.ndarray
self.beta_local = self.parameters['beta_local']
if(self.beta_local.ndim == 1):
self.beta_local = self.beta_local.reshape((self.numNodes, 1))
elif(self.beta_local.ndim == 2):
self.beta_local = self.beta_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_local = numpy.full_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
#----------------------------------------
if(self.parameters['beta_D_local'] is not None):
if(isinstance(self.parameters['beta_D_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = numpy.array(self.parameters['beta_D_local'])
else: # is numpy.ndarray
self.beta_D_local = self.parameters['beta_D_local']
if(self.beta_D_local.ndim == 1):
self.beta_D_local = self.beta_D_local.reshape((self.numNodes, 1))
elif(self.beta_D_local.ndim == 2):
self.beta_D_local = self.beta_D_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_D_local = numpy.full_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if(self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, numpy.tile(self.beta_local, (1,self.numNodes))).tocsr()
elif(self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if(self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, numpy.tile(self.beta_D_local, (1,self.numNodes))).tocsr()
elif(self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
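# (A_beta[i,j] carries the transmission weight of edge (i,j); in calc_propensities,
#  dotting A_beta with the indicator (X==I) sums these weights over node i's
#  infectious neighbors, giving node i's local exposure pressure.)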
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes,1) # sums of adj matrix cols
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G)==numpy.ndarray:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G)==networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q)==numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q)==networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
assert(self.numNodes == self.numNodes_Q), "The normal and quarantine adjacency graphs must be of the same size."
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_scenario_flags(self):
self.testing_scenario = ( (numpy.any(self.psi_I) and (numpy.any(self.theta_I) or numpy.any(self.phi_I)))
or (numpy.any(self.psi_E) and (numpy.any(self.theta_E) or numpy.any(self.phi_E))) )
self.tracing_scenario = ( (numpy.any(self.psi_E) and numpy.any(self.phi_E))
or (numpy.any(self.psi_I) and numpy.any(self.phi_I)) )
self.vitality_scenario = (numpy.any(self.mu_0) and numpy.any(self.nu))
self.resusceptibility_scenario = (numpy.any(self.xi))
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:])
else:
return (self.numE[t_idx] + self.numI[t_idx] + self.numD_E[t_idx] + self.numD_I[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def calc_propensities(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-calculate matrix multiplication terms that may be used in multiple propensity calculations,
# and check to see if their computation is necessary before doing the multiplication
transmissionTerms_I = numpy.zeros(shape=(self.numNodes,1))
if(numpy.any(self.numI[self.tidx])
and numpy.any(self.beta!=0)):
transmissionTerms_I = numpy.asarray( scipy.sparse.csr_matrix.dot(self.A_beta, self.X==self.I) )
transmissionTerms_DI = numpy.zeros(shape=(self.numNodes,1))
if(self.testing_scenario
and numpy.any(self.numD_I[self.tidx])
and numpy.any(self.beta_D)):
transmissionTerms_DI = numpy.asarray( scipy.sparse.csr_matrix.dot(self.A_Q_beta_D, self.X==self.D_I) )
numContacts_D = numpy.zeros(shape=(self.numNodes,1))
if(self.tracing_scenario
and (numpy.any(self.numD_E[self.tidx]) or numpy.any(self.numD_I[self.tidx]))):
numContacts_D = numpy.asarray( scipy.sparse.csr_matrix.dot( self.A, ((self.X==self.D_E)|(self.X==self.D_I)) ) )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_StoE = ( self.p*((self.beta*self.numI[self.tidx] + self.q*self.beta_D*self.numD_I[self.tidx])/self.N[self.tidx])
+ (1-self.p)*numpy.divide((transmissionTerms_I + transmissionTerms_DI), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0)
)*(self.X==self.S)
propensities_EtoI = self.sigma*(self.X==self.E)
propensities_ItoR = self.gamma*(self.X==self.I)
propensities_ItoF = self.mu_I*(self.X==self.I)
# propensities_EtoDE = ( self.theta_E + numpy.divide((self.phi_E*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_E*(self.X==self.E)
propensities_EtoDE = (self.theta_E + self.phi_E*numContacts_D)*self.psi_E*(self.X==self.E)
# propensities_ItoDI = ( self.theta_I + numpy.divide((self.phi_I*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_I*(self.X==self.I)
propensities_ItoDI = (self.theta_I + self.phi_I*numContacts_D)*self.psi_I*(self.X==self.I)
propensities_DEtoDI = self.sigma_D*(self.X==self.D_E)
propensities_DItoR = self.gamma_D*(self.X==self.D_I)
propensities_DItoF = self.mu_D*(self.X==self.D_I)
propensities_RtoS = self.xi*(self.X==self.R)
propensities__toS = self.nu*(self.X!=self.F)
propensities = numpy.hstack([propensities_StoE, propensities_EtoI,
propensities_ItoR, propensities_ItoF,
propensities_EtoDE, propensities_ItoDI, propensities_DEtoDI,
propensities_DItoR, propensities_DItoF,
propensities_RtoS, propensities__toS])
columns = ['StoE', 'EtoI', 'ItoR', 'ItoF', 'EtoDE', 'ItoDI', 'DEtoDI', 'DItoR', 'DItoF', 'RtoS', '_toS']
return propensities, columns
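# Note on indexing: propensities has shape (numNodes, len(columns)). run_iteration
# flattens it column-major (order='F'), so, e.g., with numNodes=3 a flat index of 4
# falls in column 1 ('EtoI') and corresponds to node 4 % 3 = 1.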
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def increase_data_series_length(self):
self.tseries= numpy.pad(self.tseries, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numS = numpy.pad(self.numS, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numE = numpy.pad(self.numE, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numI = numpy.pad(self.numI, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_E = numpy.pad(self.numD_E, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_I = numpy.pad(self.numD_I, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numR = numpy.pad(self.numR, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numF = numpy.pad(self.numF, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.N = numpy.pad(self.N, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
if(self.store_Xseries):
self.Xseries = numpy.pad(self.Xseries, [(0, 5*self.numNodes), (0,0)], mode='constant', constant_values=0)
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.pad(self.nodeGroupData[groupName]['numS'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numE'] = numpy.pad(self.nodeGroupData[groupName]['numE'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numI'] = numpy.pad(self.nodeGroupData[groupName]['numI'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_E'] = numpy.pad(self.nodeGroupData[groupName]['numD_E'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_I'] = numpy.pad(self.nodeGroupData[groupName]['numD_I'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numR'] = numpy.pad(self.nodeGroupData[groupName]['numR'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numF'] = numpy.pad(self.nodeGroupData[groupName]['numF'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['N'] = numpy.pad(self.nodeGroupData[groupName]['N'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def finalize_data_series(self):
self.tseries= numpy.array(self.tseries, dtype=float)[:self.tidx+1]
self.numS = numpy.array(self.numS, dtype=float)[:self.tidx+1]
self.numE = numpy.array(self.numE, dtype=float)[:self.tidx+1]
self.numI = numpy.array(self.numI, dtype=float)[:self.tidx+1]
self.numD_E = numpy.array(self.numD_E, dtype=float)[:self.tidx+1]
self.numD_I = numpy.array(self.numD_I, dtype=float)[:self.tidx+1]
self.numR = numpy.array(self.numR, dtype=float)[:self.tidx+1]
self.numF = numpy.array(self.numF, dtype=float)[:self.tidx+1]
self.N = numpy.array(self.N, dtype=float)[:self.tidx+1]
if(self.store_Xseries):
self.Xseries = self.Xseries[:self.tidx+1, :]
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.array(self.nodeGroupData[groupName]['numS'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numE'] = numpy.array(self.nodeGroupData[groupName]['numE'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numI'] = numpy.array(self.nodeGroupData[groupName]['numI'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_E'] = numpy.array(self.nodeGroupData[groupName]['numD_E'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_I'] = numpy.array(self.nodeGroupData[groupName]['numD_I'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numR'] = numpy.array(self.nodeGroupData[groupName]['numR'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numF'] = numpy.array(self.nodeGroupData[groupName]['numF'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['N'] = numpy.array(self.nodeGroupData[groupName]['N'], dtype=float)[:self.tidx+1]
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_iteration(self):
if(self.tidx >= len(self.tseries)-1):
# Room has run out in the timeseries storage arrays; extend them by another 5*numNodes slots:
self.increase_data_series_length()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1. Generate 2 random numbers uniformly distributed in (0,1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
r1 = numpy.random.rand()
r2 = numpy.random.rand()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 2. Calculate propensities
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities, transitionTypes = self.calc_propensities()
# Terminate when probability of all events is 0:
if(propensities.sum() <= 0.0):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 3. Calculate alpha
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_flat = propensities.ravel(order='F')
cumsum = propensities_flat.cumsum()
alpha = propensities_flat.sum()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 4. Compute the time until the next event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
tau = (1/alpha)*numpy.log(float(1/r1))
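# (With total propensity alpha, this inverse-transform draw gives an exponentially
#  distributed waiting time with rate alpha, as in the standard Gillespie SSA.)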
self.t += tau
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 5. Compute which event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
transitionIdx = numpy.searchsorted(cumsum,r2*alpha)
transitionNode = transitionIdx % self.numNodes
transitionType = transitionTypes[ int(transitionIdx/self.numNodes) ]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 6. Update node states and data series
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
assert(self.X[transitionNode] == self.transitions[transitionType]['currentState'] and self.X[transitionNode]!=self.F), "Assertion error: Node "+str(transitionNode)+" has unexpected current state "+str(self.X[transitionNode])+" given the intended transition of "+str(transitionType)+"."
self.X[transitionNode] = self.transitions[transitionType]['newState']
self.tidx += 1
self.tseries[self.tidx] = self.t
self.numS[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.S), a_min=0, a_max=self.numNodes)
self.numE[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.E), a_min=0, a_max=self.numNodes)
self.numI[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.I), a_min=0, a_max=self.numNodes)
self.numD_E[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_E), a_min=0, a_max=self.numNodes)
self.numD_I[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_I), a_min=0, a_max=self.numNodes)
self.numR[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.R), a_min=0, a_max=self.numNodes)
self.numF[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.F), a_min=0, a_max=self.numNodes)
self.N[self.tidx] = numpy.clip((self.numS[self.tidx] + self.numE[self.tidx] + self.numI[self.tidx] + self.numD_E[self.tidx] + self.numD_I[self.tidx] + self.numR[self.tidx]), a_min=0, a_max=self.numNodes)
if(self.store_Xseries):
self.Xseries[self.tidx,:] = self.X.T
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I)
self.nodeGroupData[groupName]['numD_E'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_I'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_I)
self.nodeGroupData[groupName]['numR'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['N'][self.tidx] = numpy.clip((self.nodeGroupData[groupName]['numS'][self.tidx] + self.nodeGroupData[groupName]['numE'][self.tidx] + self.nodeGroupData[groupName]['numI'][self.tidx] + self.nodeGroupData[groupName]['numD_E'][self.tidx] + self.nodeGroupData[groupName]['numD_I'][self.tidx] + self.nodeGroupData[groupName]['numR'][self.tidx]), a_min=0, a_max=self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Terminate if tmax reached or num infectious and num exposed is 0:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(self.t >= self.tmax or (self.numI[self.tidx]<1 and self.numE[self.tidx]<1 and self.numD_E[self.tidx]<1 and self.numD_I[self.tidx]<1)):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, checkpoints=None, print_interval=10, verbose='t'):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
for chkpt_param, chkpt_values in checkpoints.items():
assert(isinstance(chkpt_values, (list, numpy.ndarray)) and len(chkpt_values)==numCheckpoints), "Expecting a list of values with length equal to number of checkpoint times ("+str(numCheckpoints)+") for each checkpoint parameter."
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
print_reset = True
running = True
while running:
running = self.run_iteration()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Handle checkpoints if applicable:
if(checkpoints):
if(self.t >= checkpointTime):
if(verbose is not False):
print("[Checkpoint: Updating parameters]")
# A checkpoint has been reached, update param values:
if('G' in list(checkpoints.keys())):
self.update_G(checkpoints['G'][checkpointIdx])
if('Q' in list(checkpoints.keys())):
self.update_Q(checkpoints['Q'][checkpointIdx])
for param in list(self.parameters.keys()):
if(param in list(checkpoints.keys())):
self.parameters.update({param: checkpoints[param][checkpointIdx]})
# Update parameter data structures and scenario flags:
self.update_parameters()
# Update the next checkpoint time:
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(print_interval):
if(print_reset and (int(self.t) % print_interval == 0)):
if(verbose=="t"):
print("t = %.2f" % self.t)
if(verbose==True):
print("t = %.2f" % self.t)
print("\t S = " + str(self.numS[self.tidx]))
print("\t E = " + str(self.numE[self.tidx]))
print("\t I = " + str(self.numI[self.tidx]))
print("\t D_E = " + str(self.numD_E[self.tidx]))
print("\t D_I = " + str(self.numD_I[self.tidx]))
print("\t R = " + str(self.numR[self.tidx]))
print("\t F = " + str(self.numF[self.tidx]))
print_reset = False
elif(not print_reset and (int(self.t) % print_interval != 0)):
print_reset = True
return True
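# Example of checkpointed interventions for the network model (illustrative only;
# G_distancing stands for a hypothetical pre-built reduced-contact graph):
#   checkpoints = {'t': [20, 100],
#                  'G': [G_distancing, G],
#                  'p': [0.1, 0.5],
#                  'theta_E': [0.02, 0.02], 'theta_I': [0.02, 0.02],
#                  'phi_E': [0.2, 0.2], 'phi_I': [0.2, 0.2]}
#   model.run(T=300, checkpoints=checkpoints)
# At each checkpoint time, the listed parameters (and G/Q, if given) are updated
# before the simulation continues.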
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.numNodes if plot_percentages else self.numF
Eseries = self.numE/self.numNodes if plot_percentages else self.numE
Dseries = (self.numD_E+self.numD_I)/self.numNodes if plot_percentages else (self.numD_E+self.numD_I)
D_Eseries = self.numD_E/self.numNodes if plot_percentages else self.numD_E
D_Iseries = self.numD_I/self.numNodes if plot_percentages else self.numD_I
Iseries = self.numI/self.numNodes if plot_percentages else self.numI
Rseries = self.numR/self.numNodes if plot_percentages else self.numR
Sseries = self.numS/self.numNodes if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.numNodes/100)]
dashedReference_IDEstack = (dashed_reference_results.numI + dashed_reference_results.numD_I + dashed_reference_results.numD_E + dashed_reference_results.numE)[::int(self.numNodes/100)] / (self.numNodes if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_IDEstack, color='#E0E0E0', linestyle='--', label='$I+D+E$ ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (shaded_reference_results.numI + shaded_reference_results.numD_I + shaded_reference_results.numD_E + shaded_reference_results.numE) / (self.numNodes if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_IDEstack, 0, color='#EFEFEF', label='$I+D+E$ ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_IDEstack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(combine_D and plot_D_E=='stacked' and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_E, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_Iseries) and plot_D_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), topstack, color=color_D_I, alpha=0.5, label='$D_I$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, topstack+D_Iseries), color=color_D_I, zorder=3)
topstack = topstack+D_Iseries
if(any(Iseries) and plot_I=='stacked'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), topstack, color=color_I, alpha=0.5, label='$I$', zorder=2)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, topstack+Iseries), color=color_I, zorder=3)
topstack = topstack+Iseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), color=color_S, zorder=3)
topstack = topstack+Sseries
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='shaded'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), 0, color=color_F, alpha=0.5, label='$F$', zorder=4)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, zorder=5)
if(any(Eseries) and plot_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), 0, color=color_E, alpha=0.5, label='$E$', zorder=4)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, zorder=5)
if(combine_D and (any(Dseries) and plot_D_E=='shaded' and plot_D_I=='shaded')):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), 0, color=color_D_E, alpha=0.5, label='$D_{all}$', zorder=4)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, zorder=5)
else:
if(any(D_Eseries) and plot_D_E=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), 0, color=color_D_E, alpha=0.5, label='$D_E$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, zorder=5)
if(any(D_Iseries) and plot_D_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), 0, color=color_D_I, alpha=0.5, label='$D_I$', zorder=4)
ax.plot( numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, zorder=5)
if(any(Iseries) and plot_I=='shaded'):
ax.fill_between(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), 0, color=color_I, alpha=0.5, label='$I$', zorder=4)
ax.plot( numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, zorder=5)
if(any(Sseries) and plot_S=='shaded'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), 0, color=color_S, alpha=0.5, label='$S$', zorder=4)
ax.plot( numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, zorder=5)
if(any(Rseries) and plot_R=='shaded'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), 0, color=color_R, alpha=0.5, label='$R$', zorder=4)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, zorder=5)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(any(Fseries) and plot_F=='line'):
ax.plot(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, Fseries), color=color_F, label='$F$', zorder=6)
if(any(Eseries) and plot_E=='line'):
ax.plot(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, Eseries), color=color_E, label='$E$', zorder=6)
if(combine_D and (any(Dseries) and plot_D_E=='line' and plot_D_I=='line')):
ax.plot(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, Dseries), color=color_D_E, label='$D_{all}$', zorder=6)
else:
if(any(D_Eseries) and plot_D_E=='line'):
ax.plot(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, D_Eseries), color=color_D_E, label='$D_E$', zorder=6)
if(any(D_Iseries) and plot_D_I=='line'):
ax.plot(numpy.ma.masked_where(D_Iseries<=0, self.tseries), numpy.ma.masked_where(D_Iseries<=0, D_Iseries), color=color_D_I, label='$D_I$', zorder=6)
if(any(Iseries) and plot_I=='line'):
ax.plot(numpy.ma.masked_where(Iseries<=0, self.tseries), numpy.ma.masked_where(Iseries<=0, Iseries), color=color_I, label='$I$', zorder=6)
if(any(Sseries) and plot_S=='line'):
ax.plot(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, Sseries), color=color_S, label='$S$', zorder=6)
if(any(Rseries) and plot_R=='line'):
ax.plot(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, Rseries), color=color_R, label='$R$', zorder=6)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(len(vlines)>0 and len(vline_colors)==0):
vline_colors = ['gray']*len(vlines)
if(len(vlines)>0 and len(vline_labels)==0):
vline_labels = [None]*len(vlines)
if(len(vlines)>0 and len(vline_styles)==0):
vline_styles = [':']*len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(vlines, vline_colors, vline_styles, vline_labels):
if(vline_x is not None):
ax.axvline(x=vline_x, color=vline_color, linestyle=vline_style, alpha=1, label=vline_label)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel('days')
ax.set_ylabel('percent of population' if plot_percentages else 'number of individuals')
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if(plot_percentages):
ax.set_yticklabels(['{:,.0%}'.format(y) for y in ax.get_yticks()])
if(legend):
legend_handles, legend_labels = ax.get_legend_handles_labels()
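            # Reverse the handle/label order so the legend listing matches the top-to-bottom ordering of the stacked series.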
ax.legend(legend_handles[::-1], legend_labels[::-1], loc='upper right', facecolor='white', edgecolor='none', framealpha=0.9, prop={'size': 8})
if(title):
ax.set_title(title, size=12)
if(side_title):
ax.annotate(side_title, (0, 0.5), xytext=(-45, 0), ha='right', va='center',
size=12, rotation=90, xycoords='axes fraction', textcoords='offset points')
return ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_basic(self, plot_S='line', plot_E='line', plot_I='line',plot_R='line', plot_F='line',
plot_D_E='line', plot_D_I='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def figure_infections(self, plot_S=False, plot_E='stacked', plot_I='stacked',plot_R=False, plot_F=False,
plot_D_E='stacked', plot_D_I='stacked', combine_D=True,
color_S='tab:green', color_E='orange', color_I='crimson', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_I='mediumorchid', color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True,
figsize=(12,8), use_seaborn=True, show=True):
import matplotlib.pyplot as pyplot
fig, ax = pyplot.subplots(figsize=figsize)
if(use_seaborn):
import seaborn
seaborn.set_style('ticks')
seaborn.despine()
self.plot(ax=ax, plot_S=plot_S, plot_E=plot_E, plot_I=plot_I,plot_R=plot_R, plot_F=plot_F,
plot_D_E=plot_D_E, plot_D_I=plot_D_I, combine_D=combine_D,
color_S=color_S, color_E=color_E, color_I=color_I, color_R=color_R, color_F=color_F,
color_D_E=color_D_E, color_D_I=color_D_I, color_reference=color_reference,
dashed_reference_results=dashed_reference_results, dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results, shaded_reference_label=shaded_reference_label,
vlines=vlines, vline_colors=vline_colors, vline_styles=vline_styles, vline_labels=vline_labels,
ylim=ylim, xlim=xlim, legend=legend, title=title, side_title=side_title, plot_percentages=plot_percentages)
if(show):
pyplot.show()
return fig, ax
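    # Illustrative usage sketch (not part of the class itself): assuming `model` is an instance of this
    # class and model.run(T=...) has been called, a stacked infections figure can be drawn with, e.g.:
    #   fig, ax = model.figure_infections(vlines=[20], vline_labels=['intervention'], show=False)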
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
class SymptomaticSEIRSNetworkModel():
"""
A class to simulate the SEIRS Stochastic Network Model
with Symptom Presentation Compartments
===================================================
Params:
G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (global interactions)
beta_local Rate(s) of transmission between adjacent individuals (optional)
            beta_A          Rate of transmission for asymptomatic/pre-symptomatic individuals (global interactions)
            beta_A_local    Rate(s) of transmission for asymptomatic/pre-symptomatic individuals between adjacent nodes (optional)
sigma Rate of progression to infectious state (inverse of latent period)
lamda Rate of progression to infectious (a)symptomatic state (inverse of prodromal period)
eta Rate of progression to hospitalized state (inverse of onset-to-admission period)
gamma Rate of recovery for non-hospitalized symptomatic individuals (inverse of symptomatic infectious period)
gamma_A Rate of recovery for asymptomatic individuals (inverse of asymptomatic infectious period)
gamma_H Rate of recovery for hospitalized symptomatic individuals (inverse of hospitalized infectious period)
mu_H Rate of death for hospitalized individuals (inverse of admission-to-death period)
xi Rate of re-susceptibility (upon recovery)
mu_0 Rate of baseline death
nu Rate of baseline birth
a Probability of an infected individual remaining asymptomatic
h Probability of a symptomatic individual being hospitalized
f Probability of death for hospitalized individuals (case fatality rate)
p Probability of individuals interacting with global population
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission for individuals with detected infections (global interactions)
beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of progression to infectious state for individuals with detected infections
lamda_D Rate of progression to infectious (a)symptomatic state for individuals with detected infections
eta_D Rate of progression to hospitalized state for individuals with detected infections
gamma_D_S Rate of recovery for non-hospitalized symptomatic individuals for individuals with detected infections
gamma_D_A Rate of recovery for asymptomatic individuals for individuals with detected infections
theta_E Rate of random testing for exposed individuals
theta_pre Rate of random testing for infectious pre-symptomatic individuals
theta_S Rate of random testing for infectious symptomatic individuals
theta_A Rate of random testing for infectious asymptomatic individuals
phi_E Rate of testing when a close contact has tested positive for exposed individuals
phi_pre Rate of testing when a close contact has tested positive for infectious pre-symptomatic individuals
phi_S Rate of testing when a close contact has tested positive for infectious symptomatic individuals
phi_A Rate of testing when a close contact has tested positive for infectious asymptomatic individuals
d_E Probability of positive test for exposed individuals
d_pre Probability of positive test for infectious pre-symptomatic individuals
d_S Probability of positive test for infectious symptomatic individuals
d_A Probability of positive test for infectious asymptomatic individuals
q Probability of individuals with detected infection interacting with global population
initE Initial number of exposed individuals
initI_pre Initial number of infectious pre-symptomatic individuals
initI_S Initial number of infectious symptomatic individuals
initI_A Initial number of infectious asymptomatic individuals
initH Initial number of hospitalized individuals
initR Initial number of recovered individuals
initF Initial number of infection-related fatalities
initD_E Initial number of detected exposed individuals
initD_pre Initial number of detected infectious pre-symptomatic individuals
initD_S Initial number of detected infectious symptomatic individuals
initD_A Initial number of detected infectious asymptomatic individuals
(all remaining nodes initialized susceptible)
"""
def __init__(self, G, beta, sigma, lamda, gamma,
eta=0, gamma_A=None, gamma_H=None, mu_H=0, xi=0, mu_0=0, nu=0, a=0, h=0, f=0, p=0,
beta_local=None, beta_A=None, beta_A_local=None,
Q=None, lamda_D=None, beta_D=None, beta_D_local=None, sigma_D=None, eta_D=None, gamma_D_S=None, gamma_D_A=None,
theta_E=0, theta_pre=0, theta_S=0, theta_A=0, phi_E=0, phi_pre=0, phi_S=0, phi_A=0,
d_E=1, d_pre=1, d_S=1, d_A=1, q=0,
initE=0, initI_pre=0, initI_S=0, initI_A=0, initH=0, initR=0, initF=0,
initD_E=0, initD_pre=0, initD_S=0, initD_A=0,
node_groups=None, store_Xseries=False):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if(Q is None):
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = { 'beta':beta, 'sigma':sigma, 'lamda':lamda, 'gamma':gamma,
'eta':eta, 'gamma_A':gamma_A, 'gamma_H':gamma_H, 'mu_H':mu_H,
'xi':xi, 'mu_0':mu_0, 'nu':nu, 'a':a, 'h':h, 'f':f, 'p':p,
'beta_local':beta_local, 'beta_A':beta_A, 'beta_A_local':beta_A_local,
'lamda_D':lamda_D, 'beta_D':beta_D, 'beta_D_local':beta_D_local, 'sigma_D':sigma_D,
'eta_D':eta_D, 'gamma_D_S':gamma_D_S, 'gamma_D_A':gamma_D_A,
'theta_E':theta_E, 'theta_pre':theta_pre, 'theta_S':theta_S, 'theta_A':theta_A,
'phi_E':phi_E, 'phi_pre':phi_pre, 'phi_S':phi_S, 'phi_A':phi_A,
'd_E':d_E, 'd_pre':d_pre, 'd_S':d_S, 'd_A':d_A, 'q':q,
'initE':initE, 'initI_pre':initI_pre, 'initI_S':initI_S, 'initI_A':initI_A,
'initH':initH, 'initR':initR, 'initF':initF,
'initD_E':initD_E, 'initD_pre':initD_pre, 'initD_S':initD_S, 'initD_A':initD_A }
self.update_parameters()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Each node can undergo roughly 4-6 transitions (excluding vitality/re-susceptibility returns to the S state),
        # so on the order of numNodes*5 events/timesteps are expected; initialize numNodes*5 timestep slots to start
        # (these arrays are extended during run() if more room is needed).
self.tseries = numpy.zeros(5*self.numNodes)
self.numS = numpy.zeros(5*self.numNodes)
self.numE = numpy.zeros(5*self.numNodes)
self.numI_pre = numpy.zeros(5*self.numNodes)
self.numI_S = numpy.zeros(5*self.numNodes)
self.numI_A = numpy.zeros(5*self.numNodes)
self.numH = numpy.zeros(5*self.numNodes)
self.numR = numpy.zeros(5*self.numNodes)
self.numF = numpy.zeros(5*self.numNodes)
self.numD_E = numpy.zeros(5*self.numNodes)
self.numD_pre = numpy.zeros(5*self.numNodes)
self.numD_S = numpy.zeros(5*self.numNodes)
self.numD_A = numpy.zeros(5*self.numNodes)
self.N = numpy.zeros(5*self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize counts of individuals in each state:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI_pre[0] = int(initI_pre)
self.numI_S[0] = int(initI_S)
self.numI_A[0] = int(initI_A)
self.numH[0] = int(initH)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numD_E[0] = int(initD_E)
self.numD_pre[0] = int(initD_pre)
self.numD_S[0] = int(initD_S)
self.numD_A[0] = int(initD_A)
self.numS[0] = (self.numNodes - self.numE[0] - self.numI_pre[0] - self.numI_S[0] - self.numI_A[0] - self.numH[0] - self.numR[0]
- self.numD_E[0] - self.numD_pre[0] - self.numD_S[0] - self.numD_A[0] - self.numF[0])
self.N[0] = self.numNodes - self.numF[0]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I_pre = 3
self.I_S = 4
self.I_A = 5
self.H = 6
self.R = 7
self.F = 8
self.D_E = 9
self.D_pre = 10
self.D_S = 11
self.D_A = 12
self.X = numpy.array( [self.S]*int(self.numS[0]) + [self.E]*int(self.numE[0])
+ [self.I_pre]*int(self.numI_pre[0]) + [self.I_S]*int(self.numI_S[0]) + [self.I_A]*int(self.numI_A[0])
+ [self.H]*int(self.numH[0]) + [self.R]*int(self.numR[0]) + [self.F]*int(self.numF[0])
+ [self.D_E]*int(self.numD_E[0]) + [self.D_pre]*int(self.numD_pre[0]) + [self.D_S]*int(self.numD_S[0]) + [self.D_A]*int(self.numD_A[0])
).reshape((self.numNodes,1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if(store_Xseries):
self.Xseries = numpy.zeros(shape=(5*self.numNodes, self.numNodes), dtype='uint8')
self.Xseries[0,:] = self.X.T
self.transitions = {
'StoE': {'currentState':self.S, 'newState':self.E},
'EtoIPRE': {'currentState':self.E, 'newState':self.I_pre},
'EtoDE': {'currentState':self.E, 'newState':self.D_E},
'IPREtoIS': {'currentState':self.I_pre, 'newState':self.I_S},
'IPREtoIA': {'currentState':self.I_pre, 'newState':self.I_A},
'IPREtoDPRE': {'currentState':self.I_pre, 'newState':self.D_pre},
'IStoH': {'currentState':self.I_S, 'newState':self.H},
'IStoR': {'currentState':self.I_S, 'newState':self.R},
'IStoDS': {'currentState':self.I_S, 'newState':self.D_S},
'IAtoR': {'currentState':self.I_A, 'newState':self.R},
'IAtoDA': {'currentState':self.I_A, 'newState':self.D_A},
'HtoR': {'currentState':self.H, 'newState':self.R},
'HtoF': {'currentState':self.H, 'newState':self.F},
'RtoS': {'currentState':self.R, 'newState':self.S},
'DEtoDPRE': {'currentState':self.D_E, 'newState':self.D_pre},
'DPREtoDS': {'currentState':self.D_pre, 'newState':self.D_S},
'DPREtoDA': {'currentState':self.D_pre, 'newState':self.D_A},
'DStoH': {'currentState':self.D_S, 'newState':self.H},
'DStoR': {'currentState':self.D_S, 'newState':self.R},
'DAtoR': {'currentState':self.D_A, 'newState':self.R},
'_toS': {'currentState':True, 'newState':self.S},
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if(node_groups):
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {'nodes': numpy.array(nodeList),
'mask': numpy.isin(range(self.numNodes), nodeList).reshape((self.numNodes,1))}
self.nodeGroupData[groupName]['numS'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numE'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_pre'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_S'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numI_A'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numH'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numR'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numF'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_E'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_pre'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_S'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numD_A'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['N'] = numpy.zeros(5*self.numNodes)
self.nodeGroupData[groupName]['numS'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI_pre'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_pre)
self.nodeGroupData[groupName]['numI_S'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_S)
self.nodeGroupData[groupName]['numI_A'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_A)
self.nodeGroupData[groupName]['numH'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.H)
self.nodeGroupData[groupName]['numR'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['numD_E'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_pre'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_pre)
                self.nodeGroupData[groupName]['numD_S'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_S)
                self.nodeGroupData[groupName]['numD_A'][0] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_A)
self.nodeGroupData[groupName]['N'][0] = self.numNodes - self.numF[0]
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_parameters(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
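        # Each parameter may be given as a scalar (applied to all nodes) or as a per-node list/array;
        # in either case it is stored internally as a (numNodes, 1) column vector.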
self.beta = numpy.array(self.parameters['beta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta'], shape=(self.numNodes,1))
self.beta_A = (numpy.array(self.parameters['beta_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_A'], shape=(self.numNodes,1))) if self.parameters['beta_A'] is not None else self.beta
self.sigma = numpy.array(self.parameters['sigma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma'], shape=(self.numNodes,1))
self.lamda = numpy.array(self.parameters['lamda']).reshape((self.numNodes, 1)) if isinstance(self.parameters['lamda'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['lamda'], shape=(self.numNodes,1))
self.gamma = numpy.array(self.parameters['gamma']).reshape((self.numNodes, 1)) if isinstance(self.parameters['gamma'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma'], shape=(self.numNodes,1))
self.eta = numpy.array(self.parameters['eta']).reshape((self.numNodes, 1)) if isinstance(self.parameters['eta'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['eta'], shape=(self.numNodes,1))
self.gamma_A = (numpy.array(self.parameters['gamma_A']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_A'], shape=(self.numNodes,1))) if self.parameters['gamma_A'] is not None else self.gamma
self.gamma_H = (numpy.array(self.parameters['gamma_H']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_H'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_H'], shape=(self.numNodes,1))) if self.parameters['gamma_H'] is not None else self.gamma
self.mu_H = numpy.array(self.parameters['mu_H']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_H'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_H'], shape=(self.numNodes,1))
self.xi = numpy.array(self.parameters['xi']).reshape((self.numNodes, 1)) if isinstance(self.parameters['xi'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['xi'], shape=(self.numNodes,1))
self.mu_0 = numpy.array(self.parameters['mu_0']).reshape((self.numNodes, 1)) if isinstance(self.parameters['mu_0'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['mu_0'], shape=(self.numNodes,1))
self.nu = numpy.array(self.parameters['nu']).reshape((self.numNodes, 1)) if isinstance(self.parameters['nu'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['nu'], shape=(self.numNodes,1))
self.a = numpy.array(self.parameters['a']).reshape((self.numNodes, 1)) if isinstance(self.parameters['a'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['a'], shape=(self.numNodes,1))
self.h = numpy.array(self.parameters['h']).reshape((self.numNodes, 1)) if isinstance(self.parameters['h'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['h'], shape=(self.numNodes,1))
self.f = numpy.array(self.parameters['f']).reshape((self.numNodes, 1)) if isinstance(self.parameters['f'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['f'], shape=(self.numNodes,1))
self.p = numpy.array(self.parameters['p']).reshape((self.numNodes, 1)) if isinstance(self.parameters['p'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['p'], shape=(self.numNodes,1))
# Testing-related parameters:
self.beta_D = (numpy.array(self.parameters['beta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['beta_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['beta_D'], shape=(self.numNodes,1))) if self.parameters['beta_D'] is not None else self.beta
self.sigma_D = (numpy.array(self.parameters['sigma_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['sigma_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['sigma_D'], shape=(self.numNodes,1))) if self.parameters['sigma_D'] is not None else self.sigma
self.lamda_D = (numpy.array(self.parameters['lamda_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['lamda_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['lamda_D'], shape=(self.numNodes,1))) if self.parameters['lamda_D'] is not None else self.lamda
self.gamma_D_S = (numpy.array(self.parameters['gamma_D_S']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_D_S'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D_S'], shape=(self.numNodes,1))) if self.parameters['gamma_D_S'] is not None else self.gamma
self.gamma_D_A = (numpy.array(self.parameters['gamma_D_A']).reshape((self.numNodes, 1))if isinstance(self.parameters['gamma_D_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['gamma_D_A'], shape=(self.numNodes,1))) if self.parameters['gamma_D_A'] is not None else self.gamma
self.eta_D = (numpy.array(self.parameters['eta_D']).reshape((self.numNodes, 1)) if isinstance(self.parameters['eta_D'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['eta_D'], shape=(self.numNodes,1))) if self.parameters['eta_D'] is not None else self.eta
self.theta_E = numpy.array(self.parameters['theta_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_E'], shape=(self.numNodes,1))
self.theta_pre = numpy.array(self.parameters['theta_pre']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_pre'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_pre'], shape=(self.numNodes,1))
self.theta_S = numpy.array(self.parameters['theta_S']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_S'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_S'], shape=(self.numNodes,1))
self.theta_A = numpy.array(self.parameters['theta_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['theta_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['theta_A'], shape=(self.numNodes,1))
self.phi_E = numpy.array(self.parameters['phi_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_E'], shape=(self.numNodes,1))
self.phi_pre = numpy.array(self.parameters['phi_pre']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_pre'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_pre'], shape=(self.numNodes,1))
self.phi_S = numpy.array(self.parameters['phi_S']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_S'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_S'], shape=(self.numNodes,1))
self.phi_A = numpy.array(self.parameters['phi_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['phi_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['phi_A'], shape=(self.numNodes,1))
self.d_E = numpy.array(self.parameters['d_E']).reshape((self.numNodes, 1)) if isinstance(self.parameters['d_E'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['d_E'], shape=(self.numNodes,1))
self.d_pre = numpy.array(self.parameters['d_pre']).reshape((self.numNodes, 1)) if isinstance(self.parameters['d_pre'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['d_pre'], shape=(self.numNodes,1))
self.d_S = numpy.array(self.parameters['d_S']).reshape((self.numNodes, 1)) if isinstance(self.parameters['d_S'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['d_S'], shape=(self.numNodes,1))
self.d_A = numpy.array(self.parameters['d_A']).reshape((self.numNodes, 1)) if isinstance(self.parameters['d_A'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['d_A'], shape=(self.numNodes,1))
self.q = numpy.array(self.parameters['q']).reshape((self.numNodes, 1)) if isinstance(self.parameters['q'], (list, numpy.ndarray)) else numpy.full(fill_value=self.parameters['q'], shape=(self.numNodes,1))
#Local transmission parameters:
if(self.parameters['beta_local'] is not None):
if(isinstance(self.parameters['beta_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_local'], list)):
self.beta_local = numpy.array(self.parameters['beta_local'])
else: # is numpy.ndarray
self.beta_local = self.parameters['beta_local']
                # numpy.ndarray.reshape is not in-place; assign the result back so the reshape takes effect
                if(self.beta_local.ndim == 1):
                    self.beta_local = self.beta_local.reshape((self.numNodes, 1))
                elif(self.beta_local.ndim == 2):
                    self.beta_local = self.beta_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_local = numpy.full_like(self.beta, fill_value=self.parameters['beta_local'])
else:
self.beta_local = self.beta
#----------------------------------------
if(self.parameters['beta_A_local'] is not None):
if(isinstance(self.parameters['beta_A_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_A_local'], list)):
self.beta_A_local = numpy.array(self.parameters['beta_A_local'])
else: # is numpy.ndarray
self.beta_A_local = self.parameters['beta_A_local']
                if(self.beta_A_local.ndim == 1):
                    self.beta_A_local = self.beta_A_local.reshape((self.numNodes, 1))
                elif(self.beta_A_local.ndim == 2):
                    self.beta_A_local = self.beta_A_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_A_local = numpy.full_like(self.beta_A, fill_value=self.parameters['beta_A_local'])
else:
self.beta_A_local = self.beta_A
#----------------------------------------
if(self.parameters['beta_D_local'] is not None):
if(isinstance(self.parameters['beta_D_local'], (list, numpy.ndarray))):
if(isinstance(self.parameters['beta_D_local'], list)):
self.beta_D_local = numpy.array(self.parameters['beta_D_local'])
else: # is numpy.ndarray
self.beta_D_local = self.parameters['beta_D_local']
                if(self.beta_D_local.ndim == 1):
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, 1))
                elif(self.beta_D_local.ndim == 2):
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_D_local = numpy.full_like(self.beta_D, fill_value=self.parameters['beta_D_local'])
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if(self.beta_local.ndim == 1):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, numpy.tile(self.beta_local, (1,self.numNodes))).tocsr()
elif(self.beta_local.ndim == 2):
self.A_beta = scipy.sparse.csr_matrix.multiply(self.A, self.beta_local).tocsr()
# Pre-multiply beta_A values by the adjacency matrix ("transmission weight connections")
if(self.beta_A_local.ndim == 1):
self.A_beta_A = scipy.sparse.csr_matrix.multiply(self.A, numpy.tile(self.beta_A_local, (1,self.numNodes))).tocsr()
elif(self.beta_A_local.ndim == 2):
self.A_beta_A = scipy.sparse.csr_matrix.multiply(self.A, self.beta_A_local).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if(self.beta_D_local.ndim == 1):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, numpy.tile(self.beta_D_local, (1,self.numNodes))).tocsr()
elif(self.beta_D_local.ndim == 2):
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(self.A_Q, self.beta_D_local).tocsr()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes,1) # sums of adj matrix cols
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G)==numpy.ndarray:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G)==networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(new_G) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q)==numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q)==networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(new_Q) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
assert(self.numNodes == self.numNodes_Q), "The normal and quarantine adjacency graphs must be of the same size."
return
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def update_scenario_flags(self):
self.testing_scenario = ( (numpy.any(self.d_E) and (numpy.any(self.theta_E) or numpy.any(self.phi_E)))
or (numpy.any(self.d_pre) and (numpy.any(self.theta_pre) or numpy.any(self.phi_pre)))
or (numpy.any(self.d_S) and (numpy.any(self.theta_S) or numpy.any(self.phi_S)))
or (numpy.any(self.d_A) and (numpy.any(self.theta_A) or numpy.any(self.phi_A))) )
self.tracing_scenario = ( (numpy.any(self.d_E) and numpy.any(self.phi_E))
or (numpy.any(self.d_pre) and numpy.any(self.phi_pre))
or (numpy.any(self.d_S) and numpy.any(self.phi_S))
or (numpy.any(self.d_A) and numpy.any(self.phi_A)) )
self.vitality_scenario = (numpy.any(self.mu_0) and numpy.any(self.nu))
self.resusceptibility_scenario = (numpy.any(self.xi))
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_infections(self, t_idx=None):
if(t_idx is None):
return (self.numE[:] + self.numI_pre[:] + self.numI_S[:] + self.numI_A[:]
+ self.numD_E[:] + self.numD_pre[:] + self.numD_S[:] + self.numD_A[:] + self.numH[:])
else:
return (self.numE[t_idx] + self.numI_pre[t_idx] + self.numI_S[t_idx] + self.numI_A[t_idx]
+ self.numD_E[t_idx] + self.numD_pre[t_idx] + self.numD_S[t_idx] + self.numD_A[t_idx] + self.numH[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def total_num_detected(self, t_idx=None):
if(t_idx is None):
return (self.numD_E[:] + self.numD_pre[:] + self.numD_S[:] + self.numD_A[:])
else:
return (self.numD_E[t_idx] + self.numD_pre[t_idx] + self.numD_S[t_idx] + self.numD_A[t_idx])
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def calc_propensities(self):
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-calculate matrix multiplication terms that may be used in multiple propensity calculations,
# and check to see if their computation is necessary before doing the multiplication
transmissionTerms_I = numpy.zeros(shape=(self.numNodes,1))
if( (numpy.any(self.numI_S[self.tidx]) and self.A_beta.count_nonzero()>0)
or ((numpy.any(self.numI_pre[self.tidx]) or numpy.any(self.numI_A[self.tidx])) and self.A_beta_A.count_nonzero()>0) ):
transmissionTerms_I = numpy.asarray( scipy.sparse.csr_matrix.dot( self.A_beta, self.X==self.I_S )
+ scipy.sparse.csr_matrix.dot( self.A_beta_A, ((self.X==self.I_pre)|(self.X==self.I_A)) ) )
transmissionTerms_D = numpy.zeros(shape=(self.numNodes,1))
if(self.testing_scenario
and (numpy.any(self.numD_pre[self.tidx]) or numpy.any(self.numD_S[self.tidx]) or numpy.any(self.numD_A[self.tidx]) or numpy.any(self.numH[self.tidx]))
and self.A_Q_beta_D.count_nonzero()>0 ):
transmissionTerms_D = numpy.asarray( scipy.sparse.csr_matrix.dot( self.A_Q_beta_D, ((self.X==self.D_pre)|(self.X==self.D_S)|(self.X==self.D_A)|(self.X==self.H)) ) )
numContacts_D = numpy.zeros(shape=(self.numNodes,1))
if(self.tracing_scenario
and (numpy.any(self.numD_E[self.tidx]) or numpy.any(self.numD_pre[self.tidx]) or numpy.any(self.numD_S[self.tidx]) or numpy.any(self.numD_A[self.tidx]) or numpy.any(self.numH[self.tidx])) ):
numContacts_D = numpy.asarray( scipy.sparse.csr_matrix.dot( self.A, ((self.X==self.D_E)|(self.X==self.D_pre)|(self.X==self.D_S)|(self.X==self.D_A)|(self.X==self.H)) ) )
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
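        # S->E propensity combines global (well-mixed) exposure, weighted by p, with exposure along network
        # edges (normalized by node degree), weighted by (1-p); detected individuals contribute through the
        # q- and beta_D-weighted terms.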
propensities_StoE = ( self.p*((self.beta*self.numI_S[self.tidx] + self.beta_A*(self.numI_pre[self.tidx] + self.numI_A[self.tidx]) + self.q*self.beta_D*(self.numD_pre[self.tidx] + self.numD_S[self.tidx] + self.numD_A[self.tidx]))/self.N[self.tidx])
+ (1-self.p)*numpy.divide((transmissionTerms_I + transmissionTerms_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0)
)*(self.X==self.S)
propensities_EtoIPRE = self.sigma*(self.X==self.E)
propensities_IPREtoIS = (1-self.a)*self.lamda*(self.X==self.I_pre)
propensities_IPREtoIA = self.a*self.lamda*(self.X==self.I_pre)
propensities_IStoR = (1-self.h)*self.gamma*(self.X==self.I_S)
propensities_IStoH = self.h*self.eta*(self.X==self.I_S)
propensities_IAtoR = self.gamma_A*(self.X==self.I_A)
propensities_HtoR = (1-self.f)*self.gamma_H*(self.X==self.H)
propensities_HtoF = self.f*self.mu_H*(self.X==self.H)
propensities_EtoDE = (self.theta_E + self.phi_E*numContacts_D)*self.d_E*(self.X==self.E)
propensities_IPREtoDPRE = (self.theta_pre + self.phi_pre*numContacts_D)*self.d_pre*(self.X==self.I_pre)
propensities_IStoDS = (self.theta_S + self.phi_S*numContacts_D)*self.d_S*(self.X==self.I_S)
propensities_IAtoDA = (self.theta_A + self.phi_A*numContacts_D)*self.d_A*(self.X==self.I_A)
propensities_DEtoDPRE = self.sigma_D*(self.X==self.D_E)
propensities_DPREtoDS = (1-self.a)*self.lamda_D*(self.X==self.D_pre)
propensities_DPREtoDA = self.a*self.lamda_D*(self.X==self.D_pre)
propensities_DStoR = (1-self.h)*self.gamma_D_S*(self.X==self.D_S)
propensities_DStoH = self.h*self.eta_D*(self.X==self.D_S)
propensities_DAtoR = self.gamma_D_A*(self.X==self.D_A)
propensities_RtoS = self.xi*(self.X==self.R)
propensities__toS = self.nu*(self.X!=self.F)
propensities = numpy.hstack([propensities_StoE, propensities_EtoIPRE, propensities_IPREtoIS, propensities_IPREtoIA,
propensities_IStoR, propensities_IStoH, propensities_IAtoR, propensities_HtoR, propensities_HtoF,
propensities_EtoDE, propensities_IPREtoDPRE, propensities_IStoDS, propensities_IAtoDA,
propensities_DEtoDPRE, propensities_DPREtoDS, propensities_DPREtoDA, propensities_DStoR, propensities_DStoH,
propensities_DAtoR, propensities_RtoS, propensities__toS])
columns = ['StoE', 'EtoIPRE', 'IPREtoIS', 'IPREtoIA', 'IStoR', 'IStoH', 'IAtoR', 'HtoR', 'HtoF',
'EtoDE', 'IPREtoDPRE', 'IStoDS', 'IAtoDA', 'DEtoDPRE', 'DPREtoDS', 'DPREtoDA', 'DStoR', 'DStoH', 'DAtoR',
'RtoS', '_toS']
return propensities, columns
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def increase_data_series_length(self):
self.tseries = numpy.pad(self.tseries, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numS = numpy.pad(self.numS, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numE = numpy.pad(self.numE, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numI_pre = numpy.pad(self.numI_pre, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numI_S = numpy.pad(self.numI_S, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numI_A = numpy.pad(self.numI_A, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numH = numpy.pad(self.numH, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numR = numpy.pad(self.numR, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numF = numpy.pad(self.numF, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_E = numpy.pad(self.numD_E, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_pre = numpy.pad(self.numD_pre, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_S = numpy.pad(self.numD_S, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.numD_A = numpy.pad(self.numD_A, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.N = numpy.pad(self.N, [(0, 5*self.numNodes)], mode='constant', constant_values=0)
if(self.store_Xseries):
            self.Xseries = numpy.pad(self.Xseries, [(0, 5*self.numNodes), (0,0)], mode='constant', constant_values=0)
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.pad(self.nodeGroupData[groupName]['numS'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numE'] = numpy.pad(self.nodeGroupData[groupName]['numE'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numI_pre'] = numpy.pad(self.nodeGroupData[groupName]['numI_pre'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numI_S'] = numpy.pad(self.nodeGroupData[groupName]['numI_S'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numI_A'] = numpy.pad(self.nodeGroupData[groupName]['numI_A'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numH'] = numpy.pad(self.nodeGroupData[groupName]['numH'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numR'] = numpy.pad(self.nodeGroupData[groupName]['numR'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numF'] = numpy.pad(self.nodeGroupData[groupName]['numF'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_E'] = numpy.pad(self.nodeGroupData[groupName]['numD_E'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_pre'] = numpy.pad(self.nodeGroupData[groupName]['numD_pre'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_S'] = numpy.pad(self.nodeGroupData[groupName]['numD_S'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['numD_A'] = numpy.pad(self.nodeGroupData[groupName]['numD_A'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
self.nodeGroupData[groupName]['N'] = numpy.pad(self.nodeGroupData[groupName]['N'], [(0, 5*self.numNodes)], mode='constant', constant_values=0)
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def finalize_data_series(self):
self.tseries = numpy.array(self.tseries, dtype=float)[:self.tidx+1]
self.numS = numpy.array(self.numS, dtype=float)[:self.tidx+1]
self.numE = numpy.array(self.numE, dtype=float)[:self.tidx+1]
self.numI_pre = numpy.array(self.numI_pre, dtype=float)[:self.tidx+1]
self.numI_S = numpy.array(self.numI_S, dtype=float)[:self.tidx+1]
self.numI_A = numpy.array(self.numI_A, dtype=float)[:self.tidx+1]
self.numH = numpy.array(self.numH, dtype=float)[:self.tidx+1]
self.numR = numpy.array(self.numR, dtype=float)[:self.tidx+1]
self.numF = numpy.array(self.numF, dtype=float)[:self.tidx+1]
self.numD_E = numpy.array(self.numD_E, dtype=float)[:self.tidx+1]
self.numD_pre = numpy.array(self.numD_pre, dtype=float)[:self.tidx+1]
self.numD_S = numpy.array(self.numD_S, dtype=float)[:self.tidx+1]
self.numD_A = numpy.array(self.numD_A, dtype=float)[:self.tidx+1]
self.N = numpy.array(self.N, dtype=float)[:self.tidx+1]
if(self.store_Xseries):
self.Xseries = self.Xseries[:self.tidx+1, :]
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'] = numpy.array(self.nodeGroupData[groupName]['numS'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numE'] = numpy.array(self.nodeGroupData[groupName]['numE'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numI_pre'] = numpy.array(self.nodeGroupData[groupName]['numI_pre'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numI_S'] = numpy.array(self.nodeGroupData[groupName]['numI_S'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numI_A'] = numpy.array(self.nodeGroupData[groupName]['numI_A'], dtype=float)[:self.tidx+1]
                self.nodeGroupData[groupName]['numH'] = numpy.array(self.nodeGroupData[groupName]['numH'], dtype=float)[:self.tidx+1]
                self.nodeGroupData[groupName]['numR'] = numpy.array(self.nodeGroupData[groupName]['numR'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numF'] = numpy.array(self.nodeGroupData[groupName]['numF'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_E'] = numpy.array(self.nodeGroupData[groupName]['numD_E'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_pre'] = numpy.array(self.nodeGroupData[groupName]['numD_pre'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_S'] = numpy.array(self.nodeGroupData[groupName]['numD_S'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['numD_A'] = numpy.array(self.nodeGroupData[groupName]['numD_A'], dtype=float)[:self.tidx+1]
self.nodeGroupData[groupName]['N'] = numpy.array(self.nodeGroupData[groupName]['N'], dtype=float)[:self.tidx+1]
return None
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run_iteration(self):
if(self.tidx >= len(self.tseries)-1):
            # Room has run out in the timeseries storage arrays; extend them by another 5*numNodes slots:
self.increase_data_series_length()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1. Generate 2 random numbers uniformly distributed in (0,1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
r1 = numpy.random.rand()
r2 = numpy.random.rand()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 2. Calculate propensities
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities, transitionTypes = self.calc_propensities()
        # Terminate when the total rate (propensity) of all events is 0:
if(propensities.sum() <= 0.0):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 3. Calculate alpha
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_flat = propensities.ravel(order='F')
cumsum = propensities_flat.cumsum()
alpha = propensities_flat.sum()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 4. Compute the time until the next event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
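        # The waiting time tau is an Exponential(alpha) draw, obtained by inverse-transform sampling of r1.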
tau = (1/alpha)*numpy.log(float(1/r1))
self.t += tau
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 5. Compute which event takes place
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
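        # The propensities were flattened in column-major (Fortran) order, so the sampled flat index
        # decomposes into a node (transitionIdx % numNodes) and a transition type (transitionIdx // numNodes).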
transitionIdx = numpy.searchsorted(cumsum,r2*alpha)
transitionNode = transitionIdx % self.numNodes
transitionType = transitionTypes[ int(transitionIdx/self.numNodes) ]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 6. Update node states and data series
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
assert(self.X[transitionNode] == self.transitions[transitionType]['currentState'] and self.X[transitionNode]!=self.F), "Assertion error: Node "+str(transitionNode)+" has unexpected current state "+str(self.X[transitionNode])+" given the intended transition of "+str(transitionType)+"."
self.X[transitionNode] = self.transitions[transitionType]['newState']
self.tidx += 1
self.tseries[self.tidx] = self.t
self.numS[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.S), a_min=0, a_max=self.numNodes)
self.numE[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.E), a_min=0, a_max=self.numNodes)
self.numI_pre[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.I_pre), a_min=0, a_max=self.numNodes)
self.numI_S[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.I_S), a_min=0, a_max=self.numNodes)
self.numI_A[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.I_A), a_min=0, a_max=self.numNodes)
self.numH[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.H), a_min=0, a_max=self.numNodes)
self.numR[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.R), a_min=0, a_max=self.numNodes)
self.numF[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.F), a_min=0, a_max=self.numNodes)
self.numD_E[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_E), a_min=0, a_max=self.numNodes)
self.numD_pre[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_pre), a_min=0, a_max=self.numNodes)
self.numD_S[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_S), a_min=0, a_max=self.numNodes)
self.numD_A[self.tidx] = numpy.clip(numpy.count_nonzero(self.X==self.D_A), a_min=0, a_max=self.numNodes)
self.N[self.tidx] = numpy.clip((self.numNodes - self.numF[self.tidx]), a_min=0, a_max=self.numNodes)
if(self.store_Xseries):
self.Xseries[self.tidx,:] = self.X.T
if(self.nodeGroupData):
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]['numS'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.S)
self.nodeGroupData[groupName]['numE'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.E)
self.nodeGroupData[groupName]['numI_pre'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_pre)
self.nodeGroupData[groupName]['numI_S'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_S)
self.nodeGroupData[groupName]['numI_A'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.I_A)
self.nodeGroupData[groupName]['numH'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.H)
self.nodeGroupData[groupName]['numR'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.R)
self.nodeGroupData[groupName]['numF'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.F)
self.nodeGroupData[groupName]['numD_E'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_E)
self.nodeGroupData[groupName]['numD_pre'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_pre)
self.nodeGroupData[groupName]['numD_S'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_S)
self.nodeGroupData[groupName]['numD_A'][self.tidx] = numpy.count_nonzero(self.nodeGroupData[groupName]['mask']*self.X==self.D_A)
                self.nodeGroupData[groupName]['N'][self.tidx] = numpy.clip((self.nodeGroupData[groupName]['numS'][self.tidx] + self.nodeGroupData[groupName]['numE'][self.tidx] + self.nodeGroupData[groupName]['numI_pre'][self.tidx] + self.nodeGroupData[groupName]['numI_S'][self.tidx] + self.nodeGroupData[groupName]['numI_A'][self.tidx] + self.nodeGroupData[groupName]['numH'][self.tidx] + self.nodeGroupData[groupName]['numR'][self.tidx] + self.nodeGroupData[groupName]['numD_E'][self.tidx] + self.nodeGroupData[groupName]['numD_pre'][self.tidx] + self.nodeGroupData[groupName]['numD_S'][self.tidx] + self.nodeGroupData[groupName]['numD_A'][self.tidx]), a_min=0, a_max=self.numNodes)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Terminate if tmax reached or num infections is 0:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(self.t >= self.tmax or self.total_num_infections(self.tidx) < 1):
self.finalize_data_series()
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def run(self, T, checkpoints=None, print_interval=10, verbose='t'):
if(T>0):
self.tmax += T
else:
return False
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
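        # Example checkpoints argument (a sketch; besides 't', any key of self.parameters, or 'G'/'Q', may be given):
        #   checkpoints = {'t': [20, 100], 'p': [0.1, 0.5], 'theta_E': [0.02, 0.02]}
        # At each time in 't', the corresponding parameter values are applied via update_parameters().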
if(checkpoints):
numCheckpoints = len(checkpoints['t'])
for chkpt_param, chkpt_values in checkpoints.items():
assert(isinstance(chkpt_values, (list, numpy.ndarray)) and len(chkpt_values)==numCheckpoints), "Expecting a list of values with length equal to number of checkpoint times ("+str(numCheckpoints)+") for each checkpoint parameter."
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
print_reset = True
running = True
while running:
running = self.run_iteration()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Handle checkpoints if applicable:
if(checkpoints):
if(self.t >= checkpointTime):
if(verbose is not False):
print("[Checkpoint: Updating parameters]")
# A checkpoint has been reached, update param values:
if('G' in list(checkpoints.keys())):
self.update_G(checkpoints['G'][checkpointIdx])
if('Q' in list(checkpoints.keys())):
self.update_Q(checkpoints['Q'][checkpointIdx])
for param in list(self.parameters.keys()):
if(param in list(checkpoints.keys())):
self.parameters.update({param: checkpoints[param][checkpointIdx]})
# Update parameter data structures and scenario flags:
self.update_parameters()
# Update the next checkpoint time:
checkpointIdx = numpy.searchsorted(checkpoints['t'], self.t) # Finds 1st index in list greater than given val
if(checkpointIdx >= numCheckpoints):
# We are out of checkpoints, stop checking them:
checkpoints = None
else:
checkpointTime = checkpoints['t'][checkpointIdx]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(print_interval):
if(print_reset and (int(self.t) % print_interval == 0)):
if(verbose=="t"):
print("t = %.2f" % self.t)
if(verbose==True):
print("t = %.2f" % self.t)
print("\t S = " + str(self.numS[self.tidx]))
print("\t E = " + str(self.numE[self.tidx]))
print("\t I_pre = " + str(self.numI_pre[self.tidx]))
print("\t I_S = " + str(self.numI_S[self.tidx]))
print("\t I_A = " + str(self.numI_A[self.tidx]))
print("\t H = " + str(self.numH[self.tidx]))
print("\t R = " + str(self.numR[self.tidx]))
print("\t F = " + str(self.numF[self.tidx]))
print("\t D_E = " + str(self.numD_E[self.tidx]))
print("\t D_pre = " + str(self.numD_pre[self.tidx]))
print("\t D_S = " + str(self.numD_S[self.tidx]))
print("\t D_A = " + str(self.numD_A[self.tidx]))
print_reset = False
                elif(not print_reset and (int(self.t) % print_interval != 0)):
print_reset = True
return True
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
def plot(self, ax=None, plot_S='line', plot_E='line', plot_I_pre='line', plot_I_S='line', plot_I_A='line',
plot_H='line', plot_R='line', plot_F='line',
plot_D_E='line', plot_D_pre='line', plot_D_S='line', plot_D_A='line', combine_D=True,
color_S='tab:green', color_E='orange', color_I_pre='tomato', color_I_S='crimson', color_I_A='crimson',
color_H='violet', color_R='tab:blue', color_F='black',
color_D_E='mediumorchid', color_D_pre='mediumorchid', color_D_S='mediumorchid', color_D_A='mediumorchid',
color_reference='#E0E0E0',
dashed_reference_results=None, dashed_reference_label='reference',
shaded_reference_results=None, shaded_reference_label='reference',
vlines=[], vline_colors=[], vline_styles=[], vline_labels=[],
ylim=None, xlim=None, legend=True, title=None, side_title=None, plot_percentages=True):
import matplotlib.pyplot as pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(not ax):
fig, ax = pyplot.subplots()
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF/self.numNodes if plot_percentages else self.numF
Dseries = self.total_num_detected()/self.numNodes if plot_percentages else self.total_num_detected()
D_Eseries = self.numD_E/self.numNodes if plot_percentages else self.numD_E
D_preseries = self.numD_pre/self.numNodes if plot_percentages else self.numD_pre
D_Aseries = self.numD_A/self.numNodes if plot_percentages else self.numD_A
D_Sseries = self.numD_S/self.numNodes if plot_percentages else self.numD_S
Hseries = self.numH/self.numNodes if plot_percentages else self.numH
Eseries = self.numE/self.numNodes if plot_percentages else self.numE
I_preseries = self.numI_pre/self.numNodes if plot_percentages else self.numI_pre
I_Sseries = self.numI_S/self.numNodes if plot_percentages else self.numI_S
I_Aseries = self.numI_A/self.numNodes if plot_percentages else self.numI_A
Rseries = self.numR/self.numNodes if plot_percentages else self.numR
Sseries = self.numS/self.numNodes if plot_percentages else self.numS
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if(dashed_reference_results):
dashedReference_tseries = dashed_reference_results.tseries[::int(self.numNodes/100)]
dashedReference_infectedStack = dashed_reference_results.total_num_infections()[::int(self.numNodes/100)] / (self.numNodes if plot_percentages else 1)
ax.plot(dashedReference_tseries, dashedReference_infectedStack, color='#E0E0E0', linestyle='--', label='Total infections ('+dashed_reference_label+')', zorder=0)
if(shaded_reference_results):
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_infectedStack = shaded_reference_results.total_num_infections() / (self.numNodes if plot_percentages else 1)
ax.fill_between(shaded_reference_results.tseries, shadedReference_infectedStack, 0, color='#EFEFEF', label='Total infections ('+shaded_reference_label+')', zorder=0)
ax.plot(shaded_reference_results.tseries, shadedReference_infectedStack, color='#E0E0E0', zorder=1)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if(any(Fseries) and plot_F=='stacked'):
ax.fill_between(numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), topstack, color=color_F, alpha=0.5, label='$F$', zorder=2)
ax.plot( numpy.ma.masked_where(Fseries<=0, self.tseries), numpy.ma.masked_where(Fseries<=0, topstack+Fseries), color=color_F, zorder=3)
topstack = topstack+Fseries
if(any(Hseries) and plot_H=='stacked'):
ax.fill_between(numpy.ma.masked_where(Hseries<=0, self.tseries), numpy.ma.masked_where(Hseries<=0, topstack+Hseries), topstack, color=color_H, alpha=0.5, label='$H$', zorder=2)
ax.plot( numpy.ma.masked_where(Hseries<=0, self.tseries), numpy.ma.masked_where(Hseries<=0, topstack+Hseries), color=color_H, zorder=3)
topstack = topstack+Hseries
if(combine_D and any(Dseries) and plot_D_E=='stacked' and plot_D_pre=='stacked' and plot_D_S=='stacked' and plot_D_A=='stacked'):
ax.fill_between(numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), topstack, color=color_D_S, alpha=0.5, label='$D_{all}$', zorder=2)
ax.plot( numpy.ma.masked_where(Dseries<=0, self.tseries), numpy.ma.masked_where(Dseries<=0, topstack+Dseries), color=color_D_S, zorder=3)
topstack = topstack+Dseries
else:
if(any(D_Eseries) and plot_D_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), topstack, color=color_D_E, alpha=0.5, label='$D_E$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Eseries<=0, self.tseries), numpy.ma.masked_where(D_Eseries<=0, topstack+D_Eseries), color=color_D_E, zorder=3)
topstack = topstack+D_Eseries
if(any(D_preseries) and plot_D_pre=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_preseries<=0, self.tseries), numpy.ma.masked_where(D_preseries<=0, topstack+D_preseries), topstack, color=color_D_pre, alpha=0.5, label='$D_{pre}$', zorder=2)
ax.plot( numpy.ma.masked_where(D_preseries<=0, self.tseries), numpy.ma.masked_where(D_preseries<=0, topstack+D_preseries), color=color_D_pre, zorder=3)
topstack = topstack+D_preseries
if(any(D_Sseries) and plot_D_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Sseries<=0, self.tseries), numpy.ma.masked_where(D_Sseries<=0, topstack+D_Sseries), topstack, color=color_D_S, alpha=0.5, label='$D_S$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Sseries<=0, self.tseries), numpy.ma.masked_where(D_Sseries<=0, topstack+D_Sseries), color=color_D_S, zorder=3)
topstack = topstack+D_Sseries
if(any(D_Aseries) and plot_D_A=='stacked'):
ax.fill_between(numpy.ma.masked_where(D_Aseries<=0, self.tseries), numpy.ma.masked_where(D_Aseries<=0, topstack+D_Aseries), topstack, color=color_D_A, alpha=0.5, label='$D_A$', zorder=2)
ax.plot( numpy.ma.masked_where(D_Aseries<=0, self.tseries), numpy.ma.masked_where(D_Aseries<=0, topstack+D_Aseries), color=color_D_A, zorder=3)
topstack = topstack+D_Aseries
if(any(Eseries) and plot_E=='stacked'):
ax.fill_between(numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), topstack, color=color_E, alpha=0.5, label='$E$', zorder=2)
ax.plot( numpy.ma.masked_where(Eseries<=0, self.tseries), numpy.ma.masked_where(Eseries<=0, topstack+Eseries), color=color_E, zorder=3)
topstack = topstack+Eseries
if(any(I_preseries) and plot_I_pre=='stacked'):
ax.fill_between(numpy.ma.masked_where(I_preseries<=0, self.tseries), numpy.ma.masked_where(I_preseries<=0, topstack+I_preseries), topstack, color=color_I_pre, alpha=0.5, label='$I_{pre}$', zorder=2)
ax.plot( numpy.ma.masked_where(I_preseries<=0, self.tseries), numpy.ma.masked_where(I_preseries<=0, topstack+I_preseries), color=color_I_pre, zorder=3)
topstack = topstack+I_preseries
if(any(I_Sseries) and plot_I_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(I_Sseries<=0, self.tseries), numpy.ma.masked_where(I_Sseries<=0, topstack+I_Sseries), topstack, color=color_I_S, alpha=0.5, label='$I_S$', zorder=2)
ax.plot( numpy.ma.masked_where(I_Sseries<=0, self.tseries), numpy.ma.masked_where(I_Sseries<=0, topstack+I_Sseries), color=color_I_S, zorder=3)
topstack = topstack+I_Sseries
if(any(I_Aseries) and plot_I_A=='stacked'):
ax.fill_between(numpy.ma.masked_where(I_Aseries<=0, self.tseries), numpy.ma.masked_where(I_Aseries<=0, topstack+I_Aseries), topstack, color=color_I_A, alpha=0.25, label='$I_A$', zorder=2)
ax.plot( numpy.ma.masked_where(I_Aseries<=0, self.tseries), numpy.ma.masked_where(I_Aseries<=0, topstack+I_Aseries), color=color_I_A, zorder=3)
topstack = topstack+I_Aseries
if(any(Rseries) and plot_R=='stacked'):
ax.fill_between(numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), topstack, color=color_R, alpha=0.5, label='$R$', zorder=2)
ax.plot( numpy.ma.masked_where(Rseries<=0, self.tseries), numpy.ma.masked_where(Rseries<=0, topstack+Rseries), color=color_R, zorder=3)
topstack = topstack+Rseries
if(any(Sseries) and plot_S=='stacked'):
ax.fill_between(numpy.ma.masked_where(Sseries<=0, self.tseries), numpy.ma.masked_where(Sseries<=0, topstack+Sseries), topstack, color=color_S, alpha=0.5, label='$S$', zorder=2)
ax.plot(        numpy.ma.masked_where(Sseries<=0, self.tseries),  numpy.ma.masked_where(Sseries<=0, topstack+Sseries),    color=color_S, zorder=3)
topstack = topstack+Sseries
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
_module_path = os.path.dirname(__file__)
sys.path.append("D:/uni/tomography-calibration")
from projectionSelector import load_projection
import globalParameters as gp
from calibrationShots import keys
from .simulateSignals import simulate_signal
from skimage.draw import ellipse
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import matplotlib.style as mplstyle
# mplstyle.use('ggplot')
mplstyle.use('bmh')
plt.rcParams.update({'font.size': 12})
plt.rcParams.update({'figure.dpi': 120})
label_fontsize = 12
orange = (239./255., 123./255., 7./255.)
blue = (0./255., 159./255., 227./255.)
dark = (68./255., 1./255., 84./255.)
majorLocator = MultipleLocator(5) # Step of the major ticks
majorFormatter = FormatStrFormatter('%d') # String format for major ticks
minorLocator = MultipleLocator(1) # Step of the minor ticks
def signal_simulation_histogram(phantoms):
""" Show the super fancy histogram for the simulated versus real signals for a given phantom experiment.
Parameters
----------
phantoms: list of int or string
Integer associated with lamp position or string with format "radius_3-digit-angle", e.g. "3_045"
Returns
-------
ok: int
1 if function succeeded
"""
cone_projections = load_projection("complex-view-cone-45.npy")[0]['projections']
cone_projections = cone_projections[:32]
line_projections = load_projection("line-approximation-45-etendue.npy")[0]['projections']
line_projections = line_projections[:32]
phantom_profile = np.zeros((45, 45))
f_simulated_cone = np.zeros(32)
f_simulated_line = np.zeros(32)
f_measured = np.zeros(32)
for phantom in phantoms:
try:
phantom_number = keys.index(phantom)
except ValueError:
phantom_number = phantom
phantom_array = np.load(os.path.join(_module_path, "../phantoms-45-circle/Phantom-%d.npy" % phantom_number))
phantom_profile += phantom_array.reshape((gp.n_rows, gp.n_cols))
f_c, f_m = simulate_signal(phantom_number, cone_projections, plot=False)
f_l, f_m = simulate_signal(phantom_number, line_projections, plot=False)
f_simulated_cone += f_c
f_simulated_line += f_l
f_measured += f_m
fig = plt.figure(figsize=(6, 6))
vessel = fig.add_axes([0.3, 0.33, 0.33, 0.33]) # [Left, Bottom, Width, Height]
vessel.set_aspect('equal', anchor='C')
out = fig.add_axes([0.72, 0.33, 0.15, 0.33])
top = fig.add_axes([0.3, 0.73, 0.33, 0.15])
vessel.pcolormesh(gp.x_array_plot, gp.y_array_plot, phantom_profile)
vessel.add_artist(plt.Circle((0., 0.), 85., color='w', fill=False))
vessel.set_xlabel("R (mm)", fontsize=12)
vessel.set_ylabel("z (mm)", fontsize=12)
vessel.set_xticks([-50, 0, 50])
vessel.set_yticks([-50, 0, 50])
top.bar(np.arange(1, 17), height=f_measured[:16], width=0.25, align='edge', label='real', color=dark)
top.bar(np.arange(1, 17), height=f_simulated_cone[:16], width=-0.25, align='edge', label='cone', color=orange)
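# Example usage (assuming the projection .npy files and the phantom arrays
# referenced above are available on disk):
#
#   signal_simulation_histogram([0, 1])
#   signal_simulation_histogram(["3_045"])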
import collections
import numpy as np
import tensorflow as tf
from universe import vectorized
from utils import plot_decision_boundary
class NoiseWrapper(vectorized.ObservationWrapper):
def __init__(self, env):
super(NoiseWrapper, self).__init__(env)
def _step(self, action):
observation, reward, done, info = self.env.step(action)
adversarial_observation = self.observation(observation)
info = self._info(observation, adversarial_observation, info)
return adversarial_observation, reward, done, info
def _info(self, observation_n, adversarial_observation_n, info):
for observation, adversarial_observation, log in zip(observation_n, adversarial_observation_n, info['n']):
diff = (observation - adversarial_observation).flatten()
log['adversary/l2'] = np.linalg.norm(diff, ord=2) / np.sqrt(diff.shape[0])
return info
def _observation(self, observation_n):
return [self._noisy(observation) for observation in observation_n]
def _noisy(self, observation):
return observation
class RandomNoiseWrapper(NoiseWrapper):
def __init__(self, env, intensity=0.1):
super(RandomNoiseWrapper, self).__init__(env)
self.intensity = intensity
def _noisy(self, observation):
return observation + self.intensity * np.random.random_sample(observation.shape)
class FGSMNoiseWrapper(NoiseWrapper):
def __init__(self, env, intensity=0.1, skip=0, reuse=False, vf=False, vf_adversarial=True, boundary=True):
super(FGSMNoiseWrapper, self).__init__(env)
self.intensity = intensity
self.skip = skip
self.reuse = reuse
self.vf = vf
self.vf_adversarial = vf_adversarial
self.boundary = boundary
self.boundary_frames = 50
self._last_boundary_frame = 0
def _reset(self):
self._last_noise = None
self._current_step = 0
self._injects = 0
return super(FGSMNoiseWrapper, self)._reset()
def setup(self, policy):
self.policy = policy
self._policy_state = policy.get_initial_features()
# Unnormalized action scores (logits) given by the policy.
y = policy.logits
# Get the action with the highest value.
y_true = tf.argmax(y, 1)
# Use categorical cross-entropy as the loss function, as we want the adversarial
# example to be as far away as possible from the true action. We assume that the
# policy matches the actual Q function well (e.g. that argmax(y) is really the
# optimal action to take).
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(y, y_true)
gradient = tf.gradients(loss, policy.x)
self.noise_op = self.intensity * tf.sign(gradient)
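# FGSM: the perturbation added to the observation is
#   intensity * sign(d(loss)/d(observation)),
# i.e. a fixed-magnitude step in the direction that increases the cross-entropy
# loss of the policy's own most-likely action.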
def _info(self, observation_n, adversarial_observation_n, info):
info = super(FGSMNoiseWrapper, self)._info(observation_n, adversarial_observation_n, info)
for observation, adversarial_observation, log in zip(observation_n, adversarial_observation_n, info['n']):
log['adversary/injects'] = self._injects
return info
def _get_value_function(self, observation):
sess = tf.get_default_session()
fetched = sess.run([self.policy.vf] + self.policy.state_out, {
self.policy.x: [observation],
self.policy.state_in[0]: self._policy_state[0],
self.policy.state_in[1]: self._policy_state[1],
})
self._policy_state = fetched[1:]
return fetched[0]
def _noisy(self, observation):
"""
Generate adversarial noise using the FGSM method for a given observation.
"""
# Get value function.
if self.vf:
if self.vf_adversarial:
vf_observation = self._last_noise if self._last_noise is not None else observation
else:
vf_observation = observation
vf = self._get_value_function(vf_observation)
if (not self.skip or self._current_step % self.skip == 0) and (not self.vf or vf > 0.5):
# Generate noise based on the current frame.
sess = tf.get_default_session()
noise = sess.run(self.noise_op, {
self.policy.x: observation.reshape([1] + list(observation.shape)),
self.policy.state_in[0]: self._policy_state[0],
self.policy.state_in[1]: self._policy_state[1],
})
self._last_noise = noise.reshape(observation.shape)
noise = self._last_noise
self._injects += 1
# Visualize action decision boundary.
if self.boundary and self._last_boundary_frame < self.boundary_frames:
self._last_boundary_frame += 1
attack_norm = np.linalg.norm(noise)
b_noise = noise / attack_norm
b_random = np.random.random_sample(b_noise.shape)
print('frame', self._last_boundary_frame, 'attack', attack_norm, 'random', np.linalg.norm(b_random))
b_random /= np.linalg.norm(b_random)
def sample_policy(x):
samples = []
for sample in xrange(7):
fetched = self.policy.act(x, *self.policy._last_state, track_state=False)
samples.append(fetched[0].argmax())
return collections.Counter(samples).most_common()[0][0]
def map_action(action):
"""Map action based on semantics."""
# TODO: This should be based on the environment.
return {
0: 0, # no operation
1: 0, # no operation
2: 1, # move up
3: 2, # move down
4: 1, # move up
5: 2, # move down
}[action]
vis_min = -2.0 * attack_norm
vis_max = 2.0 * attack_norm
vis_steps = 101
b_observations = np.zeros([vis_steps, vis_steps], dtype=np.int8)
b_samples = {}
# Preload some coordinates to avoid images being placed there.
sampled_images = np.array([
[0., 0.],
[attack_norm, 0.],
])
limit_dist = vis_max / 10.
limit_edge = vis_max / 4.
for u_index, u in enumerate(np.linspace(vis_min, vis_max, vis_steps)):
for v_index, v in enumerate(np.linspace(vis_min, vis_max, vis_steps)):
b_image = observation + u * b_noise + v * b_random
b_observations[v_index, u_index] = map_action(sample_policy(b_image))
position = np.array([u, v])
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ade:
# Asynchronous Differential Evolution.
#
# Copyright (C) 2018-19 by <NAME>,
# http://edsuom.com/ade
#
# See edsuom.com for API documentation as well as information about
# Ed's background and other projects, software and otherwise.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""
Unit tests for L{ade.report}.
"""
import pickle
import numpy as np
from twisted.internet import defer
from ade.util import *
from ade import history
from ade.test import testbase as tb
class Test_Analysis(tb.TestCase):
def setUp(self):
self.names = ['foo', 'bar', 'zebra']
self.X = np.array([
[110.0, 1, 2, 5], #0
[810.0, 2, 3, 4], #1
[270.0, 3, 4, 3], #2
[580.0, 4, 5, 2], #3
[999.0, 5, 6, 1], #4
])
self.K = [0, 3, 2, 1, 4]
self.a = history.Analysis(self.names, self.X, self.K)
def test_name2k_k2name(self):
for k, name in enumerate(self.names):
self.assertEqual(self.a.name2k(name), k+1)
self.assertEqual(self.a.k2name(k+1), name)
def test_valueVsSSE(self):
XY = self.a.value_vs_SSE(['bar'])
self.assertEqual(len(XY), 2)
self.assertItemsEqual(XY[0], [110., 580., 270., 810., 999.])
self.assertItemsEqual(XY[1], [2, 5, 4, 3, 6])
def test_corr(self):
self.assertAlmostEqual(self.a.corr(1, 2), +1)
self.assertAlmostEqual(self.a.corr(1, 3), -1)
def test_Kf12(self):
# 0.0 1.0
K = self.a.Kf12(0.0, 1.0)
self.assertItemsEqual(K, [0, 3, 2, 1])
# 0.0 0.6 1.01
K = self.a.Kf12(0.0, 0.6)
self.assertItemsEqual(K, [0, 3, 2])
K = self.a.Kf12(0.6, 1.01)
self.assertItemsEqual(K, [1, 4])
# 0.0 0.3 1.01
K = self.a.Kf12(0.0, 0.3)
self.assertItemsEqual(K, [0, 2])
K = self.a.Kf12(0.3, 1.01)
self.assertItemsEqual(K, [3, 1, 4])
def test_Kp12(self):
K = self.a.Kp12(0.0, 0.5)
self.assertItemsEqual(K, [0, 3, 2])
K = self.a.Kp12(0.2, 0.7)
self.assertItemsEqual(K, [3, 2, 1])
K = self.a.Kp12(0.5, 1.01)
self.assertItemsEqual(K, [2, 1, 4])
class Test_ClosestPairFinder(tb.TestCase):
def setUp(self):
self.cpf = history.ClosestPairFinder(10, 4)
def test_setRow(self):
self.cpf.S = True # Just not None, for testing
for k in range(10):
Z = [10.0+k] + [k,k+1,k+2]
self.cpf.setRow(k, Z)
self.assertEqual(self.cpf.S, None)
self.assertItemsEqual(self.cpf.X[0,:], [10.0, 0, 1, 2])
def test_clearRow(self):
self.cpf.setRow(0, [100.0, 2, 3, 4])
self.cpf.S = True # Just not None, for testing
self.cpf.clearRow(0)
self.assertEqual(self.cpf.S, None)
self.assertEqual(len(self.cpf.K), 0)
def test_pairs_sampled(self):
self.cpf.K = {3, 1, 4, 5, 9, 2}
for N in (2, 3, 4):
KP = self.cpf.pairs_sampled(N)
self.assertEqual(KP.shape, (N, 2))
for k1, k2 in KP:
self.assertGreater(k2, k1)
self.assertGreater(k1, 0)
self.assertGreater(k2, 0)
self.assertLess(k1, 10)
self.assertLess(k2, 10)
def test_pairs_all(self):
self.cpf.K = {3, 1, 4, 5, 9, 2}
N = len(self.cpf.K)
Np = N*(N-1)/2
KP = self.cpf.pairs_all()
self.assertEqual(KP.shape, (Np, 2))
for k1, k2 in KP:
self.assertGreater(k2, k1)
self.assertGreater(k1, 0)
self.assertGreater(k2, 0)
self.assertLess(k1, 10)
self.assertLess(k2, 10)
@defer.inlineCallbacks
def test_diffs(self):
self.cpf.setRow(0, [ 90.0, 0.11, 0.2, 0.3])
self.cpf.setRow(1, [ 90.0, 0.09, 0.2, 0.3])
self.cpf.setRow(2, [100.0, 0.09, 0.2, 0.3])
self.cpf.setRow(3, [110.0, 0.11, 0.2, 0.3])
self.cpf.setRow(4, [110.0, 0.10, 0.2, 0.3])
self.assertEqual(self.cpf.S, None)
K = np.array([[0, 1], [0, 2], [0, 3], [2, 3], [3, 4]])
D = yield self.cpf(K=K)
self.assertEqual(self.cpf.S.shape, (4,))
s0 = 1.0/np.var([90., 90., 100., 110., 110.])
self.assertAlmostEqual(self.cpf.S[0], s0)
s1 = 1.0/np.var([0.11, 0.09, 0.09, 0.11, 0.10])
self.assertAlmostEqual(self.cpf.S[1], s1)
# 0-1 0-2 0-3 2-3 3-4
SSEs = [90.0, 95.0, 100.0, 105.0, 110.0]
for k, de in enumerate(
[s1*0.02**2, # 0, 1
s0*10.0**2 + s1*0.02**2, # 0, 2
s0*20.0**2, # 0, 3
s0*10.0**2 + s1*0.02**2, # 2, 3
s1*0.01**2 # 3, 4
]):
#print k, D[k], de/np.sqrt(SSEs[k]),
self.assertWithinOnePercent(D[k], de/SSEs[k])
@defer.inlineCallbacks
def test_diffs_someNeverpops(self):
self.cpf.setRow(0, [100.0, 0.1130, 0.10, 0.100], 1)
self.cpf.setRow(1, [100.0, 0.1010, 0.11, 0.100], 1)
self.cpf.setRow(2, [100.0, 0.0940, 0.10, 0.100], 0)
self.cpf.setRow(3, [100.0, 0.0957, 0.10, 0.099], 1)
self.cpf.setRow(4, [100.0, 0.1100, 0.11, 0.100], 0)
self.cpf.setRow(5, [100.0, 0.1100, 0.11, 0.110], 1)
K = np.array([[0, 1], [0, 2], [2, 3]])
D = yield self.cpf(K=K)
# Kn = 4, N = 6
Kn_penalty = 1 + np.exp(12*(4.0/6 - 0.4))
penalty = [Kn_penalty if x else 1.0 for x in (1, 1, 0)]
for k, p in enumerate(penalty):
self.assertWithinTenPercent(D[k], 0.00120/p)
@defer.inlineCallbacks
def test_call(self):
self.cpf.setRow(0, [ 90.0, 0.11, 0.2, 0.30])
self.cpf.setRow(1, [ 90.0, 0.09, 0.2, 0.30])
self.cpf.setRow(2, [100.0, 0.09, 0.2, 0.30])
self.cpf.setRow(3, [110.0, 0.11, 0.2, 0.30])
self.cpf.setRow(4, [110.0, 0.10, 0.2, 0.30])
self.cpf.setRow(5, [140.0, 0.10, 0.2, 0.30])
self.cpf.setRow(6, [140.0, 0.10, 0.2, 0.31])
self.cpf.setRow(7, [140.1, 0.10, 0.2, 0.31])
kr = yield self.cpf()
self.assertEqual(kr, 6)
self.cpf.clearRow(6)
kr = yield self.cpf()
self.assertEqual(kr, 1)
self.cpf.clearRow(1)
kr = yield self.cpf()
self.assertEqual(kr, 0)
class Test_History(tb.TestCase):
def setUp(self):
self.names = ['foo', 'bar', 'zebra']
self.h = history.History(self.names, N_max=10)
def tearDown(self):
return self.h.shutdown()
def test_kkr(self):
self.h.X = np.array([[
# 0 1 2 3 4 5 6 7 kr
# 2 0 3 - 1 4 - 5 k
3, 1, 4, 0, 2, 5, 0, 9]]).transpose()
self.h.K = [
# 0 1 2 3 4 5 k
1, 4, 0, 2, 5, 7]
# 1 2 3 4 5 9 X[K,0]
N = 6
for SSE, k_exp, kr_exp in (
(7.0, 5, 3),
(1.0, 0, 3),
(99, 6, 3),
):
k, kr = self.h.kkr(SSE, N)
self.assertEqual(k, k_exp)
self.assertEqual(kr, kr_exp)
@defer.inlineCallbacks
def test_add_worsening(self):
for k in range(5):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 100.0 + k
yield self.h.add(i)
self.assertEqual(len(self.h), k+1)
self.assertItemsEqual(self.h[k], [i.SSE] + i.values)
for k, values in enumerate(self.h):
self.assertItemsEqual(values, [k,k+1,k+2])
@defer.inlineCallbacks
def test_add_ignoreInfSSE(self):
for k in range(5):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 100.0 + k if k < 3 else float('+inf')
kr = yield self.h.add(i)
if k < 3:
self.assertLess(kr, 10)
self.assertEqual(len(self.h), k+1)
self.assertItemsEqual(self.h[k], [i.SSE] + i.values)
else:
self.assertIs(kr, None)
self.assertEqual(len(self.h), 3)
for k, values in enumerate(self.h):
self.assertItemsEqual(values, [k,k+1,k+2])
@defer.inlineCallbacks
def test_add_improving(self):
for k in range(5):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 100.0 - k
yield self.h.add(i)
self.assertEqual(len(self.h), k+1)
for k, values in enumerate(self.h):
self.assertItemsEqual(values, [4-k,4-k+1,4-k+2])
def popitem_predictably(self, x):
value = sorted(x.values())[0]
for key, this_value in x.items():
if this_value == value:
x.pop(key)
return key, value
@defer.inlineCallbacks
def test_add_limitSize_worsening(self):
krPopped = set()
for k in range(15):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 1000.0 + k
yield self.h.add(i)
if len(self.h.kr) == 10:
iHash, kr = self.popitem_predictably(self.h.kr)
self.h.notInPop(kr)
krPopped.add(kr)
self.assertEqual(len(self.h), 10)
valuesPrev = None
for values in self.h:
if valuesPrev is not None:
for v, vp in zip(values, valuesPrev):
self.assertGreater(v, vp)
valuesPrev = values
@defer.inlineCallbacks
def test_add_limitSize_improving(self):
krPopped = set()
for k in range(15):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 1000.0 - k
yield self.h.add(i)
if len(self.h.kr) == 10:
iHash, kr = self.popitem_predictably(self.h.kr)
yield self.h.notInPop(kr)
krPopped.add(kr)
self.assertEqual(len(self.h), 10)
valuesPrev = None
for values in self.h:
if valuesPrev is not None:
for v, vp in zip(values, valuesPrev):
self.assertLess(v, vp)
valuesPrev = values
@defer.inlineCallbacks
def test_add_limitSize_improving_neverInPop(self):
for k in range(15):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 1000.0 - k
yield self.h.add(i, neverInPop=True)
self.assertEqual(len(self.h), 10)
self.assertEqual(len(self.h.Kp), 0)
self.assertEqual(len(self.h.Kn), 10)
valuesPrev = None
for values in self.h:
if valuesPrev is not None:
for v, vp in zip(values, valuesPrev):
self.assertLess(v, vp)
valuesPrev = values
@defer.inlineCallbacks
def test_add_then_purge(self):
for k in range(5):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 100.0 - k
yield self.h.add(i)
self.assertEqual(len(self.h), k+1)
self.assertEqual(len(self.h), 5)
self.assertEqual(len(self.h.Kp), 5)
self.h.purgePop()
self.assertEqual(len(self.h), 0)
self.assertEqual(len(self.h.Kp), 0)
@defer.inlineCallbacks
def test_value_vs_SSE(self):
for k in range(10):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 10.0 + k
yield self.h.add(i)
XY = yield self.h.value_vs_SSE(['bar'])
self.assertEqual(len(XY), 2)
self.assertItemsEqual(XY[0], np.linspace(10.0, 19.0, 10))
self.assertItemsEqual(XY[1], np.linspace(1.0, 10.0, 10))
@defer.inlineCallbacks
def test_value_vs_SSE_maxRatio(self):
for k in range(10):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 10.0 + k
yield self.h.add(i)
XY = yield self.h.value_vs_SSE(['bar'], maxRatio=1.5)
self.assertEqual(len(XY), 2)
self.assertItemsEqual(XY[0], np.linspace(10.0, 15.0, 6))
self.assertItemsEqual(XY[1], np.linspace(1.0, 6.0, 6))
@defer.inlineCallbacks
def test_value_vs_SSE_inPop(self):
for k in range(10):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 10.0 + k
kr = yield self.h.add(i)
self.h.notInPop(kr)
XY = yield self.h.value_vs_SSE(['bar'], inPop=True)
self.assertEqual(len(XY), 2)
self.assertItemsEqual(XY[0], np.linspace(10.0, 18.0, 9))
self.assertItemsEqual(XY[1], np.linspace(1.0, 9.0, 9))
@defer.inlineCallbacks
def test_value_vs_SSE_notInPop(self):
for k in range(10):
i = tb.MockIndividual(values=[k,k+1,k+2])
i.SSE = 10.0 + k
kr = yield self.h.add(i)
if k > 5: self.h.notInPop(kr)
XY = yield self.h.value_vs_SSE(['bar'], notInPop=True)
self.assertEqual(len(XY), 2)
self.assertItemsEqual(XY[0], np.linspace(16.0, 19.0, 4))
self.assertItemsEqual(XY[1], np.linspace(7.0, 10.0, 4))
@defer.inlineCallbacks
def test_pickle(self):
def values(k):
return [k, np.exp(-0.1*k), np.exp(-0.5*k)]
import numpy as np
from math import factorial
def main():
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.array([1, 2, 3, 4])
import os
import os.path
from collections import defaultdict
from functools import partial
import numpy as np
from yt.frontends.art.definitions import (
hydro_struct,
particle_fields,
particle_star_fields,
star_struct,
)
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.fortran_utils import read_vector, skip
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.logger import ytLogger as mylog
class IOHandlerART(BaseIOHandler):
_dataset_type = "art"
tb, ages = None, None
cache = None
masks = None
caching = True
def __init__(self, *args, **kwargs):
self.cache = {}
self.masks = {}
super().__init__(*args, **kwargs)
self.ws = self.ds.parameters["wspecies"]
self.ls = self.ds.parameters["lspecies"]
self.file_particle = self.ds._file_particle_data
self.file_stars = self.ds._file_particle_stars
self.Nrow = self.ds.parameters["Nrow"]
def _read_fluid_selection(self, chunks, selector, fields, size):
# Chunks in this case will have affiliated domain subset objects
# Each domain subset will contain a hydro_offset array, which gives
# pointers to level-by-level hydro information
tr = defaultdict(list)
cp = 0
for chunk in chunks:
for subset in chunk.objs:
# Now we read the entire thing
f = open(subset.domain.ds._file_amr, "rb")
# This contains the boundary information, so we skim through
# and pick off the right vectors
rv = subset.fill(f, fields, selector)
for ft, f in fields:
d = rv.pop(f)
mylog.debug(
"Filling %s with %s (%0.3e %0.3e) (%s:%s)",
f,
d.size,
d.min(),
d.max(),
cp,
cp + d.size,
)
tr[(ft, f)].append(d)
cp += d.size
d = {}
for field in fields:
d[field] = np.concatenate(tr.pop(field))
return d
def _get_mask(self, selector, ftype):
key = (selector, ftype)
if key in self.masks.keys() and self.caching:
return self.masks[key]
pstr = "particle_position_%s"
x, y, z = (self._get_field((ftype, pstr % ax)) for ax in "xyz")
mask = selector.select_points(x, y, z, 0.0)
if self.caching:
self.masks[key] = mask
return self.masks[key]
else:
return mask
def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
for _chunk in chunks:
for ptype in sorted(ptf):
x = self._get_field((ptype, "particle_position_x"))
y = self._get_field((ptype, "particle_position_y"))
z = self._get_field((ptype, "particle_position_z"))
yield ptype, (x, y, z)
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
for _chunk in chunks:
for ptype, field_list in sorted(ptf.items()):
x = self._get_field((ptype, "particle_position_x"))
y = self._get_field((ptype, "particle_position_y"))
z = self._get_field((ptype, "particle_position_z"))
mask = selector.select_points(x, y, z, 0.0)
if mask is None:
continue
for field in field_list:
data = self._get_field((ptype, field))
yield (ptype, field), data[mask]
def _get_field(self, field):
if field in self.cache.keys() and self.caching:
mylog.debug("Cached %s", str(field))
return self.cache[field]
mylog.debug("Reading %s", str(field))
tr = {}
ftype, fname = field
ptmax = self.ws[-1]
pbool, idxa, idxb = _determine_field_size(self.ds, ftype, self.ls, ptmax)
npa = idxb - idxa
sizes = np.diff(np.concatenate(([0], self.ls)))
rp = partial(
read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb
)
for ax in "xyz":
if fname.startswith(f"particle_position_{ax}"):
dd = self.ds.domain_dimensions[0]
off = 1.0 / dd
tr[field] = rp(fields=[ax])[0] / dd - off
if fname.startswith(f"particle_velocity_{ax}"):
(tr[field],) = rp(fields=["v" + ax])
if fname.startswith("particle_mass"):
a = 0
data = np.zeros(npa, dtype="f8")
for ptb, size, m in zip(pbool, sizes, self.ws):
if ptb:
data[a : a + size] = m
a += size
tr[field] = data
elif fname == "particle_index":
tr[field] = np.arange(idxa, idxb)
elif fname == "particle_type":
a = 0
data = np.zeros(npa, dtype="int64")
for i, (ptb, size) in enumerate(zip(pbool, sizes)):
if ptb:
data[a : a + size] = i
a += size
tr[field] = data
if pbool[-1] and fname in particle_star_fields:
data = read_star_field(self.file_stars, field=fname)
temp = tr.get(field, np.zeros(npa, "f8"))
nstars = self.ls[-1] - self.ls[-2]
if nstars > 0:
temp[-nstars:] = data
tr[field] = temp
if fname == "particle_creation_time":
self.tb, self.ages, data = interpolate_ages(
tr[field][-nstars:],
self.file_stars,
self.tb,
self.ages,
self.ds.current_time,
)
temp = tr.get(field, np.zeros(npa, "f8"))
temp[-nstars:] = data
tr[field] = temp
del data
# We check again, after it's been filled
if fname.startswith("particle_mass"):
# We now divide by NGrid in order to make this match up. Note that
# this means that even when requested in *code units*, we are
# giving them as modified by the ng value. This only works for
# dark_matter -- stars are regular matter.
tr[field] /= self.ds.domain_dimensions.prod()
if tr == {}:
tr = {f: np.array([]) for f in [field]}
if self.caching:
self.cache[field] = tr[field]
return self.cache[field]
else:
return tr[field]
class IOHandlerDarkMatterART(IOHandlerART):
_dataset_type = "dm_art"
def _count_particles(self, data_file):
return {
k: self.ds.parameters["lspecies"][i]
for i, k in enumerate(self.ds.particle_types_raw)
}
def _identify_fields(self, domain):
field_list = []
self.particle_field_list = [f for f in particle_fields]
for ptype in self.ds.particle_types_raw:
for pfield in self.particle_field_list:
pfn = (ptype, pfield)
field_list.append(pfn)
return field_list, {}
def _get_field(self, field):
if field in self.cache.keys() and self.caching:
mylog.debug("Cached %s", str(field))
return self.cache[field]
mylog.debug("Reading %s", str(field))
tr = {}
ftype, fname = field
ptmax = self.ws[-1]
pbool, idxa, idxb = _determine_field_size(self.ds, ftype, self.ls, ptmax)
npa = idxb - idxa
sizes = np.diff(np.concatenate(([0], self.ls)))
rp = partial(
read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb
)
for ax in "xyz":
if fname.startswith(f"particle_position_{ax}"):
# This is not the same as domain_dimensions
dd = self.ds.parameters["ng"]
off = 1.0 / dd
tr[field] = rp(fields=[ax])[0] / dd - off
if fname.startswith(f"particle_velocity_{ax}"):
(tr[field],) = rp(["v" + ax])
if fname.startswith("particle_mass"):
a = 0
data = np.zeros(npa, dtype="f8")
for ptb, size, m in zip(pbool, sizes, self.ws):
if ptb:
data[a : a + size] = m
a += size
tr[field] = data
elif fname == "particle_index":
tr[field] = np.arange(idxa, idxb)
elif fname == "particle_type":
a = 0
data = np.zeros(npa, dtype="int64")
for i, (ptb, size) in enumerate(zip(pbool, sizes)):
if ptb:
data[a : a + size] = i
a += size
tr[field] = data
# We check again, after it's been filled
if fname.startswith("particle_mass"):
# We now divide by NGrid in order to make this match up. Note that
# this means that even when requested in *code units*, we are
# giving them as modified by the ng value. This only works for
# dark_matter -- stars are regular matter.
tr[field] /= self.ds.domain_dimensions.prod()
if tr == {}:
tr[field] = np.array([])
if self.caching:
self.cache[field] = tr[field]
return self.cache[field]
else:
return tr[field]
def _yield_coordinates(self, data_file):
for ptype in self.ds.particle_types_raw:
x = self._get_field((ptype, "particle_position_x"))
y = self._get_field((ptype, "particle_position_y"))
z = self._get_field((ptype, "particle_position_z"))
yield ptype, np.stack((x, y, z), axis=-1)
def _determine_field_size(pf, field, lspecies, ptmax):
pbool = np.zeros(len(lspecies), dtype="bool")
idxas = np.concatenate(
(
[
0,
],
lspecies[:-1],
)
)
idxbs = lspecies
if "specie" in field:
index = int(field.replace("specie", ""))
pbool[index] = True
else:
raise RuntimeError
idxa, idxb = idxas[pbool][0], idxbs[pbool][-1]
return pbool, idxa, idxb
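# Example (hypothetical species table): with lspecies = [100, 150], field type
# "specie0" selects particle indices [0, 100) and "specie1" selects [100, 150);
# any other field type raises RuntimeError here.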
def interpolate_ages(
data, file_stars, interp_tb=None, interp_ages=None, current_time=None
):
if interp_tb is None:
t_stars, a_stars = read_star_field(file_stars, field="t_stars")
# timestamp of file should match amr timestamp
if current_time:
tdiff = YTQuantity(b2t(t_stars), "Gyr") - current_time.in_units("Gyr")
if np.abs(tdiff) > 1e-4:
mylog.info("Timestamp mismatch in star particle header: %s", tdiff)
mylog.info("Interpolating ages")
interp_tb, interp_ages = b2t(data)
interp_tb = YTArray(interp_tb, "Gyr")
interp_ages = YTArray(interp_ages, "Gyr")
temp = np.interp(data, interp_tb, interp_ages)
return interp_tb, interp_ages, temp
def _read_art_level_info(
f, level_oct_offsets, level, coarse_grid=128, ncell0=None, root_level=None
):
pos = f.tell()
f.seek(level_oct_offsets[level])
# Get the info for this level, skip the rest
junk, nLevel, iOct = read_vector(f, "i", ">")
# fortran indices start at 1
# Skip all the oct index data
le = np.zeros((nLevel, 3), dtype="int64")
fl = np.ones((nLevel, 6), dtype="int64")
iocts = np.zeros(nLevel + 1, dtype="int64")
idxa, idxb = 0, 0
chunk = int(1e6) # this is ~111MB for 15 dimensional 64 bit arrays
left = nLevel
while left > 0:
this_chunk = min(chunk, left)
idxb = idxa + this_chunk
data = np.fromfile(f, dtype=">i", count=this_chunk * 15)
data = data.reshape(this_chunk, 15)
left -= this_chunk
le[idxa:idxb, :] = data[:, 1:4]
fl[idxa:idxb, 1] = np.arange(idxa, idxb)
# pad byte is last, LL2, then ioct right before it
iocts[idxa:idxb] = data[:, -3]
idxa = idxa + this_chunk
del data
# emulate fortran code
# do ic1 = 1 , nLevel
# read(19) (iOctPs(i,iOct),i=1,3),(iOctNb(i,iOct),i=1,6),
# & iOctPr(iOct), iOctLv(iOct), iOctLL1(iOct),
# & iOctLL2(iOct)
# iOct = iOctLL1(iOct)
# ioct always represents the index of the next variable
# not the current, so shift forward one index
# the last index isn't used
iocts[1:] = iocts[:-1] # shift
iocts = iocts[:nLevel] # chop off the last, unused, index
iocts[0] = iOct # starting value
# now correct iocts for fortran indices start @ 1
iocts = iocts - 1
assert np.unique(iocts).shape[0] == nLevel
# left edges are expressed as if they were on
# level 15, so no matter what level max(le)=2**15
# correct to the yt convention
# le = le/2**(root_level-1-level)-1
# try to find the root_level first
def cfc(root_level, level, le):
d_x = 1.0 / (2.0 ** (root_level - level + 1))
fc = (d_x * le) - 2 ** (level - 1)
return fc
if root_level is None:
root_level = np.floor(np.log2(le.max() * 1.0 / coarse_grid))
root_level = root_level.astype("int64")
for _ in range(10):
fc = cfc(root_level, level, le)
go = np.diff(np.unique(fc)).min() < 1.1
if go:
break
root_level += 1
else:
fc = cfc(root_level, level, le)
unitary_center = fc / (coarse_grid * 2.0 ** (level - 1))
assert np.all(unitary_center < 1.0)
# again emulate the fortran code
# This is all for calculating child oct locations
# iC_ = iC + nbshift
# iO = ishft ( iC_ , - ndim )
# id = ishft ( 1, MaxLevel - iOctLv(iO) )
# j = iC_ + 1 - ishft( iO , ndim )
# Posx = d_x * (iOctPs(1,iO) + sign ( id , idelta(j,1) ))
# Posy = d_x * (iOctPs(2,iO) + sign ( id , idelta(j,2) ))
# Posz = d_x * (iOctPs(3,iO) + sign ( id , idelta(j,3) ))
# idelta = [[-1, 1, -1, 1, -1, 1, -1, 1],
# [-1, -1, 1, 1, -1, -1, 1, 1],
# [-1, -1, -1, -1, 1, 1, 1, 1]]
# idelta = np.array(idelta)
# if ncell0 is None:
# ncell0 = coarse_grid**3
# nchild = 8
# ndim = 3
# nshift = nchild -1
# nbshift = nshift - ncell0
# iC = iocts #+ nbshift
# iO = iC >> ndim #possibly >>
# id = 1 << (root_level - level)
# j = iC + 1 - ( iO << 3)
# delta = np.abs(id)*idelta[:,j-1]
# try without the -1
# le = le/2**(root_level+1-level)
# now read the hvars and vars arrays
# we are looking for iOctCh
# we record if iOctCh is >0, in which it is subdivided
# iOctCh = np.zeros((nLevel+1,8),dtype='bool')
f.seek(pos)
return unitary_center, fl, iocts, nLevel, root_level
def get_ranges(
skip, count, field, words=6, real_size=4, np_per_page=4096 ** 2, num_pages=1
):
# translate every particle index into a file position ranges
ranges = []
arr_size = np_per_page * real_size
idxa, idxb = 0, 0
posa, posb = 0, 0
for _page in range(num_pages):
idxb += np_per_page
for i, fname in enumerate(["x", "y", "z", "vx", "vy", "vz"]):
posb += arr_size
if i == field or fname == field:
if skip < np_per_page and count > 0:
left_in_page = np_per_page - skip
this_count = min(left_in_page, count)
count -= this_count
start = posa + skip * real_size
end = posa + this_count * real_size
ranges.append((start, this_count))
skip = 0
assert end <= posb
else:
skip -= np_per_page
posa += arr_size
idxa += np_per_page
assert count == 0
return ranges
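# Example (hypothetical layout, default kwargs: 6 float32 words per particle,
# 4096**2 particles per page, one page): selecting field "x" for particles
# 10..19 yields a single (byte_offset, particle_count) range:
#   get_ranges(skip=10, count=10, field="x")  ->  [(40, 10)]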
def read_particles(file, Nrow, idxa, idxb, fields):
words = 6 # words (reals) per particle: x,y,z,vx,vy,vz
real_size = 4 # for file_particle_data; not always true?
np_per_page = Nrow ** 2 # defined in ART a_setup.h, # of particles/page
num_pages = os.path.getsize(file) // (real_size * words * np_per_page)
fh = open(file, "rb")
skip, count = idxa, idxb - idxa
kwargs = dict(
words=words, real_size=real_size, np_per_page=np_per_page, num_pages=num_pages
)
arrs = []
for field in fields:
ranges = get_ranges(skip, count, field, **kwargs)
data = None
for seek, this_count in ranges:
fh.seek(seek)
temp = np.fromfile(fh, count=this_count, dtype=">f4")
if data is None:
data = temp
else:
data = np.concatenate((data, temp))
arrs.append(data.astype("f8"))
fh.close()
return arrs
def read_star_field(file, field=None):
data = {}
with open(file, "rb") as fh:
for dtype, variables in star_struct:
found = (
isinstance(variables, tuple) and field in variables
) or field == variables
if found:
data[field] = read_vector(fh, dtype[1], dtype[0])
else:
skip(fh, endian=">")
return data.pop(field)
def _read_child_mask_level(f, level_child_offsets, level, nLevel, nhydro_vars):
f.seek(level_child_offsets[level])
ioctch = np.zeros(nLevel, dtype="uint8")
idc = np.zeros(nLevel, dtype="int32")
chunk = int(1e6)
left = nLevel
width = nhydro_vars + 6
a, b = 0, 0
while left > 0:
chunk = min(chunk, left)
b += chunk
arr = np.fromfile(f, dtype=">i", count=chunk * width)
arr = arr.reshape((width, chunk), order="F")
assert np.all(arr[0, :] == arr[-1, :]) # pads must be equal
idc[a:b] = arr[1, :] - 1 # fix fortran indexing
ioctch[a:b] = arr[2, :] == 0 # if it is above zero, then refined available
# zero in the mask means there is refinement available
a = b
left -= chunk
assert left == 0
return idc, ioctch
nchem = 8 + 2
dtyp = np.dtype(f">i4,>i8,>i8,>{nchem}f4,>2f4,>i4")
def _read_child_level(
f,
level_child_offsets,
level_oct_offsets,
level_info,
level,
fields,
domain_dimensions,
ncell0,
nhydro_vars=10,
nchild=8,
noct_range=None,
):
# emulate the fortran code for reading cell data
# read ( 19 ) idc, iOctCh(idc), (hvar(i,idc),i=1,nhvar),
# & (var(i,idc), i=2,3)
# contiguous 8-cell sections are for the same oct;
# ie, we don't write out just the 0 cells, then the 1 cells
# optionally, we only read noct_range to save memory
left_index, fl, octs, nocts, root_level = _read_art_level_info(
f, level_oct_offsets, level, coarse_grid=domain_dimensions[0]
)
if noct_range is None:
nocts = level_info[level]
ncells = nocts * 8
f.seek(level_child_offsets[level])
arr = np.fromfile(f, dtype=hydro_struct, count=ncells)
assert np.all(arr["pad1"] == arr["pad2"]) # pads must be equal
# idc = np.argsort(arr['idc']) #correct fortran indices
# translate idc into icell, and then to iOct
icell = (arr["idc"] >> 3) << 3
iocts = (icell - ncell0) / nchild # without a F correction, there's a +1
# assert that the children are read in the same order as the octs
assert np.all(octs == iocts[::nchild])
else:
start, end = noct_range
nocts = min(end - start, level_info[level])
end = start + nocts
ncells = nocts * 8
skip = np.dtype(hydro_struct).itemsize * start * 8
f.seek(level_child_offsets[level] + skip)
arr = np.fromfile(f, dtype=hydro_struct, count=ncells)
assert np.all(arr["pad1"] == arr["pad2"]) # pads must be equal
source = {}
for field in fields:
sh = (nocts, 8)
source[field] = np.reshape(arr[field], sh, order="C").astype("float64")
return source
def _read_root_level(f, level_offsets, level_info, nhydro_vars=10):
nocts = level_info[0]
f.seek(level_offsets[0]) # Ditch the header
hvar = read_vector(f, "f", ">")
var = read_vector(f, "f", ">")
hvar = hvar.reshape((nhydro_vars, nocts * 8), order="F")
var = var.reshape((2, nocts * 8), order="F")
arr = np.concatenate((hvar, var))
return arr
# All of these functions are to convert from hydro time var to
# proper time
sqrt = np.sqrt
sign = np.sign
def find_root(f, a, b, tol=1e-6):
c = (a + b) / 2.0
last = -np.inf
assert sign(f(a)) != sign(f(b))
while np.abs(f(c) - last) > tol:
last = f(c)
if sign(last) == sign(f(b)):
b = c
else:
a = c
c = (a + b) / 2.0
return c
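# e.g. find_root(lambda x: x*x - 2.0, 0.0, 2.0) converges to ~1.41421 (sqrt(2));
# note the stopping criterion is the change in f(c) between iterations, not |f(c)| itself.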
def quad(fintegrand, xmin, xmax, n=1e4):
spacings = np.logspace(np.log10(xmin), np.log10(xmax), num=int(n))
import os
import cv2
import numpy as np
import argparse
from skimage import filters
from skimage.filters import prewitt
from matplotlib import pyplot as plt
def Robert(img):
Gx = np.array([[-1, 0], [0, 1]])
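# The Roberts cross operator normally pairs Gx with a second kernel, e.g.
# Gy = np.array([[0, -1], [1, 0]]); the gradient magnitude is then
# sqrt((img*Gx)**2 + (img*Gy)**2), with * denoting 2-D convolution.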
from abc import ABC, abstractmethod
import cv2
import numpy as np
import pandas as pd
from skimage.draw import line as raster_line
from .suite import Suite, project_points, compute_pose_error
# delete me
import matplotlib.pyplot as plt
def compute_3d_coordinates(oc, pts, model):
if not len(pts):
return np.empty((0, 3))
colors = oc[pts[:, 1], pts[:, 0]]
if np.any(colors[:, -1] != 255):
raise NotImplementedError("The object coordinate masks have issues")
return colors[:, :3] * model.size / 255 + model.min
def draw_lines(lines, img, color):
paths = np.concatenate(
[
np.stack(
raster_line(line[0, 1], line[0, 0], line[1, 1], line[1, 0]), axis=-1
)
for line in lines
]
)
out = img.copy()
out[paths[:, 0], paths[:, 1]] = color
return out
def extract_sift_keypoints(rgb):
gray = cv2.cvtColor(rgb[:, :, :3], cv2.COLOR_RGB2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
detections = sift.detect(gray, None)
# store unique keypoints
keypoints = np.unique(
np.array([kp.pt for kp in detections]).astype(np.uint32), axis=0
)
return keypoints
def extract_line_segments(rgb):
gray = cv2.cvtColor(rgb[:, :, :3], cv2.COLOR_RGB2GRAY)
ld = cv2.line_descriptor.LSDDetector_createLSDDetector()
keylines = ld.detect(gray, 1, 1)
paths = []
idx = []
for i, keyline in enumerate(keylines):
start = np.round(keyline.getStartPoint()).astype(int)
end = np.round(keyline.getEndPoint()).astype(int)
path = np.stack(raster_line(start[1], start[0], end[1], end[0]), axis=-1)
paths.append(path)
idx.append(np.full(len(path), i))
paths = np.concatenate(paths)
idx = np.concatenate(idx)
# ensure max bounds are not overstepped
max_bound = np.array(rgb.shape[:2]) - 1
paths = np.minimum(paths, max_bound)
return paths, idx
def extract_point_correspondences(oid, frame, keypoints, model):
# filter keypoints to object mask and object coordinate data
pts_2d = keypoints[
np.logical_and(
frame["mask"][keypoints[:, 1], keypoints[:, 0]] == oid,
frame["oc"][keypoints[:, 1], keypoints[:, 0], -1] == 255,
)
]
# objects get the corresponding object coordinates
pts_3d = compute_3d_coordinates(frame["oc"], pts_2d, model)
return pts_2d, pts_3d
def extract_line_correspondences(oid, frame, lines, model):
paths, idx = lines
# prune line segments to masks. assume masks are convex
mask = np.logical_and(frame["mask"] == oid, frame["oc"][:, :, -1] == 255)
line_2d = []
for pid in range(idx[-1]):
path = paths[idx == pid]
if not np.any(mask[path[:, 0], path[:, 1]]):
continue
line = np.empty((2, 2), dtype=int)
# clamp at start and at the end
start, end = None, None
for i, (r, c) in enumerate(path):
if mask[r, c]:
line[0] = (c, r)
start = i
break
for i, (r, c) in enumerate(reversed(path)):
if mask[r, c]:
line[1] = (c, r)
end = len(path) - i
break
# Reject very small segments
if end - start < 5:
continue
line_2d.append(line)
line_2d = np.array(line_2d) # array can cope with empty lists
# # debug
# img = draw_lines(line_2d, frame["rgb"], np.array([255, 255, 255], dtype=np.uint8))
# plt.imshow(img); plt.show()
# # objects get the corresponding object coordinates
line_3d = compute_3d_coordinates(
frame["oc"], line_2d.reshape(-1, 2), model
).reshape(-1, 2, 3)
return line_2d, line_3d
class RealSuite(Suite, ABC):
def __init__(self, methods, timed=True):
super().__init__(methods, timed)
self.data = None # dataset placeholder
# Since each dataset has a different number of sequences, frames
# objects per frames and even instance per objects, we need to
# store everything in a flat array and store indexes for each
# instance
self.did = None # datasets
self.sid = None # sequences
self.fid = None # frames
self.oid = None # objects
def init_run(self, data):
self.data = data
self.results = {
"angular": [],
"translation": [],
}
if self.timed:
self.results["time"] = []
# Initialize accumulators
self.did = [] # datasets
self.sid = [] # sequences
self.fid = [] # frames
self.oid = [] # objects
@abstractmethod
def extract_features(self, rgb):
pass
@abstractmethod
def extract_correspondences(self, oid, frame, features, model):
pass
def run(self, data):
self.init_run(data)
# Can we print some progress statistics
n_prog, i_prog = 0, 0
for ds in self.data:
n_prog += len(ds)
print("Progress: 0.00%", end="", flush=True)
# Looping over datasets
for did, ds in enumerate(self.data):
# looping over sequences
for sid, seq in enumerate(ds):
# looping over frames
for frame in seq:
# extract features in each frame
features = self.extract_features(frame["rgb"])
# Iterate through each object in frame
for oid, pose in frame["poses"].items():
# plt.imsave(f'/tmp/images/{seq.name:02d}_{frame["id"]:04d}.m.png', frame["mask"])
# plt.imsave(f'/tmp/images/{seq.name:02d}_{frame["id"]:04d}.o.png', frame["oc"])
mmask = frame["mask"].astype(bool)
moc = frame["oc"][:, :, -1] == 255
iou = np.sum(np.logical_and(mmask, moc)) / np.sum(
np.logical_or(mmask, moc)
)
# there are legit occlusion cases lower than 0.6 iou
if iou < 0.5:
error_msg = "IoU issues between mask and object coordinates"
raise RuntimeError(error_msg)
# extract correspondences
correspondences = self.extract_correspondences(
oid, frame, features, ds.models[str(oid)]
)
# Pre allocate placeholders storing results
nm = len(self.methods)
ang_all = np.full(nm, np.nan)
trans_all = np.full(nm, np.nan)
time_all = np.full(nm, np.nan)
groundtruth = (pose[:, :3], pose[:, -1])
for mid, method in enumerate(self.methods):
# get a pose estimate
(R, t), time_all[mid] = self.estimate_pose(
method, groundtruth, ds.camera.K, **correspondences
)
# Sanitize results
if np.any(np.isnan(R)) or np.any(np.isnan(t)):
continue
# store error results in the object
ang_all[mid], trans_all[mid] = compute_pose_error(
groundtruth, (R, t)
)
# let each method compute the pose compute pose
self.did.append(did)
self.sid.append(sid)
self.fid.append(frame["id"])
self.oid.append(oid)
self.results["angular"].append(ang_all)
self.results["translation"].append(trans_all)
if self.timed:
self.results["time"].append(time_all)
# progress only reported at frame level
i_prog += 1
percent = i_prog * 100 / n_prog
print(f"\rProgress: {percent:>6.2f}%", end="", flush=True)
print("\rProgress: 100.00%", flush=True)
# merge everything together
self.did = np.array(self.did)
self.sid = np.array(self.sid)
self.fid = np.array(self.fid)
self.oid = np.array(self.oid)
self.results["angular"] = np.stack(self.results["angular"])
self.results["translation"] = np.stack(self.results["translation"])
if self.timed:
self.results["time"] = np.stack(self.results["time"])
def _aggregate_results(self):
# build tables for angular error, translation errors, timings and nan counts
angular = []
translation = []
timings = []
nans = []
dids = []
sids = []
# filter out all nans
good_mask = np.logical_not(
np.logical_or.reduce(np.isnan(self.results["angular"]), axis=1)
)
# coding: utf-8
import numpy as np
import pandas as pd
# Libraries necessary for visualizing
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import warnings
from scipy.signal import argrelmax,argrelmin
import matplotlib.font_manager as fm
import copy
from data_reader import DataReader
import os
from coordinate_transform import *
class forceAnalyzer(DataReader):
def __init__(self,relative_data_folder,filenames,plate_rotation=20):
super().__init__(relative_data_folder,filenames,skiprows=6,
column_name=['Fx','Fy','Fz','Mx','My','Mz','Syncro','ExtIn1','ExtIn2'])
self.plate_rotation = plate_rotation
self.set_analysis_target(0)
def set_analysis_target(self, analysis_id=0, scale=2.0):
self.extract_syncronized_data(analysis_id=analysis_id)
self.get_first_landing_point(analysis_id=analysis_id)
self.max_peek=[]
self.get_max_peek(analysis_id=analysis_id)
self.modify_force_plate_raw(analysis_id=analysis_id, scale=scale)
self.get_action_point(analysis_id=analysis_id, threshold=40)
def extract_syncronized_data(self,analysis_id=0):
df = self.df_list[analysis_id]
df_copy = df[df.ExtIn1 == 1].copy()
df_copy['time'] = df_copy['time'].values - df_copy['time'].values[0]
self.df_list[analysis_id] = df_copy
self.df_list[analysis_id] = self.df_list[analysis_id].drop(['Syncro','ExtIn1','ExtIn2'], axis=1)
def moving_filter(self,x,window_size,min_periods):
return pd.Series(x).rolling(window=window_size, min_periods=min_periods, center=True).mean().values
def EMA(self,x, alpha):
return pd.Series(x).ewm(alpha=alpha).mean()
def get_peek_action_point(self,analysis_id=0):
return [self.df_list[analysis_id]['action_x'].values[self.max_peek],
self.df_list[analysis_id]['action_y'].values[self.max_peek]]
def get_peek_action_point_for_converting(self):
xs,ys =self.get_peek_action_point()
points = []
for x,y in zip(xs,ys):
points.append([x,y,0])
return points
def get_peek_action_point_for_trans(self):
xs,ys =self.get_peek_action_point()
points = []
for x,y in zip(xs,ys):
points.append([x,y,0.,1.])
return points
def getNearestValue(self,array, num):
"""
Summary: return the value in the array closest to a given number.
@param array: data array
@param num: target value
@return the array element closest to the target value
"""
# Compute the absolute differences between array elements and the target, then take the index of the minimum.
idx = np.abs(array - num).argmin()
return array[idx]
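# e.g. getNearestValue(np.array([1, 5, 9]), 6) returns 5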
def get_max_peek(self,threshold=5.,analysis_id=0):
force_plate_data=self.df_list[analysis_id]
self.max_peek = list(argrelmax(force_plate_data['Fz'].values,order=1000)[0])
tmp = copy.deepcopy(self.max_peek)
for i in tmp:
if force_plate_data['Fz'].values[i] < threshold:
self.max_peek.remove(i)
def get_peek_time(self,analysis_id=0):
return self.df_list[analysis_id]['time'].values[self.max_peek]
def get_first_landing_point(self, analysis_id=0):
force_plate_data = self.df_list[analysis_id].copy()
x_max_peek = argrelmax(force_plate_data['Fz'].values,order=50)
x_min_peek = argrelmin(force_plate_data['Fz'].values,order=100)
# print(x_min_peek,x_max_peek)
offset_peek_list= []
for value in x_max_peek[0]:
if abs(force_plate_data['Fz'].values[value] - force_plate_data['Fz'].values[self.getNearestValue(x_min_peek[0],value)]) > 100:
offset_peek_list.append(value)
# print(offset_peek_list)
self.first_landing_point = offset_peek_list[0]
print('first landing point is ',self.first_landing_point)
def export_from_first_landing_point(self, analysis_id=0):
force_plate_data = self.df_list[analysis_id].copy()
self.get_first_landing_point(analysis_id=analysis_id)
force_cutted_df = force_plate_data[self.first_landing_point:len(force_plate_data)]
# print(force_cutted_df)
# force_cutted_df.plot(y='Fz', figsize=(16,4), alpha=0.5)
force_cutted_df.to_csv('force_plate_cutted_data_a6.csv')
def get_action_point(self,analysis_id=0,scale=1., threshold=20):
Mx = self.df_list[analysis_id]['Mx'].values*scale
My = self.df_list[analysis_id]['My'].values*scale
Fz = self.df_list[analysis_id]['Fz'].values*scale
tmp_action_x = []
tmp_action_y = []
for mx,my,f in zip(Mx,My,Fz):
if abs(f) > threshold:
tmp_action_x.append(my/f)
tmp_action_y.append(mx/f)
else:
tmp_action_x.append(-1)
tmp_action_y.append(-1)
self.action_x = np.array(tmp_action_x)
self.action_y = np.array(tmp_action_y)
self.df_list[analysis_id]['action_x'] = self.action_x
self.df_list[analysis_id]['action_y'] = self.action_y
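# Note: action_x = My/Fz and action_y = Mx/Fz approximate the centre of pressure
# on the plate; samples with |Fz| at or below `threshold` are flagged with -1 so
# they can be filtered out downstream.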
def modify_force_plate_raw(self, analysis_id=0, scale=1.0):
Fx = self.df_list[analysis_id]['Fx'].values*scale
Fy = self.df_list[analysis_id]['Fy'].values*scale
Fz = self.df_list[analysis_id]['Fz'].values*scale
Mx = self.df_list[analysis_id]['Mx'].values*scale
My = self.df_list[analysis_id]['My'].values*scale
Mz = self.df_list[analysis_id]['Mz'].values*scale
self.df_list[analysis_id]['Fx'] = Fx
self.df_list[analysis_id]['Fy'] = Fy
self.df_list[analysis_id]['Fz'] = Fz
self.df_list[analysis_id]['Mx'] = Mx
self.df_list[analysis_id]['My'] = My
self.df_list[analysis_id]['Mz'] = Mz
def add_motion_coordinate_action_point(self, simultaneous_trans_matrix,analysis_id=0):
motion_coordinate_action_point_x = []
motion_coordinate_action_point_y = []
motion_coordinate_action_point_z = []
for x, y in zip(self.action_x, self.action_y):
if x == -1 or y == -1:
motion_coordinate_action_point_x.append(0)
motion_coordinate_action_point_y.append(0)
motion_coordinate_action_point_z.append(0)
else:
arr = np.array([x, y, 0., 1.])
motion_pos = np.dot(simultaneous_trans_matrix, arr)
motion_coordinate_action_point_x.append(motion_pos[0])
motion_coordinate_action_point_y.append(motion_pos[1])
motion_coordinate_action_point_z.append(motion_pos[2])
self.df_list[analysis_id]['motion_coordinate_action_point_x'] = motion_coordinate_action_point_x
self.df_list[analysis_id]['motion_coordinate_action_point_y'] = motion_coordinate_action_point_y
self.df_list[analysis_id]['motion_coordinate_action_point_z'] = motion_coordinate_action_point_z
def add_corrected_force_data(self,analysis_id=0, scale=1.0):
corrected_Fx = []
corrected_Fy = []
corrected_Fz = []
Fx = self.df_list[analysis_id]['Fx'].values*scale
Fy = self.df_list[analysis_id]['Fy'].values*scale
Fz = self.df_list[analysis_id]['Fz'].values*scale
for fx, fy, fz in zip(Fx,Fy,Fz):
arr = np.array([fx, fy, fz])
corrected_force = np.dot(get_rotation_x(deg2rad(self.plate_rotation)),arr)
corrected_Fx.append(corrected_force[0])
corrected_Fy.append(corrected_force[1])
corrected_Fz.append(corrected_force[2])
self.df_list[analysis_id]['corrected_Fx'] = corrected_Fx
self.df_list[analysis_id]['corrected_Fy'] = corrected_Fy
self.df_list[analysis_id]['corrected_Fz'] = corrected_Fz
def save_data(self, save_dir, filename, analysis_id=0, update=False):
if not os.path.isdir(save_dir+'synchro'):
print('Creating new save folder ...')
print('Save path : ', save_dir+'synchro')
os.mkdir(save_dir+'synchro')
if not os.path.isfile(save_dir+'synchro\\'+filename) or update == True:
df_copy = self.df_list[analysis_id].copy()
df_copy = df_copy.set_index('time')
df_copy.to_csv(save_dir+'synchro\\'+filename)
def plot_peek_action_point(self):
xs,ys =self.get_peek_action_point()
f = plt.figure()
i = 0
for x,y in zip(xs,ys):
plt.plot(x, y, "o", color=cm.spectral(i/10.0))
i += 1
f.subplots_adjust(right=0.8)
plt.show()
plt.close()
def plot(self,analysis_id=0):
target_area = ['Fx','Fy','Fz','Mx','My','Mz']
force_plate_data = self.df_list[analysis_id].copy()
# print(force_plate_data)
column_name = force_plate_data.columns.values
column_name_tmp = []
column_name_tmp_array = []
for target_name in target_area:
column_name_tmp = [name for name in column_name if target_name in name]
column_name_tmp_array.extend(column_name_tmp)
column_name = column_name_tmp_array
# print(column_name)
f = plt.figure()
plt.title('Force plate csv data when lifting up object', color='black')
force_plate_data.plot(x='time',y=column_name[0:3], figsize=(16,4), alpha=0.5,ax=f.gca())
plt.plot(force_plate_data['time'].values[self.max_peek], force_plate_data['Fz'].values[self.max_peek], "ro")
if self.first_landing_point != 0:
plt.plot(force_plate_data['time'].values[self.first_landing_point], force_plate_data['Fz'].values[self.first_landing_point], "bo")
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
f.subplots_adjust(right=0.8)
plt.show()
plt.close()
class motionAnalyzer(DataReader):
def __init__(self,relative_data_folder,filename,key_label):
super().__init__(relative_data_folder,filename,data_freq=100,key_label_name=key_label)
self.simple_modify_data()
self.peek_time = 0
def simple_modify_data(self):
for df in self.df_list:
for i in range(len(df)-1):
tmp = df.iloc[i+1]
for j,x in enumerate(tmp[0:9]):
if x == 0:
df.iloc[i+1,j] = df.iloc[i,j]
def set_peek_time(self,peek_time):
self.peek_time = peek_time
self.get_nearest_time()
def getNearestValue(self,array, num):
"""
Summary: return the value in the array closest to a given number.
@param array: data array
@param num: target value
@return the array element closest to the target value
"""
# Compute the absolute differences between array elements and the target, then take the index of the minimum.
idx = np.abs(array - num).argmin()
return array[idx]
def getNearestIndex(self,array, num):
"""
Summary: return the index of the array element closest to a given number.
@param array: data array
@param num: target value
@return index of the array element closest to the target value
"""
# Compute the absolute differences between array elements and the target, then take the index of the minimum.
return np.abs(array - num).argmin()
def get_nearest_time(self,analysis_id=0):
tmp = copy.deepcopy(self.peek_time)
self.peek_time = []
for x in tmp:
self.peek_time.append(self.getNearestIndex(self.df_list[analysis_id]['time'].values ,x))
def get_peek_points(self, analysis_id=0):
tmp = self.df_list[analysis_id].iloc[self.peek_time].values
return tmp[:,:9]
def get_euclid_distance(self,vec):
return np.linalg.norm(vec)
def get_nearest_two_points(self):
for points in self.get_peek_points():
each_point = [[points[0:3]],[points[3:6]],[points[6:9]]]
points_num = [[0,1],[1,2],[2,0]]
distance = []
distance.append(np.array(each_point[0])-np.array(each_point[1]))
distance.append(np.array(each_point[1])-np.array(each_point[2]))
distance.append( | np.array(each_point[2]) | numpy.array |
#!/usr/bin/env python
# encoding: utf-8
"""
train_tenfold.py
Created by Shuailong on 2017-02-18.
training module for CCG Parsing.
"""
from __future__ import print_function
from __future__ import division
import numpy as np
import os
from time import time
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
from keras.optimizers import SGD
from utils import generate_ten_fold
from utils import true_accuracy
from dataset import get_data
from dataset import get_embedding_matrix
from model import LSTMTagging
from train import data_generator
from train import MODEL_DIR
| np.random.seed(1337) | numpy.random.seed |
"""
CutBlur
Copyright 2020-present NAVER corp.
MIT license
"""
import random
import numpy as np
def crop(HQ, LQ, psize, scale=4):
h, w = LQ.shape[:-1]
x = random.randrange(0, w-psize+1)
y = random.randrange(0, h-psize+1)
crop_HQ = HQ[y*scale:y*scale+psize*scale, x*scale:x*scale+psize*scale]
crop_LQ = LQ[y:y+psize, x:x+psize]
return crop_HQ.copy(), crop_LQ.copy()
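# Usage sketch (illustrative, not part of the original module): crop() draws the
# low-quality patch at a random location and the matching high-quality patch at
# `scale` times that location and size, so the two stay aligned. The shapes below
# are made up for the example.
def _demo_crop():
    HQ = np.random.rand(400, 400, 3)   # HQ image is scale (=4) times larger
    LQ = np.random.rand(100, 100, 3)
    hq_patch, lq_patch = crop(HQ, LQ, psize=48, scale=4)
    return hq_patch.shape, lq_patch.shape   # (192, 192, 3), (48, 48, 3)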
def flip_and_rotate(HQ, LQ):
hflip = random.random() < 0.5
vflip = random.random() < 0.5
rot90 = random.random() < 0.5
if hflip:
HQ, LQ = HQ[:, ::-1, :], LQ[:, ::-1, :]
if vflip:
HQ, LQ = HQ[::-1, :, :], LQ[::-1, :, :]
if rot90:
HQ, LQ = HQ.transpose(1, 0, 2), LQ.transpose(1, 0, 2)
return HQ, LQ
def rgb2ycbcr(img, y_only=True):
in_img_type = img.dtype
    img = img.astype(np.float32)  # keep the converted copy; astype() does not modify in place
if in_img_type != np.uint8:
img *= 255.
if y_only:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(
img,
[[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]
) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
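# Usage sketch (illustrative, not part of the original module): rgb2ycbcr() above
# applies the ITU-R BT.601 conversion; for uint8 input the luma channel ends up in
# the limited range [16, 235] and the output keeps the input dtype.
def _demo_rgb2ycbcr():
    rgb = (np.random.rand(8, 8, 3) * 255).astype(np.uint8)
    y = rgb2ycbcr(rgb, y_only=True)        # luma only, shape (8, 8)
    ycbcr = rgb2ycbcr(rgb, y_only=False)   # full Y/Cb/Cr image, shape (8, 8, 3)
    return y.dtype, y.shape, ycbcr.shape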
def calculate_psnr(img1, img2):
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float("inf")
return 20 * np.log10(255.0 / | np.sqrt(mse) | numpy.sqrt |
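# Worked example (illustrative, not part of the original module): two uint8 images
# that differ by exactly 1 everywhere give MSE = 1, so
# PSNR = 20 * log10(255 / sqrt(1)) ~= 48.13 dB; identical images return inf.
def _demo_psnr():
    a = np.zeros((16, 16), dtype=np.uint8)
    b = np.ones((16, 16), dtype=np.uint8)
    return calculate_psnr(a, b)   # ~48.1308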
#!/usr/bin/env python3
import numpy as np
from urllib.request import urlopen
from Bio.PDB.PDBParser import PDBParser
import argparse
import os
import sys
import shutil
import glob
import re
import warnings
import time
startTime = time.time()
#silence warnings from numpy when doing gap_checks (and all others but it's all dealt with internally [hopefully])
old_settings = np.seterr(all='ignore')
warnings.simplefilter(action = "ignore", category = FutureWarning)
# Oregon State 2014
# <NAME>
# In collaboration with:
# <NAME>
# <NAME>
# <NAME>
# Dr. <NAME>
#checks if there is a valid file at a specified location
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return (open(arg, 'r')) # return an open file handle
# importing arguments from the user
parser=argparse.ArgumentParser(
    description='''Assigns secondary structure without using hydrogen bonds. One method uses virtual dihedrals and bond angles, the other uses phi/psi2 motifs to identify secondary structure. ''',
    epilog="""IMPORTANT NOTES:\nExpect there to be strange ? residues at the end of the output if there are any ligands or other non-water HETATM records present. This can be safely ignored."""
)
parser.add_argument('-i',dest="input",metavar="FILE",type=lambda x: is_valid_file(parser, x), help='This should be a pdb file. Do not use in combination with the "-c" option.')
parser.add_argument('-c',dest="code",type=str, help='This should be a four letter pdb code. Only use this option if you want to download directly from the PDB.')
parser.add_argument('-o',dest="output",metavar="FILE", help='Output file, a table using tabs as separators. If not specified, the default is to output to STDOUT as human-readable output.')
parser.add_argument('--legend',dest='legend', action='store_true', help='Option to print a legend for the Secondary Structure codes.')
parser.add_argument('--verbose',dest='verbose', action='store_true', help='Option to print all of the output, including the behind-the-scenes methods used for structure assignment.')
parser.set_defaults(legend=False, verbose=False)
args=parser.parse_args()
if args.legend == True:
print ("\nLEGEND:\n_:\tBreak in the chain (as detected by checking bond distance)\n-:\tUnassigned trans-residue\n=:\tUnassigned cis-residue\nP:\tPII-Helix\nt:\tTurn defined by CA to CA Distance (or implied as a consequence of other turns)\nN:\tA typically non-hydrogen bonded turn\nT:\tA typically hydrogen bonded turn T\nE:\tExtended or Beta-strand conformation\nH:\tAlpha Helical Conformation\nG:\t3,10 Helix\nBb:\tBeta-bulge\nU:\tPi-helical bulge\nX:\tThe Stig\n")
# Dictionary and function that converts triple letter codes to single letter
# Any non-conventional three letter code becomes "?"
# Victor made the first version of this function
def to_single(single,triple):
return (single[triple])
one_letter = {'ALA':'A', 'ARG':'R', 'ASN':'N', 'ASP':'D', 'CYS':'C', 'GLN':'Q', 'GLU':'E', 'GLY':'G', 'HIS':'H', 'ILE':'I', \
'LEU':'L', 'LYS':'K', 'MET':'M', 'PHE':'F', 'PRO':'P', 'SER':'S', 'THR':'T', 'TRP':'W', 'TYR':'Y', 'VAL':'V', 'BLA':'-'}
#function for getting pdb files online
def fetch_pdb(id):
pdb = "%s.pdb" % str(id.lower())
url = 'http://www.rcsb.org/pdb/files/%s.pdb' % id.lower()
tmp = urlopen(url)
fh = open(pdb,'wb')
fh.write(tmp.read())
fh.close()
if args.code == None:
try:
pdb = args.input
print ("\nWorking...\n")
except:
pass
elif args.input == None:
try:
fetch_pdb(args.code)
pdb = open('%s.pdb' %args.code.lower(), 'r')
print ("\nWorking...\n")
except:
print ("\n\n\nPlease enter a valid code or path.\n\n\n")
else:
print ("\n\n\nPlease only choose option -i or option -c.\n\n\n")
def atom_get(i,atom):
if atom_list[i][1] == atom:
return (atom_list[i][2])
else:
return ('no')
def chain_get(i,atom):
if atom_list[i][1] == atom:
return (atom_list[i][3])
else:
return ('no')
def model_get(i,atom):
if atom_list[i][1] == atom:
return (atom_list[i][4])
else:
return ('no')
# function to get indices; matches the index from the resnum list with the index for the atomic coords, which is why the resnum has to be in each
def index_getter(resnum):
indices = []
i = 0
for atom in atom_list:
try:
index = atom.index(resnum)
if index == 0:
indices.append(i)
except:
pass
i += 1
return (indices)
# checks for certain atom types and grabs just those coordinates
#this is for correctly sorting atom types
def atom_getter(index,atom):
if atom_list[index][1] == atom:
return (atom_xyz[index])
else:
return ('no')
#### GAP CHECK FUNCTION ######
def gap_check(resnum):
indices = index_getter(resnum)
prev_indices = []
next_indices = []
for i in indices:
prev_indices.append(i-4)
next_indices.append(i+4)
atom_types = ['C','N']
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
elif atom == 'N':
iN = atom_getter(i,atom)
for i in prev_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
prevC = atom_getter(i,atom)
for i in next_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
nextN = atom_getter(i,atom)
try:
ahead = np.subtract(nextN, iC)
behind = np.subtract(iN, prevC)
ahead_mag = np.sqrt(ahead.dot(ahead))
behind_mag = np.sqrt(behind.dot(behind))
if ((ahead_mag > 1.5) and (behind_mag > 1.5)):
return('isolated')
elif(ahead_mag > 1.5):
return('ahead')
elif(behind_mag > 1.5):
return('behind')
else:
return('no')
except:
return("Fatal Error in Gap Check")
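# Illustrative note (not part of the original script): gap_check() above flags a
# chain break when the backbone C(i-1)-N(i) or C(i)-N(i+1) distance exceeds
# 1.5 Angstroms, i.e. clearly longer than a normal peptide bond (about 1.33 A).
# A minimal standalone version of that single distance test:
def _demo_peptide_bond_check(prev_C, this_N):
    bond = np.subtract(this_N, prev_C)
    return np.sqrt(bond.dot(bond)) > 1.5   # True -> treat as a chain break
# e.g. _demo_peptide_bond_check(np.array([0., 0., 0.]), np.array([1.33, 0., 0.])) -> False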
#### GAP CHECK FUNCTION for dison3, discn3, discaca3, and dison4 ######
def long_gap_check(resnum):
indices = index_getter(resnum)
next_indices = []
plus_two_indices = []
plus_three_indices =[]
plus_four_indices =[]
for i in indices:
plus_two_indices.append(i+8)
next_indices.append(i+4)
plus_three_indices.append(i+12)
plus_four_indices.append(i+16)
atom_types = ['C','N']
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
for i in plus_two_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
plus_twoC = atom_getter(i,atom)
elif atom == 'N':
plus_twoN = atom_getter(i,atom)
for i in next_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
nextC = atom_getter(i,atom)
elif atom == 'N':
nextN = atom_getter(i,atom)
for i in plus_three_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
plus_threeC = atom_getter(i,atom)
elif atom == 'N':
plus_threeN = atom_getter(i,atom)
for i in plus_four_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
plus_fourN = atom_getter(i,atom)
try:
ahead = np.subtract(nextN, iC)
two_ahead = np.subtract(plus_twoN, nextC)
three_ahead = np.subtract(plus_threeN, plus_twoC)
four_ahead = np.subtract(plus_fourN, plus_threeC)
ahead_mag = np.sqrt(ahead.dot(ahead))
        two_ahead_mag = np.sqrt(two_ahead.dot(two_ahead))
        three_ahead_mag = np.sqrt(three_ahead.dot(three_ahead))
        four_ahead_mag = np.sqrt(four_ahead.dot(four_ahead))
if ((ahead_mag > 1.5) or (two_ahead_mag > 1.5) or (three_ahead_mag > 1.5)):
return('threegap')
elif ((ahead_mag > 1.5) or (two_ahead_mag > 1.5) or (three_ahead_mag > 1.5) or (four_ahead_mag > 1.5)):
return('fourgap')
else:
return('no')
except:
return("Fatal Error in Gap Check")
# ZETA FUNCTION
# returns a single value, zeta for the resnum entered
# There are functions within functions here: I'm sorry. I was new to python
def zeta_calc(resnum):
# The order of atoms in atom_list is N, CA, C, O
#this returns the index values of each of the atoms at this residue number
indices = index_getter(resnum)
prev_indices = []
for i in indices:
prev_indices.append(i-4)
if ((gap_check(resnum) == 'behind') or (gap_check(resnum) == 'isolated')):
return('ERROR')
atom_types = ['C','O']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of PDB file atom order
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
elif atom == 'O':
iO = atom_getter(i,atom)
for i in prev_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
prevC = atom_getter(i,atom)
elif atom == 'O':
prevO = atom_getter(i,atom)
v1 = np.subtract(iC, iO)
v2 = np.subtract(prevC, iC)
v3 = np.subtract(prevO, prevC)
n1 = np.cross(v1,v2)
n2 = np.cross(v2,v3)
dot1 = np.dot(n1,n2)
n1mag = np.sqrt(n1.dot(n1))
n2mag = np.sqrt(n2.dot(n2))
cos_zeta = dot1/(n1mag*n2mag)
zeta = np.degrees(np.arccos(cos_zeta))
#testing for direction
cross = np.cross(n1,n2)
direction = np.dot(cross, v2)
if direction < 0:
zeta = -1 * zeta
return (zeta)
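# Illustrative sketch (not part of the original script): zeta_calc above and the
# omega/phi/psi functions below all repeat the same dihedral recipe -- three bond
# vectors from four points, two plane normals via cross products, and a signed
# angle between the normals. A compact standalone version of that recipe:
def _dihedral(p0, p1, p2, p3):
    v1 = np.subtract(p1, p0)
    v2 = np.subtract(p2, p1)
    v3 = np.subtract(p3, p2)
    n1 = np.cross(v1, v2)
    n2 = np.cross(v2, v3)
    angle = np.degrees(np.arccos(np.dot(n1, n2) / (np.sqrt(n1.dot(n1)) * np.sqrt(n2.dot(n2)))))
    # same sign convention as the angle functions in this script
    if np.dot(np.cross(n1, n2), v2) < 0:
        angle = -angle
    return angle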
# Omega FUNCTION
# returns a single value, ome for the resnum entered
# There are functions within functions here: I'm sorry. I was new to python
def ome_calc(resnum):
# The order of atoms in atom_list is N, CA, C, O
indices = index_getter(resnum)
prev_indices = []
for i in indices:
prev_indices.append(i-4)
if ((gap_check(resnum) == 'behind') or (gap_check(resnum) == 'isolated')):
return('ERROR')
atom_types = ['C','N','CA']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of PDB file atom order
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
iN = atom_getter(i,atom)
elif atom == 'CA':
iCA = atom_getter(i,atom)
for i in prev_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
prevC = atom_getter(i,atom)
elif atom == 'CA':
prevCA = atom_getter(i,atom)
v1 = np.subtract(iN, iCA)
v2 = np.subtract(prevC, iN)
v3 = np.subtract(prevCA, prevC)
n1 = np.cross(v1,v2)
n2 = np.cross(v2,v3)
dot1 = np.dot(n1,n2)
n1mag = np.sqrt(n1.dot(n1))
n2mag = np.sqrt(n2.dot(n2))
cos_ome = dot1/(n1mag*n2mag)
ome = np.degrees(np.arccos(cos_ome))
#testing for direction
cross = np.cross(n1,n2)
direction = np.dot(cross, v2)
if direction < 0:
ome = -1 * ome
return (ome)
# PHI FUNCTION
# returns a single value, phi for the resnum entered
# There are functions within functions here: I'm sorry. I was new to python
def phi_calc(resnum):
indices = index_getter(resnum)
prev_indices = []
for i in indices:
prev_indices.append(i-4)
if ((gap_check(resnum) == 'behind') or (gap_check(resnum) == 'isolated')):
return('ERROR')
atom_types = ['C','N','CA']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of PDB file atom order
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
elif atom == 'CA':
iCA = atom_getter(i,atom)
elif atom == 'N':
iN = atom_getter(i,atom)
for i in prev_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
prevC = atom_getter(i,atom)
v1 = np.subtract(iCA, iC)
v2 = np.subtract(iN, iCA)
v3 = np.subtract(prevC, iN)
n1 = np.cross(v1,v2)
n2 = np.cross(v2,v3)
dot1 = np.dot(n1,n2)
n1mag = np.sqrt(n1.dot(n1))
n2mag = np.sqrt(n2.dot(n2))
cos_phi = dot1/(n1mag*n2mag)
phi = np.degrees(np.arccos(cos_phi))
#testing for direction
cross = np.cross(n1,n2)
direction = np.dot(cross, v2)
if direction < 0:
phi = -1 * phi
return (phi)
# PSI FUNCTION
# returns a single value, psi for the resnum entered
# There are functions within functions here: I'm sorry. I was new to python
def psi_calc(resnum):
indices = index_getter(resnum)
next_indices = []
for i in indices:
next_indices.append(i+4)
if ((gap_check(resnum) == 'ahead') or (gap_check(resnum) == 'isolated')):
return('ERROR')
atom_types = ['C','N','CA']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of PDB file atom order
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
elif atom == 'CA':
iCA = atom_getter(i,atom)
elif atom == 'N':
iN = atom_getter(i,atom)
for i in next_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
nextN = atom_getter(i,atom)
v1 = np.subtract(iC, nextN)
v2 = np.subtract(iCA, iC)
v3 = np.subtract(iN, iCA)
n1 = np.cross(v1,v2)
n2 = np.cross(v2,v3)
dot1 = np.dot(n1,n2)
n1mag = np.sqrt(n1.dot(n1))
n2mag = np.sqrt(n2.dot(n2))
cos_psi = dot1/(n1mag*n2mag)
psi = np.degrees(np.arccos(cos_psi))
#testing for direction
cross = np.cross(n1,n2)
direction = np.dot(cross, v2)
if direction < 0:
psi = -1 * psi
return (psi)
def tau_calc(resnum):
# The order of atoms in atom_list is N, CA, C, O
indices = index_getter(resnum)
prev_indices = []
next_indices = []
for i in indices:
prev_indices.append(i-4)
next_indices.append(i+4)
if ((gap_check(resnum) == 'behind') or (gap_check(resnum) == 'isolated') or (gap_check(resnum) == 'ahead')):
return('ERROR')
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of PDB file atom order
for i in prev_indices:
if atom_getter(i,'CA') == 'no':
pass
else:
prev_CA = atom_getter(i,'CA')
for i in indices:
if atom_getter(i,'CA') == 'no':
pass
else:
iCA = atom_getter(i,'CA')
for i in next_indices:
if atom_getter(i,'CA') == 'no':
pass
else:
next_CA = atom_getter(i,'CA')
v1 = np.subtract(prev_CA, iCA)
v2 = np.subtract(next_CA, iCA)
d1 = np.dot(v1,v2)
v1mag = np.sqrt(v1.dot(v1))
v2mag = np.sqrt(v2.dot(v2))
cos_tau = d1/(v1mag*v2mag)
tau = np.degrees(np.arccos(cos_tau))
return (tau)
def dison3_calc(resnum):
indices = index_getter(resnum)
atom_types = ['N','O']
plus_three_indices = []
for i in indices:
plus_three_indices.append(i+12)
if ((gap_check(resnum) == 'ahead') or (gap_check(resnum) == 'isolated')):
return('ERROR')
if ((long_gap_check(resnum) == 'threegap')):
return('ERROR')
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of PDB file atom order
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'O':
iO = atom_getter(i,atom)
for i in plus_three_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
plus_three_N = atom_getter(i,atom)
dison3 = np.sqrt( np.sum( ( np.array(plus_three_N) - np.array(iO) ) **2 ) )
return (dison3)
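# Context note (illustrative, not part of the original script): dison3 is the
# O(i)...N(i+3) backbone distance, the donor-acceptor geometry of the hydrogen bond
# that characterises 3-10 helices and tight turns; dison4_calc below measures the
# O(i)...N(i+4) equivalent relevant for alpha-helices. Typical N...O hydrogen-bond
# distances are roughly 2.8-3.2 Angstroms, e.g.:
def _demo_on_distance():
    O_i = np.array([0.0, 0.0, 0.0])
    N_i_plus_3 = np.array([2.9, 0.5, 0.0])
    return np.sqrt(np.sum((N_i_plus_3 - O_i) ** 2))   # ~2.94 A, hydrogen-bonding range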
def dison4_calc(resnum):
# The order of atoms in atom_list is N, CA, C, O
#this returns the index values of each of the atoms at this residue number
indices = index_getter(resnum)
plus_four_indices = []
for i in indices:
plus_four_indices.append(i+16)
if ((gap_check(resnum) == 'ahead') or (gap_check(resnum) == 'isolated')):
return('ERROR')
if ((long_gap_check(resnum) == 'fourgap') or (long_gap_check(resnum) == 'threegap')):
return('ERROR')
atom_types = ['N','O']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of PDB file atom order
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'O':
iO = atom_getter(i,atom)
for i in plus_four_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
plus_four_N = atom_getter(i,atom)
dison4 = np.sqrt( np.sum( ( np.array(plus_four_N) - np.array(iO) ) **2 ) )
return (dison4)
def discn3_calc(resnum):
# The order of atoms in atom_list is N, CA, C, O
#this returns the index values of each of the atoms at this residue number
indices = index_getter(resnum)
plus_three_indices = []
for i in indices:
plus_three_indices.append(i+12)
if ((gap_check(resnum) == 'ahead') or (gap_check(resnum) == 'isolated')):
return('ERROR')
if ((long_gap_check(resnum) == 'threegap')):
return('ERROR')
atom_types = ['N','C']
    # gets coords for each atom and creates a variable for each; this makes atom assignment independent of PDB file atom order
for i in indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'C':
iC = atom_getter(i,atom)
for i in plus_three_indices:
for atom in atom_types:
if atom_getter(i,atom) == 'no':
pass
elif atom == 'N':
plus_three_N = atom_getter(i,atom)
discn3 = np.sqrt( np.sum( ( np.array(plus_three_N) - | np.array(iC) | numpy.array |
import os
import sys
import shutil
import subprocess
import xarray as xr
import numpy as np
# Current, parent and file paths
CWD = os.getcwd()
CF = os.path.realpath(__file__)
CFD = os.path.dirname(CF)
# Import library specific modules
sys.path.append(os.path.join(CFD,"../"))
sys.path.append(os.path.join(CFD,"../pyspod"))
from pyspod.spod_low_storage import SPOD_low_storage
from pyspod.spod_low_ram import SPOD_low_ram
from pyspod.spod_streaming import SPOD_streaming
# Let's create some 2D synthetic data
# and store it in a variable called p
variables = ['p']
x1 = np.linspace(0,10,100)
x2 = np.linspace(0, 5, 50)
xx1, xx2 = np.meshgrid(x1, x2)
t = | np.linspace(0, 200, 1000) | numpy.linspace |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from numpy import pi
import pytest
import comadyn.generators as g
from comadyn.generators import *
from comadyn.util import mhat
def test_CosineAngle():
# make the test deterministic
# struct.unpack("<L", np.random.bytes(4))[0]
np.random.seed(1448982574)
N = 1000
u = CosineAngle(0, pi / 2)
th = np.array([next(u) for i in range(N)])
th.sort()
assert th[0] >= 0
assert th[-1] <= pi / 2
# compare CDF to ideal case
f = np.cumsum(np.ones_like(th)) / N
d = f - (1 - np.cos(th)**2)
assert np.mean(d) < 2 * np.std(d)
def test_CosineAngle_limits():
# make the test deterministic
# struct.unpack("<L", np.random.bytes(4))[0]
np.random.seed(1448982574)
N = 1000
u = CosineAngle(pi / 6, pi / 4)
th = np.array([next(u) for i in range(N)])
th.sort()
assert th[0] >= pi / 6
assert th[-1] <= pi / 4
# compare CDF to ideal case
f = np.cumsum(np.ones_like(th)) / N
d = f - (1 - np.cos(th)**2)
assert np.mean(d) < 2 * np.std(d)
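# The two tests above share one statistical check: sort the samples, build the
# empirical CDF, and require the mean deviation from the analytic CDF to stay
# within two standard deviations. A small reusable sketch of that pattern
# (illustrative only, not used by the test module):
def _cdf_close(samples, analytic_cdf):
    x = np.sort(np.asarray(samples, dtype=float))
    f = np.cumsum(np.ones_like(x)) / len(x)
    d = f - analytic_cdf(x)
    return np.mean(d) < 2 * np.std(d)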
def test_Delta():
N = 1000
u = Delta(5)
x = np.array([next(u) for i in range(N)])
assert np.all(x == 5)
def test_Grid():
N = 1000
u = Grid(0, 5, N)
x = np.array([next(u) for i in range(N)])
assert np.all(x == np.linspace(0, 5, N))
with pytest.raises(StopIteration):
next(u)
def test_Grid_endpoint():
N = 1000
u = Grid(0, 5, N, endpoint=False)
x = np.array([next(u) for i in range(N)])
assert np.all(x == np.linspace(0, 5, N, endpoint=False))
with pytest.raises(StopIteration):
next(u)
def test_Grid_log():
N = 1000
u = Grid(-1, 5, N, log=True)
x = np.array([next(u) for i in range(N)])
assert np.all(x == np.logspace(-1, 5, N))
with pytest.raises(StopIteration):
next(u)
def test_Grid_cycle():
N = 100
u = Grid(0, 5, N, cycle=2)
x = np.array([next(u) for i in range(2 * N)])
assert np.all(x == np.tile(np.linspace(0, 5, N), 2))
with pytest.raises(StopIteration):
next(u)
def test_Grid_repeat():
N = 100
u = Grid(0, 5, N, repeat=2)
x = np.array([next(u) for i in range(2 * N)])
assert np.all(x == np.repeat(np.linspace(0, 5, N), 2))
with pytest.raises(StopIteration):
next(u)
def test_Log():
# make the test deterministic
# struct.unpack("<L", np.random.bytes(4))[0]
np.random.seed(1448982574)
N = 1000
u = Log(-1, 1)
x = np.array([next(u) for i in range(N)])
x.sort()
assert x[0] >= 0.1
assert x[-1] <= 10
# compare CDF to ideal case
f = np.cumsum(np.ones_like(x)) / N
d = f - np.log(10) * np.log(x)
assert np.mean(d) < 2 * np.std(d)
def test_Normal():
from scipy.special import erfc
# make the test deterministic
# struct.unpack("<L", np.random.bytes(4))[0]
np.random.seed(1448982574)
N = 1000
u = Normal()
x = np.array([next(u) for i in range(N)])
x.sort()
# compare CDF to ideal case
f = np.cumsum(np.ones_like(x)) / N
d = f - (2 - erfc(x)) / 2
assert np.mean(d) < 2 * np.std(d)
def test_Normal_limits():
from scipy.special import erfc
# make the test deterministic
# struct.unpack("<L", np.random.bytes(4))[0]
np.random.seed(1448982574)
N = 1000
u = Normal(x0=0)
x = np.array([next(u) for i in range(N)])
x.sort()
# compare CDF to ideal case
f = np.cumsum(np.ones_like(x)) / N
d = f - (2 - erfc(x)) / 2
assert np.mean(d) < 2 * np.std(d)
def test_Sequence():
N = 100
s = np.random.rand(N)
u = Sequence(s)
x = np.array([next(u) for i in range(N)])
assert np.all(s == x)
with pytest.raises(StopIteration):
next(u)
def test_Sequence_cycle():
N = 100
s = np.random.rand(N)
u = Sequence(s, cycle=2)
x = np.array([next(u) for i in range(2 * N)])
assert np.all(x == np.tile(s, 2))
with pytest.raises(StopIteration):
next(u)
def test_Sequence_repeat():
N = 100
s = np.random.rand(N)
u = Sequence(s, repeat=2)
x = np.array([next(u) for i in range(2 * N)])
assert np.all(x == np.repeat(s, 2))
with pytest.raises(StopIteration):
next(u)
def test_Uniform():
# make the test deterministic
# struct.unpack("<L", np.random.bytes(4))[0]
np.random.seed(1448982574)
N = 1000
u = Uniform(0, 5)
x = np.array([next(u) for i in range(N)])
x.sort()
assert x[0] >= 0
assert x[-1] <= 5
# compare CDF to ideal case
f = np.cumsum(np.ones_like(x)) / N
d = f - x
assert | np.mean(d) | numpy.mean |
# -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.models.rgb.transfer_functions.itur_bt_2100`
module.
"""
from __future__ import division, unicode_literals
import numpy as np
import unittest
from colour.models.rgb.transfer_functions import (
oetf_PQ_BT2100, oetf_inverse_PQ_BT2100, eotf_PQ_BT2100,
eotf_inverse_PQ_BT2100, ootf_PQ_BT2100, ootf_inverse_PQ_BT2100,
oetf_HLG_BT2100, oetf_inverse_HLG_BT2100)
from colour.models.rgb.transfer_functions.itur_bt_2100 import (
eotf_HLG_BT2100_1, eotf_HLG_BT2100_2, eotf_inverse_HLG_BT2100_1,
eotf_inverse_HLG_BT2100_2, ootf_HLG_BT2100_1, ootf_HLG_BT2100_2,
ootf_inverse_HLG_BT2100_1, ootf_inverse_HLG_BT2100_2)
from colour.models.rgb.transfer_functions.itur_bt_2100 import (
gamma_function_HLG_BT2100)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestOetf_PQ_BT2100', 'TestOetf_inverse_PQ_BT2100', 'TestEotf_PQ_BT2100',
'TestEotf_inverse_PQ_BT2100', 'TestOotf_PQ_BT2100',
'TestOotf_inverse_PQ_BT2100', 'TestGamma_function_HLG_BT2100',
'TestOetf_HLG_BT2100', 'TestOetf_inverse_HLG_BT2100',
'TestEotf_HLG_BT2100_1', 'TestEotf_HLG_BT2100_2',
'TestEotf_inverse_HLG_BT2100_1', 'TestEotf_inverse_HLG_BT2100_2',
'TestOotf_HLG_BT2100_1', 'TestOotf_HLG_BT2100_2',
'TestOotf_inverse_HLG_BT2100_1', 'TestOotf_inverse_HLG_BT2100_2'
]
class TestOetf_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition unit tests methods.
"""
def test_oetf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition.
"""
self.assertAlmostEqual(
oetf_PQ_BT2100(0.0), 0.000000730955903, places=7)
self.assertAlmostEqual(
oetf_PQ_BT2100(0.1), 0.724769816665726, places=7)
self.assertAlmostEqual(
oetf_PQ_BT2100(1.0), 0.999999934308041, places=7)
def test_n_dimensional_oetf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition n-dimensional arrays support.
"""
E = 0.1
E_p = oetf_PQ_BT2100(E)
E = np.tile(E, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(oetf_PQ_BT2100(E), E_p, decimal=7)
E = np.reshape(E, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(oetf_PQ_BT2100(E), E_p, decimal=7)
E = np.reshape(E, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(oetf_PQ_BT2100(E), E_p, decimal=7)
def test_domain_range_scale_oetf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition domain and range scale support.
"""
E = 0.1
E_p = oetf_PQ_BT2100(E)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
oetf_PQ_BT2100(E * factor), E_p * factor, decimal=7)
@ignore_numpy_errors
def test_nan_oetf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_PQ_BT2100` definition nan support.
"""
oetf_PQ_BT2100(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOetf_inverse_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition unit tests methods.
"""
def test_oetf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition.
"""
self.assertAlmostEqual(
oetf_inverse_PQ_BT2100(0.000000730955903), 0.0, places=7)
self.assertAlmostEqual(
oetf_inverse_PQ_BT2100(0.724769816665726), 0.1, places=7)
self.assertAlmostEqual(
oetf_inverse_PQ_BT2100(0.999999934308041), 1.0, places=7)
def test_n_dimensional_oetf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition n-dimensional arrays support.
"""
E_p = 0.724769816665726
E = oetf_inverse_PQ_BT2100(E_p)
E_p = np.tile(E_p, 6)
E = np.tile(E, 6)
np.testing.assert_almost_equal(
oetf_inverse_PQ_BT2100(E_p), E, decimal=7)
E_p = np.reshape(E_p, (2, 3))
E = np.reshape(E, (2, 3))
np.testing.assert_almost_equal(
oetf_inverse_PQ_BT2100(E_p), E, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
E = np.reshape(E, (2, 3, 1))
np.testing.assert_almost_equal(
oetf_inverse_PQ_BT2100(E_p), E, decimal=7)
def test_domain_range_scale_oetf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition domain and range scale support.
"""
E_p = 0.724769816665726
E = oetf_inverse_PQ_BT2100(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
oetf_inverse_PQ_BT2100(E_p * factor),
E * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_oetf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_PQ_BT2100` definition nan support.
"""
oetf_inverse_PQ_BT2100(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition unit tests methods.
"""
def test_eotf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition.
"""
self.assertAlmostEqual(eotf_PQ_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_PQ_BT2100(0.724769816665726), 779.98836083408537, places=7)
self.assertAlmostEqual(eotf_PQ_BT2100(1.0), 10000.0, places=7)
def test_n_dimensional_eotf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition n-dimensional arrays support.
"""
E_p = 0.724769816665726
F_D = eotf_PQ_BT2100(E_p)
E_p = np.tile(E_p, 6)
F_D = np.tile(F_D, 6)
np.testing.assert_almost_equal(eotf_PQ_BT2100(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3))
F_D = np.reshape(F_D, (2, 3))
np.testing.assert_almost_equal(eotf_PQ_BT2100(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
F_D = np.reshape(F_D, (2, 3, 1))
np.testing.assert_almost_equal(eotf_PQ_BT2100(E_p), F_D, decimal=7)
def test_domain_range_scale_eotf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition domain and range scale support.
"""
E_p = 0.724769816665726
F_D = eotf_PQ_BT2100(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_PQ_BT2100(E_p * factor), F_D * factor, decimal=7)
@ignore_numpy_errors
def test_nan_eotf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_PQ_BT2100` definition nan support.
"""
eotf_PQ_BT2100(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_inverse_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition unit tests methods.
"""
def test_eotf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition.
"""
self.assertAlmostEqual(
eotf_inverse_PQ_BT2100(0.0), 0.000000730955903, places=7)
self.assertAlmostEqual(
eotf_inverse_PQ_BT2100(779.98836083408537),
0.724769816665726,
places=7)
self.assertAlmostEqual(eotf_inverse_PQ_BT2100(10000.0), 1.0, places=7)
def test_n_dimensional_eotf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition n-dimensional arrays support.
"""
F_D = 779.98836083408537
E_p = eotf_inverse_PQ_BT2100(F_D)
F_D = np.tile(F_D, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(
eotf_inverse_PQ_BT2100(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(
eotf_inverse_PQ_BT2100(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(
eotf_inverse_PQ_BT2100(F_D), E_p, decimal=7)
def test_domain_range_scale_eotf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition domain and range scale support.
"""
F_D = 779.98836083408537
E_p = eotf_inverse_PQ_BT2100(F_D)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_inverse_PQ_BT2100(F_D * factor),
E_p * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_eotf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_PQ_BT2100` definition nan support.
"""
eotf_inverse_PQ_BT2100(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOotf_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition unit tests methods.
"""
def test_ootf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition.
"""
self.assertAlmostEqual(ootf_PQ_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
ootf_PQ_BT2100(0.1), 779.98836083411584, places=7)
self.assertAlmostEqual(
ootf_PQ_BT2100(1.0), 9999.993723673924300, places=7)
def test_n_dimensional_ootf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition n-dimensional arrays support.
"""
E = 0.1
F_D = ootf_PQ_BT2100(E)
E = np.tile(E, 6)
F_D = np.tile(F_D, 6)
np.testing.assert_almost_equal(ootf_PQ_BT2100(E), F_D, decimal=7)
E = np.reshape(E, (2, 3))
F_D = np.reshape(F_D, (2, 3))
np.testing.assert_almost_equal(ootf_PQ_BT2100(E), F_D, decimal=7)
E = np.reshape(E, (2, 3, 1))
F_D = np.reshape(F_D, (2, 3, 1))
np.testing.assert_almost_equal(ootf_PQ_BT2100(E), F_D, decimal=7)
def test_domain_range_scale_ootf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition domain and range scale support.
"""
E = 0.1
F_D = ootf_PQ_BT2100(E)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
ootf_PQ_BT2100(E * factor), F_D * factor, decimal=7)
@ignore_numpy_errors
def test_nan_ootf_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_PQ_BT2100` definition nan support.
"""
ootf_PQ_BT2100(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOotf_inverse_PQ_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition unit tests methods.
"""
def test_ootf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition.
"""
self.assertAlmostEqual(ootf_inverse_PQ_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
ootf_inverse_PQ_BT2100(779.98836083411584), 0.1, places=7)
self.assertAlmostEqual(
ootf_inverse_PQ_BT2100(9999.993723673924300), 1.0, places=7)
def test_n_dimensional_ootf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition n-dimensional arrays support.
"""
F_D = 779.98836083411584
E = ootf_inverse_PQ_BT2100(F_D)
F_D = np.tile(F_D, 6)
E = np.tile(E, 6)
np.testing.assert_almost_equal(
ootf_inverse_PQ_BT2100(F_D), E, decimal=7)
F_D = np.reshape(F_D, (2, 3))
E = np.reshape(E, (2, 3))
np.testing.assert_almost_equal(
ootf_inverse_PQ_BT2100(F_D), E, decimal=7)
F_D = np.reshape(F_D, (2, 3, 1))
E = np.reshape(E, (2, 3, 1))
np.testing.assert_almost_equal(
ootf_inverse_PQ_BT2100(F_D), E, decimal=7)
def test_domain_range_scale_ootf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition domain and range scale support.
"""
F_D = 779.98836083411584
E = ootf_inverse_PQ_BT2100(F_D)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
ootf_inverse_PQ_BT2100(F_D * factor),
E * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_ootf_inverse_PQ_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
ootf_inverse_PQ_BT2100` definition nan support.
"""
ootf_inverse_PQ_BT2100(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestGamma_function_HLG_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
gamma_function_HLG_BT2100` definition unit tests methods.
"""
def test_gamma_function_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
gamma_function_HLG_BT2100` definition.
"""
self.assertAlmostEqual(
gamma_function_HLG_BT2100(1000.0), 1.2, places=7)
self.assertAlmostEqual(
gamma_function_HLG_BT2100(2000.0), 1.326432598178872, places=7)
self.assertAlmostEqual(
gamma_function_HLG_BT2100(4000.0), 1.452865196357744, places=7)
self.assertAlmostEqual(
gamma_function_HLG_BT2100(10000.0), 1.619999999999999, places=7)
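# Note (illustrative, not part of the test suite): the reference values above are
# consistent with the HLG extended system gamma in ITU-R BT.2100,
# gamma = 1.2 + 0.42 * log10(L_W / 1000), for a nominal peak luminance L_W in cd/m2
# (e.g. 1.2 + 0.42 * log10(2000 / 1000) ~= 1.3264).
def _hlg_gamma_sketch(L_W):
    return 1.2 + 0.42 * np.log10(L_W / 1000)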
class TestOetf_HLG_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition unit tests methods.
"""
def test_oetf_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition.
"""
self.assertAlmostEqual(oetf_HLG_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
oetf_HLG_BT2100(0.18 / 12), 0.212132034355964, places=7)
self.assertAlmostEqual(
oetf_HLG_BT2100(1.0), 0.999999995536569, places=7)
def test_n_dimensional_oetf_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition n-dimensional arrays support.
"""
E = 0.18 / 12
E_p = oetf_HLG_BT2100(E)
E = np.tile(E, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(oetf_HLG_BT2100(E), E_p, decimal=7)
E = np.reshape(E, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(oetf_HLG_BT2100(E), E_p, decimal=7)
E = np.reshape(E, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(oetf_HLG_BT2100(E), E_p, decimal=7)
def test_domain_range_scale_oetf_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition domain and range scale support.
"""
E = 0.18 / 12
E_p = oetf_HLG_BT2100(E)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
oetf_HLG_BT2100(E * factor), E_p * factor, decimal=7)
@ignore_numpy_errors
def test_nan_oetf_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_HLG_BT2100` definition nan support.
"""
oetf_HLG_BT2100(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestOetf_inverse_HLG_BT2100(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition unit tests methods.
"""
def test_oetf_inverse_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition.
"""
self.assertAlmostEqual(oetf_inverse_HLG_BT2100(0.0), 0.0, places=7)
self.assertAlmostEqual(
oetf_inverse_HLG_BT2100(0.212132034355964), 0.18 / 12, places=7)
self.assertAlmostEqual(
oetf_inverse_HLG_BT2100(0.999999995536569), 1.0, places=7)
def test_n_dimensional_oetf_inverse_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition n-dimensional arrays support.
"""
E_p = 0.212132034355964
E = oetf_inverse_HLG_BT2100(E_p)
E_p = np.tile(E_p, 6)
E = np.tile(E, 6)
np.testing.assert_almost_equal(
oetf_inverse_HLG_BT2100(E_p), E, decimal=7)
E_p = np.reshape(E_p, (2, 3))
E = np.reshape(E, (2, 3))
np.testing.assert_almost_equal(
oetf_inverse_HLG_BT2100(E_p), E, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
E = np.reshape(E, (2, 3, 1))
np.testing.assert_almost_equal(
oetf_inverse_HLG_BT2100(E_p), E, decimal=7)
def test_domain_range_scale_oetf_inverse_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition domain and range scale support.
"""
E_p = 0.212132034355964
E = oetf_inverse_HLG_BT2100(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
oetf_inverse_HLG_BT2100(E_p * factor),
E * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_oetf_inverse_HLG_BT2100(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
oetf_inverse_HLG_BT2100` definition nan support.
"""
oetf_inverse_HLG_BT2100(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_HLG_BT2100_1(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition unit tests methods.
"""
def test_eotf_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition.
"""
self.assertAlmostEqual(eotf_HLG_BT2100_1(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_1(0.212132034355964), 6.476039825649814, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_1(1.0), 1000.000032321769100, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_1(0.212132034355964, 0.001, 10000, 1.4),
27.96039175299561,
places=7)
def test_n_dimensional_eotf_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition n-dimensional arrays support.
"""
E_p = 0.212132034355964
F_D = eotf_HLG_BT2100_1(E_p)
E_p = np.tile(E_p, 6)
F_D = np.tile(F_D, 6)
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3))
F_D = np.reshape(F_D, (2, 3))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
F_D = np.reshape(F_D, (2, 3, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (6, 1))
F_D = np.reshape(F_D, (6, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.array([0.25, 0.50, 0.75])
F_D = np.array([12.49759413, 49.99037650, 158.94693786])
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.tile(E_p, (6, 1))
F_D = np.tile(F_D, (6, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 3))
F_D = np.reshape(F_D, (2, 3, 3))
np.testing.assert_almost_equal(eotf_HLG_BT2100_1(E_p), F_D, decimal=7)
def test_domain_range_scale_eotf_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition domain and range scale support.
"""
E_p = 0.212132034355964
F_D = eotf_HLG_BT2100_1(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_HLG_BT2100_1(E_p * factor), F_D * factor, decimal=7)
@ignore_numpy_errors
def test_nan_eotf_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_1` definition nan support.
"""
eotf_HLG_BT2100_1(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_HLG_BT2100_2(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition unit tests methods.
"""
def test_eotf_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition.
"""
self.assertAlmostEqual(eotf_HLG_BT2100_2(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_2(0.212132034355964), 6.476039825649814, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_2(1.0), 1000.000032321769100, places=7)
self.assertAlmostEqual(
eotf_HLG_BT2100_2(0.212132034355964, 0.001, 10000, 1.4),
29.581261576946076,
places=7)
def test_n_dimensional_eotf_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition n-dimensional arrays support.
"""
E_p = 0.212132034355964
F_D = eotf_HLG_BT2100_2(E_p)
E_p = np.tile(E_p, 6)
F_D = np.tile(F_D, 6)
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3))
F_D = np.reshape(F_D, (2, 3))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 1))
F_D = np.reshape(F_D, (2, 3, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (6, 1))
F_D = np.reshape(F_D, (6, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.array([0.25, 0.50, 0.75])
F_D = np.array([12.49759413, 49.99037650, 158.94693786])
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.tile(E_p, (6, 1))
F_D = np.tile(F_D, (6, 1))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
E_p = np.reshape(E_p, (2, 3, 3))
F_D = np.reshape(F_D, (2, 3, 3))
np.testing.assert_almost_equal(eotf_HLG_BT2100_2(E_p), F_D, decimal=7)
def test_domain_range_scale_eotf_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition domain and range scale support.
"""
E_p = 0.212132034355964
F_D = eotf_HLG_BT2100_2(E_p)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_HLG_BT2100_2(E_p * factor), F_D * factor, decimal=7)
@ignore_numpy_errors
def test_nan_eotf_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_HLG_BT2100_2` definition nan support.
"""
eotf_HLG_BT2100_2(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_inverse_HLG_BT2100_1(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition unit tests methods.
"""
def test_eotf_inverse_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition.
"""
self.assertAlmostEqual(eotf_inverse_HLG_BT2100_1(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_1(6.476039825649814),
0.212132034355964,
places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_1(1000.000032321769100), 1.0, places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_1(27.96039175299561, 0.001, 10000, 1.4),
0.212132034355964,
places=7)
def test_n_dimensional_eotf_inverse_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition n-dimensional arrays support.
"""
F_D = 6.476039825649814
E_p = eotf_inverse_HLG_BT2100_1(F_D)
F_D = np.tile(F_D, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (6, 1))
E_p = np.reshape(E_p, (6, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.array([12.49759413, 49.99037650, 158.94693786])
E_p = np.array([0.25, 0.50, 0.75])
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.tile(F_D, (6, 1))
E_p = np.tile(E_p, (6, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3, 3))
E_p = np.reshape(E_p, (2, 3, 3))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D), E_p, decimal=7)
def test_domain_range_scale_eotf_inverse_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition domain and range scale support.
"""
F_D = 6.476039825649814
E_p = eotf_inverse_HLG_BT2100_1(F_D)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_1(F_D * factor),
E_p * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_eotf_inverse_HLG_BT2100_1(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_1` definition nan support.
"""
eotf_inverse_HLG_BT2100_1(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestEotf_inverse_HLG_BT2100_2(unittest.TestCase):
"""
Defines :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_2` definition unit tests methods.
"""
def test_eotf_inverse_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_2` definition.
"""
self.assertAlmostEqual(eotf_inverse_HLG_BT2100_2(0.0), 0.0, places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_2(6.476039825649814),
0.212132034355964,
places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_2(1000.000032321769100), 1.0, places=7)
self.assertAlmostEqual(
eotf_inverse_HLG_BT2100_2(29.581261576946076, 0.001, 10000, 1.4),
0.212132034355964,
places=7)
def test_n_dimensional_eotf_inverse_HLG_BT2100_2(self):
"""
Tests :func:`colour.models.rgb.transfer_functions.itur_bt_2100.\
eotf_inverse_HLG_BT2100_2` definition n-dimensional arrays support.
"""
F_D = 6.476039825649814
E_p = eotf_inverse_HLG_BT2100_2(F_D)
F_D = np.tile(F_D, 6)
E_p = np.tile(E_p, 6)
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3))
E_p = np.reshape(E_p, (2, 3))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
F_D = np.reshape(F_D, (2, 3, 1))
E_p = np.reshape(E_p, (2, 3, 1))
np.testing.assert_almost_equal(
eotf_inverse_HLG_BT2100_2(F_D), E_p, decimal=7)
F_D = | np.reshape(F_D, (6, 1)) | numpy.reshape |
import argparse
import numpy as np
def solve(*args, **kwargs):
return None
def main(args):
print('Solving')
print('Input: {0}'.format(args.input))
print('Output: {0}'.format(args.output))
    with open(args.input, 'r') as in_file:
import argparse
import numpy as np
import matplotlib.pyplot as plt
import utils.parallel as parallel
def preprocess(idx, photo1: tuple, all_photos: list):
n = len(all_photos)
intersections = np.zeros(n, np.int16)
diffsizehalf = np.zeros(n, np.int16)
u = np.zeros(n, np.int16)
tags1 = photo1[3]
for i, photo2 in enumerate(all_photos):
if idx == i:
continue
tags2 = photo2[3]
num_intersections = len(tags1.intersection(tags2))
if num_intersections == 0:
continue
h = int(np.abs(len(tags1) - len(tags2))) // 2
intersections[i] = num_intersections
diffsizehalf[i] = h
if photo2[1]:
u[i] = intersections[i]
else:
u[i] = np.maximum(intersections[i] - diffsizehalf[i], 0)
return idx, intersections, diffsizehalf, u
def merge(v1, v2):
assert v1[1] == v2[1] == True
merged_tags = v1[3].union(v2[3])
return [v1[0], v2[0]], False, len(merged_tags), merged_tags
#def select_vertical(photos):
def solve(photos):
tags_set = set()
n = len(photos)
intersections = | np.zeros((n, n), dtype=np.int16) | numpy.zeros |
import logging
from glob import glob
from pathlib import Path
import cv2
import numpy as np
from imgaug import augmenters as iaa
from imgaug.augmentables import BoundingBox, BoundingBoxesOnImage, Keypoint, KeypointsOnImage
from omegaconf.listconfig import ListConfig
from PIL import Image
from pycocotools.coco import COCO
from torch.utils import data
from utils.helper import instantiate_augmenters
from utils.image import draw_umich_gaussian as draw_gaussian
from utils.image import gaussian_radius
from utils.box import get_annotation_with_angle, rotate_bbox
cv2.setNumThreads(0)
log = logging.getLogger(__name__)
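# Background sketch (illustrative, not part of this module): the __get_*_coco
# methods below build CenterNet-style targets, where each object contributes a 2D
# Gaussian bump on its class heatmap at the downsampled box centre -- the role
# played here by the imported gaussian_radius / draw_gaussian helpers. A minimal
# standalone version of that splatting step (not the exact implementation of
# those helpers):
def _splat_gaussian(heatmap, center, radius):
    radius = int(radius)
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    gaussian = np.exp(-(x * x + y * y) / (2.0 * (radius / 3.0) ** 2 + 1e-12))
    cx, cy = int(center[0]), int(center[1])
    h, w = heatmap.shape
    left, right = min(cx, radius), min(w - cx, radius + 1)
    top, bottom = min(cy, radius), min(h - cy, radius + 1)
    heatmap[cy - top:cy + bottom, cx - left:cx + right] = np.maximum(
        heatmap[cy - top:cy + bottom, cx - left:cx + right],
        gaussian[radius - top:radius + bottom, radius - left:radius + right])
    return heatmap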
class Dataset(data.Dataset):
def __init__(
self, image_folder, annotation_file, input_size=(512, 512),
target_domain_glob=None, num_classes=80, num_keypoints=0,
rotated_boxes=False, mean=(0.40789654, 0.44719302, 0.47026115),
std=(0.28863828, 0.27408164, 0.27809835),
augmentation=None, augment_target_domain=False, max_detections=150,
down_ratio=4):
self.image_folder = Path(image_folder)
self.coco = COCO(annotation_file)
self.images = self.coco.getImgIds()
self.use_rotated_boxes = rotated_boxes
self.max_detections = max_detections
self.down_ratio = down_ratio
self.input_size = input_size
self.mean = np.array(mean, dtype=np.float32).reshape(1, 1, 3)
self.std = np.array(std, dtype=np.float32).reshape(1, 1, 3)
self.augmentation = augmentation
self.num_classes = num_classes
self.num_keypoints = num_keypoints
self.string_id_mapping = {}
self.augment_target_domain = augment_target_domain
self.cat_mapping = {v: i for i,
v in enumerate(range(1, num_classes + 1))}
self.classes = {y: self.coco.cats[x] if x in self.coco.cats else ''
for x, y in self.cat_mapping.items()}
assert len(input_size) == 2
if isinstance(target_domain_glob, str):
self.target_domain_files = glob(target_domain_glob)
elif isinstance(target_domain_glob, (list, ListConfig)):
self.target_domain_files = []
for pattern in target_domain_glob:
self.target_domain_files.extend(glob(pattern))
else:
self.target_domain_files = []
if self.augmentation:
augmentation_methods = instantiate_augmenters(augmentation)
self.augmentation = iaa.Sequential(augmentation_methods)
self.resize = iaa.Resize((self.input_size[0], self.input_size[1]))
self.resize_out = iaa.Resize(
(self.input_size[0] // down_ratio,
self.input_size[1] // down_ratio))
log.info(
f"found {len(self.target_domain_files)} samples for target domain")
super().__init__()
def __len__(self):
return len(self.images)
def __getitem__(self, index):
img_id = self.images[index]
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = self.image_folder / file_name
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_detections)
img = np.array(Image.open(img_path).convert("RGB"))
if self.use_rotated_boxes:
ret = self.__get_rotated_coco(img, anns, num_objs)
else:
ret = self.__get_default_coco(img, anns, num_objs)
if isinstance(img_id, str):
mapped_id = self.string_id_mapping.get(
img_id, 1 + len(self.string_id_mapping))
self.string_id_mapping[img_id] = mapped_id
img_id = mapped_id
ret['id'] = img_id
if len(self.target_domain_files):
target_domain_img = np.array(Image.open(
np.random.choice(self.target_domain_files)).convert("RGB"))
if self.augmentation is not None and self.augment_target_domain:
target_domain_img = self.augmentation(image=target_domain_img)
target_domain_img = self.resize(image=target_domain_img)
target_domain_img = np.array(
target_domain_img, dtype=np.float32) / 255.0
target_domain_img = (target_domain_img - self.mean) / self.std
target_domain_img = target_domain_img.transpose(2, 0, 1)
ret['target_domain_input'] = target_domain_img
return ret
def __get_default_coco(self, img, anns, num_objs):
boxes = []
if self.num_keypoints > 0:
kpts = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
boxes.append(BoundingBox(*bbox))
if self.num_keypoints > 0:
if 'keypoints' not in ann:
ann['keypoints'] = np.zeros((3 * self.num_keypoints,))
kpt = [
Keypoint(*x)
for x in np.array(ann['keypoints']).reshape(-1, 3)
[:, : 2]]
kpts.extend(kpt)
bbs = BoundingBoxesOnImage(boxes, shape=img.shape)
if self.num_keypoints > 0:
kpts = KeypointsOnImage(kpts, shape=img.shape)
if self.augmentation is not None:
if self.num_keypoints > 0:
img_aug, bbs_aug, kpts_aug = self.augmentation(
image=img, bounding_boxes=bbs, keypoints=kpts)
else:
img_aug, bbs_aug = self.augmentation(
image=img, bounding_boxes=bbs)
else:
if self.num_keypoints > 0:
kpts_aug = kpts.copy()
img_aug, bbs_aug = np.copy(img), bbs.copy()
if self.num_keypoints > 0:
img_aug, bbs_aug, kpts_aug = self.resize(
image=img_aug, bounding_boxes=bbs_aug, keypoints=kpts_aug)
else:
img_aug, bbs_aug = self.resize(
image=img_aug, bounding_boxes=bbs_aug)
img = (img_aug.astype(np.float32) / 255.)
inp = (img - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_h = self.input_size[1] // self.down_ratio
output_w = self.input_size[0] // self.down_ratio
num_classes = self.num_classes
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_detections, 2), dtype=np.float32)
reg = np.zeros((self.max_detections, 2), dtype=np.float32)
ind = np.zeros((self.max_detections), dtype=np.int64)
reg_mask = np.zeros((self.max_detections), dtype=np.uint8)
        gt_det = np.zeros((self.max_detections, 6), dtype=np.float32)  # one [x1, y1, x2, y2, score, class] row per detection
gt_areas = np.zeros((self.max_detections), dtype=np.float32)
if self.num_keypoints > 0:
kp = np.zeros(
(self.max_detections,
self.num_keypoints * 2),
dtype=np.float32)
gt_kp = np.zeros(
(self.max_detections, self.num_keypoints, 2), dtype=np.float32)
kp_reg_mask = np.zeros(
(self.max_detections, self.num_keypoints * 2), dtype=np.uint8)
bbs_aug, kpts_aug = self.resize_out(
bounding_boxes=bbs_aug, keypoints=kpts_aug)
else:
bbs_aug = self.resize_out(bounding_boxes=bbs_aug)
for k in range(num_objs):
ann = anns[k]
bbox_aug = bbs_aug[k].clip_out_of_image((output_w, output_h))
bbox = np.array([bbox_aug.x1, bbox_aug.y1,
bbox_aug.x2, bbox_aug.y2])
cls_id = int(self.cat_mapping[ann['category_id']])
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius = gaussian_radius((np.ceil(h), np.ceil(w)))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2],
dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(hm[cls_id], ct_int, radius)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
gt_det[k] = ([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
if self.num_keypoints > 0:
valid = np.array(ann["keypoints"]).reshape(-1, 3)[:, -1]
for i, p in enumerate(
kpts_aug[k * self.num_keypoints: k * self.num_keypoints + self.num_keypoints]):
kp[k][i * 2] = p.x - ct_int[0]
kp[k][i * 2 + 1] = p.y - ct_int[1]
is_valid = valid[i] == 2 and not p.is_out_of_image(
(output_w, output_h))
kp_reg_mask[k, i * 2] = int(is_valid)
kp_reg_mask[k, i * 2 + 1] = int(is_valid)
gt_kp[k][i] = p.x, p.y
if "area" not in ann:
gt_areas[k] = w * h
else:
gt_areas[k] = ann["area"]
del bbs
del bbs_aug
del img_aug
gt_det = np.array(gt_det, dtype=np.float32) if len(
gt_det) > 0 else np.zeros((1, 6), dtype=np.float32)
ret = {
'input': inp,
'hm': hm,
'reg_mask': reg_mask,
'ind': ind,
'wh': wh,
'reg': reg,
'gt_dets': gt_det,
'gt_areas': gt_areas,
}
if self.num_keypoints > 0:
ret['kps'] = kp
ret['gt_kps'] = gt_kp
ret['kp_reg_mask'] = kp_reg_mask
del kpts_aug
return ret
def __get_rotated_coco(self, img, anns, num_objs):
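# Rotated boxes are converted to their corner points and carried through augmentation
# as keypoints; per-object pose keypoints, if any, are appended after the box corners
# (idx_boxes marks the boundary between the two groups).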
kpts = []
kpts_tmp = []
for k in range(num_objs):
ann = anns[k]
ann_rotated = get_annotation_with_angle(ann)
ann_rotated[4] = ann_rotated[4]
rot = rotate_bbox(*ann_rotated)
kpts.extend([Keypoint(*x) for x in rot])
if self.num_keypoints > 0:
if 'keypoints' not in ann:
ann['keypoints'] = np.zeros((3 * self.num_keypoints,))
kpt = [
Keypoint(*x)
for x in np.array(ann['keypoints']).reshape(-1, 3)
[:, : 2]]
kpts_tmp.extend(kpt)
idx_boxes = len(kpts)
if self.num_keypoints > 0:
kpts.extend(kpts_tmp)
kpts = KeypointsOnImage(kpts, shape=img.shape)
if self.augmentation is not None:
img_aug, kpts_aug = self.augmentation(image=img, keypoints=kpts)
else:
img_aug, kpts_aug = | np.copy(img) | numpy.copy |
"""main threadcount module."""
import json
import csv
from types import SimpleNamespace
from collections import OrderedDict, UserList
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import lmfit
import mpdaf.obj
import astropy.units as u
from . import lines
from . import models
from . import mpdaf_ext # noqa: F401
FLAM16 = u.Unit(1e-16 * u.erg / (u.cm ** 2 * u.s * u.AA))
"""A header["BUNIT"] value we have."""
FLOAT_FMT = ".8g"
"""Default formatting for floats in output files."""
DEFAULT_FIT_INFO = "aic_real bic_real chisqr redchi success".split()
"""Define typical ModelResult information we might want."""
def open_fits_cube(
data_filename, data_hdu_index=None, var_filename=None, var_hdu_index=None, **kwargs
):
"""Load a fits file using :class:`mpdaf.obj.Cube`, and handle variance in separate file.
I highly recommend being explicit in the parameters and not relying on the
guessing that mpdaf can perform.
Parameters
----------
data_filename : str
Path to file containing data
data_hdu_index : int, optional
Index indicating which hdu contains the data (starting with 0), by default None (then the
:class:`mpdaf.obj.Cube` constructor will attempt
to guess the correct extension)
var_filename : str, optional
Path to file containing variance, by default None (No variance will be
loaded. Unless `data_hdu_index` = None, and then the
:class:`mpdaf.obj.Cube` constructor will attempt to
automatically load variance from `data_filename`)
var_hdu_index : int, optional
Index indicating which hdu contains the variance (starting with 0), by
default None (then the :class:`mpdaf.obj.Cube` constructor will attempt
to guess the correct extension)
**kwargs : dict, optional
Any keyword arguments to pass to :class:`mpdaf.obj.Cube`, such as `unit`
Returns
-------
:class:`mpdaf.obj.Cube`
A data cube.
"""
# no variance given:
if var_filename is None:
cube = mpdaf.obj.Cube(data_filename, ext=data_hdu_index, **kwargs)
# data and variance stored in same file:
elif data_filename == var_filename:
cube = mpdaf.obj.Cube(
data_filename, ext=(data_hdu_index, var_hdu_index), **kwargs
)
# data and variance stored in different files:
else:
cube = mpdaf.obj.Cube(data_filename, ext=data_hdu_index, **kwargs)
varcube = mpdaf.obj.Cube(var_filename, ext=var_hdu_index, **kwargs)
# varcube is loaded as masked array.
cube._var = varcube.data.data
cube._mask |= varcube.mask
# test for FLAM16:
if cube.unit == u.dimensionless_unscaled:
if cube.data_header.get("BUNIT") == "FLAM16":
cube.unit = FLAM16
return cube
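# Example (illustrative sketch; the file names and HDU indices below are assumptions,
# not taken from any real dataset):
# >>> cube = open_fits_cube("galaxy_cube.fits", data_hdu_index=1,
# ...                       var_filename="galaxy_variance.fits", var_hdu_index=1)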
def de_redshift(wavecoord, z=0, z_initial=0):
r"""De-redshift the WaveCoord in-place.
Parameters
----------
wavecoord : :class:`mpdaf.obj.WaveCoord`
The wavelength coordinate to be de-redshifted
z : float, optional
The redshift of the object whose wavecoord to de-redshift, by default 0 (i.e. no change)
z_initial : float, optional
The redshift currently applied to the wavecoord, by default 0 (i.e. none applied)
Notes
-----
I tried to make z a new attribute in `wavecoord`, but due to details in how
slicing works, this was not a simple change. Therefore z must be stored in
a variable externally to the wavecoord.
"""
wavecoord.set_crval(wavecoord.get_crval() * (1 + z_initial) / (1 + z))
wavecoord.set_step(wavecoord.get_step() * (1 + z_initial) / (1 + z))
return z
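# Worked example (illustrative sketch, not part of the original module): de-redshift a
# wavelength axis that starts at 5150 A for an object at z = 0.03. The WaveCoord values
# are made up to keep the arithmetic easy to follow.
_demo_wave = mpdaf.obj.WaveCoord(crval=5150.0, cdelt=1.25, cunit=u.angstrom)
_demo_z = de_redshift(_demo_wave, z=0.03, z_initial=0)
# _demo_wave.get_crval() is now 5150 / 1.03 = 5000.0 A (the rest-frame value) and the
# step is shrunk by the same factor; the function returns z purely for bookkeeping.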
# TODO: Add in a part where the user can input in a redshift and move the
# histogram or center line or whatever around. i.e. user input at the end.
def tweak_redshift(
cube,
z_gal,
center_wavelength=lines.OIII5007,
wavelength_range=(-15, 15), # This is in Angstroms
pixel_mask=None,
):
"""Interactively choose a new redshift.
This procedure has several steps.
1. Select which spaxels to use for calculating the redshift via one of these options:
* use the input parameter `pixel_mask`
* Select pixels with a high integrated flux value in the selected wavelength range.
These are likely to be the galaxy. The user will interact with the terminal
and view a plot to interactively change the lower threshold for the desired
pixels. To accept the value plotted, leave the entry blank and press enter.
2. Fit a :class:`~threadcount.models.Const_1GaussModel` to the selected spaxels.
3. Extract the parameter value for 'g1_center' to get the center wavelength
of the fitted gaussian and compute the median center.
4. Calculate the redshift required for the median center to be equal to
`center_wavelength` using the formula::
new_z = (median_center / `center_wavelength`) * (1 + `z_gal`) - 1
5. Display a plot showing the spaxels used and a histogram displaying all the
center wavelengths (with `center_wavelength` subtracted, so it displays
the change from ideal)
Parameters
----------
cube : :class:`mpdaf.obj.Cube`
A datacube containing the wavelength range set in these parameters
z_gal : float
The redshift of the object which has already been applied to the `cube`
center_wavelength : float, optional
The center wavelength of the emission line to fit, by default :const:`threadcount.lines.OIII5007`
wavelength_range : array-like [float, float], optional
The wavelength range to fit, in Angstroms. These are defined as a change
from the `center_wavelength`, by default (-15, 15)
Returns
-------
float
The redshift selected by the user.
"""
plt.close()
print("====================================")
print("Tweak reshift procedure has started.")
print("====================================\n\n")
print("Using line {:.4g} +/- {} A".format(center_wavelength, wavelength_range[1]))
# retrieve spectral subcube from cube.
subcube = cube.select_lambda(
center_wavelength + wavelength_range[0],
center_wavelength + wavelength_range[1],
)
fluxmap = subcube.sum(axis=0)
if pixel_mask is None:
# use the sum of the flux and mask at value, changed by user interaction.
plot_title = (
"Tweak Redshift:\n"
"Spaxels to fit. Set mask level in console.\n"
"Val.=sum of spectrum (arb. units)"
)
limit = interactive_lower_threshold(fluxmap, title=plot_title)
pixel_mask = (fluxmap > limit).mask
fluxmap.mask = pixel_mask
fig, axs = plt.subplots(ncols=2, gridspec_kw={"top": 0.85})
fig.suptitle(
"Tweak z using line {:.4g} +/- {} A".format(
center_wavelength, wavelength_range[1]
)
)
fluxmap.plot(
ax=axs[0],
title="Spaxels included in histogram\nVal.=sum of spectrum (arb. units)",
colorbar="v",
zscale=True,
)
valid_pixels = np.where(~fluxmap.mask)
# loop over valid pixels, do the fit, and store in results list.
results = []
model = models.Const_1GaussModel()
params = None
print("Fitting selected spaxels with gaussian model...")
for y, x in zip(*valid_pixels):
this_mr = subcube[:, y, x].lmfit(model, params=params, method="least_squares")
if params is None:
params = this_mr.params
results += [this_mr]
fit_centers = vget_param_values(results, "g1_center")
# remove invalid values, specifically centers outside the range given:
fit_centers = fit_centers[fit_centers < (center_wavelength + wavelength_range[1])]
fit_centers = fit_centers[fit_centers > (center_wavelength + wavelength_range[0])]
plt.sca(axs[1])
plt.hist(fit_centers - center_wavelength, bins=20)
plt.title("Center wavelength histogram")
plt.xlabel(r"change from {:.5g} $\AA$ [$\AA$]".format(center_wavelength))
plt.axvline(0, color="black", label=r"{:.5g} $\AA$".format(center_wavelength))
plt.axvline(
np.nanmedian(fit_centers) - center_wavelength, color="red", label="median"
)
plt.legend()
plt.show(block=False)
print("Redshift from input settings (for reference) : {}".format(z_gal))
new_z = (np.nanmedian(fit_centers) / center_wavelength) * (1 + z_gal) - 1
print("Redshift calculated from the median of the fit centers: {}".format(new_z))
change_z = input(
"Do you want to update the redshift with the calculated value {} ([y]/n)? ".format(
new_z
)
)
if change_z.lower().startswith("n"):
return_z = z_gal
message = "The original redshift has been kept: {}".format(return_z)
else:
return_z = new_z
message = "The redshift has been updated to {}".format(return_z)
print("Tweak reshift procedure is finished. " + message)
return return_z
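# Numeric sketch of the redshift update performed above (values are made up): a median
# fitted center of 5008.24 A for a line at 5006.84 A, with z_gal = 0.0300, gives
# (5008.24 / 5006.84) * (1 + 0.0300) - 1 ~= 0.0303.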
def interactive_lower_threshold(image, title=""):
"""Create plot and interact with user to determine the lower threshold for valid data.
The image is plotted, with a mask applied which initially masks the lower 95%
of data. A prompt is given in the console, asking for user input. If the user
enters no input and presses <enter>, that indicates the currently shown level
has been accepted by the user. Otherwise, the user may input a different
number. The plot will be redrawn and the input is requested again.
This function is primarily used to determine the cutoff indicating the
spaxels containing the most flux, hopefully indicating the galaxy center.
We then will use those pixels to fit an emission line, and find the centers.
This can be used to tweak the redshift if desired.
Parameters
----------
image : :class:`mpdaf.obj.image.Image`
An mpdaf image we wish to threshold interactively
title : str, optional
The title given to the plot displaying the `image`, by default ""
Returns
-------
limit : float
The determined threshold for valid data.
"""
limit = np.quantile(image.data, 0.95)
m_img = image > limit
m_img.plot(zscale=True, title=title, colorbar="v")
fig = plt.gcf()
plt.show(block=False)
while True:
print("Change the threshold for valid pixels.")
print(
"You may try multiple thresholds. Leave the entry blank and press Enter to confirm your choice."
)
print("current limit: {}".format(limit))
new_limit = input(
"Set new limit: (or leave blank and press Enter to continue) "
)
# if input is convertable to float, redo loop, otherwise exit loop
try:
limit = float(new_limit)
except (ValueError, TypeError):
plt.close()
return limit
m_img = image > limit
plt.close(fig)
m_img.plot(zscale=True, title=title, colorbar="v")
fig = plt.gcf()
plt.show(block=False)
def get_param_values(params, param_name, default_value=np.nan):
"""Retrieve parameter value by name from lmfit objects.
Parameters
----------
params : :class:`lmfit.model.ModelResult` or :class:`lmfit.parameter.Parameters`
Input object containing the value you wish to extract
param_name : str
The :class:`lmfit.parameter.Parameter` name, whose value will be returned.
Also may be a :class:`lmfit.model.ModelResult` attribute, such as 'chisqr'
default_value : Any, optional
The return value if the function cannot find the `param_name`, by default np.nan
Returns
-------
float, bool, str, or type(`default_value`)
* If type(`params`) is :class:`~lmfit.parameter.Parameters`: `params`.get(`param_name`).value
* If type('params`) is :class:`~lmfit.model.ModelResult`:
* Tries first: `params`.params.get(`param_name`).value
* Tries second: `params`.get(`param_name`), which allows for ModelResult attributes.
* If all these fail, returns `default_value`
See Also
--------
get_param_values : Use this version of the function on 1 input object
vget_param_values : Use this version of the function on an array of input objects.
This is a vectorized version of this function that you can apply to
arrays (see: https://numpy.org/doc/stable/reference/generated/numpy.vectorize.html)
Examples
--------
>>> import threadcount.fit
>>> from lmfit.models import GaussianModel
>>> model = GaussianModel()
>>> params = model.make_params()
>>> threadcount.fit.get_param_values(params,'sigma')
1.0
>>> # or use the vectorized version:
>>> params2 = model.make_params(sigma=4)
>>> a = np.array([params,params2], dtype=object)
>>> threadcount.fit.vget_param_values(a,"sigma")
array([1., 4.])
"""
# Quick test, because I know sometimes `params` will be None.
if params is None:
return default_value
# The order of the following try/except blocks is from most-nested to least-nested
# extraction.
# 1st: assume `params` is actually a lmfit ModelResult, and that we are
# trying to extract the parameter `param_name` value from that modelresult's params.
try:
return params.params.get(param_name).value
except AttributeError:
pass
# 2nd: assume `params` is a lmfit Parameters object, and that we are
# trying to extract the parameter `param_name` value from it.
try:
return params.get(param_name).value
except AttributeError:
pass
# 3rd: This works for everything else. If `params` is a modelresult and
# if `param_name` is a modelresult attribute, this will return it properly
# If `params` has no attribute `get` (such as if it is type int), then
# default value is returned.
try:
return params.get(param_name, default_value)
except AttributeError:
return default_value
vget_param_values = np.vectorize(get_param_values)
def iter_spaxel(image, index=False):
"""Create an iterator over the spaxels of successive image pixels in a 2d numpy array.
Each call to the iterator returns the value of the array `image` at a spaxel.
The first spaxel to be addressed of image is
pixel 0,0. Thereafter the X-axis pixel index is incremented by one
at each call (modulus the length of the X-axis), and the Y-axis
pixel index is incremented by one each time that the X-axis index
wraps back to zero.
The return value of iter_spaxel() is a python generator that can be
used in loops
Parameters
----------
image : 2d `numpy.ndarray`
The image to be iterated over.
index : bool
If False, return just a value at each iteration.
If True, return both a value and the pixel index
of that spaxel in the image (a tuple of image-array
indexes along the axes (y,x)).
Yields
------
dtype of `image`
"""
if index:
for y, x in np.ndindex(image.shape):
yield image[y, x], (y, x)
else:
for y, x in np.ndindex(image.shape):
yield image[y, x]
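# Illustrative usage sketch (not part of the original module): iterate over a tiny 2x2
# array of placeholder "spaxel" values together with their (y, x) indices.
_demo_img = np.arange(4).reshape(2, 2)
_demo_spaxels = [(val, idx) for val, idx in iter_spaxel(_demo_img, index=True)]
# _demo_spaxels == [(0, (0, 0)), (1, (0, 1)), (2, (1, 0)), (3, (1, 1))]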
def process_settings(default_settings, user_settings_string=""):
"""Combine the default settings with any user settings.
Process the user settings and override the default if a corresponding user
setting exists. Print a warning if there is a missing user setting.
Parameters
----------
default_settings : dict
A dictionary containing all required settings for the script to run.
user_settings_string : str, optional
A string (created by json.dumps(dictionary) containing user settings.),
by default ""
Returns
-------
:class:`types.SimpleNamespace`
A simple namespace containing the settings, for easier access to attributes.
"""
if user_settings_string == "":
return SimpleNamespace(**default_settings)
# otherwise process them.
user_settings = json.loads(user_settings_string)
# determine if there are missing settings in the user's and report them.
missing = {
k: default_settings[k] for k in default_settings.keys() - user_settings.keys()
}
for k, v in missing.items():
print("Missing setting {}, using default value {}".format(k, v))
final_settings = SimpleNamespace(**user_settings)
final_settings.__dict__.update(**missing)
return final_settings
def process_settings_dict(default_settings, user_settings=None):
"""Combine the default settings with any user settings.
Process the user settings and override the default if a corresponding user
setting exists. Print a warning if there is a missing user setting.
Parameters
----------
default_settings : dict
A dictionary containing all required settings for the script to run.
user_settings : dict, optional
A dictionary containing user settings, by default None
Returns
-------
:class:`types.SimpleNamespace`
A simple namespace containing the settings, for easier access to attributes.
"""
if not user_settings: # takes care of "", None, and {}
return SimpleNamespace(**default_settings)
# determine if there are missing settings in the user's and report them.
missing = {
k: default_settings[k] for k in default_settings.keys() - user_settings.keys()
}
for k, v in missing.items():
print("Missing setting {}, using default value {}".format(k, v))
final_settings = SimpleNamespace(**user_settings)
final_settings.__dict__.update(**missing)
return final_settings
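# Illustrative usage sketch: the setting names below are made up. User values override
# the defaults, and the missing key falls back to its default (with a printed warning).
_demo_defaults = {"snr_lower_limit": 5, "block_image_size": 3}
_demo_settings = process_settings_dict(_demo_defaults, {"snr_lower_limit": 10})
# _demo_settings.snr_lower_limit == 10 and _demo_settings.block_image_size == 3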
def get_region(rx, ry=None):
"""Select pixels in ellipse of radius rx, ry from (0,0).
Return an array of np.array([row,col]) that are within an ellipse centered
at [0,0] with radius x of rx and radius y of ry.
Parameters
----------
rx : number or list of numbers [rx, ry]
ry : number
Returns
-------
numpy.ndarray
"""
# try to process a list if it is given as parameter
try:
rx, ry = rx[0], rx[1]
# expect TypeError if rx is not a list.
except TypeError:
pass
# Defaults to a circle if ry=None
if ry is None:
ry = rx
rx = abs(rx)
ry = abs(ry)
rx_int = round(rx)
ry_int = round(ry)
indicies = (np.mgrid[-ry_int : ry_int + 1, -rx_int : rx_int + 1]).T.reshape(-1, 2)
# create boolean array of where inside ellipse is:
rx2 = rx * rx
ry2 = ry * ry
# remember python likes row, column convention, so y comes first.
inside = (
indicies[:, 0] * indicies[:, 0] / ry2 + indicies[:, 1] * indicies[:, 1] / rx2
<= 1
)
return indicies[inside]
def get_reg_image(region):
"""Create kernel image from list of pixels.
The input `region` is typically the output of :func:`get_region`.
This kernel image is used for spatial averaging, and it's values
are either 1 (if included in `region`) or 0.
Parameters
----------
region : list of pixel positions (y, x)
The list of pixel positions relative to an arbitrary point,
usually (0,0) in the case of output from :func:`get_region`, to
set to value 1 in the output image
Returns
-------
2d numpy array
An array consisting of the smallest area that will encompass the list of
pixels in `region`, with the relative shape of `region` preserved. The
array is 0 except for `region` pixels are set to 1.
"""
# calculate the extent of the list of inputs:
mins = region.min(axis=0)
maxs = region.max(axis=0)
shape = maxs - mins + 1
# initialize output
output = np.zeros(shape)
# shift the pixel list by mins to reference the new origin.
inside = [tuple(pix - mins) for pix in region]
# set those pixels in the pixel list to 1.
output[tuple(zip(*inside))] = 1
return output
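# Worked example (illustrative sketch): a radius-1 circle around (0, 0) selects the five
# pixels of a plus shape, and get_reg_image turns them into a 3x3 kernel of ones and
# zeros suitable for spatial_average below.
_demo_region = get_region(1)  # rows of (y, x) offsets: (0, -1), (-1, 0), (0, 0), ...
_demo_kernel = get_reg_image(_demo_region)
# _demo_kernel == [[0, 1, 0],
#                  [1, 1, 1],
#                  [0, 1, 0]]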
def spatial_average(cube, kernel_image, **kwargs):
"""Apply kernel image smoothing on every spatial image in a cube.
This function will correctly apply a smoothing image `kernel_image` to the
data and variance arrays in `cube`. The normalization is properly propagated
to the variance array.
Parameters
----------
cube : :class:`mpdaf.obj.cube.Cube`
The data you want smoothed
kernel_image : 2d numpy array
The smoothing image to apply
**kwargs : dict
Keyword arguments passed to :func:`.mpdaf_ext.correlate2d_norm`
Returns
-------
:class:`mpdaf.obj.cube.Cube`
Spatially smoothed cube.
"""
# determine if variance array of output should be initialized:
var_init = None
if cube.var is not None:
var_init = np.empty
# initialize empty loop output:
output = cube.clone(data_init=np.empty, var_init=var_init)
# loop over all images in cube, and set the output to output.
for ima, k in mpdaf.obj.iter_ima(cube, index=True):
output[k, :, :] = ima.correlate2d_norm(kernel_image)
return output
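# Example (illustrative sketch; `cube` is assumed to be an already-loaded mpdaf Cube,
# e.g. the return value of open_fits_cube above):
# >>> kernel = get_reg_image(get_region(2))
# >>> smoothed_cube = spatial_average(cube, kernel)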
def get_SNR_map(cube, signal_idx=None, signal_Angstrom=None, nsigma=5, plot=False):
"""Create Image of signal to noise ratio in a given bandwidth.
This bandwidth may be selected in 3 different ways:
1. Choose the indices of the wavelength array to include (`signal_idx`)
2. Choose the wavelengths to include (`signal_Angstrom`)
3. Have the program fit a gaussian to the data, and choose how many sigmas
to include (`nsigma`). (Uses function: :func:`get_SignalBW_idx`)
If multiple of `signal_idx`, `signal_Angstrom`, and `nsigma` are given, the
order of preference is as follows: `signal_idx` overrides all others, then
`signal_Angstrom`, and finally the least preferenced is `nsigma`, which will
only be used if either `signal_idx` or `signal_Angstrom` are not specified.
Parameters
----------
cube : :class:`mpdaf.obj.Cube`
The cube containing data, var, and wave attributes
signal_idx : array [int, int], optional
The indices of the wavelength array to use, by default None
signal_Angstrom : array [float, float], optional
The wavelengths in Angstroms to use, by default None
nsigma : float, optional
Fit a gaussian, and use center wavelength +/- `nsigma` * sigma, by default 5
plot : bool, optional
Plot the whole image spectrum and highlight the SNR bandwidth,
by default False. A tool for troubleshooting/setup.
Returns
-------
:class:`mpdaf.obj.Image`
An Image where the pixel values indicate the signal to noise in the
selected bandwidth. Given a Spectrum for each spaxel, the SNR for the
spaxel is calculated by sum(Spectrum.data)/sqrt(sum(Spectrum.var)).
Examples
--------
Given a Cube with name `this_cube`, then the default bandwidth selection
is to fit a gaussian, and use the gaussian center +/- 5*sigma. This is
implemented by the following command:
>>> import threadcount as tc
>>> snr_image = tc.fit.get_SNR_map(this_cube)
To use the same method but change the width to, for example,
gaussian center +/- 3*sigma, (meaning nsigma=3), then use the following:
>>> snr_image = tc.fit.get_SNR_map(this_cube, nsigma=3)
If you know the specific wavelengths of the bandwidth you would like to use,
(for example, 5000-5020 A) then use the following:
>>> snr_image = tc.fit.get_SNR_map(this_cube, signal_Angstrom=[5000,5020])
And finally, if you know the pixel indices (for example, indices 31-60).
Note, this is an inclusive range, meaning in this case pixel 60 will be
included in the SNR calculation.
>>> snr_image = tc.fit.get_SNR_map(this_cube, signal_idx=[31,60])
"""
if signal_idx is None:
if signal_Angstrom is None:
signal_idx = get_SignalBW_idx(cube, nsigma=nsigma, plot=plot)
plot = False # This is taken care of inside the function.
else:
signal_idx = cube.wave.pixel(signal_Angstrom, nearest=True)
subcube = cube[signal_idx[0] : signal_idx[1] + 1, :, :]
if plot is True:
plt.figure()
spectrum = cube.sum(axis=(1, 2))
title = "Total image spectrum"
try:
title = " ".join([cube.label, title])
except AttributeError:
pass
spectrum.plot(title=title)
plt.axvspan(
*cube.wave.coord(signal_idx),
facecolor=plt.rcParams["axes.prop_cycle"].by_key()["color"][1],
alpha=0.25,
label="SNR range",
zorder=-3,
)
plt.legend()
subcube_sum = subcube.sum(axis=0)
result_image = subcube[0].clone()
result_image.data = subcube_sum.data / np.sqrt(subcube_sum.var)
return result_image
def get_SignalBW_idx(cube, nsigma=5, plot=False):
"""Determine the wavelength indices containing signal.
This function computes an average spectrum using the whole `cube`. Then,
fits a gaussian plus constant (:class:`~threadcount.models.Const_1GaussModel`).
The gaussian center and sigma, along with `nsigma`, are used to compute and
return the indices corresponding to
:math:`[center - nsigma*sigma, center + nsigma*sigma]`.
The plot option may be used for debugging for a visual of the spectrum and
the fit, and the computed range.
Parameters
----------
cube : :class:`mpdaf.obj.Cube`
The cube containing data, var, and wave attributes
nsigma : float, optional
The number of sigmas to include on each side of the gaussian center,
by default 5
plot : bool, optional
Display a plot of the spectrum and fit, with the bandwidth highlighted,
by default False
Returns
-------
array, [int, int]
The indices of the wavelength array corresponding to the calculated
bandwidth.
"""
ydata = np.nanmean(
np.nanmean(cube.data, axis=2), axis=1
) # gives 1d spectrum average for all of data.
x = cube.wave.coord()
gauss_model = models.Const_1GaussModel()
params = gauss_model.guess(data=ydata, x=x)
mod_result = gauss_model.fit(ydata, params, x=x)
center = mod_result.values["g1_center"]
sigma = mod_result.values["g1_sigma"]
low = center - nsigma * sigma
high = center + nsigma * sigma
if plot is True:
plt.figure()
mod_result.plot()
plt.axvspan(
low,
high,
facecolor=plt.rcParams["axes.prop_cycle"].by_key()["color"][2],
alpha=0.25,
label="SNR range",
zorder=-3,
)
plt.legend()
title = "Total image spectrum"
try:
title = " ".join([cube.label, title])
except AttributeError:
pass
plt.suptitle(title)
xrange = [low, high]
# find index of nearest element
xrange_idx = cube.wave.pixel(xrange, nearest=True)
return xrange_idx
def get_index(array, value):
"""Determine the index of 'array' which is closest to `value`.
Parameters
----------
array : float or array/list/iterable of floats
The list of numbers to search. Will be processed with np.array(`array`).
value : float or array/list/iterable of floats
The value(s) to search for in `array`
Returns
-------
int or list of ints
The index (or indices) of array where the value is closest to the search
value.
Examples
--------
>>> get_index([10,11,12,13,14],[13,22])
[3, 4]
>>> get_index(4,[3,0])
[0, 0]
>>> get_index([4,0],10)
0
"""
array = np.array(array)
# value may be a list of values.
try:
value_iter = iter(value)
except TypeError:
# This catches anything if value is not a list.
return (np.abs(array - value)).argmin()
return [(np.abs(array - this_value)).argmin() for this_value in value_iter]
def get_aic(model, error=np.nan):
"""Return the aic_real of a successful fit.
Parameters
----------
model : :class:`lmfit.model.ModelResult`
The modelresult to extract info from.
error : float, optional
The numeric value to assign any unsuccessful modelresult, by default np.nan
Returns
-------
float
The modelresult's aic_real, or `error`
"""
try:
if model.success is True:
return model.aic_real
except AttributeError:
pass
return error
vget_aic = np.vectorize(get_aic, doc="Vectorized :func:`get_aic`.")
def choose_model_aic_single(model_list, d_aic=-150):
r"""Determine best modelresult in a list, chosen by computing :math:`{\Delta}aic`.
Note: we now look at `aic_real`, defined in :meth:`threadcount.lmfit_ext.aic_real`
This function uses the aic (Akaike Information Criterion) to choose between
several models fit to the same data. Our general philosophy: choose simpler
models.
The default change in aic we consider
significant (-150) is quite high compared to a standard -10 you may see in
statistics, since we are intending to identify the model components with
physical processes in the galaxy. This value was chosen by observing
fits to several different spectra and choosing the desired number of gaussian
components by eye, then finding a :math:`{\Delta}aic` which came close to
accomplishing that.
via wikipedia: The :math:`exp((AIC_{min} - AIC_i)/2)` is known as the
relative likelihood of model i.
The numbers returned begin with 1, not 0 as is usual in python. If no results
in `model_list` are valid, then -1 will be returned.
The algorithm goes as follows:
* Let's say `model_list` = [model1, model2] (note the numbers begin with 1).
* If model2.aic_real - model1.aic_real < `d_aic`:
* return 2
* else:
* return 1.
* Let's now say `model_list` = [model1, model2, model3].
* If model2.aic_real - model1.aic_real < `d_aic`:
* This means that model2 is better. We will eliminate
model1 as an option, then apply bullet point 1, with [model2, model3],
returning whichever number is better (so the return value will be 2 or 3).
* else:
* This means that model2 is not better than model1. We will eliminate
model2 as an option, then apply bullet point 1, using [model1, model3],
returning either 1 or 3.
* TODO: I think if we get a choice of 3 from this way, we should flag
it for manual inspection, since it may only be slightly better than
model2 and so our philosophy of less complex is better would be violated.
Parameters
----------
model_list : list of :class:`lmfit.model.ModelResult`
A list of different model results which have been fit to the same data.
Right now, the length must be no longer than 3. The order of the models
is assumed to be from least complex -> more complex.
d_aic : float, optional
The change in fit aic (Akaike Information Criterion) indicating
a significantly better fit, by default -150.
Returns
-------
int
The index+1 of the model chosen with this algorithm. Returns -1 if all
models are invalid.
"""
# Python starts counting at 0 (0-based indexing.). choices starts counting at 1.
# Pay attention that the returned value is the python index +1
# return -1 for invalid:
if model_list is None:
return -1
# return 1 if only one choice:
if len(model_list) == 1:
return 0 + 1
# model list is assumed to be in order simple -> complex.
# philosophy: prefer simpler models.
aic = vget_aic(model_list, error=np.nan)
# if all nans, then return -1 (invalid)
if np.all(np.isnan(aic)):
return -1
# print(np.array(aic)-aic[0])
# now we have different ways of choosing based on if 2 or 3 models:
# TODO: generalize to more than 3. Should be easy enough, given the
# explanation of the algorithm in the docstring.
if len(model_list) == 2:
if (aic[1] - aic[0]) < d_aic:
return (
1 + 1
) # these +1's are for translation to human interaction indexing....
else:
return 0 + 1
if len(model_list) == 3:
if (aic[1] - aic[0]) < d_aic:
# True, this means 2 gaussians better than 1.
# Eliminates id 0 as option and do more tests:
if (aic[2] - aic[1]) < d_aic:
# True, this means 3 gaussians better than 2. choose this.
return 2 + 1
else:
return 1 + 1
else:
# False, this means 2 gaussians not better than 1.
# Eliminates id 1 as option and do more tests:
if (aic[2] - aic[0]) < d_aic:
# True, this means 3 gaussians better than 1. choose this.
return 2 + 1
else:
return 0 + 1
# safest thing to return is 0 i guess?
return 0 + 1
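# Worked example (illustrative sketch): stand-in results carrying only the two
# attributes the selection logic reads, `success` and `aic_real`. Going from 1 to 2
# gaussians improves the aic by -200 (significant at the default d_aic of -150), while
# 2 to 3 only improves it by -50, so the 2-gaussian model (choice 2) is returned.
_demo_fits = [SimpleNamespace(success=True, aic_real=a) for a in (-1000.0, -1200.0, -1250.0)]
_demo_choice = choose_model_aic_single(_demo_fits)  # -> 2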
def choose_model_aic(model_list, d_aic=-150):
"""Broadcast :func:`choose_model_aic_single` over array.
Parameters
----------
model_list : array-like, containing :class:`lmfit.model.ModelResult`
Array representing spatial dimensions and the last dimension contains
the model result for different models fitted to that spaxel. Works also
for simply a list of model results for one pixel.
d_aic : float, optional
The change in fit aic (Akaike Information Criterion) indicating
a significantly better fit, by default -150.
Returns
-------
array of shape model_list.shape[:-1] containing int, or int
Spatial array containing the chosen model number, starting with 1.
invalid entries are given the value -1.
See Also
--------
:func:`choose_model_aic_single` : A detailed discussion of this function.
"""
# assume the first dimensions of model_list are spatial and the last is
# the different models.
# Handle a single pixel:
model_list = np.array(model_list)
shape = model_list.shape
if len(shape) == 1:
single = choose_model_aic_single(model_list, d_aic=d_aic)
return single
# if we have passed that block, we know we have an array of size shape to loop over.
# create output
output = np.empty(shape[:-1], dtype=int)
for index in np.ndindex(*shape[:-1]):
output[index] = choose_model_aic_single(model_list[index], d_aic=d_aic)
return output
def marginal_fits(fit_list, choices, flux=0.25, dmu=0.5):
"""Determine which fits should be inspected by hand.
We noticed at times that when fitting multiple gaussians, there was often
a case of an "embedded" gaussian. This is when a small flux narrow
gaussian was fit at the same center wavelength as the highest flux
gaussian. This function is intended to identify those cases and ask the
user to verify that this is actually the desired fit.
This function analyzes the selected model (provided by the combination of
`fit_list` and `choices`), and determines if user inspection is needed based
on the relative characteristics of the gaussians. This procedure depends on
the analyzed components having parameter names ending in "flux" and "center",
and was originally programmed to analyze a multi-gaussian model.
Note that ("amplitude" is used as a fallback for "flux",
because lmfit's gaussian use this name to denote integrated flux).
The "main" gaussian is selected as the model
component with highest flux, lets call this main gaussian `g0`.
For the other components `g#`, we compute `g#_flux/g0_flux` and
`g#_center` - `g0_center`. If any component has both the following:
* `g#_flux/g0_flux < flux`
* `g#_center - g0_center < dmu`
then that fit will be flagged for examination.
Parameters
----------
fit_list : array-like of :class:`lmfit.model.ModelResult`
This assumes a spatial array (n,m) of ModelResults, with an outer dimension
varying in the model used for that spaxel. e.g. shape = (3,n,m) for 3
different models.
choices : array-like of shape `fit_list[0]` containing int
This will be used to select which model for a given spaxel will be
analyzed in this function. For example, if there are 3 models, the value
for choices must be 1,2,or 3 (or negative to indicate invalid).
flux : float, optional
The gaussian flux ratio to main gaussian component indicating that
this should be inspected by hand, by default 0.25
dmu : float, optional
dmu stands for "delta mu": the allowed change in the x center between a
gaussian component and the main gaussian, by default 0.5.
Returns
-------
numpy array of boolean
Array of same shape as choices, where True means the user should inspect
this spaxel's fit, and False means this spaxel should be okay with the
automatic guessing.
"""
# returns boolean array of shape choices.
# True indicates you need to manually look at them.
# Python starts counting at 0 (0-based indexing.). choices starts counting at 1.
# subtract 1 from choices to convert between these 2 indexing regimes.
chosen_models = np.choose(choices - 1, fit_list, mode="clip")
output = np.empty_like(choices, dtype=bool)
# get the gaussian component comparison for the chosen models.
# 2d index iteration.
for index, modelresult in np.ndenumerate(chosen_models):
if (
modelresult is None
): # this means pixel was not fit, because it didn't pass snr test.
# therefore user does not need to check.
output[index] = False
continue
# if chosen model fit has not succeeded, then user checks.
if not modelresult.success:
output[index] = True
continue # continues to next iteration in the for loop.
# if model is a single gaussian, user does not need to check.
if get_ngaussians(modelresult) == 1:
output[index] = False
continue
# more than 1 gaussian component:
# do tests based on flux and dmu parameters.
# array of [g_flux/g0_flux, g_center - g0_center]
components = get_gcomponent_comparison(modelresult)
# test if the conditions are met for any component.
# if component[0] < flux AND component[1] < dmu, then user must decide.
# (set to True here.)
user_decides = False
for component in components:
if (component[0] < flux) and (np.abs(component[1]) < dmu):
user_decides = True
break # stop the inner loop because we know the answer already.
output[index] = user_decides
return output
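# Numeric sketch of the flag test above (component comparisons are made up): a secondary
# gaussian with 10% of the main flux sitting 0.2 A from the main center satisfies both
# thresholds, so that spaxel would be flagged; (0.60, 3.0) would not.
_demo_components = [(0.10, 0.2), (0.60, 3.0)]
_demo_flag = any(f < 0.25 and abs(dm) < 0.5 for f, dm in _demo_components)  # -> True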
def get_gcomponent_comparison(fit):
"""Determine component comparison to this highest flux gaussian.
This function finds the highest flux gaussian (we will name it g0),
and returns a list for the other components containing for each entry:
[g#_flux/g0_flux, g#_center - g0_center].
Parameters
----------
fit : :class:`lmfit.model.ModelResult`
The ModelResult to analyze the components for.
Returns
-------
list of [float,float]
A list containing the list [g#_flux/g0_flux, g#_center - g0_center]
for each component g# that is not g0.
"""
prefixes = [comp.prefix for comp in fit.components if "gaussian" in comp._name]
if len(prefixes) == 1: # means just one gaussian component
return []
# initialize lists for loop
values = []
centers = []
for pre in prefixes:
# values is the value of the flux parameter, and if the flux parameter
# doesn't exist, it falls back on trying to find the value of the
# amplitude parameter.
values += [
fit.params.get(pre + "flux", fit.params.get(pre + "amplitude")).value
]
centers += [fit.params[pre + "center"].value]
# ID the index of the maximum flux.
maxval_idx = np.argmax(values)
# convert to numpy array for easier math.
values = np.array(values)
centers = np.array(centers)
# Column stack allows us to retrieve, e.g. output[0] = [flux/flux0, center-center0]
# note that inside here, we remove the maximum-flux gaussian.
output = np.column_stack(
[
| np.delete(values / values[maxval_idx], maxval_idx) | numpy.delete |
"""
Sparse Layer
@author: <NAME>
"""
import tensorflow as tf
import numpy as np
class SparseLayer(tf.keras.layers.Layer):
def __init__(self, next_layer_dim, activation=None, n_trainable=0):
super(SparseLayer, self).__init__()
self.final_shape = next_layer_dim
self.num_params = n_trainable
if activation == 'relu':
self.nonlinear = tf.nn.relu
elif activation == 'sigmoid':
self.nonlinear = tf.math.sigmoid
elif activation == 'tanh':
self.nonlinear = tf.math.tanh
elif activation == 'softmax':
self.nonlinear = tf.nn.softmax
else:
self.nonlinear = None
def build(self, input_shape):
input_shape = input_shape[1]
self.dim = input_shape + self.final_shape
total_params = self.final_shape * input_shape
# Validate before sampling so the descriptive error below is actually reachable.
if self.num_params > total_params:
raise ValueError('Number of trainable parameters exceeds number of total parameters.')
self.indexes = np.random.choice(np.arange(total_params),
size=self.num_params, replace=False)
# construct the variable part
mask_1 = np.zeros(total_params, dtype=np.float32)
mask_1[self.indexes] = 1
self.train_mask = tf.constant(mask_1, dtype=tf.float32)
limit = np.sqrt(6 / self.dim)
initializer = np.random.uniform(-limit, limit, total_params)
self.train_weights = tf.Variable(initializer, trainable=True, dtype=tf.float32)
# construct the constant part
mask_2 = | np.ones(total_params, dtype=np.float32) | numpy.ones |
import numpy as np
from meta_mb.utils.filters import MeanStdFilter
from meta_mb.utils import Serializable
from meta_mb.policies.np_base import NpPolicy
from collections import OrderedDict
class LinearPolicy(NpPolicy):
"""
Linear policy class that computes action as <W, ob>.
"""
def __init__(self,
obs_dim,
action_dim,
name='np_policy',
**kwargs):
NpPolicy.__init__(self, obs_dim, action_dim, name, **kwargs)
self.policy_params = OrderedDict(W=np.zeros((action_dim, obs_dim), dtype=np.float64),
b=np.zeros((action_dim,), dtype=np.float64))
self.obs_filters = [MeanStdFilter(shape=(obs_dim,))]
def get_actions(self, observations, update_filter=True):
observations = np.array(observations)
assert observations.ndim == 2
obs = self.obs_filters[0](observations, update=update_filter)
actions = np.dot(self.policy_params["W"], obs.T).T + self.policy_params["b"]
return actions, {}
def get_action(self, observation, update_filter=False):
actions, _ = self.get_actions(np.expand_dims(observation, axis=0), update_filter=update_filter)
return actions[0], {}
def get_actions_batch(self, observations, update_filter=True):
"""
The observations must be of shape num_deltas x batch_size x obs_dim
:param observations:
:param update_filter:
:return:
"""
# TODO: make sure the reshaping works
assert observations.shape[0] == self._num_deltas and observations.shape[-1] == self.obs_dim
if observations.ndim == 3:
obs = | np.reshape(observations, (-1, self.obs_dim)) | numpy.reshape |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
from functools import partial
from pathlib import Path
from typing import NamedTuple
import numpy as np
import pandas as pd
import pytest
import torch
from tianshou.data import Batch
from qlib.backtest import Order
from qlib.config import C
from qlib.log import set_log_with_config
from qlib.rl.data import pickle_styled
from qlib.rl.entries.test import backtest
from qlib.rl.order_execution import *
from qlib.rl.utils import ConsoleWriter, CsvWriter, EnvWrapperStatus
pytestmark = pytest.mark.skipif(sys.version_info < (3, 8), reason="Pickle styled data only supports Python >= 3.8")
DATA_ROOT_DIR = Path(__file__).parent.parent / ".data" / "rl" / "intraday_saoe"
DATA_DIR = DATA_ROOT_DIR / "us"
BACKTEST_DATA_DIR = DATA_DIR / "backtest"
FEATURE_DATA_DIR = DATA_DIR / "processed"
ORDER_DIR = DATA_DIR / "order" / "valid_bidir"
CN_DATA_DIR = DATA_ROOT_DIR / "cn"
CN_BACKTEST_DATA_DIR = CN_DATA_DIR / "backtest"
CN_FEATURE_DATA_DIR = CN_DATA_DIR / "processed"
CN_ORDER_DIR = CN_DATA_DIR / "order" / "test"
CN_POLICY_WEIGHTS_DIR = CN_DATA_DIR / "weights"
def test_pickle_data_inspect():
data = pickle_styled.load_intraday_backtest_data(BACKTEST_DATA_DIR, "AAL", "2013-12-11", "close", 0)
assert len(data) == 390
data = pickle_styled.load_intraday_processed_data(
DATA_DIR / "processed", "AAL", "2013-12-11", 5, data.get_time_index()
)
assert len(data.today) == len(data.yesterday) == 390
def test_simulator_first_step():
order = Order("AAL", 30.0, 0, pd.Timestamp("2013-12-11 00:00:00"), pd.Timestamp("2013-12-11 23:59:59"))
simulator = SingleAssetOrderExecution(order, BACKTEST_DATA_DIR)
state = simulator.get_state()
assert state.cur_time == pd.Timestamp("2013-12-11 09:30:00")
assert state.position == 30.0
simulator.step(15.0)
state = simulator.get_state()
assert len(state.history_exec) == 30
assert state.history_exec.index[0] == pd.Timestamp("2013-12-11 09:30:00")
assert state.history_exec["market_volume"].iloc[0] == 450072.0
assert abs(state.history_exec["market_price"].iloc[0] - 25.370001) < 1e-4
assert (state.history_exec["amount"] == 0.5).all()
assert (state.history_exec["deal_amount"] == 0.5).all()
assert abs(state.history_exec["trade_price"].iloc[0] - 25.370001) < 1e-4
assert abs(state.history_exec["trade_value"].iloc[0] - 12.68500) < 1e-4
assert state.history_exec["position"].iloc[0] == 29.5
assert state.history_exec["ffr"].iloc[0] == 1 / 60
assert state.history_steps["market_volume"].iloc[0] == 5041147.0
assert state.history_steps["amount"].iloc[0] == 15.0
assert state.history_steps["deal_amount"].iloc[0] == 15.0
assert state.history_steps["ffr"].iloc[0] == 0.5
assert (
state.history_steps["pa"].iloc[0]
== (state.history_steps["trade_price"].iloc[0] / simulator.twap_price - 1) * 10000
)
assert state.position == 15.0
assert state.cur_time == pd.Timestamp("2013-12-11 10:00:00")
def test_simulator_stop_twap():
order = Order("AAL", 13.0, 0, pd.Timestamp("2013-12-11 00:00:00"), pd.Timestamp("2013-12-11 23:59:59"))
simulator = SingleAssetOrderExecution(order, BACKTEST_DATA_DIR)
for _ in range(13):
simulator.step(1.0)
state = simulator.get_state()
assert len(state.history_exec) == 390
assert (state.history_exec["deal_amount"] == 13 / 390).all()
assert state.history_steps["position"].iloc[0] == 12 and state.history_steps["position"].iloc[-1] == 0
assert (state.metrics["ffr"] - 1) < 1e-3
assert abs(state.metrics["market_price"] - state.backtest_data.get_deal_price().mean()) < 1e-4
assert np.isclose(state.metrics["market_volume"], state.backtest_data.get_volume().sum())
assert state.position == 0.0
assert abs(state.metrics["trade_price"] - state.metrics["market_price"]) < 1e-4
assert abs(state.metrics["pa"]) < 1e-2
assert simulator.done()
def test_simulator_stop_early():
order = Order("AAL", 1.0, 1, pd.Timestamp("2013-12-11 00:00:00"), pd.Timestamp("2013-12-11 23:59:59"))
with pytest.raises(ValueError):
simulator = SingleAssetOrderExecution(order, BACKTEST_DATA_DIR)
simulator.step(2.0)
simulator = SingleAssetOrderExecution(order, BACKTEST_DATA_DIR)
simulator.step(1.0)
with pytest.raises(AssertionError):
simulator.step(1.0)
def test_simulator_start_middle():
order = Order("AAL", 15.0, 1, pd.Timestamp("2013-12-11 10:15:00"), pd.Timestamp("2013-12-11 15:44:59"))
simulator = SingleAssetOrderExecution(order, BACKTEST_DATA_DIR)
assert len(simulator.ticks_for_order) == 330
assert simulator.cur_time == pd.Timestamp("2013-12-11 10:15:00")
simulator.step(2.0)
assert simulator.cur_time == pd.Timestamp("2013-12-11 10:30:00")
for _ in range(10):
simulator.step(1.0)
simulator.step(2.0)
assert len(simulator.history_exec) == 330
assert simulator.done()
assert abs(simulator.history_exec["amount"].iloc[-1] - (1 + 2 / 15)) < 1e-4
assert abs(simulator.metrics["ffr"] - 1) < 1e-4
def test_interpreter():
order = Order("AAL", 15.0, 1, pd.Timestamp("2013-12-11 10:15:00"), pd.Timestamp("2013-12-11 15:44:59"))
simulator = SingleAssetOrderExecution(order, BACKTEST_DATA_DIR)
assert len(simulator.ticks_for_order) == 330
assert simulator.cur_time == pd.Timestamp("2013-12-11 10:15:00")
# emulate an env status
class EmulateEnvWrapper(NamedTuple):
status: EnvWrapperStatus
interpreter = FullHistoryStateInterpreter(FEATURE_DATA_DIR, 13, 390, 5)
interpreter_step = CurrentStepStateInterpreter(13)
interpreter_action = CategoricalActionInterpreter(20)
interpreter_action_twap = TwapRelativeActionInterpreter()
wrapper_status_kwargs = dict(initial_state=order, obs_history=[], action_history=[], reward_history=[])
# first step
interpreter.env = EmulateEnvWrapper(status=EnvWrapperStatus(cur_step=0, done=False, **wrapper_status_kwargs))
obs = interpreter(simulator.get_state())
assert obs["cur_tick"] == 45
assert obs["cur_step"] == 0
assert obs["position"] == 15.0
assert obs["position_history"][0] == 15.0
assert all(np.sum(obs["data_processed"][i]) != 0 for i in range(45))
assert | np.sum(obs["data_processed"][45:]) | numpy.sum |
import matplotlib
matplotlib.use('Agg') # for plotting without GUI
import matplotlib.pyplot as plt
import time
import os
import math
import psutil  # used by usage() below for RAM reporting
import pandas as pd
import numpy as np
from scipy.stats.stats import pearsonr
import tensorflow as tf
import collections
import scipy.sparse as sp_sparse
import tables
from sklearn.decomposition import PCA
# from sklearn.manifold import TSNE # single core
from MulticoreTSNE import MulticoreTSNE as TSNE # MCORE
# Sys
def usage():
process = psutil.Process(os.getpid())
ram = process.memory_info()[0] / float(2 ** 20)
ram = round(ram, 1)
return ram
# DATA I/O # todo: check gene_id barcode uniqueness
def read_csv(fname):
'''read_csv into pd.df, assuming index_col=0, and header=True'''
print('reading ', fname)
tic = time.time()
df = pd.read_csv(fname, index_col=0)
# print("read matrix: [genes, cells]")
print('shape:', df.shape)
# print(df.axes)
if df.shape[0] > 2 and df.shape[1] > 2:
print(df.ix[0:3, 0:2])
toc = time.time()
print("reading took {:.1f} seconds".format(toc - tic))
return df
def read_tsv(fname):
'''read_tsv into pd.df, assuming index_col=0, and header=True'''
print('reading ', fname)
tic = time.time()
df = pd.read_csv(fname, index_col=0, delimiter='\t')
# print("read matrix: [genes, cells]")
print('shape:', df.shape)
# print(df.axes)
if df.shape[0] > 2 and df.shape[1] > 2:
print(df.ix[0:3, 0:2])
toc = time.time()
print("reading took {:.1f} seconds".format(toc - tic))
return df
def save_csv(arr, fname):
'''if fname=x.csv.gz, will be compressed
if fname=x.csv, will not be compressed'''
tic = time.time()
print('saving: ', arr.shape)
np.savetxt(fname, arr, delimiter=',', newline='\n')
toc = time.time()
print("saving" + fname + " took {:.1f} seconds".format(toc - tic))
def save_hd5(df, out_name):
tic = time.time()
print('saving: ', df.shape)
df.to_hdf(out_name, key='null', mode='w', complevel=9, complib='blosc')
toc = time.time()
print("saving" + out_name + " took {:.1f} seconds".format(toc - tic))
def read_hd5(in_name):
'''
:param in_name:
:return df:
'''
print('reading: ', in_name)
df = pd.read_hdf(in_name)
print('read', df.shape)
# print(df.axes)
if df.shape[0] > 2 and df.shape[1] > 2:
print(df.ix[0:3, 0:2])
return df
GeneBCMatrix = collections.namedtuple(
'GeneBCMatrix',
['gene_ids', 'gene_names', 'barcodes', 'matrix'])
def read_sparse_matrix_from_h5(fname, genome, file_ori):
'''
for 10x_genomics h5 file:
always transpose into cell_row if gene_row is the input
https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/advanced/h5_matrices
:return: cell_row sparse matrix
:param fname:
:param genome:
:return:
'''
tic = time.time()
print('reading {} {}'.format(fname, genome))
with tables.open_file(fname, 'r') as f:
try:
dsets = {}
for node in f.walk_nodes('/' + genome, 'Array'):
dsets[node.name] = node.read()
matrix = sp_sparse.csc_matrix(
(dsets['data'], dsets['indices'], dsets['indptr']),
shape=dsets['shape'])
print('shape is {}'.format(matrix.shape))
if file_ori == 'cell_row':
pass
elif file_ori == 'gene_row':
matrix = matrix.transpose()
else:
raise Exception('file orientation {} not recognized'.format(file_ori))
obj = GeneBCMatrix(dsets['genes'], dsets['gene_names'],
dsets['barcodes'], matrix)
nz_count = len(obj.matrix.nonzero()[0])
nz_rate = nz_count / (obj.matrix.shape[0] * obj.matrix.shape[1])
nz_rate = round(nz_rate, 3)
print('nz_rate is {}'.format(nz_rate))
print('nz_count is {}\n'.format(nz_count))
toc = time.time()
print("reading took {:.1f} seconds".format(toc - tic))
return obj
except tables.NoSuchNodeError:
raise Exception("Genome %s does not exist in this file." % genome)
except KeyError:
raise Exception("File is missing one or more required datasets.")
def save_sparse_matrix_to_h5(gbm, filename, genome):
'''
for 10x_genomics h5 file:
https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/advanced/h5_matrices
:return:
:param filename:
:param genome:
:return:
'''
flt = tables.Filters(complevel=1)
print('saving: ', gbm.matrix.shape)
with tables.open_file(filename, 'w', filters=flt) as f:
try:
group = f.create_group(f.root, genome)
f.create_carray(group, 'genes', obj=gbm.gene_ids)
f.create_carray(group, 'gene_names', obj=gbm.gene_names)
f.create_carray(group, 'barcodes', obj=gbm.barcodes)
f.create_carray(group, 'data', obj=gbm.matrix.data)
f.create_carray(group, 'indices', obj=gbm.matrix.indices)
f.create_carray(group, 'indptr', obj=gbm.matrix.indptr)
f.create_carray(group, 'shape', obj=gbm.matrix.shape)
except:
raise Exception("Failed to write H5 file.")
def read_data_into_cell_row(fname, orientation='cell_row', genome='mm10'):
'''
read hd5 or csv, into cell_row format
:param fname:
:param orientation: of file
:return: cell_row df
'''
tic = time.time()
print('reading {} into cell_row data frame'.format(fname))
if fname.endswith('hd5'):
df_tmp = read_hd5(fname)
elif fname.endswith('csv'):
df_tmp = read_csv(fname)
elif fname.endswith('tsv'):
df_tmp = read_tsv(fname)
elif fname.endswith('csv.gz'):
df_tmp = read_csv(fname)
elif fname.endswith('h5'): # not hd5
df_tmp = read_sparse_matrix_from_h5(fname, genome=genome, file_ori=orientation)
print('sparse_matrix have been read')
else:
raise Exception('file name not ending in hd5 nor csv, not recognized')
if orientation == 'gene_row':
df_tmp = df_tmp.transpose()
elif orientation == 'cell_row':
pass
else:
raise Exception('parameter err: for {}, orientation {} not correctly spelled'.format(fname, orientation))
#print("after transpose into cell row (if correct file_orientation provided)")
if fname.endswith('h5'):
print("shape is {}".format(df_tmp.matrix.shape))
else:
print("shape is {}".format(df_tmp.shape))
print('nz_rate is {}'.format(nnzero_rate_df(df_tmp)))
print('nz_count is {}\n'.format(nnzero_count_df(df_tmp)))
toc = time.time()
print("reading took {:.1f} seconds".format(toc - tic))
return df_tmp
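# Example (illustrative sketch; the file name and orientation are assumptions):
# >>> df_cell_row = read_data_into_cell_row('expression.gene_row.csv', orientation='gene_row')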
# PRE-PROCESSING OF DATA FRAMES #
def df_filter(df):
df_filtered = df.loc[(df.sum(axis=1) != 0), (df.sum(axis=0) != 0)]
print("filtered out any rows and columns with sum of zero")
return df_filtered
def df_normalization(df, scale=1e6):
'''
RPM when default
:param df: [gene, cell]
:param scale:
:return:
'''
read_counts = df.sum(axis=0) # colsum
# df_normalized = df.div(read_counts, axis=1).mul(np.median(read_counts)).mul(1)
df_normalized = df.div(read_counts, axis=1).mul(scale)
return df_normalized
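# Worked example (illustrative sketch, made-up counts): with scale=10 each column (cell)
# is rescaled so that its reads sum to 10.
_demo_counts = pd.DataFrame({'cell1': [1, 3], 'cell2': [2, 2]}, index=['geneA', 'geneB'])
_demo_rpm = df_normalization(_demo_counts, scale=10)
# _demo_rpm['cell1'] -> [2.5, 7.5]; _demo_rpm['cell2'] -> [5.0, 5.0]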
def df_log10_transformation(df, pseudocount=1):
'''
log10
:param df:
:param pseudocount:
:return:
'''
df_log10 = np.log10(np.add(df, pseudocount))
return df_log10
def df_rpm_log10(df, pseudocount=1):
'''
log10
:param df: [gene, cell]
:return:
'''
df_tmp = df.copy()
df_tmp = df_normalization(df_tmp)
df_tmp = df_log10_transformation(df_tmp, pseudocount=pseudocount)
return df_tmp
def df_exp_rpm_log10(df, pseudocount=1):
'''
log10
:param df: [gene, cell]
:pseudocount: for exp transformation and log10 transformation
:return:
'''
df_tmp = df.copy()
df_tmp = np.power(10, df_tmp) - pseudocount
df_tmp = df_normalization(df_tmp)
df_tmp = df_log10_transformation(df_tmp, pseudocount=pseudocount)
return df_tmp
def df_exp_discretize_log10(df, pseudocount=1):
'''
For better comparison with ground-truth in gene-scatterplot visualization
Input should be the output of df_log10_transformation (log10(x+1))
If so, all values ≥ 0
1. 10^x-1
2. around
3. log10(x+1)
:param df:
:param pseudocount:
:return:
'''
df_tmp = df.copy()
df_tmp = np.around(np.power(10, df_tmp) - pseudocount)
df_tmp = np.log10(df_tmp + pseudocount)
return df_tmp
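# Worked example (illustrative sketch): starting from a log10(x+1) value, undo the
# transform, round to an integer count, then re-apply log10(x+1):
# log10(3.4) ~= 0.531 -> count 2.4 -> rounds to 2 -> log10(2 + 1) ~= 0.477.
_demo_log_df = pd.DataFrame([[np.log10(3.4)]])
_demo_discretized = df_exp_discretize_log10(_demo_log_df)
# _demo_discretized.iloc[0, 0] ~= 0.477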
def df_transformation(df, transformation='as_is'):
'''
data_transformation
df not copied
:param df: [genes, cells]
:param format: as_is, log10, rpm_log10, exp_rpm_log10
:return: df_formatted
'''
if transformation == 'as_is':
pass # do nothing
elif transformation == 'log10':
df = df_log10_transformation(df)
elif transformation == 'rpm_log10':
df = df_rpm_log10(df)
elif transformation == 'exp_rpm_log10':
df = df_exp_rpm_log10(df)
else:
raise Exception('format {} not recognized'.format(transformation))
print('data formatting: ', transformation)
return df
def mask_df(df, nz_goal):
'''
:param df: any direction
:param nz_goal:
:return:
'''
df_msked = df.copy()
nz_now = nnzero_rate_df(df)
nz_goal = nz_goal/nz_now
zero_goal = 1-nz_goal
df_msked = df_msked.where(np.random.uniform(size=df.shape) > zero_goal, 0)
return df_msked
def multinormial_downsampling(in_df, libsize_out):
out_df = in_df.copy()
for i in range(len(in_df)):
slice_arr = in_df.values[i, :]
libsize = slice_arr.sum()
p_lst = slice_arr / libsize
slice_resample = np.random.multinomial(libsize_out, p_lst)
out_df.ix[i, :] = slice_resample
return out_df
def split_arr(arr, a=0.8, b=0.1, c=0.1, seed_var=1):
"""input array, output rand split arrays
a: train, b: valid, c: test
e.g.: [arr_train, arr_valid, arr_test] = split(df.values)"""
print(">splitting data")
np.random.seed(seed_var) # for splitting consistency
train_indices = np.random.choice(arr.shape[0], int(round(arr.shape[0] * a // (a + b + c))), replace=False)
remain_indices = np.array(list(set(range(arr.shape[0])) - set(train_indices)))
valid_indices = np.random.choice(remain_indices, int(round(len(remain_indices) * b // (b + c))), replace=False)
test_indices = np.array(list(set(remain_indices) - set(valid_indices)))
np.random.seed() # cancel seed effect
print("total samples being split: ", len(train_indices) + len(valid_indices) + len(test_indices))
print('train:', len(train_indices), ' valid:', len(valid_indices), 'test:', len(test_indices))
arr_train = arr[train_indices]
arr_valid = arr[valid_indices]
arr_test = arr[test_indices]
return (arr_train, arr_valid, arr_test)
def split_df(df, a=0.8, b=0.1, c=0.1, seed_var=1):
"""input df, output rand split dfs
a: train, b: valid, c: test
e.g.: [df_train, df2, df_test] = split(df, a=0.7, b=0.15, c=0.15)"""
np.random.seed(seed_var) # for splitting consistency
train_indices = np.random.choice(df.shape[0], int(df.shape[0] * a // (a + b + c)), replace=False)
remain_indices = np.array(list(set(range(df.shape[0])) - set(train_indices)))
valid_indices = np.random.choice(remain_indices, int(len(remain_indices) * b // (b + c)), replace=False)
test_indices = np.array(list(set(remain_indices) - set(valid_indices)))
np.random.seed() # cancel seed effect
print("total samples being split: ", len(train_indices) + len(valid_indices) + len(test_indices))
print('train:', len(train_indices), ' valid:', len(valid_indices), 'test:', len(test_indices))
    df_train = df.iloc[train_indices, :]
    df_valid = df.iloc[valid_indices, :]
    df_test = df.iloc[test_indices, :]
return df_train, df_valid, df_test
def random_subset_arr(arr, m_max, n_max):
[m, n] = arr.shape
m_reduce = min(m, m_max)
n_reduce = min(n, n_max)
np.random.seed(1201)
row_rand_idx = np.random.choice(m, m_reduce, replace=False)
col_rand_idx = np.random.choice(n, n_reduce, replace=False)
np.random.seed()
arr_sub = arr[row_rand_idx][:, col_rand_idx]
print('matrix from [{},{}] to a random subset of [{},{}]'.
format(m, n, arr_sub.shape[0], arr_sub.shape[1]))
return arr_sub
def subset_df(df_big, df_subset):
    return df_big.loc[df_subset.index, df_subset.columns]
def sparse_matrix_transformation(csr_matrix, transformation='log10'):
'''
    data transformation for a sparse csr_matrix (input is not copied)
    note: the 'log10' option applies csr_matrix.log1p(), i.e. natural log of (1 + x), not base-10
    :param csr_matrix:
    :param transformation: as_is, log10
:return:
'''
if transformation == 'as_is':
pass # do nothing
elif transformation == 'log10':
csr_matrix = csr_matrix.log1p()
elif transformation == 'rpm_log10':
raise Exception('rpm_log10 not implemented yet')
elif transformation == 'exp_rpm_log10':
raise Exception('exp_rpm_log10 not implemented yet')
else:
raise Exception('format {} not recognized'.format(transformation))
    print('data transformation: ', transformation)
return csr_matrix
def subsample_matrix(gbm, barcode_indices):
return GeneBCMatrix(gbm.gene_ids, gbm.gene_names,
gbm.barcodes[barcode_indices],
gbm.matrix[:, barcode_indices])
def subgene_matrix(gbm, gene_indices):
return GeneBCMatrix(gbm.gene_ids[gene_indices], gbm.gene_names[gene_indices],
gbm.barcodes,
gbm.matrix[gene_indices, :])
def get_expression(gbm, gene_name):
gene_indices = np.where(gbm.gene_names == gene_name)[0]
if len(gene_indices) == 0:
raise Exception("%s was not found in list of gene names." % gene_name)
return gbm.matrix[gene_indices[0], :].toarray().squeeze()
def split__csr_matrix(csr_matrix, a=0.8, b=0.1, c=0.1, seed_var=1):
"""
input: csr_matrix(cell_row),
output: rand split datasets (train/valid/test)
a: train
b: valid
c: test
e.g. [csr_train, csr_valid, csr_test] = split(df.values)"""
print(">splitting data..")
np.random.seed(seed_var) # for splitting consistency
[m, n] = csr_matrix.shape
train_indices = np.random.choice(m, int(round(m*a//(a+b+c))), replace=False)
remain_indices = np.array(list(set(range(m)) - set(train_indices)))
valid_indices = np.random.choice(remain_indices, int(round(len(remain_indices)*b//(b + c))), replace=False)
test_indices = np.array(list(set(remain_indices) - set(valid_indices)))
np.random.seed() # cancel seed effect
print("total samples being split: ", len(train_indices) + len(valid_indices) + len(test_indices))
print('train:', len(train_indices), ' valid:', len(valid_indices), 'test:', len(test_indices))
csr_train = csr_matrix[train_indices, :]
csr_valid = csr_matrix[valid_indices, :]
csr_test = csr_matrix[test_indices, :]
return (csr_train, csr_valid, csr_test, train_indices, valid_indices, test_indices)
# STAT CALCULATION #
def nnzero_rate_df(df):
idx = df != 0
nnzero_rate = round(sum(sum(idx.values)) / df.size, 3)
return nnzero_rate
def nnzero_count_df(df):
idx = df != 0
nnzero_count = sum(sum(idx.values))
return nnzero_count
def mean_df(df):
Sum = sum(sum(df.values))
Mean = Sum / df.size
return (Mean)
def square_err(arr1, arr2):
'''
arr1 and arr2 of same shape, return squared err between them
arr and df both works
'''
diff = np.subtract(arr1, arr2)
square_err_ = np.sum(np.power(diff, 2))
count = int(arr1.shape[0] * arr1.shape[1])
return square_err_, count
def square_err_omega(arr, arr_ground_truth):
'''
input: arr and arr_ground_truth of same shape
return: squared err omega (excluding zeros in ground truth)
arr and df both works
only zeros are ignored, negatives should not show up
'''
omega = np.sign(arr_ground_truth)
diff = np.subtract(arr, arr_ground_truth)
square_err_ = np.power(diff, 2)
square_err_nz = np.sum(np.multiply(square_err_, omega))
count = int(arr.shape[0] * arr.shape[1])
return square_err_nz, count
def mse_omega(arr_h, arr_m):
'''arr and df both works'''
omega = np.sign(arr_m) # if x>0, 1; elif x == 0, 0;
diff = np.subtract(arr_h, arr_m)
squared = np.power(diff, 2)
non_zero_squared = np.multiply(squared, omega)
mse_omega = np.mean(np.mean(non_zero_squared))
return mse_omega
def mse(arr_h, arr_m):
'''MSE between H and M'''
diff = np.subtract(arr_h, arr_m)
squared = np.power(diff, 2)
mse = np.mean(np.mean(squared))
return mse
def nz_std(X, Y):
'''
Goal: Evaluate gene-level imputation with STD of non-zero values of that gene
Takes two cell_row DFs, X and Y, with same shape
Calculate STD for each column(gene)
Treating zeros in X as Nones, And corresponding values in Y as Nones, too
:param X: Input cell_row matrix
:param Y: Imputation cell_row matrix
:return: two list of NZ_STDs, used for evaluation of imputation
'''
idx_zeros = (X == 0)
X_ = X.copy()
Y_ = Y.copy()
X_[idx_zeros] = None
Y_[idx_zeros] = None
return (X_.std(), Y_.std())
def nz2_corr(x, y):
'''
the nz2_corr between two vectors, excluding any element with zero in either vectors
:param x: vector1
:param y: vector2
:return:
'''
nas = np.logical_or(x == 0, y == 0)
result = pearson_cor(x[~nas], y[~nas])
if not math.isnan(result):
result = round(result, 4)
return result
def gene_mse_nz_from_df(Y, X):
'''
get gene_mse from gene_expression_df (cell_row, with cell_id as index)
X: input/ground-truth
Y: imputation
return a [gene, 1] pd.series with index of gene_ids
'''
mse_df = pd.DataFrame(columns=['gene_name'])
for i in range(X.shape[1]):
        mse_ = mse_omega(Y.iloc[:, i], X.iloc[:, i])
        gene_name = X.columns[i]
        mse_df.loc[gene_name, 'gene_name'] = mse_
mse_df = mse_df.iloc[:, 0]
print(mse_df.head(), '\n', mse_df.shape)
return mse_df
def combine_gene_imputation_of_two_df(Y1, Y2, metric1, metric2, mode='smaller'):
'''
Y1, Y2: two imputation results (cell_row, df)
    Metric1, Metric2: [num-gene, 1], df, same metrics for Y1 and Y2, e.g. MSE, SD
select rows of Y1, Y2 into Y_combined
mode: smaller/larger (being selected), e.g. smaller MSE, larger SD
Output in index/column order of Y1
'''
if mode == 'smaller':
idx_better = metric1 < metric2
elif mode == 'larger':
idx_better = metric1 > metric2
else:
raise Exception('mode err')
# try:
# idx_better = idx_better.iloc[:, 0] # df to series, important
# except 'IndexingError':
# pass
print('yg_better boolean series:\n', idx_better.head())
Y_better_lst = [Y1.transpose()[idx_better],
Y2.transpose()[~idx_better]] # list of frames
Y_better = pd.concat(Y_better_lst)
Y_better = Y_better.transpose() # tr back
Y_better = Y_better.loc[
Y1.index, Y1.columns] # get Y1 original order, just in case
print('Y1:\n', Y1.iloc[:5, :3])
print('Y2:\n', Y2.iloc[:5, :3])
print("metrics1:\n", metric1.iloc[:5])
print("metrics2:\n", metric2.iloc[:5])
print('Y_combined:\n', Y_better.iloc[:5, :3])
return Y_better
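# Hedged sketch: combine two imputation results gene-by-gene, keeping for each gene the
# one with the smaller non-zero MSE against the input X; argument names are illustrative.
def _demo_combine_imputations(X, Y1, Y2):
    mse1 = gene_mse_nz_from_df(Y1, X)
    mse2 = gene_mse_nz_from_df(Y2, X)
    return combine_gene_imputation_of_two_df(Y1, Y2, mse1, mse2, mode='smaller')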
# PLOTS #
def refresh_logfolder(log_dir):
'''delete and recreate log_dir'''
if tf.gfile.Exists(log_dir):
tf.gfile.DeleteRecursively(log_dir)
print(log_dir, "deleted")
tf.gfile.MakeDirs(log_dir)
print(log_dir, 'created\n')
def max_min_element_in_arrs(arr_list):
'''input a list of np.arrays
e.g: max_element_in_arrs([df_valid.values, h_valid])'''
max_list = []
for x in arr_list:
max_tmp = np.nanmax(x)
max_list.append(max_tmp)
max_all = np.nanmax(max_list)
min_list = []
for x in arr_list:
min_tmp = np.nanmin(x)
min_list.append(min_tmp)
min_all = np.nanmin(min_list)
return max_all, min_all
def scatterplot(x, y,
title='scatterplot', dir='plots', xlab='xlab', ylab='ylab',
alpha=1):
if not os.path.exists(dir):
os.makedirs(dir)
fname = "./{}/{}".format(dir, title)
fig = plt.figure(figsize=(5, 5))
plt.plot(x, y, 'o', alpha=alpha)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
    print('scatterplot ', title, ' done')
def scatterplot2(x, y, title='title', xlabel='x', ylabel='y', range='same', dir='plots'):
'''
    x and y must be pandas Series (e.g. single-column slices of a DataFrame)
    so that pearsonr(x, y)[0] works
    range = 'same' / 'flexible', or a [min, max] pair
:param x:
:param y:
:param title:
:param xlabel:
:param ylabel:
:param range:
:param dir:
:param corr:
:return:
'''
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fprefix = "./{}/{}".format(dir, title)
# corr
corr = pearson_cor(x, y)
if not math.isnan(corr):
corr = str(round(corr, 4))
# nz2_corr
nz_corr = nz2_corr(x, y)
print('corr: {}; nz_corr: {}'.format(corr, nz_corr))
# density plot
from scipy.stats import gaussian_kde
# Calculate the point density
xy = np.vstack([x, y])
try:
z = gaussian_kde(xy)(xy)
# sort: dense on top (plotted last)
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
# plt
fig = plt.figure(figsize=(5, 5))
fig, ax = plt.subplots()
        cax = ax.scatter(x, y, c=z, s=50, edgecolor='none')
plt.colorbar(cax)
except np.linalg.linalg.LinAlgError:
plt.plot(x, y, 'b.', alpha=0.3)
plt.title('{}\ncorr: {}; corr-nz: {}'.format(title, corr, nz_corr)) # nz2
plt.xlabel(xlabel + "\nmean: " + str(round(np.mean(x), 2)))
plt.ylabel(ylabel + "\nmean: " + str(round(np.mean(y), 2)))
    if range == 'same':
        max, min = max_min_element_in_arrs([x, y])
        plt.xlim(min, max)
        plt.ylim(min, max)
    elif range == 'flexible':
        pass  # keep matplotlib's automatic limits
else:
plt.xlim(range[0], range[1])
plt.ylim(range[0], range[1])
plt.savefig(fprefix + '.png', bbox_inches='tight')
plt.close('all')
def density_plot(x, y,
title='density plot', dir='plots', xlab='x', ylab='y'):
'''x and y must be arr [m, 1]'''
from scipy.stats import gaussian_kde
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fname = "./{}/{}".format(dir, title)
# Calculate the point density
xy = np.vstack([x, y])
z = gaussian_kde(xy)(xy)
# sort: dense on top (plotted last)
idx = z.argsort()
x, y, z = x[idx], y[idx], z[idx]
# plt
fig = plt.figure(figsize=(5, 5))
fig, ax = plt.subplots()
    cax = ax.scatter(x, y, c=z, s=50, edgecolor='none')
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.colorbar(cax)
plt.savefig(fname + ".png", bbox_inches='tight')
plt.close(fig)
def gene_pair_plot(df, list, tag, dir='./plots'):
'''
scatterplot2 of two genes in a df
:param df: [cells, genes]
    :param list: list of gene-index pairs, e.g. [[2, 3], [i, j]]
:param tag: output_tag e.g. 'PBMC'
:param dir: output_dir
:return:
'''
for i, j in list:
print('gene_pair: ', i, type(i), j, type(j))
try:
x = df.ix[:, i]
y = df.ix[:, j]
except KeyError:
print('KeyError: the gene index does not exist')
continue
scatterplot2(x, y,
title='Gene' + str(i) + ' vs Gene' + str(j) + '\n' + tag,
xlabel='Gene' + str(i), ylabel='Gene' + str(j),
dir=dir)
def cluster_scatterplot(df2d, labels, title):
'''
PCA or t-SNE 2D visualization
`cluster_scatterplot(tsne_projection, cluster_info.Cluster.values.astype(int),
title='projection.csv t-SNE')`
:param df2d: PCA or t-SNE projection df, cell as row, feature as columns
:param labels:
:param title:
:return:
'''
legends = np.unique(labels)
print('all labels:', legends)
fig = plt.figure(figsize=(8, 6))
ax = plt.subplot(111)
for i in legends:
_ = df2d.iloc[labels == i]
num_cells = str(len(_))
percent_cells = str(round(int(num_cells) / len(df2d) * 100, 1)) + '%'
ax.scatter(_.iloc[:, 0], _.iloc[:, 1],
alpha=0.5, marker='.',
label='c' + str(i) + ':' + num_cells + ', ' + percent_cells
)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.title(title)
plt.xlabel('legend format: cluster_id:num-cells')
plt.savefig(title + '.png', bbox_inches='tight')
plt.show()
plt.close('all')
def pca_tsne(df_cell_row, cluster_info=None, title='data', dir='plots',
num_pc=50, num_tsne=2, ncores=8):
'''
PCA and tSNE plots for DF_cell_row, save projections.csv
:param df_cell_row: data matrix, features as columns, e.g. [cell, gene]
:param cluster_info: cluster_id for each cell_id
:param title: figure title, e.g. Late
:param num_pc: 50
:param num_tsne: 2
:return: tsne_df, plots saved, pc_projection.csv, tsne_projection.csv saved
'''
if not os.path.exists(dir):
os.makedirs(dir)
title = './'+dir+'/'+title
df = df_cell_row
if cluster_info is None:
cluster_info = pd.DataFrame(0, index=df.index, columns=['cluster_id'])
tic = time.time()
# PCA
pca = PCA(n_components=num_pc)
pc_x = pca.fit_transform(df)
df_pc_df = pd.DataFrame(data=pc_x, index=df.index, columns=range(num_pc))
df_pc_df.index.name = 'cell_id'
df_pc_df.columns.name = 'PC'
df_pc_df.to_csv(title+'.pca.csv')
print('dim before PCA', df.shape)
print('dim after PCA', df_pc_df.shape)
print('explained variance ratio: {}'.format(
sum(pca.explained_variance_ratio_)))
colors = cluster_info.reindex(df_pc_df.index)
colors = colors.dropna().iloc[:, 0]
print('matched cluster_info:', colors.shape)
print('unmatched data will be excluded from the plot') # todo: include unmatched
df_pc_ = df_pc_df.reindex(colors.index) # only plot labeled data?
cluster_scatterplot(df_pc_, colors.values.astype(str), title=title+' (PCA)')
# tSNE
print('MCORE-TSNE, with ', ncores, ' cores')
df_tsne = TSNE(n_components=num_tsne, n_jobs=ncores).fit_transform(df_pc_)
print('tsne done')
df_tsne_df = pd.DataFrame(data=df_tsne, index=df_pc_.index)
print('wait to output tsne')
df_tsne_df.to_csv(title+'.tsne.csv')
print('wrote tsne to output')
cluster_scatterplot(df_tsne_df, colors.values.astype(str), title=title+' ('
't-SNE)')
toc = time.time()
print('PCA and tSNE took {:.1f} seconds\n'.format(toc-tic))
return df_tsne_df
def heatmap_vis(arr, title='visualization of matrix in a square manner', cmap="rainbow",
vmin=None, vmax=None, xlab='', ylab='', dir='plots'):
'''heatmap visualization of 2D matrix, with plt.imshow(), in a square manner
cmap options PiYG for [neg, 0, posi]
Greys Reds for [0, max]
rainbow for [0,middle,max]'''
if not os.path.exists(dir):
os.makedirs(dir)
fname = './' + dir + '/' + title + '.vis.png'
if (vmin is None):
vmin = np.min(arr)
if (vmax is None):
vmax = np.max(arr)
fig = plt.figure(figsize=(9, 9))
plt.imshow(arr, cmap=cmap, vmin=vmin, vmax=vmax, aspect='auto')
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.colorbar()
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print('heatmap vis ', title, ' done')
def heatmap_vis2(arr, title='visualization of matrix', cmap="rainbow",
vmin=None, vmax=None, xlab='', ylab='', dir='plots'):
'''heatmap visualization of 2D matrix, with plt.pcolor()
cmap options PiYG for [neg, 0, posi]
Greys Reds for [0, max]
rainbow for [0,middle,max]'''
if not os.path.exists(dir):
os.makedirs(dir)
fname = './' + dir + '/' + title + '.vis.png'
if (vmin is None):
vmin = np.min(arr)
if (vmax is None):
vmax = np.max(arr)
fig = plt.figure(figsize=(9, 9))
plt.pcolor(arr, cmap=cmap, vmin=vmin, vmax=vmax)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.colorbar()
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print('heatmap vis ', title, ' done')
def curveplot(x, y, title, xlabel, ylabel, dir='plots'):
# scimpute.curveplot(epoch_log, corr_log_valid,
# title='learning_curve_pearsonr.step2.gene'+str(j)+", valid",
# xlabel='epoch',
    # ylabel='Pearson corr (prediction vs ground truth, valid, including cells with zero gene-j)')
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fprefix = "./{}/{}".format(dir, title)
# plot
plt.plot(x, y)
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(fprefix + '.png', bbox_inches='tight')
plt.close()
def curveplot2(x, y, z, title, xlabel, ylabel, dir='plots'):
'''curveplot2(epoch_log, train_log, valid_log, title="t", xlabel="x", ylabel="y")'''
# scimpute.curveplot2(epoch_log, corr_log_train, corr_log_valid,
# title='learning_curve_pearsonr.step2.gene'+str(j)+", train_valid",
# xlabel='epoch',
    # ylabel='Pearson corr (prediction vs ground truth, valid, including cells with zero gene-j)')
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fprefix = "./{}/{}".format(dir, title)
# plot
plt.plot(x, y, label='train')
plt.plot(x, z, label='valid')
plt.legend()
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.savefig(fprefix + '.png', bbox_inches='tight')
plt.close()
def hist_list(list, xlab='xlab', title='histogram', bins=100, dir='plots'):
'''output histogram of a list into png'''
if not os.path.exists(dir):
os.makedirs(dir)
fname = str(title) + '.png'
fname = "./{}/{}".format(dir, fname)
fig, ax = plt.subplots()
plt.title(title)
plt.xlabel(xlab)
plt.ylabel('Density')
hist = plt.hist(list, bins=bins, density=True)
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print('hist of {} is done'.format(title))
return hist
def hist_arr_flat(arr, title='hist', xlab='x', ylab='Frequency', bins=100, dir='plots'):
'''create histogram for flattened arr'''
if not os.path.exists(dir):
os.makedirs(dir)
fname = "./{}/{}".format(dir, title) + '.png'
fig = plt.figure(figsize=(9, 9))
    n, bins, patches = plt.hist(arr.flatten(), bins, density=True, facecolor='green', alpha=0.75)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.savefig(fname, bbox_inches='tight')
plt.close(fig)
print("histogram ", title, ' done')
def hist_df(df, title="hist of df", xlab='xlab', bins=100, dir='plots', range=None):
if not os.path.exists(dir):
os.makedirs(dir)
df_flat = df.values.reshape(df.size, 1)
# fig = plt.figure(figsize=(9, 9))
hist = plt.hist(df_flat, bins=bins, density=True, range=range)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel('Density')
plt.savefig('./{}/{}.png'.format(dir, title), bbox_inches='tight')
plt.close()
print('hist of ', title, 'is done')
return hist
def pearson_cor (x, y):
'''This function calculates Pearson correlation between vector x and y.
It returns nan if x or y has 2 data points or less, or does not vary
Parameters
------------
x: numpy array
y: numpy array
Return
-----------
Pearson correlation or nan
'''
if (len(x) > 2) and (x.std() > 0) and (y.std() > 0):
corr = pearsonr(x, y)[0]
else:
corr = np.nan
return corr
def hist_2matrix_corr(arr1, arr2, mode='column-wise', nz_mode='ignore',
title='hist_corr', dir='plots'):
'''Calculate correlation between two matrices column-wise or row-wise
default: arr[cells, genes], gene-wise corr (column-wise)
assume: arr1 from benchmark matrix (e.g. input), arr2 from imputation
if corr = NaN, it will be excluded from result
mode: column-wise, row-wise
nz_mode:
ignore (all values in vectors included)
strict (zero values excluded from both vector x,y)
        first (zero values excluded from x in arr1 only)
title: 'hist_corr' or custom
dir: 'plots' or custom
'''
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
fprefix = "./{}/{}".format(dir, title)
# if arr1.shape is arr2.shape:
if mode == 'column-wise':
range_size = arr2.shape[1]
elif mode == 'row-wise':
range_size = arr2.shape[0]
else:
raise Exception('mode not recognized')
hist = []
for i in range(range_size):
if mode == 'column-wise':
x = arr1[:, i]
y = arr2[:, i]
elif mode == 'row-wise':
x = arr1[i, :]
y = arr2[i, :]
else:
raise Exception('mode not recognized')
# Pearson correlation can be calculated
# only when there are more than 2 nonzero
# values, and when the standard deviation
# is positive for both x and y
if nz_mode == 'strict':
nas = np.logical_or(x==0, y==0)
corr = pearson_cor (x[~nas], y[~nas])
elif nz_mode == 'first':
nas = (x==0)
corr = pearson_cor (x[~nas], y[~nas])
elif nz_mode == 'ignore':
corr = pearson_cor(x, y)
else:
raise Exception('nz_mode not recognized')
if not math.isnan(corr):
hist.append(corr)
print('correlation calculation completed')
hist.sort()
median_corr = round(np.median(hist), 3)
mean_corr = round(np.mean(hist), 3)
print(title)
print('median corr: {} mean corr: {}'.format(median_corr, mean_corr))
# histogram of correlation
fig = plt.figure(figsize=(5, 5))
plt.hist(hist, bins=100, density=True)
plt.xlabel('median=' + str(median_corr) + ', mean=' + str(mean_corr))
plt.ylabel('Density') #todo freq to density
plt.xlim(-1, 1)
plt.title(title)
plt.savefig(fprefix + ".png", bbox_inches='tight') #todo remove \n from out-name
plt.close(fig)
return hist
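# Hedged usage sketch: gene-wise correlation histogram between a ground-truth matrix and
# an imputed matrix (both [cells, genes] numpy arrays); title and dir are assumptions.
def _demo_hist_corr(arr_input, arr_imputed):
    return hist_2matrix_corr(arr_input, arr_imputed,
                             mode='column-wise', nz_mode='first',
                             title='gene-wise corr (input vs imputed)', dir='plots')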
# TF #
def variable_summaries(name, var):
"""Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
with tf.name_scope(name):
# mean = tf.reduce_mean(var)
# tf.summary.scalar('mean', mean)
# with tf.name_scope('stddev'):
# stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
# tf.summary.scalar('stddev', stddev)
# tf.summary.scalar('max', tf.reduce_max(var))
# tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def weight_variable(name_scope, dim_in, dim_out, sd):
"""
define weights
:param name_scope:
:param dim_in:
:param dim_out:
:param sd:
:return:
"""
with tf.name_scope(name_scope):
W = tf.Variable(tf.random_normal([dim_in, dim_out], stddev=sd),
name=name_scope + '_W')
variable_summaries(name_scope + '_W', W)
return W
def bias_variable(name_scope, dim_out, sd):
"""
define biases
:param name_scope:
:param dim_out:
:param sd:
:return:
"""
with tf.name_scope(name_scope):
b = tf.Variable(tf.random_normal([dim_out], mean=100 * sd, stddev=sd),
name=name_scope + '_b')
variable_summaries(name_scope + '_b', b)
return b
def weight_bias_variable(name_scope, dim_in, dim_out, sd):
"""
define weights and biases
:param name_scope:
:param dim_in:
:param dim_out:
:param sd:
:return:
"""
with tf.name_scope(name_scope):
W = tf.Variable(tf.random_normal([dim_in, dim_out], stddev=sd, dtype=tf.float32),
name=name_scope + '_W')
b = tf.Variable(tf.random_normal([dim_out], mean=100 * sd, stddev=sd, dtype=tf.float32),
name=name_scope + '_b')
variable_summaries(name_scope + '_W', W)
variable_summaries(name_scope + '_b', b)
return W, b
def dense_layer(name, input, W, b, pRetain):
"""
define a layer and return output
:param name:
:param input: X_placeholder or a(l-1)
:param W: weights
:param b: biases
:param pRetain:
:return:
"""
x_drop = tf.nn.dropout(input, pRetain)
z = tf.add(tf.matmul(x_drop, W), b)
a = tf.nn.relu(z)
variable_summaries(name + '_a', a)
return a
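# Hedged sketch: how the helpers above might be composed into a small two-layer encoder
# in the same TF1 graph style; the layer names and sizes are illustrative assumptions.
def _demo_encoder(X_placeholder, pRetain, n_input=1000, n_hidden=400, sd=0.0001):
    W1, b1 = weight_bias_variable('encoder1', n_input, n_hidden, sd)
    a1 = dense_layer('encoder1', X_placeholder, W1, b1, pRetain)
    W2, b2 = weight_bias_variable('encoder2', n_hidden, n_hidden, sd)
    a2 = dense_layer('encoder2', a1, W2, b2, pRetain)
    return a2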
def dense_layer_BN(name, input, W, b, pRetain, epsilon=1e-3):
"""
define a layer and return output
:param name:
:param input: X_placeholder or a(l-1)
:param W: weights
:param b: biases
:param pRetain:
:return:
"""
x_drop = tf.nn.dropout(input, pRetain)
z = tf.add(tf.matmul(x_drop, W), b)
    # BN: beta (shift) and scale (gamma) were not defined in the original snippet,
    # so they are created here as trainable variables of the same shape as b
    batch_mean, batch_var = tf.nn.moments(z, [0])
    beta = tf.Variable(tf.zeros_like(b), name=name + '_beta')
    scale = tf.Variable(tf.ones_like(b), name=name + '_scale')
    z_bn = tf.nn.batch_normalization(z, batch_mean, batch_var, beta, scale, epsilon)
# NL
a = tf.nn.relu(z_bn)
variable_summaries(name + '_a', a)
return a
def learning_curve_mse(epoch, mse_batch, mse_valid,
title='learning curve (MSE)', xlabel='epochs', ylabel='MSE',
range=None,
dir='plots'):
"""
depreciated
"""
# create plots directory
if not os.path.exists(dir):
os.makedirs(dir)
# list to np.array, to use index
epoch = np.array(epoch)
mse_batch = np.array(mse_batch)
# mse_train = np.array(mse_train)
    mse_valid = np.array(mse_valid)
from typing import List, Dict, Sequence, Union, Tuple
from numbers import Number
import random
import numpy as np
from toolz import curry
from toolz.curried import get
from common import _tuple
__all__ = [
"resize", "resized_crop", "center_crop", "drop_boundary_bboxes",
"to_absolute_coords", "to_percent_coords", "hflip", "hflip2",
"vflip", "vflip2", "random_sample_crop", "move"
]
def iou_1m(box, boxes):
r"""
Calculates one-to-many ious.
Parameters
----------
box : ``Sequences[Number]``
A bounding box.
boxes : ``array_like``
Many bounding boxes.
Returns
-------
ious : ``array_like``
IoUs between the box and boxes.
"""
xi1 = np.maximum(boxes[..., 0], box[0])
yi1 = np.maximum(boxes[..., 1], box[1])
xi2 = np.minimum(boxes[..., 2], box[2])
    yi2 = np.minimum(boxes[..., 3], box[3])
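    # The published snippet ends here; a standard completion of the IoU computation
    # (a sketch, assuming boxes are given as [x1, y1, x2, y2]) would be:
    inter_area = np.maximum(xi2 - xi1, 0) * np.maximum(yi2 - yi1, 0)
    box_area = (box[2] - box[0]) * (box[3] - box[1])
    boxes_area = (boxes[..., 2] - boxes[..., 0]) * (boxes[..., 3] - boxes[..., 1])
    union_area = box_area + boxes_area - inter_area
    return inter_area / union_area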
import numpy as np
import itertools
from .contrib import compress_filter, smooth, residual_model
from .contrib import reduce_interferences
def expectation_maximization(y, x, iterations=2, verbose=0, eps=None):
r"""Expectation maximization algorithm, for refining source separation
estimates.
This algorithm allows to make source separation results better by
enforcing multichannel consistency for the estimates. This usually means
a better perceptual quality in terms of spatial artifacts.
The implementation follows the details presented in [1]_, taking
inspiration from the original EM algorithm proposed in [2]_ and its
weighted refinement proposed in [3]_, [4]_.
It works by iteratively:
* Re-estimate source parameters (power spectral densities and spatial
covariance matrices) through :func:`get_local_gaussian_model`.
* Separate again the mixture with the new parameters by first computing
the new modelled mixture covariance matrices with :func:`get_mix_model`,
prepare the Wiener filters through :func:`wiener_gain` and apply them
    with :func:`apply_filter`.
References
----------
.. [1] <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [3] <NAME> and <NAME> and <NAME>. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [4] <NAME> and <NAME> and <NAME>. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [5] <NAME> and <NAME> and <NAME> "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Parameters
----------
y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
initial estimates for the sources
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
complex STFT of the mixture signal
iterations: int [scalar]
number of iterations for the EM algorithm.
verbose: boolean
display some information if True
eps: float or None [scalar]
The epsilon value to use for regularization and filters.
If None, the default will use the epsilon of np.real(x) dtype.
Returns
-------
y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
estimated sources after iterations
v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
estimated power spectral densities
R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
estimated spatial covariance matrices
Note
-----
* You need an initial estimate for the sources to apply this
algorithm. This is precisely what the :func:`wiener` function does.
* This algorithm *is not* an implementation of the "exact" EM
      proposed in [1]_. In particular, it does not compute the posterior
      covariance matrices the same (exact) way. Instead, it uses the
simplified approximate scheme initially proposed in [5]_ and further
refined in [3]_, [4]_, that boils down to just take the empirical
covariance of the recent source estimates, followed by a weighted
average for the update of the spatial covariance matrix. It has been
empirically demonstrated that this simplified algorithm is more
robust for music separation.
Warning
-------
It is *very* important to make sure `x.dtype` is `np.complex`
if you want double precision, because this function will **not**
do such conversion for you from `np.complex64`, in case you want the
smaller RAM usage on purpose.
It is usually always better in terms of quality to have double
precision, by e.g. calling :func:`expectation_maximization`
with ``x.astype(np.complex)``.
This is notably needed if you let common deep learning frameworks like
PyTorch or TensorFlow do the STFT, because this usually happens in
single precision.
"""
# to avoid dividing by zero
if eps is None:
eps = np.finfo(np.real(x[0]).dtype).eps
# dimensions
(nb_frames, nb_bins, nb_channels) = x.shape
nb_sources = y.shape[-1]
# allocate the spatial covariance matrices and PSD
R = np.zeros((nb_bins, nb_channels, nb_channels, nb_sources), x.dtype)
    v = np.zeros((nb_frames, nb_bins, nb_sources))
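    # The snippet is truncated here. Based on the helper functions named in the
    # docstring (their exact signatures are not shown, so this is only a sketch):
    #
    #     for _ in range(iterations):
    #         # re-estimate source parameters from the current estimates
    #         for j in range(nb_sources):
    #             v[..., j], R[..., j] = get_local_gaussian_model(y[..., j], eps)
    #         # re-separate the mixture with the updated model
    #         Cxx = get_mix_model(v, R)
    #         for j in range(nb_sources):
    #             W = wiener_gain(v[..., j], R[..., j], Cxx)
    #             y[..., j] = apply_filter(x, W)
    #
    #     return y, v, R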
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensornetwork as tn
from tensornetwork.backends import backend_factory
from tensornetwork.matrixproductstates.base_mps import BaseMPS
import tensorflow as tf
from jax.config import config
config.update("jax_enable_x64", True)
tf.compat.v1.enable_v2_behavior()
@pytest.fixture(
name="backend_dtype_values",
params=[('numpy', np.float64), ('numpy', np.complex128),
('tensorflow', np.float64), ('tensorflow', np.complex128),
('pytorch', np.float64), ('jax', np.float64)])
def backend_dtype(request):
return request.param
def get_random_np(shape, dtype, seed=0):
np.random.seed(seed) #get the same tensors every time you call this function
    if dtype in (np.complex64, np.complex128):
        return (np.random.randn(*shape) + 1j * np.random.randn(*shape)).astype(dtype)
    return np.random.randn(*shape).astype(dtype)
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Amplitude Estimation Algorithm.
"""
from typing import Optional
import logging
from collections import OrderedDict
import numpy as np
from scipy.stats import chi2, norm
from scipy.optimize import bisect
from qiskit.aqua import AquaError
from qiskit.aqua.utils import CircuitFactory
from qiskit.aqua.circuits import PhaseEstimationCircuit
from qiskit.aqua.components.iqfts import IQFT, Standard
from qiskit.aqua.utils.validation import validate_min
from .ae_algorithm import AmplitudeEstimationAlgorithm
from .ae_utils import pdf_a, derivative_log_pdf_a, bisect_max
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class AmplitudeEstimation(AmplitudeEstimationAlgorithm):
"""
The Amplitude Estimation algorithm.
"""
def __init__(self, num_eval_qubits: int,
a_factory: Optional[CircuitFactory] = None,
i_objective: Optional[int] = None,
q_factory: Optional[CircuitFactory] = None,
iqft: Optional[IQFT] = None) -> None:
"""
Args:
num_eval_qubits: number of evaluation qubits, has a min. value of 1.
a_factory: the CircuitFactory subclass object representing
the problem unitary
i_objective: i objective
q_factory: the CircuitFactory subclass object representing an
amplitude estimation sample (based on a_factory)
iqft: the Inverse Quantum Fourier Transform component,
defaults to using a standard iqft when None
"""
validate_min('num_eval_qubits', num_eval_qubits, 1)
super().__init__(a_factory, q_factory, i_objective)
# get parameters
self._m = num_eval_qubits
self._M = 2 ** num_eval_qubits
if iqft is None:
iqft = Standard(self._m)
self._iqft = iqft
self._circuit = None
self._ret = {}
@property
def _num_qubits(self):
if self.a_factory is None: # if A factory is not set, no qubits are specified
return 0
num_ancillas = self.q_factory.required_ancillas_controlled()
num_qubits = self.a_factory.num_target_qubits + self._m + num_ancillas
return num_qubits
def construct_circuit(self, measurement=False):
"""
Construct the Amplitude Estimation quantum circuit.
Args:
measurement (bool): Boolean flag to indicate if measurement
should be included in the circuit.
Returns:
QuantumCircuit: the QuantumCircuit object for the constructed circuit
"""
pec = PhaseEstimationCircuit(
iqft=self._iqft, num_ancillae=self._m,
state_in_circuit_factory=self.a_factory,
unitary_circuit_factory=self.q_factory
)
self._circuit = pec.construct_circuit(measurement=measurement)
return self._circuit
def _evaluate_statevector_results(self, probabilities):
# map measured results to estimates
y_probabilities = OrderedDict()
for i, probability in enumerate(probabilities):
b = '{0:b}'.format(i).rjust(self._num_qubits, '0')[::-1]
y = int(b[:self._m], 2)
y_probabilities[y] = y_probabilities.get(y, 0) + probability
a_probabilities = OrderedDict()
for y, probability in y_probabilities.items():
if y >= int(self._M / 2):
y = self._M - y
a = np.round(np.power(np.sin(y * np.pi / 2 ** self._m), 2),
decimals=7)
a_probabilities[a] = a_probabilities.get(a, 0) + probability
return a_probabilities, y_probabilities
def _compute_fisher_information(self, observed=False):
fisher_information = None
mlv = self._ret['ml_value'] # MLE in [0,1]
m = self._m
if observed:
ai = np.asarray(self._ret['values'])
pi = np.asarray(self._ret['probabilities'])
# Calculate the observed Fisher information
fisher_information = sum(p * derivative_log_pdf_a(a, mlv, m)**2 for p, a in zip(pi, ai))
else:
def integrand(x):
return (derivative_log_pdf_a(x, mlv, m))**2 * pdf_a(x, mlv, m)
M = 2**m
grid = np.sin(np.pi * np.arange(M / 2 + 1) / M)**2
fisher_information = sum(integrand(x) for x in grid)
return fisher_information
def _fisher_ci(self, alpha, observed=False):
shots = self._ret['shots']
mle = self._ret['ml_value']
std = np.sqrt(shots * self._compute_fisher_information(observed))
ci = mle + norm.ppf(1 - alpha / 2) / std * np.array([-1, 1])
return [self.a_factory.value_to_estimation(bound) for bound in ci]
def _likelihood_ratio_ci(self, alpha):
# Compute the two intervals in which we the look for values above
# the likelihood ratio: the two bubbles next to the QAE estimate
M = 2**self._m
qae = self._ret['value']
y = M * np.arcsin(np.sqrt(qae)) / np.pi
left_of_qae = np.sin(np.pi * (y - 1) / M)**2
right_of_qae = np.sin(np.pi * (y + 1) / M)**2
bubbles = [left_of_qae, qae, right_of_qae]
# likelihood function
ai = np.asarray(self._ret['values'])
pi = np.asarray(self._ret['probabilities'])
m = self._m
shots = self._ret['shots']
def loglikelihood(a):
return np.sum(shots * pi * np.log(pdf_a(ai, a, m)))
# The threshold above which the likelihoods are in the
# confidence interval
loglik_mle = loglikelihood(self._ret['ml_value'])
thres = loglik_mle - chi2.ppf(1 - alpha, df=1) / 2
def cut(x):
return loglikelihood(x) - thres
# Store the boundaries of the confidence interval
lower = upper = self._ret['ml_value']
# Check the two intervals/bubbles: check if they surpass the
# threshold and if yes add the part that does to the CI
for a, b in zip(bubbles[:-1], bubbles[1:]):
# Compute local maximum and perform a bisect search between
# the local maximum and the bubble boundaries
locmax, val = bisect_max(loglikelihood, a, b, retval=True)
if val >= thres:
# Bisect pre-condition is that the function has different
# signs at the boundaries of the interval we search in
if cut(a) * cut(locmax) < 0:
left = bisect(cut, a, locmax)
lower = np.minimum(lower, left)
if cut(locmax) * cut(b) < 0:
right = bisect(cut, locmax, b)
upper = np.maximum(upper, right)
# Put together CI
ci = [lower, upper]
return [self.a_factory.value_to_estimation(bound) for bound in ci]
def confidence_interval(self, alpha, kind='likelihood_ratio'):
"""
Compute the (1 - alpha) confidence interval
Args:
alpha (float): confidence level: compute the (1 - alpha) confidence interval
kind (str): the method to compute the confidence interval, can be 'fisher',
'observed_fisher' or 'likelihood_ratio' (default)
Returns:
list[float]: the (1 - alpha) confidence interval
Raises:
AquaError: if 'mle' is not in self._ret.keys() (i.e. `run` was not called yet)
NotImplementedError: if the confidence interval method `kind` is not implemented
"""
# check if AE did run already
if 'mle' not in self._ret.keys():
raise AquaError('Call run() first!')
# if statevector simulator the estimate is exact
if self._quantum_instance.is_statevector:
return 2 * [self._ret['estimation']]
if kind in ['likelihood_ratio', 'lr']:
return self._likelihood_ratio_ci(alpha)
if kind in ['fisher', 'fi']:
return self._fisher_ci(alpha, observed=False)
if kind in ['observed_fisher', 'observed_information', 'oi']:
return self._fisher_ci(alpha, observed=True)
raise NotImplementedError('CI `{}` is not implemented.'.format(kind))
def _run_mle(self):
"""
Compute the Maximum Likelihood Estimator (MLE)
Returns:
The MLE for the previous AE run
Note: Before calling this method, call the method `run` of the AmplitudeEstimation instance
"""
M = self._M
qae = self._ret['value']
# likelihood function
ai = np.asarray(self._ret['values'])
pi = np.asarray(self._ret['probabilities'])
m = self._m
shots = self._ret['shots']
def loglikelihood(a):
return np.sum(shots * pi * np.log(pdf_a(ai, a, m)))
# y is pretty much an integer, but to map 1.9999 to 2 we must first
# use round and then int conversion
y = int(np.round(M * np.arcsin(np.sqrt(qae)) / np.pi))
# Compute the two intervals in which are candidates for containing
# the maximum of the log-likelihood function: the two bubbles next to
# the QAE estimate
bubbles = None
if y == 0:
right_of_qae = np.sin(np.pi * (y + 1) / M)**2
bubbles = [qae, right_of_qae]
elif y == int(M / 2):
left_of_qae = np.sin(np.pi * (y - 1) / M)**2
bubbles = [left_of_qae, qae]
else:
left_of_qae = np.sin(np.pi * (y - 1) / M)**2
            right_of_qae = np.sin(np.pi * (y + 1) / M)**2
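            bubbles = [left_of_qae, qae, right_of_qae]
        # The snippet is truncated here. A sketch of the remaining steps (not the
        # verbatim implementation): scan each pair of neighbouring bubbles with
        # bisect_max and keep the maximizer of the log-likelihood:
        #
        #     a_opt, loglik_opt = qae, loglikelihood(qae)
        #     for a, b in zip(bubbles[:-1], bubbles[1:]):
        #         locmax, val = bisect_max(loglikelihood, a, b, retval=True)
        #         if val > loglik_opt:
        #             a_opt, loglik_opt = locmax, val
        #     self._ret['ml_value'] = a_opt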
import numpy as np
import sys
from os import path, getcwd, makedirs
import pickle
from utils import get_filename
from plotter_params import plot_setup
import scipy.stats as stats
import matplotlib.pyplot as plt
import pylab as pl
import matplotlib.ticker as ticker
plot_setup()
epsilon = 0.1
length = 160
filename = path.join(getcwd(), "data", "full_trace_estimations",
f"epsilon_{epsilon}_length_{length}".replace('.', '_'))
with open(path.join(filename, 'true_process_fidelity.pickle'), 'rb') as f:
true_pf = pickle.load(f)
with open(path.join(filename, 'true_zero_fidelity.pickle'), 'rb') as f:
true_zf = pickle.load(f)
with open(path.join(filename, 'process_fidelity_estimates.pickle'), 'rb') as f:
proc_fs = pickle.load(f)
with open(path.join(filename, 'zero_fidelity_estimates.pickle'), 'rb') as f:
zero_fs = pickle.load(f)
nbin = 80
pf_diffs = [i - true_pf for i in proc_fs[::]]
pf_range = np.max(pf_diffs) - np.min(pf_diffs)
pf_width = pf_range/nbin
p_fidels = sorted(pf_diffs)
fmean = np.mean(p_fidels)
fx, fy, _ = plt.hist(p_fidels, bins=nbin, alpha=0.6, density=True, label='Fidelity')
f_fit = stats.norm.pdf(p_fidels, fmean, np.std(p_fidels))
f_fit = f_fit*(np.max(fx)/np.max(f_fit))
import sys
import os
import platform
from xoppylib.xoppy_util import locations
import numpy
import scipy.constants as codata
#
# WS
#
def xoppy_calc_ws(ENERGY=7.0, CUR=100.0, PERIOD=8.5, N=28.0, KX=0.0, KY=8.739999771118164, \
EMIN=1000.0, EMAX=100000.0, NEE=2000, D=30.0, XPC=0.0, YPC=0.0, XPS=2.0, YPS=2.0, NXP=10, NYP=10):
print("Inside xoppy_calc_ws. ")
try:
with open("ws.inp", "wt") as f:
f.write("inputs from xoppy \n")
f.write("%f %f\n" % (ENERGY, CUR))
f.write("%f %d %f %f\n" % (PERIOD, N, KX, KY))
f.write("%f %f %d\n" % (EMIN, EMAX, NEE))
f.write("%f %f %f %f %f %d %d\n" % (D, XPC, YPC, XPS, YPS, NXP, NYP))
f.write("%d \n" % (4))
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(), 'ws.exe') + "\""
else:
command = "'" + os.path.join(locations.home_bin(), 'ws') + "'"
print("Running command '%s' in directory: %s \n" % (command, locations.home_bin_run()))
# TODO try to capture the text output of the external code
os.system(command)
# write spec file
txt = open("ws.out").readlines()
outFile = os.path.join(locations.home_bin_run(), "ws.spec")
f = open(outFile, "w")
f.write("#F ws.spec\n")
f.write("\n")
f.write("#S 1 ws results\n")
f.write("#N 4\n")
f.write("#L Energy(eV) Flux(photons/s/0.1%bw) Spectral power(W/eV) Cumulated power(W)\n")
cum = 0.0
estep = (EMAX - EMIN) / (NEE - 1)
for i in txt:
tmp = i.strip(" ")
if tmp[0].isdigit():
tmp1 = numpy.fromstring(tmp, dtype=float, sep=' ')
cum += tmp1[1] * codata.e * 1e3
f.write("%f %g %f %f \n" % (tmp1[0], tmp1[1], tmp1[1] * codata.e * 1e3, cum * estep))
else:
f.write("#UD " + tmp)
f.close()
print("File written to disk: ws.spec")
# print output file
# for line in txt:
# print(line, end="")
print("Results written to file: %s" % os.path.join(locations.home_bin_run(), 'ws.out'))
return outFile
except Exception as e:
raise e
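# Hedged usage sketch: run the WS calculation with (mostly) the default parameters and
# load the resulting SPEC-like file; requires the external 'ws' binary to be installed.
def _demo_xoppy_calc_ws():
    import numpy
    spec_file = xoppy_calc_ws(ENERGY=7.0, CUR=100.0, PERIOD=8.5, N=28.0, KY=8.74,
                              EMIN=1000.0, EMAX=100000.0, NEE=2000)
    # columns: energy (eV), flux, spectral power (W/eV), cumulated power (W)
    data = numpy.loadtxt(spec_file, comments='#')
    print(data.shape)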
#
# TUNING CURVES
#
def xoppy_calc_xtc(
ENERGY = 7.0,
CURRENT = 100.0,
ENERGY_SPREAD = 0.00096,
SIGX = 0.274,
SIGY = 0.011,
SIGX1 = 0.0113,
SIGY1 = 0.0036,
PERIOD = 3.23,
NP = 70,
EMIN = 2950.0,
EMAX = 13500.0,
N = 40,
HARMONIC_FROM = 1,
HARMONIC_TO = 15,
HARMONIC_STEP = 2,
HELICAL = 0,
METHOD = 1,
NEKS = 100, ):
for file in ["tc.inp","tc.out"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
with open("tc.inp", "wt") as f:
f.write("TS called from xoppy\n")
f.write("%10.3f %10.2f %10.6f %s\n"%(ENERGY,CURRENT,ENERGY_SPREAD,"Ring-Energy(GeV) Current(mA) Beam-Energy-Spread"))
f.write("%10.4f %10.4f %10.4f %10.4f %s\n"%(SIGX,SIGY,SIGX1,SIGY1,"Sx(mm) Sy(mm) Sx1(mrad) Sy1(mrad)"))
f.write("%10.3f %d %s\n"%(PERIOD,NP,"Period(cm) N"))
f.write("%10.1f %10.1f %d %s\n"%(EMIN,EMAX,N,"Emin Emax Ne"))
f.write("%d %d %d %s\n"%(HARMONIC_FROM,HARMONIC_TO,HARMONIC_STEP,"Hmin Hmax Hstep"))
f.write("%d %d %d %d %s\n"%(HELICAL,METHOD,1,NEKS,"Helical Method Print_K Neks"))
f.write("foreground\n")
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(),'tc.exe') + "\""
else:
command = "'" + os.path.join(locations.home_bin(), 'tc') + "'"
print("Running command '%s' in directory: %s "%(command, locations.home_bin_run()))
print("\n--------------------------------------------------------\n")
os.system(command)
print("Output file: %s"%("tc.out"))
print("\n--------------------------------------------------------\n")
#
# parse result files to exchange object
#
with open("tc.out","r") as f:
lines = f.readlines()
# print output file
# for line in lines:
# print(line, end="")
# remove returns
lines = [line[:-1] for line in lines]
harmonics_data = []
# separate numerical data from text
floatlist = []
harmoniclist = []
txtlist = []
for line in lines:
try:
tmp = line.strip()
if tmp.startswith("Harmonic"):
harmonic_number = int(tmp.split("Harmonic")[1].strip())
if harmonic_number != HARMONIC_FROM:
harmonics_data[-1][1] = harmoniclist
harmoniclist = []
harmonics_data.append([harmonic_number, None])
tmp = float(line.strip()[0])
floatlist.append(line)
harmoniclist.append(line)
except:
txtlist.append(line)
harmonics_data[-1][1] = harmoniclist
data = numpy.loadtxt(floatlist)
for index in range(0, len(harmonics_data)):
# print (harmonics_data[index][0], harmonics_data[index][1])
harmonics_data[index][1] = numpy.loadtxt(harmonics_data[index][1])
return data, harmonics_data
def xoppy_calc_xtcap(
ENERGY = 6.0,
CURRENT = 200.0,
ENERGY_SPREAD = 0.00138,
SIGX = 0.0148,
SIGY = 0.0037,
SIGX1 = 0.0029,
SIGY1 = 0.0015,
PERIOD = 2.8,
NP = 84,
EMIN = 3217.0,
EMAX = 11975.0,
N = 50,
DISTANCE = 30.0,
XPS = 1.0,
YPS = 1.0,
XPC = 0.0,
YPC = 0.0,
HARMONIC_FROM = 1,
HARMONIC_TO = 7,
HARMONIC_STEP = 2,
HRED = 0,
HELICAL = 0,
NEKS = 100,
METHOD = 0,
BSL = 0,
):
for file in ["tcap.inp","tcap.out","tcap.log"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
pass
with open("tcap.inp", "wt") as f:
f.write("TCAP called from xoppy\n")
f.write("%10.3f %10.2f %10.6f %s\n"%(ENERGY,CURRENT,ENERGY_SPREAD,"Ring-Energy(GeV) Current(mA) Beam-Energy-Spread"))
f.write("%10.4f %10.4f %10.4f %10.4f %s\n"%(SIGX,SIGY,SIGX1,SIGY1,"Sx(mm) Sy(mm) Sx1(mrad) Sy1(mrad)"))
f.write("%10.3f %d %s\n"%(PERIOD,NP,"Period(cm) N"))
f.write("%10.1f %10.1f %d %s\n"%(EMIN,EMAX,N,"Emin Emax Ne"))
f.write("%10.3f %10.3f %10.3f %10.3f %10.3f %d %d %s\n"%(DISTANCE,XPC,YPC,XPS,YPS,10,10,"d xpc ypc xps yps nxp nyp"))
f.write("%d %d %d %d %s\n"%(HARMONIC_FROM,HARMONIC_TO,HARMONIC_STEP,HRED,"Hmin Hmax Hstep Hreduction"))
f.write("%d %d %d %d %d %s\n"%(HELICAL,METHOD,1,NEKS,BSL,"Helical Method Print_K Neks Bsl-Subtr "))
f.write("foreground\n")
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(),'tcap.exe') + "\""
else:
command = "'" + os.path.join(locations.home_bin(), 'tcap') + "'"
print("Running command '%s' in directory: %s "%(command, locations.home_bin_run()))
print("\n--------------------------------------------------------\n")
# os.system(command)
#
# catch the optut and write the output to a log file as well as print it.
#
retvalue = os.popen(command).read()
print(retvalue)
with open("tcap.log", "wt") as f:
f.write(retvalue)
print("Output file: '%s/tcap.out'"%(os.getcwd()) )
print("\n--------------------------------------------------------\n")
#
# parse result files to exchange object
#
with open("tcap.out","r") as f:
lines = f.readlines()
# remove returns
lines = [line[:-1] for line in lines]
harmonics_data = []
# separate numerical data from text
floatlist = []
harmoniclist = []
txtlist = []
for line in lines:
try:
tmp = line.strip()
if tmp.startswith("Harmonic"):
# harmonic_number = int(tmp.split("Harmonic")[1].strip())
harmonic_number = int(tmp.split()[1])
if harmonic_number != HARMONIC_FROM:
harmonics_data[-1][1] = harmoniclist
harmoniclist = []
harmonics_data.append([harmonic_number, None])
tmp = float(line.strip()[0])
floatlist.append(line)
harmoniclist.append(line)
except:
txtlist.append(line)
harmonics_data[-1][1] = harmoniclist
data = numpy.loadtxt(floatlist)
for index in range(0, len(harmonics_data)):
# print (harmonics_data[index][0], harmonics_data[index][1])
harmonics_data[index][1] = numpy.loadtxt(harmonics_data[index][1])
return data, harmonics_data
#
# YAUP
#
def run_external_binary(binary="ls", post_command="", info=""):
if platform.system() == "Windows":
command = "\"" + os.path.join(locations.home_bin(), '%s.exe' % binary) + "\""
else:
command = "'" + os.path.join(locations.home_bin(), binary) + "'"
command += " " + post_command
print("Running command '%s' in directory: %s " % (command, locations.home_bin_run()))
print("\n--------------------------------------------------------\n")
os.system(command)
if info != "":
print(info)
print("\n--------------------------------------------------------\n")
def xoppy_calc_yaup(
#yaup
TITLE = "YAUP EXAMPLE (ESRF BL-8)",
PERIOD = 4.0,
NPER = 42,
NPTS = 40,
EMIN = 3000.0,
EMAX = 30000.0,
NENERGY = 100,
ENERGY = 6.04,
CUR = 0.1,
SIGX = 0.426,
SIGY = 0.085,
SIGX1 = 0.017,
SIGY1 = 0.0085,
D = 30.0,
XPC = 0.0,
YPC = 0.0,
XPS = 2.0,
YPS = 2.0,
NXP = 69,
NYP = 69,
MODE = 4,
NSIG = 2,
TRAJECTORY = "new+keep",
XSYM = "yes",
HANNING = 0,
BFILE = "undul.bf",
TFILE = "undul.traj",
# B field
BFIELD_FLAG = 1,
BFIELD_ASCIIFILE = "",
PERIOD_BFIELD = 4.0,
NPER_BFIELD = 42,
NPTS_BFIELD = 40,
IMAGNET = 0,
ITYPE = 0,
K = 1.38,
GAP = 2.0,
GAPTAP = 10.0,
FILE = "undul.bf",
I2TYPE = 0,
A1 = 0.5,
A2 = 1.0,
):
for file in ["bfield.inp","bfield.out","bfield.dat","u2txt_bfield.inp",
"yaup.inp", "yaup-0.out","undul.bf",
"u2txt_traj.inp","undul_traj.dat"]:
try:
os.remove(os.path.join(locations.home_bin_run(),file))
except:
print("Failed to remove file: %s " % (os.path.join(locations.home_bin_run(),file)) )
if BFIELD_FLAG == 0:
#TODO: test this option...
message = ''
message += 'This option takes an ASCII file and convert it to YAUP format.'
message += 'The text file should be column-formatted, and contain three colums:'
message += ' z, B(z), and phi(z), where the z s are equidistant with step '
message += ' PERIOD/NPTS. See HELP/YAUP for definitions of PERIOD and NPTS.'
message += ' There should be NPTS*NPER+1 lines in the ASCII file.'
ok = showConfirmMessage(message, "OK?")
if not ok: return
        f = open('txt2u.inp', 'w')
        f.write("%s\n" % (BFIELD_ASCIIFILE))
        f.write("%s\n" % (BFILE))
        f.write("%g\n" % (PERIOD_BFIELD))
        f.write("%d\n" % (NPER_BFIELD))  # number of periods (was PERIOD_BFIELD, a likely typo)
        f.write("%d\n" % (NPTS_BFIELD))
        f.close()
run_external_binary(binary="txt2u", post_command="< txt2u.inp",
info="Output file should be: %s" % BFILE)
elif BFIELD_FLAG == 1:
with open("bfield.inp", "wt") as f:
f.write("%g\n" % (PERIOD_BFIELD))
f.write("%d\n" % (NPER_BFIELD))
f.write("%d\n" % (NPTS_BFIELD))
f.write("%d\n" % (1 + ITYPE))
if ITYPE == 0:
f.write("%g\n" % (K))
elif ITYPE == 1:
f.write("%g\n" % (GAP))
f.write("%g\n" % (GAPTAP))
f.write("%s\n" % (FILE))
with open("u2txt_bfield.inp", "wt") as f:
f.write("1\n")
f.write("%s\n" % (FILE))
f.write("bfield.dat\n")
if IMAGNET == 0:
run_external_binary(binary="bfield", post_command="< bfield.inp > bfield.out", info="Output file: bfield.out")
elif IMAGNET == 1:
run_external_binary(binary="bfield2", post_command="< bfield.inp > bfield.out", info="Output file: bfield.out")
run_external_binary(binary="u2txt", post_command="< u2txt_bfield.inp", info="Output file should be bfield.dat")
elif BFIELD_FLAG == 2:
n = NPER
lambdau = PERIOD_BFIELD
npts_per = NPTS_BFIELD
if ITYPE == 0:
b1 = A1
b2 = A2
else:
b1 = A1/0.934/PERIOD_BFIELD
b2 = A2/0.934/PERIOD_BFIELD
und_len = lambdau * npts_per
z = numpy.arange( n * npts_per + 1) / float( n * npts_per)
z *= und_len
bmod = numpy.arange(n * npts_per + 1) / float( n * npts_per) * (b2 - b1) + b1
        berr = numpy.arange(n * npts_per + 1)
import numpy as np
from scipy import interpolate, optimize
from scipy.integrate import cumtrapz
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from frenet_path import *
Kern = lambda x: (3/4)*(1-np.power(x,2))*(np.abs(x)<1)
Kern_bis = lambda x,delta: np.power((1 - np.power((np.abs(x)/delta),3)), 3)
class Trajectory:
"""
A class used to represent a 3D curve.
...
Attributes
----------
data : numpy array of shape (N,3) that contained the coordinates of the curve
t : numpy array of shape N, time of each point in data, supposed to be croissant
dim : 3
t0 : float, initial time value
tmax : float, final time value
scale : Boolean, True if the curve is scale, False otherwise
dX1 : function, estimated first derivative
dX2 : function, estimated second derivative
dX3 : function, estimated third derivative
S : function, estimated of the arclenght function
Sdot : function, estimated derivative of the arclenght function
L : float, estimated lenght of the curve
curv_extrins : function, extrinsic estimates of the curvature
tors_extrins : function, extrinsic estimates of the torsion
Methods
-------
loc_poly_estimation(t_out, deg, h):
estimation of derivatives using local polynomial regression with parameters "h" and degree "deg" evaluted of the grid "t_out"
compute_S(scale=False):
compute the arclenght function, the lenght of the curve and scale it if "scale" equals True.
scale():
scale the curve, needs to have run compute_S before.
TNB_GramSchmidt(t):
compute the T,N,B frame of the curve from the pointwise estimated derivatives (of Higher Order : 1,2,3) by Gram-Schmidt Orthonormalization on t
return: instance of class FrenetPath
theta_extrinsic_formula(t):
compute the curvature and torsion functions from the pointwise estimated derivatives (of Higher Order : 1,2,3) computed by the classical formulas
BECAREFUL very unstable (numerically ill-posed).
return: pointwise estimate of curvature, pointwise estimate of torsion
TNB_locPolyReg(grid_in, grid_out, h, p=3, iflag=[1,1], ibound=0, local=True):
TNB estimates based on constrained local polynomial regression |T|=1, <T,N>=0
b0 + b1(t-t_0)+b2(t-t0)^2/2 + b3(t-t0)^3/6 + ... + bp(t-t0)^p/p!, |b1|=1, <b1,b2>=0
minimize (Y-XB)'W(Y-XB) -la*(|b1|^2-1) - mu(2*<b1,b2>)
inputs:
grid_in - input grid
grid_out - output grid
h - scalar
p - degree of polynomial (defaul = 3)
iflag - [1,1] for both constraints, [1,0] for |b1|=1, [0,1] for <b1,b2>=0
ibound - 1 for boundary correction, 0 by default
local - True for local version, False for regular version
return:
Q - instance of class FrenetPath
kappa - [kappa, kappap, tau]
Param - estimates with constraints
Param0 - estimates without constraints
vparam - [la, mu, vla, vmu] tuning parameters
[la, mu]: optimal values amongst vla, and vmu
success - True if a solution was found for all point, False otherwise
"""
def __init__(self, data, t):
self.t = t
self.data = data
self.dim = data.shape[1]
self.t0 = np.min(t)
self.tmax = np.max(t)
self.scale = False
def loc_poly_estimation(self, t_out, deg, h):
pre_process = PolynomialFeatures(degree=deg)
deriv_estim = np.zeros((len(t_out),(deg+1)*self.dim))
for i in range(len(t_out)):
T = self.t - t_out[i]
# print(T)
W = Kern(T/h)
# print(W)
T_poly = pre_process.fit_transform(T.reshape(-1,1))
for j in range(deg+1):
T_poly[:,j] = T_poly[:,j]/np.math.factorial(j)
pr_model = LinearRegression(fit_intercept = False)
pr_model.fit(T_poly, self.data, W)
B = pr_model.coef_
deriv_estim[i,:] = B.reshape(1,(deg+1)*self.dim, order='F')
self.derivatives = deriv_estim
def dx1(t): return interpolate.griddata(self.t, deriv_estim[:,3:6], t, method='cubic')
self.dX1 = dx1
def dx2(t): return interpolate.griddata(self.t, deriv_estim[:,6:9], t, method='cubic')
self.dX2 = dx2
def dx3(t): return interpolate.griddata(self.t, deriv_estim[:,9:12], t, method='cubic')
self.dX3 = dx3
def compute_S(self, scale=False):
def Sdot_fun(t): return np.linalg.norm(self.dX1(t), axis=1)
self.Sdot = Sdot_fun
def S_fun(t): return cumtrapz(self.Sdot(t), t, initial=0)
# S_fun = interpolate.interp1d(self.t, cumtrapz(self.Sdot(self.t), self.t, initial=0))
self.L = S_fun(self.t)[-1]
# print(self.L)
if scale==True:
self.scale = True
def S_fun_scale(t): return cumtrapz(self.Sdot(t), t, initial=0)/self.L
# S_fun_scale = interpolate.interp1d(self.t, cumtrapz(self.Sdot(self.t), self.t, initial=0)/self.L)
self.S = S_fun_scale
self.data = self.data/self.L
else:
self.S = S_fun
def scale(self):
self.scale = True
def S_fun_scale(t): return cumtrapz(self.Sdot(t), t, initial=0)/self.L
self.S = S_fun_scale
self.data = self.data/self.L
def TNB_GramSchmidt(self, t_grid):
def GramSchmidt(DX1, DX2, DX3):
normdX1 = np.linalg.norm(DX1)
normdX2 = np.linalg.norm(DX2)
normdX3 = np.linalg.norm(DX3)
T = DX1/normdX1
N = DX2 - np.dot(np.transpose(T),DX2)*T
N = N/np.linalg.norm(N)
B = DX3 - np.dot(np.transpose(N),DX3)*N - np.dot(np.transpose(T),DX3)*T
B = B/np.linalg.norm(B)
Q = np.stack((T, N, B))
if np.linalg.det(Q)<0:
B = -B
                Q = np.stack((T, N, B))
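            # The snippet is truncated here; presumably GramSchmidt finishes with
            # `return Q`, and TNB_GramSchmidt then evaluates the estimated derivatives
            # on t_grid, orthonormalizes each point, and wraps the frames in a FrenetPath.
            return Q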
# Copyright 2020 SAS Project Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Useful geo routines. Outside regular use"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import shapely.geometry as sgeo
from shapely import affinity
from shapely import ops  # needed by GridPolygonApprox (ops.unary_union)
from reference_models.geo import utils
from reference_models.geo import vincenty
# Earth ellipsoidal parameters.
_EARTH_MEAN_RADIUS_KM = 6371.0088 # By IUGG
_WGS_EQUATORIAL_RADIUS_KM2 = 6378.137
_WGS_POLAR_RADIUS_KM2 = 6356.753
_EQUATORIAL_DIST_PER_DEGREE = 2 * np.pi * _WGS_EQUATORIAL_RADIUS_KM2 / 360
_POLAR_DIST_PER_DEGREE = 2 * np.pi * _WGS_POLAR_RADIUS_KM2 / 360
def _ProjectEqc(geometry, ref_latitude=None):
"""Projects a geometry using equirectangular projection.
Args:
geometry: A |shapely| geometry with lon,lat coordinates.
ref_latitude: A reference latitude for the Eqc projection. If None, using
the centroid of the geometry.
Returns:
A tuple of:
the same geometry in equirectangular projection.
the reference latitude parameter used for the equirectangular projection.
"""
if ref_latitude is None:
ref_latitude = geometry.centroid.y
geometry = affinity.affine_transform(
geometry,
(_EQUATORIAL_DIST_PER_DEGREE * np.cos(np.radians(ref_latitude)), 0.0,
0.0, _POLAR_DIST_PER_DEGREE,
0, 0))
return geometry, ref_latitude
def _InvProjectEqc(geometry, ref_latitude):
"""Returns the inverse equirectangular projection of a geometry.
Args:
geometry: A |shapely| geometry with lon,lat coordinates.
ref_latitude: The reference latitude of the equirectangular projection.
"""
geometry = affinity.affine_transform(
geometry,
(1./(_EQUATORIAL_DIST_PER_DEGREE * np.cos(np.radians(ref_latitude))), 0.0,
0.0, 1./_POLAR_DIST_PER_DEGREE,
0, 0))
return geometry
def Buffer(geometry, distance_km, ref_latitude=None, **kwargs):
"""Returns a geometry with an enveloppe at a given distance (in km).
This uses the traditional shapely `buffer` method but on the reprojected
geometry. As such this is somewhat approximate depending on the size of the
geometry and the buffering distance.
Args:
geometry: A |shapely| or geojson geometry.
distance_km: The buffering distance in km.
ref_latitude: A reference latitude for the Eqc projection. If None, using
the centroid of the geometry.
**kwargs: The optional parameters forwarded to the shapely `buffer` routine:
(for example: resolution, cap_style, join_style)
"""
geom = utils.ToShapely(geometry)
proj_geom, ref_latitude = _ProjectEqc(geom, ref_latitude)
proj_geom = proj_geom.buffer(distance_km, **kwargs)
geom = _InvProjectEqc(proj_geom, ref_latitude)
if isinstance(geometry, sgeo.base.BaseGeometry):
return geom
else:
return utils.ToGeoJson(geom, as_dict=isinstance(geometry, dict))
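# Illustrative usage sketch (arbitrary coordinates): buffer a small rectangle by 2 km.
# Because the buffering is done in the equirectangular plane, the result is only
# approximate for very large geometries or buffer distances.
def _ExampleBuffer():
  poly = sgeo.Polygon([(-80.0, 40.0), (-80.0, 40.1), (-79.9, 40.1), (-79.9, 40.0)])
  buffered = Buffer(poly, distance_km=2.0)
  return buffered.bounds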
def GridPolygonApprox(poly, res_km):
"""Grids a polygon or multi-polygon with approximate resolution (in km).
This is a replacement of `geo.utils.GridPolygon()` for gridding with
approximate metric distance on both latitude and longitude directions. The
regular gridding is still done in PlateCarree (equirectangular) projection,
but with different steps in degrees in lat and long direction.
Points falling on the boundary of the polygon will be included.
Args:
poly: A Polygon or MultiPolygon in WGS84 or NAD83, defined either as a
shapely, GeoJSON (dict or str) or generic geometry.
A generic geometry is any object implementing the __geo_interface__
protocol.
res_km: The resolution (in km) used for gridding.
Returns:
A list of (lon, lat) defining the grid points.
"""
poly = utils.ToShapely(poly)
bound_area = ((poly.bounds[2] - poly.bounds[0]) *
(poly.bounds[3] - poly.bounds[1]))
if isinstance(poly, sgeo.MultiPolygon) and poly.area < bound_area * 0.01:
# For largely disjoint polygons, we process per polygon
# to avoid inefficiencies if polygons largely disjoint.
pts = ops.unary_union(
[sgeo.asMultiPoint(GridPolygonApprox(p, res_km))
for p in poly])
return [(p.x, p.y) for p in pts]
# Note: using as reference the min latitude, ie actual resolution < res_km.
# This is to match NTIA procedure.
ref_latitude = poly.bounds[1] # ref_latitude = poly.centroid.y
res_lat = res_km / _POLAR_DIST_PER_DEGREE
res_lng = res_km / (
_EQUATORIAL_DIST_PER_DEGREE * np.cos(np.radians(ref_latitude)))
bounds = poly.bounds
lng_min = np.floor(bounds[0] / res_lng) * res_lng
lat_min = np.floor(bounds[1] / res_lat) * res_lat
lng_max = np.ceil(bounds[2] / res_lng) * res_lng + res_lng/2.
lat_max = np.ceil(bounds[3] / res_lat) * res_lat + res_lat/2.
# The mesh creation is conceptually equivalent to
# mesh_lng, mesh_lat = np.mgrid[lng_min:lng_max:res_lng,
# lat_min:lat_max:res_lat]
# but without the floating point accumulation errors
mesh_lng, mesh_lat = np.meshgrid(
np.arange(np.floor((lng_max - lng_min) / res_lng) + 1),
np.arange(np.floor((lat_max - lat_min) / res_lat) + 1),
indexing='ij')
mesh_lng = lng_min + mesh_lng * res_lng
mesh_lat = lat_min + mesh_lat * res_lat
points = np.vstack((mesh_lng.ravel(), mesh_lat.ravel())).T
# Performs slight buffering by 1mm to include border points in case they fall
# exactly on a multiple of the grid resolution.
pts = poly.buffer(1e-8).intersection(sgeo.asMultiPoint(points))
if isinstance(pts, sgeo.Point):
return [(pts.x, pts.y)]
return [(p.x, p.y) for p in pts]
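# Illustrative usage sketch (arbitrary coordinates): grid a roughly 5 km x 5 km
# rectangle with a 1 km step; the returned list holds the (lon, lat) grid points that
# fall inside (or on the boundary of) the polygon.
def _ExampleGridPolygonApprox():
  poly = sgeo.Polygon([(-80.0, 40.0), (-80.0, 40.045), (-79.94, 40.045), (-79.94, 40.0)])
  return GridPolygonApprox(poly, res_km=1.0)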
def SampleLine(line, res_km, ref_latitude=None,
equal_intervals=False, precision=5, ratio=1.0):
"""Samples a line with approximate resolution (in km).
Args:
line: A shapely or GeoJSON |LineString|.
res_km: The resolution (in km).
ref_latitude: A reference latitude for the Eqc projection. If None, using
the centroid of the line.
equal_intervals: If True, all intervals are equal to finish on the edges.
precision: Simplify final coordinates with provided precision.
ratio: Fraction of the line length to cover (1.0 spans the full line).
Returns:
A list of (lon, lat) along the line every `res_km`.
"""
line = utils.ToShapely(line)
proj_line, ref_latitude = _ProjectEqc(line, ref_latitude)
if not equal_intervals:
points = sgeo.MultiPoint(
[proj_line.interpolate(dist)
for dist in np.arange(0, ratio * proj_line.length - 1e-6, res_km)])
else:
n_intervals = int(ratio * proj_line.length // res_km)
points = sgeo.MultiPoint(
[proj_line.interpolate(dist)
for dist in np.linspace(0, ratio * proj_line.length, n_intervals)])
points = _InvProjectEqc(points, ref_latitude)
return [(round(p.x, precision), round(p.y, precision)) for p in points]
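# Illustrative usage sketch (arbitrary coordinates): sample a one-degree meridian
# segment (about 111 km) every 10 km, yielding roughly a dozen points starting from
# the first vertex of the line.
def _ExampleSampleLine():
  line = sgeo.LineString([(-80.0, 40.0), (-80.0, 41.0)])
  return SampleLine(line, res_km=10.0)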
def AreaPlateCarreePixel(res_arcsec, ref_latitude):
"""Returns the approximate area in km of a Plate Carree pixel of size `res_arcsec`."""
return np.cos(np.radians(ref_latitude)) * (
2 * np.pi * _EARTH_MEAN_RADIUS_KM * res_arcsec / (360 * 3600))**2
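# Illustrative sketch: a 1 arcsec Plate Carree pixel at 40 degrees latitude covers about
# 7.3e-4 km^2 (roughly 24 m x 31 m); the value shrinks with the cosine of the latitude.
def _ExamplePixelArea():
  return AreaPlateCarreePixel(res_arcsec=1, ref_latitude=40.0)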
def ReplaceVincentyDistanceByHaversine():
"""Replaces Vincenty distance routines by Haversine great circle routines."""
vincenty.GeodesicDistanceBearing = GreatCircleDistanceBearing
vincenty.GeodesicPoint = GreatCirclePoint
vincenty.GeodesicPoints = GreatCirclePoints
# pylint: disable=unused-argument
# Great Circle methods in replacement of Vincenty methods.
# A good description of Haversine formula in their numerical stable version
# can be found at : https://www.movable-type.co.uk/scripts/latlong.html
def GreatCircleDistanceBearing(lat1, lon1, lat2, lon2, accuracy=None):
"""Calculates distance and bearings between points on earth.
This uses Haversine method considering the earth as a sphere.
See: https://en.wikipedia.org/wiki/Haversine_formula
This routine is designed to be a pure replacement of the similar
'google3.third_party.winnforum_sas.vincenty.GeodesicDistanceBearing()'
which uses the more precise Vincenty-based formula but is much slower.
It also goes beyond the vincenty implementation as it supports sequence
of points, either on the initial, final points or both.
Args:
lat1 (float or iterable of float): The initial point(s) latitude (degrees).
lon1 (float or iterable of float): The initial point(s) longitude (degrees).
lat2 (float or iterable of float): The final point(s) latitude (degrees).
lon2 (float or iterable of float): The final point(s) longitude (degrees).
accuracy: (unused) For compatibility with vincenty method prototype.
Returns:
A tuple of distance (km), direct bearing and back bearing (degrees). If
an iterable of points are passed as input, ndarray are returned.
"""
if lat1 == lat2 and lon1 == lon2:
return 0, 0, -180
lat1 = np.deg2rad(lat1)
lat2 = np.deg2rad(lat2)
delta_lat = lat2 - lat1
delta_lon = np.deg2rad(lon2) - np.deg2rad(lon1)
sin_lat1 = np.sin(lat1)
cos_lat1 = np.cos(lat1)
sin_lat2 = np.sin(lat2)
cos_lat2 = np.cos(lat2)
phi = (
np.sin(delta_lat / 2)**2 + cos_lat1 * cos_lat2 *
(np.sin(delta_lon / 2)**2))
phi = 2 * np.arctan2(np.sqrt(phi), np.sqrt(1 - phi))
distance = _EARTH_MEAN_RADIUS_KM * phi
bearing = np.rad2deg(
np.arctan2(
np.sin(delta_lon) * cos_lat2,
cos_lat1 * sin_lat2 - sin_lat1 * cos_lat2 * np.cos(delta_lon)))
rev_bearing = np.rad2deg(
np.arctan2(-np.sin(delta_lon) * cos_lat1,
cos_lat2 * sin_lat1 - sin_lat2 * cos_lat1 * np.cos(delta_lon)))
if np.isscalar(bearing):
if bearing < 0:
bearing += 360
if rev_bearing < 0:
rev_bearing += 360
else:
bearing[bearing < 0] += 360
rev_bearing[rev_bearing < 0] += 360
return distance, bearing, rev_bearing
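# Illustrative usage sketch (arbitrary points): distance and bearings between two
# scalar lat/lon points. The haversine result typically differs from the Vincenty
# (ellipsoidal) result by a few tenths of a percent at most.
def _ExampleDistanceBearing():
  dist_km, bearing, back_bearing = GreatCircleDistanceBearing(40.0, -80.0, 41.0, -81.0)
  return dist_km, bearing, back_bearing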
def GreatCirclePoint(lat, lon, dist_km, bearing, accuracy=None):
"""Computes the coordinates of a point towards a bearing at given distance.
This uses Haversine method considering the earth as a sphere.
See: https://en.wikipedia.org/wiki/Haversine_formula
This routine is designed to be a pure replacement of the similar
'google3.third_party.winnforum_sas.vincenty.GeodesicPoint()'
which uses the more precise Vincenty-based formula but is much slower.
Args:
lat (float): The initial point latitude (degrees).
lon (float): The initial point longitude (degrees).
dist_km (float): The distance of the target point (km).
bearing (float): The bearing angle (in degrees)
accuracy (unused): For compatibility with vincenty method prototype.
Returns:
A tuple of the final point latitude, longitude and final reverse bearing,
all in degrees.
"""
tgt_lat, tgt_lon, rev_bearing = GreatCirclePoints(lat, lon, [dist_km],
bearing)
return tgt_lat[0], tgt_lon[0], rev_bearing[0]
def GreatCirclePoints(lat, lon, distances_km, bearing, accuracy=None):
"""Computes the coordinates of points towards a bearing at several distances.
This uses Haversine method considering the earth as a sphere.
See: https://en.wikipedia.org/wiki/Haversine_formula
This routine is designed to be a pure replacement of the similar
'google3.third_party.winnforum_sas.vincenty.GeodesicPoints()'
which uses the more precise Vincenty-based formula but is much slower.
This routine is similar to `GreatCirclePoint` but can take any coherent
sequence of values for the initial point, distances or bearing, and
perform an efficient vectorized operation internally. 'Coherent' means that
either only one of the input is a sequence, or several are with the same
shapes.
Args:
lat: The initial point latitude (degrees).
lon: The initial point longitude (degrees).
distances_km (iterable of float): A sequence of distance of the target
points (in km). Can be for example a ndarray or a list.
bearing (float or iterable of float): The bearing angle (in degrees).
accuracy (unused): For compatibility with vincenty method prototype.
Returns:
A tuple of the points latitude, longitude and reverse bearing, all in
degrees. If one of the input is a ndarray, ndarray are returned,
otherwise lists are returned.
"""
is_array = (
isinstance(distances_km, np.ndarray) or isinstance(bearing, np.ndarray) or
isinstance(lat, np.ndarray))
lat = np.deg2rad(lat)
lon = np.deg2rad(lon)
bearing = np.deg2rad(bearing)
norm_distances = np.asarray(distances_km) / _EARTH_MEAN_RADIUS_KM
tgt_lat = np.arcsin(
np.sin(lat) * np.cos(norm_distances) +
np.cos(lat) * np.sin(norm_distances) * np.cos(bearing))
tgt_lon = lon + np.arctan2(
np.sin(bearing) * np.sin(norm_distances) * np.cos(lat),
np.cos(norm_distances) - np.sin(lat) * np.sin(tgt_lat))
rev_bearing = np.rad2deg(
np.arctan2(-np.sin(tgt_lon - lon) * np.cos(lat),
np.cos(tgt_lat) * np.sin(lat) - np.sin(tgt_lat) * np.cos(lat) * np.cos(tgt_lon - lon)))
# -*- coding: utf-8 -*-
"""LDFA-H: Latent Dynamic Factor Analysis of High-dimensional time-series
This module implements the fitting algorithm of LDFA-H and the accessory functions to
facilitate the associated analyses and inferences.
Todo
----
* Correct function ``fit_Phi``.
.. _[1] Bong et al. (2020). Latent Dynamic Factor Analysis of High-Dimensional Neural Recordings. Submitted to NeurIPS2020.
"""
import time, sys, traceback
import numpy as np
from scipy import linalg
import ldfa.optimize as core
def _generate_lambda_glasso(bin_num, lambda_glasso, offset, lambda_diag=None):
"""Generate sparsity penalty matrix Lambda for a submatrix in Pi."""
lambda_glasso_out = np.full((bin_num, bin_num), -1) + (1+lambda_glasso) * \
(np.abs(np.arange(bin_num) - np.arange(bin_num)[:,np.newaxis]) <= offset)
if lambda_diag:
lambda_glasso_out[np.arange(bin_num), np.arange(bin_num)] = lambda_diag
return lambda_glasso_out
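# Illustrative sketch (not part of the original module): with 4 time bins, penalty 0.5
# and bandwidth (offset) 1, the helper returns 0.5 on the tridiagonal band and -1
# elsewhere; the -1 entries act as an infinite penalty in the cost used by `fit`,
# which forces the corresponding entries of Pi to zero.
def _example_lambda_glasso():
    lam = _generate_lambda_glasso(4, 0.5, 1)
    assert lam[0, 1] == 0.5 and lam[0, 2] == -1
    return lam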
def _switch_back(Sigma, Phi_S, Gamma_T, Phi_T, beta):
"""Make the initial Phi_S positive definite."""
w, v = np.linalg.eig(Sigma)
sqrtS = (v*np.sqrt(w)[...,None,:])@v.transpose(0,2,1)
alpha = np.min(np.linalg.eigvals(
sqrtS @ linalg.block_diag(*Gamma_T) @ sqrtS),-1)/2
return (Sigma - alpha[:,None,None]*linalg.block_diag(*Phi_T),
[P + (b*alpha)@b.T for P, b in zip(Phi_S, beta)])
def _make_PD(m, thres_eigen=1e-4):
"""Make a coariance PSD based on its eigen decomposition."""
s, v = np.linalg.eigh(m)
if np.min(s)<=thres_eigen*np.max(s):
delta = thres_eigen*np.max(s) - np.min(s)
s = s + delta
return ([email protected](s)@np.linalg.inv(v))
def _temporal_est(V_eps_T, ar_order):
"""Perform temporal estimate given V_T.
.. _[1] Bickel, P. J. and Levina, E. (2008). Regularized estimation of large covariance matrices. Ann. Statist., 36(1):199–227.
"""
num_time = V_eps_T.shape[0]
resids = np.zeros(num_time)
Amatrix = np.zeros([num_time, num_time])
resids[0] = V_eps_T[0,0]
for i in np.arange(1, ar_order):
Amatrix[i,:i] = np.linalg.pinv(V_eps_T[:i,:i]) @ V_eps_T[:i,i]
resids[i] = V_eps_T[i,i] \
- V_eps_T[i,:i] @ np.linalg.pinv(V_eps_T[:i,:i]) @ V_eps_T[:i,i]
for i in np.arange(ar_order, num_time):
Amatrix[i,i-ar_order:i] = np.linalg.pinv(V_eps_T[i-ar_order:i,i-ar_order:i]) \
@ V_eps_T[i-ar_order:i,i]
resids[i] = V_eps_T[i,i] \
- V_eps_T[i,i-ar_order:i] \
@ np.linalg.pinv(V_eps_T[i-ar_order:i,i-ar_order:i]) \
@ V_eps_T[i-ar_order:i,i]
# invIA = np.linalg.pinv(np.eye(num_time) - Amatrix)
# Psi_T_hat = invIA @ np.diag(resids) @ invIA.T
# Gamma_T_hat = np.linalg.inv(Psi_T_hat)
Gamma_T_hat = (np.eye(num_time)-Amatrix).T @ np.diag(1/resids) \
@ (np.eye(num_time)-Amatrix)
Psi_T_hat = np.linalg.pinv(Gamma_T_hat)
return Gamma_T_hat, Psi_T_hat
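# Illustrative sketch: for an AR(1)-style correlation matrix, the banded regression
# estimate above returns a tridiagonal precision matrix when ar_order = 1, following
# the Bickel-Levina banding idea referenced in the docstring.
def _example_temporal_est():
    t = np.arange(5)
    V_T = 0.8 ** np.abs(t[:, None] - t[None, :])  # AR(1) correlation with rho = 0.8
    Gamma_T_hat, Psi_T_hat = _temporal_est(V_T, ar_order=1)
    return Gamma_T_hat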
def fit(data, num_f, lambda_cross, offset_cross,
lambda_auto=None, offset_auto=None, lambda_aug=0,
ths_ldfa=1e-2, max_ldfa=1000, ths_glasso=1e-8, max_glasso=1000,
ths_lasso=1e-8, max_lasso=1000, params_init=dict(), make_PD=False,
verbose=False):
"""The main function to perform multi-factor LDFA-H estimation.
Parameters
----------
data: list of (N, p_k, T) ndarrays
Observed data from K areas. Data from each area k consists of p_k-variate
time-series over T time bins in N trials.
num_f: int
The number of factors.
lambda_cross, lambda_auto: float
The sparsity penalty parameter for the inverse cross-correlation and inverse
auto-correlation matrix, respectively. The default value for lambda_auto is the given value of lambda_cross.
offset_cross, offset_auto: int
The bandwidth parameter for the inverse cross-correlation matrix and inverse
auto-correlation matrix, respectively. The default value for offset_auto is the
given value of offset_cross.
ths_ldfa, ths_glasso, ths_lasso: float, optional
The threshold values for deciding the convergence of the main iteration, the
glasso iteration, and the lasso iteration, respectively.
max_ldfa, max_glasso, max_lasso: int, optional
The maximum number of iteration for the main iteration, the glasso iteration,
and the lasso iteration, respectively.
params_init: dict, optional
Custom initial values for the parameters; a 'beta' entry (list of (p_k, num_f) ndarrays) seeds beta. If not given, beta is initialized by CCA.
make_PD: boolean, optional
Switch for manual positive definitization. If data does not generate a positive
definite estimate of the covariance matrix, ``make_PD = True`` helps with
maintaining the matrix positive definite throughout the fitting algorithm. The
default value is False for the sake of running time.
verbose: boolean, optional
Swith for vocal feedback throughout the fitting algorithm. The default value is
False.
Returns
-------
Pi: (K*T, K*T) ndarray
The estimated sparse inverse correlation matrix.
Rho: (K*T, K*T) ndarray
The estimated correlation matrix before sparsification. Note that Rho != Pi^{-1}.
params: dict
The dictionary of the estimated parameters. It provides with the estimation of
Omega: (num_f, K*T, K*T) ndarray;
Gamma_S: a list of (p_k, p_k) ndarrays for k = 1, ..., K;
Gamma_T: a list of (T, T) ndarrays for k = 1, ..., K;
beta: a list of (p_k, num_f) ndarrays for k = 1, ..., K; and
mu: a list of (p_k, T) ndarrays for k = 1, ..., K.
Examples
--------
Pi, Rho, params =\
fit(data, num_f, lambda_cross, offset_cross, lambda_auto, offset_auto)
.. _[1] Bong et al. (2020). Latent Dynamic Factor Analysis of High-Dimensional Neural Recordings. Submitted to NeurIPS2020.
"""
dims = [dat.shape[1] for dat in data]
num_time = data[0].shape[2]
num_trial = data[0].shape[0]
# get full_graph
if lambda_auto is None:
lambda_auto = lambda_cross
if offset_auto is None:
offset_auto = offset_cross
lambda_glasso_auto = _generate_lambda_glasso(num_time, lambda_auto,
offset_auto)
lambda_glasso_cross = _generate_lambda_glasso(num_time, lambda_cross,
offset_cross)
lambda_glasso = np.array(np.block(
[[lambda_glasso_auto if j==i else lambda_glasso_cross
for j, _ in enumerate(data)]
for i, _ in enumerate(data)]))
# set mu
mu= [np.mean(dat, 0) for dat in data]
# initialization
if all(key in params_init for key in ('Omega', 'mu', 'beta', 'Gamma_S', 'Gamma_T')):
Omega = params_init['Omega']; mu = params_init['mu']; beta = params_init['beta']
Gamma_S = params_init['Gamma_S']; Gamma_T = params_init['Gamma_T']
Sigma = np.linalg.inv(Omega)
sig = np.sqrt(np.diagonal(Sigma,0,1,2))
Rho = Sigma/sig[:,None,:]/sig[:,:,None]
Pi = np.linalg.inv(Rho)
else:
if 'beta' in params_init:
beta = [b.copy() for b in params_init['beta']]
weight = [np.linalg.pinv(b) for b in beta]
elif len(data)==2:
# initialize beta by CCA
S_xt = np.tensordot(
np.concatenate([dat-m for dat, m in zip(data,mu)], 1),
np.concatenate([dat-m for dat, m in zip(data,mu)], 1),
axes=((0,2),(0,2)))/num_trial/num_time
S_1 = S_xt[:dims[0],:dims[0]]
S_12 = S_xt[:dims[0],dims[0]:]
S_2 = S_xt[dims[0]:,dims[0]:]
U_1 = linalg.inv(linalg.sqrtm(S_1))
U_2 = linalg.inv(linalg.sqrtm(S_2))
u, s, vh = np.linalg.svd(U_1 @ S_12 @ U_2)
weight = [u[:,:num_f].T @ U_1, vh[:num_f] @ U_2]
beta = [linalg.inv(U_1) @ u[:,:num_f], linalg.inv(U_2) @ vh[:num_f].T]
else:
print("Default initialization only supports 2 populations now.")
raise
weight = [w*np.sqrt(np.sum(b**2, 0))[:,None] for w, b in zip(weight,beta)]
beta = [b/np.sqrt(np.sum(b**2, 0)) for b in beta]
# initialization on other parameters
m_z_x = np.concatenate([np.matmul(w, dat-m)[...,None,:]
for m, w, dat in zip(mu, weight, data)], -2)
V_z_x = np.zeros((num_f,len(dims),num_time)*2)
m_zk_x = m_z_x.transpose((2,0,1,3))
V_zk_x = np.diagonal(V_z_x,0,1,4).transpose(4,0,1,2,3)
# mu = [np.mean(dat - b @ m, 0)
# for dat, m, b in zip(data, m_zk_x, beta)]
m_eps = [dat - b @ m1 - m2
for dat, m1, b, m2 in zip(data, m_zk_x, beta, mu)]
v_eps = [(np.sum(np.square(m))*(1+lambda_aug)/num_trial
+ np.trace(V.reshape(num_f*num_time,num_f*num_time)))
# /(d-1)
for m, V, d in zip(m_eps, V_zk_x, dims)]
V_eps_S = [np.tensordot(m,m,axes=((0,2),(0,2)))*(1+lambda_aug)/num_trial/v
+ [email protected](np.diagonal(V,0,1,3),-1)@b.T/v
for m, V, v, b, d in zip(m_eps, V_zk_x, v_eps, beta, dims)]
V_eps_T = [np.tensordot(m,np.linalg.pinv(V2)@m, axes=((0,1),(0,1)))
*(lambda_aug*np.eye(num_time)+1)/d/num_trial
+ np.tensordot(V1,[email protected](V2)@b,axes=([0,2],[0,1]))/d
for m, V1, V2, b, d in zip(m_eps, V_zk_x, V_eps_S, beta, dims)]
sd_eps_T = [np.sqrt(np.diag(V)) for V in V_eps_T]
R_eps_T = [V/sd/sd[:,None] for V, sd in zip(V_eps_T, sd_eps_T)]
Phi_T = [R*sd*sd[:,None] for sd, R in zip(sd_eps_T, R_eps_T)]
Gamma_T = [np.linalg.inv(P) for P in Phi_T]
V_zf = (np.diagonal(V_z_x,0,0,3).transpose((4,0,1,2,3))
+ (m_z_x.reshape((-1,num_f,len(dims)*num_time)).transpose((1,2,0))
@ m_z_x.reshape((-1,num_f,len(dims)*num_time)).transpose((1,0,2))
* (lambda_aug*np.eye(len(dims)*num_time)+1)/ num_trial)\
.reshape((num_f,len(dims),num_time,len(dims),num_time)))
Sigma, Phi_S = _switch_back(
V_zf.reshape(num_f,len(dims)*num_time,len(dims)*num_time),
V_eps_S, Gamma_T, Phi_T, beta)
if make_PD:
Phi_S = [_make_PD(P) for P in Phi_S]
for f in np.arange(num_f):
Sigma[f] = _make_PD(Sigma[f])
Gamma_S = [np.linalg.inv(P) for P in Phi_S]
sig = np.sqrt(np.diagonal(Sigma,0,1,2))
Rho = Sigma/sig[:,None,:]/sig[:,:,None]
Pi = np.linalg.inv(Rho)
Omega = Pi/sig[:,None,:]/sig[:,:,None]
cost = (- log_like(data, {'Omega': Omega, 'beta': beta, 'mu': mu,
'Gamma_S': Gamma_S, 'Gamma_T': Gamma_T}, lambda_aug) / num_trial
+ np.sum(np.where(lambda_glasso*np.abs(Pi)>=0,
lambda_glasso*np.abs(Pi), np.inf)))
# EM algorithm
try:
for iter_ldfa in np.arange(max_ldfa):
Rho_ldfa = Rho.copy() # np.linalg.inv(Pi)
beta_ldfa = [b.copy() for b in beta]
cost_ldfa = cost
start_ldfa = time.time()
# E-step for Z
W_z_x = np.zeros((num_f,len(dims),num_time)*2)
for i, (G_S, G_T, b) in enumerate(zip(Gamma_S, Gamma_T, beta)):
W_z_x[:,i,:,:,i,:] += G_T[None,:,None,:]*(b.T@G_S@b)[:,None,:,None]
for j, W in enumerate(Omega):
W_z_x[j,:,:,j,:,:] += W.reshape(len(dims),num_time,len(dims),num_time)
V_z_x = np.linalg.inv(
W_z_x.reshape((num_f*len(dims)*num_time,num_f*len(dims)*num_time))) \
.reshape((num_f,len(dims),num_time)*2)
V_zk_x = np.diagonal(V_z_x,0,1,4).transpose(4,0,1,2,3)
y = np.stack([(b.T @ G_S) @ (dat - m) for dat, m, b, G_S
in zip(data, mu, beta, Gamma_S)], axis=-2)
S_y = np.tensordot(y, y, (0,0)) / num_trial \
* (lambda_aug*np.eye(len(dims)*num_time)
.reshape(len(dims),num_time,1,len(dims),num_time)+1)
V_z_y = np.stack([np.tensordot(V_z_x[...,i,:], G_T, [-1, 0])
for i, G_T in enumerate(Gamma_T)], -2)
S_mz = np.tensordot(np.tensordot(V_z_y, S_y, [(-3,-2,-1),(0,1,2)]),
V_z_y, [(-3,-2,-1),(-3,-2,-1)])
for iter_pb in np.arange(10):
beta_pb = [b.copy() for b in beta]
# coordinate descent for beta
S_mz_x_S = [np.tensordot(np.tensordot(y, np.stack(
[(np.tensordot(Gamma_T[i], V_z_y[:,i,:,:,j,:], [-1,1])
* (lambda_aug*(i==j)*np.eye(num_time)+1)[:,None,None,:])
for j in np.arange(len(dims))], -2), [(-3,-2,-1),(-3,-2,-1)]),
data[i] - mu[i], [(0,1),(0,-1)]) / num_trial
for i in np.arange(len(dims))]
S_mz_S = [np.tensordot(S_mz[:,i,:,:,i,:], G_T,
[(-3,-1),(0,1)])
for i, G_T in enumerate(Gamma_T)]
beta = [S1.T @ np.linalg.inv(S2 + np.tensordot(G_T, V, axes=((0,1),(1,3))))
for S1, S2, V, G_T in zip(S_mz_x_S, S_mz_S, V_zk_x, Gamma_T)]
# fitting Matrix-variate for Phi_S
V_eps_S = [
(np.tensordot((dat-m) @ (G_T*(lambda_aug*np.eye(num_time)+1)), dat-m,
axes=((0,2),(0,2)))/num_trial
- b @ S1 - S1.T @ b.T + b @ S2 @ b.T
+ b @ np.tensordot(V, G_T, [(-3,-1),(0,1)]) @ b.T)/num_time
for dat, m, b, G_T, V, S1, S2
in zip(data, mu, beta, Gamma_T, V_zk_x, S_mz_x_S, S_mz_S)]
Phi_S = [V.copy() for V in V_eps_S]
Gamma_S = [np.linalg.inv(P) for P in Phi_S]
# fitting Matrix_variate for Phi_T
y1 = np.stack([(b.T @ G_S) @ (dat - m) for dat, m, b, G_S
in zip(data, mu, beta, Gamma_S)], axis=-2)
S_y_y1 = np.tensordot(y, y1, (0,0)) / num_trial \
* (lambda_aug*np.eye(len(dims)*num_time)
.reshape(len(dims),num_time,1,len(dims),num_time)+1)
S_bmz_x_T = [np.tensordot(V_z_y[:,i,:,:,:,:], S_y_y1[:,:,:,:,i,:],
[(0,2,3,4),(3,0,1,2)])
for i in np.arange(len(dims))]
S_bmz_T = [np.tensordot(S_mz[:,i,:,:,i,:], b.T @ G_S @ b, [(0,2),(0,1)])
for i, (b, G_S) in enumerate(zip(beta, Gamma_S))]
V_eps_T = [
(np.tensordot(dat-m, G_S@(dat-m), axes=((0,1),(0,1)))/num_trial
* (lambda_aug * np.eye(num_time) + 1) - S1 - S1.T + S2
+ np.tensordot(V, b.T@G_S@b, axes=([0,2],[0,1])))/d
for dat, d, m, b, G_S, V, S1, S2
in zip(data, dims, mu, beta, Gamma_S, V_zk_x, S_bmz_x_T, S_bmz_T)]
sd_eps_T = [np.sqrt(np.diag(V)) for V in V_eps_T]
R_eps_T = [V/sd/sd[:,None] for V, sd in zip(V_eps_T, sd_eps_T)]
Phi_T = []
for i, (Rt, sd, d) \
in enumerate(zip(R_eps_T, sd_eps_T, dims)):
Tt, Pt = _temporal_est(Rt, offset_auto)
Gamma_T[i] = Tt / sd / sd[:,None]
Phi_T.append(Pt * sd * sd[:,None])
# M-step for mu
# mu = [np.mean(dat - b @ m, 0) for dat, m, b in zip(data, m_zk_x, beta)]
beta_diff = [1-np.sum(b1*b2,0)/np.sqrt(np.sum(b1**2,0)*np.sum(b2**2,0))
for b1, b2 in zip(beta, beta_pb)]
if np.max(beta_diff) < 1e-12:
break
# Normalize beta
V_zf = (np.diagonal(V_z_x,0,0,3)+np.diagonal(S_mz,0,0,3)).transpose(4,0,1,2,3)
B = np.concatenate([np.sqrt(np.sum(b**2,0))[:,None] for b in beta], -1)
Sigma = (V_zf * B[:,:,None,None,None] * B[:,None,None,:,None]) \
.reshape((num_f,len(dims)*num_time,len(dims)*num_time))
beta = [b / np.sqrt(np.sum(b**2, 0)) for b in beta]
# Switch forward
# Sigma, Phi_S = switch_forward(Sigma, Phi_S, Phi_T, beta)
# M-step for Pi
sig = np.sqrt(np.diagonal(Sigma,0,1,2))
Rho = Sigma/sig[:,None,:]/sig[:,:,None]
for i, R in enumerate(Rho):
P = Pi[i].copy()
core.glasso(P, np.linalg.inv(P), R, lambda_glasso,
ths_glasso, max_glasso, ths_lasso, max_lasso)
Pi[i] = P
Omega = Pi/sig[:,None,:]/sig[:,:,None]
# Switch back
# Sigma, Phi_S = switch_back(Sigma, Phi_S, Theta_T, beta)
# Omega = np.linalg.inv(Sigma)
# Theta_S = [np.linalg.inv(P) for P in Phi_S]
# calculate cost
cost = (- log_like(data, {'Omega': Omega, 'beta': beta, 'mu': mu,
'Gamma_S': Gamma_S, 'Gamma_T': Gamma_T}, lambda_aug) / num_trial
+ np.sum(np.where(lambda_glasso*np.abs(Pi)>=0,
lambda_glasso*np.abs(Pi), np.inf)))
change_Sigma = np.max(np.abs(Rho - Rho_ldfa))
change_beta = np.max([1-np.sum(b1*b2,0)/np.sqrt(np.sum(b1**2,0)*np.sum(b2**2,0)) for b1, b2 in zip(beta, beta_ldfa)])
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for basic component wise operations using a GPU device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker_v2
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.gen_array_ops import broadcast_gradient_args
from tensorflow.python.platform import test
class GPUBinaryOpsTest(test.TestCase):
def _compareGPU(self, x, y, np_func, tf_func):
with self.cached_session():
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_gpu = self.evaluate(out)
with self.cached_session(use_gpu=False):
inx = ops.convert_to_tensor(x)
iny = ops.convert_to_tensor(y)
out = tf_func(inx, iny)
tf_cpu = self.evaluate(out)
self.assertAllClose(tf_cpu, tf_gpu)
def testFloatBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
self._compareGPU(x, y + 0.1, np.floor_divide, math_ops.floordiv)
self._compareGPU(x, y, np.power, math_ops.pow)
def testFloatWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float32)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float32) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleBasic(self):
x = np.linspace(-5, 20, 15).reshape(1, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
y = np.linspace(20, -5, 15).reshape(1, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
def testDoubleWithBCast(self):
x = np.linspace(-5, 20, 15).reshape(3, 5).astype(np.float64)
y = np.linspace(20, -5, 30).reshape(2, 3, 5).astype(np.float64) # pylint: disable=too-many-function-args
self._compareGPU(x, y, np.add, math_ops.add)
self._compareGPU(x, y, np.subtract, math_ops.subtract)
self._compareGPU(x, y, np.multiply, math_ops.multiply)
self._compareGPU(x, y + 0.1, np.true_divide, math_ops.truediv)
class MathBuiltinUnaryTest(test.TestCase):
def _compare(self, x, np_func, tf_func, use_gpu):
np_out = np_func(x)
with self.cached_session(use_gpu=use_gpu) as sess:
inx = ops.convert_to_tensor(x)
ofunc = tf_func(inx)
tf_out = self.evaluate(ofunc)
self.assertAllClose(np_out, tf_out)
def _inv(self, x):
return 1.0 / x
def _rsqrt(self, x):
return self._inv(np.sqrt(x))
def _testDtype(self, dtype, use_gpu):
data = (np.arange(-3, 3) / 4.).reshape([1, 3, 2]).astype(dtype)
data_gt_1 = data + 2 # for x > 1
self._compare(data, np.abs, math_ops.abs, use_gpu)
self._compare(data, np.arccos, math_ops.acos, use_gpu)
self._compare(data, np.arcsin, math_ops.asin, use_gpu)
self._compare(data, np.arcsinh, math_ops.asinh, use_gpu)
self._compare(data_gt_1, np.arccosh, math_ops.acosh, use_gpu)
self._compare(data, np.arctan, math_ops.atan, use_gpu)
self._compare(data, np.ceil, math_ops.ceil, use_gpu)
self._compare(data, np.cos, math_ops.cos, use_gpu)
self._compare(data, np.cosh, math_ops.cosh, use_gpu)
self._compare(data, np.exp, math_ops.exp, use_gpu)
self._compare(data, np.floor, math_ops.floor, use_gpu)
self._compare(data, np.log, math_ops.log, use_gpu)
self._compare(data, np.log1p, math_ops.log1p, use_gpu)
self._compare(data, np.negative, math_ops.negative, use_gpu)
self._compare(data, self._rsqrt, math_ops.rsqrt, use_gpu)
self._compare(data, np.sin, math_ops.sin, use_gpu)
self._compare(data, np.sinh, math_ops.sinh, use_gpu)
self._compare(data, np.sqrt, math_ops.sqrt, use_gpu)
self._compare(data, np.square, math_ops.square, use_gpu)
self._compare(data, np.tan, math_ops.tan, use_gpu)
self._compare(data, np.tanh, math_ops.tanh, use_gpu)
self._compare(data, np.arctanh, math_ops.atanh, use_gpu)
def testTypes(self):
for dtype in [np.float32]:
self._testDtype(dtype, use_gpu=True)
def testFloorDivide(self):
x = (1 + np.linspace(0, 5, np.prod([1, 3, 2]))).astype(np.float32).reshape(1, 3, 2)
"""
A billboarded particle layer with texture/shader support
"""
import numpy as np
from abc import ABC
from collections.abc import Iterable
from napari.layers import Surface
from napari.layers.utils.layer_utils import calc_data_range
from vispy.visuals.filters import Filter
from vispy.visuals.shaders import Function, Varying
from vispy.gloo import Texture2D, VertexBuffer
from .utils import generate_billboards_2d
from .filters import ShaderFilter, _shader_functions
class BillboardsFilter(Filter):
""" Billboard geometry filter (transforms vertices to always face camera)
"""
def __init__(self, antialias=0):
vmat_inv = Function("""
mat2 inverse(mat2 m) {
return mat2(m[1][1],-m[0][1],-m[1][0], m[0][0]) / (m[0][0]*m[1][1] - m[0][1]*m[1][0]);
}
""")
vfunc = Function("""
varying float v_z_center;
varying float v_scale_intensity;
varying mat2 covariance_inv;
void apply(){
// original world coordinates of the (constant) particle squad, e.g. [5,5] for size 5
vec4 pos = $transform_inv(gl_Position);
pos.z *= pos.w;
vec2 tex = $texcoords;
mat4 cov = mat4(1.0);
cov[0][0] = sqrt($sigmas[0]);
cov[1][1] = sqrt($sigmas[1]);
cov[2][2] = sqrt($sigmas[2]);
// get new inverse covariance matrix (for rotating a gaussian)
vec4 ex = vec4(1,0,0,0);
vec4 ey = vec4(0,1,0,0);
vec4 ez = vec4(0,0,1,0);
vec3 ex2 = $camera(cov*$camera_inv(ex)).xyz;
vec3 ey2 = $camera(cov*$camera_inv(ey)).xyz;
vec3 ez2 = $camera(cov*$camera_inv(ez)).xyz;
mat3 Rmat = mat3(ex2, ey2, ez2);
covariance_inv = mat2(transpose(Rmat)*mat3(cov)*Rmat);
covariance_inv = $inverse(covariance_inv);
// get first and second column of view (which is the inverse of the camera)
vec3 camera_right = $camera_inv(vec4(1,0,0,0)).xyz;
vec3 camera_up = $camera_inv(vec4(0,1,0,0)).xyz;
// when particles become too small, lock texture size and apply antialiasing (only used when antialias=1)
// decrease this value to increase antialiasing
//float dist_cutoff = .2 * max(abs(pos.x), abs(pos.y));
// increase this value to increase antialiasing
float dist_cutoff = $antialias;
float len = length(camera_right);
//camera_right = normalize(camera_right);
//camera_up = normalize(camera_up);
camera_right = camera_right/len;
camera_up = camera_up/len;
vec4 p1 = $transform(vec4($vertex_center.xyz + camera_right*pos.x + camera_up*pos.y, 1.));
vec4 p2 = $transform(vec4($vertex_center,1));
float dist = length(p1.xy/p1.w-p2.xy/p2.w);
// if antialias and far away zoomed out, keep sprite size constant and shrink texture...
// else adjust sprite size
if (($antialias>0) && (dist<dist_cutoff)) {
float scale = dist_cutoff/dist;
//tex = .5+(tex-.5)*clamp(scale,1,10);
tex = .5+(tex-.5);
camera_right = camera_right*scale;
camera_up = camera_up*scale;
v_scale_intensity = scale;
}
vec3 pos_real = $vertex_center.xyz + camera_right*pos.x + camera_up*pos.y;
gl_Position = $transform(vec4(pos_real, 1.));
vec4 center = $transform(vec4($vertex_center,1));
v_z_center = center.z/center.w;
$v_texcoords = tex;
}
""")
ffunc = Function("""
varying float v_scale_intensity;
varying float v_z_center;
void apply() {
gl_FragDepth = v_z_center;
$texcoords;
}
""")
self._texcoord_varying = Varying('v_texcoord', 'vec2')
vfunc['inverse'] = vmat_inv
vfunc['v_texcoords'] = self._texcoord_varying
ffunc['texcoords'] = self._texcoord_varying
self._texcoords_buffer = VertexBuffer(
np.zeros((0, 2), dtype=np.float32)
)
vfunc['texcoords'] = self._texcoords_buffer
vfunc['antialias'] = float(antialias)
self._centercoords_buffer = VertexBuffer(
np.zeros((0, 3), dtype=np.float32))
self._sigmas_buffer = VertexBuffer(
np.zeros((0, 3), dtype=np.float32))
vfunc['vertex_center'] = self._centercoords_buffer
vfunc['sigmas'] = self._sigmas_buffer
super().__init__(vcode=vfunc, vhook='post',fcode=ffunc, fhook='post')
@property
def centercoords(self):
"""The vertex center coordinates as an (N, 3) array of floats."""
return self._centercoords
@centercoords.setter
def centercoords(self, centercoords):
self._centercoords = centercoords
self._update_coords_buffer(centercoords)
def _update_coords_buffer(self, centercoords):
if self._attached and self._visual is not None:
self._centercoords_buffer.set_data(centercoords[:,::-1], convert=True)
@property
def sigmas(self):
"""The vertex center coordinates as an (N, 3) array of floats."""
return self._sigmas
@sigmas.setter
def sigmas(self, sigmas):
self._sigmas = sigmas
self._update_sigmas_buffer(sigmas)
def _update_sigmas_buffer(self, sigmas):
if self._attached and self._visual is not None:
self._sigmas_buffer.set_data(sigmas[:,::-1], convert=True)
@property
def texcoords(self):
"""The texture coordinates as an (N, 2) array of floats."""
return self._texcoords
@texcoords.setter
def texcoords(self, texcoords):
self._texcoords = texcoords
self._update_texcoords_buffer(texcoords)
def _update_texcoords_buffer(self, texcoords):
if self._attached and self._visual is not None:
self._texcoords_buffer.set_data(texcoords[:,::-1], convert=True)
def _attach(self, visual):
# the full projection model view
self.vshader['transform'] = visual.transforms.get_transform('visual', 'render')
# the inverse of it
self.vshader['transform_inv'] = visual.transforms.get_transform('render', 'visual')
# the modelview
self.vshader['camera_inv'] = visual.transforms.get_transform('document', 'scene')
# inverse of it
self.vshader['camera'] = visual.transforms.get_transform('scene', 'document')
super()._attach(visual)
class Particles(Surface):
""" Billboarded particle layer that renders camera facing quads of given size
Can be combined with other (e.g. texture) filter to create particle systems etc
"""
def __init__(self, coords, size=10, sigmas=(1,1,1), values=1, filter=ShaderFilter('gaussian'), antialias=False, **kwargs):
kwargs.setdefault('shading', 'none')
kwargs.setdefault('blending', 'additive')
coords = np.asarray(coords)
sigmas = np.asarray(sigmas, dtype=np.float32)
if np.isscalar(values):
values = values * np.ones(len(coords))
values = np.broadcast_to(values, len(coords))
size = np.broadcast_to(size, len(coords))
sigmas = np.broadcast_to(sigmas, (len(coords),3))
if not coords.ndim == 2 :
raise ValueError(f'coords should be of shape (M,D)')
if not len(size)==len(coords)==len(sigmas):
raise ValueError()
# add dummy z if 2d coords
if coords.shape[1] == 2:
coords = np.concatenate([np.zeros((len(coords),1)), coords], axis=-1)
assert coords.shape[-1]==sigmas.shape[-1]==3
vertices, faces, texcoords = generate_billboards_2d(coords, size=size)
# repeat values for each 4 vertices
centercoords = np.repeat(coords, 4, axis=0)
sigmas = np.repeat(sigmas, 4, axis=0)
values = np.repeat(values, 4, axis=0)
self._coords = coords
self._centercoords = centercoords
self._sigmas = sigmas
self._size = size
self._texcoords = texcoords
self._billboard_filter = BillboardsFilter(antialias=antialias)
self.filter = filter
self._viewer = None
super().__init__((vertices, faces, values), **kwargs)
def _set_view_slice(self):
"""Sets the view given the indices to slice with."""
super()._set_view_slice()
self._update_billboard_filter()
def _update_billboard_filter(self):
faces = self._view_faces.flatten()
if self._billboard_filter._attached and len(faces)>0:
self._billboard_filter.texcoords = self._texcoords[faces]
self._billboard_filter.centercoords = self._centercoords[faces][:,-3:]
self._billboard_filter.sigmas = self._sigmas[faces][:,-3:]
@property
def filter(self):
"""The filter property."""
return self._filter
@filter.setter
def filter(self, value):
if value is None:
value = ()
elif not isinstance(value, Iterable):
value = (value,)
self._filter = tuple(value)
@property
def _extent_data(self) -> np.ndarray:
"""Extent of layer in data coordinates.
Returns
-------
extent_data : array, shape (2, D)
"""
if len(self._coords) == 0:
extrema = np.full((2, self.ndim), np.nan)
else:
size = np.repeat(self._size[:,np.newaxis], self.ndim, axis=-1)
size[:,:-2] *=0
maxs = np.max(self._coords+.5*size, axis=0)
mins = np.min(self._coords-.5*size, axis=0)
extrema = np.vstack([mins, maxs])
return extrema
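# Illustrative usage sketch (assumed random coordinates and a default napari viewer):
# create a Particles layer from 100 random 3D points with the bundled 'gaussian'
# texture filter and add it to a viewer.
def _example_particles_layer():
    import napari
    coords = np.random.uniform(0, 100, size=(100, 3))
    layer = Particles(coords, size=5, values=1.0, filter=ShaderFilter('gaussian'))
    viewer = napari.Viewer(ndisplay=3)
    viewer.add_layer(layer)
    return viewer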
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile
from scipy.signal import fftconvolve
from librosa.core import load
from librosa.core import stft
from librosa.core import istft
from librosa import amplitude_to_db, db_to_amplitude
from librosa.display import specshow
from librosa.output import write_wav
from scipy.signal import butter, lfilter, csd
from scipy.linalg import svd, pinv
from utils import apply_reverb, read_wav
import corpus
import mir_eval
from pypesq import pypesq
import pyroomacoustics as pra
import roomsimove_single
import olafilt
def load_file(files):
print(files[0])
print(files[1])
s1, _ = load(files[0], sr=16000)
s2, _ = load(files[1], sr=16000)
# s1, s2 = map(read_wav, files)
if len(s1) > len(s2):
pad_length = len(s1) - len(s2)
s2 = np.pad(s2, (0,pad_length), 'reflect')
else:
pad_length = len(s2) - len(s1)
s1 = np.pad(s1, (0,pad_length), 'reflect')
# Copyright (c) 2020: <NAME> (<EMAIL>).
#
# This file is modified from <https://github.com/philip-huang/PIXOR>:
# Copyright (c) [2019] [<NAME>]
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Utils for PIXOR detection."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import os
import pickle
import numpy as np
import tensorflow as tf
from shapely.geometry import Polygon
def get_eval_lists(images, latents, model_net, pixor_size=128):
gt_boxes_list = []
corners_list = []
scores_list = []
gt_pixor_state_list = []
recons_pixor_state_list = []
for i in range(len(latents)):
latent_eps = latents[i]
image_eps = images[i]
for j in range(len(latent_eps)):
latent = latent_eps[j]
dict_obs = image_eps[j]
dict_recons = model_net.reconstruct_pixor(latent)
vh_clas_recons = tf.squeeze(dict_recons['vh_clas'], axis=-1) # (B,H,W,1)
vh_regr_recons = dict_recons['vh_regr'] # (B,H,W,6)
decoded_reg_recons = decode_reg(vh_regr_recons, pixor_size) # (B,H,W,8)
pixor_state_recons = dict_recons['pixor_state']
vh_clas_obs = tf.squeeze(dict_obs['vh_clas'], axis=-1) # (B,H,W,1)
vh_regr_obs = dict_obs['vh_regr'] # (B,H,W,6)
decoded_reg_obs = decode_reg(vh_regr_obs, pixor_size) # (B,H,W,8)
pixor_state_obs = dict_obs['pixor_state']
B = vh_regr_obs.shape[0]
for k in range(B):
gt_boxes, _ = pixor_postprocess(vh_clas_obs[k], decoded_reg_obs[k])
corners, scores = pixor_postprocess(vh_clas_recons[k], decoded_reg_recons[k]) # (N,4,2)
gt_boxes_list.append(gt_boxes)
corners_list.append(corners)
scores_list.append(scores)
gt_pixor_state_list.append(pixor_state_obs[k])
recons_pixor_state_list.append(pixor_state_recons[k])
return gt_boxes_list, corners_list, scores_list, gt_pixor_state_list, recons_pixor_state_list
def get_eval_metrics(images, latents, model_net, pixor_size=128, ap_range=[0.3,0.5,0.7], filename = 'metrics'):
gt_boxes_list, corners_list, scores_list, gt_pixor_state_list, recons_pixor_state_list \
= get_eval_lists(images, latents, model_net, pixor_size=pixor_size)
N = len(gt_boxes_list)
APs = {}
precisions = {}
recalls = {}
for ap in ap_range:
gts = 0
preds = 0
all_scores = []
all_matches = []
for i in range(N):
gt_boxes = gt_boxes_list[i]
corners = corners_list[i]
scores = scores_list[i]
gt_match, pred_match, overlaps = compute_matches(gt_boxes,
corners, scores, iou_threshold=ap)
num_gt = gt_boxes.shape[0]
num_pred = len(scores)
gts += num_gt
preds += num_pred
all_scores.extend(list(scores))
all_matches.extend(list(pred_match))
all_scores = np.array(all_scores)
all_matches = np.array(all_matches)
sort_ids = np.argsort(all_scores)
all_matches = all_matches[sort_ids[::-1]]
if gts == 0 or preds == 0:
return
AP, precision, recall, p, r = compute_ap(all_matches, gts, preds)
print('ap', ap)
print('AP', AP)
print('precision', p)
print('recall', r)
APs[ap] = AP
precisions[ap] = precision
recalls[ap] = recall
results = {}
results['APs'] = APs
results['precisions'] = precisions
results['recalls'] = recalls
error_position = []
error_heading = []
error_velocity = []
for i in range(N):
gt_pixor_state = gt_pixor_state_list[i]
recons_pixor_state = recons_pixor_state_list[i]
x0, y0, cos0, sin0, v0 = gt_pixor_state
x, y, cos, sin, v = recons_pixor_state
error_position.append(np.sqrt((x-x0)**2+(y-y0)**2))
yaw0 = np.arctan2(sin0, cos0)
cos0 = np.cos(yaw0)
sin0 = np.sin(yaw0)
yaw = np.arctan2(sin, cos)
cos = np.cos(yaw)
sin = np.sin(yaw)
error_heading.append(np.arccos(np.dot([cos,sin],[cos0,sin0])))
error_velocity.append(abs(v-v0))
results['error_position'] = np.mean(error_position)
results['error_heading'] = np.mean(error_heading)
results['error_velocity'] = np.mean(error_velocity)
results['std_position'] = np.std(error_position)
results['std_heading'] = np.std(error_heading)
results['std_velocity'] = np.std(error_velocity)
if not os.path.exists('results'):
os.makedirs('results')
path = os.path.join('results', filename)
with open(path, 'wb') as f:
pickle.dump(results, f, protocol=pickle.HIGHEST_PROTOCOL)
def compute_matches(gt_boxes,
pred_boxes, pred_scores,
iou_threshold=0.5, score_threshold=0.0):
"""Finds matches between prediction and ground truth instances.
Returns:
gt_match: 1-D array. For each GT box it has the index of the matched
predicted box.
pred_match: 1-D array. For each predicted box, it has the index of
the matched ground truth box.
overlaps: [pred_boxes, gt_boxes] IoU overlaps.
"""
if len(pred_scores) == 0:
return -1 * np.ones([gt_boxes.shape[0]]), np.array([]), np.array([])
gt_class_ids = np.ones(len(gt_boxes), dtype=int)
pred_class_ids = np.ones(len(pred_scores), dtype=int)
# Sort predictions by score from high to low
indices = np.argsort(pred_scores)[::-1]
pred_boxes = pred_boxes[indices]
pred_class_ids = pred_class_ids[indices]
pred_scores = pred_scores[indices]
# Compute IoU overlaps [pred_boxes, gt_boxes]
overlaps = compute_overlaps(pred_boxes, gt_boxes)
# Loop through predictions and find matching ground truth boxes
match_count = 0
pred_match = -1 * np.ones([pred_boxes.shape[0]])
gt_match = -1 * np.ones([gt_boxes.shape[0]])
for i in range(len(pred_boxes)):
# Find best matching ground truth box
# 1. Sort matches by score
sorted_ixs = np.argsort(overlaps[i])[::-1]
# 2. Remove low scores
low_score_idx = np.where(overlaps[i, sorted_ixs] < score_threshold)[0]
if low_score_idx.size > 0:
sorted_ixs = sorted_ixs[:low_score_idx[0]]
# 3. Find the match
for j in sorted_ixs:
# If ground truth box is already matched, go to next one
if gt_match[j] > -1:
continue
# If we reach IoU smaller than the threshold, end the loop
iou = overlaps[i, j]
if iou < iou_threshold:
break
# Do we have a match?
if pred_class_ids[i] == gt_class_ids[j]:
match_count += 1
gt_match[j] = i
pred_match[i] = j
break
return gt_match, pred_match, overlaps
def compute_overlaps(boxes1, boxes2):
"""Computes IoU overlaps between two sets of boxes.
boxes1, boxes2: a np array of boxes
For better performance, pass the largest set first and the smaller second.
:return: a matrix of overlaps [boxes1 count, boxes2 count]
"""
# Compute overlaps to generate matrix [boxes1 count, boxes2 count]
# Each cell contains the IoU value.
boxes1 = convert_format(boxes1)
boxes2 = convert_format(boxes2)
overlaps = np.zeros((len(boxes1), len(boxes2)))
for i in range(overlaps.shape[1]):
box2 = boxes2[i]
overlaps[:, i] = compute_iou(box2, boxes1)
return overlaps
def compute_ap(pred_match, num_gt, num_pred):
assert num_gt != 0
assert num_pred != 0
tp = (pred_match > -1).sum()
# Compute precision and recall at each prediction box step
precisions = np.cumsum(pred_match > -1) / (np.arange(num_pred) + 1)
recalls = np.cumsum(pred_match > -1).astype(np.float32) / num_gt
# Ensure precision values decrease but don't increase. This way, the
# precision value at each recall threshold is the maximum it can be
# for all following recall thresholds, as specified by the VOC paper.
for i in range(len(precisions) - 2, -1, -1):
precisions[i] = np.maximum(precisions[i], precisions[i + 1])
"""
Classes to simplify and standardize the process of drawing samples from the posterior distribution in
Bayesian inference problems.
"""
import numpy as np
import scipy as sp
import scipy.integrate  # registers the submodule so sp.integrate.cumtrapz is available
class Quad_Sampler(object):
"""
Class for drawing samples from an arbitrary one-dimensional probability distribution using numerical integration
and interpolation. In general this will be superior to more sophisticated sampling methods for 1D problems.
Assumes that priors are uniform.
Args:
ln_likelihood: Function which takes the independent variable x as its first argument and returns the log
of the likelihood function, p(d|x,I), up to a constant. May take other *args or **kwargs.
priors: List-type of the form [a,b], where a and b define the upper and lower bounds of the uniform
prior p(x|I).
Optional:
vect: (bool) Set to true if the log-likelihood accepts a vectorized input.
"""
def __init__(self, ln_likelihood, priors, vect=False):
self._ln_likelihood = ln_likelihood
self._a, self._b = priors
self._vect = vect
# Default values
self.ln_Z = np.nan
self.mean = np.nan
self.std = np.nan
def fit(self, n_pts=200, args=(), **kwargs):
"""
Perform the fit.
Optional:
n_pts: (int) Number of evenly-spaced points over which to compute the probability.
args: (tuple) All additional arguments to be passed on the the likelihood function.
**kwargs: All other keywords are passed on the the likelihood function.
"""
# Evaluate the pdf
self.xs = np.linspace(self._a, self._b, num=n_pts)
if self._vect:
self.ln_pdf = self._ln_likelihood(self.xs, *args, **kwargs)
else:
self.ln_pdf = np.array([self._ln_likelihood(x, *args, **kwargs) for x in self.xs])
# Rescale with the maxima
ln_C = np.amax(self.ln_pdf)
pdf_scaled = np.exp(self.ln_pdf - ln_C)
# Compute the evidence and rescale
Z_scaled = np.trapz(pdf_scaled, x=self.xs)
self.ln_Z = np.log(Z_scaled) + ln_C
self.pdf = pdf_scaled / Z_scaled
self.cdf = sp.integrate.cumtrapz(self.pdf, x=self.xs, initial=0)
# Estimate summary statistics - assuming a normal distribution
samples = self.get_samples(1000)
self.mean = np.mean(samples)
self.std = np.std(samples)
def get_samples(self, n_samples):
"""
"""
u_samp = np.random.rand(n_samples)
return np.interp(u_samp, self.cdf, self.xs)
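# Illustrative usage sketch: recover the location and width of a Gaussian likelihood
# under a uniform prior on [-5, 5]. The true values (mean 1.0, sigma 0.5) are arbitrary.
def _example_quad_sampler():
    def ln_like(x):
        return -0.5 * ((x - 1.0) / 0.5) ** 2
    sampler = Quad_Sampler(ln_like, priors=[-5.0, 5.0], vect=True)
    sampler.fit(n_pts=500)
    return sampler.mean, sampler.std, sampler.get_samples(100)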
class Quad_Sampler_ND(object):
"""
Class for drawing samples from an arbitrary N-dimensional probability distribution using numerical integration
and interpolation. This can be useful for problems with a low number of dimensions (~3) for which the likelihood
function can be computed quickly (<< 1 second).
Assumes that priors are uniform. Currently does not support vectorized likelihoods.
Args:
ln_likelihood: Function which takes the independent variables (x1, x2, ..., xN) as its first argument and returns
the log of the likelihood function, p(d|x1,...,I), up to a constant. May take other *args or **kwargs.
priors: List of tuples, of the form [(a1,b1), (a2,b2), ..., (aN,bN)] where a and b define the upper and lower bounds
of the uniform prior p(x1,...|I).
Optional:
vect: (bool) Set to true if the log-likelihood accepts a vectorized input.
"""
def __init__(self, ln_likelihood, ndim, priors):
self._ln_likelihood = ln_likelihood
self.ndim = ndim
self._a = np.zeros(self.ndim)
import os
import argparse
from itertools import chain
from operator import itemgetter
import numpy as np
from keras import backend as K
from keras.preprocessing import sequence
from keras.models import load_model, Model
from keras.layers import Dense
from keras.optimizers import SGD, Adam
import tensorflow as tf
from sklearn.model_selection import LeaveOneOut
from sklearn.metrics import confusion_matrix, classification_report
from nltk.corpus import stopwords
from evidencedetection.reader import AnnotationReader
from evidencedetection.vectorizer import EmbeddingVectorizer
from evidencedetection.analyzer import Analyzer
from evidencedetection.models.neuralnetwork import BiLSTM
def parse_arguments():
parser = argparse.ArgumentParser("Tests an argument mining model on the evidence annotations.")
parser.add_argument("--annotations", type=str, help="The path to the user annotations.")
parser.add_argument("--test", type=str, help="The testing file.")
parser.add_argument("--user", type=str, help="The user for which to test the model.")
parser.add_argument("--embeddings", type=str, help="The path to the embeddings folder")
parser.add_argument("--epochs", type=int, default=0, help="The number of epochs to train for")
parser.add_argument("--initial", type=int, default=0, help="""The number of initial epochs in which all, except the new classifier layer are frozen. This number will be added to the other training epochs.""")
parser.add_argument("--lr", type=float, default=0.001, help="The learning rate for the combined learning phase")
parser.add_argument("--ilr", type=float, default=0.01, help="The learning for the initial learning phase.")
parser.add_argument("--suffix", type=str, default="AM", help="The suffix to use, e.g. AM or ED")
parser.add_argument("--modeldir", type=str, default="../models/sentential-argument-mining", help="The model directory. The directory name is also the specific model name + .h5")
parser.add_argument("--maxlen", type=int, default=189, help="The maximum length of the input sentences")
parser.add_argument("--topicSpecific", action="store_true", help="Specifies whether or not the model to load will be topic specific")
parser.add_argument("--seed", type=int, default=0, help="The random seed to use for training.")
return parser.parse_args()
class Experiment:
def __init__(self, vectorizer, epochs, initial, lr, ilr, model_name, max_length, seed):
self.max_length = max_length
self.vectorizer = vectorizer
self.epochs = epochs
self.initial = initial
self.lr = lr
self.ilr = ilr
self.model_name = model_name
self.seed = seed
def run_experiment(self, train, test, old_model_name):
loo = LeaveOneOut()
all_preds = list()
all_targets = list()
training_data = list()
for num_training_files, training_file in enumerate(reversed(train)):
new_training_sentences = training_file[1]
training_data.append(new_training_sentences)
filename = test[0][0]
print("Testing on: ", filename)
test_data = np.array(test[0][1])
flat_training_data = np.array(list(chain(*training_data)))
# labels, preds = self.evaluate(flat_training_data, test_data, filename, old_model_name, num_training_files)
self.evaluate(flat_training_data, test_data, filename, old_model_name, num_training_files)
# all_preds.extend(preds)
# all_targets.extend(labels)
# print(classification_report(all_targets, all_preds, target_names=["no-annotation", "Evidence"]))
def evaluate(self, flat_training_data, test_data, filename, old_model_name, num_training_files):
training_sentences = flat_training_data[:, 0]
training_labels = flat_training_data[:, 1] == "True"
train_data, _ = self.vectorizer.prepare_data(training_sentences, [])
padded_train_data = sequence.pad_sequences(train_data, maxlen=self.max_length)
test_labels = test_data[:, 1] == "True"
test_sentences = test_data[:, 0]
padded_test_sentences = vectorizer.sentences_to_padded_indices(test_sentences, self.max_length)
analyzer = Analyzer({True: "Evidence", False: "no-annotation"},
self.model_name.format(self.epochs,
self.initial,
self.lr,
self.ilr,
self.seed))
preds = self._run_iteration(padded_train_data,
training_labels,
padded_test_sentences,
self.max_length,
filename,
old_model_name)
predictions = np.argmax(preds, axis=1) == 1
results = analyzer.analyze(test_sentences, test_labels, predictions)
print("Saving predictions for num_training files {0}".format(num_training_files))
analyzer.save_prediction_file(results, filename, num_training_files)
# return test_labels.tolist(), predictions.tolist()
def _run_iteration(self, padded_train_data, train_labels, padded_test_sentences, max_length, filename, old_model_name):
num_true_labels = np.count_nonzero(train_labels == True)
num_false_labels = np.count_nonzero(train_labels == False)
class_weight = {0: len(train_labels) - num_false_labels,
1: len(train_labels) - num_true_labels}
# preparing model for fine-tuning
tf.set_random_seed(self.seed)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
old_model = load_model(old_model_name)
if self.initial > 0:
# replace old classifier layer with new one in case we want some initial training
old_model.layers.pop()
last_hidden = old_model.layers[-1].output
new_output = Dense(units=2, activation="softmax", name="new_dense")(last_hidden)
model = Model(old_model.input, new_output)
print(model.summary())
else: # no training, therefore just testing the old model
model = old_model
label_array = np.array(train_labels)
two_d_train_labels = np.zeros((label_array.shape[0], 2))
two_d_train_labels[np.where(label_array == 0), 0] = 1
import numpy as np
from toolkitJ import cell2dmatlab_jsp
import GVal
def fScore(beta, P, R):
F = (1 + beta**2) * (P * R) / ((beta**2) * P + R)
return F
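# Illustrative sketch: with precision 0.8 and recall 0.6, the balanced score (beta = 1)
# reduces to the harmonic mean, 2 * 0.8 * 0.6 / (0.8 + 0.6) ~= 0.686, while beta = 2
# weighs recall more heavily and gives ~0.632. For example:
# fScore(1, 0.8, 0.6) -> 0.6857...
# fScore(2, 0.8, 0.6) -> 0.6316...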
def dataSetPreparation(feature_index, X_train_raw, Y_train_raw, X_valid_raw, Y_valid_raw):
X = X_train_raw[:, feature_index]
y = Y_train_raw[:, 0]
X_valid = X_valid_raw[:, feature_index]
y_valid = Y_valid_raw[:, 0]
index_no = cell2dmatlab_jsp([2, 4], 2, [])
for i in range(4):
index_no[0][i] = np.nonzero(y == i)[0]
index_no[1][i] = np.nonzero(y_valid == i)[0]
"""
This module is an example of a barebones function plugin for napari
It implements the ``napari_experimental_provide_function`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
from __future__ import print_function, division
from typing import TYPE_CHECKING, DefaultDict
from unicodedata import name
import six
# import modules
import sys # input, output, errors, and files
import os # interacting with file systems
import time # getting time
import datetime
import inspect # get passed parameters
import yaml # parameter importing
import json # for importing tiff metadata
try:
import cPickle as pickle # loading and saving python objects
except:
import pickle
import numpy as np # numbers package
import struct # for interpretting strings as binary data
import re # regular expressions
from pprint import pprint # for human readable file output
import traceback # for error messaging
import warnings # error messaging
import copy # not sure this is needed
import h5py # working with HDF5 files
import pandas as pd
import networkx as nx
import collections
# scipy and image analysis
from scipy.signal import find_peaks_cwt # used in channel finding
from scipy.optimize import curve_fit # fitting ring profile
from scipy.optimize import leastsq # fitting 2d gaussian
from scipy import ndimage as ndi # labeling and distance transform
from skimage import io
from skimage import segmentation # used in make_masks and segmentation
from skimage.transform import rotate
from skimage.feature import match_template # used to align images
from skimage.feature import blob_log # used for foci finding
from skimage.filters import threshold_otsu, median # segmentation
from skimage import filters
from skimage import morphology # many functions is segmentation used from this
from skimage.measure import regionprops # used for creating lineages
from skimage.measure import profile_line # used for ring an nucleoid analysis
from skimage import util, measure, transform, feature
import tifffile as tiff
from sklearn import metrics
# deep learning
import tensorflow as tf # ignore message about how tf was compiled
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import models
from tensorflow.keras import losses
from tensorflow.keras import utils
from tensorflow.keras import backend as K
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # supress warnings
# Parralelization modules
import multiprocessing
from multiprocessing import Pool
# Plotting for debug
import matplotlib as mpl
font = {'family' : 'sans-serif',
'weight' : 'normal',
'size' : 12}
mpl.rc('font', **font)
mpl.rcParams['pdf.fonttype'] = 42
from matplotlib.patches import Ellipse
from pathlib import Path
import time
import matplotlib.pyplot as plt
# import modules
import os
import glob
import re
import numpy as np
import tifffile as tiff
import pims_nd2
from skimage import io, measure, morphology
import tifffile as tiff
from scipy import stats
from pprint import pprint # for human readable file output
import multiprocessing
from multiprocessing import Pool
import numpy as np
import warnings
from tensorflow.python.keras import models
from enum import Enum
import numpy as np
import multiprocessing
from multiprocessing import Pool
import os
from napari_plugin_engine import napari_hook_implementation
from skimage.filters import threshold_otsu # segmentation
from skimage import morphology # many functions used in segmentation come from this
from skimage import segmentation # used in make_masks and segmentation
from scipy import ndimage as ndi # labeling and distance transform
import matplotlib.gridspec as gridspec
from skimage.exposure import rescale_intensity # for displaying in GUI
from skimage import io, morphology, segmentation
# import mm3_helpers as mm3
import napari
# This is the actual plugin function, where we export our function
# (The functions themselves are defined below)
@napari_hook_implementation
def napari_experimental_provide_function():
# we can return a single function
# or a tuple of (function, magicgui_options)
# or a list of multiple functions with or without options, as shown here:
#return [Segment, threshold, image_arithmetic]
return [Compile, ChannelPicker, Segment]
# 1. First example, a simple function that thresholds an image and creates a labels layer
def threshold(data: "napari.types.ImageData", threshold: int) -> "napari.types.LabelsData":
"""Threshold an image and return a mask."""
return (data > threshold).astype(int)
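# Hedged usage sketch for threshold() outside napari (the plain array stands in for ImageData;
# the values are illustrative only):
#     threshold(np.array([[1, 5], [10, 2]]), 4)   # -> [[0, 1], [1, 0]]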
# print a warning
def warning(*objs):
print(time.strftime("%H:%M:%S WARNING:", time.localtime()), *objs, file=sys.stderr)
# print information
def information(*objs):
print(time.strftime("%H:%M:%S", time.localtime()), *objs, file=sys.stdout)
def julian_day_number():
"""
Need this to solve a bug in pims_nd2.nd2reader.ND2_Reader instance initialization.
The bug is in /usr/local/lib/python2.7/site-packages/pims_nd2/ND2SDK.py in function `jdn_to_datetime_local`, when the year number in the metadata (self._lim_metadata_desc) is not in the correct range. This causes a problem when calling self.metadata.
https://en.wikipedia.org/wiki/Julian_day
"""
dt=datetime.datetime.now()
tt=dt.timetuple()
jdn=(1461.*(tt.tm_year + 4800. + (tt.tm_mon - 14.)/12))/4. + (367.*(tt.tm_mon - 2. - 12.*((tt.tm_mon -14.)/12)))/12. - (3.*((tt.tm_year + 4900. + (tt.tm_mon - 14.)/12.)/100.))/4. + tt.tm_mday - 32075
return jdn
def get_plane(filepath):
pattern = r'(c\d+).tif'
res = re.search(pattern,filepath)
if (res != None):
return res.group(1)
else:
return None
def get_fov(filepath):
pattern = r'xy(\d+)\w*.tif'
res = re.search(pattern,filepath)
if (res != None):
return int(res.group(1))
else:
return None
def get_time(filepath):
pattern = r't(\d+)xy\w+.tif'
res = re.search(pattern,filepath)
if (res != None):
return np.int_(res.group(1))
else:
return None
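# Illustrative examples for the filename parsers above (the filename is hypothetical):
#     get_plane('exp_t0002xy03c1.tif')  # -> 'c1'
#     get_fov('exp_t0002xy03c1.tif')    # -> 3
#     get_time('exp_t0002xy03c1.tif')   # -> 2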
# loads and image stack from TIFF or HDF5 using mm3 conventions
def load_stack(fov_id, peak_id, color='c1', image_return_number=None):
'''
Loads an image stack.
Supports reading TIFF stacks or HDF5 files.
Parameters
----------
fov_id : int
The FOV id
peak_id : int
The peak (channel) id. Dummy None value in case color='empty'
color : str
The image stack type to return. Can be:
c1 : phase stack
cN : where n is an integer for arbitrary color channel
sub : subtracted images
seg : segmented images
empty : get the empty channel for this fov, slightly different
Returns
-------
image_stack : np.ndarray
The image stack through time. Shape is (t, y, x)
'''
# things are slightly different for empty channels
if 'empty' in color:
if params['output'] == 'TIFF':
img_filename = params['experiment_name'] + '_xy%03d_%s.tif' % (fov_id, color)
with tiff.TiffFile(os.path.join(params['empty_dir'],img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r') as h5f:
img_stack = h5f[color][:]
return img_stack
# load normal images for either TIFF or HDF5
if params['output'] == 'TIFF':
if color[0] == 'c':
img_dir = params['chnl_dir']
elif 'sub' in color:
img_dir = params['sub_dir']
elif 'foci' in color:
img_dir = params['foci_seg_dir']
elif 'seg' in color:
img_dir = params['seg_dir']
img_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, color)
with tiff.TiffFile(os.path.join(img_dir, img_filename)) as tif:
img_stack = tif.asarray()
if params['output'] == 'HDF5':
with h5py.File(os.path.join(params['hdf5_dir'], 'xy%03d.hdf5' % fov_id), 'r') as h5f:
# normal naming
# need to use [:] to get a copy, else it references the closed hdf5 dataset
img_stack = h5f['channel_%04d/p%04d_%s' % (peak_id, peak_id, color)][:]
return img_stack
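# Hedged usage sketch for load_stack (assumes the global params dict has been populated
# elsewhere; the fov/peak ids below are placeholders):
#     phase_stack = load_stack(fov_id=1, peak_id=11, color='c1')      # shape (t, y, x)
#     sub_stack   = load_stack(fov_id=1, peak_id=11, color='sub_c1')  # subtracted images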
# load the time table and add it to the global params
def load_time_table():
'''Add the time table dictionary to the params global dictionary.
This is so it can be used during Cell creation.
'''
# try first for yaml, then for pkl
try:
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'rb') as time_table_file:
params['time_table'] = yaml.safe_load(time_table_file)
except:
with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'rb') as time_table_file:
params['time_table'] = pickle.load(time_table_file)
return
# function for loading the channel masks
def load_channel_masks():
'''Load channel masks dictionary. Should be .yaml but try pickle too.
'''
information("Loading channel masks dictionary.")
# try loading from .yaml before .pkl
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.yaml'))
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'r') as cmask_file:
channel_masks = yaml.safe_load(cmask_file)
except:
warning('Could not load channel masks dictionary from .yaml.')
try:
information('Path:', os.path.join(params['ana_dir'], 'channel_masks.pkl'))
with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'rb') as cmask_file:
channel_masks = pickle.load(cmask_file)
except ValueError:
warning('Could not load channel masks dictionary from .pkl.')
return channel_masks
# function for loading the specs file
def load_specs():
'''Load specs file which indicates which channels should be analyzed, used as empties, or ignored.'''
try:
with open(os.path.join(params['ana_dir'], 'specs.yaml'), 'r') as specs_file:
specs = yaml.safe_load(specs_file)
except:
try:
with open(os.path.join(params['ana_dir'], 'specs.pkl'), 'rb') as specs_file:
specs = pickle.load(specs_file)
except ValueError:
warning('Could not load specs file.')
return specs
### functions for dealing with raw TIFF images
# get params is the major function which processes raw TIFF images
def get_initial_tif_params(image_filename):
'''This is a function for getting the information
out of an image for later trap identification, cropping, and aligning with Unet. It loads a tiff file and pulls out the image metadata.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
#print(image_data.shape) # uncomment for debug
#if len(image_data.shape) == 2:
# img_shape = [image_data.shape[0],image_data.shape[1]]
#else:
img_shape = [image_data.shape[1],image_data.shape[2]]
plane_list = [str(i+1) for i in range(image_data.shape[0])]
#print(plane_list) # uncomment for debug
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : plane_list, # list of plane names
'shape' : img_shape} # image shape x y in pixels
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# get params is the major function which processes raw TIFF images
def get_tif_params(image_filename, find_channels=True):
'''This is a damn important function for getting the information
out of an image. It loads a tiff file, pulls out the image data, and the metadata,
including the location of the channels if flagged.
it returns a dictionary like this for each image:
'filename': image_filename,
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jdn' : image_metadata['jdn'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'plane_names' : image_metadata['plane_names'] # list of plane names
'channels': cp_dict, # dictionary of channel locations, in the case of Unet-based channel segmentation, it's a dictionary of channel labels
Called by
mm3_Compile.py __main__
Calls
mm3.extract_metadata
mm3.find_channels
'''
try:
# open up file and get metadata
with tiff.TiffFile(os.path.join(params['TIFF_dir'],image_filename)) as tif:
image_data = tif.asarray()
if params['TIFF_source'] == 'elements':
image_metadata = get_tif_metadata_elements(tif)
elif params['TIFF_source'] == 'nd2ToTIFF':
image_metadata = get_tif_metadata_nd2ToTIFF(tif)
else:
image_metadata = get_tif_metadata_filename(tif)
# look for channels if flagged
if find_channels:
# fix the image orientation and get the number of planes
image_data = fix_orientation(image_data)
# if the image data has more than 1 plane restrict image_data to phase,
# which should have highest mean pixel data
if len(image_data.shape) > 2:
#ph_index = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
ph_index = int(params['phase_plane'][1:]) - 1
image_data = image_data[ph_index]
# get shape of single plane
img_shape = [image_data.shape[0], image_data.shape[1]]
# find channels on the processed image
chnl_loc_dict = find_channel_locs(image_data)
information('Analyzed %s' % image_filename)
# return the file name, the data for the channels in that image, and the metadata
return {'filepath': os.path.join(params['TIFF_dir'], image_filename),
'fov' : image_metadata['fov'], # fov id
't' : image_metadata['t'], # time point
'jd' : image_metadata['jd'], # absolute julian time
'x' : image_metadata['x'], # x position on stage [um]
'y' : image_metadata['y'], # y position on stage [um]
'planes' : image_metadata['planes'], # list of plane names
'shape' : img_shape, # image shape x y in pixels
# 'channels' : {1 : {'A' : 1, 'B' : 2}, 2 : {'C' : 3, 'D' : 4}}}
'channels' : chnl_loc_dict} # dictionary of channel locations
except:
warning('Failed get_params for ' + image_filename.split("/")[-1])
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print(traceback.print_tb(sys.exc_info()[2]))
return {'filepath': os.path.join(params['TIFF_dir'],image_filename), 'analyze_success': False}
# finds metadata in a tiff image which has been exported with Nikon Elements.
def get_tif_metadata_elements(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files exported by Nikon Elements as a stacked tiff, one per time point.
tif is an opened tif file (using the package tifffile)
arguments:
fname (tifffile.TiffFile): TIFF file object from which data will be extracted
returns:
dictionary of values:
'jdn' (float)
'x' (float)
'y' (float)
'plane_names' (list of strings)
Called by
mm3.Compile
'''
# image Metadata
idata = { 'fov': -1,
't' : -1,
'jd': -1 * 0.0,
'x': -1 * 0.0,
'y': -1 * 0.0,
'planes': []}
# get the fov and t simply from the file name
idata['fov'] = int(tif.fname.split('xy')[1].split('.tif')[0])
idata['t'] = int(tif.fname.split('xy')[0].split('t')[-1])
# a page is a plane, or stack, in the tiff. The other metadata is hidden down in there.
for page in tif:
for tag in page.tags.values():
#print("Checking tag",tag.name,tag.value)
t = tag.name, tag.value
t_string = u""
time_string = u""
# Interesting tag names: 65330, 65331 (binary data; good stuff), 65332
# we want to work with the tag of the name 65331
# if the tag name is not in the set of tags we find interesting then skip this cycle of the loop
if tag.name not in ('65331', '65332', 'strip_byte_counts', 'image_width', 'orientation', 'compression', 'new_subfile_type', 'fill_order', 'max_sample_value', 'bits_per_sample', '65328', '65333'):
#print("*** " + tag.name)
#print(tag.value)
pass
#if tag.name == '65330':
# return tag.value
if tag.name in ('65331'):
# make infolist a list of the tag values 0 to 65535 by zipping up a paired list of two bytes, at two byte intervals
# note that 0X100 is hex for 256
infolist = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
# get char values for each element in infolist
for c_entry in range(0, len(infolist)):
# the element corresponds to an ascii char for a letter or bracket (and a few other things)
if infolist[c_entry] < 127 and infolist[c_entry] > 64:
# add the letter to the unicode string t_string
t_string += chr(infolist[c_entry])
#elif infolist[c_entry] == 0:
# continue
else:
t_string += " "
# this block will find the dTimeAbsolute and print the subsequent integers
# index 170 is counting seconds, and rollover of index 170 leads to increment of index 171
# rollover of index 171 leads to increment of index 172
# get the position of the array by finding the index of the t_string at which dTimeAbsolute is listed; note that 2*len(dTimeAbsolute)=26
#print(t_string)
arraypos = t_string.index("dXPos") * 2 + 16
xarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in xarr)
idata['x'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dYPos") * 2 + 16
yarr = tag.value[arraypos:arraypos+4]
b = ''.join(chr(i) for i in yarr)
idata['y'] = float(struct.unpack('<f', b)[0])
arraypos = t_string.index("dTimeAbsolute") * 2 + 26
shortarray = tag.value[arraypos+2:arraypos+10]
b = ''.join(chr(i) for i in shortarray)
idata['jd'] = float(struct.unpack('<d', b)[0])
# extract plane names
il = [a+b*0x100 for a,b in zip(tag.value[0::2], tag.value[1::2])]
li = [a+b*0x100 for a,b in zip(tag.value[1::2], tag.value[2::2])]
strings = list(zip(il, li))
allchars = ""
for c_entry in range(0, len(strings)):
if 31 < strings[c_entry][0] < 127:
allchars += chr(strings[c_entry][0])
elif 31 < strings[c_entry][1] < 127:
allchars += chr(strings[c_entry][1])
else:
allchars += " "
allchars = re.sub(' +',' ', allchars)
words = allchars.split(" ")
planes = []
for idx in [i for i, x in enumerate(words) if x == "sOpticalConfigName"]:
planes.append(words[idx+1])
idata['planes'] = planes
return idata
# finds metadata in a tiff image which has been exported with nd2ToTIFF.py.
def get_tif_metadata_nd2ToTIFF(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This is for tiff files exported by the mm3 script mm3_nd2ToTIFF.py. All the metadata
is found in that script and saved in json format to the tiff, so it is simply extracted here.
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
'planes' (list of strings)
Called by
mm3_Compile.get_tif_params
'''
# get the first page of the tiff and pull out image description
# this dictionary should be in the above form
for tag in tif.pages[0].tags:
if tag.name=="ImageDescription":
idata=tag.value
break
#print(idata)
idata = json.loads(idata)
return idata
# Finds metadata from the filename
def get_tif_metadata_filename(tif):
'''This function pulls out the metadata from a tif file and returns it as a dictionary.
This just gets the tiff metadata from the filename and is a backup option when the format of the metadata is not known.
Parameters:
tif: TIFF file object from which data will be extracted
Returns:
dictionary of values:
'fov': int,
't' : int,
'jdn' (float)
'x' (float)
'y' (float)
Called by
mm3_Compile.get_tif_params
'''
idata = {'fov' : get_fov(tif.filename), # fov id
't' : get_time(tif.filename), # time point
'jd' : -1 * 0.0, # absolute julian time
'x' : -1 * 0.0, # x position on stage [um]
'y' : -1 * 0.0} # y position on stage [um]
return idata
# make a lookup time table for converting nominal time to elapsed time in seconds
def make_time_table(analyzed_imgs):
'''
Loops through the analyzed images and uses the jd time in the metadata to find the elapsed
time in seconds that each picture was taken. This is later used for more accurate elongation
rate calculation.
Parameters
---------
analyzed_imgs : dict
The output of get_tif_params.
params['use_jd'] : boolean
If set to True, 'jd' time will be used from the image metadata to use to create time table. Otherwise the 't' index will be used, and the parameter 'seconds_per_time_index' will be used from the parameters.yaml file to convert to seconds.
Returns
-------
time_table : dict
Look up dictionary with keys for the FOV and then the time point.
'''
information('Making time table...')
# initialize
time_table = {}
first_time = float('inf')
# need to go through the data once to find the first time
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
if idata['jd'] < first_time:
first_time = idata['jd']
else:
if idata['t'] < first_time:
first_time = idata['t']
# init dictionary for specific times per FOV
if idata['fov'] not in time_table:
time_table[idata['fov']] = {}
for iname, idata in six.iteritems(analyzed_imgs):
if params['use_jd']:
# convert jd time to elapsed time in seconds
t_in_seconds = np.around((idata['jd'] - first_time) * 24*60*60, decimals=0).astype('uint32')
else:
t_in_seconds = np.around((idata['t'] - first_time) * params['moviemaker']['seconds_per_time_index'], decimals=0).astype('uint32')
time_table[int(idata['fov'])][int(idata['t'])] = int(t_in_seconds)
# save to .pkl. This pkl will be loaded into the params
# with open(os.path.join(params['ana_dir'], 'time_table.pkl'), 'wb') as time_table_file:
# pickle.dump(time_table, time_table_file, protocol=pickle.HIGHEST_PROTOCOL)
# with open(os.path.join(params['ana_dir'], 'time_table.txt'), 'w') as time_table_file:
# pprint(time_table, stream=time_table_file)
with open(os.path.join(params['ana_dir'], 'time_table.yaml'), 'w') as time_table_file:
yaml.dump(data=time_table, stream=time_table_file, default_flow_style=False, tags=None)
information('Time table saved.')
return time_table
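# Worked example of the jd -> elapsed-seconds conversion above (hypothetical numbers):
# a julian-date difference of 0.25 days maps to np.around(0.25 * 24*60*60) = 21600 seconds,
# stored as an int per FOV and time index.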
# saves traps sliced via Unet
def save_tiffs(imgDict, analyzed_imgs, fov_id):
savePath = os.path.join(params['experiment_directory'],
params['analysis_directory'],
params['chnl_dir'])
img_names = [key for key in analyzed_imgs.keys()]
image_params = analyzed_imgs[img_names[0]]
for peak,img in six.iteritems(imgDict):
img = img.astype('uint16')
if not os.path.isdir(savePath):
os.mkdir(savePath)
for planeNumber in image_params['planes']:
channel_filename = os.path.join(savePath, params['experiment_name'] + '_xy{0:0=3}_p{1:0=4}_c{2}.tif'.format(fov_id, peak, planeNumber))
io.imsave(channel_filename, img[:,:,:,int(planeNumber)-1])
# slice_and_write cuts up the image files one at a time and writes them out to tiff stacks
def tiff_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images per channel.
Loads all tiffs from an FOV into memory and then slices all time points at once.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# go through list of images and get the file path
for n, image in enumerate(images_to_write):
# analyzed_imgs dictionary will be found in main scope. [0] is the key, [1] is jd
image_params = analyzed_imgs[image[0]]
information("Loading %s." % image_params['filepath'].split('/')[-1])
if n == 0:
# declare identification variables for saving using first image
fov_id = image_params['fov']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# channel masks should only contain ints, but you can use this for hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different time stack for all colors
for color_index in range(channel_stack.shape[3]):
# this is the filename for the channel
# # chnl_dir and p will be looked for in the scope above (__main__)
channel_filename = os.path.join(params['chnl_dir'], params['experiment_name'] + '_xy%03d_p%04d_c%1d.tif' % (fov_id, peak, color_index+1))
# save stack
tiff.imsave(channel_filename, channel_stack[:,:,:,color_index], compress=4)
return
# saves traps sliced via Unet to an hdf5 file
def save_hdf5(imgDict, img_names, analyzed_imgs, fov_id, channel_masks):
'''Writes out 4D stacks of images to an HDF5 file.
Called by
mm3_Compile.py
'''
savePath = params['hdf5_dir']
if not os.path.isdir(savePath):
os.mkdir(savePath)
img_times = [analyzed_imgs[key]['t'] for key in img_names]
img_jds = [analyzed_imgs[key]['jd'] for key in img_names]
fov_ids = [analyzed_imgs[key]['fov'] for key in img_names]
# get image_params from first image from current fov
image_params = analyzed_imgs[img_names[0]]
# establish some variables for hdf5 attributes
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
fov_channel_masks = channel_masks[fov_id]
with h5py.File(os.path.join(savePath,'{}_xy{:0=2}.hdf5'.format(params['experiment_name'],fov_id)), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channel
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted([key for key in imgDict.keys()]))
# this is for things that change across time, for these create a dataset
img_names = np.asarray(img_names)
img_names = np.expand_dims(img_names, 1)
img_names = img_names.astype('S100')
h5ds = h5f.create_dataset(u'filenames', data=img_names,
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(img_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(img_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak,channel_stack in six.iteritems(imgDict):
channel_stack = channel_stack.astype('uint16')
# create group for this trap
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
channel_loc = fov_channel_masks[peak]
h5g.attrs.create('channel_loc', channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
# same thing as tiff_stack_slice_and_write but do it for hdf5
def hdf5_stack_slice_and_write(images_to_write, channel_masks, analyzed_imgs):
'''Writes out 4D stacks of TIFF images to an HDF5 file.
Called by
__main__
'''
# make an array of images and then concatenate them into one big stack
image_fov_stack = []
# make arrays for filenames and times
image_filenames = []
image_times = [] # times is still an integer but may be indexed arbitrarily
image_jds = [] # jds = julian dates (times)
# go through list of images, load and fix them, and create arrays of metadata
for n, image in enumerate(images_to_write):
image_name = image[0] # [0] is the key, [1] is jd
# analyzed_imgs dictionary will be found in main scope.
image_params = analyzed_imgs[image_name]
information("Loading %s." % image_params['filepath'].split('/')[-1])
# add information to metadata arrays
image_filenames.append(image_name)
image_times.append(image_params['t'])
image_jds.append(image_params['jd'])
# declare identification variables for saving using first image
if n == 0:
# same across fov
fov_id = image_params['fov']
x_loc = image_params['x']
y_loc = image_params['y']
image_shape = image_params['shape']
image_planes = image_params['planes']
# load the tif and store it in array
with tiff.TiffFile(image_params['filepath']) as tif:
image_data = tif.asarray()
# channel finding was also done on images after orientation was fixed
image_data = fix_orientation(image_data)
# add additional axis if the image is flat
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
# change axis so it goes Y, X, Plane
image_data = np.rollaxis(image_data, 0, 3)
# add it to list. The images should be in time order
image_fov_stack.append(image_data)
# concatenate the list into one big ass stack
image_fov_stack = np.stack(image_fov_stack, axis=0)
# create the HDF5 file for the FOV, first time this is being done.
with h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'w', libver='earliest') as h5f:
# add in metadata for this FOV
# these attributes should be common for all channel
h5f.attrs.create('fov_id', fov_id)
h5f.attrs.create('stage_x_loc', x_loc)
h5f.attrs.create('stage_y_loc', y_loc)
h5f.attrs.create('image_shape', image_shape)
# encoding is because HDF5 has problems with numpy unicode
h5f.attrs.create('planes', [plane.encode('utf8') for plane in image_planes])
h5f.attrs.create('peaks', sorted(channel_masks[fov_id].keys()))
# this is for things that change across time, for these create a dataset
h5ds = h5f.create_dataset(u'filenames', data=np.expand_dims(image_filenames, 1),
chunks=True, maxshape=(None, 1), dtype='S100',
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times', data=np.expand_dims(image_times, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
h5ds = h5f.create_dataset(u'times_jd', data=np.expand_dims(image_jds, 1),
chunks=True, maxshape=(None, 1),
compression="gzip", shuffle=True, fletcher32=True)
# cut out the channels as per channel masks for this fov
for peak, channel_loc in six.iteritems(channel_masks[fov_id]):
#information('Slicing and saving channel peak %s.' % channel_filename.split('/')[-1])
information('Slicing and saving channel peak %d.' % peak)
# create group for this channel
h5g = h5f.create_group('channel_%04d' % peak)
# add attribute for peak_id, channel location
h5g.attrs.create('peak_id', peak)
h5g.attrs.create('channel_loc', channel_loc)
# channel masks should only contain ints, but you can use this for a hard fix
# for i in range(len(channel_loc)):
# for j in range(len(channel_loc[i])):
# channel_loc[i][j] = int(channel_loc[i][j])
# slice out channel.
# The function should recognize the shape length as 4 and cut all time points
channel_stack = cut_slice(image_fov_stack, channel_loc)
# save a different dataset for all colors
for color_index in range(channel_stack.shape[3]):
# create the dataset for the image. Review docs for these options.
h5ds = h5g.create_dataset(u'p%04d_c%1d' % (peak, color_index+1),
data=channel_stack[:,:,:,color_index],
chunks=(1, channel_stack.shape[1], channel_stack.shape[2]),
maxshape=(None, channel_stack.shape[1], channel_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# h5ds.attrs.create('plane', image_planes[color_index].encode('utf8'))
# write the data even though we have more to write (free up memory)
h5f.flush()
return
def tileImage(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[1]//divisor
print(img.shape, M, N, divisor, subImageNumber)
ans = ([img[x:x+M,y:y+N] for x in range(0,img.shape[0],M) for y in range(0,img.shape[1],N)])
tiles=[]
for m in ans:
if m.shape[0]==512 and m.shape[1]==512:
tiles.append(m)
tiles=np.asarray(tiles)
#print(tiles)
return(tiles)
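# Geometry sketch for tileImage, assuming the 2048x2048 padded frames used elsewhere in this
# module: subImageNumber=16 gives divisor=4 and 512x512 crops, which is exactly what the
# m.shape[0]==512 and m.shape[1]==512 filter above keeps; crops of any other size are dropped.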
def get_weights(img, subImageNumber):
divisor = int(np.sqrt(subImageNumber))
M = img.shape[0]//divisor
N = img.shape[1]//divisor
weights = np.ones((img.shape[0],img.shape[1]),dtype='uint8')
for i in range(divisor-1):
weights[(M*(i+1))-25:(M*(i+1)+25),:] = 0
weights[:,(N*(i+1))-25:(N*(i+1)+25)] = 0
return(weights)
def permute_image(img, trap_align_metadata):
# are there three dimensions?
if len(img.shape) == 3:
if img.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
# img = np.transpose(img, (1,2,0))
img = img[trap_align_metadata['phase_plane_index'],:,:] # grab just the phase channel
else:
img = img[:,:,trap_align_metadata['phase_plane_index']] # grab just the phase channel
return(img)
def imageConcatenatorFeatures(imgStack, subImageNumber = 64):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
#print(rowNumPerImage)
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j]))#,
#imgStack[baseNum+4,:,:,j],imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3]))#,
#featureRowDicts[j][baseNum+4],featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7]))
return(bigImg)
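# Reconstruction sketch for imageConcatenatorFeatures, assuming 512x512 crops of a 2048x2048
# frame (subImageNumber=16): rowNumPerImage=4, so four crops are column-stacked into a
# 512x2048 row and four such rows are row-stacked back into the 2048x2048 prediction.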
def imageConcatenatorFeatures2(imgStack, subImageNumber = 81):
rowNumPerImage = int(np.sqrt(subImageNumber)) # here I'm assuming our large images are square, with equal number of crops in each dimension
imageNum = int(imgStack.shape[0]/subImageNumber) # total number of sub-images divided by the number of sub-images in each original large image
iterNum = int(imageNum*rowNumPerImage)
imageDims = int(np.sqrt(imgStack.shape[1]*imgStack.shape[2]*subImageNumber))
featureNum = int(imgStack.shape[3])
bigImg = np.zeros(shape=(imageNum, imageDims, imageDims, featureNum), dtype='float32') # create array to store reconstructed images
featureRowDicts = []
for j in range(featureNum):
rowDict = {}
for i in range(iterNum):
baseNum = int(i*iterNum/imageNum)
# concatenate columns of 256x256 images to build each 256x2048 row
rowDict[i] = np.column_stack((imgStack[baseNum,:,:,j],imgStack[baseNum+1,:,:,j],
imgStack[baseNum+2,:,:,j], imgStack[baseNum+3,:,:,j],
imgStack[baseNum+4,:,:,j]))#,imgStack[baseNum+5,:,:,j],
#imgStack[baseNum+6,:,:,j],imgStack[baseNum+7,:,:,j],
#imgStack[baseNum+8,:,:,j]))
featureRowDicts.append(rowDict)
for j in range(featureNum):
for i in range(imageNum):
baseNum = int(i*rowNumPerImage)
# concatenate appropriate 256x2048 rows to build a 2048x2048 image and place it into bigImg
bigImg[i,:,:,j] = np.row_stack((featureRowDicts[j][baseNum],featureRowDicts[j][baseNum+1],
featureRowDicts[j][baseNum+2],featureRowDicts[j][baseNum+3],
featureRowDicts[j][baseNum+4]))#,featureRowDicts[j][baseNum+5],
#featureRowDicts[j][baseNum+6],featureRowDicts[j][baseNum+7],
#featureRowDicts[j][baseNum+8]))
return(bigImg)
def get_weights_array(arr=np.zeros((2048,2048)), shiftDistance=128, subImageNumber=64, padSubImageNumber=81):
originalImageWeights = get_weights(arr, subImageNumber=subImageNumber)
shiftLeftWeights = np.pad(originalImageWeights, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
shiftRightWeights = np.pad(originalImageWeights, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:(-1*shiftDistance)]
shiftUpWeights = np.pad(originalImageWeights, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
shiftDownWeights = np.pad(originalImageWeights, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:(-1*shiftDistance),:]
expandedImageWeights = get_weights(np.zeros((arr.shape[0]+2*shiftDistance,arr.shape[1]+2*shiftDistance)), subImageNumber=padSubImageNumber)[shiftDistance:-shiftDistance,shiftDistance:-shiftDistance]
allWeights = np.stack((originalImageWeights, expandedImageWeights, shiftUpWeights, shiftDownWeights, shiftLeftWeights,shiftRightWeights), axis=-1)
stackWeights = np.stack((allWeights,allWeights),axis=0)
stackWeights = np.stack((stackWeights,stackWeights,stackWeights),axis=3)
return(stackWeights)
# predicts locations of channels in an image using deep learning model
def get_frame_predictions(img,model,stackWeights, shiftDistance=256, subImageNumber=16, padSubImageNumber=25, debug=False):
pred = predict_first_image_channels(img, model, shiftDistance=shiftDistance,
subImageNumber=subImageNumber, padSubImageNumber=padSubImageNumber, debug=debug)[0,...]
# print(pred.shape)
if debug:
print(pred.shape)
compositePrediction = np.average(pred, axis=3, weights=stackWeights)
# print(compositePrediction.shape)
padSize = (compositePrediction.shape[0]-img.shape[0])//2
compositePrediction = util.crop(compositePrediction,((padSize,padSize),
(padSize,padSize),
(0,0)))
# print(compositePrediction.shape)
return(compositePrediction)
def apply_median_filter_normalize(imgs):
selem = morphology.disk(3)
for i in range(imgs.shape[0]):
# Store sample
tmpImg = imgs[i,:,:,0]
medImg = median(tmpImg, selem)
tmpImg = medImg/np.max(medImg)
tmpImg = np.expand_dims(tmpImg, axis=-1)
imgs[i,:,:,:] = tmpImg
return(imgs)
def predict_first_image_channels(img, model,
subImageNumber=16, padSubImageNumber=25,
shiftDistance=128, batchSize=1,
debug=False):
imgSize = img.shape[0]
padSize = (2048-imgSize)//2 # how much to pad on each side to get up to 2048x2048?
imgStack = np.pad(img, pad_width=((padSize,padSize),(padSize,padSize)),
mode='constant', constant_values=((0,0),(0,0))) # pad the images to make them 2048x2048
# pad the stack by 128 pixels on each side to get complementary crops that I can run the network on. This
# should help me fill in low-confidence regions where the crop boundaries were for the original image
imgStackExpand = np.pad(imgStack, pad_width=((shiftDistance,shiftDistance),(shiftDistance,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))
imgStackShiftRight = np.pad(imgStack, pad_width=((0,0),(0,shiftDistance)),
mode='constant', constant_values=((0,0),(0,0)))[:,shiftDistance:]
imgStackShiftLeft = np.pad(imgStack, pad_width=((0,0),(shiftDistance,0)),
mode='constant', constant_values=((0,0),(0,0)))[:,:-shiftDistance]
imgStackShiftDown = np.pad(imgStack, pad_width=((0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[shiftDistance:,:]
imgStackShiftUp = np.pad(imgStack, pad_width=((shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0)))[:-shiftDistance,:]
#print(imgStackShiftUp.shape)
crops = tileImage(imgStack, subImageNumber=subImageNumber)
print("Crops: ", crops.shape)
crops = np.expand_dims(crops, -1)
data_gen_args = {'batch_size':params['compile']['channel_prediction_batch_size'],
'n_channels':1,
'normalize_to_one':True,
'shuffle':False}
predict_gen_args = {'verbose':1,
'use_multiprocessing':True,
'workers':params['num_analyzers']}
img_generator = TrapSegmentationDataGenerator(crops, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
prediction = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
#print(prediction.shape)
cropsExpand = tileImage(imgStackExpand, subImageNumber=padSubImageNumber)
cropsExpand = np.expand_dims(cropsExpand, -1)
img_generator = TrapSegmentationDataGenerator(cropsExpand, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionExpand = imageConcatenatorFeatures2(predictions, subImageNumber=padSubImageNumber)
predictionExpand = util.crop(predictionExpand, ((0,0),(shiftDistance,shiftDistance),(shiftDistance,shiftDistance),(0,0)))
#print(predictionExpand.shape)
cropsShiftLeft = tileImage(imgStackShiftLeft, subImageNumber=subImageNumber)
cropsShiftLeft = np.expand_dims(cropsShiftLeft, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftLeft, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionLeft = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionLeft = np.pad(predictionLeft, pad_width=((0,0),(0,0),(0,shiftDistance),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,shiftDistance:,:]
#print(predictionLeft.shape)
cropsShiftRight = tileImage(imgStackShiftRight, subImageNumber=subImageNumber)
cropsShiftRight = np.expand_dims(cropsShiftRight, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftRight, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionRight = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionRight = np.pad(predictionRight, pad_width=((0,0),(0,0),(shiftDistance,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:,:(-1*shiftDistance),:]
#print(predictionRight.shape)
cropsShiftUp = tileImage(imgStackShiftUp, subImageNumber=subImageNumber)
#print(cropsShiftUp.shape)
cropsShiftUp = np.expand_dims(cropsShiftUp, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftUp, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionUp = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionUp = np.pad(predictionUp, pad_width=((0,0),(0,shiftDistance),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,shiftDistance:,:,:]
#print(predictionUp.shape)
cropsShiftDown = tileImage(imgStackShiftDown, subImageNumber=subImageNumber)
cropsShiftDown = np.expand_dims(cropsShiftDown, -1)
img_generator = TrapSegmentationDataGenerator(cropsShiftDown, **data_gen_args)
predictions = model.predict_generator(img_generator, **predict_gen_args)
predictionDown = imageConcatenatorFeatures(predictions, subImageNumber=subImageNumber)
predictionDown = np.pad(predictionDown, pad_width=((0,0),(shiftDistance,0),(0,0),(0,0)),
mode='constant', constant_values=((0,0),(0,0),(0,0),(0,0)))[:,:(-1*shiftDistance),:,:]
#print(predictionDown.shape)
allPredictions = np.stack((prediction, predictionExpand,
predictionUp, predictionDown,
predictionLeft, predictionRight), axis=-1)
return(allPredictions)
# takes initial U-net centroids for trap locations, and creates bounding boxes for each trap at the defined height and width
def get_frame_trap_bounding_boxes(trapLabels, trapProps, trapAreaThreshold=2000, trapWidth=27, trapHeight=256):
badTrapLabels = [reg.label for reg in trapProps if reg.area < trapAreaThreshold] # filter out small "trap" regions
goodTraps = trapLabels.copy()
for label in badTrapLabels:
goodTraps[goodTraps == label] = 0 # re-label bad traps as background (0)
goodTrapProps = measure.regionprops(goodTraps)
trapCentroids = [(int(np.round(reg.centroid[0])),int(np.round(reg.centroid[1]))) for reg in goodTrapProps] # get centroids as integers
trapBboxes = []
for centroid in trapCentroids:
rowIndex = centroid[0]
colIndex = centroid[1]
minRow = rowIndex-trapHeight//2
maxRow = rowIndex+trapHeight//2
minCol = colIndex-trapWidth//2
maxCol = colIndex+trapWidth//2
if trapWidth % 2 != 0:
maxCol += 1
coordArray = np.array([minRow,maxRow,minCol,maxCol])
# remove any traps at edges of image
if np.any(coordArray > goodTraps.shape[0]):
continue
if np.any(coordArray < 0):
continue
trapBboxes.append((minRow,minCol,maxRow,maxCol))
return(trapBboxes)
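# Worked example of the bounding-box arithmetic above (hypothetical centroid, default sizes):
# a centroid at (row=500, col=100) with trapHeight=256 and trapWidth=27 gives
# minRow=372, maxRow=628, minCol=87, maxCol=114 (the odd width adds one column),
# returned as the tuple (372, 87, 628, 114).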
# this function performs image alignment as defined by the shifts passed as an argument
def crop_traps(fileNames, trapProps, labelledTraps, bboxesDict, trap_align_metadata):
frameNum = trap_align_metadata['frame_count']
channelNum = trap_align_metadata['plane_number']
trapImagesDict = {key:np.zeros((frameNum,
trap_align_metadata['trap_height'],
trap_align_metadata['trap_width'],
channelNum)) for key in bboxesDict}
trapClosedEndPxDict = {}
flipImageDict = {}
trapMask = labelledTraps
for frame in range(frameNum):
if (frame+1) % 20 == 0:
print("Cropping trap regions for frame number {} of {}.".format(frame+1, frameNum))
imgPath = os.path.join(params['experiment_directory'],params['image_directory'],fileNames[frame])
fullFrameImg = io.imread(imgPath)
if len(fullFrameImg.shape) == 3:
if fullFrameImg.shape[0] < 3: # for tifs with fewer than three imaging channels, the first dimension separates channels
fullFrameImg = np.transpose(fullFrameImg, (1,2,0))
trapClosedEndPxDict[fileNames[frame]] = {key:{} for key in bboxesDict.keys()}
for key in trapImagesDict.keys():
bbox = bboxesDict[key][frame]
trapImagesDict[key][frame,:,:,:] = fullFrameImg[bbox[0]:bbox[2],bbox[1]:bbox[3],:]
#tmpImg = np.reshape(fullFrameImg[trapMask==key], (trapHeight,trapWidth,channelNum))
if frame == 0:
medianProfile = np.median(trapImagesDict[key][frame,:,:,0],axis=1) # get intensity of middle column of trap
maxIntensityRow = np.argmax(medianProfile)
if maxIntensityRow > trap_align_metadata['trap_height']//2:
flipImageDict[key] = 0
else:
flipImageDict[key] = 1
if flipImageDict[key] == 1:
trapImagesDict[key][frame,:,:,:] = trapImagesDict[key][frame,::-1,:,:]
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[0]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[2]
else:
trapClosedEndPxDict[fileNames[frame]][key]['closed_end_px'] = bbox[2]
trapClosedEndPxDict[fileNames[frame]][key]['open_end_px'] = bbox[0]
continue
return(trapImagesDict, trapClosedEndPxDict)
# gets shifted bounding boxes to crop traps through time
def shift_bounding_boxes(bboxesDict, shifts, imgSize):
bboxesShiftDict = {}
for key in bboxesDict.keys():
bboxesShiftDict[key] = []
bboxes = bboxesDict[key]
for i in range(shifts.shape[0]):
if i == 0:
bboxesShiftDict[key].append(bboxes)
else:
minRow = bboxes[0]+shifts[i,0]
minCol = bboxes[1]+shifts[i,1]
maxRow = bboxes[2]+shifts[i,0]
maxCol = bboxes[3]+shifts[i,1]
bboxesShiftDict[key].append((minRow,
minCol,
maxRow,
maxCol))
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) < 0):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
if np.any(np.asarray([minRow,minCol,maxRow,maxCol]) > imgSize):
print("channel {} removed: out of frame".format(key))
del bboxesShiftDict[key]
break
return(bboxesShiftDict)
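# Sketch of the shift logic above (hypothetical values): a bbox (minRow=10, minCol=5,
# maxRow=266, maxCol=32) with a frame shift of (+2, -1) becomes (12, 4, 268, 31);
# any box pushed outside [0, imgSize] causes the whole channel to be dropped.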
# finds the location of channels in a tif
def find_channel_locs(image_data):
'''Finds the location of channels from a phase contrast image. The channels are returned in
a dictionary where the key is the x position of the channel in pixels and the value is a
dictionary with the open and closed end in pixels in y.
Called by
mm3_Compile.get_tif_params
'''
# declare temp variables from yaml parameter dict.
chan_w = params['compile']['channel_width']
chan_sep = params['compile']['channel_separation']
crop_wp = int(params['compile']['channel_width_pad'] + chan_w/2)
chan_snr = params['compile']['channel_detection_snr']
# Detect peaks in the x projection (i.e. find the channels)
projection_x = image_data.sum(axis=0).astype(np.int32)
# find_peaks_cwt is a function which attempts to find the peaks in a 1-D array by
# convolving it with a wave. here the wave is the default Mexican hat wave
# but the minimum signal to noise ratio is specified
# *** The range here should be a parameter or changed to a fraction.
peaks = find_peaks_cwt(projection_x, np.arange(chan_w-5,chan_w+5), min_snr=chan_snr)
# If the left-most peak position is within half of a channel separation,
# discard the channel from the list.
if peaks[0] < (chan_sep / 2):
peaks = peaks[1:]
# If the difference between the right-most peak position and the right edge
# of the image is less than half of a channel separation, discard the channel.
if image_data.shape[1] - peaks[-1] < (chan_sep / 2):
peaks = peaks[:-1]
# Find the average channel ends for the y-projected image
projection_y = image_data.sum(axis=1)
# find derivative, must use int32 because it was unsigned 16b before.
proj_y_d = np.diff(projection_y.astype(np.int32))
# use the top third to look for closed end, is pixel location of highest deriv
onethirdpoint_y = int(projection_y.shape[0]/3.0)
default_closed_end_px = proj_y_d[:onethirdpoint_y].argmax()
# use bottom third to look for open end, pixel location of lowest deriv
twothirdpoint_y = int(projection_y.shape[0]*2.0/3.0)
default_open_end_px = twothirdpoint_y + proj_y_d[twothirdpoint_y:].argmin()
default_length = default_open_end_px - default_closed_end_px # used for checks
# go through peaks and assign information
# dict for channel dimensions
chnl_loc_dict = {}
# key is peak location, value is dict with {'closed_end_px': px, 'open_end_px': px}
for peak in peaks:
# set defaults
chnl_loc_dict[peak] = {'closed_end_px': default_closed_end_px,
'open_end_px': default_open_end_px}
# redo the previous y projection finding with just this channel
channel_slice = image_data[:, peak-crop_wp:peak+crop_wp]
slice_projection_y = channel_slice.sum(axis = 1)
slice_proj_y_d = np.diff(slice_projection_y.astype(np.int32))
slice_closed_end_px = slice_proj_y_d[:onethirdpoint_y].argmax()
slice_open_end_px = twothirdpoint_y + slice_proj_y_d[twothirdpoint_y:].argmin()
slice_length = slice_open_end_px - slice_closed_end_px
# check if these values make sense. If so, use them. If not, use default
# make sure the length is not more than 15 pixels bigger or smaller than the default
# *** This 15 should probably be a parameter or at least changed to a fraction.
if slice_length + 15 < default_length or slice_length - 15 > default_length:
continue
# make sure ends are greater than 15 pixels from image edge
if slice_closed_end_px < 15 or slice_open_end_px > image_data.shape[0] - 15:
continue
# if you made it to this point then update the entry
chnl_loc_dict[peak] = {'closed_end_px' : slice_closed_end_px,
'open_end_px' : slice_open_end_px}
return chnl_loc_dict
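# Shape of the dictionary returned by find_channel_locs (pixel values are illustrative):
#     {peak_x_px: {'closed_end_px': 50, 'open_end_px': 300}, ...}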
# make masks from initial set of images (same images as clusters)
def make_masks(analyzed_imgs):
'''
Make masks goes through the channel locations in the image metadata and builds a consensus
mask for each image per fov, which it returns as a dictionary named channel_masks.
The keys in this dictionary are fov ids, and the value for each is another dictionary. This dict's keys are channel locations (peaks) and each value is a [2][2] array:
[[minrow, maxrow],[mincol, maxcol]] of pixel locations designating the corner of each mask
for each channel on the whole image
One important consequence of this function is that the channel ids and the size of the
channel slices are decided now. Updates to mask must coordinate with these values.
Parameters
analyzed_imgs : dict
image information created by get_params
Returns
channel_masks : dict
dictionary of consensus channel masks.
Called By
mm3_Compile.py
Calls
'''
information("Determining initial channel masks...")
# declare temp variables from yaml parameter dict.
crop_wp = int(params['compile']['channel_width_pad'] + params['compile']['channel_width']/2)
chan_lp = int(params['compile']['channel_length_pad'])
# initialize dictionary
channel_masks = {}
# get the size of the images (hope they are the same)
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
image_rows = img_v['shape'][0] # x pixels
image_cols = img_v['shape'][1] # y pixels
break # just need one. using iteritems mean the whole dict doesn't load
# get the fov ids
fovs = []
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
if img_v['fov'] not in fovs:
fovs.append(img_v['fov'])
# max width and length across all fovs. channels will get expanded by these values
# this important for later updates to the masks, which should be the same
max_chnl_mask_len = 0
max_chnl_mask_wid = 0
# for each fov make a channel_mask dictionary from consensus mask
for fov in fovs:
# initialize a the dict and consensus mask
channel_masks_1fov = {} # dict which holds channel masks {peak : [[y1, y2],[x1,x2]],...}
consensus_mask = np.zeros([image_rows, image_cols]) # mask for labeling
# bring up information for each image
for img_k in analyzed_imgs.keys():
img_v = analyzed_imgs[img_k]
# skip this one if it is not of the current fov
if img_v['fov'] != fov:
continue
# for each channel in each image make a single mask
img_chnl_mask = np.zeros([image_rows, image_cols])
# and add the channel mask to it
for chnl_peak, peak_ends in six.iteritems(img_v['channels']):
# pull out the peak location and top and bottom location
# and expand by padding (more padding done later for width)
x1 = max(chnl_peak - crop_wp, 0)
x2 = min(chnl_peak + crop_wp, image_cols)
y1 = max(peak_ends['closed_end_px'] - chan_lp, 0)
y2 = min(peak_ends['open_end_px'] + chan_lp, image_rows)
# add it to the mask for this image
img_chnl_mask[y1:y2, x1:x2] = 1
# add it to the consensus mask
consensus_mask += img_chnl_mask
# Normalize consensus mask between 0 and 1.
consensus_mask = consensus_mask.astype('float32') / float(np.amax(consensus_mask))
# threshold and homogenize each channel mask within the mask, label them
# label when value is above 0.1 (so 90% occupancy), transpose.
# the [0] is for the array ([1] is the number of regions)
# It transposes and then transposes again so regions are labeled left to right
# clear border it to make sure the channels are off the edge
consensus_mask = ndi.label(consensus_mask)[0]
# go through each label
for label in np.unique(consensus_mask):
if label == 0: # label zero is the background
continue
binary_core = consensus_mask == label
# clean up the rough edges
poscols = np.any(binary_core, axis = 0) # column positions where true (any)
posrows = np.any(binary_core, axis = 1) # row positions where true (any)
# channel_id given by horizontal position
# this is important. later updates to the positions will have to check
# if their channels contain this median value to match up
channel_id = int(np.median(np.where(poscols)[0]))
# store the edge locations of the channel mask in the dictionary. Will be ints
min_row = np.min(np.where(posrows)[0])
max_row = np.max(np.where(posrows)[0])
min_col = np.min(np.where(poscols)[0])
max_col = np.max(np.where(poscols)[0])
# if the min/max cols are within the image bounds,
# add the mask, as 4 points, to the dictionary
if min_col > 0 and max_col < image_cols:
channel_masks_1fov[channel_id] = [[min_row, max_row], [min_col, max_col]]
# find the largest channel width and height while you go round
max_chnl_mask_len = int(max(max_chnl_mask_len, max_row - min_row))
max_chnl_mask_wid = int(max(max_chnl_mask_wid, max_col - min_col))
# add channel_mask dictionary to the fov dictionary, use copy to play it safe
channel_masks[fov] = channel_masks_1fov.copy()
# update all channel masks to be the max size
cm_copy = channel_masks.copy()
for fov, peaks in six.iteritems(channel_masks):
# f_id = int(fov)
for peak, chnl_mask in six.iteritems(peaks):
# p_id = int(peak)
# just add length to the open end (bottom of image, low column)
if chnl_mask[0][1] - chnl_mask[0][0] != max_chnl_mask_len:
cm_copy[fov][peak][0][1] = chnl_mask[0][0] + max_chnl_mask_len
# enlarge widths around the middle, but make sure you don't get floats
if chnl_mask[1][1] - chnl_mask[1][0] != max_chnl_mask_wid:
wid_diff = max_chnl_mask_wid - (chnl_mask[1][1] - chnl_mask[1][0])
if wid_diff % 2 == 0:
cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - wid_diff/2, 0)
cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + wid_diff/2, image_cols - 1)
else:
cm_copy[fov][peak][1][0] = max(chnl_mask[1][0] - (wid_diff-1)/2, 0)
cm_copy[fov][peak][1][1] = min(chnl_mask[1][1] + (wid_diff+1)/2, image_cols - 1)
# convert all values to ints
chnl_mask[0][0] = int(chnl_mask[0][0])
chnl_mask[0][1] = int(chnl_mask[0][1])
chnl_mask[1][0] = int(chnl_mask[1][0])
chnl_mask[1][1] = int(chnl_mask[1][1])
# cm_copy[fov][peak] = {'y_top': chnl_mask[0][0],
# 'y_bot': chnl_mask[0][1],
# 'x_left': chnl_mask[1][0],
# 'x_right': chnl_mask[1][1]}
# print(type(cm_copy[fov][peak][1][0]), cm_copy[fov][peak][1][0])
#save the channel mask dictionary to a pickle and a text file
# with open(os.path.join(params['ana_dir'], 'channel_masks.pkl'), 'wb') as cmask_file:
# pickle.dump(cm_copy, cmask_file, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(params['ana_dir'], 'channel_masks.txt'), 'w') as cmask_file:
pprint(cm_copy, stream=cmask_file)
with open(os.path.join(params['ana_dir'], 'channel_masks.yaml'), 'w') as cmask_file:
yaml.dump(data=cm_copy, stream=cmask_file, default_flow_style=False, tags=None)
information("Channel masks saved.")
return cm_copy
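# Shape of the channel_masks dictionary returned by make_masks (ids and pixels are illustrative):
#     {fov_id: {channel_id: [[min_row, max_row], [min_col, max_col]], ...}, ...}
# e.g. {1: {457: [[10, 266], [444, 471]]}} in whole-image pixel coordinates.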
# get each fov_id, peak_id, frame's mask bounding box from bounding boxes arrived at by convolutional neural network
def make_channel_masks_CNN(bboxes_dict):
'''
The keys in this dictionary are peak_ids and the value for each is an array of shape (frameNumber,2,2):
Each frameNumber's 2x2 slice of the array represents the given peak_id's [[minrow, maxrow],[mincol, maxcol]].
One important consequence of this function is that the channel ids and the size of the
channel slices are decided now. Updates to mask must coordinate with these values.
Parameters
analyzed_imgs : dict
image information created by get_params
Returns
channel_masks : dict
dictionary of consensus channel masks.
Called By
mm3_Compile.py
Calls
'''
# initialize the new channel_masks dict
channel_masks = {}
# reorder elements of tuples in bboxes_dict to match [[minrow, maxrow], [mincol, maxcol]] convention above
peak_ids = [peak_id for peak_id in bboxes_dict.keys()]
peak_ids.sort()
    for peak_id in peak_ids:
        # get each frame's bounding boxes for the given peak_id
        frame_bboxes = bboxes_dict[peak_id]
        # allocate a fresh array per peak so that all peaks do not share (and overwrite) one array
        bbox_array = np.zeros((len(frame_bboxes),2,2), dtype='uint16')
for frame_index in range(len(frame_bboxes)):
# replace the values in bbox_array with the proper ones from frame_bboxes
minrow = frame_bboxes[frame_index][0]
maxrow = frame_bboxes[frame_index][2]
mincol = frame_bboxes[frame_index][1]
maxcol = frame_bboxes[frame_index][3]
bbox_array[frame_index,0,0] = minrow
bbox_array[frame_index,0,1] = maxrow
bbox_array[frame_index,1,0] = mincol
bbox_array[frame_index,1,1] = maxcol
channel_masks[peak_id] = bbox_array
return(channel_masks)
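# A minimal usage sketch of make_channel_masks_CNN. The bounding boxes below are invented for
# illustration only; real boxes come from the trap-detection CNN upstream and are per-frame
# (minrow, mincol, maxrow, maxcol) tuples keyed by peak_id.
def _example_make_channel_masks_CNN():
    example_bboxes = {101: [(10, 5, 120, 25), (10, 5, 120, 25)],
                      205: [(10, 60, 120, 80), (11, 60, 121, 80)]}
    masks = make_channel_masks_CNN(example_bboxes)
    # masks[101].shape == (2, 2, 2): per frame, [[minrow, maxrow], [mincol, maxcol]]
    return masks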
### functions about trimming, padding, and manipulating images
# define function for flipping the images on an FOV by FOV basis
def fix_orientation(image_data):
'''
    Fix the orientation of the image. The standard orientation has the channel openings facing down.
called by
process_tif
get_params
'''
# user parameter indicates how things should be flipped
image_orientation = params['compile']['image_orientation']
    # if this is a single-plane image, add an extra axis so the rest of the code works unchanged
flat = False # flag for if the image is flat or multiple levels
if len(image_data.shape) == 2:
image_data = np.expand_dims(image_data, 0)
flat = True
# setting image_orientation to 'auto' will use autodetection
if image_orientation == "auto":
# use 'phase_plane' to find the phase plane in image_data, assuming c1, c2, c3... naming scheme here.
try:
ph_channel = int(re.search('[0-9]', params['phase_plane']).group(0)) - 1
except:
# Pick the plane to analyze with the highest mean px value (should be phase)
ph_channel = np.argmax([np.mean(image_data[ci]) for ci in range(image_data.shape[0])])
        # flip based on the index of the highest average row value
# this should be closer to the opening
if np.argmax(image_data[ph_channel].mean(axis = 1)) < image_data[ph_channel].shape[0] / 2:
image_data = image_data[:,::-1,:]
else:
pass # no need to do anything
    # flip if "up" is chosen; assign rather than return so the flat case below is still handled
    elif image_orientation == "up":
        image_data = image_data[:,::-1,:]
    # do not flip the images if "down" is the specified image orientation
    elif image_orientation == "down":
        pass
if flat:
image_data = image_data[0] # just return that first layer
return image_data
# cuts out channels from the image
def cut_slice(image_data, channel_loc):
    '''Takes an image and cuts out the channel based on the slice location.
    The slice location is the list with the peak information, in the form
    [[y1, y2], [x1, x2]]. Returns the channel slice as a numpy array.
    The numpy array will be a stack if there are multiple planes.
    If you want to slice all the channels from an image with the channel_masks
    dictionary, use a loop like this:
    for peak_id, channel_loc in six.iteritems(channel_masks[fov_id]): # fov_id is the fov of the image
        channel_slice = cut_slice(image_pixel_data, channel_loc)
        # ... do something with the slice
    NOTE: this function will try to determine what the shape of your
    image is and slice accordingly. It expects the images to be in the order
    [t, x, y, c]. It assumes images with three dimensions are [x, y, c] not
    [t, x, y].
    '''
# case where image is in form [x, y]
if len(image_data.shape) == 2:
# make slice object
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1]]
# case where image is in form [x, y, c]
elif len(image_data.shape) == 3:
channel_slicer = np.s_[channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# case where image in form [t, x , y, c]
elif len(image_data.shape) == 4:
channel_slicer = np.s_[:,channel_loc[0][0]:channel_loc[0][1],
channel_loc[1][0]:channel_loc[1][1],:]
# slice based on appropriate slicer object.
channel_slice = image_data[channel_slicer]
# pad y of channel if slice happened to be outside of image
y_difference = (channel_loc[0][1] - channel_loc[0][0]) - channel_slice.shape[1]
if y_difference > 0:
paddings = [[0, 0], # t
[0, y_difference], # y
[0, 0], # x
[0, 0]] # c
channel_slice = np.pad(channel_slice, paddings, mode='edge')
return channel_slice
# calculate cross correlation between pixels in channel stack
def channel_xcorr(fov_id, peak_id):
'''
Function calculates the cross correlation of images in a
stack to the first image in the stack. The output is an
array that is the length of the stack with the best cross
correlation between that image and the first image.
The very first value should be 1.
'''
pad_size = params['subtract']['alignment_pad']
# Use this number of images to calculate cross correlations
number_of_images = 20
# load the phase contrast images
image_data = load_stack(fov_id, peak_id, color=params['phase_plane'])
# if there are more images than number_of_images, use number_of_images images evenly
# spaced across the range
if image_data.shape[0] > number_of_images:
spacing = int(image_data.shape[0] / number_of_images)
image_data = image_data[::spacing,:,:]
if image_data.shape[0] > number_of_images:
image_data = image_data[:number_of_images,:,:]
# we will compare all images to this one, needs to be padded to account for image drift
first_img = np.pad(image_data[0,:,:], pad_size, mode='reflect')
    xcorr_array = [] # array holds cross correlation values
for img in image_data:
# use match_template to find all cross correlations for the
# current image against the first image.
xcorr_array.append(np.max(match_template(first_img, img)))
return xcorr_array
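# A minimal sketch (assuming params and load_stack are already initialized) of one way the
# cross-correlation profile could be used downstream: channels that stay highly correlated with
# their first frame are likely empty. The 0.97 cutoff here is an invented illustration only,
# not a value taken from the pipeline.
def _example_classify_channel_by_xcorr(fov_id, peak_id, cutoff=0.97):
    xcorrs = channel_xcorr(fov_id, peak_id)
    return np.mean(xcorrs) > cutoff  # True suggests the channel looks empty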
### functions about subtraction
# average empty channels from stacks, making another TIFF stack
def average_empties_stack(fov_id, specs, color='c1', align=True):
'''Takes the fov file name and the peak names of the designated empties,
averages them and saves the image
Parameters
fov_id : int
FOV number
specs : dict
specifies whether a channel should be analyzed (1), used for making
an average empty (0), or ignored (-1).
color : string
Which plane to use.
    align : boolean
        Flag that is passed to the worker function average_empties; indicates
        whether images should be aligned before averaging (use False for fluorescent images)
    Returns
    True if successful.
    Saves empty stack to analysis folder
'''
information("Creating average empty channel for FOV %d." % fov_id)
# get peak ids of empty channels for this fov
empty_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 0: # 0 means it should be used for empty
empty_peak_ids.append(peak_id)
empty_peak_ids = sorted(empty_peak_ids) # sort for repeatability
# depending on how many empties there are choose what to do
# if there is no empty the user is going to have to copy another empty stack
if len(empty_peak_ids) == 0:
information("No empty channel designated for FOV %d." % fov_id)
return False
# if there is just one then you can just copy that channel
elif len(empty_peak_ids) == 1:
peak_id = empty_peak_ids[0]
information("One empty channel (%d) designated for FOV %d." % (peak_id, fov_id))
# load the one phase contrast as the empties
avg_empty_stack = load_stack(fov_id, peak_id, color=color)
# but if there is more than one empty you need to align and average them per timepoint
elif len(empty_peak_ids) > 1:
# load the image stacks into memory
empty_stacks = [] # list which holds phase image stacks of designated empties
for peak_id in empty_peak_ids:
# load data and append to list
image_data = load_stack(fov_id, peak_id, color=color)
empty_stacks.append(image_data)
information("%d empty channels designated for FOV %d." % (len(empty_stacks), fov_id))
# go through time points and create list of averaged empties
        avg_empty_stack = [] # list will later be concatenated into a numpy array
time_points = range(image_data.shape[0]) # index is time
for t in time_points:
# get images from one timepoint at a time and send to alignment and averaging
imgs = [stack[t] for stack in empty_stacks]
avg_empty = average_empties(imgs, align=align) # function is in mm3
avg_empty_stack.append(avg_empty)
# concatenate list and then save out to tiff stack
avg_empty_stack = np.stack(avg_empty_stack, axis=0)
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (fov_id, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
        # the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute
h5ds.attrs.create('empty_channels', empty_peak_ids)
h5f.close()
information("Saved empty channel for FOV %d." % fov_id)
return True
# averages a list of empty channels
def average_empties(imgs, align=True):
'''
This function averages a set of images (empty channels) and returns a single image
of the same size. It first aligns the images to the first image before averaging.
Alignment is done by enlarging the first image using edge padding.
Subsequent images are then aligned to this image and the offset recorded.
These images are padded such that they are the same size as the first (padded) image but
with the image in the correct (aligned) place. Edge padding is again used.
    The images are then placed in a stack and averaged. The result is trimmed so it is the size
    of the original images.
Called by
average_empties_stack
'''
aligned_imgs = [] # list contains the aligned, padded images
if align:
        # pixel size to use for padding (amount that alignment could be off)
pad_size = params['subtract']['alignment_pad']
for n, img in enumerate(imgs):
# if this is the first image, pad it and add it to the stack
if n == 0:
ref_img = np.pad(img, pad_size, mode='reflect') # padded reference image
aligned_imgs.append(ref_img)
# otherwise align this image to the first padded image
else:
# find correlation between a convolution of img against the padded reference
match_result = match_template(ref_img, img)
# find index of highest correlation (relative to top left corner of img)
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad img so it aligns and is the same size as reference image
pad_img = np.pad(img, ((y, ref_img.shape[0] - (y + img.shape[0])),
(x, ref_img.shape[1] - (x + img.shape[1]))), mode='reflect')
aligned_imgs.append(pad_img)
else:
# don't align, just link the names to go forward easily
aligned_imgs = imgs
# stack the aligned data along 3rd axis
aligned_imgs = np.dstack(aligned_imgs)
# get a mean image along 3rd axis
avg_empty = np.nanmean(aligned_imgs, axis=2)
    # trim off the padded edges (only if images were aligned; otherwise there was no padding)
if align:
avg_empty = avg_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
# change type back to unsigned 16 bit not floats
avg_empty = avg_empty.astype(dtype='uint16')
return avg_empty
# this function is used when one FOV doesn't have an empty
def copy_empty_stack(from_fov, to_fov, color='c1'):
'''Copy an empty stack from one FOV to another'''
# load empty stack from one FOV
information('Loading empty stack from FOV {} to save for FOV {}.'.format(from_fov, to_fov))
avg_empty_stack = load_stack(from_fov, 0, color='empty_{}'.format(color))
# save out data
if params['output'] == 'TIFF':
# make new name and save it
empty_filename = params['experiment_name'] + '_xy%03d_empty_%s.tif' % (to_fov, color)
tiff.imsave(os.path.join(params['empty_dir'],empty_filename), avg_empty_stack, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % to_fov), 'r+')
# delete the dataset if it exists (important for debug)
if 'empty_%s' % color in h5f:
del h5f[u'empty_%s' % color]
        # the empty channel should be its own dataset
h5ds = h5f.create_dataset(u'empty_%s' % color,
data=avg_empty_stack,
chunks=(1, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
maxshape=(None, avg_empty_stack.shape[1], avg_empty_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
# give attribute which says which channels contribute. Just put 0
h5ds.attrs.create('empty_channels', [0])
h5f.close()
information("Saved empty channel for FOV %d." % to_fov)
# Do subtraction for an fov over many timepoints
def subtract_fov_stack(fov_id, specs, color='c1', method='phase'):
'''
For a given FOV, loads the precomputed empty stack and does subtraction on
all peaks in the FOV designated to be analyzed
Parameters
----------
    color : string, 'c1', 'c2', etc.
        This is the channel to subtract. It will be appended to the word 'empty' when loading the empty stack.
Called by
mm3_Subtract.py
Calls
mm3.subtract_phase
'''
information('Subtracting peaks for FOV %d.' % fov_id)
    # load empty stack; feed a dummy peak number to get the empty stack for this FOV
avg_empty_stack = load_stack(fov_id, 0, color='empty_{}'.format(color))
# determine which peaks are to be analyzed
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
        if spec == 1: # 1 means analyze, 0 is for empty, -1 is ignore
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information("Subtracting %d channels for FOV %d." % (len(ana_peak_ids), fov_id))
    # just return if there are no peaks to analyze
if not ana_peak_ids:
return False
# load images for the peak and get phase images
for peak_id in ana_peak_ids:
information('Subtracting peak %d.' % peak_id)
image_data = load_stack(fov_id, peak_id, color=color)
        # make a list for all time points to send to a multiprocessing pool
        # list will be the length of image_data, with tuples of (image, empty)
subtract_pairs = zip(image_data, avg_empty_stack)
# # set up multiprocessing pool to do subtraction. Should wait until finished
# pool = Pool(processes=params['num_analyzers'])
# if method == 'phase':
# subtracted_imgs = pool.map(subtract_phase, subtract_pairs, chunksize=10)
# elif method == 'fluor':
# subtracted_imgs = pool.map(subtract_fluor, subtract_pairs, chunksize=10)
# pool.close() # tells the process nothing more will be added.
# pool.join() # blocks script until everything has been processed and workers exit
        # linear loop for debug; dispatch on method like the pool version above
        if method == 'phase':
            subtracted_imgs = [subtract_phase(subtract_pair) for subtract_pair in subtract_pairs]
        elif method == 'fluor':
            subtracted_imgs = [subtract_fluor(subtract_pair) for subtract_pair in subtract_pairs]
# stack them up along a time axis
subtracted_stack = np.stack(subtracted_imgs, axis=0)
# save out the subtracted stack
if params['output'] == 'TIFF':
sub_filename = params['experiment_name'] + '_xy%03d_p%04d_sub_%s.tif' % (fov_id, peak_id, color)
tiff.imsave(os.path.join(params['sub_dir'],sub_filename), subtracted_stack, compress=4) # save it
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(subtracted_stack, name='Subtracted' + '_xy1_p'+str(peak_id)+'_sub_'+str(color)+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put subtracted channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_sub_%s' % (peak_id, color) in h5g:
del h5g['p%04d_sub_%s' % (peak_id, color)]
h5ds = h5g.create_dataset(u'p%04d_sub_%s' % (peak_id, color),
data=subtracted_stack,
chunks=(1, subtracted_stack.shape[1], subtracted_stack.shape[2]),
maxshape=(None, subtracted_stack.shape[1], subtracted_stack.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
information("Saved subtracted channel %d." % peak_id)
if params['output'] == 'HDF5':
h5f.close()
return True
# subtracts one phase contrast image from another.
def subtract_phase(image_pair):
    '''subtract_phase aligns an empty channel image to a cropped channel image and subtracts it.
    See the example sketch after this function.
Modified from subtract_phase_only by jt on 20160511
The subtracted image returned is the same size as the image given. It may however include
data points around the edge that are meaningless but not marked.
We align the empty channel to the phase channel, then subtract.
Parameters
image_pair : tuple of length two with; (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# this is for aligning the empty channel to the cell channel.
### Pad cropped channel.
    pad_size = params['subtract']['alignment_pad'] # pixel size to use for padding (amount that alignment could be off)
padded_chnl = np.pad(cropped_channel, pad_size, mode='reflect')
# ### Align channel to empty using match template.
# use match template to get a correlation array and find the position of maximum overlap
match_result = match_template(padded_chnl, empty_channel)
# get row and colum of max correlation value in correlation array
y, x = np.unravel_index(np.argmax(match_result), match_result.shape)
# pad the empty channel according to alignment to be overlayed on padded channel.
empty_paddings = [[y, padded_chnl.shape[0] - (y + empty_channel.shape[0])],
[x, padded_chnl.shape[1] - (x + empty_channel.shape[1])]]
aligned_empty = np.pad(empty_channel, empty_paddings, mode='reflect')
# now trim it off so it is the same size as the original channel
aligned_empty = aligned_empty[pad_size:-1*pad_size, pad_size:-1*pad_size]
### Compute the difference between the empty and channel phase contrast images
# subtract cropped cell image from empty channel.
channel_subtracted = aligned_empty.astype('int32') - cropped_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0. This is what Sattar does
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
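# Sketch of subtracting a single frame outside of subtract_fov_stack, e.g. for inspection.
# Assumes params is loaded and an averaged empty stack has already been saved for this FOV.
def _example_subtract_one_frame(fov_id, peak_id, t=0, color='c1'):
    chnl_stack = load_stack(fov_id, peak_id, color=color)
    empty_stack = load_stack(fov_id, 0, color='empty_{}'.format(color))
    return subtract_phase((chnl_stack[t], empty_stack[t]))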
# subtract one fluorescence image from another.
def subtract_fluor(image_pair):
''' subtract_fluor does a simple subtraction of one image to another. Unlike subtract_phase,
there is no alignment. Also, the empty channel is subtracted from the full channel.
Parameters
image_pair : tuple of length two with; (image, empty_mean)
Returns
channel_subtracted : np.array
The subtracted image.
Called by
subtract_fov_stack
'''
# get out data and pad
cropped_channel, empty_channel = image_pair # [channel slice, empty slice]
# check frame size of cropped channel and background, always keep crop channel size the same
crop_size = np.shape(cropped_channel)[:2]
empty_size = np.shape(empty_channel)[:2]
if crop_size != empty_size:
if crop_size[0] > empty_size[0] or crop_size[1] > empty_size[1]:
pad_row_length = max(crop_size[0] - empty_size[0], 0) # prevent negatives
pad_column_length = max(crop_size[1] - empty_size[1], 0)
            empty_channel = np.pad(empty_channel,
                                   [[int(.5*pad_row_length), pad_row_length-int(.5*pad_row_length)],
                                    [int(.5*pad_column_length), pad_column_length-int(.5*pad_column_length)],
                                    [0,0]], 'edge')
# mm3.information('size adjusted 1')
empty_size = np.shape(empty_channel)[:2]
if crop_size[0] < empty_size[0] or crop_size[1] < empty_size[1]:
empty_channel = empty_channel[:crop_size[0], :crop_size[1],]
### Compute the difference between the empty and channel phase contrast images
# subtract cropped cell image from empty channel.
channel_subtracted = cropped_channel.astype('int32') - empty_channel.astype('int32')
# channel_subtracted = cropped_channel.astype('int32') - aligned_empty.astype('int32')
# just zero out anything less than 0.
channel_subtracted[channel_subtracted < 0] = 0
channel_subtracted = channel_subtracted.astype('uint16') # change back to 16bit
return channel_subtracted
### functions that deal with segmentation and lineages
# Do segmentation for an channel time stack
def segment_chnl_stack(fov_id, peak_id):
'''
For a given fov and peak (channel), do segmentation for all images in the
subtracted .tif stack.
Called by
mm3_Segment.py
Calls
mm3.segment_image
'''
information('Segmenting FOV %d, channel %d.' % (fov_id, peak_id))
# load subtracted images
sub_stack = load_stack(fov_id, peak_id, color='sub_{}'.format(params['phase_plane']))
# set up multiprocessing pool to do segmentation. Will do everything before going on.
#pool = Pool(processes=params['num_analyzers'])
# send the 3d array to multiprocessing
#segmented_imgs = pool.map(segment_image, sub_stack, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# image by image for debug
segmented_imgs = []
for sub_image in sub_stack:
segmented_imgs.append(segment_image(sub_image))
# stack them up along a time axis
segmented_imgs = np.stack(segmented_imgs, axis=0)
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stack
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'],seg_filename),
segmented_imgs, compress=5)
if fov_id==1 and peak_id<50:
napari.current_viewer().add_image(segmented_imgs, name='Segmented' + '_xy1_p'+str(peak_id)+'_sub_'+str(params['seg_img'])+'.tif', visible=True)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
information("Saved segmented channel %d." % peak_id)
return True
# segmentation algorithm
def segment_image(image):
'''Segments a subtracted image and returns a labeled image
Parameters
image : a ndarray which is an image. This should be the subtracted image
Returns
    labeled_image : a ndarray which is also an image. Each labeled region, which should
        correspond to one cell, has a unique integer value, starting at 1.
        Non-labeled area has value zero.
'''
# load in segmentation parameters
OTSU_threshold = params['segment']['otsu']['OTSU_threshold']
first_opening_size = params['segment']['otsu']['first_opening_size']
distance_threshold = params['segment']['otsu']['distance_threshold']
second_opening_size = params['segment']['otsu']['second_opening_size']
min_object_size = params['segment']['otsu']['min_object_size']
# threshold image
try:
        thresh = threshold_otsu(image) # finds optimal Otsu threshold value
except:
return np.zeros_like(image)
threshholded = image > OTSU_threshold*thresh # will create binary image
    # if there are no cells, it is good to clear the border,
    # because otherwise the Otsu threshold is fit to noise,
    # most likely at the side of the image
threshholded = segmentation.clear_border(threshholded)
    # Opening = erosion then dilation.
    # Opening smooths images, breaks isthmuses, and eliminates protrusions.
    # It "opens" dark gaps between bright features.
morph = morphology.binary_opening(threshholded, morphology.disk(first_opening_size))
# if this image is empty at this point (likely if there were no cells), just return
# zero array
if np.amax(morph) == 0:
return np.zeros_like(image)
### Calculate distance matrix, use as markers for random walker (diffusion watershed)
# Generate the markers based on distance to the background
distance = ndi.distance_transform_edt(morph)
# threshold distance image
distance_thresh = np.zeros_like(distance)
distance_thresh[distance < distance_threshold] = 0
distance_thresh[distance >= distance_threshold] = 1
# do an extra opening on the distance
distance_opened = morphology.binary_opening(distance_thresh,
morphology.disk(second_opening_size))
# remove artifacts connected to image border
cleared = segmentation.clear_border(distance_opened)
    # remove small objects. remove_small_objects wants a labeled image and will fail if
    # there is only one label; return a zero image in that case.
    # (try/except could be used, but remove_small_objects tends to issue warnings.)
cleared, label_num = morphology.label(cleared, connectivity=1, return_num=True)
if label_num > 1:
cleared = morphology.remove_small_objects(cleared, min_size=min_object_size)
else:
# if there are no labels, then just return the cleared image as it is zero
return np.zeros_like(image)
# relabel now that small objects and labels on edges have been cleared
markers = morphology.label(cleared, connectivity=1)
    # just return if there are no labels
if np.amax(markers) == 0:
return np.zeros_like(image)
# the binary image for the watershed, which uses the unmodified OTSU threshold
threshholded_watershed = threshholded
threshholded_watershed = segmentation.clear_border(threshholded_watershed)
# label using the random walker (diffusion watershed) algorithm
try:
# set anything outside of OTSU threshold to -1 so it will not be labeled
markers[threshholded_watershed == 0] = -1
# here is the main algorithm
labeled_image = segmentation.random_walker(-1*image, markers)
# put negative values back to zero for proper image
labeled_image[labeled_image == -1] = 0
except:
return np.zeros_like(image)
return labeled_image
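# A minimal sketch of running the Otsu/random-walker segmentation on one subtracted frame,
# mirroring the per-frame loop in segment_chnl_stack. Assumes params['segment']['otsu'] is set
# and the subtracted stack for this peak already exists on disk.
def _example_segment_one_frame(fov_id, peak_id, t=0):
    sub_stack = load_stack(fov_id, peak_id, color='sub_{}'.format(params['phase_plane']))
    labels = segment_image(sub_stack[t])
    return labels, int(np.max(labels))  # labeled image and number of detected regions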
# loss functions for model
def dice_coeff(y_true, y_pred):
smooth = 1.
# Flatten
y_true_f = tf.reshape(y_true, [-1])
y_pred_f = tf.reshape(y_pred, [-1])
intersection = tf.reduce_sum(y_true_f * y_pred_f)
score = (2. * intersection + smooth) / (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)
return score
def dice_loss(y_true, y_pred):
loss = 1 - dice_coeff(y_true, y_pred)
return loss
def bce_dice_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)
return loss
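# A minimal sketch, assuming a Keras segmentation model `unet_model` built elsewhere, of how the
# combined BCE + Dice loss above could be attached at compile time. The optimizer choice here is
# illustrative only.
def _example_compile_with_bce_dice(unet_model):
    unet_model.compile(optimizer='adam', loss=bce_dice_loss, metrics=[dice_coeff])
    return unet_model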
def tversky_loss(y_true, y_pred):
alpha = 0.5
beta = 0.5
    ones = K.ones_like(y_true) # avoids hard-coding the (512,512,3) output shape
p0 = y_pred # proba that voxels are class i
p1 = ones-y_pred # proba that voxels are not class i
g0 = y_true
g1 = ones-y_true
num = K.sum(p0*g0, (0,1,2))
den = num + alpha*K.sum(p0*g1,(0,1,2)) + beta*K.sum(p1*g0,(0,1,2))
T = K.sum(num/den) # when summing over classes, T has dynamic range [0 Ncl]
Ncl = K.cast(K.shape(y_true)[-1], 'float32')
return Ncl-T
def cce_tversky_loss(y_true, y_pred):
loss = losses.categorical_crossentropy(y_true, y_pred) + tversky_loss(y_true, y_pred)
return loss
def get_pad_distances(unet_shape, img_height, img_width):
    '''Finds padding and trimming sizes to make the input image the same size as that expected by the U-net model.
    Padding is split evenly between the top/bottom and left/right of the image. Trimming is only done from the right or bottom.
    See the worked example after this function.
    '''
half_width_pad = (unet_shape[1]-img_width)/2
if half_width_pad > 0:
left_pad = int(np.floor(half_width_pad))
right_pad = int(np.ceil(half_width_pad))
right_trim = 0
else:
left_pad = 0
right_pad = 0
right_trim = img_width - unet_shape[1]
half_height_pad = (unet_shape[0]-img_height)/2
if half_height_pad > 0:
top_pad = int(np.floor(half_height_pad))
bottom_pad = int(np.ceil(half_height_pad))
bottom_trim = 0
else:
top_pad = 0
bottom_pad = 0
bottom_trim = img_height - unet_shape[0]
pad_dict = {'top_pad' : top_pad,
'bottom_pad' : bottom_pad,
'right_pad' : right_pad,
'left_pad' : left_pad,
'bottom_trim' : bottom_trim,
'right_trim' : right_trim}
return pad_dict
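# Worked example of get_pad_distances for a hypothetical 230 x 36 pixel channel image and a
# U-net input shape of (256, 32): the image would be padded by 13 rows on top and bottom and
# trimmed by 4 columns on the right before prediction.
def _example_pad_distances():
    pad_dict = get_pad_distances((256, 32), img_height=230, img_width=36)
    # pad_dict == {'top_pad': 13, 'bottom_pad': 13, 'right_pad': 0, 'left_pad': 0,
    #              'bottom_trim': 0, 'right_trim': 4}
    return pad_dict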
#@profile
def segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
batch_size = params['segment']['batch_size']
cellClassThreshold = params['segment']['cell_class_threshold']
if cellClassThreshold == 'None': # yaml imports None as a string
cellClassThreshold = False
min_object_size = params['segment']['min_object_size']
# arguments to data generator
# data_gen_args = {'batch_size':batch_size,
# 'n_channels':1,
# 'normalize_to_one':False,
# 'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=True,
workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting peak {}.'.format(peak_id))
img_stack = load_stack(fov_id, peak_id, color=params['phase_plane'])
if params['segment']['normalize_to_one']:
med_stack = np.zeros(img_stack.shape)
selem = morphology.disk(1)
for frame_idx in range(img_stack.shape[0]):
tmpImg = img_stack[frame_idx,...]
med_stack[frame_idx,...] = median(tmpImg, selem)
# robust normalization of peak's image stack to 1
max_val = np.max(med_stack)
img_stack = img_stack/max_val
img_stack[img_stack > 1] = 1
# trim and pad image to correct size
img_stack = img_stack[:, :unet_shape[0], :unet_shape[1]]
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1) # TF expects images to be 4D
# set up image generator
# image_generator = CellSegmentationDataGenerator(img_stack, **data_gen_args)
image_datagen = ImageDataGenerator()
image_generator = image_datagen.flow(x=img_stack,
batch_size=batch_size,
shuffle=False) # keep same order
        # predict cell locations. predict_generator has multiprocessing built in; the worker
        # settings may need tuning for best throughput.
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
        # pad back in case the image had been trimmed
predictions = np.pad(predictions,
((0,0),
(0,pad_dict['bottom_trim']),
(0,pad_dict['right_trim'])),
mode='constant')
if params['segment']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['pred_dir']):
os.makedirs(params['pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['pred_dir'], pred_filename),
int_preds, compress=4)
        # binarize and label (if there is a threshold value; otherwise save a grayscale image for debug)
if cellClassThreshold:
predictions[predictions >= cellClassThreshold] = 1
predictions[predictions < cellClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
                # remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=1)
        else: # in this case just scale the 0-1 float predictions to grayscale for debugging
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
        # both binary and grayscale should be 8bit. This may already be ensured above and thus be unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
#@profile
def segment_fov_unet(fov_id, specs, model, color=None):
'''
Segments the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
    # this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
    # determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
#ana_peak_ids = ana_peak_ids[:2]
segment_cells_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return
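# Sketch of driving the U-net segmentation for one FOV. The params['segment']['model_file'] key
# used here is a hypothetical location for the trained weights; adjust it to wherever the model
# is actually stored, and make custom_objects match whatever losses/metrics it was trained with.
def _example_segment_fov_with_unet(fov_id, specs):
    seg_model = models.load_model(params['segment']['model_file'],
                                  custom_objects={'bce_dice_loss': bce_dice_loss,
                                                  'dice_coeff': dice_coeff})
    segment_fov_unet(fov_id, specs, seg_model, color=params['phase_plane'])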
def segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model):
# batch_size = params['foci']['batch_size']
focusClassThreshold = params['foci']['focus_threshold']
if focusClassThreshold == 'None': # yaml imports None as a string
focusClassThreshold = False
# arguments to data generator
data_gen_args = {'batch_size':params['foci']['batch_size'],
'n_channels':1,
'normalize_to_one':False,
'shuffle':False}
# arguments to predict_generator
predict_args = dict(use_multiprocessing=False,
# workers=params['num_analyzers'],
verbose=1)
for peak_id in ana_peak_ids:
information('Segmenting foci in peak {}.'.format(peak_id))
# print(peak_id) # debugging a shape error at some traps
img_stack = load_stack(fov_id, peak_id, color=params['foci']['foci_plane'])
# pad image to correct size
img_stack = np.pad(img_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])),
mode='constant')
img_stack = np.expand_dims(img_stack, -1)
# set up image generator
image_generator = FocusSegmentationDataGenerator(img_stack, **data_gen_args)
# predict foci locations.
predictions = model.predict_generator(image_generator, **predict_args)
# post processing
# remove padding including the added last dimension
predictions = predictions[:, pad_dict['top_pad']:unet_shape[0]-pad_dict['bottom_pad'],
pad_dict['left_pad']:unet_shape[1]-pad_dict['right_pad'], 0]
if params['foci']['save_predictions']:
pred_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['pred_img'])
if not os.path.isdir(params['foci_pred_dir']):
os.makedirs(params['foci_pred_dir'])
int_preds = (predictions * 255).astype('uint8')
tiff.imsave(os.path.join(params['foci_pred_dir'], pred_filename),
int_preds, compress=4)
        # binarize and label (if there is a threshold value; otherwise save a grayscale image for debug)
if focusClassThreshold:
predictions[predictions >= focusClassThreshold] = 1
predictions[predictions < focusClassThreshold] = 0
predictions = predictions.astype('uint8')
segmented_imgs = np.zeros(predictions.shape, dtype='uint8')
# process and label each frame of the channel
for frame in range(segmented_imgs.shape[0]):
# get rid of small holes
# predictions[frame,:,:] = morphology.remove_small_holes(predictions[frame,:,:], min_object_size)
# get rid of small objects.
# predictions[frame,:,:] = morphology.remove_small_objects(morphology.label(predictions[frame,:,:], connectivity=1), min_size=min_object_size)
                # remove labels which touch the border
predictions[frame,:,:] = segmentation.clear_border(predictions[frame,:,:])
# relabel now
segmented_imgs[frame,:,:] = morphology.label(predictions[frame,:,:], connectivity=2)
        else: # in this case just scale the 0-1 float predictions to grayscale for debugging
information('Converting predictions to grayscale.')
segmented_imgs = np.around(predictions * 100)
        # both binary and grayscale should be 8bit. This may already be ensured above and thus be unnecessary
segmented_imgs = segmented_imgs.astype('uint8')
# save out the segmented stacks
if params['output'] == 'TIFF':
seg_filename = params['experiment_name'] + '_xy%03d_p%04d_%s.tif' % (fov_id, peak_id, params['seg_img'])
tiff.imsave(os.path.join(params['foci_seg_dir'], seg_filename),
segmented_imgs, compress=4)
if params['output'] == 'HDF5':
h5f = h5py.File(os.path.join(params['hdf5_dir'],'xy%03d.hdf5' % fov_id), 'r+')
# put segmented channel in correct group
h5g = h5f['channel_%04d' % peak_id]
# delete the dataset if it exists (important for debug)
if 'p%04d_%s' % (peak_id, params['seg_img']) in h5g:
del h5g['p%04d_%s' % (peak_id, params['seg_img'])]
h5ds = h5g.create_dataset(u'p%04d_%s' % (peak_id, params['seg_img']),
data=segmented_imgs,
chunks=(1, segmented_imgs.shape[1], segmented_imgs.shape[2]),
maxshape=(None, segmented_imgs.shape[1], segmented_imgs.shape[2]),
compression="gzip", shuffle=True, fletcher32=True)
h5f.close()
def segment_fov_foci_unet(fov_id, specs, model, color=None):
'''
Segments the channels from one fov using the U-net CNN model.
Parameters
----------
fov_id : int
specs : dict
model : TensorFlow model
'''
information('Segmenting FOV {} with U-net.'.format(fov_id))
if color is None:
color = params['phase_plane']
# load segmentation parameters
unet_shape = (params['segment']['trained_model_image_height'],
params['segment']['trained_model_image_width'])
### determine stitching of images.
# need channel shape, specifically the width. load first for example
    # this assumes that all channels are the same size for this FOV, which they should be
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
break # just break out with the current peak_id
img_stack = load_stack(fov_id, peak_id, color=color)
img_height = img_stack.shape[1]
img_width = img_stack.shape[2]
# find padding and trimming distances
pad_dict = get_pad_distances(unet_shape, img_height, img_width)
# timepoints = img_stack.shape[0]
    # determine how many channels we have to analyze for this FOV
ana_peak_ids = []
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1:
ana_peak_ids.append(peak_id)
ana_peak_ids.sort() # sort for repeatability
k = segment_foci_unet(ana_peak_ids, fov_id, pad_dict, unet_shape, model)
information("Finished segmentation for FOV {}.".format(fov_id))
return(k)
# class for image generation for predicting cell locations in phase-contrast images
class CellSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
if tmpImg.dtype=="uint16":
tmpImg = tmpImg / 2**16 * 2**8
tmpImg = tmpImg.astype('uint8')
if self.normalize_to_one:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
medImg = median(tmpImg, self.selem)
tmpImg = tmpImg/np.max(medImg)
tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
return (X)
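# A minimal sketch of using CellSegmentationDataGenerator directly (it is currently bypassed in
# segment_cells_unet in favor of ImageDataGenerator). img_stack is assumed to already be a padded
# 4D (frames, y, x, 1) array and `model` a loaded Keras U-net.
def _example_predict_with_data_generator(img_stack, model):
    gen = CellSegmentationDataGenerator(img_stack, batch_size=8, normalize_to_one=True)
    return model.predict_generator(gen, verbose=1)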
class TemporalCellDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
fileName,
batch_size=32,
dim=(32,32,32),
n_channels=1,
n_classes=10,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.fileName = fileName
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
        return 1 # batch_size/batch_size always evaluates to one batch per epoch
def __getitem__(self, index):
'Generate one batch of data'
# Generate data
X = self.__data_generation()
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
pass
def __data_generation(self):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], self.n_channels))
full_stack = io.imread(self.fileName)
if full_stack.dtype=="uint16":
full_stack = full_stack / 2**16 * 2**8
full_stack = full_stack.astype('uint8')
img_height = full_stack.shape[1]
img_width = full_stack.shape[2]
pad_dict = get_pad_distances(self.dim, img_height, img_width)
full_stack = np.pad(full_stack,
((0,0),
(pad_dict['top_pad'],pad_dict['bottom_pad']),
(pad_dict['left_pad'],pad_dict['right_pad'])
),
mode='constant')
full_stack = full_stack.transpose(1,2,0)
# Generate data
for i in range(self.batch_size):
if i == 0:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,0,0] = full_stack[:,:,0]
for j in range(1,self.dim[2]):
tmpImg[:,:,j,0] = full_stack[:,:,j]
elif i == (self.batch_size - 1):
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,-1,0] = full_stack[:,:,-1]
for j in range(self.dim[2]-1):
tmpImg[:,:,j,0] = full_stack[:,:,j]
else:
tmpImg = np.zeros((self.dim[0], self.dim[1], self.dim[2], 1))
tmpImg[:,:,:,0] = full_stack[:,:,(i-1):(i+2)]
X[i,:,:,:,:] = tmpImg
return X
# class for image generation for predicting focus (foci) locations in fluorescence images
class FocusSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self,
img_array,
batch_size=32,
n_channels=1,
shuffle=False,
normalize_to_one=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.batch_size = batch_size
self.img_array = img_array
self.img_number = img_array.shape[0]
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(1)
def __len__(self):
'Denotes the number of batches per epoch'
return(int(np.ceil(self.img_number / self.batch_size)))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels), 'uint16')
if self.normalize_to_one:
max_pixels = []
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
if self.normalize_to_one:
# tmpMedian = filters.median(tmpImg, self.selem)
tmpMax = np.max(tmpImg)
max_pixels.append(tmpMax)
except IndexError:
X = X[:i,...]
break
# ensure image is uint8
# if tmpImg.dtype=="uint16":
# tmpImg = tmpImg / 2**16 * 2**8
# tmpImg = tmpImg.astype('uint8')
# if self.normalize_to_one:
# with warnings.catch_warnings():
# warnings.simplefilter('ignore')
# medImg = median(tmpImg, self.selem)
# tmpImg = tmpImg/np.max(medImg)
# tmpImg[tmpImg > 1] = 1
X[i,:,:,0] = tmpImg
if self.normalize_to_one:
channel_max = np.max(max_pixels) / (2**8 - 1)
# print("Channel max: {}".format(channel_max))
# print("Array max: {}".format(np.max(X)))
X = X/channel_max
# print("Normalized array max: {}".format(np.max(X)))
X[X > 1] = 1
return (X)
# class for image generation for predicting trap locations in phase-contrast images
class TrapSegmentationDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, img_array, batch_size=32,
n_channels=1, normalize_to_one=False, shuffle=False):
'Initialization'
self.dim = (img_array.shape[1], img_array.shape[2])
self.img_number = img_array.shape[0]
self.img_array = img_array
self.batch_size = batch_size
self.n_channels = n_channels
self.shuffle = shuffle
self.on_epoch_end()
self.normalize_to_one = normalize_to_one
if normalize_to_one:
self.selem = morphology.disk(3)
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(self.img_number / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
array_list_temp = [self.img_array[k,:,:,0] for k in indexes]
# Generate data
X = self.__data_generation(array_list_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(self.img_number)
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, array_list_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i in range(self.batch_size):
# Store sample
try:
tmpImg = array_list_temp[i]
except IndexError:
X = X[:i,...]
break
if self.normalize_to_one:
medImg = median(tmpImg, self.selem)
tmpImg = medImg/np.max(medImg)
X[i,:,:,0] = tmpImg
return (X)
# class for image generation for classifying traps as good, empty, out-of-focus, or defective
class TrapKymographPredictionDataGenerator(utils.Sequence):
'Generates data for Keras'
def __init__(self, list_fileNames, batch_size=32, dim=(32,32,32), n_channels=1,
n_classes=10, shuffle=False):
'Initialization'
self.dim = dim
self.batch_size = batch_size
self.list_fileNames = list_fileNames
self.n_channels = n_channels
self.n_classes = n_classes
self.shuffle = shuffle
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(len(self.list_fileNames) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate indexes of the batch
indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
# Find list of IDs
list_fileNames_temp = [self.list_fileNames[k] for k in indexes]
# Generate data
X = self.__data_generation(list_fileNames_temp)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indexes = np.arange(len(self.list_fileNames))
if self.shuffle == True:
np.random.shuffle(self.indexes)
def __data_generation(self, list_fileNames_temp):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.n_channels))
# Generate data
for i, fName in enumerate(list_fileNames_temp):
# Store sample
tmpImg = io.imread(fName)
tmpImgShape = tmpImg.shape
if tmpImgShape[0] < self.dim[0]:
t_end = tmpImgShape[0]
else:
t_end = self.dim[0]
X[i,:t_end,:,:] = np.expand_dims(tmpImg[:t_end,:,tmpImg.shape[-1]//2], axis=-1)
return X
def absolute_diff(y_true, y_pred):
y_true_sum = K.sum(y_true)
y_pred_sum = K.sum(y_pred)
    diff = K.abs(y_pred_sum - y_true_sum)/tf.cast(tf.size(y_true), 'float32') # tf.to_float is deprecated
return diff
def all_loss(y_true, y_pred):
loss = losses.binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def absolute_dice_loss(y_true, y_pred):
loss = dice_loss(y_true, y_pred) + absolute_diff(y_true, y_pred)
return loss
def recall_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def precision_m(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def f1_m(y_true, y_pred):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
return 2*((precision*recall)/(precision+recall+K.epsilon()))
def f2_m(y_true, y_pred, beta=2):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
def f_precision_m(y_true, y_pred, beta=0.5):
precision = precision_m(y_true, y_pred)
recall = recall_m(y_true, y_pred)
numer = (1+beta**2)*recall*precision
denom = recall + (beta**2)*precision + K.epsilon()
return numer/denom
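# Sketch of attaching the metrics above during training of a tracking model (assumes an existing
# Keras `tracking_model`); all of these metrics operate on tensors clipped/rounded to 0-1.
def _example_compile_with_fscores(tracking_model):
    tracking_model.compile(optimizer='adam', loss=all_loss,
                           metrics=[precision_m, recall_m, f1_m, f2_m])
    return tracking_model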
# finds lineages for all peaks in a fov
def make_lineages_fov(fov_id, specs):
'''
For a given fov, create the lineages from the segmented images.
Called by
mm3_Segment.py
Calls
mm3.make_lineage_chnl_stack
'''
ana_peak_ids = [] # channels to be analyzed
for peak_id, spec in six.iteritems(specs[fov_id]):
if spec == 1: # 1 means analyze
ana_peak_ids.append(peak_id)
ana_peak_ids = sorted(ana_peak_ids) # sort for repeatability
information('Creating lineage for FOV %d with %d channels.' % (fov_id, len(ana_peak_ids)))
    # just return if there are no peaks to analyze
if not ana_peak_ids:
# returning empty dictionary will add nothing to current cells dictionary
return {}
# This is a list of tuples (fov_id, peak_id) to send to the Pool command
fov_and_peak_ids_list = [(fov_id, peak_id) for peak_id in ana_peak_ids]
# set up multiprocessing pool. will complete pool before going on
#pool = Pool(processes=params['num_analyzers'])
# create the lineages for each peak individually
# the output is a list of dictionaries
#lineages = pool.map(make_lineage_chnl_stack, fov_and_peak_ids_list, chunksize=8)
#pool.close() # tells the process nothing more will be added.
#pool.join() # blocks script until everything has been processed and workers exit
# This is the non-parallelized version (useful for debug)
lineages = []
for fov_and_peak_ids in fov_and_peak_ids_list:
lineages.append(make_lineage_chnl_stack(fov_and_peak_ids))
# combine all dictionaries into one dictionary
Cells = {} # create dictionary to hold all information
for cell_dict in lineages: # for all the other dictionaries in the list
Cells.update(cell_dict) # updates Cells with the entries in cell_dict
return Cells
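# A minimal sketch of aggregating lineages over several FOVs into one Cells dictionary, assuming
# `specs` has been produced upstream (e.g. by the channel picking step).
def _example_all_lineages(fov_id_list, specs):
    Cells = {}
    for fov_id in fov_id_list:
        Cells.update(make_lineages_fov(fov_id, specs))
    return Cells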
# get number of cells in each frame and total number of pairwise interactions
def get_cell_counts(regionprops_list):
cell_count_list = [len(time_regions) for time_regions in regionprops_list]
interaction_count_list = []
for i,cell_count in enumerate(cell_count_list):
if i+1 == len(cell_count_list):
break
interaction_count_list.append(cell_count*cell_count_list[i+1])
total_cells = np.sum(cell_count_list)
total_interactions = np.sum(interaction_count_list)
return(total_cells, total_interactions, cell_count_list, interaction_count_list)
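# Tiny worked example of get_cell_counts on a hypothetical regionprops_list with 2, 3 and 1
# regions in consecutive frames: total_cells = 6 and pairwise interactions = 2*3 + 3*1 = 9.
def _example_get_cell_counts():
    fake_regions = [[object()] * 2, [object()] * 3, [object()] * 1]
    return get_cell_counts(fake_regions)  # (6, 9, [2, 3, 1], [6, 3])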
# get cells' information for track prediction
def gather_interactions_and_events(regionprops_list):
total_cells, total_interactions, cell_count_list, interaction_count_list = get_cell_counts(regionprops_list)
    # instantiate an array with a 2x5 slice for each pair of cells:
    # min_y, max_y, centroid_y, area, and orientation
# in reality it would be much, much more efficient to
# look this information up in the data generator at run time
# for now, this will work
pairwise_cell_data = np.zeros((total_interactions,2,5,1))
# make a dictionary, the keys of which will be row indices so that we
# can quickly look up which timepoints/cells correspond to which
    # rows of our model's output
pairwise_cell_lookup = {}
# populate arrays
interaction_count = 0
cell_count = 0
for frame, frame_regions in enumerate(regionprops_list):
for region in frame_regions:
cell_label = region.label
y,x = region.centroid
bbox = region.bbox
orientation = region.orientation
min_y = bbox[0]
max_y = bbox[2]
area = region.area
cell_label = region.label
cell_info = (min_y, max_y, y, area, orientation)
cell_count += 1
try:
frame_plus_one_regions = regionprops_list[frame+1]
except IndexError as e:
# print(e)
break
for region_plus_one in frame_plus_one_regions:
                paired_cell_label = region_plus_one.label
                y,x = region_plus_one.centroid
                bbox = region_plus_one.bbox
                orientation = region_plus_one.orientation
                min_y = bbox[0]
                max_y = bbox[2]
                area = region_plus_one.area
pairwise_cell_data[interaction_count,0,:,0] = cell_info
pairwise_cell_data[interaction_count,1,:,0] = (min_y, max_y, y, area, orientation)
pairwise_cell_lookup[interaction_count] = {'frame':frame, 'cell_label':cell_label, 'paired_cell_label':paired_cell_label}
interaction_count += 1
return(pairwise_cell_data, pairwise_cell_lookup)
# look up which cells are interacting according to the track model
def cell_interaction_lookup(predictions, lookup_table):
    '''
    Accepts the prediction matrix and the row-index lookup table and returns a DataFrame of
    predicted interactions (migration, child, false_join) with confidence above 0.95.
    See the example sketch after this function.
    '''
frame = []
cell_label = []
paired_cell_label = []
interaction_type = []
# loop over rows of predictions
for row_index in range(predictions.shape[0]):
row_predictions = predictions[row_index]
row_relationship = np.where(row_predictions > 0.95)[0]
if row_relationship.size == 0:
continue
elif row_relationship[0] == 3:
continue
elif row_relationship[0] == 0:
interaction_type.append('migration')
elif row_relationship[0] == 1:
interaction_type.append('child')
elif row_relationship[0] == 2:
interaction_type.append('false_join')
frame.append(lookup_table[row_index]['frame'])
cell_label.append(lookup_table[row_index]['cell_label'])
paired_cell_label.append(lookup_table[row_index]['paired_cell_label'])
track_df = pd.DataFrame(data={'frame':frame,
'cell_label':cell_label,
'paired_cell_label':paired_cell_label,
'interaction_type':interaction_type})
return(track_df)
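# Sketch of chaining the tracking helpers above: build pairwise feature data, run one of the
# tracking models on it, then tabulate the confident interactions. Assumes `tracking_model` is
# one of the loaded Keras models (see get_tracking_model_dict below).
def _example_lookup_interactions(regionprops_list, tracking_model):
    pairwise_data, lookup = gather_interactions_and_events(regionprops_list)
    predictions = tracking_model.predict(pairwise_data)
    return cell_interaction_lookup(predictions, lookup)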
def get_tracking_model_dict():
model_dict = {}
if not 'migrate_model' in model_dict:
model_dict['migrate_model'] = models.load_model(params['tracking']['migrate_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'child_model' in model_dict:
model_dict['child_model'] = models.load_model(params['tracking']['child_model'],
custom_objects={'bce_dice_loss':bce_dice_loss,
'f2_m':f2_m})
if not 'appear_model' in model_dict:
model_dict['appear_model'] = models.load_model(params['tracking']['appear_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'die_model' in model_dict:
model_dict['die_model'] = models.load_model(params['tracking']['die_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'disappear_model' in model_dict:
model_dict['disappear_model'] = models.load_model(params['tracking']['disappear_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
if not 'born_model' in model_dict:
model_dict['born_model'] = models.load_model(params['tracking']['born_model'],
custom_objects={'all_loss':all_loss,
'f2_m':f2_m})
# if not 'zero_cell_model' in model_dict:
# model_dict['zero_cell_model'] = models.load_model(params['tracking']['zero_cell_model'],
# custom_objects={'absolute_dice_loss':absolute_dice_loss,
# 'f2_m':f2_m})
# if not 'one_cell_model' in model_dict:
# model_dict['one_cell_model'] = models.load_model(params['tracking']['one_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
# if not 'two_cell_model' in model_dict:
# model_dict['two_cell_model'] = models.load_model(params['tracking']['two_cell_model'],
# custom_objects={'all_loss':all_loss,
# 'f2_m':f2_m})
# if not 'geq_three_cell_model' in model_dict:
# model_dict['geq_three_cell_model'] = models.load_model(params['tracking']['geq_three_cell_model'],
# custom_objects={'bce_dice_loss':bce_dice_loss,
# 'f2_m':f2_m})
return(model_dict)
# Creates lineage for a single channel
def make_lineage_chnl_stack(fov_and_peak_id):
'''
Create the lineage for a set of segmented images for one channel. Start by making the regions in the first time point potential cells. Go forward in time and map regions in each timepoint to the potential cells in previous time points, building the life of a cell. Basic checks are used, such as requiring that regions overlap, grow a little, and do not shrink too much. If regions do not link back in time, discard them. If two regions map to one previous region, check whether it is a sensible division event.
Parameters
----------
fov_and_peak_id : tuple
(fov_id, peak_id)
Returns
-------
Cells : dict
A dictionary of all the cells from this lineage, divided and undivided
'''
# load in parameters
# if leaf regions see no action for longer than this, drop them
lost_cell_time = params['track']['lost_cell_time']
# only cells with y positions below this value will receive the honor of becoming new
# cells, unless they are daughters of current cells
new_cell_y_cutoff = params['track']['new_cell_y_cutoff']
# only regions with labels less than or equal to this value will be considered to start cells
new_cell_region_cutoff = params['track']['new_cell_region_cutoff']
# get the specific ids from the tuple
fov_id, peak_id = fov_and_peak_id
# start time is the first time point for this series of TIFFs.
start_time_index = min(params['time_table'][fov_id].keys())
information('Creating lineage for FOV %d, channel %d.' % (fov_id, peak_id))
# load segmented data
image_data_seg = load_stack(fov_id, peak_id, color=params['track']['seg_img'])
# image_data_seg = load_stack(fov_id, peak_id, color='seg')
# Calculate all data for all time points.
# this list will be length of the number of time points
regions_by_time = [regionprops(label_image=timepoint) for timepoint in image_data_seg] # removed coordinates='xy'
# Set up data structures.
Cells = {} # Dict that holds all the cell objects, divided and undivided
cell_leaves = [] # cell ids of the current leaves of the growing lineage tree
# go through regions by timepoint and build lineages
# timepoints start with the index of the first image
for t, regions in enumerate(regions_by_time, start=start_time_index):
# if there are cell leaves who are still waiting to be linked, but
# too much time has passed, remove them.
for leaf_id in cell_leaves[:]: # iterate over a copy since leaves may be removed
if t - Cells[leaf_id].times[-1] > lost_cell_time:
cell_leaves.remove(leaf_id)
# make all the regions leaves if there are no current leaves
if not cell_leaves:
for region in regions:
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
# Create cell and put in cell dictionary
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
# add this id to the list of current leaves
cell_leaves.append(cell_id)
# Determine if the regions are children of current leaves
else:
### create mapping between regions and leaves
leaf_region_map = {}
leaf_region_map = {leaf_id : [] for leaf_id in cell_leaves}
# get the last y position of current leaves and create tuple with the id
current_leaf_positions = [(leaf_id, Cells[leaf_id].centroids[-1][0]) for leaf_id in cell_leaves]
# go through regions, they will come off in Y position order
for r, region in enumerate(regions):
# create tuple which is cell_id of closest leaf, distance
current_closest = (None, float('inf'))
# check this region against all positions of all current leaf regions,
# find the closest one in y.
for leaf in current_leaf_positions:
# calculate distance between region and leaf
y_dist_region_to_leaf = abs(region.centroid[0] - leaf[1])
# if the distance is closer than before, update
if y_dist_region_to_leaf < current_closest[1]:
current_closest = (leaf[0], y_dist_region_to_leaf)
# update map with the closest region
leaf_region_map[current_closest[0]].append((r, current_closest[1])) # store the distance to the mapped leaf
# go through the current leaf regions.
# limit to the closest two regions if more than two regions map to the leaf
for leaf_id, region_links in six.iteritems(leaf_region_map):
if len(region_links) > 2:
closest_two_regions = sorted(region_links, key=lambda x: x[1])[:2]
# but sort by region order so top region is first
closest_two_regions = sorted(closest_two_regions, key=lambda x: x[0])
# replace value in dictionary
leaf_region_map[leaf_id] = closest_two_regions
# for the discarded regions, put them as new leaves
# if they are near the closed end of the channel
discarded_regions = sorted(region_links, key=lambda x: x[1])[2:]
for discarded_region in discarded_regions:
region = regions[discarded_region[0]]
if region.centroid[0] < new_cell_y_cutoff and region.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
else:
# since the regions are ordered, none of the remaining will pass
break
### iterate over the leaves, looking to see what regions connect to them.
for leaf_id, region_links in six.iteritems(leaf_region_map):
# if there is just one suggested descendant,
# see if it checks out and append the data
if len(region_links) == 1:
region = regions[region_links[0][0]] # grab the region from the list
# check if the pairing makes sense based on size and position
# this function returns true if things are okay
if check_growth_by_region(Cells[leaf_id], region):
# grow the cell by the region in this case
Cells[leaf_id].grow(region, t)
# there may be two daughters, or maybe there is just one child and a new cell
elif len(region_links) == 2:
# grab these two daughters
region1 = regions[region_links[0][0]]
region2 = regions[region_links[1][0]]
# check_division returns 3 if cell divided,
# 1 if first region is just the cell growing and the second is trash
# 2 if the second region is the cell, and the first is trash
# or 0 if it cannot be determined.
check_division_result = check_division(Cells[leaf_id], region1, region2)
if check_division_result == 3:
# create two new cells and divide the mother
daughter1_id = create_cell_id(region1, t, peak_id, fov_id)
daughter2_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[daughter1_id] = Cell(daughter1_id, region1, t,
parent_id=leaf_id)
Cells[daughter2_id] = Cell(daughter2_id, region2, t,
parent_id=leaf_id)
Cells[leaf_id].divide(Cells[daughter1_id], Cells[daughter2_id], t)
# remove mother from current leaves
cell_leaves.remove(leaf_id)
# add the daughter ids to list of current leaves if they pass cutoffs
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_leaves.append(daughter1_id)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_leaves.append(daughter2_id)
# 1 means that daughter 1 is just a continuation of the mother
# The other region should become a new leaf if it passes the requirements
elif check_division_result == 1:
Cells[leaf_id].grow(region1, t)
if region2.centroid[0] < new_cell_y_cutoff and region2.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region2, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region2, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# ditto for 2
elif check_division_result == 2:
Cells[leaf_id].grow(region2, t)
if region1.centroid[0] < new_cell_y_cutoff and region1.label <= new_cell_region_cutoff:
cell_id = create_cell_id(region1, t, peak_id, fov_id)
Cells[cell_id] = Cell(cell_id, region1, t, parent_id=None)
cell_leaves.append(cell_id) # add to leaves
# return the dictionary with all the cells
return Cells
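# Hedged usage sketch (illustrative only; assumes params['time_table'] and the segmented
# image stacks exist for the requested FOV/peak ids): make_lineage_chnl_stack() is meant
# to be mapped over (fov_id, peak_id) tuples, and the per-channel Cells dicts can be merged.
def _example_build_all_lineages(fov_peak_pairs):
    all_cells = {}
    for fov_and_peak_id in fov_peak_pairs:
        all_cells.update(make_lineage_chnl_stack(fov_and_peak_id))
    return all_cells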
### Cell class and related functions
# this is the object that holds all information for a detection
class Detection():
'''
The Detection is a single detection in a single frame.
'''
# initialize (birth) the cell
def __init__(self, detection_id, region, t):
'''The detection must be given a unique detection_id and passed the region
information from the segmentation
Parameters
----------
detection_id : str
detection_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point
r is region label for that segmentation
Use the function create_detection_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
'''
# create all the attributes
# id
self.id = detection_id
# identification convenience
self.fov = int(detection_id.split('f')[1].split('p')[0])
self.peak = int(detection_id.split('p')[1].split('t')[0])
self.t = t
self.cell_count = 1
# self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
if region is not None:
self.label = region.label
self.bbox = region.bbox
self.area = region.area
# calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.length = length_tmp
self.width = width_tmp
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = (length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 + (4/3) * np.pi * (width_tmp/2)**3
# angle of the fitted ellipsoid and centroid location
self.orientation = region.orientation
self.centroid = region.centroid
else:
self.label = None
self.bbox = None
self.area = None
# calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = (None, None)
self.length = None
self.width = None
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volume = None
# angle of the fitted ellipsoid and centroid location
self.orientation = None
self.centroid = None
# this is the object that holds all information for a cell
class Cell():
'''
The Cell class is one cell that has been born. It is not necessarily a cell that
has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent_id=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
----------
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent_id : str
id of the parent if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(cell_id.split('r')[1])
# parent id may be none
self.parent = parent_id
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# these are special data types, as they include information from the daughters upon division
# computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
# calculating cell length and width by using Feret diameter
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def divide(self, daughter1, daughter2, t):
'''Divide the cell and update stats.
daughter1 and daughter2 are instances of the Cell class.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the cell
self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = daughter1.birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (daughter1.lengths[0] + daughter2.lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((daughter1.widths[0] + daughter2.widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0)
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
except:
self.elong_rate = np.float64('NaN')
warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = daughter1.lengths[0] / (daughter1.lengths[0] + daughter2.lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
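# Minimal sketch (illustrative only) of the volume model used by the Detection and Cell
# classes above: a cylinder of length (l - w) capped by two hemispheres, i.e. one sphere
# of diameter w. Inputs are in pixels, so the result is in px^3.
def _spherocylinder_volume(length_px, width_px):
    radius = width_px / 2.0
    return (length_px - width_px) * np.pi * radius**2 + (4.0 / 3.0) * np.pi * radius**3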
class CellTree():
def __init__(self):
self.cells = {}
self.scores = [] # probably needs to be different
self.score = 0
self.cell_id_list = []
def add_cell(self, cell):
self.cells[cell.id] = cell
self.cell_id_list.append(cell.id)
self.cell_id_list.sort()
def update_score(self):
pass
def get_cell(self, cell_id):
return(self.cells[cell_id])
def get_top_from_cell(self, cell_id):
pass
# this is the object that holds all information for a cell
class CellFromGraph():
'''
The CellFromGraph class is one cell that has been born.
It is not necessarily a cell that has divided.
'''
# initialize (birth) the cell
def __init__(self, cell_id, region, t, parent=None):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
----------
cell_id : str
cell_id is a string in the form fXpXtXrX
f is 3 digit FOV number
p is 4 digit peak number
t is 4 digit time point at time of birth
r is region label for that segmentation
Use the function create_cell_id to return a proper string.
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
parent : CellFromGraph object or None
the parent cell object, if there is one.
'''
# create all the attributes
# id
self.id = cell_id
# identification convenience
self.fov = int(cell_id.split('f')[1].split('p')[0])
self.peak = int(cell_id.split('p')[1].split('t')[0])
self.birth_label = int(region.label)
self.regions = [region]
# parent is a CellFromGraph object, can be None
self.parent = parent
# daughters is updated when cell divides
# if this is none then the cell did not divide
self.daughters = None
# birth and division time
self.birth_time = t
self.division_time = None # filled out if cell divides
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][self.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating cell length and width by using Feret diameter. These values are in pixels
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate cell volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# these are special data types, as they include information from the daughters upon division
# computed upon division
self.times_w_div = None
self.lengths_w_div = None
self.widths_w_div = None
# this information is the "production" information that
# we want to extract at the end. Some of this is for convenience.
# This is only filled out if a cell divides.
self.sb = None # in um
self.sd = None # this should be combined lengths of daughters, in um
self.delta = None
self.tau = None
self.elong_rate = None
self.septum_position = None
self.width = None
self.death = None
self.disappear = None
self.area_mean_fluorescence = {}
self.volume_mean_fluorescence = {}
self.total_fluorescence = {}
self.foci = {}
def __len__(self):
return(len(self.times))
def add_parent(self, parent):
self.parent = parent
def grow(self, region, t):
'''Append data from a region to this cell.
use cell.times[-1] to get most current value'''
self.times.append(t)
self.abs_times.append(params['time_table'][self.fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
# calculating cell length and width by using Feret diameter
length_tmp, width_tmp = feretdiameter(region)
if length_tmp is None:
warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
def die(self, region, t):
'''
Annotate cell as dying from current t to next t.
'''
self.death = t
def disappears(self, region, t):
'''
Annotate cell as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
assert len(self.daughters) < 3, "Too many daughter cells in cell {}".format(self.id)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda cell: cell.centroids[0][0])
self.divide(t)
def divide(self, t):
'''Divide the cell and update stats.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the cell
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.division_time = self.daughters[0].birth_time
# update times
self.times_w_div = self.times + [self.division_time]
self.abs_times.append(params['time_table'][self.fov][self.division_time])
# flesh out the stats for this cell
# size at birth
self.sb = self.lengths[0] * params['pxl2um']
# force the division length to be the combined lengths of the daughters
self.sd = (self.daughters[0].lengths[0] + self.daughters[1].lengths[0]) * params['pxl2um']
# delta is here for convenience
self.delta = self.sd - self.sb
# generation time. Use more accurate times and convert to minutes
self.tau = np.float64((self.abs_times[-1] - self.abs_times[0]) / 60.0)
# include the data points from the daughters
self.lengths_w_div = [l * params['pxl2um'] for l in self.lengths] + [self.sd]
self.widths_w_div = [w * params['pxl2um'] for w in self.widths] + [((self.daughters[0].widths[0] + self.daughters[1].widths[0])/2) * params['pxl2um']]
# volumes for all timepoints, in um^3
self.volumes_w_div = []
for i in range(len(self.lengths_w_div)):
self.volumes_w_div.append((self.lengths_w_div[i] - self.widths_w_div[i]) *
np.pi * (self.widths_w_div[i]/2)**2 +
(4/3) * np.pi * (self.widths_w_div[i]/2)**3)
# calculate elongation rate.
try:
times = np.float64((np.array(self.abs_times) - self.abs_times[0]) / 60.0) # convert times to minutes
log_lengths = np.float64(np.log(self.lengths_w_div))
p = np.polyfit(times, log_lengths, 1) # this wants float64
self.elong_rate = p[0] * 60.0 # convert to hours
except:
self.elong_rate = np.float64('NaN')
warning('Elongation rate calculation failed for {}.'.format(self.id))
# calculate the septum position as a number between 0 and 1
# which indicates the size of daughter closer to the closed end
# compared to the total size
self.septum_position = self.daughters[0].lengths[0] / (self.daughters[0].lengths[0] + self.daughters[1].lengths[0])
# calculate single width over cell's life
self.width = np.mean(self.widths_w_div)
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.sb = self.sb.astype(convert_to)
self.sd = self.sd.astype(convert_to)
self.delta = self.delta.astype(convert_to)
self.elong_rate = self.elong_rate.astype(convert_to)
self.tau = self.tau.astype(convert_to)
self.septum_position = self.septum_position.astype(convert_to)
self.width = self.width.astype(convert_to)
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.lengths_w_div = [length.astype(convert_to) for length in self.lengths_w_div]
self.widths = [width.astype(convert_to) for width in self.widths]
self.widths_w_div = [width.astype(convert_to) for width in self.widths_w_div]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
self.volumes_w_div = [vol.astype(convert_to) for vol in self.volumes_w_div]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def add_focus(self, focus, t):
'''Adds a focus to the cell. See function foci_info_unet'''
self.foci[focus.id] = focus
def print_info(self):
'''prints information about the cell'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.parent is not None:
print('parent = {}'.format(self.parent.id))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['fov'] = self.fov
data['trap'] = self.peak
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
data['division_time'] = self.division_time
data['birth_label'] = self.birth_label
data['birth_time'] = self.birth_time
data['sb'] = self.sb
data['sd'] = self.sd
data['delta'] = self.delta
data['tau'] = self.tau
data['elong_rate'] = self.elong_rate
data['septum_position'] = self.septum_position
data['death'] = self.death
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['times'] = self.times
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
# if a cell divides then there is one extra value in abs_times
if self.division_time is None:
data['seconds'] = self.abs_times
else:
data['seconds'] = self.abs_times[:-1]
# if there is fluorescence data, place it into the dataframe
if len(self.area_mean_fluorescence.keys()) != 0:
for fluorescence_channel in self.area_mean_fluorescence.keys():
data['{}_area_mean_fluorescence'.format(fluorescence_channel)] = self.area_mean_fluorescence[fluorescence_channel]
data['{}_volume_mean_fluorescence'.format(fluorescence_channel)] = self.volume_mean_fluorescence[fluorescence_channel]
data['{}_total_fluorescence'.format(fluorescence_channel)] = self.total_fluorescence[fluorescence_channel]
df = pd.DataFrame(data, index=data['id'])
return(df)
# this is the object that holds all information for a fluorescent focus
# this class can eventually be used in focus tracking, much like the Cell class
# is used for cell tracking
class Focus():
'''
The Focus class holds information on fluorescent foci.
A single focus can be present in multiple different cells.
'''
# initialize the focus
def __init__(self,
cell,
region,
seg_img,
intensity_image,
t):
'''The cell must be given a unique cell_id and passed the region
information from the segmentation
Parameters
----------
cell : a Cell object
region : region properties object
Information about the labeled region from
skimage.measure.regionprops()
seg_img : 2D numpy array
Labelled image of cell segmentations
intensity_image : 2D numpy array
Fluorescence image with foci
'''
# create all the attributes
# id
focus_id = create_focus_id(region,
t,
cell.peak,
cell.fov,
experiment_name=params['experiment_name'])
self.id = focus_id
# identification convenience
self.appear_label = int(region.label)
self.regions = [region]
self.fov = cell.fov
self.peak = cell.peak
# cell is a CellFromGraph object
# cells are added later using the .add_cell method
self.cells = [cell]
# daughters is updated when focus splits
# if this is none then the focus did not split
self.parent = None
self.daughters = None
self.merger_partner = None
# appearance and split time
self.appear_time = t
self.split_time = None # filled out if focus splits
# the following information is on a per timepoint basis
self.times = [t]
self.abs_times = [params['time_table'][cell.fov][t]] # elapsed time in seconds
self.labels = [region.label]
self.bboxes = [region.bbox]
self.areas = [region.area]
# calculating focus length and width by using Feret diameter.
# These values are in pixels
# NOTE: in the future, update to straighten a focus and get straightened length/width
# print(region)
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths = [length_tmp]
self.widths = [width_tmp]
# calculate focus volume as cylinder plus hemispherical ends (sphere). Unit is px^3
self.volumes = [(length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3]
# angle of the fitted ellipsoid and centroid location
self.orientations = [region.orientation]
self.centroids = [region.centroid]
# special information for foci
self.elong_rate = None
self.disappear = None
self.area_mean_fluorescence = []
self.volume_mean_fluorescence = []
self.total_fluorescence = []
self.median_fluorescence = []
self.sd_fluorescence = []
self.disp_l = []
self.disp_w = []
self.calculate_fluorescence(seg_img, intensity_image, region)
def __len__(self):
return(len(self.times))
def __str__(self):
return(self.print_info())
def add_cell(self, cell):
self.cells.append(cell)
def add_parent_focus(self, parent):
self.parent = parent
def merge(self, partner):
self.merger_partner = partner
def grow(self,
region,
t,
seg_img,
intensity_image,
current_cell):
'''Append data from a region to this focus.
use self.times[-1] to get most current value.'''
if current_cell is not self.cells[-1]:
self.add_cell(current_cell)
self.times.append(t)
self.abs_times.append(params['time_table'][self.cells[-1].fov][t])
self.labels.append(region.label)
self.bboxes.append(region.bbox)
self.areas.append(region.area)
self.regions.append(region)
# calculating focus length and width by using Feret diameter
length_tmp = region.major_axis_length
width_tmp = region.minor_axis_length
# length_tmp, width_tmp = feretdiameter(region)
# if length_tmp == None:
# warning('feretdiameter() failed for ' + self.id + ' at t=' + str(t) + '.')
self.lengths.append(length_tmp)
self.widths.append(width_tmp)
self.volumes.append((length_tmp - width_tmp) * np.pi * (width_tmp/2)**2 +
(4/3) * np.pi * (width_tmp/2)**3)
self.orientations.append(region.orientation)
self.centroids.append(region.centroid)
self.calculate_fluorescence(seg_img, intensity_image, region)
def calculate_fluorescence(self,
seg_img,
intensity_image,
region):
total_fluor = np.sum(intensity_image[seg_img == region.label])
self.total_fluorescence.append(total_fluor)
self.area_mean_fluorescence.append(total_fluor/self.areas[-1])
self.volume_mean_fluorescence.append(total_fluor/self.volumes[-1])
self.median_fluorescence.append(np.median(intensity_image[seg_img == region.label]))
self.sd_fluorescence.append(np.std(intensity_image[seg_img == region.label]))
# get the focus' displacement from center of cell
# find x and y position relative to the whole image (convert from small box)
# calculate distance of foci from middle of cell (scikit image)
orientation = region.orientation
if orientation < 0:
orientation = np.pi+orientation
cell_idx = self.cells[-1].times.index(self.times[-1]) # final time in self.times is current time
cell_centroid = self.cells[-1].centroids[cell_idx]
focus_centroid = region.centroid
disp_y = (focus_centroid[0]-cell_centroid[0])*np.sin(orientation) - (focus_centroid[1]-cell_centroid[1])*np.cos(orientation)
disp_x = (focus_centroid[0]-cell_centroid[0])*np.cos(orientation) + (focus_centroid[1]-cell_centroid[1])*np.sin(orientation)
# append foci information to the list
self.disp_l = np.append(self.disp_l, disp_y)
self.disp_w = np.append(self.disp_w, disp_x)
def disappears(self, region, t):
'''
Annotate focus as disappearing from current t to next t.
'''
self.disappear = t
def add_daughter(self, daughter, t):
if self.daughters is None:
self.daughters = [daughter]
else:
self.daughters.append(daughter)
# sort daughters by y position, with smaller y-value first.
# this will cause the daughter closer to the closed end of the trap to be listed first.
self.daughters.sort(key=lambda focus: focus.centroids[0][0])
self.divide(t)
def divide(self, t):
'''Divide the focus and update stats.
daughter1 is the daughter closer to the closed end.'''
# put the daughter ids into the focus
# self.daughters = [daughter1.id, daughter2.id]
# give this guy a division time
self.split_time = self.daughters[0].appear_time
# convert data to smaller floats. No need for float64
# see https://docs.scipy.org/doc/numpy-1.13.0/user/basics.types.html
convert_to = 'float16' # numpy datatype to convert to
self.lengths = [length.astype(convert_to) for length in self.lengths]
self.widths = [width.astype(convert_to) for width in self.widths]
self.volumes = [vol.astype(convert_to) for vol in self.volumes]
# note the float16 is hardcoded here
self.orientations = [np.float16(orientation) for orientation in self.orientations]
self.centroids = [(y.astype(convert_to), x.astype(convert_to)) for y, x in self.centroids]
def print_info(self):
'''prints information about the focus'''
print('id = %s' % self.id)
print('times = {}'.format(', '.join('{}'.format(t) for t in self.times)))
print('lengths = {}'.format(', '.join('{:.2f}'.format(l) for l in self.lengths)))
if self.daughters is not None:
print('daughters = {}'.format(', '.join('{}'.format(daughter.id) for daughter in self.daughters)))
if self.cells is not None:
print('cells = {}'.format([cell.id for cell in self.cells]))
def make_wide_df(self):
data = {}
data['id'] = self.id
data['cells'] = self.cells
data['parent'] = self.parent
data['child1'] = None
data['child2'] = None
# data['division_time'] = self.division_time
data['appear_label'] = self.appear_label
data['appear_time'] = self.appear_time
data['disappear'] = self.disappear
if self.daughters is not None:
data['child1'] = self.daughters[0]
if len(self.daughters) == 2:
data['child2'] = self.daughters[1]
df = pd.DataFrame(data, index=[self.id])
return(df)
def make_long_df(self):
data = {}
data['id'] = [self.id]*len(self.times)
data['time'] = self.times
# data['cell'] = self.cells
data['length'] = self.lengths
data['volume'] = self.volumes
data['area'] = self.areas
data['seconds'] = self.abs_times
data['area_mean_fluorescence'] = self.area_mean_fluorescence
data['volume_mean_fluorescence'] = self.volume_mean_fluorescence
data['total_fluorescence'] = self.total_fluorescence
data['median_fluorescence'] = self.median_fluorescence
data['sd_fluorescence'] = self.sd_fluorescence
data['disp_l'] = self.disp_l
data['disp_w'] = self.disp_w
# print(data['id'])
df = pd.DataFrame(data, index=data['id'])
return(df)
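# Minimal sketch (illustrative only) of the coordinate rotation used in
# Focus.calculate_fluorescence() above: the (row, col) displacement of a focus from its
# cell's centroid is projected onto the cell's long axis (disp_l) and short axis (disp_w)
# using the regionprops orientation angle.
def _focus_displacement(focus_centroid, cell_centroid, orientation):
    if orientation < 0:
        orientation = np.pi + orientation
    dy = focus_centroid[0] - cell_centroid[0]
    dx = focus_centroid[1] - cell_centroid[1]
    disp_l = dy * np.sin(orientation) - dx * np.cos(orientation)
    disp_w = dy * np.cos(orientation) + dx * np.sin(orientation)
    return disp_l, disp_w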
class PredictTrackDataGenerator(utils.Sequence):
'''Generates data for running tracking class predictions.
Input is a list of regionprops for each frame of a stack of labeled images.'''
def __init__(self,
data,
batch_size=32,
dim=(4,5,9)):
'Initialization'
self.batch_size = batch_size
self.data = data
self.dim = dim
self.on_epoch_end()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(len(self.data) / self.batch_size))
def __getitem__(self, index):
'Generate one batch of data'
# Generate keys of the batch
batch_indices = self.indices[index*self.batch_size:(index+1)*self.batch_size]
# Generate data
X = self.__data_generation(batch_indices)
return X
def on_epoch_end(self):
'Updates indexes after each epoch'
self.indices = np.arange(len(self.data))
def __data_generation(self, batch_indices):
'Generates data containing batch_size samples' # X : (n_samples, *dim, n_channels)
# Initialization
# shape is (batch_size, max_cell_num, frame_num, cell_feature_num, 1)
X = np.zeros((self.batch_size, self.dim[0], self.dim[1], self.dim[2], 1))
# Generate data
for batch_pos, idx in enumerate(batch_indices): # batch_pos indexes within this batch, idx indexes self.data
start_idx = idx-2
end_idx = idx+3
# print(start_idx, end_idx)
if start_idx < 0:
batch_frame_list = []
for empty_idx in range(abs(start_idx)):
batch_frame_list.append([])
batch_frame_list.extend(self.data[0:end_idx])
elif end_idx > len(self.data):
batch_frame_list = self.data[start_idx:len(self.data)]
for empty_idx in range(end_idx - len(self.data)):
batch_frame_list.append([]) # pad with empty frames at the end of the stack
else:
batch_frame_list = self.data[start_idx:end_idx]
for i,frame_region_list in enumerate(batch_frame_list):
# shape is (max_cell_num, frame_num, cell_feature_num)
# tmp_x = np.zeros((self.dim[0], self.dim[1], self.dim[2]))
if not frame_region_list:
continue
for region_idx, region in enumerate(frame_region_list):
y,x = region.centroid
bbox = region.bbox
orientation = region.orientation
min_y = bbox[0]
max_y = bbox[2]
min_x = bbox[1]
max_x = bbox[3]
area = region.area
length = region.major_axis_length
cell_label = region.label
cell_index = cell_label - 1
cell_info = (min_x, max_x, x, min_y, max_y, y, orientation, area, length)
if cell_index + 1 > self.dim[0]: # skip labels beyond the max_cell_num dimension
continue
# supplement tmp_x at (region_idx, )
# tmp_x[region_idx, i, :] = cell_info
X[batch_pos, cell_index, i, :, 0] = cell_info # tmp_x
return X
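# Hedged usage sketch (illustrative only; the model object and batch size are assumptions):
# the generator wraps a per-frame list of regionprops so a Keras model can score features
# in fixed windows of dim[1]=5 frames around each frame, with up to dim[0] cells and
# dim[2] features per cell. With older standalone Keras this would use predict_generator.
def _example_predict_track_classes(regionprops_list, track_model):
    data_gen = PredictTrackDataGenerator(regionprops_list, batch_size=32, dim=(4, 5, 9))
    return track_model.predict(data_gen)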
def get_greatest_score_info(first_node, second_node, graph):
'''Return the edge type and score of the highest-scoring edge between two nodes
in the tracking graph. Useful for track linking.
'''
score_names = [k for k in graph.get_edge_data(first_node, second_node).keys()]
pred_scores = [val['score'] for k,val in graph.get_edge_data(first_node, second_node).items()]
max_score_index = np.argmax(pred_scores)
max_name = score_names[max_score_index]
max_score = pred_scores[max_score_index]
return(max_name, max_score)
def get_score_by_type(first_node, second_node, graph, score_type='child'):
'''Return the score of the edge of the given type between two nodes in the
tracking graph. Useful for track linking.
'''
pred_score = graph.get_edge_data(first_node, second_node)[score_type]['score']
return(pred_score)
def count_unvisited(G, experiment_name):
count = 0
for node_id in G.nodes:
if node_id.startswith(experiment_name):
if not G.nodes[node_id]['visited']:
count += 1
return(count)
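# Minimal sketch (illustrative only; node names and edge keys are made up): the tracking
# graph consumed below is expected to be a networkx MultiDiGraph whose parallel edges are
# keyed by interaction type and carry a 'score' attribute, which is exactly what
# get_greatest_score_info() and get_score_by_type() read.
def _example_edge_scores():
    import networkx as nx
    G = nx.MultiDiGraph()
    G.add_edge('det_a', 'det_b', key='migrate', score=0.9)
    G.add_edge('det_a', 'det_b', key='child', score=0.1)
    return get_greatest_score_info('det_a', 'det_b', G) # -> ('migrate', 0.9)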
def create_lineages_from_graph(graph,
graph_df,
fov_id,
peak_id,
):
'''
This function iterates through nodes in a graph of detections
to link the nodes as "CellFromGraph" objects, eventually
leading to the ultimate goal of returning
a CellTree object with each cell's information for the experiment.
For now it ignores the number of cells in a detection and simply
assumes a 1:1 relationship between detections and cell number.
'''
# iterate through all nodes in graph
# graph_score = 0
# track_dict = {}
# tracks = CellTree()
tracks = {}
for node_id in graph.nodes:
graph.nodes[node_id]['visited'] = False
graph_df['visited'] = False
num_unvisited = count_unvisited(graph, params['experiment_name'])
while num_unvisited > 0:
# which detection nodes are not yet visited
unvisited_detection_nodes = graph_df[(~(graph_df.visited) & graph_df.node_id.str.startswith(params['experiment_name']))]
# grab the first unvisited node_id from the dataframe
prior_node_id = unvisited_detection_nodes.iloc[0,1]
prior_node_time = graph.nodes[prior_node_id]['time']
prior_node_region = graph.nodes[prior_node_id]['region']
cell_id = create_cell_id(prior_node_region,
prior_node_time,
peak_id,
fov_id,
experiment_name=params['experiment_name'])
current_cell = CellFromGraph(cell_id,
prior_node_region,
prior_node_time,
parent=None)
if not cell_id in tracks.keys():
tracks[cell_id] = current_cell
else:
current_cell = tracks[cell_id]
# for use later in establishing predecessors
current_node_id = prior_node_id
# set this detection's "visited" status to True in the graph and in the dataframe
graph.nodes[prior_node_id]['visited'] = True
graph_df.iloc[np.where(graph_df.node_id==prior_node_id)[0][0],3] = True
# build current_track list to this detection's node
current_track = collections.deque()
current_track.append(current_node_id)
predecessors_list = [k for k in graph.predecessors(prior_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while len(unvisited_predecessors_list) != 0:
# initialize a scores array to select highest score from the available options
predecessor_scores = np.zeros(len(unvisited_predecessors_list))
# populate array with scores
for i in range(len(unvisited_predecessors_list)):
predecessor_node_id = unvisited_predecessors_list[i]
edge_type, edge_score = get_greatest_score_info(predecessor_node_id, current_node_id, graph)
predecessor_scores[i] = edge_score
# find highest score
max_index = np.argmax(predecessor_scores)
# grab the node_id corresponding to traversing the highest-scoring edge from the prior node
current_node_id = unvisited_predecessors_list[max_index]
current_track.appendleft(current_node_id)
predecessors_list = [k for k in graph.predecessors(current_node_id)]
unvisited_predecessors_list = [k for k in predecessors_list if not graph.nodes[k]['visited']]
while prior_node_id != 'B': # 'is not' with a string literal compares identity, not equality
# which nodes succeed our current node?
successor_node_ids = [node_id for node_id in graph.successors(prior_node_id)]
# keep only the potential successor detections that have not yet been visited
unvisited_node_ids = []
for i,successor_node_id in enumerate(successor_node_ids):
# if it starts with params['experiment_name'], it is a detection node, and not born, appear, etc.
if successor_node_id.startswith(params['experiment_name']):
# if it has been used in the cell track graph, i.e., if 'visited' is True,
# move on. Otherwise, append to our list
if graph.nodes[successor_node_id]['visited']:
continue
else:
unvisited_node_ids.append(successor_node_id)
# if it doesn't start with params['experiment_name'], it is a born, appear, etc., and should always be appended
else:
unvisited_node_ids.append(successor_node_id)
# initialize a scores array to select highest score from the available options
successor_scores = np.zeros(len(unvisited_node_ids))
successor_edge_types = []
# populate array with scores
for i in range(len(unvisited_node_ids)):
successor_node_id = unvisited_node_ids[i]
edge_type, edge_score = get_greatest_score_info(prior_node_id, successor_node_id, graph)
successor_scores[i] = edge_score
successor_edge_types.append(edge_type)
# find highest score
max_score = np.max(successor_scores)
max_index = np.argmax(successor_scores)
import h5py
import numpy as np
import datetime
import matplotlib.pyplot as plt
from matplotlib import dates
import pyresample as pr
from scipy.spatial import cKDTree
from pyproj import Proj
from scipy.interpolate import interp1d
import scipy
import pandas as pd
import netCDF4
def apr3tocit(apr3filename,fl,sphere_size,psd_filename_2ds,psd_filename_HVPS,query_k = 1,plotson=False,QC=False,slimfast=True,cit_aver=False,cit_aver2=False,
attenuation_correct=False,O2H2O={},per_for_atten = 50,
return_indices=False,BB=True,bbguess=500,
cal_adj_bool = False,cal_adj=0,
cloudtop=True,rollfix=True):
"""
=================
This function finds either the closest gate or averages over a number of gates (query_k) nearest to
the citation aircraft in the radar volume of the APR3. It can return a dict of the original full length
arrays and the matched arrays.
=====
Vars:
=====
apr3filename = str, filename of the apr hdf file
fl = awot object, the citation awot object
sphere_size = int, maximum distance allowed in the kdTree search
psd_filename_2ds = str, filename of the processed 2DS file
psd_filename_HVPS = str, filename of the processed HVPS3 file
query_k = int, number of gates considered in the average (if 1, use closest)
plotson = boolean, will create some premade plots that describe the matched data
QC = boolean, will apply a simple QC method: eliminates any gate within 0.5 km of the surface and the outliers
(plus/minus 1.5IQR)
slimfast = boolean, will not save original data. Cuts down on output file size by only outputting the matched data and the citation data.
cit_aver = boolean, averages the citation data variables using a 5 second moving average (there is overlap)
cit_aver2 = boolean, averages the citation data variables using a 5 second discrete average (there is NO overlap)
O2H2O = dict, data from sounding to correct for attenuation from O2 and H2O vapor
attenuation_correct = boolean, corrects for attenuation using LWC prof and Sounding. Uses 50th percentile of LWC Prof
per_for_atten = int, the percentile for the supercooled liquid water profile used in the attenuation correction.
return_indices = boolean, returns the matched gates in 1d coords
BB = boolean, mask gates from the BB and lower. Masks data using the BB_alt algorithm
bbguess = int, give your first guess of where the Bright Band is to assist the BB_alt algorithm
cal_adj_bool = bool, turn on calibration adjustment or not.
cal_adj = array, array of the adjustment needed for correct calibration between frequencies. [ka_adj, w_adj]
cloudtop = bool, eliminates sensitivity issues with the Ku-band data (~ < 10 dBZ) by masking out the cloudtop noise using a Gaussian filter
rollfix = bool, turn on or off the masking of data where the plane is rolling more than 10 degrees (the threshold can be changed).
=================
"""
#get citation times (datetimes)
cit_time = fl['time']['data']
#Eliminate BB?
if BB:
#Get rid of anything below the melting level + 250 m
apr = apr3read(apr3filename)
#there are two methods to this. One is more conservative (using mean Ku) the other more intense with LDR Ku
#apr = BB_alt(apr,bbguess) #old
if cloudtop:
print('Removing cloudtop noise..')
apr = cloudtopmask(apr)
###new BB tech 2/27/18 RJC
print('Removing BB and below')
apr = mask_surf(apr)
apr['ldr'] = np.ma.masked_where(apr['Ku'].mask,apr['ldr'])
#find bb profs
bb = precip_echo_filt3D(apr['ldr'],thresh=7)
ind1 = np.where(bb[12,:] == 1) #BB profiles based on LDR
top_a = find_bb(apr,ind1)
bb_long = extend_bb(ind1,apr['timedates'][12,:],top_a)
apr['Ku'][:,:,:] = np.ma.masked_where(apr['alt_gate'][:,:,:] <= bb_long,apr['Ku'][:,:,:])
apr['Ka'] = np.ma.masked_where(apr['Ku'].mask,apr['Ka'])
apr['W'] = np.ma.masked_where(apr['Ku'].mask,apr['W'])
###
#correct for attenuation using SLW and Ku
if attenuation_correct:
print('correcting for attenuation...')
apr = atten_cor3(apr,fl,per_for_atten,O2H2O,lwc_alt=False)
print('corrected.')
maxchange = apr['maxchange']
elif attenuation_correct:
print('correcting for attenuation...')
apr = atten_cor2(apr3filename,fl,per_for_atten,O2H2O,lwc_alt=False)
print('corrected.')
maxchange = apr['maxchange']
else:
apr = apr3read(apr3filename)
if cloudtop:
print('Removing cloudtop noise..')
apr = cloudtopmask(apr)
if cal_adj_bool:
print('adding calibration means...')
# These values come from the analysis performed by 3 research groups: NASA JPL, University of Leister, and the University of Illinois. Techniques use sigma_0 of the ocean surface, comparison of frequencies at low Z, and numerical simulations of particles. (error/uncertainty: +/- 0.5 dB)
apr['Ku'] = apr['Ku'] + 0.8
apr['Ka'] = apr['Ka'] + 1
#Whh is the only one with a time-variant calibration adjustment
apr['W'] = apr['W'] + cal_adj
#While calibrating the data, radar artifacts showed up when the roll of the aircraft was > 10degrees.
if rollfix:
roll = apr['roll']
roll3d = np.zeros(apr['Ku'].shape)
for i in np.arange(0,apr['Ku'].shape[1]):
for j in np.arange(0,apr['Ku'].shape[2]):
roll3d[:,i,j] = roll[i,j]
apr['Ku'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['Ku'])
apr['Ka'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['Ka'])
apr['W'] = np.ma.masked_where(np.abs(roll3d) > 10, apr['W'])
#Get APR3 times (datetimes)
time_dates = apr['timedates'][:,:]
#fix a few radar files where w-band disapears
if time_dates[12,0] >= datetime.datetime(2015,12,18,6,58):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,550):
temp = np.ma.masked_where(time_dates[12,:] >= datetime.datetime(2015,12,18,7,6),apr['W'][j,i,:])
apr['W'][j,i,:] = temp
if time_dates[12,0] >= datetime.datetime(2015,12,1,23,43,48) and time_dates[12,0] <=datetime.datetime(2015,12,1,23,43,49):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,550):
temp = np.ma.masked_where(time_dates[12,:] >= datetime.datetime(2015,12,2,0,1,40),apr['W'][j,i,:])
apr['W'][j,i,:] = temp
#Check if radar file is large enough to use (50 time steps is arbitrary)
if time_dates[12,:].shape[0] < 50:
print('Limited radar gates in time')
#return
#
#Load PSD
dtime_psd,ND,dD,midpoints = PSD_load(psd_filename_2ds,psd_filename_HVPS,day = time_dates[0,0].day,month=time_dates[0,0].month)
#
#Make ND a masked array (i.e. get rid of nans from loading it in)
ind = np.isnan(ND)
ND = np.ma.masked_where(ind,ND)
#for plotting routine
fontsize=14
#
#Varibles needed for the kdtree
leafsize = 16
query_eps = 0
query_p=2
query_distance_upper_bound = sphere_size
query_n_jobs =1
Barnes = True
K_d = sphere_size
#
#Pre-Determine arrays
Ku_gate = np.ma.array([])
Ka_gate = np.ma.array([])
W_gate = np.ma.array([])
DFR_gate = np.ma.array([])
DFR2_gate = np.ma.array([])
DFR3_gate = np.ma.array([])
lon_c = np.ma.array([])
lat_c = np.ma.array([])
alt_c = np.ma.array([])
t_c = np.ma.array([])
lon_r = np.ma.array([])
lat_r = np.ma.array([])
alt_r = np.ma.array([])
t_r = np.ma.array([])
dis_r = np.ma.array([])
ind_r = np.ma.array([])
conc_hvps3 = np.ma.array([])
T_c = np.ma.array([])
lwc_c = np.ma.array([])
ice_c = np.ma.array([])
cdp_c = np.ma.array([])
twc_c = np.ma.array([])
iwc_c = np.ma.array([])
#
#Set reference point (Currently Mount Olympus, Washington)
lat_0 = 47.7998
lon_0 = -123.7066
#
#Set up map projection to calculate cartesian distances
p = Proj(proj='laea', zone=10, ellps='WGS84',
lat_0=lat_0,
lon_0=lon_0)
#
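# note: this equal-area projection, centered on the fixed reference point above, converts
# lon/lat to Cartesian metres so that the gate-matching distances (sphere_size, K_d) can
# be evaluated in metres rather than degrees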
#make a 1d array of times and find radar start and end times
td = np.ravel(time_dates)
datestart = td[0]
dateend = td[td.shape[0]-1]
#
#Expand apr3 time to plus/minus 4 mins (added 11/8/17) 4 minutes is arbitrary, but what I used for 'good' matches.
datestart = datestart - datetime.timedelta(minutes=4)
dateend = dateend + datetime.timedelta(minutes=4)
#
#Constrain Citation data to radar time
ind = np.where(cit_time > datestart)
ind2 = np.where(cit_time < dateend)
ind3 = np.intersect1d(ind,ind2)
cit_time2 = fl['time']['data'][ind3]
cit_lon = fl['longitude']['data'][ind3]
cit_lat = fl['latitude']['data'][ind3]
cit_alt = fl['altitude']['data'][ind3]
bigins = 0
#
#Average Citation data
if cit_aver:
#Moving average tech.
temp1 = fl['temperature']['data']
temp2 = fl['lwc1']['data']
temp3 = fl['mso_frequency']['data']
temp4 = fl['Conc_CDP']['data']
temp5 = fl['twc']['data']
temp6 = fl['Nev_IWC']['data']
temp7 = fl['dewpoint_temperature1']['data']
temp8 = fl['Wwind']['data']
temp9 = fl['static_pressure']['data']
temp10 = fl['mixing_ratio']['data']
temp11 = fl['Uwind']['data']
temp12 = fl['Vwind']['data']
nsecs = 2
indarray1 = ind3 - nsecs
indarray2 = ind3 + nsecs + 1
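# note: with nsecs = 2 each slice indarray1[i]:indarray2[i] covers 5 records centered
# on the matched Citation time (nominally 5 s of 1 Hz data), giving the overlapping
# 5 second moving average described in the docstring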
temperature_1 = np.ma.zeros(len(ind3))
lwc = np.ma.zeros(len(ind3))
ice = np.ma.zeros(len(ind3))
cdp = np.ma.zeros(len(ind3))
twc = np.ma.zeros(len(ind3))
iwc = np.ma.zeros(len(ind3))
td = np.ma.zeros(len(ind3))
w = np.ma.zeros(len(ind3))
P = np.ma.zeros(len(ind3))
mix = np.ma.zeros(len(ind3))
U = np.ma.zeros(len(ind3))
V = np.ma.zeros(len(ind3))
for i in np.arange(0,len(ind3)):
temperature_1[i] = np.ma.mean(temp1[indarray1[i]:indarray2[i]])
lwc[i] = np.ma.mean(temp2[indarray1[i]:indarray2[i]])
ice[i] = np.ma.mean(temp3[indarray1[i]:indarray2[i]])
cdp[i] = np.ma.mean(temp4[indarray1[i]:indarray2[i]])
twc[i] = np.ma.mean(temp5[indarray1[i]:indarray2[i]])
iwc[i] = np.ma.mean(temp6[indarray1[i]:indarray2[i]])
td[i] = np.ma.mean(temp7[indarray1[i]:indarray2[i]])
w[i] = np.ma.mean(temp8[indarray1[i]:indarray2[i]])
P[i] = np.ma.mean(temp9[indarray1[i]:indarray2[i]])
mix[i] = np.ma.mean(temp10[indarray1[i]:indarray2[i]])
U[i] = np.ma.mean(temp11[indarray1[i]:indarray2[i]])
V[i] = np.ma.mean(temp12[indarray1[i]:indarray2[i]])
#Find average N(D)
ND_sub_a = np.ma.zeros(ND[0,:].shape)
ND_aver = np.ma.zeros([ind3.shape[0],ND[0,:].shape[0]])
for i in np.arange(0,ind3.shape[0]):
if indarray2[i] > ND.shape[0]:
print('indarray2 is too big')
break
ND_sub = ND[indarray1[i]:indarray2[i],:]
ind = np.where(ND_sub < 0)
ND_sub[ind] = np.ma.masked
for j in np.arange(ND.shape[1]):
ND_sub_a[j] = np.ma.mean(ND_sub[:,j])
ND_aver[i,:] = ND_sub_a
elif cit_aver2:
#Discrete average tech.
temp1 = fl['temperature']['data'][ind3]
temp2 = fl['lwc1']['data'][ind3]
temp3 = fl['mso_frequency']['data'][ind3]
temp4 = fl['Conc_CDP']['data'][ind3]
temp5 = fl['twc']['data'][ind3]
temp6 = fl['Nev_IWC']['data'][ind3]
temp7 = fl['dewpoint_temperature1']['data'][ind3]
temp8 = fl['Wwind']['data'][ind3]
temp9 = fl['static_pressure']['data'][ind3]
temp10 = fl['mixing_ratio']['data'][ind3]
temp11 = fl['Uwind']['data'][ind3]
temp12 = fl['Vwind']['data'][ind3]
ND = ND[ind3,:]
max_dtime = cit_time2.max()
min_dtime = cit_time2.min()
total_seconds = max_dtime-min_dtime
total_seconds = total_seconds.total_seconds()
dtime_1s = np.zeros(int(total_seconds)-1,dtype=object)
its = dtime_1s.shape[0]/5.
dtime_5s = np.zeros(int(its),dtype=object)
array = np.ma.zeros(int(its))
array2 = np.ma.zeros(int(its))
array3 = np.ma.zeros(int(its))
array4 = np.ma.zeros(int(its))
array5 = np.ma.zeros(int(its))
array6 = np.ma.zeros(int(its))
array7 = np.ma.zeros(int(its))
array8 = np.ma.zeros(int(its))
array9 = np.ma.zeros(int(its))
array10 = np.ma.zeros(int(its))
array11 = np.ma.zeros(int(its))
array12 = np.ma.zeros(int(its))
array13 = np.ma.zeros(int(its))
array14 = np.ma.zeros(int(its))
array15 = np.ma.zeros(int(its))
#create a monotonically increasing datetime array with 5 second spacing
for i in np.arange(0,int(its)):
dtime_5s[i] = min_dtime + datetime.timedelta(seconds = i*5)
print('time averaging into 5 second averages...')
for i in np.arange(1,dtime_5s.shape[0]):
time_left = dtime_5s[i-1]
time_right = dtime_5s[i]
ind = np.where(cit_time2 >= time_left)
ind2 = np.where(cit_time2 < time_right)
ind3 = np.intersect1d(ind,ind2)
if len(ind3) >= 1:
temp = temp1[ind3]
array[i-1] = np.ma.mean(temp)
temp = temp2[ind3]
array2[i-1] = np.ma.mean(temp)
temp = temp3[ind3]
array3[i-1] = np.ma.mean(temp)
temp = temp4[ind3]
array4[i-1] = np.ma.mean(temp)
temp = temp5[ind3]
array5[i-1] = np.ma.mean(temp)
temp = temp6[ind3]
array6[i-1] = np.ma.mean(temp)
temp = temp7[ind3]
array7[i-1] = np.ma.mean(temp)
temp = temp8[ind3]
array8[i-1] = np.ma.mean(temp)
temp = temp9[ind3]
array9[i-1] = np.ma.mean(temp)
temp = temp10[ind3]
array10[i-1] = np.ma.mean(temp)
temp = temp11[ind3]
array11[i-1] = np.ma.mean(temp)
temp = temp12[ind3]
array12[i-1] = np.ma.mean(temp)
temp = cit_lat[ind3]
array13[i-1] = np.ma.mean(temp)
temp = cit_lon[ind3]
array14[i-1] = np.ma.mean(temp)
temp = cit_alt[ind3] # use the same 5 s window indices as the other variables
array15[i-1] = np.ma.mean(temp)
else:
array[i-1] = np.ma.masked
array2[i-1] = np.ma.masked
array3[i-1] = np.ma.masked
array4[i-1] = np.ma.masked
array5[i-1] = np.ma.masked
array6[i-1] =np.ma.masked
array7[i-1] = np.ma.masked
array8[i-1] = np.ma.masked
array9[i-1] = np.ma.masked
array10[i-1] = np.ma.masked
array11[i-1] = np.ma.masked
array12[i-1] = np.ma.masked
array13[i-1] = np.ma.masked
array14[i-1] = np.ma.masked
array15[i-1] = np.ma.masked
continue
#pre-allocate arrays
ND_sub_a = np.ma.zeros(ND[0,:].shape)
ND_aver = np.ma.zeros([dtime_5s.shape[0],ND[0,:].shape[0]])
#
ind = np.where(ND < 0)
ND[ind] = np.ma.masked
for i in np.arange(1,dtime_5s.shape[0]):
time_left = dtime_5s[i-1]
time_right = dtime_5s[i]
ind = np.where(cit_time2 >= time_left)
ind2 = np.where(cit_time2 < time_right)
ind3 = np.intersect1d(ind,ind2)
if len(ind3) >= 1:
ND_sub = ND[ind3,:]
for j in np.arange(ND.shape[1]):
ND_sub_a[j] = np.ma.mean(ND_sub[:,j])
ND_aver[i-1,:] = ND_sub_a
else:
ND_aver[i-1,:] = np.ma.masked
#get rid of last point (less than 5 obs needed for average)
temperature_1 = array[:-1]
lwc = array2[:-1]
ice = array3[:-1]
cdp = array4[:-1]
twc = array5[:-1]
iwc = array6[:-1]
td = array7[:-1]
w = array8[:-1]
P = array9[:-1]
mix = array10[:-1]
U = array11[:-1]
V = array12[:-1]
cit_lat = array13[:-1]
cit_lon = array14[:-1]
cit_alt = array15[:-1]
ND_aver = ND_aver[:-1,:]
#In reality our time should be the midpoint of each time interval. I will add 2.5 seconds to the 5s array
cit_time2 = dtime_5s[:-1] + datetime.timedelta(seconds=2.5)
#get rid of masked spatial cit data. The KD-tree doesn't like masked values (i.e. fill_values sneak in)
ind = cit_lon.mask
cit_lon = cit_lon[~ind]
cit_lat = cit_lat[~ind]
cit_alt = cit_alt[~ind]
cit_time2 = cit_time2[~ind]
temperature_1 = temperature_1[~ind]
lwc = lwc[~ind]
ice = ice[~ind]
cdp = cdp[~ind]
twc = twc[~ind]
iwc = iwc[~ind]
td = td[~ind]
w = w[~ind]
P = P[~ind]
mix = mix[~ind]
U = U[~ind]
V = V[~ind]
ND_aver = ND_aver[~ind,:]
ind = cit_lat.mask
cit_lon = cit_lon[~ind]
cit_lat = cit_lat[~ind]
cit_alt = cit_alt[~ind]
cit_time2 = cit_time2[~ind]
temperature_1 = temperature_1[~ind]
lwc = lwc[~ind]
ice = ice[~ind]
cdp = cdp[~ind]
twc = twc[~ind]
iwc = iwc[~ind]
td = td[~ind]
w = w[~ind]
P = P[~ind]
mix = mix[~ind]
U = U[~ind]
V = V[~ind]
ND_aver = ND_aver[~ind,:]
ind = cit_alt.mask
cit_lon = cit_lon[~ind]
cit_lat = cit_lat[~ind]
cit_alt = cit_alt[~ind]
cit_time2 = cit_time2[~ind]
temperature_1 = temperature_1[~ind]
lwc = lwc[~ind]
ice = ice[~ind]
cdp = cdp[~ind]
twc = twc[~ind]
iwc = iwc[~ind]
td = td[~ind]
w = w[~ind]
P = P[~ind]
mix = mix[~ind]
U = U[~ind]
V = V[~ind]
ND_aver = ND_aver[~ind,:]
else:
#no averaging tech.
temperature_1 = fl['temperature']['data'][ind3]
lwc = fl['lwc1']['data'][ind3]
ice = fl['mso_frequency']['data'][ind3]
cdp = fl['Conc_CDP']['data'][ind3]
twc = fl['twc']['data'][ind3]
iwc = fl['Nev_IWC']['data'][ind3]
td = fl['dewpoint_temperature1']['data'][ind3]
w = fl['Wwind']['data'][ind3]
P = fl['static_pressure']['data'][ind3]
mix = fl['mixing_ratio']['data'][ind3]
U = fl['Uwind']['data'][ind3]
V = fl['Vwind']['data'][ind3]
ND = ND[ind3,:]
#
# ND is in cm**-4 and dD+midpoints is in mm
#Find the echotop of Ku at near nadir
print('finding Ku echotop and constraining Cit...')
precip_yn = precip_echo_filt(apr['Ku'][:,12,:])
ind = np.where(precip_yn ==1)
ku_filt = np.squeeze(apr['Ku'][:,12,ind])
alt_filt = np.squeeze(apr['alt_gate'][:,12,ind])
echo = find_echo(ku_filt,alt_filt)
scan = 12
lat_0 = apr['lat'][scan,0]
lon_0 = apr['lon'][scan,0]
p2 = Proj(proj='laea', zone=10, ellps='WGS84',
lat_0=lat_0,
lon_0=lon_0)
x = apr['lon_gate'][:,scan,:]
y = apr['lat_gate'][:,scan,:]
x2,y2 = p2(x,y)
x3,y3 = p2(lon_0,lat_0)
x_c,y_c = p2(cit_lon,cit_lat)
alt_c = cit_alt
x4 = np.array([])
y4 = np.array([])
x2_c = np.array([])
y2_c = np.array([])
for j in np.arange(0,x2.shape[1]):
x4 = np.append(x4,x2[0,j]-x3)
y4 = np.append(y4,y2[0,j]-y3)
for j in np.arange(0,x_c.shape[0]):
x2_c = np.append(x2_c,x_c[j]-x3)
y2_c = np.append(y2_c,y_c[j]-y3)
R = np.sqrt(x4**2+y4**2)/1000.
R_c = np.sqrt(x2_c**2+y2_c**2)/1000.
R_echo = R[ind]
echo_func = interp1d(R_echo,echo,kind='cubic',bounds_error=False)
echo_c = echo_func(R_c)
ind = np.where(alt_c <= echo_c + 50) #can change this threshold, just arbitrary
cit_lon = cit_lon[ind]
cit_lat = cit_lat[ind]
cit_alt = cit_alt[ind]
cit_time2 = cit_time2[ind]
temperature_1 = temperature_1[ind]
lwc = lwc[ind]
ice = ice[ind]
cdp = cdp[ind]
twc = twc[ind]
iwc = iwc[ind]
td = td[ind]
w = w[ind]
P = P[ind]
mix = mix[ind]
U = U[ind]
V = V[ind]
ND_aver = np.squeeze(ND_aver[ind,:])
R_c = R_c[ind]
echo_c = echo_c[ind]
#
if BB:
print('Constraining Cit above BB..')
bb_func = interp1d(R,bb_long,kind='cubic',bounds_error=False)
bb_c = bb_func(R_c)
ind = np.where(cit_alt >= bb_c - 100) #can change this threshold, just arbitrary
cit_lon = cit_lon[ind]
cit_lat = cit_lat[ind]
cit_alt = cit_alt[ind]
cit_time2 = cit_time2[ind]
temperature_1 = temperature_1[ind]
lwc = lwc[ind]
ice = ice[ind]
cdp = cdp[ind]
twc = twc[ind]
iwc = iwc[ind]
td = td[ind]
w = w[ind]
P = P[ind]
mix = mix[ind]
U = U[ind]
V = V[ind]
ND_aver = np.squeeze(ND_aver[ind,:])
R_c = R_c[ind]
echo_c = echo_c[ind]
#
#Mask out warmer than 0 (i.e. when particles melt)
ind = np.where(temperature_1 > 0)
ND_aver[ind,:] = np.ma.masked
#
#Calculate some PSD parameters (could add other things here, i.e. running IGF for Mu,lambda and N0)
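#Note: rho_e and Dmm are helper functions assumed to be defined elsewhere in this
#module. Per the unit comment above, ND is in cm^-4 and dD/midpoints are in mm:
#midpoints/10. and dD/10. pass cm to rho_e, while ND_aver*1e8 (m^-4) and
#midpoints/1000., dD/1000. (m) are passed to Dmm. The trailing integer flags select
#the assumed mass-dimension relation (see the inline BF/HY comments and the rho_*
#keys packed into the matched dictionary below).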
rho_tot2,iwc_HY = rho_e(midpoints/10.,dD/10.,ND_aver,np.zeros(ND_aver.shape),2,2,twc,return_ice=True) #HYs
rho_tot3,iwc_BF = rho_e(midpoints/10.,dD/10.,ND_aver,np.zeros(ND_aver.shape),2,3,twc,return_ice=True) #BF
rho_tot4 = rho_e(midpoints/10.,dD/10.,ND_aver,np.zeros(ND_aver.shape),2,4,twc) #BF
dmm_BF = Dmm(ND_aver*1e8,midpoints/1000.,dD/1000.,0)
dmm_HY = Dmm(ND_aver*1e8,midpoints/1000.,dD/1000.,1)
# rho_tot2 = 0
# rho_tot3 =0
# dmm_BF = Dmm(ND_aver/1e8,midpoints/1000.,dD/1000.,0)
# dmm_HY = Dmm(ND_aver/1e8,midpoints/1000.,dD/1000.,1)
#
#Print out number of potential match points
print(cit_lon.shape)
#
#Make 1-D arrays of radar spatial data
apr_x = np.ravel(apr['lon_gate'][:,:,:])
apr_y = np.ravel(apr['lat_gate'][:,:,:])
apr_alt = np.ravel(apr['alt_gate'][:,:,:])
apr_t = np.ravel(apr['time_gate'][:,:,:])
#
#Make 1-D arrays of radar data
apr_ku = np.ma.ravel(apr['Ku'][:,:,:])
apr_ka = np.ma.ravel(apr['Ka'][:,:,:])
apr_w = np.ma.ravel(apr['W'][:,:,:])
#
#If you want to neglect masked gates throw them out here (Speeds things up and gives better results)
#ku
ind = apr_ku.mask
apr_x = apr_x[~ind]
apr_y = apr_y[~ind]
apr_alt = apr_alt[~ind]
apr_t = apr_t[~ind]
apr_ku = apr_ku[~ind]
apr_ka = apr_ka[~ind]
apr_w = apr_w[~ind]
#ka
ind = apr_ka.mask
apr_x = apr_x[~ind]
apr_y = apr_y[~ind]
apr_alt = apr_alt[~ind]
apr_t = apr_t[~ind]
apr_ku = apr_ku[~ind]
apr_ka = apr_ka[~ind]
apr_w = apr_w[~ind]
#w
ind = apr_w.mask
apr_x = apr_x[~ind]
apr_y = apr_y[~ind]
apr_alt = apr_alt[~ind]
apr_t = apr_t[~ind]
apr_ku = apr_ku[~ind]
apr_ka = apr_ka[~ind]
apr_w = apr_w[~ind]
#
#Use projection to get Cartesian distances
apr_x2,apr_y2 = p(apr_x,apr_y)
cit_x2,cit_y2 = p(cit_lon,cit_lat)
#
#Kdtree things (this is where the matchups are found)
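#Sketch of the matchup: build a 3-D KD-tree of radar gate positions (x, y, altitude)
#and, for every Citation point, query the query_k nearest gates within
#query_distance_upper_bound. Gates beyond that bound come back flagged with
#index == apr_ku.shape[0], which is handled below. leafsize and the query_*
#parameters are assumed to be arguments set earlier in this function.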
kdt = cKDTree(list(zip(apr_x2, apr_y2, apr_alt)), leafsize=leafsize)  # list() so the coordinates work under Python 3
prdistance, prind1d = kdt.query(list(zip(cit_x2, cit_y2, cit_alt)), k=query_k, eps=query_eps, p=query_p,
distance_upper_bound=query_distance_upper_bound, n_jobs=query_n_jobs)
#
#if query_k > 1, more than one gate is considered and an average is needed
if query_k > 1:
#Issue with prind1d equaling the size of apr_ku... that means the gate is outside your allowed upper bound (sphere_size)
ind = np.where(prind1d == apr_ku.shape[0])
if len(ind[0]) > 0 or len(ind[1]) > 0:
print('gate was outside distance upper bound, eliminating those instances')
#mask values outside search area. Actually setting values to 0?
# prind1d = np.ma.masked_where(prind1d == apr_ku.shape[0],prind1d)
# prdistance = np.ma.masked_where(prind1d == apr_ku.shape[0],prdistance)
prind1d[ind] = np.ma.masked
prdistance[ind] = np.ma.masked
if QC:
#Eliminate observations that are outliers before averaging the data (i.e. get rid of skin paints)
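#The outlier screen below uses an IQR fence per matched point: Q_1 = 25th and
#Q_2 = 75th percentile of the neighboring gates, IQR = Q_2 - Q_1, and gates with
#reflectivity >= Q_2 + 1.5*IQR are masked (only the upper fence is applied,
#targeting anomalously high returns such as skin paints).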
Ku_sub = apr_ku[prind1d]
Ku_sub = np.ma.masked_where(prind1d == 0,Ku_sub)
Q_med = np.array([])
Q_max = np.array([])
Q_min = np.array([])
Q_1 = np.array([])
Q_2 = np.array([])
n_1 = np.array([])
for i in np.arange(Ku_sub.shape[0]):
kk = Ku_sub[i,:]
numberofmasks = kk.mask
kk = kk[~numberofmasks]
if len(kk) < 1:
Q_med = np.append(Q_med,np.nan)
Q_max = np.append(Q_max,np.nan)
Q_min = np.append(Q_min,np.nan)
Q_1 = np.append(Q_1,np.nan)
Q_2 = np.append(Q_2,np.nan)
n_1 = np.append(n_1,0)
continue
Q = np.nanpercentile(kk,[0,10,25,50,75,90,100])
Q_med = np.append(Q_med,Q[3])
Q_max = np.append(Q_max,Q[6])
Q_min = np.append(Q_min,Q[0])
Q_1 = np.append(Q_1,Q[2])
Q_2 = np.append(Q_2,Q[4])
numberofmasks = np.isnan(kk)
kk = kk[~numberofmasks]
#print(notmask)
notmask = kk.shape[0]
n_1 = np.append(n_1,notmask)
IQR = Q_2 - Q_1
outlierup = Q_2 + 1.5*IQR
outlierdown = Q_1- 1.5*IQR
IQR_ku = IQR
Ku_sub = apr_ku[prind1d]
Ku_sub = np.ma.masked_where(prind1d == 0,Ku_sub)
for i in np.arange(Ku_sub.shape[0]):
Ku_subsub = Ku_sub[i,:]
Ku_subsub = np.ma.masked_where(Ku_subsub >= outlierup[i],Ku_subsub)
Ku_sub[i,:] = Ku_subsub
Ka_sub = apr_ka[prind1d]
Ka_sub = np.ma.masked_where(prind1d == 0,Ka_sub)
Q_med = np.array([])
Q_max = np.array([])
Q_min = np.array([])
Q_1 = np.array([])
Q_2 = np.array([])
n_2 = np.array([])
for i in np.arange(Ka_sub.shape[0]):
kk = Ka_sub[i,:]
numberofmasks = kk.mask
kk = kk[~numberofmasks]
if len(kk) < 1:
Q_med = np.append(Q_med,np.nan)
Q_max = np.append(Q_max,np.nan)
Q_min = np.append(Q_min,np.nan)
Q_1 = np.append(Q_1,np.nan)
Q_2 = np.append(Q_2,np.nan)
n_2 = np.append(n_2,0)
continue
Q = np.nanpercentile(kk,[0,10,25,50,75,90,100])
Q_med = np.append(Q_med,Q[3])
Q_max = np.append(Q_max,Q[6])
Q_min = np.append(Q_min,Q[0])
Q_1 = np.append(Q_1,Q[2])
Q_2 = np.append(Q_2,Q[4])
numberofmasks = np.isnan(kk)
kk = kk[~numberofmasks]
notmask = kk.shape[0]
n_2 = np.append(n_2,notmask)
IQR = Q_2 - Q_1
outlierup = Q_2 + 1.5*IQR
outlierdown = Q_1- 1.5*IQR
IQR_ka = IQR
Ka_sub = apr_ka[prind1d]
Ka_sub = np.ma.masked_where(prind1d == 0,Ka_sub)
for i in np.arange(Ka_sub.shape[0]):
Ka_subsub = Ka_sub[i,:]
Ka_subsub = np.ma.masked_where(Ka_subsub >= outlierup[i],Ka_subsub)
Ka_sub[i,:] = Ka_subsub
W_sub = apr_w[prind1d]
W_sub = np.ma.masked_where(prind1d == 0,W_sub)
Q_med = np.array([])
Q_max = np.array([])
Q_min = np.array([])
Q_1 = np.array([])
Q_2 = np.array([])
n_3 = np.array([])
for i in np.arange(W_sub.shape[0]):
kk = W_sub[i,:]
numberofmasks = kk.mask
kk = kk[~numberofmasks]
if len(kk) < 1:
Q_med = np.append(Q_med,np.nan)
Q_max = np.append(Q_max,np.nan)
Q_min = np.append(Q_min,np.nan)
Q_1 = np.append(Q_1,np.nan)
Q_2 = np.append(Q_2,np.nan)
n_3 = np.append(n_3,0)
continue
Q = np.nanpercentile(kk,[0,10,25,50,75,90,100])
Q_med = np.append(Q_med,Q[3])
Q_max = np.append(Q_max,Q[6])
Q_min = np.append(Q_min,Q[0])
Q_1 = np.append(Q_1,Q[2])
Q_2 = np.append(Q_2,Q[4])
numberofmasks = np.isnan(kk)
kk = kk[~numberofmasks]
#print(notmask)
notmask = kk.shape[0]
n_3 = np.append(n_3,notmask)
IQR = Q_2 - Q_1
outlierup = Q_2 + 1.5*IQR
outlierdown = Q_1- 1.5*IQR
IQR_w = IQR
W_sub = apr_w[prind1d]
W_sub = np.ma.masked_where(prind1d == 0,W_sub)
for i in np.arange(W_sub.shape[0]):
W_subsub = W_sub[i,:]
W_subsub = np.ma.masked_where(W_subsub >= outlierup[i],W_subsub)
W_sub[i,:] = W_subsub
apr_DFR = apr_ku - apr_ka
apr_DFR2 = apr_ku - apr_w
apr_DFR3 = apr_ka - apr_w
#Barnes weighting
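#Barnes (Gaussian distance) weighting sketch: each matched gate gets weight
#w_i = exp(-d_i^2 / K_d^2); reflectivities are converted from dBZ to linear Z,
#the weighted mean is formed, and the result is converted back to dBZ:
#Z_bar = 10*log10( sum(w_i * 10^(Z_i/10)) / sum(w_i) ).
#K_d is assumed to be a length-scale argument defined earlier in this function.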
ku_getridof0s = Ku_sub
ku_getridof0s = np.ma.masked_where(prind1d == 0,ku_getridof0s)
ku_getridof0s = np.ma.masked_where(np.isnan(ku_getridof0s),ku_getridof0s)
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(ku_getridof0s), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * 10. **(ku_getridof0s / 10.),axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
ku_temp = 10. * np.ma.log10(w1/w2)
#Find weighted STD
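#Despite the variable name, this is the RMS deviation of the matched gates about
#the Barnes-weighted mean: sqrt( sum_j (Z_ij - Z_bar_i)^2 / n_i ), with n_i the
#number of unmasked gates from the QC step above.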
IQR_ku2 = np.ma.zeros([ku_getridof0s.shape[0]])
for i in np.arange(ku_getridof0s.shape[0]):
ts = np.ma.zeros(len(ku_getridof0s[i,:]))
for j in np.arange(0,len(ku_getridof0s[i,:])):
diffs = np.ma.subtract(ku_getridof0s[i,j],ku_temp[i])
diffs = np.ma.power(diffs,2.)
ts[j] = diffs
temporary = np.ma.sqrt((np.ma.sum(ts)/n_1[i]))
IQR_ku2[i] = temporary
ka_getridof0s = Ka_sub
ka_getridof0s = np.ma.masked_where(prind1d == 0,ka_getridof0s)
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(ka_getridof0s), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * 10. **(ka_getridof0s / 10.),axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
ka_temp = 10. * np.ma.log10(w1/w2)
#Find weighted STD
IQR_ka2 = np.ma.zeros([ka_getridof0s.shape[0]])
for i in np.arange(ka_getridof0s.shape[0]):
ts = np.ma.zeros(len(ka_getridof0s[i,:]))
for j in np.arange(0,len(ka_getridof0s[i,:])):
diffs = np.ma.subtract(ka_getridof0s[i,j],ka_temp[i])
diffs = np.ma.power(diffs,2.)
ts[j] = diffs
temporary = np.ma.sqrt((np.ma.sum(ts)/n_2[i]))
IQR_ka2[i] = temporary
w_getridof0s = W_sub
w_getridof0s = np.ma.masked_where(prind1d == 0,w_getridof0s)
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(w_getridof0s), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * 10. **(w_getridof0s / 10.),axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
w_temp = 10. * np.ma.log10(w1/w2)
#Find weighted STD
IQR_w2 = np.ma.zeros([w_getridof0s.shape[0]])
for i in np.arange(w_getridof0s.shape[0]):
ts = np.ma.zeros(len(w_getridof0s[i,:]))
for j in np.arange(0,len(w_getridof0s[i,:])):
diffs = np.ma.subtract(w_getridof0s[i,j],w_temp[i])
diffs = np.ma.power(diffs,2.)
ts[j] = diffs
temporary = np.ma.sqrt((np.ma.sum(ts)/n_3[i]))
IQR_w2[i] = temporary
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(prdistance), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * prdistance,axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
dis_temp = w1/w2
Ku_gate = ku_temp
Ka_gate = ka_temp
W_gate = w_temp
DFR_gate = ku_temp - ka_temp
DFR2_gate = ku_temp - w_temp
DFR3_gate = ka_temp - w_temp
#
else:
#No QC: use all matched gates without outlier filtering
Ku_sub = apr_ku[prind1d]
Ku_sub = np.ma.masked_where(prind1d == 0,Ku_sub)
Ka_sub = apr_ka[prind1d]
Ka_sub = np.ma.masked_where(prind1d == 0,Ka_sub)
W_sub = apr_w[prind1d]
W_sub = np.ma.masked_where(prind1d == 0,W_sub)
apr_DFR = apr_ku - apr_ka
apr_DFR2 = apr_ku - apr_w
apr_DFR3 = apr_ka - apr_w
#
#Barnes weighting
ku_getridof0s = Ku_sub
ku_getridof0s = np.ma.masked_where(prind1d == 0,ku_getridof0s)
ku_getridof0s = np.ma.masked_where(np.isnan(ku_getridof0s),ku_getridof0s)
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(ku_getridof0s), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * 10. **(ku_getridof0s / 10.),axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
ku_temp = 10. * np.ma.log10(w1/w2)
ka_getridof0s = Ka_sub
ka_getridof0s = np.ma.masked_where(prind1d == 0,ka_getridof0s)
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(ka_getridof0s), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * 10. **(ka_getridof0s / 10.),axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
ka_temp = 10. * np.ma.log10(w1/w2)
w_getridof0s = W_sub
w_getridof0s = np.ma.masked_where(prind1d == 0,w_getridof0s)
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(w_getridof0s), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * 10. **(w_getridof0s / 10.),axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
w_temp = 10. * np.ma.log10(w1/w2)
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(prdistance), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 * prdistance,axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
dis_temp = w1/w2
Ku_gate = ku_temp
Ka_gate = ka_temp
W_gate = w_temp
DFR_gate = ku_temp - ka_temp
DFR2_gate = ku_temp - w_temp
DFR3_gate = ka_temp - w_temp
#
#append current lat,lon and alt of the citation plane
lat_c = cit_lat
lon_c = cit_lon
alt_c = cit_alt
t_c = cit_time2
T_c = temperature_1
lwc_c = lwc
ice_c = ice
cdp_c = cdp
twc_c = twc
iwc_c = iwc
#
#Use plane location for barnes averaged radar value
lat_r = cit_lat
lon_r = cit_lon
alt_r = cit_alt
t_r = cit_time2
#
dis_r = dis_temp
ind_r = np.nan
#Calculate time difference, weighted the same as everything else
t_tiled = np.empty([t_c.shape[0],query_k],dtype=object)
for i in np.arange(0,t_c.shape[0]):
t_tiled[i,:] = t_c[i]
diftime = apr_t[prind1d] - t_tiled
diftime2 = np.empty(diftime.shape)
for i in np.arange(0,diftime.shape[0]):
for j in np.arange(0,diftime.shape[1]):
diftime2[i,j] = diftime[i,j].total_seconds()
W_d_k = np.ma.array(np.exp(-1*prdistance**2./K_d**2.))
W_d_k2 = np.ma.masked_where(np.ma.getmask(diftime2), W_d_k.copy())
w1 = np.ma.sum(W_d_k2 *diftime2,axis=1)
w2 = np.ma.sum(W_d_k2, axis=1)
dif_temp = w1/w2
dif_t = dif_temp
#
else:
#For closest gate: Tested 11/09/17
#If a gate is outside the search sphere, remove the flagged data (index == apr_ku.shape[0])
ind = np.where(prind1d == apr_ku.shape[0])
if len(ind[0]) > 0:
print('gate was outside distance upper bound, eliminating those instances')
#mask ind and distances that are outside the search area
prind1d[ind] = np.ma.masked
prdistance[ind] = np.ma.masked
#
ku_temp = apr_ku[prind1d]
ka_temp = apr_ka[prind1d]
w_temp = apr_w[prind1d]
ku_temp = np.ma.masked_where(prind1d == 0,ku_temp)
ka_temp = np.ma.masked_where(prind1d == 0,ka_temp)
w_temp = np.ma.masked_where(prind1d == 0,w_temp)
dfr_temp = ku_temp - ka_temp
dfr2_temp = ku_temp - w_temp
dfr3_temp = ka_temp - w_temp
Ku_gate = ku_temp
Ka_gate = ka_temp
W_gate = w_temp
DFR_gate = dfr_temp
DFR2_gate = dfr2_temp
DFR3_gate = dfr3_temp
#
#append current lat,lon and alt of the citation plane
lat_c = cit_lat
lon_c = cit_lon
alt_c = cit_alt
t_c = cit_time2
T_c = temperature_1
lwc_c = lwc
ice_c = ice
cdp_c = cdp
twc_c = twc
iwc_c = iwc
#
diftime = apr_t[prind1d] - t_c
diftime2 = np.empty(diftime.shape)
for i in np.arange(0,diftime.shape[0]):
diftime2[i] = diftime[i].total_seconds()
#Get radar gate info and append it
lat_r = apr_y[prind1d]
lon_r = apr_x[prind1d]
alt_r = apr_alt[prind1d]
t_r = apr_t[prind1d]
dis_r = prdistance
ind_r = prind1d
dif_t = diftime2
#Make dictionaries to hold all of the data
matcher = {}
Cit = {}
APR = {}
matched = {}
kdtree = {}
info_c = {}
info_r = {}
info_m = {}
info_k = {}
#Pack values into the dictionaries for export
info_k['prind1d'] = 'Index in the raveled apr3 array of the selected gate/gates. Units = None'
info_k['prdistance'] = 'Cartesian distance between Citation and "matched" radar gate. This will be a barnes average if query_k is greater than 1. Units = meters'
info_k['query_k'] = 'How many gates were considered to be matched. Units = None'
kdtree['prind1d'] = prind1d
kdtree['prdistance'] = prdistance
kdtree['query_k'] = query_k
kdtree['info'] = info_k
info_c['lat'] = 'Latitude of the citation aircraft. Units = Degrees'
info_c['lon'] = 'Longitude of the Citation aircraft. Units = Degrees'
info_c['alt'] = 'Altitude above sea level of Citation aircraft. Units = meters'
info_c['time'] = 'Time of observation in the Citation aircraft. Units = datetime'
info_c['temperature'] = 'Temperature observed on the Citation aircraft. Units = Degrees C'
info_c['lwc'] = 'Liquid water content measured using the King hot wire probe. Units = grams per meter cubed'
info_c['iwc'] = 'Ice water content estimated from the Nevzorov probe. Units = grams per meter cubed'
info_c['ice'] = 'Frequency from Rosemount Icing detector. Units = Hz'
info_c['cdp'] = 'Cloud droplet concentration measured from the CDP. Units = Number per cc'
info_c['twc'] = 'Nevzorov total water content measured by the deep cone. Units = grams per meter cubed'
info_c['td'] = 'Dewpoint temperature. Units = Degrees Celsius'
info_c['w'] = 'Vertical velocity, Units = meters per second'
info_c['P'] = 'static pressure, Units = ?'
info_c['mix'] = 'mixing ratio, Units = none (i.e. kg/kg)'
info_c['U'] = 'U component of the wind. Units = meters per second'
info_c['V'] = 'V component of the wind. Units = meters per second'
info_r['lat'] = 'Latitude of the center of the radar gate. Units = Degrees'
info_r['lon'] = 'Longitude of the center of the radar gate. Units = Degrees'
info_r['alt'] = 'Altitude above sea level of the radar gate. Units = meters'
info_r['time'] = 'Time of observation at the start of the ray. Units = datetime'
info_r['Ku'] = 'Ku band measured reflectivity at the gate. Units = dBZ'
info_r['Ka'] = 'Ka band measured reflectivity at the gate. Units = dBZ'
info_r['W'] = 'W band measured reflectivity at the gate. Units = dBZ'
info_r['DFR'] = 'Ku - Ka band measured reflectivity at the gate. Units = dB'
info_r['DFR2'] = 'Ku - W band measured reflectivity at the gate. Units = dB'
info_r['DFR3'] = 'Ka - W band measured reflectivity at the gate. Units = dB'
info_m['lat_r'] = 'Latitude of the center of the matched radar gates. Units = Degrees'
info_m['lon_r'] = 'Longitude of the center of the matched radar gates. Units = Degrees'
info_m['alt_r'] = 'Altitude above sea level of the matched radar gates. Units = meters'
info_m['time_r'] = 'Time of the matched observation at the start of the ray. Units = datetime'
info_m['lat_c'] = 'Latitude of the citation aircraft. Units = Degrees'
info_m['lon_c'] = 'Longitude of the Citation aircraft. Units = Degrees'
info_m['alt_c'] = 'Altitude above sea level of Citation aircraft. Units = meters'
info_m['time_c'] = 'Time of observation in the Citation aircraft. Units = datetime'
info_m['Ku'] = 'Ku band measured reflectivity matched to Citation location. Units = dBZ'
info_m['Ka'] = 'Ka band measured reflectivity matched to Citation location. Units = dBZ'
info_m['W'] = 'W band measured reflectivity matched to Citation location. Units = dBZ'
info_m['DFR'] = 'Ku - Ka band measured reflectivity matched to Citation location. Units = dB'
info_m['DFR2'] = 'Ku - W band measured reflectivity matched to Citation location. Units = dB'
info_m['DFR3'] = 'Ka - W band measured reflectivity matched to Citation location. Units = dB'
info_m['dist'] = 'Cartesian distance between Citation and "matched" radar gate. This will be a barnes average if query_k is greater than 1. Units = meters'
info_m['dif_t'] = 'Time difference between the radar gate and the citation observation. Units = Seconds'
info_m['PSD'] = 'N(D) for the matched points. Units = meters ^ -4'
info_m['dD'] = 'Binwidths for the N(D). Units = meters'
info_m['midpoints'] = 'Bin midpoints for the N(D). Units= meters'
info_m['rho_BF'] = 'Effective density of the particles using the N(D), a and b from Brown and Francis 1995 and assuming an ellipsoidal fit of 0.6'
info_m['rho_HY'] = 'Effective density of the particles using the N(D), a and b from Heymsfield et al. 2004 and assuming an ellipsoidal fit of 0.6'
info_m['rho_NV'] = 'Effective density of the particles using the N(D), mass from the Nevzorov TWC, volume of an ellipsoid'
info_m['Dmm_BF'] = 'Two types: Dmm and Dmm_interp. Interp uses a simple interpolation, while Dmm is the bin that exceeds 50% of the accumulated mass. Median mass dimension using N(D) and a-b from Brown and Francis 1995'
info_m['Dmm_HY'] = 'Two types: Dmm and Dmm_interp. Interp uses a simple interpolation, while Dmm is the bin that exceeds 50% of the accumulated mass. Median mass dimension using N(D) and a-b from Heymsfield et al. 2004'
Cit['info'] = info_c
Cit['lat'] = cit_lat
Cit['lon'] = cit_lon
Cit['alt'] = cit_alt
Cit['time'] = cit_time2
Cit['temperature'] = T_c
Cit['lwc'] = lwc_c
Cit['ice'] = ice_c
Cit['cdp'] = cdp_c
Cit['twc'] = twc_c
Cit['iwc'] = iwc_c
Cit['td'] = td
Cit['w'] = w
Cit['P'] = P
Cit['mix'] = mix
Cit['U'] = U
Cit['V'] = V
APR['info'] = info_r
APR['lat'] = apr_y
APR['lon'] = apr_x
APR['alt'] = apr_alt
APR['Ku'] = apr_ku
APR['Ka'] = apr_ka
APR['W'] = apr_w
APR['DFR'] = apr_ku - apr_ka
APR['DFR2'] = apr_ku - apr_w
APR['DFR3'] = apr_ka - apr_w
APR['time'] = apr_t
matched['info'] = info_m
matched['Ku'] = Ku_gate
matched['Ka'] = Ka_gate
matched['W'] = W_gate
matched['DFR'] = DFR_gate
matched['DFR2'] = DFR2_gate
matched['DFR3'] = DFR3_gate
matched['lat_r'] = lat_r
matched['lon_r'] = lon_r
matched['alt_r'] = alt_r
matched['lat_c'] = lat_c
matched['lon_c'] = lon_c
matched['alt_c'] = alt_c
matched['time_r'] = t_r
matched['time_c'] = t_c
matched['dist'] = dis_r
matched['dif_t'] = dif_t
matched['PSD'] = ND_aver*1e8 #convert cm^-4 to m^-4
matched['dD'] = dD /1000. #convert to m
matched['midpoints'] = midpoints / 1000. #convert to m
matched['rho_BF'] = rho_tot3
matched['rho_HY'] = rho_tot2
matched['rho_NV'] = rho_tot4
matched['Dmm_BF'] = dmm_BF
matched['Dmm_HY'] = dmm_HY
matched['iwc_BF'] = iwc_BF
matched['iwc_HY'] = iwc_HY
if attenuation_correct:
matched['maxchange'] = maxchange
matched['lwc_prof'] = apr['lwc_prof']
matched['altbins_prof']= apr['altbins_prof']
matched['k_store'] = apr['k_store']
if attenuation_correct and BB:
matched['gas_w'] = apr['gas_w']
matched['gas_ku'] = apr['gas_ku']
matched['gas_ka'] = apr['gas_ka']
matched['liquid_w'] = apr['liquid']
matched['ice_w'] = apr['ice']
if return_indices:
matched['prind1d'] = prind1d
matched['APR_dim'] = apr['Ku'].shape
matched['time'] = apr['timedates']
matched['APR_lat'] = apr['lat_gate']
matched['APR_lon'] = apr['lon_gate']
matched['APR_alt'] = apr['alt_gate']
matched['APR_Ku'] = apr['Ku']
matched['APR_Ka'] = apr['Ka']
matched['APR_W'] = apr['W']
matched['R'] = R
matched['R_c'] = R_c
matched['echo_c'] = echo_c
matched['echo'] = echo
matched['R_echo'] = R_echo
matched['bb_long'] = bb_long
if query_k > 1 and QC:
matched['IQR_ku'] = IQR_ku
matched['IQR_ka'] = IQR_ka
matched['IQR_w'] = IQR_w
matched['n_1'] = n_1
matched['n_2'] = n_2
matched['n_3'] = n_3
matched['IQR_w_w'] = IQR_w2
matched['IQR_ka_w'] = IQR_ka2
matched['IQR_ku_w'] = IQR_ku2
#Not needed currently (RJC May 31 2017)
#matched['array index'] = ind_r
#matched['conc_hvps3'] = conc_hvps3
if slimfast:
matcher['matched'] = matched
matcher['Cit'] = Cit
else:
matcher['Cit'] = Cit
matcher['APR'] = APR
matcher['matched'] = matched
matcher['kdtree'] = kdtree
#Several plots to visualize data
if plotson:
fontsize=fontsize
matched = matcher
if query_k <= 1:
diftime = matched['matched']['time_r'] - matched['matched']['time_c']
diftime2 = np.array([])
for i in np.arange(0,diftime.shape[0]):
diftime2 = np.append(diftime2,diftime[i].total_seconds())
else:
diftime2= matched['matched']['dif_t']
fig1,axes = plt.subplots(1,2,)
#ax1 is the histogram of times
ax1 = axes[0]
ax1.hist(diftime2/60.,facecolor='b',edgecolor='k')
ax1.set_xlabel('$t_{gate} - t_{Cit}, [min]$')
ax1.set_ylabel('Number of gates')
ax1.set_title(matched['matched']['time_r'][0])
#ax2 is the histogram of distances
ax2 = axes[1]
distances = matched['matched']['dist']
ax2.hist(distances,facecolor='r',edgecolor='k')
ax2.set_xlabel('Distance, $[m]$')
ax2.set_ylabel('Number of gates')
ax2.set_title(matched['matched']['time_r'][0])
plt.tight_layout()
#Print some quick stats
print(distances.shape[0],np.nanmean(diftime2)/60.,np.nanmean(distances))
#
fig = plt.figure()
#ax3 is the swath plot to show radar and plane location
ax3 = plt.gca()
apr = apr3read(apr3filename)
lat3d = apr['lat_gate']
lon3d = apr['lon_gate']
alt3d = apr['alt_gate']
radar_n = apr['Ku']
lon_s = np.empty(alt3d.shape[1:])
lat_s = np.empty(alt3d.shape[1:])
swath = np.empty(alt3d.shape[1:])
for i in np.arange(0,alt3d.shape[2]):
for j in np.arange(0,alt3d.shape[1]):
ind = np.where(alt3d[:,j,i]/1000. > 3.5)
ind2 = np.where(alt3d[:,j,i]/1000. < 3.6)
ind3 = np.intersect1d(ind,ind2)
ind3= ind3[0]
l1 = lat3d[ind3,j,i]
l2 = lon3d[ind3,j,i]
k1 = radar_n[ind3,j,i]
lon_s[j,i] = l2
lat_s[j,i] = l1
swath[j,i] = k1
area_def = pr.geometry.AreaDefinition('areaD', 'IPHEx', 'areaD',
{'a': '6378144.0', 'b': '6356759.0',
'lat_0': '47.7998', 'lat_ts': '47.7998','lon_0': '-123.7066', 'proj': 'stere'},
400, 400,
[-70000., -70000.,
70000., 70000.])
bmap = pr.plot.area_def2basemap(area_def,resolution='l',ax=ax3)
bmap.drawcoastlines(linewidth=2)
bmap.drawstates(linewidth=2)
bmap.drawcountries(linewidth=2)
parallels = np.arange(-90.,90,4)
bmap.drawparallels(parallels,labels=[1,0,0,0],fontsize=12)
meridians = np.arange(180.,360.,4)
bmap.drawmeridians(meridians,labels=[0,0,0,1],fontsize=12)
bmap.drawmapboundary(fill_color='aqua')
bmap.fillcontinents(lake_color='aqua')
x,y = bmap(lon_s,lat_s)
swath[np.where(swath < 0)] = np.nan
pm1 = bmap.pcolormesh(x,y,swath,vmin=0,vmax=40,zorder=11,cmap='seismic')
cbar1 = plt.colorbar(pm1,label='$Z_m, [dBZ]$')
x2,y2 = bmap(matched['matched']['lon_c'],matched['matched']['lat_c'])
pm2 = bmap.scatter(x2,y2,c=diftime2/60.,marker='o',zorder=12,cmap='PuOr',edgecolor=[],vmin=-10,vmax=10)
cbar2 = plt.colorbar(pm2,label = '$\Delta{t}, [min]$')
ax3.set_ylabel('Latitude',fontsize=fontsize,labelpad=20)
ax3.set_xlabel('Longitude',fontsize=fontsize,labelpad=20)
plt.tight_layout()
plt.show()
#Plot timeseries of barnes averaged or closest gate.
plt.figure()
plt.plot(matched['matched']['time_c'],matched['matched']['Ku'],'b',label='Ku',lw=3)
plt.plot(matched['matched']['time_c'],matched['matched']['Ka'],'r',label='Ka',lw=3)
plt.plot(matched['matched']['time_c'],matched['matched']['W'],'g',label='W',lw=3)
#plt.plot(matched['matched']['time_c'],matched['matched']['DFR'],'--b',label='Ku-Ka')
#plt.plot(matched['matched']['time_c'],matched['matched']['DFR2'],'--r',label='Ku-W')
#plt.plot(matched['matched']['time_c'],matched['matched']['DFR3'],'--g',label='Ka-W')
plt.xlabel('Time')
plt.ylabel('Z, [dBZ]')
plt.legend()
plt.show()
print('done')
return matcher
def apr3read(filename):
"""
===========
This is for reading in apr3 hdf (HDF5, updated 2/21/18) files from OLYMPEX and returning them all in one dictionary
===========
filename = filename of the apr3 file
"""
apr = {}
flag = 0
##Radar variables in the hdf file found by hdf.datasets
radar_freq = 'zhh14' #Ku
radar_freq2 = 'zhh35' #Ka
radar_freq3 = 'z95s' #W
radar_freq4 = 'ldr14' #LDR
vel_str = 'vel14' #Doppler
##
hdf = h5py.File(filename,"r")
listofkeys = hdf['lores'].keys()
alt = hdf['lores']['alt3D'][:]
lat = hdf['lores']['lat'][:]
lon = hdf['lores']['lon'][:]
time = hdf['lores']['scantime'][:]
surf = hdf['lores']['surface_index'][:]
isurf = hdf['lores']['isurf'][:]
plane = hdf['lores']['alt_nav'][:]
radar = hdf['lores'][radar_freq][:]
radar2 = hdf['lores'][radar_freq2][:]
radar4 = hdf['lores'][radar_freq4][:]
vel = hdf['lores']['vel14c'][:]
lon3d = hdf['lores']['lon3D'][:]
lat3d = hdf['lores']['lat3D'][:]
alt3d = hdf['lores']['alt3D'][:]
#see if there is W band
if 'z95s' in listofkeys:
if 'z95n' in listofkeys:
radar_nadir = hdf['lores']['z95n']
radar_scanning = hdf['lores']['z95s']
radar3 = radar_scanning
##uncomment if you want the high sensitivity beam as the nadir scan (WARNING, CALIBRATION)
#radar3[:,12,:] = radar_nadir[:,12,:]
else:
radar3 = hdf['lores']['z95s']
print('No vv, using hh')
else:
radar3 = np.ma.array([])
flag = 1
print('No W band')
##convert time to datetimes
time_dates = np.empty(time.shape,dtype=object)
for i in np.arange(0,time.shape[0]):
for j in np.arange(0,time.shape[1]):
tmp = datetime.datetime.utcfromtimestamp(time[i,j])
time_dates[i,j] = tmp
#Create a time at each gate (assuming it is the same down each ray, there is a better way to do this)
time_gate = np.empty(lat3d.shape,dtype=object)
for k in np.arange(0,550):
for i in np.arange(0,time_dates.shape[0]):
for j in np.arange(0,time_dates.shape[1]):
time_gate[k,i,j] = time_dates[i,j]
#Quality control (masked where invalid)
radar = np.ma.masked_where(radar <= -99,radar)
radar2 = np.ma.masked_where(radar2 <= -99,radar2)
radar3 = np.ma.masked_where(radar3 <= -99,radar3)
radar4 = np.ma.masked_where(radar4 <= -99,radar4)
#Get rid of NaNs; the new HDF files have them built in
radar = np.ma.masked_where(np.isnan(radar),radar)
radar2 = np.ma.masked_where(np.isnan(radar2),radar2)
radar3 = np.ma.masked_where(np.isnan(radar3),radar3)
radar4 = np.ma.masked_where(np.isnan(radar4),radar4)
apr['Ku'] = radar
apr['Ka'] = radar2
apr['W'] = radar3
apr['DFR_1'] = radar - radar2 #Ku - Ka
if flag == 0:
apr['DFR_3'] = radar2 - radar3 #Ka - W
apr['DFR_2'] = radar - radar3 #Ku - W
apr['info'] = 'The shape of these arrays are: Radar[Vertical gates,Time/DistanceForward]'
else:
apr['DFR_3'] = np.array([]) #Ka - W
apr['DFR_2'] = np.array([]) #Ku - W
apr['info'] = 'The shape of these arrays are: Radar[Vertical gates,Time/DistanceForward], Note No W band avail'
apr['ldr'] = radar4
apr['vel'] = vel
apr['lon'] = lon
apr['lat'] = lat
apr['alt_gate'] = alt3d
apr['alt_plane'] = plane
apr['surface'] = isurf
apr['time']= time
apr['timedates']= time_dates
apr['time_gate'] = time_gate
apr['lon_gate'] = lon3d
apr['lat_gate'] = lat3d
# fileheader = hdf.select('fileheader')
roll = hdf['lores']['roll']
pitch = hdf['lores']['pitch']
drift = hdf['lores']['drift']
ngates = alt.shape[0]
apr['ngates'] = ngates
apr['roll'] = roll
apr['pitch'] = pitch
apr['drift'] = drift
_range = np.arange(15,550*30,30)
_range = np.asarray(_range,float)
ind = np.where(_range >= plane.mean())
_range[ind] = np.nan
apr['range'] = _range
return apr
def atten_cor2(filename1,fl,percentile,matlab_g,lwc_alt=True):
"""
========
This is a first order attenuation correction algorithm for Ku,Ka and W band radar.
filename1: string, filename of the apr3 file
fl: dict, Citation flight-level data
percentile: percentile threshold used to build the LWC profile
matlab_g: dict, from the MATLAB calculation of gaseous attenuation
lwc_alt: bool, if True correct the King-probe LWC for ice response (Cober et al. 2001)
========
"""
#Read in APR data
apr = apr3read(filename1)
#Determine altitude bins for profile of LWC
altbins = np.arange(1000,9000,500)
altmidpoints = np.arange(1500,9000,500)
coldcloud = True
#lwc1 = King probe
#twc = Nevzorov total water content
lwc1 = fl['lwc1']['data']
twc = fl['twc']['data']
#Get rid of negative values
lwc1 = np.ma.masked_where(lwc1 <=0,lwc1)
twc = np.ma.masked_where(lwc1 <=0,twc)
T = fl['temperature']['data']
if coldcloud:
lwc1 = np.ma.masked_where(T > -5,lwc1)
twc = np.ma.masked_where(T > -5,twc)
#Correct for ice response on hot wire probe, Cober et al. 2001
if lwc_alt:
lwc1 = lwc1 - twc *.15
lwc1 = np.ma.masked_where(lwc1 <=0,lwc1)
alt = fl['altitude']['data']
#Get rid of masked values before doing percentiles
ind = lwc1.mask
lwc1 = lwc1[~ind]
alt = alt[~ind]
T = T[~ind]
#Create top and bottom for interp (fit gets weird outside this range)
fit_min = np.min(alt)
fit_max = np.max(alt)
#Do percentiles for each bin
q = np.array([percentile])
Q = np.zeros([altbins.shape[0]-1])
for i in np.arange(1,altbins.shape[0]):
bottom = altbins[i-1]
top = altbins[i]
ind1 = np.where(alt>=bottom)
ind2 = np.where(alt<top)
ind3 = np.intersect1d(ind1,ind2)
if len(ind3) < 1:
Q[i-1] = np.nan
else:
Q[i-1] = np.nanpercentile(lwc1[ind3],q)
#get rid of any nans
ind = np.isnan(Q)
Q_temp = Q[~ind]
altmidpoints_temp = altmidpoints[~ind]
#lwc profile func
lwc_func = interp1d(altmidpoints_temp,Q_temp,kind='cubic',bounds_error=False)
#
#W_lwc_coeff_func
t_ks = np.array([-20,-10,0])
ks = np.array([5.41,5.15,4.82])
k_coeff = np.polyfit(t_ks,ks,deg=1)
k_func = np.poly1d(k_coeff)
#Ka_lwc_coeff_func
t_ks2 = np.array([-20,-10,0])
ks2 = np.array([1.77,1.36,1.05])
k_coeff2 = np.polyfit(t_ks2,ks2,deg=1)
k_func2 = np.poly1d(k_coeff2)
#Ku_lwc_coeff_func
t_ks3 = np.array([-20,-10,0])
ks3 = np.array([0.45,0.32,0.24])
k_coeff3 = np.polyfit(t_ks3,ks3,deg=1)
k_func3 = np.poly1d(k_coeff3)
#temperature function
t_coef = np.polyfit(alt,T,deg=1)
t_func = np.poly1d(t_coef)
#Functions for O2 and H2O atten
alt = np.squeeze(matlab_g['alt'])
L =np.squeeze(matlab_g['L'])
L3 = np.squeeze(matlab_g['L3'])
L5 = np.squeeze(matlab_g['L5'])
k_func4 = interp1d(alt,L,kind='cubic',bounds_error=False)
k_func5 = interp1d(alt,L3,kind='cubic',bounds_error=False)
k_func6 = interp1d(alt,L5,kind='cubic',bounds_error=False)
#function to correct for ice scattering (Kulie et al. 2014, Fig. 7)
k = pd.read_csv('Kulie_specific_attenuation.csv')
x = k['x'].values
y = k['y'].values
x_min = x.min()
x_max = x.max()
#fit a linear function so we can evaluate the attenuation at any Ku value
k_coef7 = np.polyfit(x,y,deg=1)
k_func7 = np.poly1d(k_coef7)
#Make new data arrays
w_new_new = np.ma.zeros(apr['W'].shape)
ka_new_new = np.ma.zeros(apr['Ka'].shape)
ku_new_new = np.ma.zeros(apr['Ku'].shape)
k_store2 = np.ma.zeros(apr['W'].shape)
#Main loop for correcting the profile
for j in np.arange(0,apr['alt_gate'].shape[1]):
alt = np.squeeze(apr['alt_gate'][:,j,:])
w = np.squeeze(apr['W'][:,j,:])
ka = np.squeeze(apr['Ka'][:,j,:])
ku = np.squeeze(apr['Ku'][:,j,:])
#need ku in linear units for ice scatter correction
ku_lin = 10**(ku/10.)
ind = np.ma.where(ku_lin > x_max)
ku_lin[ind] = x_max
w_new = np.ma.zeros(w.shape)
ka_new = np.ma.zeros(ka.shape)
ku_new = np.ma.zeros(ku.shape)
k_store = np.ma.zeros(w.shape)
for i in np.arange(0,alt.shape[1]):
a1 = alt[:,i]
w1 = w[:,i]
ka1 = ka[:,i]
ku1 = ku[:,i]
ku_lin1 = ku_lin[:,i]
#Create a function to get T from alt
ts = t_func(a1)
#Get the right coeffs for the right alts (based on T)
ks = k_func(ts)
ks2 = k_func2(ts)
ks3 = k_func3(ts)
#
#Get the right attenuation from atmospheric gases
ks4 = k_func4(a1)
ks5 = k_func5(a1)
ks6 = k_func6(a1)
#
#get db/m caused by ice from ku following Kulie et al 2014
ks7 = k_func7(ku_lin1)
#zero where ref is masked...
ks7[ku_lin1.mask] = 0.
#
#Get lwc prof
ls = lwc_func(a1)
#
coeff = ls*ks
coeff2 = ls*ks2
coeff3 = ls*ks3
coeff4 = ks4
coeff5 = ks5
coeff6 = ks6
coeff7 = ks7
coeff[a1 <= fit_min+500] = 0
coeff[a1 >= fit_max-500] = 0
coeff2[a1 <= fit_min+500] = 0
coeff2[a1 >= fit_max-500] = 0
coeff3[a1 <= fit_min+500] = 0
coeff3[a1 >= fit_max-500] = 0
#This was an error: the gaseous attenuation was previously only applied up to the -5 deg C level. Now it is applied all the way to the surface (12/13/17)
# coeff4[a1 <= fit_min+500] = 0
# coeff4[a1 >= fit_max-500] = 0
# coeff5[a1 <= fit_min+500] = 0
# coeff5[a1 >= fit_max-500] = 0
# coeff6[a1 <= fit_min+500] = 0
# coeff6[a1 >= fit_max-500] = 0
#Convert to dB/gate
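#The liquid and gaseous coefficients above appear to be one-way specific
#attenuations in dB/km: dividing by 1000 gives dB/m and multiplying by 30 gives
#dB per 30 m range gate (the gate spacing used in apr3read). The ice term
#(coeff7) is only multiplied by 30, i.e. the Kulie fit is taken as already
#being per meter.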
coeff = (coeff/1000.)*30.
coeff2 = (coeff2/1000.)*30.
coeff3 = (coeff3/1000.)*30.
coeff4 = (coeff4/1000.)*30.
coeff5 = (coeff5/1000.)*30.
coeff6 = (coeff6/1000.)*30.
coeff7 = coeff7 * 30.
#
#get rid of nans so cumsum works right, nans are inserted if radar is masked
ind = np.isnan(coeff)
coeff[ind] = 0.
ind = np.isnan(coeff2)
coeff2[ind] = 0.
ind = np.isnan(coeff3)
coeff3[ind] = 0.
ind = np.isnan(coeff4)
coeff4[ind] = 0.
ind = np.isnan(coeff5)
coeff5[ind] = 0.
ind = np.isnan(coeff6)
coeff6[ind] = 0.
ind = np.isnan(coeff7)
coeff7[ind] = 0.
#path integrate
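#Two-way path-integrated attenuation: cumulatively sum the per-gate one-way loss
#down the ray and multiply by 2 for the round trip before adding it back to the
#measured reflectivity.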
k = np.cumsum(coeff)*2
k2 = np.cumsum(coeff2)*2
k3 = np.cumsum(coeff3)*2
k4 = np.cumsum(coeff4)*2
k5 = np.cumsum(coeff5)*2
k6 = np.cumsum(coeff6)*2
k7 = np.cumsum(coeff7)*2
#
#correct
w1 = w1+k+k4+k7
#uncomment if you wish to correct Ka and Ku
#ka1 = ka1+k2+k5
#ku1 = ku1+k3+k6
#correcting just for gases
ka1 = ka1+k5
ku1 = ku1+k6
w_new[:,i] = w1
ka_new[:,i] = ka1
ku_new[:,i] = ku1
#
k_store[:,i] = k + k4 + k7
w_new_new[:,j,:] = w_new
ka_new_new[:,j,:] = ka_new
ku_new_new[:,j,:] = ku_new
k_store2[:,j,:] = k_store
#mask the attenuation field to where the ref. field is masked (i.e. BB algo) (12/13/17)
k_store2 = np.ma.masked_where(w_new_new.mask,k_store2)
#Find max correction values for table
wmax = np.ma.max(w_new_new - apr['W'])
kamax = np.ma.max(ka_new_new - apr['Ka'])
kumax = np.ma.max(ku_new_new - apr['Ku'])
maxchange = np.array([wmax,kamax,kumax])
#Pack data back into dict
data_corrected = {}
data_corrected['Ku'] = ku_new_new
data_corrected['Ka'] = ka_new_new
data_corrected['W'] = w_new_new
data_corrected['Ku_uc'] = apr['Ku']
data_corrected['Ka_uc'] =apr['Ka']
data_corrected['W_uc'] = apr['W']
data_corrected['lwc_prof'] = Q_temp
data_corrected['altbins_prof'] = altmidpoints_temp
data_corrected['timedates'] = apr['timedates']
data_corrected['alt_gate'] = apr['alt_gate']
data_corrected['lat'] = apr['lat']
data_corrected['lon'] = apr['lon']
data_corrected['lat_gate'] = apr['lat_gate']
data_corrected['lon_gate'] = apr['lon_gate']
data_corrected['surface'] = apr['surface']
data_corrected['time_gate'] = apr['time_gate']
data_corrected['maxchange'] = maxchange
data_corrected['k_store'] = k_store2
data_corrected['roll'] = apr['roll']
return data_corrected
def atten_cor3(apr,fl,percentile,matlab_g,lwc_alt=True):
"""
========
This is a first order attenuation correction algorithm for Ku,Ka and W band radar.
apr: dict, APR3 data dictionary (e.g. output of apr3read or atten_cor2)
fl: dict, Citation flight-level data
percentile: percentile threshold used to build the LWC profile
matlab_g: dict, from the MATLAB calculation of gaseous attenuation
lwc_alt: bool, if True correct the King-probe LWC for ice response (Cober et al. 2001)
========
"""
#Determine altitude bins for profile of LWC
altbins = np.arange(1000,9000,500)
altmidpoints = np.arange(1500,9000,500)
coldcloud = True
#lwc1 = King probe
#twc = Nevzorov total water content
lwc1 = fl['lwc1']['data']
twc = fl['twc']['data']
#Get rid of negative values
lwc1 = np.ma.masked_where(lwc1 <=0,lwc1)
twc = np.ma.masked_where(lwc1 <=0,twc)
T = fl['temperature']['data']
if coldcloud:
lwc1 = np.ma.masked_where(T > -5,lwc1)
twc = np.ma.masked_where(T > -5,twc)
#Correct for ice response on hot wire probe, Cober et al. 2001
if lwc_alt:
lwc1 = lwc1 - twc *.15
lwc1 = np.ma.masked_where(lwc1 <=0,lwc1)
alt = fl['altitude']['data']
#Get rid of masked values before doing percentiles
ind = lwc1.mask
lwc1 = lwc1[~ind]
alt = alt[~ind]
T = T[~ind]
#Create top and bottom for interp (fit gets weird outside this range)
fit_min = np.min(alt)
fit_max = np.max(alt)
#Do percentiles for each bin
q = np.array([percentile])
Q = np.zeros([altbins.shape[0]-1])
for i in np.arange(1,altbins.shape[0]):
bottom = altbins[i-1]
top = altbins[i]
ind1 = np.where(alt>=bottom)
ind2 = np.where(alt<top)
ind3 = np.intersect1d(ind1,ind2)
if len(ind3) < 1:
Q[i-1] = np.nan
else:
Q[i-1] = np.nanpercentile(lwc1[ind3],q)
#get rid of any nans
ind = np.isnan(Q)
Q_temp = Q[~ind]
altmidpoints_temp = altmidpoints[~ind]
#lwc profile func
lwc_func = interp1d(altmidpoints_temp,Q_temp,kind='cubic',bounds_error=False)
#
#W_lwc_coeff_func
t_ks = np.array([-20,-10,0])
ks = np.array([5.41,5.15,4.82])
k_coeff = np.polyfit(t_ks,ks,deg=1)
k_func = np.poly1d(k_coeff)
#Ka_lwc_coeff_func
t_ks2 = np.array([-20,-10,0])
ks2 = np.array([1.77,1.36,1.05])
k_coeff2 = np.polyfit(t_ks2,ks2,deg=1)
k_func2 = np.poly1d(k_coeff2)
#Ku_lwc_coeff_func
t_ks3 = np.array([-20,-10,0])
ks3 = np.array([0.45,0.32,0.24])
k_coeff3 = np.polyfit(t_ks3,ks3,deg=1)
k_func3 = np.poly1d(k_coeff3)
#temperature function
t_coef = np.polyfit(alt,T,deg=1)
t_func = np.poly1d(t_coef)
#Functions for O2 and H2O atten
alt = np.squeeze(matlab_g['alt'])
L =np.squeeze(matlab_g['L'])
L3 = np.squeeze(matlab_g['L3'])
L5 = np.squeeze(matlab_g['L5'])
k_func4 = interp1d(alt,L,kind='cubic',bounds_error=False)
k_func5 = interp1d(alt,L3,kind='cubic',bounds_error=False)
k_func6 = interp1d(alt,L5,kind='cubic',bounds_error=False)
#function to correct for ice scattering (Kulie et al. 2014, Fig. 7)
k = pd.read_csv('Kulie_specific_attenuation.csv')
x = k['x'].values
y = k['y'].values
x_min = x.min()
x_max = x.max()
#fit a linear function so we can evaluate the attenuation at any Ku value
k_coef7 = np.polyfit(x,y,deg=1)
k_func7 = np.poly1d(k_coef7)
#Make new data arrays
w_new_new = np.ma.zeros(apr['W'].shape)
ka_new_new = np.ma.zeros(apr['Ka'].shape)
ku_new_new = np.ma.zeros(apr['Ku'].shape)
k_store2 = np.ma.zeros(apr['W'].shape)
gas2_w = np.ma.zeros(apr['W'].shape)
gas2_ku = np.ma.zeros(apr['W'].shape)
gas2_ka = np.ma.zeros(apr['W'].shape)
liquid2 = np.ma.zeros(apr['W'].shape)
ice2 = np.ma.zeros(apr['W'].shape)
#Main loop for correcting the profile
for j in np.arange(0,apr['alt_gate'].shape[1]):
alt = np.squeeze(apr['alt_gate'][:,j,:])
w = np.squeeze(apr['W'][:,j,:])
ka = np.squeeze(apr['Ka'][:,j,:])
ku = np.squeeze(apr['Ku'][:,j,:])
#need ku in linear units for ice scatter correction
ku_lin = 10**(ku/10.)
ind = np.ma.where(ku_lin > x_max)
ku_lin[ind] = x_max
w_new = np.ma.zeros(w.shape)
ka_new = np.ma.zeros(ka.shape)
ku_new = np.ma.zeros(ku.shape)
k_store = np.ma.zeros(w.shape)
gas_w = np.ma.zeros(w.shape)
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
try: import pmt_swig as pmt
except: import pmt
import numpy
# SWIG isn't taking in the #define PMT_NIL;
# getting the singleton locally.
PMT_NIL = pmt.get_PMT_NIL()
#define missing
def pmt_to_tuple(p):
elems = list()
for i in range(pmt.length(p)):
elem = pmt.tuple_ref(p, i)
elems.append(pmt_to_python(elem))
return tuple(elems)
def pmt_from_tuple(p):
args = map(python_to_pmt, p)
return pmt.make_tuple(*args)
def pmt_to_vector(p):
v = list()
for i in range(pmt.length(p)):
elem = pmt.vector_ref(p, i)
v.append(pmt_to_python(elem))
return v
def pmt_from_vector(p):
v = pmt.make_vector(len(p), PMT_NIL)
for i, elem in enumerate(p):
pmt.vector_set(v, i, python_to_pmt(elem))
return v
def pmt_to_dict(p):
d = dict()
items = pmt.dict_items(p)
for i in range(pmt.length(items)):
pair = pmt.nth(i, items)
k = pmt.car(pair)
v = pmt.cdr(pair)
d[pmt_to_python(k)] = pmt_to_python(v)
return d
def pmt_from_dict(p):
d = pmt.make_dict()
for k, v in p.iteritems():
#dict is immutable -> therefore pmt_dict_add returns the new dict
d = pmt.dict_add(d, python_to_pmt(k), python_to_pmt(v))
return d
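# A hedged round-trip sketch (python_to_pmt / pmt_to_python are assumed to be
# defined later in this module):
#   p = pmt_from_dict({'freq': 2.4e9, 'gain': 10})
#   d = pmt_to_dict(p)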
numpy_mappings = {
numpy.dtype(numpy.float32): (pmt.init_f32vector, float, pmt.f32vector_elements, pmt.is_f32vector),
numpy.dtype(numpy.float64): (pmt.init_f64vector, float, pmt.f64vector_elements, pmt.is_f64vector),
numpy.dtype(numpy.complex64): (pmt.init_c32vector, complex, pmt.c32vector_elements, pmt.is_c32vector),
numpy.dtype(numpy.complex128): (pmt.init_c64vector, complex, pmt.c64vector_elements, pmt.is_c64vector),
numpy.dtype(numpy.int8): (pmt.init_s8vector, int, pmt.s8vector_elements, pmt.is_s8vector),
numpy.dtype(numpy.int16): (pmt.init_s16vector, int, pmt.s16vector_elements, pmt.is_s16vector),
numpy.dtype(numpy.int32): (pmt.init_s32vector, int, pmt.s32vector_elements, pmt.is_s32vector),
# numpy.dtype(numpy.int64): (pmt.init_s64vector, int, pmt.s64vector_elements, pmt.is_s64vector),
numpy.dtype(numpy.uint8): (pmt.init_u8vector, int, pmt.u8vector_elements, pmt.is_u8vector),
numpy.dtype(numpy.uint16): (pmt.init_u16vector, int, pmt.u16vector_elements, pmt.is_u16vector),
numpy.dtype(numpy.uint32): (pmt.init_u32vector, int, pmt.u32vector_elements, pmt.is_u32vector),
# numpy.dtype(numpy.uint64): (pmt.init_u64vector, int, pmt.u64vector_elements, pmt.is_u64vector),
numpy.dtype(numpy.byte): (pmt.init_u8vector, int, pmt.u8vector_elements, pmt.is_u8vector),
}
uvector_mappings = dict([ (numpy_mappings[key][3], (numpy_mappings[key][2], key)) for key in numpy_mappings ])
def numpy_to_uvector(numpy_array):
try:
mapping = numpy_mappings[numpy_array.dtype]
pc = map(mapping[1], numpy.ravel(numpy_array))
import os
import torch
import torchvision.transforms as transforms
import pandas as pd
from PIL import Image
import numpy as np
class SIIM_ISIC(torch.utils.data.Dataset):
def __init__(self, data_root='/home/group3/DataSet/', csv_file=None, img_folder=None, type='train', transform=None):
self.transform = transform
self.type = type
if type == 'train':
self.df = pd.read_csv(os.path.join(data_root, 'training_set.csv'))
self.imageFolder = os.path.join(data_root, 'Training_set')
if type == 'validate':
self.df = pd.read_csv(os.path.join(data_root, 'validation_set.csv'))
self.imageFolder = os.path.join(data_root, 'Validation_set')
if type == 'test':
self.df = pd.read_csv(os.path.join(data_root, csv_file))
self.imageFolder = os.path.join(data_root, img_folder)
self.df['sex'].fillna('unknown', inplace=True)
self.df['age_approx'].fillna(-1, inplace=True)
self.df['anatom_site_general_challenge'].fillna('unknown', inplace=True)
def __getitem__(self, idx):
image_name = self.df.iloc[idx]["image_name"]
path = os.path.join(self.imageFolder, '{}.jpg'.format(image_name))
image = Image.open(path)
if self.transform:
image = self.transform(image)
target = []
if self.type != 'test':
target = self.df.iloc[idx]["target"]
sex = self.df.iloc[idx]["sex"]
age_approx = self.df.iloc[idx]["age_approx"]
anatom_site_general_challenge = self.df.iloc[idx]["anatom_site_general_challenge"]
meta = {
"sex": sex,
"age_approx": age_approx,
"anatom_site_general_challenge": anatom_site_general_challenge,
}
return image, meta, target
def __len__(self):
return len(self.df)
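# A hedged usage sketch (transform and batch size are illustrative):
#   ds = SIIM_ISIC(type='train',
#                  transform=transforms.Compose([transforms.Resize((224, 224)),
#                                                transforms.ToTensor()]))
#   loader = torch.utils.data.DataLoader(ds, batch_size=32, shuffle=True)
#   images, meta, targets = next(iter(loader))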
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os, re, sys
import numpy as np
# In[2]:
def cal_angle_between_two_vectors(vec_1, vec_2):
"""calculate and return the angle between two vectors. The angle is in radians"""
unit_vec_1 = vec_1 / np.linalg.norm(vec_1)
unit_vec_2 = vec_2 / np.linalg.norm(vec_2)
dot_product = np.dot(unit_vec_1, unit_vec_2)
# Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import datafile
from joulescope import JOULESCOPE_DIR
from joulescope.calibration import Calibration
from joulescope.stream_buffer import reduction_downsample, Statistics, stats_to_api, \
STATS_FIELDS, STATS_VALUES, I_RANGE_MISSING, SUPPRESS_SAMPLES_MAX, RawProcessor
import json
import numpy as np
import datetime
import os
import logging
log = logging.getLogger(__name__)
DATA_RECORDER_FORMAT_VERSION = '1'
SAMPLING_FREQUENCY = 2000000
SAMPLES_PER_BLOCK = 100000
def construct_record_filename():
time_start = datetime.datetime.utcnow()
timestamp_str = time_start.strftime('%Y%m%d_%H%M%S')
name = '%s.jls' % (timestamp_str, )
return os.path.join(JOULESCOPE_DIR, name)
class DataRecorder:
"""Record Joulescope data to a file."""
def __init__(self, filehandle, sampling_frequency, calibration=None):
"""Create a new instance.
:param filehandle: The file-like object or file name.
:param sampling_frequency: The sampling frequency in Hertz.
:param calibration: The calibration bytes in datafile format.
None (default) uses the unit gain calibration.
"""
log.info('init')
if isinstance(filehandle, str):
self._fh = open(filehandle, 'wb')
filehandle = self._fh
else:
self._fh = None
self._sampling_frequency = sampling_frequency
# constraints:
# int1 * _samples_per_reduction = _samples_per_block
# int2 * _samples_per_reduction = _samples_per_tlv
# int3 * _samples_per_tlv = _samples_per_block
self._samples_per_reduction = int(sampling_frequency) // 100 # ~100 Hz
self._samples_per_tlv = self._samples_per_reduction * 20 # ~ 5 Hz
self._samples_per_block = self._samples_per_tlv * 5 # ~1 Hz
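# With the module's default 2 MHz SAMPLING_FREQUENCY this works out to 20,000
# samples per reduction (100 reductions/s), 400,000 samples per TLV (5 TLVs/s)
# and 2,000,000 samples per block (1 block/s), satisfying the integer-ratio
# constraints listed above.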
# dependent vars
self._reductions_per_tlv = self._samples_per_tlv // self._samples_per_reduction
reduction_block_size = self._samples_per_block // self._samples_per_reduction
self._reduction = np.empty((reduction_block_size, STATS_FIELDS, STATS_VALUES), dtype=np.float32)
self._reduction[:] = np.nan
self._sample_id_tlv = 0 # sample id for start of next TLV
self._sample_id_block = None # sample id for start of current block, None if not started yet
self._stream_buffer = None # to ensure same
self._sb_sample_id_last = None
self._voltage_range = None
self._writer = datafile.DataFileWriter(filehandle)
self._closed = False
self._total_size = 0
self._append_configuration()
if calibration is not None:
if isinstance(calibration, Calibration):
calibration = calibration.data
self._writer.append_subfile('calibration', calibration)
self._writer.collection_start(0, 0)
def _append_configuration(self):
config = {
'type': 'config',
'data_recorder_format_version': DATA_RECORDER_FORMAT_VERSION,
'sampling_frequency': self._sampling_frequency,
'samples_per_reduction': self._samples_per_reduction,
'samples_per_tlv': self._samples_per_tlv,
'samples_per_block': self._samples_per_block,
'reduction_fields': ['current', 'voltage', 'power',
'current_range', 'current_lsb', 'voltage_lsb']
}
cfg_data = json.dumps(config).encode('utf-8')
self._writer.append(datafile.TAG_META_JSON, cfg_data)
def _collection_start(self, data=None):
log.debug('_collection_start()')
c = self._writer.collection_start(1, 0, data=data)
c.metadata = {'start_sample_id': self._sample_id_tlv}
c.on_end = self._collection_end
self._sample_id_block = self._sample_id_tlv
def _collection_end(self, collection):
tlv_offset = (self._sample_id_tlv - self._sample_id_block) // self._samples_per_tlv
r_stop = tlv_offset * self._reductions_per_tlv
log.debug('_collection_end(%s, %s)', r_stop, len(self._reduction))
self._writer.collection_end(collection, self._reduction[:r_stop, :, :].tobytes())
self._sample_id_block = None
self._reduction[:] = np.nan
def stream_notify(self, stream_buffer):
"""Process data from a stream buffer.
:param stream_buffer: The stream_buffer instance which has a
"sample_id_range" member, "voltage_range" member,
raw(start_sample_id, stop_sample_id) and
get_reduction(reduction_idx, start_sample_id, stop_sample_id).
"""
sb_start, sb_stop = stream_buffer.sample_id_range
if self._stream_buffer is None:
self._stream_buffer = stream_buffer
self._sample_id_tlv = sb_stop
self._sample_id_block = None
elif self._stream_buffer != stream_buffer:
raise ValueError('Supports only a single stream_buffer instance')
sample_id_next = self._sample_id_tlv + self._samples_per_tlv
while sb_stop >= sample_id_next: # have at least one block
if self._samples_per_tlv > len(stream_buffer):
raise ValueError('stream_buffer length too small. %s > %s' %
(self._samples_per_tlv, len(stream_buffer)))
self._voltage_range = stream_buffer.voltage_range
if self._sample_id_block is None:
collection_data = {
'v_range': stream_buffer.voltage_range,
'sample_id': sample_id_next,
}
collection_data = json.dumps(collection_data).encode('utf-8')
self._collection_start(data=collection_data)
log.debug('_process() add tlv %d', self._sample_id_tlv)
b = stream_buffer.raw_get(self._sample_id_tlv, sample_id_next)
self._append_data(b.tobytes())
tlv_offset = (self._sample_id_tlv - self._sample_id_block) // self._samples_per_tlv
r_start = tlv_offset * self._reductions_per_tlv
r_stop = r_start + self._reductions_per_tlv
stream_buffer.data_get(self._sample_id_tlv, sample_id_next,
self._samples_per_reduction, out=self._reduction[r_start:r_stop, :, :])
self._sample_id_tlv = sample_id_next
if self._sample_id_tlv - self._sample_id_block >= self._samples_per_block:
self._collection_end(self._writer.collections[-1])
sample_id_next += self._samples_per_tlv
def _append_data(self, data):
if self._closed:
return
expected_len = self._samples_per_tlv * 2 * 2 # two uint16's per sample
if expected_len != len(data):
raise ValueError('invalid data length: %d != %d' % (expected_len, len(data)))
self._writer.append(datafile.TAG_DATA_BINARY, data, compress=False)
self._total_size += len(data) // 4
def _append_meta(self):
index = {
'type': 'footer',
'size': self._total_size, # in samples
}
data = json.dumps(index).encode('utf-8')
self._writer.append(datafile.TAG_META_JSON, data)
def close(self):
if self._closed:
return
self._closed = True
while len(self._writer.collections):
collection = self._writer.collections[-1]
if len(collection.metadata):
self._collection_end(collection)
else:
self._writer.collection_end()
self._append_meta()
self._writer.finalize()
if self._fh is not None:
self._fh.close()
self._fh = None
class DataReader:
def __init__(self):
self.calibration = None
self.config = None
self.footer = None
self._fh_close = False
self._fh = None
self._f = None # type: datafile.DataFileReader
self._data_start_position = 0
self._voltage_range = 0
self._sample_cache = None
self.raw_processor = RawProcessor()
def __str__(self):
if self._f is not None:
return 'DataReader %.2f seconds (%d samples)' % (self.duration, self.footer['size'])
def close(self):
if self._fh_close:
self._fh.close()
self._fh_close = False
self._fh = None
self._f = None
self._sample_cache = None
self._reduction_cache = None
def open(self, filehandle):
self.close()
self.calibration = Calibration() # default calibration
self.config = None
self.footer = None
self._data_start_position = 0
if isinstance(filehandle, str):
log.info('DataReader(%s)', filehandle)
self._fh = open(filehandle, 'rb')
self._fh_close = True
else:
self._fh = filehandle
self._fh_close = False
self._f = datafile.DataFileReader(self._fh)
while True:
tag, value = self._f.peek()
if tag is None:
raise ValueError('could not read file')
elif tag == datafile.TAG_SUBFILE:
name, data = datafile.subfile_split(value)
if name == 'calibration':
self.calibration = Calibration().load(data)
elif tag == datafile.TAG_COLLECTION_START:
self._data_start_position = self._f.tell()
elif tag == datafile.TAG_META_JSON:
meta = json.loads(value.decode('utf-8'))
type_ = meta.get('type')
if type_ == 'config':
self.config = meta
elif type_ == 'footer':
self.footer = meta
break
else:
log.warning('Unknown JSON section type=%s', type_)
self._f.skip()
if self._data_start_position == 0 or self.config is None or self.footer is None:
raise ValueError('could not read file')
log.info('DataReader with %d samples:\n%s', self.footer['size'], json.dumps(self.config, indent=2))
if self.config['data_recorder_format_version'] != DATA_RECORDER_FORMAT_VERSION:
raise ValueError('Invalid file format')
self.config.setdefault('reduction_fields', ['current', 'voltage', 'power'])
cal = self.calibration
self.raw_processor.calibration_set(cal.current_offset, cal.current_gain, cal.voltage_offset, cal.voltage_gain)
return self
@property
def sample_id_range(self):
if self._f is not None:
s_start = 0
s_end = int(s_start + self.footer['size'])
return [s_start, s_end]
return 0
@property
def sampling_frequency(self):
if self._f is not None:
return float(self.config['sampling_frequency'])
return 0.0
@property
def reduction_frequency(self):
if self._f is not None:
return self.config['sampling_frequency'] / self.config['samples_per_reduction']
return 0.0
@property
def duration(self):
f = self.sampling_frequency
if f > 0:
r = self.sample_id_range
return (r[1] - r[0]) / f
return 0.0
@property
def voltage_range(self):
return self._voltage_range
def _validate_range(self, start=None, stop=None, increment=None):
idx_start, idx_end = self.sample_id_range
if increment is not None:
idx_end = ((idx_end + increment - 1) // increment) * increment
# log.debug('[%d, %d] : [%d, %d]', start, stop, idx_start, idx_end)
if not idx_start <= start < idx_end:
raise ValueError('start out of range: %d <= %d < %d' % (idx_start, start, idx_end))
if not idx_start <= stop <= idx_end:
raise ValueError('stop out of range: %d <= %d <= %d: %s' %
(idx_start, stop, idx_end, increment))
def _sample_tlv(self, sample_idx):
if self._sample_cache and self._sample_cache['start'] <= sample_idx < self._sample_cache['stop']:
# cache hit
return self._sample_cache
idx_start, idx_end = self.sample_id_range
if not idx_start <= sample_idx < idx_end:
raise ValueError('sample index out of range: %d <= %d < %d' % (idx_start, sample_idx, idx_end))
if self._sample_cache is not None:
log.debug('_sample_cache cache miss: %s : %s %s',
sample_idx, self._sample_cache['start'], self._sample_cache['stop'])
# seek
samples_per_tlv = self.config['samples_per_tlv']
samples_per_block = self.config['samples_per_block']
tgt_block = sample_idx // samples_per_block
if self._sample_cache is not None and sample_idx > self._sample_cache['start']:
# continue forward
self._fh.seek(self._sample_cache['tlv_pos'])
voltage_range = self._sample_cache['voltage_range']
block_fh_pos = self._sample_cache['block_pos']
current_sample_idx = self._sample_cache['start']
block_counter = current_sample_idx // samples_per_block
else: # add case for rewind?
log.debug('_sample_tlv resync to beginning')
self._fh.seek(self._data_start_position)
voltage_range = 0
block_fh_pos = 0
block_counter = 0
current_sample_idx = 0
if self._f.advance() != datafile.TAG_COLLECTION_START:
raise ValueError('data section must be single collection')
while True:
tag, _ = self._f.peek_tag_length()
if tag is None:
log.error('sample_tlv not found before end of file: %s > %s', sample_idx, current_sample_idx)
break
if tag == datafile.TAG_COLLECTION_START:
if block_counter < tgt_block:
self._f.skip()
block_counter += 1
else:
block_fh_pos = self._f.tell()
tag, collection_bytes = next(self._f)
c = datafile.Collection.decode(collection_bytes)
if c.data:
collection_start_meta = json.loads(c.data)
voltage_range = collection_start_meta.get('v_range', 0)
self._voltage_range = voltage_range
current_sample_idx = block_counter * samples_per_block
elif tag == datafile.TAG_COLLECTION_END:
block_counter += 1
self._f.advance()
elif tag == datafile.TAG_DATA_BINARY:
tlv_stop = current_sample_idx + samples_per_tlv
if current_sample_idx <= sample_idx < tlv_stop:
# found it!
tlv_pos = self._f.tell()
tag, value = next(self._f)
data = np.frombuffer(value, dtype=np.uint16).reshape((-1, 2))
self._sample_cache = {
'voltage_range': voltage_range,
'start': current_sample_idx,
'stop': tlv_stop,
'buffer': data,
'tlv_pos': tlv_pos,
'block_pos': block_fh_pos,
}
return self._sample_cache
else:
self._f.advance()
current_sample_idx = tlv_stop
else:
self._f.advance()
def raw(self, start=None, stop=None, units=None):
"""Get the raw data.
:param start: The starting time relative to the first sample.
:param stop: The ending time.
:param units: The units for start and stop: ['seconds', 'samples']. None (default) is 'samples'.
:return: The output which is (out_raw, bits, out_cal).
"""
start, stop = self.normalize_time_arguments(start, stop, units)
x_start, x_stop = self.sample_id_range
if start is None:
start = x_start
if stop is None:
stop = x_stop
self._fh.seek(self._data_start_position)
self._validate_range(start, stop)
length = stop - start
if length <= 0:
            # return an empty triple consistent with the documented (out_raw, bits, out_cal);
            # the uint8 dtype for the bits array is an assumption
            return (np.empty((0, 2), dtype=np.uint16),
                    np.empty(0, dtype=np.uint8),
                    np.empty((0, 2), dtype=np.float32))
# process extra before & after to handle filtering
if start > SUPPRESS_SAMPLES_MAX:
sample_idx = start - SUPPRESS_SAMPLES_MAX
prefix_count = SUPPRESS_SAMPLES_MAX
else:
sample_idx = 0
prefix_count = start
if stop + SUPPRESS_SAMPLES_MAX <= x_stop:
end_idx = stop + SUPPRESS_SAMPLES_MAX
else:
end_idx = x_stop
out_idx = 0
d_raw = np.empty((end_idx - sample_idx, 2), dtype=np.uint16)
if self._f.advance() != datafile.TAG_COLLECTION_START:
raise ValueError('data section must be single collection')
while sample_idx < end_idx:
sample_cache = self._sample_tlv(sample_idx)
if sample_cache is None:
break
data = sample_cache['buffer']
b_start = sample_idx - sample_cache['start']
length = sample_cache['stop'] - sample_cache['start'] - b_start
out_remaining = end_idx - sample_idx
length = min(length, out_remaining)
if length <= 0:
break
b_stop = b_start + length
d = data[b_start:b_stop, :]
d_raw[out_idx:(out_idx + length), :] = d
out_idx += length
sample_idx += length
d_raw = d_raw[:out_idx, :]
self.raw_processor.reset()
self.raw_processor.voltage_range = self._voltage_range
d_cal, d_bits = self.raw_processor.process_bulk(d_raw.reshape((-1, )))
j = prefix_count
k = min(prefix_count + stop - start, out_idx)
return d_raw[j:k, :], d_bits[j:k], d_cal[j:k, :]
def _reduction_tlv(self, reduction_idx):
sz = self.config['samples_per_reduction']
incr = self.config['samples_per_block'] // sz
tgt_r_idx = reduction_idx
if self._reduction_cache and self._reduction_cache['r_start'] <= tgt_r_idx < self._reduction_cache['r_stop']:
return self._reduction_cache
if self._reduction_cache is not None:
log.debug('_reduction_tlv cache miss: %s : %s %s',
tgt_r_idx, self._reduction_cache['r_start'], self._reduction_cache['r_stop'])
idx_start, idx_end = self.sample_id_range
r_start = idx_start // sz
r_stop = idx_end // sz
if not r_start <= tgt_r_idx < r_stop:
            raise ValueError('reduction index out of range: %d <= %d < %d' % (r_start, tgt_r_idx, r_stop))
if self._reduction_cache is not None and tgt_r_idx > self._reduction_cache['r_start']:
# continue forward
self._fh.seek(self._reduction_cache['next_collection_pos'])
r_idx = self._reduction_cache['r_stop']
else: # add case for rewind?
log.debug('_reduction_tlv resync to beginning')
self._fh.seek(self._data_start_position)
r_idx = 0
if self._f.advance() != datafile.TAG_COLLECTION_START:
raise ValueError('data section must be single collection')
while True:
tag, _ = self._f.peek_tag_length()
if tag is None or tag == datafile.TAG_COLLECTION_END:
log.error('reduction_tlv not found before end of file: %s > %s', r_stop, r_idx)
break
elif tag != datafile.TAG_COLLECTION_START:
raise ValueError('invalid file format: not collection start')
r_idx_next = r_idx + incr
if tgt_r_idx >= r_idx_next:
self._f.skip()
r_idx = r_idx_next
continue
self._f.collection_goto_end()
tag, value = next(self._f)
if tag != datafile.TAG_COLLECTION_END:
raise ValueError('invalid file format: not collection end')
field_count = len(self.config['reduction_fields'])
data = np.frombuffer(value, dtype=np.float32).reshape((-1, field_count, STATS_VALUES))
if field_count != STATS_FIELDS:
d_nan = np.full((len(data), STATS_FIELDS - field_count, STATS_VALUES), np.nan, dtype=np.float32)
data = np.concatenate((data, d_nan), axis=1)
self._reduction_cache = {
'r_start': r_idx,
'r_stop': r_idx_next,
'buffer': data,
'next_collection_pos': self._f.tell()
}
return self._reduction_cache
def get_reduction(self, start=None, stop=None, units=None, out=None):
"""Get the fixed reduction with statistics.
:param start: The starting sample identifier (inclusive).
:param stop: The ending sample identifier (exclusive).
:param units: The units for start and stop.
'seconds' or None is in floating point seconds relative to the view.
'samples' is in stream buffer sample indices.
        :return: The N x STATS_FIELDS x STATS_VALUES sample data.
"""
start, stop = self.normalize_time_arguments(start, stop, units)
self._fh.seek(self._data_start_position)
self._validate_range(start, stop)
sz = self.config['samples_per_reduction']
r_start = start // sz
total_length = (stop - start) // sz
r_stop = r_start + total_length
log.info('DataReader.get_reduction(r_start=%r,r_stop=%r)', r_start, r_stop)
if total_length <= 0:
return np.empty((0, STATS_FIELDS, STATS_VALUES), dtype=np.float32)
if out is None:
out = np.empty((total_length, STATS_FIELDS, STATS_VALUES), dtype=np.float32)
elif len(out) < total_length:
raise ValueError('out too small')
r_idx = r_start
out_idx = 0
while r_idx < r_stop:
reduction_cache = self._reduction_tlv(r_idx)
if reduction_cache is None:
break
data = reduction_cache['buffer']
b_start = r_idx - reduction_cache['r_start']
length = reduction_cache['r_stop'] - reduction_cache['r_start'] - b_start
out_remaining = r_stop - r_idx
length = min(length, out_remaining)
if length <= 0:
break
out[out_idx:(out_idx + length), :, :] = data[b_start:(b_start + length), :, :]
out_idx += length
r_idx += length
if out_idx != total_length:
log.warning('DataReader length mismatch: out_idx=%s, length=%s', out_idx, total_length)
total_length = min(out_idx, total_length)
return out[:total_length, :]
def _get_reduction_stats(self, start, stop):
"""Get statistics over the reduction
:param start: The starting sample identifier (inclusive).
:param stop: The ending sample identifier (exclusive).
:return: The tuple of ((sample_start, sample_stop), :class:`Statistics`).
"""
# log.debug('_get_reduction_stats(%s, %s)', start, stop)
s = Statistics()
sz = self.config['samples_per_reduction']
incr = self.config['samples_per_block'] // sz
r_start = start // sz
if (r_start * sz) < start:
r_start += 1
r_stop = stop // sz
if r_start >= r_stop: # cannot use the reductions
s_start = r_start * sz
return (s_start, s_start), s
r_idx = r_start
while r_idx < r_stop:
reduction_cache = self._reduction_tlv(r_idx)
if reduction_cache is None:
break
data = reduction_cache['buffer']
b_start = r_idx - reduction_cache['r_start']
length = reduction_cache['r_stop'] - reduction_cache['r_start'] - b_start
out_remaining = r_stop - r_idx
length = min(length, out_remaining)
if length <= 0:
break
r = reduction_downsample(data, b_start, b_start + length, length)
s.combine(Statistics(length=length * sz, stats=r[0, :, :]))
r_idx += length
return (r_start * sz, r_stop * sz), s
def get_calibrated(self, start=None, stop=None, units=None):
"""Get the calibrated data (no statistics).
:param start: The starting sample identifier (inclusive).
:param stop: The ending sample identifier (exclusive).
:param units: The units for start and stop.
'seconds' or None is in floating point seconds relative to the view.
'samples' is in stream buffer sample indices.
:return: The tuple of (current, voltage), each as np.ndarray
with dtype=np.float32.
"""
log.debug('get_calibrated(%s, %s, %s)', start, stop, units)
        _, _, d = self.raw(start, stop, units=units)
i, v = d[:, 0], d[:, 1]
return i, v
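    # Usage sketch (illustrative, not part of the original source; assumes an
    # opened reader `r` holding at least one second of data):
    #   i, v = r.get_calibrated(0.0, 1.0, units='seconds')
    #   p_avg = float(np.mean(i * v))   # average power over the first second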
def get(self, start=None, stop=None, increment=None, units=None):
"""Get the calibrated data with statistics.
:param start: The starting sample identifier (inclusive).
:param stop: The ending sample identifier (exclusive).
:param increment: The number of raw samples per output sample.
:param units: The units for start and stop.
'seconds' or None is in floating point seconds relative to the view.
'samples' is in stream buffer sample indices.
        :return: The N x STATS_FIELDS x STATS_VALUES sample data.
"""
log.debug('DataReader.get(start=%r,stop=%r,increment=%r)', start, stop, increment)
start, stop = self.normalize_time_arguments(start, stop, units)
if increment is None:
increment = 1
if self._fh is None:
raise IOError('file not open')
increment = max(1, int(np.round(increment)))
out_len = (stop - start) // increment
if out_len <= 0:
return np.empty((0, STATS_FIELDS, STATS_VALUES), dtype=np.float32)
out = np.empty((out_len, STATS_FIELDS, STATS_VALUES), dtype=np.float32)
if increment == 1:
_, d_bits, d_cal = self.raw(start, stop)
i, v = d_cal[:, 0], d_cal[:, 1]
out[:, 0, 0] = i
out[:, 1, 0] = v
out[:, 2, 0] = i * v
out[:, 3, 0] = np.bitwise_and(d_bits, 0x0f)
out[:, 4, 0] = np.bitwise_and(np.right_shift(d_bits, 4), 0x01)
out[:, 5, 0] = np.bitwise_and(np.right_shift(d_bits, 5), 0x01)
out[:, :, 1] = 0.0 # zero variance, only one sample!
out[:, :, 2] = np.nan # min
out[:, :, 3] = np.nan # max
elif increment == self.config['samples_per_reduction']:
out = self.get_reduction(start, stop, out=out)
elif increment > self.config['samples_per_reduction']:
r_out = self.get_reduction(start, stop)
increment = int(increment / self.config['samples_per_reduction'])
out = reduction_downsample(r_out, 0, len(r_out), increment)
else:
k_start = start
for idx in range(out_len):
k_stop = k_start + increment
out[idx, :, :] = self._stats_get(k_start, k_stop).value
k_start = k_stop
return out
def summary_string(self):
s = [str(self)]
config_fields = ['sampling_frequency', 'samples_per_reduction', 'samples_per_tlv', 'samples_per_block']
for field in config_fields:
s.append(' %s = %r' % (field, self.config[field]))
return '\n'.join(s)
def time_to_sample_id(self, t):
if t is None:
return None
s_min, s_max = self.sample_id_range
s = int(t * self.sampling_frequency)
if s < s_min or s > s_max:
return None
return s
def sample_id_to_time(self, s):
if s is None:
return None
return s / self.sampling_frequency
def normalize_time_arguments(self, start, stop, units):
s_min, s_max = self.sample_id_range
if units == 'seconds':
start = self.time_to_sample_id(start)
stop = self.time_to_sample_id(stop)
elif units is None or units == 'samples':
if start is not None and start < 0:
start = s_max + start
if stop is not None and stop < 0:
                stop = s_max + stop
else:
raise ValueError(f'invalid time units: {units}')
s1 = s_min if start is None else start
s2 = s_max if stop is None else stop
if not s_min <= s1 < s_max:
raise ValueError(f'start sample out of range: {s1}')
if not s_min <= s2 <= s_max:
            raise ValueError(f'stop sample out of range: {s2}')
return s1, s2
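    # Illustrative conversions (added; the capture length of 1_000_000 samples
    # at 2 MHz is an assumption, not taken from the original source):
    #   r.normalize_time_arguments(0.0, 0.25, 'seconds')    # -> (0, 500000)
    #   r.normalize_time_arguments(None, None, None)        # -> (0, 1000000)
    #   r.normalize_time_arguments(-1000, None, 'samples')  # -> (999000, 1000000)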
def _stats_get(self, start, stop):
s1, s2 = start, stop
(k1, k2), s = self._get_reduction_stats(s1, s2)
if k1 >= k2:
# compute directly over samples
stats = np.empty((STATS_FIELDS, STATS_VALUES), dtype=np.float32)
_, d_bits, z = self.raw(s1, s2)
i, v = z[:, 0], z[:, 1]
p = i * v
zi = np.isfinite(i)
i_view = i[zi]
if len(i_view):
i_range = np.bitwise_and(d_bits, 0x0f)
i_lsb = np.right_shift(np.bitwise_and(d_bits, 0x10), 4)
v_lsb = np.right_shift(np.bitwise_and(d_bits, 0x20), 5)
for idx, field in enumerate([i_view, v[zi], p[zi], i_range, i_lsb[zi], v_lsb[zi]]):
stats[idx, 0] = np.mean(field, axis=0)
stats[idx, 1] = np.var(field, axis=0)
stats[idx, 2] = np.amin(field, axis=0)
                    stats[idx, 3] = np.amax(field, axis=0)
'''
-----------------------------------------------
File Name: data_seg
Description:
Author: Jing
Date: 6/29/2021
-----------------------------------------------
'''
from __future__ import division
import warnings
warnings.filterwarnings('ignore') # ignore warnings
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" # only show error
import numpy as np
import pandas as pd
import cv2 as cv
from PIL import Image
import skimage.io as io
from skimage import img_as_ubyte
import os
from medpy.metric.binary import dc, hd, assd
from keras import backend as K
from keras.optimizers import Adam
#from tensorflow.keras.optimizers import Adam # only for doubleunet
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping
import segmentation_models as sm
from model_seg import *
from doubleu_net import *
def load_data(img_path_aug, img_path_ori, gt_path_aug, gt_path_ori, csv_aug, csv_ori, H, W):
df_ori = pd.read_csv(csv_ori)
df_aug = pd.read_csv(csv_aug)
filename_list_ori = df_ori['filename'].values
filename_list_aug = df_aug['filename'].values
pixel_size_ori = df_ori['pixel size(mm)'].values
hcpx_ori = df_ori['head circumference(px)'].values
img_ori = []
label_ori = []
img_aug = []
label_aug = []
pixel_ori = []
label_hc = []
for (i, f) in enumerate(filename_list_ori):
img = Image.open(img_path_ori + f).convert('RGB') # 3 channels
img = img.resize((H,W))
img = np.array(img)
img_norm = (img - np.mean(img)) / np.std(img) # normalize
img_ori.append(img_norm)
pixel_ori.append(pixel_size_ori[i])
label_hc.append(hcpx_ori[i])
gt = Image.open(gt_path_ori + f).convert('L')
gt = gt.resize((H,W))
gt = np.array(gt)
gt[gt > 0.5] = 1 # normalize
gt[gt <= 0.5] = 0
gt = gt[:, :, np.newaxis]
label_ori.append(gt)
for (i, f) in enumerate(filename_list_aug):
img = Image.open(img_path_aug + f).convert('RGB')
img = img.resize((H,W))
img = np.array(img)
img_norm = (img - np.mean(img)) / np.std(img) # normalize
# img = img_norm[:, :, np.newaxis]
img_aug.append(img_norm)
gt = Image.open(gt_path_aug + f).convert('L')
gt = gt.resize((H,W))
gt = np.array(gt)
gt[gt > 0.5] = 1 # normalize
gt[gt <= 0.5] = 0
gt = gt[:, :, np.newaxis]
label_aug.append(gt)
print("load data successfully!")
return np.asarray(img_aug, dtype=np.float64), np.asarray(label_aug), np.asarray(label_hc), \
np.asarray(img_ori, dtype=np.float64), np.asarray(label_ori), np.asarray(pixel_ori)
def save_data(save_path, segment_results, label, shape=(800, 540)):
image_resize = []
label_resize = []
for i, item in enumerate(segment_results):
img = item[:, :, 0]
if np.isnan(np.sum(img)):
img = img[~np.isnan(img)] # just remove nan elements from vector
img[img > 0.5] = 1
img[img <= 0.5] = 0
img_resize = cv.resize(img, shape, interpolation=cv.INTER_AREA)
image_resize.append(img_resize)
io.imsave(os.path.join(save_path, "%d_predict.png" % i), img_as_ubyte(img_resize))
for i, item in enumerate(label):
gt_resize = cv.resize(item, shape, interpolation=cv.INTER_AREA)
label_resize.append(gt_resize)
print("save data successfully!")
return np.asarray(image_resize), np.asarray(label_resize)
def Dice(y_true, y_pred):
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection) / (K.sum(y_true_f) + K.sum(y_pred_f))
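# Illustrative check (added, not in the original file): two identical binary
# masks give a Dice coefficient of 1.0, fully disjoint masks give 0.0.
#   y = K.constant([[0., 1., 1., 0.]])
#   K.eval(Dice(y, y))    # -> 1.0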
def Dice_score(gt, seg):
dice = []
for item in range(len(gt)):
dice.append(dc(gt[item], seg[item]))
print("The mean and std dice score is: ", '%.2f' % np.mean(dice), '%.2f' % np.std(dice))
return np.mean(dice), np.std(dice)
def HausdorffDistance_score(gt, seg, pixelsize):
hausdorff = []
for item in range(len(gt)):
if np.sum(seg[item]) > 0: # If the structure is predicted on at least one pixel
hausdorff.append(hd(seg[item], gt[item], voxelspacing=[pixelsize[item], pixelsize[item]]))
print("The mean and std Hausdorff Distance is: ", '%.2f' % np.mean(hausdorff), '%.2f' % np.std(hausdorff))
return np.mean(hausdorff), np.std(hausdorff)
def ASSD_score(gt, seg, pixelsize):
ASSD = []
for item in range(len(gt)):
if np.sum(seg[item]) > 0:
ASSD.append(assd(seg[item], gt[item], voxelspacing=[pixelsize[item], pixelsize[item]]))
print("The mean and std ASSD is: ", '%.2f' % np.mean(ASSD), '%.2f' % np.std(ASSD))
return np.mean(ASSD), np.std(ASSD)
def EllipseCircumference(a,b):
# Ramanujan approximation II
# HC = pi*(a+b)*(1+3h/(10+sqrt(4-3*h))),h = (a-b)**2/(a+b)**2
h = ((a / 2 - b / 2) ** 2) / ((a / 2 + b / 2) ** 2)
perimeter_ellipse = np.pi * (a / 2 + b / 2) * (1 + 3 * h / (10 + np.sqrt(4 - 3 * h)))
return perimeter_ellipse
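# Sanity check (illustrative, added): a and b are full axis lengths, as
# returned by cv.fitEllipse, so for a circle (a == b) the Ramanujan formula
# reduces to pi * diameter:
#   EllipseCircumference(10, 10)    # -> ~31.4159 == pi * 10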
def HC_calculate(pred):
'''
3 ways to calculate HC:
1. the number of contour points;
2. the length of contour;
3. the length of fitted ellipse
'''
num_points_pp = []
len_contour_pp = []
len_ellipse_pp = []
num_points = []
len_contour = []
len_ellipse = []
for item in range(len(pred)):
image = np.uint8(pred[item])
image[image > 0.5] = 255
image[image <= 0.5] = 0
contour = cv.Canny(image, 80, 160)
contours, hierarchy = cv.findContours(contour, mode=cv.RETR_EXTERNAL, method=cv.CHAIN_APPROX_NONE)
#print("performing with post processing")
max_contour = []
for i in range(len(contours)):
if len(contours[i]) > len(max_contour):
max_contour = contours[i]
if len(max_contour) != 0:
perimeter_contour = cv.arcLength(max_contour, True) # para2:indicating whether the curve is closed or not
# fitting ellipse, return center points, axis
(cx, cy), (a, b), angle = cv.fitEllipse(max_contour)
perimeter_ellipse = EllipseCircumference(a,b)
else:
perimeter_contour = 0
perimeter_ellipse = 0
num_points_pp.append(len(max_contour))
len_contour_pp.append(perimeter_contour)
len_ellipse_pp.append(perimeter_ellipse)
#print("performing without post processing")
if len(contours) !=0:
num_points_unit=0
len_contour_unit=0
len_ellipse_unit=0
for i in range(len(contours)):
num_points_unit +=len(contours[i])
len_contour_unit +=cv.arcLength(contours[i], True)
if len(contours[i])>5:#There should be at least 5 points to fit the ellipse in function 'cv::fitEllipse'
(cx, cy), (a, b), angle = cv.fitEllipse(contours[i])
len_ellipse_unit +=EllipseCircumference(a,b)
else:
num_points_unit = 0
len_contour_unit = 0
len_ellipse_unit = 0
num_points.append(num_points_unit)
len_contour.append(len_contour_unit)
len_ellipse.append(len_ellipse_unit)
return np.asarray(num_points), np.asarray(len_contour), np.asarray(len_ellipse),\
np.asarray(num_points_pp), np.asarray(len_contour_pp), np.asarray(len_ellipse_pp)
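# Usage sketch (illustrative; `pred` and `pixelsize` as used in predictions()
# below are assumed): the returned perimeters are in pixels, so the head
# circumference in mm is obtained by multiplying with the per-image pixel size:
#   _, _, hc_px, _, _, hc_px_pp = HC_calculate(pred)
#   hc_mm_pp = hc_px_pp * pixelsize    # element-wise, one value per test image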
def predictions(x_text, y_test, label_hc_px, pixelsize, model, save_path):
score = model.evaluate(x_text, y_test, verbose=2)
print("Loss,iou sore:", '%.2f' % score[0], '%.2f' % score[1])
results = model.predict(x_text) # return probability
pred, y_test = save_data(save_path, results, y_test)
# segmentation analysis
mean_dice, std_dice = Dice_score(y_test, pred)
mean_hd, std_hd = HausdorffDistance_score(y_test, pred, pixelsize)
mean_assd, std_assd = ASSD_score(y_test, pred, pixelsize)
# HC analysis
HC_pred_points, HC_pred_contour, HC_pred_ellipse,\
HC_pred_points_pp, HC_pred_contour_pp, HC_pred_ellipse_pp= HC_calculate(pred)
print("predicted value in mm:", HC_pred_ellipse_pp * pixelsize)
print("predicted value in mm wo pp:", HC_pred_ellipse * pixelsize)
absDiff_points = np.abs((HC_pred_points - label_hc_px) * pixelsize)
absDiff_contour = np.abs((HC_pred_contour - label_hc_px) * pixelsize)
absDiff_ellipse = np.abs((HC_pred_ellipse - label_hc_px) * pixelsize)
absDiff_points_pp = np.abs((HC_pred_points_pp - label_hc_px) * pixelsize)
absDiff_contour_pp = np.abs((HC_pred_contour_pp - label_hc_px) * pixelsize)
absDiff_ellipse_pp = np.abs((HC_pred_ellipse_pp - label_hc_px) * pixelsize)
mean_mae_points = round(np.mean(absDiff_points), 2) # compute mae in mm
mean_mae_contour = round(np.mean(absDiff_contour), 2) # compute mae in mm
mean_mae_ellipse = round(np.mean(absDiff_ellipse), 2) # compute mae in mm
mean_mae_points_pp = round(np.mean(absDiff_points_pp), 2) # compute mae in mm
mean_mae_contour_pp = round(np.mean(absDiff_contour_pp), 2) # compute mae in mm
mean_mae_ellipse_pp = round(np.mean(absDiff_ellipse_pp), 2) # compute mae in mm
std_mae_points = round(np.std(absDiff_points), 2)
std_mae_contour = round(np.std(absDiff_contour), 2)
std_mae_ellipse = round(np.std(absDiff_ellipse), 2)
std_mae_points_pp = round(np.std(absDiff_points_pp), 2)
std_mae_contour_pp = round(np.std(absDiff_contour_pp), 2)
std_mae_ellipse_pp = round(np.std(absDiff_ellipse_pp), 2)
mean_mae_px_points = round(np.mean(absDiff_points / pixelsize), 2) # compute mae in pixel
mean_mae_px_contour = round(np.mean(absDiff_contour / pixelsize), 2) # compute mae in pixel
mean_mae_px_ellipse = round(np.mean(absDiff_ellipse / pixelsize), 2) # compute mae in pixel
mean_mae_px_points_pp = round(np.mean(absDiff_points_pp / pixelsize), 2) # compute mae in pixel
mean_mae_px_contour_pp = round(np.mean(absDiff_contour_pp / pixelsize), 2) # compute mae in pixel
mean_mae_px_ellipse_pp = round(np.mean(absDiff_ellipse_pp / pixelsize), 2) # compute mae in pixel
std_mae_px_points = round(np.std(absDiff_points / pixelsize), 2)
std_mae_px_contour = round(np.std(absDiff_contour / pixelsize), 2)
std_mae_px_ellipse = round(np.std(absDiff_ellipse / pixelsize), 2)
std_mae_px_points_pp = round( | np.std(absDiff_points_pp / pixelsize) | numpy.std |
from jax import lax
import numpy as np
from scipy.integrate import solve_ivp
from sklearn.preprocessing import StandardScaler
from .utils import generate_diff_kernels
__all__ = ["generate_dataset"]
def generate_dataset(dt=1e-2, tmax=None, num_visible=2, num_der=2, raw_sol=False):
if tmax is None:
tmax = 100 + 2 * dt
def rossler(t, y0, a, b, c):
"""Rossler equations"""
u, v, w = y0[..., 0], y0[..., 1], y0[..., 2]
up = -v - w
vp = u + a * v
wp = b + w * (u - c)
return np.stack((up, vp, wp), axis=-1)
# Rossler parameters and initial conditions
a, b, c = 0.2, 0.2, 5.7
u0, v0, w0 = 0, 1, 1.05
# Integrate the Rossler equations on the time grid t
print("Generating Rossler system dataset...")
    t_eval = np.arange(0, tmax, dt)
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 4 22:45:19 2019
@author: Pavan
"""
import numpy as np
import scipy.stats as sc
import math
################################################################################################
# Helper Functions
def column_wise(fun,*args):
return np.apply_along_axis(fun,0,*args)
def rolling_window(a, shape): # rolling window for 2D array
s = (a.shape[0] - shape[0] + 1,) + (a.shape[1] - shape[1] + 1,) + shape
strides = a.strides + a.strides
return np.lib.stride_tricks.as_strided(a, shape=s, strides=strides)
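# Shape sketch (illustrative, added): for a (T, N) matrix and a full-width
# window, rolling_window yields one (window, N) slab per output row:
#   a = np.arange(12, dtype=float).reshape(6, 2)     # T=6, N=2
#   rolling_window(a, (3, 2)).shape                  # -> (4, 1, 3, 2)
# which is why the callers above squeeze axis 1 and reduce over axis 2.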
def fisher_helper_ema(array,alpha,window):
def numpy_ewma(a, alpha, windowSize):
# a=a.flatten()
wghts = (1-alpha)**np.arange(windowSize)
# wghts /= wghts.sum()
out = np.convolve(a,wghts)
out[:windowSize-1] = np.nan
return out[:a.size]
return column_wise(numpy_ewma,array,alpha,window)
################################################################################################
# A. Mathematical Functions
# Rolling Median
def median(a,window):
me = np.empty_like(a)
me[:window-1,:]=np.nan
me[window-1:,:]= np.squeeze(np.median(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return me
# Rolling Mean
def mean(a,window):
me = np.empty_like(a)
me[:window-1,:]=np.nan
me[window-1:,:]= np.squeeze(np.mean(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return me
#Rolling Standard Deviation
def stdev(a,window):
std = np.empty_like(a)
std[:window-1,:]=np.nan
std[window-1:,:]= np.squeeze(np.std(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return std
#Rolling Product
def product(a,window):
prod = np.empty_like(a)
prod[:window-1,:]=np.nan
prod[window-1:,:]= np.squeeze(np.prod(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return prod
#Rolling Summation
def summation(a,window):
summ = np.empty_like(a)
summ[:window-1,:]=np.nan
summ[window-1:,:]= np.squeeze(np.sum(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return summ
#Rolling Nan Product. Treats nan values as 1
def nanprod(a,window):
nanprod = np.empty_like(a)
nanprod[:window-1,:]=np.nan
nanprod[window-1:,:]= np.squeeze(np.nanprod(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return nanprod
#Rolling Nan Summation. Treats nan values as 0
def nansum(a,window):
summ = np.empty_like(a)
summ[:window-1,:]=np.nan
summ[window-1:,:]= np.squeeze(np.nansum(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return summ
#Rolling Cummulative Product
def cumproduct(a,window):
prod = np.empty_like(a)
prod[:window-1,:]=np.nan
prod[window-1:,:]= np.squeeze(np.cumprod(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return prod
#Rolling Summation
def cumsummation(a,window):
summ = np.empty_like(a)
summ[:window-1,:]=np.nan
summ[window-1:,:]= np.squeeze(np.cumsum(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return summ
#Rolling nan Cummulative Product. Treats nan as 1
def nancumproduct(a,window):
prod = np.empty_like(a)
prod[:window-1,:]=np.nan
prod[window-1:,:]= np.squeeze(np.nancumprod(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return prod
#Rolling nan Cummulative Summation. Treats nan as 0
def nancumsummation(a,window):
summ = np.empty_like(a)
summ[:window-1,:]=np.nan
summ[window-1:,:]= np.squeeze(np.nancumsum(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return summ
#backward difference: a[n]=b[n]-b[n-(window-1)]
def back_diff(a,window):
back= np.empty_like(a)
back[:window-1,:]=np.nan
back[window-1:,:]=a[window-1:,:]-a[:-(window-1),:]
return back
# rolling integration
def integrate(a,window):
inte= np.empty_like(a)
inte[:window-1,:]=np.nan
inte[window-1:,:]=np.squeeze(np.trapz(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return inte
# rolling integration
def integrate_along_x(y,x,window):
inte= np.empty_like(y)
inte[:window-1,:]=np.nan
inte[window-1:,:]=np.squeeze(np.trapz(rolling_window(y,(window,y.shape[1])),rolling_window(x,(window,x.shape[1])),axis=2),axis=1)
return inte
#Centers Value by subtracting its median over the TimePeriod.
#Using the median instead of the mean reduces the effect of outliers.
def center(a,window):
cen = np.empty_like(a)
cen[:window-1,:]=np.nan
cen[window-1:,:]= a[window-1:,:]-np.squeeze(np.median(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
return cen
#Compresses Value to the -100...+100 range. For this, Value is divided by its interquartile range
# - the difference of the 75th and 25th percentile - taken over TimePeriod, and
# then compressed by a cdf function. Works best when Value is an oscillator that crosses
# the zero line. Formula: 200 * cdf(0.25*Value/(P75-P25)) - 100.
def compress(a,window):
from scipy.stats import norm
com = np.empty_like(a)
com[:window-1,:]=np.nan
value = a[window-1:,:]
q25 = np.squeeze(np.quantile(rolling_window(a,(window,a.shape[1])),0.25,axis=2),axis=1)
q75 = np.squeeze(np.quantile(rolling_window(a,(window,a.shape[1])),0.75,axis=2),axis=1)
com[window-1:,:] = 200*(norm.cdf((0.25*value/(q75-q25))))-100
return com
#Centers and compresses Value to the -100...+100 scale.
#The deviation of Value from its median is divided by its interquartile range and then
#compressed by a cdf function. Formula: 200 * cdf(0.5*(Value-Median)/(P75-P25)) - 100.
def scale(a,window):
from scipy.stats import norm
scale = np.empty_like(a)
scale[:window-1,:]=np.nan
value = a[window-1:,:]
median = np.squeeze(np.median(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
q25 = np.squeeze(np.quantile(rolling_window(a,(window,a.shape[1])),0.25,axis=2),axis=1)
q75 = np.squeeze(np.quantile(rolling_window(a,(window,a.shape[1])),0.75,axis=2),axis=1)
scale[window-1:,:] = 200*(norm.cdf((0.25*(value-median)/(q75-q25))))-100
return scale
#Normalizes Value to the -100...+100 range through subtracting its minimum and dividing
#by its range over TimePeriod. Formula: 200 * (Value-Min)/(Max-Min) - 100 .
#For a 0..100 oscillator, multiply the returned value with 0.5 and add 50.
def normalize(a,window):
norm = np.empty_like(a)
norm[:window-1,:]=np.nan
value = a[window-1:,:]
minimum = np.squeeze(rolling_window(a,(window,a.shape[1])).min(axis=2),axis=1)
maximum = np.squeeze(rolling_window(a,(window,a.shape[1])).max(axis=2),axis=1)
norm[window-1:,:] = 200*((value-minimum)/(maximum-minimum))-100
return norm
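# Illustrative example (added): within each window the minimum maps to -100 and
# the maximum to +100, so for a monotone ramp the output saturates at +100.
#   x = np.arange(10, dtype=float).reshape(-1, 1)
#   normalize(x, 5)    # -> 4 rows of NaN, then 100.0 in every row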
def normalize_o(a,window):
norm = np.empty_like(a)
norm[:window-1,:]=np.nan
value = a[window-1:,:]
minimum = np.squeeze(rolling_window(a,(window,a.shape[1])).min(axis=2),axis=1)
maximum = np.squeeze(rolling_window(a,(window,a.shape[1])).max(axis=2),axis=1)
norm[window-1:,:] = 2*((value-minimum)/(maximum-minimum))-1
return norm
#Calculates the Z-score of the Value. The Z-score is the deviation from
# the mean over the TimePeriod, divided by the standard deviation.
# Formula: (Value-Mean)/StdDev.
def zscore(a,window):
z = np.empty_like(a)
z[:window-1,:]=np.nan
value = a[window-1:,:]
mean =np.squeeze(np.mean(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
stddev = np.squeeze(np.std(rolling_window(a,(window,a.shape[1])),axis=2),axis=1)
z[window-1:,:] = (value-mean)/stddev
return z
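# Illustrative example (added): the first window-1 rows are NaN by
# construction; for a linear ramp every full window yields the same z-score of
# its last element, (4 - 2) / std([0, 1, 2, 3, 4]) ~= 1.414:
#   x = np.arange(10, dtype=float).reshape(-1, 1)
#   zscore(x, 5)    # -> 4 rows of NaN followed by 6 rows of ~1.414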
#### Mathematical functions
def absolute(a):
return np.absolute(a)
def sin(a):
return np.sin(a)
def cos(a):
return np.cos(a)
def tan(a):
return np.tan(a)
def asin(a):
return np.arcsin(a)
def acos(a):
return np.arccos(a)
def atan(a):
return np.arctan(a)
def sinh(a):
return np.sinh(a)
def cosh(a):
return np.cosh(a)
def tanh(a):
return np.tanh(a)
def asinh(a):
return np.arcsinh(a)
def acosh(a):
return np.arccosh(a)
def atanh(a):
return np.arctanh(a)
def floor(a):
return np.floor(a)
def ceil(a):
return np.ceil(a)
def clamp(a,minimum,maximum):
return np.clip(a,minimum,maximum)
def around(a,decimals=0):
return np.around(a,decimals)
def round_(a,decimals=0):
return np.round_(a,decimals)
def rint(a):
return np.rint(a)
def fix(a):
return np.fix(a)
def trunc(a):
return np.trunc(a)
def pdf(a):
from scipy.stats import norm
return norm.pdf(a)
def logpdf(a):
from scipy.stats import norm
return norm.logpdf(a)
def cdf(a):
from scipy.stats import norm
return norm.cdf(a)
def logcdf(a):
from scipy.stats import norm
return norm.logcdf(a)
def qnorm(a):
from scipy.stats import norm
return norm.ppf(a)
def survival(a):
from scipy.stats import norm
return norm.sf(a)
def inv_survival(a):
from scipy.stats import norm
return norm.isf(a)
def errorf(a):
from scipy.special import erf
return erf(a)
def errorfc(a):
from scipy.special import erfc
return erfc(a)
def exp(a):
return np.exp(a)
def exp1(a):
    # np.exp1 does not exist; np.expm1 computes exp(a) - 1 accurately for small a
    return np.expm1(a)
def exp2(a):
return np.exp2(a)
def log(a):
return np.log(a)
def log10(a):
return np.log10(a)
def log2(a):
return np.log2(a)
def log1p(a):
return np.log1p(a)
def add(a,b):
return np.add(a,b)
def receiprocal(a):
return np.reciprocal(a)
def negative(a):
return np.negative(a)
def multiply(a,b):
return np.multiply(a,b)
def divide(a,b):
return np.divide(a,b)
def power(a,b):
return np.power(a,b)
def subtract(a,b):
return np.subtract(a,b)
def true_divide(a,b):
return np.true_divide(a,b)
def remainder(a,b):
return np.remainder(a,b)
def sqrt(a):
return np.sqrt(a)
def square(a):
return np.square(a)
def sign(a):
return np.sign(a)
def maximum(a,b):
return np.maximum(a,b)
def minimum(a,b):
return np.minimum(a,b)
def nan_to_num(a):
return np.nan_to_num(a)
############################################################################################
### Time Series properties, transformations and statistics
#Rolling Pearson Correlation
def corr(a,b,window):
from skimage.util import view_as_windows
A = view_as_windows(a,(window,1))[...,0]
B = view_as_windows(b,(window,1))[...,0]
A_mA = A - A.mean(-1, keepdims=True)
B_mB = B - B.mean(-1, keepdims=True)
# ## Sum of squares across rows
# ssA = (A_mA**2).sum(-1) # or better : np.einsum('ijk,ijk->ij',A_mA,A_mA)
# ssB = (B_mB**2).sum(-1) # or better : np.einsum('ijk,ijk->ij',B_mB,B_mB)
ssA = np.einsum('ijk,ijk->ij',A_mA,A_mA)
ssB = np.einsum('ijk,ijk->ij',B_mB,B_mB)
## Finally get corr coeff
out = np.full(a.shape, np.nan)
out[window-1:] = np.einsum('ijk,ijk->ij',A_mA,B_mB)/np.sqrt(ssA*ssB)
return out
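# Illustrative example (added, not in the original): perfectly correlated and
# anti-correlated columns give +1 and -1; the first window-1 rows stay NaN.
#   a = np.arange(8, dtype=float).reshape(-1, 1)
#   corr(a, 2 * a, 4)    # -> 3 rows of NaN, then 1.0
#   corr(a, -a, 4)       # -> 3 rows of NaN, then -1.0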
# Rolling Covariance
def covariance(a,b,window):
from skimage.util import view_as_windows
A = view_as_windows(a,(window,1))[...,0]
B = view_as_windows(b,(window,1))[...,0]
A_mA = A - A.mean(-1, keepdims=True)
B_mB = B - B.mean(-1, keepdims=True)
out = np.full(a.shape, np.nan)
out[window-1:] = np.einsum('ijk,ijk->ij',A_mA,B_mB)
return out
## Fisher transformation
#Fisher Transform; transforms a normalized Data series to a normal distributed range.
# The return value has no theoretical limit, but most values are between -1 .. +1.
# All Data values must be in the -1 .. +1 range f.i. by normalizing with
# the AGC, Normalize, or cdf function. The minimum Data length is 1.
def fisher(a):
tran = np.clip(a,-0.998,0.998)
return 0.5*np.log((1+tran)/(1-tran))
def invfisher(a):
return (np.exp(2*a)-1)/(np.exp(2*a)+1)
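# Illustrative round trip (added): invfisher is the inverse of fisher inside
# the clipped -0.998 .. 0.998 range (fisher is atanh, invfisher is tanh).
#   x = np.array([-0.9, -0.5, 0.0, 0.5, 0.9])
#   np.allclose(invfisher(fisher(x)), x)    # -> True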
# Simple Moving average
def sma(array,window):
def sma_array(array,window):
weights = np.ones(window)/window
ma = np.full_like(array,np.nan)
ma[window-1:] = np.convolve(array, weights, 'valid')
return ma
return column_wise(sma_array,array,window)
def ema_v1(array,alpha,window):
def numpy_ewma(a, alpha, windowSize):
wghts = (1-alpha)**np.arange(windowSize)
wghts /= wghts.sum()
out = np.convolve(a,wghts)
out[:windowSize-1] = np.nan
return out[:a.size]
return column_wise(numpy_ewma,array,alpha,window)
def ema(array,window):
def ExpMovingAverage(values, window):
alpha = 2/(1.0+window)
        weights = (1-alpha)**np.arange(window)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
'''
This package is to be used as a library. Please do not edit.
'''
def runge_function(n: int = 100, min_x: float = -5.0, max_x: float = 5.0) -> (np.ndarray, np.ndarray):
"""
Compute the discrete Runge function on the linearly spaced inverval [min_x, max_x] with n function values.
Arguments:
min_x: left border of the interval
max_x: right border of the interval
n: number of function values inside the interval
Return:
x: vector containing all x values, correspond to values in y
y: vector containing all function values, correspond to values in x
"""
x = np.linspace(min_x, max_x, n)
y = 1.0 / (1.0 + x ** 2)
return x, y
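# Quick check (illustrative, not part of the original): the Runge function is
# 1 at x = 0 and 1/26 at the ends of the default interval [-5, 5].
#   x, y = runge_function(n=11)
#   y[5]           # -> 1.0 (x[5] == 0.0)
#   y[0], y[-1]    # -> (~0.0385, ~0.0385) == 1/26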
def pad_coefficients(poly, length):
"""Adds zeros to the coefficients of poly if they have not the proper length."""
return np.pad(poly.coeffs, (length - poly.coeffs.size, 0), mode='constant', constant_values=0)
def plot_function(x, y):
""" Plot the function that is given by the discrete point pairs in x and y. """
plt.grid(True)
plt.plot(x, y, 'r-')
min_x = np.min(x)
min_y = np.min(y)
max_x = np.max(x)
max_y = np.max(y)
scale_x = max_x - min_x
scale_y = max_y - min_y
plt.xlim(min_x - 0.05 * scale_x, max_x + 0.05 * scale_x)
plt.ylim(min_y - 0.05 * scale_y, max_y + 0.05 * scale_y)
plt.show()
def plot_function_interpolations(function, support_points, interpolations, bases):
""" Plot a grid with the given function, the support points, interpolation and bases in each plot. """
x_f, y_f = function
fig1 = plt.figure()
for i in range(len(support_points)):
x_s, y_s = support_points[i]
x_i, y_i = interpolations[i]
p = fig1.add_subplot(3, 3, i + 1)
p.grid(True)
p.set_xlim(-5.3, 5.3)
p.set_xticks([-5, 0, 5])
p.set_ylim(-1.2, 2.2)
p.plot(x_f, y_f, 'r-')
p.plot(x_s, y_s, 'ko')
p.plot(x_i, y_i, 'b-')
fig2 = plt.figure()
for i in range(len(bases)):
p1 = fig2.add_subplot(3, 3, i + 1)
p1.grid(True)
p1.set_xlim(-5.3, 5.3)
p1.set_xticks([-5, 0, 5])
p1.set_ylim(-1.2, 2.2)
for base_func in bases[i]: plt.plot(x_f, base_func(x_f), '-')
plt.show()
def plot_spline(points, interpolations):
""" Plot a spline with the interpolation points."""
# Plot interpolation points
plt.plot(points[0], points[1], 'ko')
# Plot piecewise interpolants
for i in range(len(points[0]) - 1):
# plot local interpolant
p = interpolations[i]
        px = np.linspace(points[0][i], points[0][i + 1], int(100 / len(points[0])))
py = p(px)
plt.plot(px, py, '-')
# Plot Runge function
rx = np.linspace(-5, 5, 100)
ry = 1.0 / (1 + rx ** 2)
plt.plot(rx, ry, '--', color='0.7')
# Beautify plot
plt.grid(True)
plt.xlim(-5.1, 5.1)
plt.xticks(np.linspace(-5, 5, 11))
plt.ylim(-0.1, 1.1)
plt.subplots_adjust(left=0.05, right=0.98, top=0.98, bottom=0.05)
plt.show()
class Stickguy:
""" The stick guy. Only use in this package. """
def __init__(self, ax):
self.spine, = ax.plot([], [], lw=2)
self.left_arm, = ax.plot([], [], lw=2)
self.right_arm, = ax.plot([], [], lw=2)
self.left_leg, = ax.plot([], [], lw=2)
self.right_leg, = ax.plot([], [], lw=2)
def linear_animation(keytime, keyframe):
"""
The returned function computes interpolated keyframe curframe at given time t.
It uses the given keytime and splines parameters for this.
"""
def animation_function(t):
k = np.searchsorted(keytime, t, side='right') - 1
u = (t - keytime[k]) / (keytime[k + 1] - keytime[k])
curframe = (1.0 - u) * keyframe[k] + u * keyframe[k + 1]
return curframe
return animation_function
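# Illustrative example (added; the 2-element frames are placeholders for the
# 11-parameter stick guy poses): halfway between two keyframes the linear
# interpolant returns their average.
#   keytime = np.array([0.0, 1.0, 2.0])
#   keyframe = np.array([[0.0, 0.0], [2.0, 4.0], [0.0, 0.0]])
#   linear_animation(keytime, keyframe)(0.5)    # -> array([1., 2.])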
def cubic_animation(keytime, splines):
"""
The returned function computes interpolated keyframe curframe at given time t.
It uses the given keytime and splines parameters for this.
"""
def animation_function(t):
k = np.searchsorted(keytime, t, side='right') - 1
curframe = np.array([s[k](t) for s in splines])
return curframe
return animation_function
def param2pos(param, stickguy):
"""
Computes positions of joints for the stick guy.
Inputs:
param : list of parameters describing the pose
param[0]: height of hip
param[1]: angle of spine to vertical axis
param[2]: angle of upper arm 0 to spine
param[3]: angle of lower arm 0 to upper arm 0
param[4,5]: as above, other arm
param[6]: angle of neck/head to spine
param[7]: angle of upper leg 0 to vertical axis
param[8]: angle of lower leg 0 to upper leg 0
param[9,10]: as above, other leg
"""
hip_pos = np.array([0.0, param[0]])
spine_vec = np.array([0.0, 1.0])
spine_vec = rotate(spine_vec, param[1])
neck_pos = hip_pos + spine_vec
basic_arm_vec = -0.6 * spine_vec
arm_vec = rotate(basic_arm_vec, param[2])
left_elbow_pos = neck_pos + arm_vec
arm_vec = rotate(arm_vec, param[3])
left_hand_pos = left_elbow_pos + arm_vec
lad = np.array([neck_pos, left_elbow_pos, left_hand_pos])
stickguy.left_arm.set_data(lad[:, 0], lad[:, 1])
arm_vec = rotate(basic_arm_vec, param[4])
right_elbow_pos = neck_pos + arm_vec
arm_vec = rotate(arm_vec, param[5])
right_hand_pos = right_elbow_pos + arm_vec
rad = np.array([neck_pos, right_elbow_pos, right_hand_pos])
stickguy.right_arm.set_data(rad[:, 0], rad[:, 1])
neck_vec = 0.3 * spine_vec
neck_vec = rotate(neck_vec, param[6])
head_pos = neck_pos + neck_vec
sd = np.array([hip_pos, neck_pos, head_pos])
stickguy.spine.set_data(sd[:, 0], sd[:, 1])
basic_leg_vec = (0.0, -0.7)
leg_vec = rotate(basic_leg_vec, param[7])
left_knee_pos = hip_pos + leg_vec
leg_vec = rotate(leg_vec, param[8])
left_foot_pos = left_knee_pos + leg_vec
lld = np.array([hip_pos, left_knee_pos, left_foot_pos])
stickguy.left_leg.set_data(lld[:, 0], lld[:, 1])
leg_vec = rotate(basic_leg_vec, param[9])
right_knee_pos = hip_pos + leg_vec
leg_vec = rotate(leg_vec, param[10])
right_foot_pos = right_knee_pos + leg_vec
rld = np.array([hip_pos, right_knee_pos, right_foot_pos])
stickguy.right_leg.set_data(rld[:, 0], rld[:, 1])
return
def rotate(v, angle):
""" Helper function to turn a vector for a given angle. """
s = np.sin(angle)
c = np.cos(angle)
    rv = np.array([v[0] * c - v[1] * s, v[0] * s + v[1] * c])
    return rv
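# Illustrative check (added): rotate applies a counter-clockwise rotation, so a
# quarter turn maps the x unit vector onto the y unit vector.
#   rotate(np.array([1.0, 0.0]), np.pi / 2)    # -> array([~0., 1.])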
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''NPT'''
import numpy as np
import mindspore.common.dtype as mstype
from mindspore import Tensor
from mindspore import nn
from mindspore.common.parameter import Parameter
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindsponge import Angle
from mindsponge import Bond
from mindsponge import Dihedral
from mindsponge import LangevinLiujian
from mindsponge import LennardJonesInformation
from mindsponge import MdInformation
from mindsponge import NonBond14
from mindsponge import NeighborList
from mindsponge import ParticleMeshEwald
from mindsponge import RestrainInformation
from mindsponge import SimpleConstarin
from mindsponge import VirtualInformation
from mindsponge import CoordinateMolecularMap
from mindsponge import BDBARO
class Controller:
'''controller'''
def __init__(self, args_opt):
self.input_file = args_opt.i
self.initial_coordinates_file = args_opt.c
self.amber_parm = args_opt.amber_parm
self.restrt = args_opt.r
self.mdcrd = args_opt.x
self.mdout = args_opt.o
self.mdbox = args_opt.box
self.command_set = {}
self.md_task = None
self.commands_from_in_file()
self.punctuation = ","
def commands_from_in_file(self):
'''command from in file'''
file = open(self.input_file, 'r')
context = file.readlines()
file.close()
self.md_task = context[0].strip()
for val in context:
val = val.strip()
if val and val[0] != '#' and ("=" in val):
val = val[:val.index(",")] if ',' in val else val
assert len(val.strip().split("=")) == 2
flag, value = val.strip().split("=")
value = value.replace(" ", "")
flag = flag.replace(" ", "")
if flag not in self.command_set:
self.command_set[flag] = value
else:
print("ERROR COMMAND FILE")
# print(self.command_set)
# print("========================commands_from_in_file")
class NPT(nn.Cell):
'''npt'''
def __init__(self, args_opt):
super(NPT, self).__init__()
self.control = Controller(args_opt)
self.md_info = MdInformation(self.control)
self.mode = self.md_info.mode
self.update_step = 0
self.bond = Bond(self.control)
self.bond_is_initialized = self.bond.is_initialized
self.angle = Angle(self.control)
self.angle_is_initialized = self.angle.is_initialized
self.dihedral = Dihedral(self.control)
self.dihedral_is_initialized = self.dihedral.is_initialized
self.nb14 = NonBond14(
self.control, self.dihedral, self.md_info.atom_numbers)
self.nb14_is_initialized = self.nb14.is_initialized
self.nb_info = NeighborList(
self.control, self.md_info.atom_numbers, self.md_info.box_length)
self.lj_info = LennardJonesInformation(
self.control, self.md_info.nb.cutoff, self.md_info.sys.box_length)
self.lj_info_is_initialized = self.lj_info.is_initialized
self.liujian_info = LangevinLiujian(
self.control, self.md_info.atom_numbers)
self.liujian_info_is_initialized = self.liujian_info.is_initialized
self.pme_method = ParticleMeshEwald(self.control, self.md_info)
self.pme_is_initialized = self.pme_method.is_initialized
self.restrain = RestrainInformation(
self.control, self.md_info.atom_numbers, self.md_info.crd)
self.restrain_is_initialized = self.restrain.is_initialized
self.simple_constrain_is_initialized = 0
self.simple_constrain = SimpleConstarin(
self.control, self.md_info, self.bond, self.angle, self.liujian_info)
self.simple_constrain_is_initialized = self.simple_constrain.is_initialized
self.freedom = self.simple_constrain.system_freedom
self.vatom = VirtualInformation(
self.control, self.md_info, self.md_info.sys.freedom)
self.vatom_is_initialized = 1
self.random = P.UniformReal(seed=1)
self.pow = P.Pow()
self.five = Tensor(5.0, mstype.float32)
self.third = Tensor(1 / 3, mstype.float32)
self.mol_map = CoordinateMolecularMap(self.md_info.atom_numbers, self.md_info.sys.box_length, self.md_info.crd,
self.md_info.nb.excluded_atom_numbers, self.md_info.nb.h_excluded_numbers,
self.md_info.nb.h_excluded_list_start, self.md_info.nb.h_excluded_list)
self.mol_map_is_initialized = 1
self.init_params()
self.init_tensor_1()
self.init_tensor_2()
self.op_define_1()
self.op_define_2()
self.depend = P.Depend()
self.print = P.Print()
self.total_count = Parameter(
Tensor(0, mstype.int32), requires_grad=False)
self.accept_count = Parameter(
Tensor(0, mstype.int32), requires_grad=False)
self.is_molecule_map_output = self.md_info.output.is_molecule_map_output
self.target_pressure = Tensor([self.md_info.sys.target_pressure], mstype.float32)
self.nx = self.nb_info.nx
self.ny = self.nb_info.ny
self.nz = self.nb_info.nz
self.nxyz = Tensor([self.nx, self.ny, self.nz], mstype.int32)
self.pme_inverse_box_vector = Parameter(Tensor(
self.pme_method.pme_inverse_box_vector, mstype.float32), requires_grad=False)
self.pme_inverse_box_vector_init = Parameter(Tensor(
self.pme_method.pme_inverse_box_vector, mstype.float32), requires_grad=False)
self.mc_baro_is_initialized = 0
self.bd_baro_is_initialized = 0
self.constant_uint_max_float = 4294967296.0
self.volume = Parameter(Tensor(self.pme_method.volume, mstype.float32), requires_grad=False)
self.crd_scale_factor = Parameter(Tensor([1.0,], mstype.float32), requires_grad=False)
self.bd_baro = BDBARO(self.control, self.md_info.sys.target_pressure,
self.md_info.sys.box_length, self.md_info.mode)
self.bd_baro_is_initialized = self.bd_baro.is_initialized
self.update_interval = Tensor([self.bd_baro.update_interval], mstype.float32)
self.pressure = Parameter(Tensor([self.md_info.sys.d_pressure,], mstype.float32), requires_grad=False)
self.compressibility = Tensor([self.bd_baro.compressibility], mstype.float32)
self.bd_baro_dt = Tensor([self.bd_baro.dt], mstype.float32)
self.bd_baro_taup = Tensor([self.bd_baro.taup], mstype.float32)
self.system_reinitializing_count = Parameter(
Tensor(0, mstype.int32), requires_grad=False)
self.bd_baro_newv = Parameter(
Tensor(self.bd_baro.new_v, mstype.float32), requires_grad=False)
self.bd_baro_v0 = Parameter(
Tensor(self.bd_baro.v0, mstype.float32), requires_grad=False)
def init_params(self):
'''init params'''
self.bond_energy_sum = Tensor(0, mstype.int32)
self.angle_energy_sum = Tensor(0, mstype.int32)
self.dihedral_energy_sum = Tensor(0, mstype.int32)
self.nb14_lj_energy_sum = Tensor(0, mstype.int32)
self.nb14_cf_energy_sum = Tensor(0, mstype.int32)
self.lj_energy_sum = Tensor(0, mstype.int32)
self.ee_ene = Tensor(0, mstype.int32)
self.total_energy = Tensor(0, mstype.int32)
self.ntwx = self.md_info.ntwx
self.atom_numbers = self.md_info.atom_numbers
self.residue_numbers = self.md_info.residue_numbers
self.bond_numbers = self.bond.bond_numbers
self.angle_numbers = self.angle.angle_numbers
self.dihedral_numbers = self.dihedral.dihedral_numbers
self.nb14_numbers = self.nb14.nb14_numbers
self.nxy = self.nb_info.nxy
self.grid_numbers = self.nb_info.grid_numbers
self.max_atom_in_grid_numbers = self.nb_info.max_atom_in_grid_numbers
self.max_neighbor_numbers = self.nb_info.max_neighbor_numbers
self.excluded_atom_numbers = self.md_info.nb.excluded_atom_numbers
self.refresh_count = Parameter(
Tensor(self.nb_info.refresh_count, mstype.int32), requires_grad=False)
self.refresh_interval = self.nb_info.refresh_interval
self.skin = self.nb_info.skin
self.cutoff = self.nb_info.cutoff
self.cutoff_square = self.nb_info.cutoff_square
self.cutoff_with_skin = self.nb_info.cutoff_with_skin
self.half_cutoff_with_skin = self.nb_info.half_cutoff_with_skin
self.cutoff_with_skin_square = self.nb_info.cutoff_with_skin_square
self.half_skin_square = self.nb_info.half_skin_square
self.beta = self.pme_method.beta
self.d_beta = Parameter(Tensor([self.pme_method.beta], mstype.float32), requires_grad=False)
self.d_beta_init = Parameter(Tensor([self.pme_method.beta], mstype.float32), requires_grad=False)
self.neutralizing_factor = Parameter(Tensor([self.pme_method.neutralizing_factor], mstype.float32),
requires_grad=False)
self.fftx = self.pme_method.fftx
self.ffty = self.pme_method.ffty
self.fftz = self.pme_method.fftz
self.random_seed = self.liujian_info.random_seed
self.dt = self.liujian_info.dt
self.half_dt = self.liujian_info.half_dt
self.exp_gamma = self.liujian_info.exp_gamma
self.update = False
self.file = None
self.datfile = None
self.max_velocity = self.liujian_info.max_velocity
self.constant_kb = 0.00198716
def init_tensor_1(self):
'''init tensor'''
self.uint_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3], dtype=np.uint32), mstype.uint32),
requires_grad=False)
self.need_potential = Parameter(Tensor(0, mstype.int32), requires_grad=False)
self.need_pressure = Parameter(Tensor(0, mstype.int32), requires_grad=False)
self.atom_energy = Parameter(Tensor([0] * self.atom_numbers, mstype.float32), requires_grad=False)
self.atom_virial = Parameter(Tensor([0] * self.atom_numbers, mstype.float32), requires_grad=False)
self.frc = Parameter(Tensor(np.zeros([self.atom_numbers, 3]), mstype.float32), requires_grad=False)
self.crd = Parameter(
Tensor(np.array(self.md_info.coordinate).reshape(
[self.atom_numbers, 3]), mstype.float32),
requires_grad=False)
self.crd_to_uint_crd_cof = Parameter(Tensor(
self.md_info.pbc.crd_to_uint_crd_cof, mstype.float32), requires_grad=False)
self.quarter_crd_to_uint_crd_cof = Parameter(Tensor(
self.md_info.pbc.quarter_crd_to_uint_crd_cof, mstype.float32), requires_grad=False)
self.uint_dr_to_dr_cof = Parameter(
Tensor(self.md_info.pbc.uint_dr_to_dr_cof, mstype.float32), requires_grad=False)
self.box_length = Parameter(
Tensor(self.md_info.box_length, mstype.float32), requires_grad=False)
self.box_length_1 = Tensor(self.md_info.box_length, mstype.float32)
self.charge = Parameter(Tensor(np.asarray(self.md_info.h_charge), mstype.float32), requires_grad=False)
self.old_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3]), mstype.float32), requires_grad=False)
self.last_crd = Parameter(Tensor(np.zeros([self.atom_numbers, 3]), mstype.float32), requires_grad=False)
self.mass = Tensor(self.md_info.h_mass, mstype.float32)
self.mass_inverse = Tensor(self.md_info.h_mass_inverse, mstype.float32)
self.res_mass = Tensor(self.md_info.res.h_mass, mstype.float32)
self.res_mass_inverse = Tensor(
self.md_info.res.h_mass_inverse, mstype.float32)
self.res_start = Tensor(self.md_info.h_res_start, mstype.int32)
self.res_end = Tensor(self.md_info.h_res_end, mstype.int32)
self.velocity = Parameter(
Tensor(self.md_info.velocity, mstype.float32), requires_grad=False)
self.acc = Parameter(Tensor(np.zeros(
[self.atom_numbers, 3], np.float32), mstype.float32), requires_grad=False)
self.bond_atom_a = Tensor(np.asarray(
self.bond.h_atom_a, np.int32), mstype.int32)
self.bond_atom_b = Tensor(np.asarray(
self.bond.h_atom_b, np.int32), mstype.int32)
self.bond_k = Tensor(np.asarray(
self.bond.h_k, np.float32), mstype.float32)
self.bond_r0 = Tensor(np.asarray(
self.bond.h_r0, np.float32), mstype.float32)
self.angle_atom_a = Tensor(np.asarray(
self.angle.h_atom_a, np.int32), mstype.int32)
self.angle_atom_b = Tensor(np.asarray(
self.angle.h_atom_b, np.int32), mstype.int32)
self.angle_atom_c = Tensor(np.asarray(
self.angle.h_atom_c, np.int32), mstype.int32)
self.angle_k = Tensor(np.asarray(
self.angle.h_angle_k, np.float32), mstype.float32)
self.angle_theta0 = Tensor(np.asarray(
self.angle.h_angle_theta0, np.float32), mstype.float32)
self.dihedral_atom_a = Tensor(np.asarray(
self.dihedral.h_atom_a, np.int32), mstype.int32)
self.dihedral_atom_b = Tensor(np.asarray(
self.dihedral.h_atom_b, np.int32), mstype.int32)
self.dihedral_atom_c = Tensor(np.asarray(
self.dihedral.h_atom_c, np.int32), mstype.int32)
self.dihedral_atom_d = Tensor(np.asarray(
self.dihedral.h_atom_d, np.int32), mstype.int32)
self.pk = Tensor(np.asarray(self.dihedral.h_pk,
np.float32), mstype.float32)
self.gamc = Tensor(np.asarray(
self.dihedral.h_gamc, np.float32), mstype.float32)
self.gams = Tensor(np.asarray(
self.dihedral.h_gams, np.float32), mstype.float32)
self.pn = Tensor(np.asarray(self.dihedral.h_pn,
np.float32), mstype.float32)
self.ipn = Tensor(np.asarray(
self.dihedral.h_ipn, np.int32), mstype.int32)
def init_tensor_2(self):
'''init tensor 2'''
self.nb14_atom_a = Tensor(np.asarray(
self.nb14.h_atom_a, np.int32), mstype.int32)
self.nb14_atom_b = Tensor(np.asarray(
self.nb14.h_atom_b, np.int32), mstype.int32)
self.lj_scale_factor = Tensor(np.asarray(
self.nb14.h_lj_scale_factor, np.float32), mstype.float32)
self.cf_scale_factor = Tensor(np.asarray(
self.nb14.h_cf_scale_factor, np.float32), mstype.float32)
self.grid_n = Tensor(self.nb_info.grid_n, mstype.int32)
self.grid_length = Parameter(
Tensor(self.nb_info.grid_length, mstype.float32), requires_grad=False)
self.grid_length_inverse = Parameter(
Tensor(self.nb_info.grid_length_inverse, mstype.float32), requires_grad=False)
self.bucket = Parameter(Tensor(
np.asarray(self.nb_info.bucket, np.int32).reshape(
[self.grid_numbers, self.max_atom_in_grid_numbers]),
mstype.int32), requires_grad=False) # Tobe updated
self.bucket_init = Parameter(Tensor(
np.asarray(self.nb_info.bucket, np.int32).reshape(
[self.grid_numbers, self.max_atom_in_grid_numbers]),
mstype.int32), requires_grad=False) # Tobe updated
self.atom_numbers_in_grid_bucket = Parameter(Tensor(self.nb_info.atom_numbers_in_grid_bucket, mstype.int32),
requires_grad=False) # to be updated
self.atom_numbers_in_grid_bucket_init = Parameter(
Tensor(self.nb_info.atom_numbers_in_grid_bucket, mstype.int32),
requires_grad=False) # to be updated
self.atom_in_grid_serial = Parameter(Tensor(np.zeros([self.nb_info.atom_numbers,], np.int32), mstype.int32),
requires_grad=False) # to be updated
self.atom_in_grid_serial_init = Parameter(
Tensor(np.zeros([self.nb_info.atom_numbers,], np.int32), mstype.int32),
requires_grad=False) # to be updated
self.pointer = Parameter(
            Tensor(np.asarray(self.nb_info.pointer, np.int32)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 13:38:02 2019
@author: brsr
"""
import geopandas
import pandas as pd
import shapely
from shapely.geometry import LineString, Polygon, Point
import pyproj
#import homography
import warnings
import numpy as np
from abc import ABC
from scipy.optimize import minimize, minimize_scalar, root_scalar
from scipy.special import hyp2f1, gamma, ellipj, ellipk, ellipkinc
#TODO:
#vectorize all the things
#find a better implementation of conformal
# (some kind of circle-packing thing?)
#repeated subdivision
#arange3 = np.arange(3)
#FIRST AXIS IS SPATIAL
TGTPTS3 = np.eye(3)
TGTPTS4 = np.array([[0, 1, 1, 0],
[0, 0, 1, 1]])
def normalize(vectors, axis=0):
"""Normalizes vectors in n-space. The zero vector remains the zero vector.
Args:
vectors: Array of vectors
axis: Which axis to take the norm over (by default the first axis, 0)
>>> x = np.stack((np.ones(5), np.arange(5)), axis=0)
>>> normalize(x)
array([[1. , 0.70710678, 0.4472136 , 0.31622777, 0.24253563],
[0. , 0.70710678, 0.89442719, 0.9486833 , 0.9701425 ]])
"""
n = np.linalg.norm(vectors, axis=axis, keepdims=True)
return np.where(n <= 0, 0, vectors / n)
def complex_to_float2d(arr):
"""Converts a complex array to a multidimensional float array.
>>> x = np.exp(2j*np.pi*np.linspace(0, 1, 5)).round()
>>> complex_to_float2d(x.round())
array([[ 1., 0.],
[ 0., 1.],
[-1., 0.],
[-0., -1.],
[ 1., -0.]])
"""
return arr.view(float).reshape(list(arr.shape) + [-1])
def float2d_to_complex(arr):
"""Converts a multidimensional float array to a complex array.
Input must be a float type, since there is no integer complex type.
>>> y = np.arange(8, dtype=float).reshape((-1, 2))
>>> float2d_to_complex(y)
array([[0.+1.j],
[2.+3.j],
[4.+5.j],
[6.+7.j]])
"""
return arr.view(complex)
def sqrt(x):
"""Real sqrt clipped to 0 for negative values.
>>> x = np.array([-np.inf, -1, 0, 1, np.inf, np.nan])
>>> sqrt(x)
array([ 0., 0., 0., 1., inf, nan])
"""
return np.where(x < 0, 0, np.sqrt(x))
def geodesics(lon, lat, geod, n=100, includepts=False):
"""Draw geodesics between each adjacent pair of points given by
lon and lat.
"""
lon2 = np.roll(lon, -1, axis=0)
lat2 = np.roll(lat, -1, axis=0)
result = []
for l, t, l2, t2 in zip(lon, lat, lon2, lat2):
g = geod.npts(l, t, l2, t2, n)
g.insert(0, (l, t))
g.append((l2, t2))
result.append(LineString(g))
ctrlboundary = geopandas.GeoSeries(result)
if includepts:
controlpts = arraytoptseries(np.array([lon, lat]))
ctrlpoly = geopandas.GeoSeries(pd.concat([ctrlboundary, controlpts],
ignore_index=True))
return ctrlpoly
else:
return ctrlboundary
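# Example (sketch, not part of the original module): geodesic edges between three
# control points. The WGS84 geod below is an assumption; any pyproj.Geod works.
# geod = pyproj.Geod(ellps='WGS84')
# lon = np.array([-120.0, -75.0, -100.0])
# lat = np.array([35.0, 45.0, 60.0])
# edges = geodesics(lon, lat, geod, n=50)   # GeoSeries of three LineStrings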
def transform_antipode(lon, lat):
"""Transform a point given by lon and lat to its antipode."""
lon2 = lon - 180
    lon2 = np.where(lon2 <= -180, lon2 + 360, lon2)
return lon2, -lat
def ptseriestoarray(ser):
"""Convert a geopandas GeoSeries containing shapely Points
(or LineStrings of all the same length) to an array of
shape (2, n) or (3, n).
"""
return np.stack([x.coords for x in ser], axis=-1).squeeze()
def arraytoptseries(arr, crs={'epsg': '4326'}):
"""Convert an array of shape (2, ...) or (3, ...) to a
geopandas GeoSeries containing shapely Point objects.
"""
if arr.shape[0] == 2:
result = geopandas.GeoSeries([Point(x[0], x[1])
for x in arr.reshape(2, -1).T])
else:
result = geopandas.GeoSeries([Point(x[0], x[1], x[2])
for x in arr.reshape(3, -1).T])
#result.crs = crs
return result
def transeach(func, geoms):
"""Transform each element of geoms using the function func."""
plist = []
for geom in geoms:
if isinstance(geom, Point):
#special logic for points
ll = geom.coords[0]
plist.append(Point(func(*ll)))
else:
plist.append(shapely.ops.transform(func, geom))
return geopandas.GeoSeries(plist)
def graticule(spacing1=15, spacing2=1,
lonrange = [-180, 180], latrange = [-90, 90]):
"""
Create a graticule (or another square grid)
"""
a = int((lonrange[1] - lonrange[0])//spacing2)
b = int((latrange[1] - latrange[0])//spacing1)
c = int((lonrange[1] - lonrange[0])//spacing1)
d = int((latrange[1] - latrange[0])//spacing2)
plx = np.linspace(lonrange[0], lonrange[1], num=a + 1)
ply = np.linspace(latrange[0], latrange[1], num=b + 1)
mex = np.linspace(lonrange[0], lonrange[1], num=c + 1)
mey = np.linspace(latrange[0], latrange[1], num=d + 1)
parallels = np.stack(np.meshgrid(plx, ply), axis=-1).transpose((1,0,2))
meridians = np.stack(np.meshgrid(mex, mey), axis=-1)
gratlist = [parallels[:, i] for i in range(parallels.shape[1])]
gratlist += [meridians[:, i] for i in range(meridians.shape[1])]
gratl2 = [LineString(line) for line in gratlist]
grat = geopandas.GeoSeries(gratl2)
grat.crs = {'init': 'epsg:4326'}
return grat
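# Example (sketch): a coarser 30-degree graticule limited to the western hemisphere.
# grat = graticule(spacing1=30, spacing2=5, lonrange=[-180, 0], latrange=[-90, 90])
# grat.plot()   # needs matplotlib; shown for illustration only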
#%%
def trigivenangles(angles, scale=np.pi/180):
"""Given angles, create the vertices of a triangle with those vertex
angles. Only uses the first 2 angles. The last vertex is always 1, 0.
>>> angles = np.array([45,90,45])
>>> np.round(trigivenangles(angles), decimals=8)
array([[-1., 0., 1.],
[ 0., -1., 0.]])
"""
angles = angles * scale
p0 = [np.cos(2*angles[1]), np.sin(2*angles[1])]
p1 = [np.cos(2*angles[0]), np.sin(-2*angles[0])]
p2 = [1, 0]
return np.array([p0, p1, p2]).T
def anglesgivensides(sides, scale=180/np.pi):
"""Given side lengths of a triangle, determines the interior angle at each
vertex, and the radius of the circumcircle.
>>> sides=np.array( [3,4,5])
>>> anglesgivensides(sides)
"""
#might be more stable to use law of cotangents, but eh
    r = np.prod(sides)/sqrt(
2*np.sum(sides**2*np.roll(sides,1)**2)
-np.sum(sides**4))
s1 = sides
s2 = np.roll(sides, -1)
s3 = np.roll(sides, 1)
cosangle = (s2**2 + s3**2 - s1**2)/ (2*s2*s3)
angles = np.arccos(cosangle)
return angles*scale, r
def trigivenlengths(sides):
"""Given side lengths, creates the vertices of a triangle with those
side lengths, and having circumcenter at 0,0.
>>> sides=np.array( [3,4,5])
>>> np.round(trigivenlengths(sides), decimals=8)
array([[-2.5, -0.7, 2.5],
[ 0. , -2.4, 0. ]])
"""
angles, r = anglesgivensides(sides, scale=1)
return r*trigivenangles(np.roll(angles, -1), scale=1)
#%%
def central_angle(x, y, signed=False):
"""Central angle between vectors with respect to 0. If vectors have norm
1, this is the spherical distance between them.
Args:
        x, y: Coordinates of points on the sphere, with components along axis 0.
        signed: If True, return the signed angle; otherwise its absolute value.
Returns: Array of central angles.
>>> t = np.linspace(0, np.pi, 5)
>>> c = np.cos(t)
>>> s = np.sin(t)
>>> z = np.zeros(t.shape)
>>> x = np.stack((c, s, z), axis=0)
>>> y = np.stack((c, z, s), axis=0)
>>> np.round(central_angle(x, y)/np.pi*180)
array([ 0., 60., 90., 60., 0.])
"""
cos = np.sum(x*y, axis=0)
sin = np.linalg.norm(np.cross(x, y, axis=0), axis=0)
result = np.arctan2(sin, cos)
return result if signed else abs(result)
def slerp(pt1, pt2, intervals):
"""Spherical linear interpolation.
Args:
pt1: Array of points. When interval is 0, the result is pt1.
pt2: Array of points. When interval is 1, the result is pt2.
intervals: Array of intervals at which to evaluate the
linear interpolation
>>> x = np.array([1, 0, 0])
>>> y = np.array([0, 0, 1])
>>> t = np.linspace(0, 1, 4)[:, np.newaxis]
>>> slerp(x, y, t)
array([[1. , 0. , 0. ],
[0.8660254, 0. , 0.5 ],
[0.5 , 0. , 0.8660254],
[0. , 0. , 1. ]])
"""
t = intervals
angle = central_angle(pt1, pt2)[..., np.newaxis]
return (np.sin((1 - t)*angle)*pt1 + np.sin((t)*angle)*pt2)/np.sin(angle)
def dslerp(pt1, pt2, intervals):
"""The derivative of slerp."""
t = intervals
angle = central_angle(pt1, pt2)[..., np.newaxis]
return (-np.cos((1 - t)*angle)*pt1 + np.cos(t*angle)*pt2)/np.sin(angle)
def triangle_solid_angle(a, b, c, axis=0):
"""Solid angle of a triangle with respect to 0. If vectors have norm 1,
this is the spherical area. Note there are two solid angles defined by
three points, determined by orientation of a, b, c.
Formula is from <NAME>; <NAME> (1983).
"The Solid Angle of a Plane Triangle". IEEE Trans. Biom. Eng.
BME-30 (2): 125–126. doi:10.1109/TBME.1983.325207.
Args:
a, b, c: Coordinates of points on the sphere.
Returns: Array of solid angles.
>>> t = np.linspace(0, np.pi, 5)
>>> a = np.stack([np.cos(t), np.sin(t), np.zeros(5)],axis=0)
>>> b = np.array([0, 1, 1])/np.sqrt(2)
>>> c = np.array([0, -1, 1])/np.sqrt(2)
>>> np.round(triangle_solid_angle(a, b, c), 4)
array([ 1.5708, 1.231 , 0. , -1.231 , -1.5708])
"""
axes = (axis,axis)
top = np.tensordot(a, np.cross(b, c, axis=axis), axes=axes)
na = np.linalg.norm(a, axis=0)
nb = np.linalg.norm(b, axis=0)
nc = np.linalg.norm(c, axis=0)
bottom = (na * nb * nc + np.tensordot(a, b, axes=axes) * nc
+ np.tensordot(b, c, axes=axes) * na
+ np.tensordot(c, a, axes=axes) * nb)
return 2 * np.arctan2(top, bottom)
def shoelace(pts):
"""Find area of polygon in the plane defined by pts, where pts is an
array with shape (2,n).
>>> pts = np.arange(6).reshape(2,-1)%4
>>> shoelace(pts)
2.0
"""
return abs(np.sum(np.cross(pts, np.roll(pts, -1, axis=1), axis=0)))/2
def antipode_v(ll):
"""Antipodes of points given by longitude and latitude."""
antipode = ll.copy()
antipode[0] -= 180
index = antipode[0] < -180
antipode[0, index] += 360
antipode[1] *= -1
return antipode
def omegascale(adegpts, degpts_t, geod, spacing=1):
"""Estimate scale factor and max deformation angle for a map projection
based on a grid of points
"""
#actrlpts, tgtpts,
#ar, p = geod.polygon_area_perimeter(actrlpts[0], actrlpts[1])
#at = shoelace(tgtpts)
es = geod.es
a = geod.a
factor = np.pi/180
#lon = adegpts[0]*factor
lat = adegpts[1]*factor
x = degpts_t[0]
y = degpts_t[1]
dx = np.gradient(x, factor*spacing)
dy = np.gradient(y, factor*spacing)
dxdlat, dxdlon = dx
dydlat, dydlon = dy
J = (dydlat*dxdlon - dxdlat*dydlon)
R = a*np.sqrt(1-es)/(1-es*np.sin(lat)**2)
h = sqrt((dxdlat)**2 + (dydlat)**2)*(1-es*np.sin(lat)**2)**(3/2)/(a*(1-es))
k = sqrt((dxdlon)**2 + (dydlon)**2)*(1-es*np.sin(lat)**2)**(1/2)/(a*np.cos(lat))
scale = J/(R**2*np.cos(lat))
sinthetaprime = np.clip(scale/(h*k), -1, 1)
aprime = sqrt(h**2 + k**2 + 2*h*k*sinthetaprime)
bprime = sqrt(h**2 + k**2 - 2*h*k*sinthetaprime)
sinomegav2 = np.clip(bprime/aprime, -1, 1)
omega = 360*np.arcsin(sinomegav2)/np.pi
return omega, scale
def rodrigues(center, v, theta):
"""Rodrigues formula: rotate vector v around center by angle theta
"""
cxv = np.cross(center, v)
cv = np.sum(center* v, axis=-1, keepdims=True)
cc = v*np.cos(theta) + cxv*np.sin(theta) + center*cv*(1-np.cos(theta))
return cc
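# Example (sketch): rotating the x unit vector a quarter turn about the z axis.
# center = np.array([0.0, 0.0, 1.0])
# v = np.array([1.0, 0.0, 0.0])
# rodrigues(center, v, np.pi/2)   # approximately [0, 1, 0]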
#%%
class Projection(ABC):
"""Don't subclass this without subclassing one of
transform and transform_v and one of invtransform and invtransform_v,
or else an infinite regression will occur"""
def transform(self, x, y, z = None, **kwargs):
if z is None:
pts = np.stack([x,y])
else:
pts = np.stack([x,y,z])
vresult = self.transform_v(pts, **kwargs)
return vresult
def invtransform(self, x, y, z=None, **kwargs):
if z is None:
pts = np.stack([x,y])
else:
pts = np.stack([x,y,z])
vresult = self.invtransform_v(pts, **kwargs)
return vresult
def transform_v(self, pts, **kwargs):
rpts = pts.reshape((pts.shape[0],-1)).T
result = []
for xy in rpts:
result.append(self.transform(*xy, **kwargs))
result = np.array(result)
shape = [-1, ] + list(pts.shape[1:])
return result.T.reshape(shape)
def invtransform_v(self, pts, **kwargs):
rpts = pts.reshape((pts.shape[0],-1)).T
result = []
for xy in rpts:
result.append(self.invtransform(*xy, **kwargs))
result = np.array(result)
shape = [-1, ] + list(pts.shape[1:])
return result.T.reshape(shape)
#%%
class UV(Projection):
nctrlpts = 4
@staticmethod
def grid(**kwargs):
"""Create a square grid"""
return graticule(spacing1=1, spacing2=0.01,
lonrange=[0,1], latrange=[0,1])
@staticmethod
def gridpolys(n=11):
poi = np.array(np.meshgrid(np.linspace(0, 1, n),
np.linspace(0, 1, n)))
poilist = []
for i, j in np.ndindex(n-1,n-1):
x = Polygon([poi[:, i, j], poi[:, i, j+1],
poi[:, i+1, j+1], poi[:, i+1, j]])
poilist.append(x)
poiframe = geopandas.geoseries.GeoSeries(poilist)
return poiframe
@staticmethod
def segment(uv):
u, v = uv
index1 = u > v
index2 = u < 1 - v
#1 and 2 = 0
#1 and not 2 = 1
#not 1 and not 2 = 2
#not 1 and 2 = 3
result = np.zeros(u.shape)
result[index1 & ~index2] = 1
result[~index1 & ~index2] = 2
result[~index1 & index2] = 3
return result
class Bilinear(UV):
"""Bilinear interpolation
"""
_bilinear_mat = np.array([[ 1, 1, 1, 1],
[-1, 1, 1,-1],
[-1,-1, 1, 1],
[ 1,-1, 1,-1]])/4
def __init__(self, tgtpts):
self.tgtpts = tgtpts
self.abcd = self._bilinear_mat @ tgtpts.T
def transform(self, u, v):
"""u and v should have the same shape"""
abcd = self.abcd
stack = np.stack([np.ones(u.shape), u, v, u*v])
return (abcd @ stack).T
def transform_v(self, pts, **kwargs):
return self.transform(pts[0], pts[1])
def invtransform_v(self, pts):
abcd = self.abcd
A = abcd[:,0]
B = abcd[:,1]
C = abcd[:,2]
D = abcd[:,3] - pts
AB = np.cross(A,B)
AC = np.cross(A,C)
AD = np.cross(A,D)
BC = np.cross(B,C)
BD = np.cross(B,D)
CD = np.cross(C,D)
ua = 2*BD
ub = AD + BC
uc = 2*AC
va = 2*CD
vb = AD - BC
vc = 2*AB
u1 = (-ub + sqrt(ub**2 - ua*uc) )/ua
#u2 = (-ub - sqrt(ub**2 - ua*uc) )/ua
#v2 = (-vb + sqrt(vb**2 - va*vc) )/va
v1 = (-vb - sqrt(vb**2 - va*vc) )/va
return u1, v1
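    # Note (added, descriptive only): invtransform_v inverts the bilinear map by
    # solving the quadratic obtained by eliminating one coordinate from
    # x = a + b*u + c*v + d*u*v; the roots kept here are the branch intended to
    # land inside the target quadrilateral (the alternate roots are commented out).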
class Homeomorphism(UV):
"""Homeomorphism"""
def __init__(self, tgtpts):
self.tgtpts = tgtpts
class Barycentric(Projection):
"""Transforms between plane and barycentric coordinates"""
nctrlpts = 3
def __init__(self, tgtpts):
self.tgtpts = tgtpts
m = np.concatenate([self.tgtpts, np.ones((1, 3))])
self.minv = np.linalg.inv(m)
def transform_v(self, bary):
"""Convert barycentric to plane"""
rbary = bary.reshape(3,-1)
result = self.tgtpts @ rbary
shape = [2,] + list(bary.shape[1:])
return result.reshape(shape)
def invtransform_v(self, xy):
"""Convert plane to barycentric"""
rxy = xy.reshape(2,-1)
shape = list(rxy.shape)
shape[0] = 1
xy1 = np.concatenate([rxy, np.ones(shape)])
result = self.minv @ xy1
shape = [3,] + list(xy.shape[1:])
return result.reshape(shape)
@staticmethod
def grid(spacing1=0.1, spacing2=1E-2, rang = [0, 1], eps=1E-8):
"""Create a triangle grid in barycentric coordinates
"""
nx = int((rang[1] - rang[0])/spacing1 + 1)
ny = int((rang[1] - rang[0])/spacing2 + 1)
x = np.linspace(rang[0], rang[1], nx)
y = np.linspace(rang[0], rang[1], ny)
z = 1 - x[..., np.newaxis] - y
#valid = (rang[0] <= z) & (z <= rang[1])
#z[~valid] = np.nan
bary1 = np.stack([np.broadcast_to(x[..., np.newaxis], (nx, ny)),
np.broadcast_to(y, (nx, ny)),
z])
bary = np.concatenate([bary1, np.roll(bary1, -1, axis=0),
np.roll(bary1, -2, axis=0)], axis=1)
gratlist = [bary[:, i] for i in range(nx*3)]
gratl2 = []
for i in range(nx*3):
g = gratlist[i]
valid = np.all((rang[0]-eps <= g) & (g <= rang[1]+eps), axis=0)
if np.sum(valid) > 1:
g = g[..., valid]
gratl2.append(LineString(g.T))
grat = geopandas.GeoSeries(gratl2)
return grat
@staticmethod
def gridpolys(n=11, eps=0.01):
poi = np.meshgrid(np.linspace(0, 1, n), np.linspace(0, 1, n))
poi.append(1 - poi[0] - poi[1])
poi = np.array(poi)
poilist = []
for i,j in np.ndindex(n-1,n-1):
if poi[2, i, j] >= eps:
x = Polygon([poi[:, i, j],poi[:, i, j+1],poi[:, i+1, j]])
poilist.append(x)
if poi[2, i+1, j+1] >= -eps:
y = Polygon([poi[:, i+1, j+1],poi[:, i+1, j],poi[:, i, j+1]])
poilist.append(y)
poiframe = geopandas.geoseries.GeoSeries(poilist)
return poiframe
@staticmethod
def segment(bary):
return np.argmin(bary, axis=0)
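# Example (sketch): round-tripping a point through barycentric coordinates of an
# arbitrary target triangle (the coordinates below are made up for illustration).
# tgt = np.array([[0.0, 1.0, 0.0],
#                 [0.0, 0.0, 1.0]])
# bc = Barycentric(tgt)
# xy = np.array([[0.2], [0.3]])
# bary = bc.invtransform_v(xy)        # shape (3, 1), sums to 1
# bc.transform_v(bary)                # recovers xy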
class UnitVector(Projection):
"""Convert longitude and latitude to unit vector normals.
The methods of this class are static, and mostly organized in a class
for consistency."""
@staticmethod
def transform(x, y, **kwargs):
pts = np.stack([x,y])
vresult = UnitVector.transform_v(pts, **kwargs)
return vresult
@staticmethod
def invtransform(x, y, z, **kwargs):
pts = np.stack([x,y,z])
vresult = UnitVector.invtransform_v(pts, **kwargs)
return vresult
@staticmethod
def transform_v(ll, scale=np.pi/180):
"""Convert longitude and latitude to 3-vector
>>> ll = np.arange(6).reshape(2,3)*18
>>> UnitVector.transform_v(ll)
array([[5.87785252e-01, 2.93892626e-01, 4.95380036e-17],
[0.00000000e+00, 9.54915028e-02, 3.59914664e-17],
[8.09016994e-01, 9.51056516e-01, 1.00000000e+00]])
"""
lon, lat = ll*scale
x = np.cos(lat)*np.cos(lon)
y = np.cos(lat)*np.sin(lon)
z = np.sin(lat)
return np.stack([x, y, z], axis=0)
@staticmethod
def invtransform_v(pts, scale=180/np.pi):
"""Convert 3-vector to longitude and latitude.
Vector does not have to be normalized.
>>> UnitVector.invtransform_v(np.eye(3))
array([[ 0., 90., 0.],
[ 0., 0., 90.]])
"""
lat = scale*np.arctan2(pts[2], sqrt(pts[1]**2 + pts[0]**2))
lon = scale*np.arctan2(pts[1], pts[0])
return np.stack([lon, lat], axis=0)
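# Example (sketch): a lon/lat pair survives a round trip through unit vectors.
# ll = np.array([[10.0], [45.0]])
# np.allclose(UnitVector.invtransform_v(UnitVector.transform_v(ll)), ll)   # True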
_unitsphgeod = pyproj.Geod(a=1, b=1)
class CtrlPtsProjection(Projection, ABC):
"""Subclass for any map projection that uses (2 or more) control points."""
def __init__(self, ctrlpts, geod = _unitsphgeod):
"""Parameters:
ctrlpts: 2x3 or 2x4 Numpy array, latitude and longitude of
each control point
geod= a pyproj.Geod object. For a unit sphere use
pyproj.Geod(a=1,b=1)
"""
n = ctrlpts.shape[1]
if self.nctrlpts != n:
raise ValueError(
'ctrlpts has wrong number of points for this projection')
self.geod = geod
#it's possible to get a geod where this would give the wrong answer,
#but I think it would have to be really weird
area, _ = geod.polygon_area_perimeter([0,120,-120],[0,0,0])
self.totalarea = 2*area
self.ctrlpts = ctrlpts
ctrlpts_v = UnitVector.transform_v(ctrlpts)
self.ctrlpts_v = ctrlpts_v
center_v = ctrlpts_v.sum(axis=1)
self.center_v = center_v / np.linalg.norm(center_v)
self.center = UnitVector.invtransform_v(center_v)
antipode = antipode_v(ctrlpts)
self.antipode = antipode
self.antipode_v = UnitVector.transform_v(antipode)
self.sa = 0
if self.nctrlpts > 2:
faz, baz, sides = self.geod.inv(ctrlpts[0], ctrlpts[1],
np.roll(ctrlpts[0], -1),
np.roll(ctrlpts[1], -1))
self.sides = sides
self.faz = faz
self.baz = baz
self.ctrl_angles = (faz - np.roll(baz, 1))%360
area, _ = geod.polygon_area_perimeter(*ctrlpts)
self.area = area
self.ca = central_angle(ctrlpts_v,
np.roll(ctrlpts_v, -1, axis=1))
for i in range(1, self.nctrlpts-1):
self.sa += triangle_solid_angle(ctrlpts_v[..., 0],
ctrlpts_v[..., i],
ctrlpts_v[..., i+1])
self.edgenormals = np.cross(ctrlpts_v,
np.roll(ctrlpts_v, -1, axis=1), axis=0)
else:
faz, baz, sides = self.geod.inv(ctrlpts[0,0], ctrlpts[1,0],
ctrlpts[0,1], ctrlpts[1,1])
self.sides = sides
self.faz = faz
self.baz = baz
self.area = 0
self.ca = central_angle(ctrlpts_v[..., 0], ctrlpts_v[..., 1])
self.edgenormals = np.cross(ctrlpts_v[..., 0], ctrlpts_v[..., 1])
self.cosca = np.cos(self.ca)
self.sinca = np.sin(self.ca)
if self.sa < 0:
warnings.warn('control polygon is in negative orientation, '
+ 'may cause unusual results')
if self.nctrlpts == 4:
ctrlpts_v = self.ctrlpts_v
v0 = ctrlpts_v[..., 0]
v1 = ctrlpts_v[..., 1]
v2 = ctrlpts_v[..., 2]
v3 = ctrlpts_v[..., 3]
poip1 = np.cross(np.cross(v0, v1), np.cross(v3, v2))
poip2 = np.cross(np.cross(v0, v3), np.cross(v1, v2))
poip = np.stack([[poip1, -poip1],
[poip2, -poip2]]).transpose(2,0,1)
poip = poip / np.linalg.norm(poip, axis=0)
self.poi_v = poip
self.poi = UnitVector.invtransform_v(poip)
self.crossx = np.cross(ctrlpts_v,
np.roll(ctrlpts_v, -2, axis=1),
axis=0)[..., :2]
def orienttgtpts(self, tgtpts, N = (0, 90)):
"""Orient target points so that line from 0 to the projection of N
points up. Will fail if map projection doesn't define tgtpts."""
pN = self.transform(*N)
if np.allclose(pN, [0,0]):
raise ValueError('projection of N too close to 0')
angle = np.arctan2(pN[0],pN[1])
rotm = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
result = rotm @ tgtpts
self.tgtpts = result
def lune(self, lon, lat):
"""
Determine which lune a point or series of points lies in.
Lune 0 is the lune with vertex at the centroid and edges passing through
control points 0 and 1. Lune 1 is the same using control pts 1 and 2,
and Lune 2 uses control pts 2 and 0.
"""
#inexact on ellipsoids but close enough
testpt = UnitVector.transform(lon, lat)
testpt_v = testpt.reshape(3,-1)
ctrlpts_v = self.ctrlpts_v
center_v = self.center_v
cx = np.cross(center_v, ctrlpts_v, axis=0)
sk = cx.T @ testpt_v
sg = sk >= 0
ind = sg & ~np.roll(sg, shift=-1, axis=0)
result = np.argmax(ind, axis=0)
return result.reshape(testpt.shape[1:])
class BarycentricMapProjection(CtrlPtsProjection):
nctrlpts = 3
tweak = False
bcenter = np.ones(3)/3
def fixbary(self, bary):
if self.tweak:
return self.fixbary_normalize(bary)
else:
return self.fixbary_subtract(bary)
@staticmethod
def fixbary_normalize(bary):
"""Converts array bary to an array with sum = 1 by dividing by
bary.sum(). Will return nan if bary.sum() == 0.
>>> fixbary_normalize(np.arange(3))
array([0. , 0.33333333, 0.66666667])
"""
bary = np.array(bary)
return bary / np.sum(bary, axis=0, keepdims=True)
@staticmethod
def fixbary_subtract(bary):
"""Converts array bary to an array with sum = 1 by subtracting
(bary.sum() - 1)/bary.shape[0].
>>> fixbary_subtract(np.arange(3))
array([-0.66666667, 0.33333333, 1.33333333])
"""
bary = np.array(bary)
s = (np.sum(bary, axis=0, keepdims=True) - 1)/bary.shape[0]
return bary - s
def _fix_corners(self, lon, lat, result):
ctrlpts = self.ctrlpts
index0 = (lon == ctrlpts[0,0]) & (lat == ctrlpts[1,0])
index1 = (lon == ctrlpts[0,1]) & (lat == ctrlpts[1,1])
index2 = (lon == ctrlpts[0,2]) & (lat == ctrlpts[1,2])
#print(lon, lat, ctrlpts, result)
#print(index0.shape, result.shape, np.array([1, 0, 0])[..., np.newaxis].shape)
result[..., index0] = np.array([1, 0, 0])[..., np.newaxis]
result[..., index1] = np.array([0, 1, 0])[..., np.newaxis]
result[..., index2] = np.array([0, 0, 1])[..., np.newaxis]
return result
def _fix_corners_inv(self, bary, result):
index0 = (bary[0] == 1)
index1 = (bary[1] == 1)
index2 = (bary[2] == 1)
if np.any(index0):
result[..., index0] = self.ctrlpts_v[..., 0, np.newaxis]
if np.any(index1):
result[..., index1] = self.ctrlpts_v[..., 1, np.newaxis]
if np.any(index2):
result[..., index2] = self.ctrlpts_v[..., 2, np.newaxis]
return result
class UVMapProjection(CtrlPtsProjection):
nctrlpts = 4
bcenter = np.ones(2)/2
def _fix_corners(self, lon, lat, result):
ctrlpts = self.ctrlpts
index0 = (lon == ctrlpts[0,0]) & (lat == ctrlpts[1,0])
index1 = (lon == ctrlpts[0,1]) & (lat == ctrlpts[1,1])
index2 = (lon == ctrlpts[0,2]) & (lat == ctrlpts[1,2])
index3 = (lon == ctrlpts[0,3]) & (lat == ctrlpts[1,3])
result[..., index0] = np.array([ 0, 0])[..., np.newaxis]
result[..., index1] = np.array([ 1, 0])[..., np.newaxis]
result[..., index2] = np.array([ 1, 1])[..., np.newaxis]
result[..., index3] = np.array([ 0, 1])[..., np.newaxis]
return result
def _fix_corners_inv(self, x, y, result):
index0 = (x == 0) & (y == 0)
index1 = (x == 1) & (y == 0)
index2 = (x == 1) & (y == 1)
index3 = (x == 0) & (y == 1)
if np.any(index0):
result[..., index0] = self.ctrlpts_v[..., 0, np.newaxis]
if np.any(index1):
result[..., index1] = self.ctrlpts_v[..., 1, np.newaxis]
if np.any(index2):
result[..., index2] = self.ctrlpts_v[..., 2, np.newaxis]
if np.any(index3):
result[..., index3] = self.ctrlpts_v[..., 3, np.newaxis]
return result
#%% not-polygonal projections
class ChambTrimetric(CtrlPtsProjection):
"""Chamberlin trimetric projection"""
#FIXME this implementation fails for control triangles with
#high aspect ratios
nctrlpts = 3
def __init__(self, ctrlpts, geod=_unitsphgeod):
super().__init__(ctrlpts, geod)
self.tgtpts = trigivenlengths(self.sides)
try:
self.orienttgtpts(self.tgtpts)
except ValueError:
pass
def transform(self, x, y, **kwargs):
if hasattr(x, '__iter__'):
raise TypeError()
tgtpts = self.tgtpts
f, b, rad = self.geod.inv(self.ctrlpts[0], self.ctrlpts[1],
x*np.ones(3), y*np.ones(3))
faz = self.faz
raz1 = (faz - f) % 360
radsq = np.array(rad).squeeze()**2
ctgt = tgtpts.T.copy().view(dtype=complex).squeeze()
a = np.roll(ctgt, -1) - ctgt
b = ctgt
l = abs(a)
lsq = l**2
rsq = radsq/lsq
ssq = np.roll(radsq, -1, axis=-1)/lsq
x0 = (rsq - ssq + 1)/2
y0 = sqrt(-rsq**2 + 2*rsq*(ssq + 1) - (ssq - 1)**2)/2
y0[np.isnan(y0)] = 0
y = np.where(raz1 > 180, -y0, y0)
z0 = x0 +1j*y
pts = (a * z0 + b)
result = np.mean(pts)
return result.real, result.imag
def invtransform(self, *args, **kwargs):
return NotImplemented
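# Example (sketch): projecting a single point with the Chamberlin trimetric
# projection; the control points below are illustrative only, and transform()
# deliberately rejects array inputs.
# ctrl = np.array([[-120.0, -75.0, -100.0],
#                  [35.0, 45.0, 60.0]])
# ct = ChambTrimetric(ctrl, geod=pyproj.Geod(ellps='WGS84'))
# x, y = ct.transform(-95.0, 40.0)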
class LstSqTrimetric(ChambTrimetric):
"""Least-squares variation of the Chamberlin trimetric projection"""
def transform(self, x, y, **kwargs):
init = super().transform(x, y)
tgtpts = self.tgtpts
f, b, rad = self.geod.inv(self.ctrlpts[0], self.ctrlpts[1],
x*np.ones(3), y*np.ones(3))
def objective(v):
x = v[0]
y = v[1]
a = tgtpts[0]
b = tgtpts[1]
xma = x-a
ymb = y-b
dist = np.sqrt(xma**2 + ymb**2)
result = np.sum((dist - rad)**2 )
f = 1 - rad/dist
f[rad <= 0] = 1
jac = 2*np.array([np.sum(xma*f), np.sum(ymb*f)])
return result, jac
res = minimize(objective, init, jac=True,
method = 'BFGS')
return res.x
class LinearTrimetric(CtrlPtsProjection):
"""The linear variation of the Chamberlin Trimetric projection."""
nctrlpts = 3
matrix1 = np.array([[0,-1],
[1,0]])
matrix2 = np.array([[0, -1, 1],
[1, 0, -1],
[-1, 1, 0]])
matrixinv1 = np.array([[-2,1,1],
[1,-2,1],
[1,1,-2]])*2/3
def __init__(self, ctrlpts, geod=_unitsphgeod):
"""Parameters:
ctrlpts: 2x3 Numpy array, latitude and longitude of each control point
geod= a pyproj.Geod object. For a unit sphere use
pyproj.Geod(a=1,b=1).
"""
super().__init__(ctrlpts, geod)
self.radius = ((geod.a**(3/2) + geod.b**(3/2))/2)**(2/3)
self.tgtpts = trigivenlengths(self.sides)
self.setmat()
# try:
# self.orienttgtpts(self.tgtpts)
# self.setmat()
# except ValueError:
# pass
vctrl = self.ctrlpts_v
self.invctrlvector = np.linalg.pinv(vctrl)
self.invperpmatrix = self.invctrlvector @ self.invctrlvector.T
cosrthmin = 1 / np.sqrt(self.invperpmatrix.sum())
self.hminall = np.arccos(cosrthmin)**2
def setmat(self, tgtpts=None):
"""Set matrices that use tgtpts"""
if tgtpts is None:
tgtpts = self.tgtpts
else:
self.tgtpts = tgtpts
tgtde = np.linalg.det(np.concatenate([tgtpts, np.ones((1,3))], axis=0))
self.m = self.matrix1 @ tgtpts @ self.matrix2 /(2*tgtde)
self.minv = self.matrixinv1 @ tgtpts.T
def transform_v(self, pts):
rpts = pts.reshape((2,-1)).T
rad = []
for x,y in rpts:
f, b, radi = self.geod.inv(x*np.ones(3), y*np.ones(3),
self.ctrlpts[0], self.ctrlpts[1])
rad.append(radi)
shape = list(pts.shape)
shape[0] = 3
rad = np.array(rad).T
radsq = np.array(rad)**2
result = self.m @ radsq
return result.reshape(pts.shape)
def invtransform_v(self, pts, n=20, stop=1E-8):
if not self.geod.sphere:
warnings.warn('inverse transform is approximate on ellipsoids')
rpts = pts.reshape((2,-1))
k = self.minv @ rpts/self.radius**2
hmin = -np.min(k, axis=0)
print('k: ', k)
#hmax = np.pi**2-np.max(k, axis=0)
hminall = self.hminall
h = np.where(hmin < hminall, hminall, hmin)
print('h: ', h)
for i in range(n):
rsq = (k + h)
#pos = rsq > 0
neg = rsq < 0
zer = rsq == 0
c = np.where(neg, np.cosh(np.sqrt(-rsq)), np.cos(np.sqrt(rsq)))
b = np.where(neg, np.sinh(np.sqrt(-rsq)),
np.sin(np.sqrt(rsq)))/np.sqrt(np.abs(rsq))
b[zer] = 1
f = np.einsum('i...,ij,j...', c, self.invperpmatrix, c) - 1
fprime = np.einsum('i...,ij,j...', c, self.invperpmatrix, b)
delta = f/fprime
h += delta
print('delta:', delta)
print('h: ', h)
if np.max(np.abs(delta)) < stop:
break
#h = np.clip(h, hmin, hmax)
rsq = np.clip(k + h, 0, np.pi**2)
c = np.cos(np.sqrt(rsq))
vector = self.invctrlvector.T @ c
print(c)
print(vector)
return UnitVector.invtransform_v(vector).reshape(pts.shape)
def nmforplot(self, pts, n=100):
rpts = pts.reshape((2,-1))
k = self.minv @ rpts/self.radius**2
hmin = -np.min(k, axis=0)
hmax = np.pi**2-np.max(k, axis=0)
h = np.linspace(hmin,hmax,100).T
rsq = (k[..., np.newaxis] + h)
c = np.cos(np.sqrt(rsq))
nm = np.einsum('i...,ij,j...', c, self.invperpmatrix, c)
return h, nm
class Alfredo(BarycentricMapProjection):
"""this doesn't really accomplish anything"""
def __init__(self, ctrlpts, tweak=False):
"""Parameters:
ctrlpts: 2x3 Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
ctrlpts_v = self.ctrlpts_v
self.cosADfactor = (np.cross(np.roll(ctrlpts_v, 1, axis=1),
np.roll(ctrlpts_v, -1, axis=1), axis=0) +
ctrlpts_v * np.linalg.det(ctrlpts_v))
self.tweak = tweak
def transform_v(self, ll):
rll = ll.reshape(2, -1)
ctrlpts_v = self.ctrlpts_v
cosADfactor = self.cosADfactor
vtestpt = UnitVector.transform_v(rll)
cosAPi = (vtestpt.T @ ctrlpts_v).T
cosADi = (vtestpt.T @ cosADfactor).T
pli = np.sqrt((1-cosAPi)/(1-cosADi))
b = 1 - pli
result = self.fixbary(b)
shape = (3,) + ll.shape[1:]
return result.reshape(shape)
def invtransform(self, *args, **kwargs):
return NotImplemented
#%%
class Areal(BarycentricMapProjection):
"""Spherical areal projection."""
def __init__(self, ctrlpts, geod=_unitsphgeod):
"""Parameters:
ctrlpts: 2x3 Numpy array, latitude and longitude of each control point
geod: a pyproj.Geod object. For a unit sphere use
pyproj.Geod(a=1,b=1).
"""
super().__init__(ctrlpts, geod)
a_i = np.sum(np.roll(self.ctrlpts_v, -1, axis=1) *
np.roll(self.ctrlpts_v, 1, axis=1), axis=0)
self.a_i = a_i
self.b_i = (np.roll(a_i, -1) + np.roll(a_i, 1))/(1+a_i)
self.tau_c = self.tau(self.area)
def tau(self, area):
"""Convert areas on the geod to tau values for inverse transform"""
return np.tan(area/self.totalarea*2*np.pi)
def transform(self, x, y):
try:
areas = []
for i in range(3):
smtri = self.ctrlpts.copy()
smtri[:, i] = np.array([x,y])
a, _ = self.geod.polygon_area_perimeter(smtri[0],
smtri[1])
areas.append(a)
areas = np.array(areas)
return areas/self.area
except ValueError:
raise TypeError()
def invtransform_v(self, bary):
rbary = bary.reshape(3,-1)
if not self.geod.sphere:
warnings.warn('inverse transform is approximate on ellipsoids')
b_i = self.b_i[:,np.newaxis]
tau = self.tau_c
tau_i = self.tau(self.area*rbary)
t_i = tau_i/tau
c_i = t_i / ((1+b_i) + (1-b_i) * t_i)
f_i = c_i / (1 - np.sum(c_i, axis=0))
vector = self.ctrlpts_v @ f_i
shape = [2] + list(bary.shape[1:])
result = UnitVector.invtransform_v(vector).reshape(shape)
return result
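# Example (sketch): areal coordinates on the spherical octant with vertices at
# (0N, 0E), (0N, 90E) and the north pole, using the default unit-sphere geod.
# ctrl = np.array([[0.0, 90.0, 0.0],
#                  [0.0, 0.0, 90.0]])
# proj = Areal(ctrl)
# bary = proj.transform(30.0, 30.0)                       # sums to ~1 inside the triangle
# lonlat = proj.invtransform_v(np.asarray(bary)[:, np.newaxis])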
#%%
class BisectTri(BarycentricMapProjection):
"""Inverse is only approximate
"""
def __init__(self, ctrlpts):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
ctrlpts_v = self.ctrlpts_v
#v_0 = ctrlpts_v[..., 0]
#v_1 = ctrlpts_v[..., 1]
#v_2 = ctrlpts_v[..., 2]
midpoint_v = np.roll(ctrlpts_v, 1, axis=1) + np.roll(ctrlpts_v, -1, axis=1)
midpoint_v /= np.linalg.norm(midpoint_v, axis=0, keepdims=True)
self.midpoint_v = midpoint_v
self.midpoint = UnitVector.invtransform_v(self.midpoint_v)
aream = []
for i in range(3):
#index = np.roll(np.arange(3), -i)[:2]
#lona = list(ctrlpts[0, index]) + [self.midpoint[0,i],]
#lata = list(ctrlpts[1, index]) + [self.midpoint[1,i],]
#am, _ = self.geod.polygon_area_perimeter(lona, lata)
am = triangle_solid_angle(ctrlpts_v[:,i], ctrlpts_v[:,(i+1)%3],
midpoint_v[:,i])
#vc[:,0], mi, lproj)
aream.append(am)
self.aream = np.array(aream)
def transform(self, lon, lat):
        lon + 0  # raises TypeError if lon is not a scalar
vtestpt = UnitVector.transform(lon, lat)
areas = []
vctrlpts = self.ctrlpts_v
actrlpts = self.ctrlpts
geod = self.geod
area = self.area
for i in range(3):
vc = np.roll(vctrlpts, i, axis=1)
#ac = np.roll(actrlpts, i, axis=1)
mi = self.midpoint_v[:,-i]#?
lproj = -np.cross(np.cross(vc[..., 1], vc[..., 2]),
np.cross(vc[..., 0], vtestpt))
#lllproj = UnitVector.invtransform_v(lproj)
#loni = [ac[0,0], mi[0], lllproj[0]]
#lati = [ac[1,0], mi[1], lllproj[1]]
#a1, _ = geod.polygon_area_perimeter(loni, lati)
a1 = triangle_solid_angle(vc[:,0], mi, lproj)
areas.append(a1)
areas = np.array(areas) + self.aream
aa = areas/area
bx = []
for i in range(3):
x,y,z = np.roll(aa, i, axis=0)
b = (y**2 * x**2 + z**2 * x**2 - y**2 * z**2
- x * y**2 + z * y**2
- 2*y*x**2 - x*z**2 + y*z**2 + x**2
+ 3*y*x + z*x - 2*y*z
- 2*x - y + z + 1)
bx.append(b)
bx = np.array(bx)
betax = bx/bx.sum()
return self._fix_corners(lon, lat, betax)
def invtransform(self, b1, b2, b3):
        b1 + 0  # raises TypeError if b1 is not a scalar
beta = np.array([b1,b2,b3])
vctrlpts3 = self.ctrlpts_v
#xs = []
ptts = []
for i in range(3):
beta1, beta2, beta3 = np.roll(beta, -i, axis=0)
x = beta2/(1 - beta1)
#xs.append(x)
a = x * self.area
pt0 = vctrlpts3[:,i]
pt1 = vctrlpts3[:,i-2]
pt2 = vctrlpts3[:,i-1]
cosw = pt1 @ pt2
w = np.arccos(cosw)
sinw = np.sin(w)
p2 = ((np.cos(a/2)* pt2 @ np.cross(pt0, pt1)- np.sin(a/2)*pt2 @ (pt1 + pt0))
+ np.sin(a/2)*cosw*(1 + pt1 @ pt0))
p3 = sinw*np.sin(a/2)*(1 + pt0 @ pt1)
r = 2*p3*p2/(p2**2 - p3**2)
t = np.arctan(r)/w#really close to just x
#print(x, t)
#t = x
ptt = slerp(pt2, pt1, t)
ptts.append(ptt)
ptts = np.array(ptts).T
ns = np.cross(vctrlpts3, ptts, axis=0)
pts = np.cross(ns, np.roll(ns, -1, axis=1), axis=0)
v = pts.sum(axis=1)
v = self._fix_corners_inv(beta, v)
return UnitVector.invtransform_v(v)
class BisectTri2(BarycentricMapProjection):
"""Inverse is only approximate"""
def __init__(self, ctrlpts):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
ctrlpts_v = self.ctrlpts_v
#v_0 = ctrlpts_v[..., 0]
#v_1 = ctrlpts_v[..., 1]
#v_2 = ctrlpts_v[..., 2]
midpoint_v = np.roll(ctrlpts_v, 1, axis=1) + np.roll(ctrlpts_v, -1, axis=1)
midpoint_v /= np.linalg.norm(midpoint_v, axis=0, keepdims=True)
self.midpoint_v = midpoint_v
self.midpoint = UnitVector.invtransform_v(self.midpoint_v)
def transform(self, lon, lat):
        lon + 0  # raises TypeError if lon is not a scalar
vtestpt = UnitVector.transform(lon, lat)
aa = []
vctrlpts = self.ctrlpts_v
actrlpts = self.ctrlpts
for i in range(3):
vc = np.roll(vctrlpts, i, axis=1)
ac = np.roll(actrlpts, i, axis=1)
mi = self.midpoint[:,-i]
lproj = -np.cross(np.cross(vc[..., 1], vc[..., 2]),
np.cross(vc[..., 0], vtestpt))
lllproj = UnitVector.invtransform_v(lproj)
dist1x = central_angle(vc[..., 1], lproj)
f, b, dist1x = self.geod.inv(mi[0], mi[1],
lllproj[0],lllproj[1])
f0, b0, _ = self.geod.inv(mi[0], mi[1],
ac[0,2], ac[1,2])
deltaf = (f-f0) % 360
if (deltaf <= 90) | (deltaf > 270):
s = 1
else:
s = -1
t = s*dist1x/self.sides[i] + 1/2
#print(t)
aa.append(t)
bx = []
for i in range(3):
x,y,z = np.roll(aa, i, axis=0)
b = (y**2 * x**2 + z**2 * x**2 - y**2 * z**2
- x * y**2 + z * y**2
- 2*y*x**2 - x*z**2 + y*z**2 + x**2
+ 3*y*x + z*x - 2*y*z
- 2*x - y + z + 1)
bx.append(b)
bx = np.array(bx)
betax = bx/bx.sum()
return self._fix_corners(lon, lat, betax)
def invtransform(self, b1, b2, b3):
        b1 + 0  # raises TypeError if b1 is not a scalar
beta = np.array([b1,b2,b3])
vctrlpts3 = self.ctrlpts_v
#xs = []
ptts = []
for i in range(3):
beta1, beta2, beta3 = np.roll(beta, -i, axis=0)
x = beta2/(1 - beta1)
pt1 = vctrlpts3[:,i-2]
pt2 = vctrlpts3[:,i-1]
ptt = slerp(pt2, pt1, x)
ptts.append(ptt)
ptts = np.array(ptts).T
ns = np.cross(vctrlpts3, ptts, axis=0)
pts = np.cross(ns, np.roll(ns, -1, axis=1), axis=0)
v = pts.sum(axis=1)
v = self._fix_corners_inv(beta, v)
return UnitVector.invtransform_v(v)
class FullerEq(BarycentricMapProjection):
def transform_v(self, ll):
vtestpt_pre = UnitVector.transform(*ll)
vtestpt = vtestpt_pre.reshape(3,-1)
ctrlpts_v = self.ctrlpts_v
b = []
for i in range(3):
v0 = ctrlpts_v[..., i]
v1 = ctrlpts_v[..., (i+1)%3]
v2 = ctrlpts_v[..., (i-1)%3]
cosw01 = v0 @ v1
cosw02 = v0 @ v2
w01 = np.arccos(cosw01)
w02 = np.arccos(cosw02)
w = (w01 + w02) / 2
sinw = np.sin(w)
cosw = np.cos(w)
vt01 = np.tensordot(vtestpt, np.cross(v0, v1), axes=(0,0))
vt12 = np.tensordot(vtestpt, np.cross(v1, v2), axes=(0,0))
vt20 = np.tensordot(vtestpt, np.cross(v2, v0), axes=(0,0))
bi = np.arctan2(sinw*vt12, cosw*vt12 + vt01 + vt20)/w
#gx = vt12 + cosw*(vt01 + vt20)
#tx = np.arctan2(sinw*(vt20 + vt01),gx)/w
b.append(bi)
#b.append(1-tx)
b = np.array(b)
result = self.fixbary_subtract(b)
return result.reshape(vtestpt_pre.shape)
def invtransform(self, b1, b2, b3):
b1 + 0 #still not vectorized
bi = np.array([b1, b2, b3])
v0 = self.ctrlpts_v[..., 0]
v1 = self.ctrlpts_v[..., 1]
v2 = self.ctrlpts_v[..., 2]
w = self.ca.mean()
bi = np.array([b1, b2, b3])
cw = np.cos(w)
#sw = np.sin(w)
cbw = np.cos(bi*w)
sbw = np.sin(bi*w)
        pcbw = np.prod(cbw)
        psbw = np.prod(sbw)
scc = np.sum(sbw * np.roll(cbw, -1) * np.roll(cbw, 1))
css = np.sum(cbw*np.roll(sbw, -1)*np.roll(sbw, 1))
objw2 = np.array([2*pcbw - cw - 1,
2*scc,
3*pcbw + 3 - css,
2*psbw])
rts = np.roots(objw2)[-1]#FIXME solve this cubic explicitly
rts = rts.real
k = np.arctan(rts)/w
#f0 = np.where(bi[0] + k > 1, -1, 1)
f1 = np.where(bi[1] + k > 1, -1, 1)
f2 = np.where(bi[2] + k > 1, -1, 1)
#v01 = slerp(v1, v0, bi[0] + k)
#v02 = slerp(v2, v0, bi[0] + k)
#cx12 = np.cross(v01, v02)*f0
v12 = slerp(v2, v1, bi[1] + k)
v10 = slerp(v0, v1, bi[1] + k)
cx20 = np.cross(v12, v10)*f1
v20 = slerp(v0, v2, bi[2] + k)
v21 = slerp(v1, v2, bi[2] + k)
cx01 = np.cross(v20, v21)*f2
v0x = normalize(np.cross(cx20, cx01))
#v1x = normalize(np.cross(cx01, cx12))
#v2x = normalize(np.cross(cx12, cx20))
v0x = self._fix_corners_inv(bi, v0x)
#print(v0x)
return UnitVector.invtransform_v(v0x)
class Fuller(BarycentricMapProjection):
def __init__(self, ctrlpts, tweak=False):
"""Parameters:
ctrlpts: 2xn Numpy array, latitude and longitude of each control point
"""
super().__init__(ctrlpts)
self.tweak = tweak
def transform(self, lon, lat):
lon + 0#will TypeError if lon is not a number
vtestpt = UnitVector.transform(lon, lat)
ctrlpts_v = self.ctrlpts_v
b = []
for i in range(3):
v0 = ctrlpts_v[..., i]
v1 = ctrlpts_v[..., (i+1)%3]
v2 = ctrlpts_v[..., (i+2)%3]
vt01 = vtestpt @ np.cross(v0, v1)
vt12 = vtestpt @ np.cross(v1, v2)
vt20 = vtestpt @ np.cross(v2, v0)
cosw01 = v0 @ v1
cosw02 = v0 @ v2
w01 = np.arccos(cosw01)
w02 = np.arccos(cosw02)
if np.isclose(w01, w02):
w = (w01 + w02) / 2
sinw = np.sin(w)
cosw = np.cos(w)
g = vt12 + cosw*(vt01 + vt20)
ti = self._b_eq(w, sinw, vt20, vt01, g)
else:
sinw01 = sqrt(1 - cosw01**2)
sinw02 = sqrt(1 - cosw02**2)
g = vt12 + cosw02*vt01 + cosw01*vt20
ti = self._b_neq(w01, sinw02, vt01, w02, sinw01, vt20, g)
b.append(1-ti)
return self.fixbary(b)
def _b_neq(self, w01, sinw02, vt01, w02, sinw01, vt20, g):
t0 = (w01*sinw02*vt01 + w02*sinw01*vt20)/(g*w01*w02)
if ~np.isfinite(t0):
t0 = 0
else:
lim = np.pi/np.array([w01,w02]).max()
t0 = np.clip(t0, -lim, lim)
if abs(t0) < 1E-3:
return t0
w = (w01 + w02) / 2
sinw = np.sin(w)
t1 = self._b_eq(w, sinw, vt20, vt01, g)
t0 = np.clip(t0, -abs(t1), abs(t1))
c1 = sqrt(g**2 + (sinw01*vt20 - sinw02*vt01)**2)
c2 = sqrt(g**2 + (sinw01*vt20 + sinw02*vt01)**2)
d1 = np.arctan2(sinw01*vt20 - sinw02*vt01, g)
d2 = np.arctan2(sinw01*vt20 + sinw02*vt01, g)
def objective(t):
if t < -lim or t > lim:
return t**2, 2*t
if t == 0:
                t = np.finfo(float).eps
"""
:Author(s) <NAME>, <NAME>:
This file contains the methods used to perform calculations on scuba diving profiles.
"""
from rpy2.robjects.vectors import IntVector
import rpy2.robjects as robjects
import numpy as np
import math
import DiveConstants as dc
class Calculations:
def initialise_dive(self, data, gas_combinations):
"""
Creates and initialises the dive_profile object.
:param data: dataframe:
a dataframe containing columns: time and depth
:param gas_combinations: list:
A list of gases in the format [[fO2,fHe,fN2,time]]
:returns tuple:
:dive_profile: a scuba dive object
:gas_list: a list of scuba gas objects
"""
dive = robjects.r['dive']
gas_list, tank_times = self.trimix_list(gas_combinations)
size = len(gas_list)
d = dive(data, tanklist=gas_list)
# Use Imbedded R script to name and swap gas tanks for dive
custom_gas = robjects.r('''
customGas <- function(dive_profile, numgas, list_of_times)
{
#Applies names to the tanklist in the format c("1":"n") - necessary to select which gas to use at a specific time.
names(tanklist(dive_profile)) <- c(1:numgas)
#Cuts the dive_profile and switches to the specific gas at the time listed
d <- cut(times.dive(dive_profile), breaks = c(do.call(c, list_of_times), Inf), include.lowest = TRUE, labels = names(tanklist(dive_profile)))
whichtank(dive_profile) <- cut(times.dive(dive_profile), breaks = c(do.call(c, list_of_times), Inf), include.lowest = TRUE, labels = names(tanklist(dive_profile)))
return(dive_profile)
}
''')
dive_profile = custom_gas(d, size, tank_times)
return dive_profile, gas_list
def trimix_list(self, gas_combinations):
"""
converts gas_combination string into trimix gas objects
        :param gas_combinations: list:
            a list of gases in the format [[fO2, fHe, fN2, time], ...]
:returns tuple:
:gas_list: a list of trimix gas objects
:time_list: a list of times to pair with gas_list
"""
trimix = robjects.r['trimix']
gas_list = []
time_list = []
try:
# set default gas to air if no gas specified at time 0
if((len(gas_combinations) < 1 or gas_combinations[0][3] > 0)):
gas_list.append(trimix(0.21, 0, 0.79))
time_list.append(-1)
for gas in gas_combinations:
gas_list.append(trimix(gas[0], gas[1]))
time_list.append(gas[3])
except IndexError:
pass
return gas_list, time_list
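    # Example (sketch): two tanks, EAN32 from the start and pure O2 from time 40
    # (in the profile's time units); each entry follows the [fO2, fHe, fN2, time]
    # convention documented above.
    # gases = [[0.32, 0.0, 0.68, 0], [1.0, 0.0, 0.0, 40]]
    # gas_list, times = Calculations().trimix_list(gases)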
def o2_tox(self, dive_profile):
"""
calculates oxygen toxcity exposure
:param dive_profile: dive:
a dive profile to test OTU level
:returns float:
value representing pulmonary oxygen toxicity dose for a given dive profile and breathing gas
is NaN if scuba is unable to calculate.
"""
oxtox = robjects.r['oxtox']
otu = oxtox(dive_profile, progressive=False)
ret = float(np.asarray(otu))
if(math.isnan(ret)):
ret = '.'
return ret
def max_ascent(self, dive_csv):
"""
finds the maximum ascent rate
:param dive_csv: dataframe:
a dataframe containing columns: time and depth
:returns:
            the maximum ascent rate, in depth units per minute
"""
data = np.array(dive_csv)
max = 0
ascent_rate = 0
time_interval = data[0][3] - data[0][2]
for idx, depth in np.ndenumerate(data[1, :]):
try:
temp = data[1][idx[0]+1]
if ((depth - temp) > max):
max = depth - temp
except IndexError:
pass
# calculates the max ascent rate per min
div = 60.0 / time_interval
ascent_rate = round(max * div, 3)
return ascent_rate
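    # Example (sketch): max_ascent expects a 2xN array-like of [times; depths] with
    # a uniform sampling interval (assumed here to be in seconds), e.g.
    # profile = np.array([[0, 10, 20, 30], [0.0, 18.0, 18.0, 0.0]])
    # Calculations().max_ascent(profile)   # 108.0 under that assumption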
def compartment_pressures(self, dive_profile, halftime_set):
"""
Gets compartment pressures from dive profile based on given half time set.
        :param dive_profile: dive:
            a scuba dive object as returned by initialise_dive
:param halftime_set: str:
the name of the halftime set to be used
:returns:
:cp: a dataframe containing compartment pressures from 1,1b - 16
"""
# setup R functions
haldane = robjects.r['haldane']
pickmodel = robjects.r['pickmodel']
data_frame = robjects.r['data.frame']
if(not(halftime_set == 'ZH-L16A' or
halftime_set == 'ZH-L16B' or
halftime_set == 'ZH-L16C' or
halftime_set == 'Haldane' or
halftime_set == 'DSAT' or
halftime_set == 'Workman65' or
halftime_set == 'Buzzacott')):
raise ValueError('Invalid halftime-set')
else:
            # if halftime set is Buzzacott, set up the custom 15-compartment model.
if(halftime_set == 'Buzzacott'):
hm = robjects.r['hm']
buzzacott_model = hm(HalfT=IntVector((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15)), M0=IntVector((
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)), dM=IntVector((1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)))
cp = haldane(dive_profile, model=buzzacott_model,
progressive=True)
            # ZH-L16B and ZH-L16C share the ZH-L16A half-times, so the ZH-L16A model is reused; the B/C differences are applied later in the gradient factor calculations
elif(halftime_set == 'ZH-L16B' or halftime_set == 'ZH-L16C'):
cp = haldane(dive_profile, model=pickmodel(
'ZH-L16A'), progressive=True)
# for all other models, set up normally
else:
cp = haldane(dive_profile, model=pickmodel(
halftime_set), progressive=True)
# return the compartment pressures as dataframe
return data_frame(cp)
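    # Example (sketch): compartment pressures for a dive on air using the ZH-L16A
    # half-time set; `data` is the R dataframe of times and depths used elsewhere.
    # calc = Calculations()
    # dive_profile, gases = calc.initialise_dive(data, [[0.21, 0.0, 0.79, 0]])
    # cp = calc.compartment_pressures(dive_profile, 'ZH-L16A')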
def max_values(self, ambient_pressures, compartment_pressures, totalIPP, nIPP, heIPP):
"""
merges max_bubble, max_inspired into a single function
Calculates max inspired, bubble and surf values as well as recording the compartment pressure values at the time of max bubble
:param ambient_pressures: float[]:
a list of ambient pressures at each time point (depth/10)
:param compartment_pressures: float[]:
a list of compartment pressure values (cp_value)
:param totalIPP: float[]:
the total inert gas partial pressure at given time points
:returns float[]:
max_values : array containing the collumns:
:max_ins: cp_value - totalIPP
:max_bub: cp_value - ambient_pressure
:n2_cp: cp_value where maxbub occured
:he_cp: helium cp_value where maxbub occured
:surf: the cp when the diver surfaces.
:he_surf: the helium cp when the diver surfaces
"""
# get compartment pressures and ambient pressure data
# cp = [row][col]
cp = np.array(compartment_pressures, dtype=np.float64).transpose()
ap = ambient_pressures
rows = cp.shape[0]
cols = cp.shape[1]
if cols > 17:
cols = 17
# initialize output array, array is same length as comparment pressures
max_values = np.zeros((cols, 8))
# for each column
for i in range(cols):
max_bub = 0
max_ins = -9999
max_he_ins = -9999
total_ins = -9999
n2_cp = 0
he_cp = 0
# for each row
for j, cp_val in np.ndenumerate(cp[:, i]):
try:
# for buhlmann models
if(cols == 17):
temp_ins = cp_val - nIPP[j]
he_ins = cp[j][i+17] - heIPP[j]
temp_total = temp_ins + he_ins
temp_bub = (cp_val + cp[j][i+17]) - ap[j]
if(he_ins > max_he_ins):
max_he_ins = he_ins
# for air dives
else:
temp_bub = cp_val - ap[j]
temp_ins = cp_val - totalIPP[j]
temp_total = temp_ins
if(temp_total > total_ins):
total_ins = temp_total
if(temp_ins > max_ins):
max_ins = temp_ins
if(temp_bub > max_bub):
max_bub = temp_bub
n2_cp = cp_val
# get he_cp value iff buhlmann model
if(cols == 17):
he_cp = cp[j][i+17]
except IndexError:
pass
max_values[i][0] = max_ins
max_values[i][1] = max_bub
max_values[i][2] = n2_cp
max_values[i][3] = he_cp
max_values[i][4] = cp[rows-1][i] # N2Surf
if(cols == 17):
max_values[i][5] = cp[rows-1][i+17] # heSurf
max_values[i][6] = max_he_ins # helium maxins values
max_values[i][7] = total_ins
return max_values
def ambient_pressures(self, dive):
"""
calculates ambient pressures
:param dive: dataframe:
a dataframe containing columns: time and depth
:returns float[]:
a list of ambient pressures at each time point
"""
# get dive data (times/depths) and convert to np array
df = np.array(dive, dtype=np.float64)
# initialize output array
ap = np.zeros(df.shape[1])
# enumerate 2nd column of array (depths) and calculate ambient pressure
for idx, depth in np.ndenumerate(df[1, :]):
ap[idx] = depth/10 + 1
return ap
def gradient_factors(self, data, compartment_pressures, halftime_set, surf_vals):
"""
Calculates the maximum percentage of the respective M-value any compartment reaches known as the GFHigh and GFLow specific to the user selected halftime set,
Finds the depth at which any compartment reaches 100% of its m value and finds the first miss according to that depth.
:param data: dataframe:
a dataframe containing columns: time and depth
:param compartment_pressures: dataframe:
a dataframe containing compartment pressures specific to each halftime set
:param halftime_set: str:
the name of the halftime set to be used
:param surf_vals: array:
a 2D array containing the surface values for nitrogen and helium needed to calculate the GFHigh values
:returns list:
:GF_Lows_final: all final values GFLowNMax
:GF_Lows_max_max: the maximum value of all GFLowNMax values,
:gf_100D: an integer representing the depth at which a compartment hits 100% of its m value
:first_miss: the closest multiple of 3 to gf_100D
:GF_Highs: all GFHigh values
:GF_Highs_max: the maximum value of all GFHigh values
"""
# convert compartment pressures to numpy array, transpose so we have [rows][cols] rather than [cols][rows]
cp = np.array(compartment_pressures, dtype=np.float64).transpose()
if halftime_set == 'ZH-L16A' or halftime_set == 'ZH-L16B' or halftime_set == 'ZH-L16C':
num_compartments = 17
elif halftime_set == 'DSAT':
num_compartments = 8
elif halftime_set == 'Workman65':
num_compartments = 9
elif halftime_set == 'Haldane':
num_compartments = 5
gaugeP = np.zeros(cp.shape[0])
# nitrogen and helium XDM, calculation = (the respective gas * gauge pressure at each timepoint)
        nXDM = np.zeros((gaugeP.shape[0], num_compartments))
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 26 16:46:44 2021
@author: OTPS
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LogNorm
import scipy as scipy
from CQED_fit import fit
from CQED_fit import avoided_crossing_direct_coupling_flat
from CQED_fit import avoided_crossing_direct_coupling
from CQED_fit import data_set
from CQED_fit import shortest_dist
def amp_to_volt(amp):
# amp in mV
x = 0.15
amp_fs = 2*2*amp/x/1e3 # in V!
out_range = 0.750
amp_HL = 5
rect_wave = 1
signal_voltage = rect_wave*amp_fs*out_range
return signal_voltage
vpk_to_dbm = lambda v: 10*np.log10((v/np.sqrt(2))**2/(50*1e-3))
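# Example (sketch): converting a 15 mV amplitude setting into an estimated signal
# voltage and then into dBm at 50 ohm (the numbers depend on the x = 0.15 scaling
# and the 0.75 V output range assumed in amp_to_volt).
# v = amp_to_volt(15)     # volts
# p = vpk_to_dbm(v)       # dBm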
### DATA 211026 1014
I_211026_1014 = np.loadtxt('211026_1014_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
R_211026_1014 = np.loadtxt('211026_1014_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
mod211026_1014 = np.sqrt(R_211026_1014**2 + I_211026_1014**2)
mod_mat211026_1014 = mod211026_1014.reshape(17,301)
nr_of_volt_measurements = np.arange(6.6, 7.4, 0.05).size
volt_measurements = np.arange(6.6, 7.4, 0.05)
# ### DATA 211026 1016 --> irregular measurement, needs to be discarded
# I_211026_1016 = np.loadtxt('211026_1016_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
# R_211026_1016 = np.loadtxt('211026_1016_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
# mod211026_1016 = np.sqrt(R_211026_1016**2 + I_211026_1016**2)
# mod_mat211026_1016 = mod211026_1016.reshape(17,301)
# nr_of_volt_measurements = np.arange(6.6, 7.4, 0.05).size
# volt_measurements = -np.arange(-6.6, -7.4, -0.05)
### DATA 211027 1005
I_211027_1005 = np.loadtxt('211027_1005_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
R_211027_1005 = np.loadtxt('211027_1005_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
mod211027_1005 = np.sqrt(R_211027_1005**2 + I_211027_1005**2)
mod_mat211027_1005 = mod211027_1005.reshape(17,301)
nr_of_volt_measurements = np.arange(6.6, 7.4, 0.05).size
volt_measurements = -np.arange(-6.6, -7.4, -0.05)
### DATA 211027 1007
I_211027_1007 = np.loadtxt('211027_1007_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
R_211027_1007 = np.loadtxt('211027_1007_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
mod211027_1007 = np.sqrt(R_211027_1007**2 + I_211027_1007**2)
mod_mat211027_1007 = mod211027_1007.reshape(17,301)
nr_of_volt_measurements = np.arange(6.6, 7.4, 0.05).size
volt_measurements = -np.arange(-6.6, -7.4, -0.05)
### DATA 211027 1008
I_211027_1008 = np.loadtxt('211027_1008_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
R_211027_1008 = np.loadtxt('211027_1008_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
mod211027_1008 = np.sqrt(R_211027_1008**2 + I_211027_1008**2)
mod_mat211027_1008 = mod211027_1008.reshape(17,301)
nr_of_volt_measurements = np.arange(6.6, 7.4, 0.05).size
volt_measurements = -np.arange(-6.6, -7.4, -0.05)
### DATA 211029 1008
I_211029_1008 = np.loadtxt('211029_1008_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
R_211029_1008 = np.loadtxt('211029_1008_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
mod211029_1008 = np.sqrt(R_211029_1008**2 + I_211029_1008**2)
mod_mat211029_1008 = mod211029_1008.reshape(17,301)
### DATA 211029 1009
I_211029_1009 = np.loadtxt('211029_1009_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
R_211029_1009 = np.loadtxt('211029_1009_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
mod211029_1009 = np.sqrt(R_211029_1009**2 + I_211029_1009**2)
mod_mat211029_1009 = mod211029_1009.reshape(17,301)
### DATA 21112 1005
I_21112_1005 = np.loadtxt('21112_1005_QUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
R_21112_1005 = np.loadtxt('21112_1005_IUHFLICh0cav_spec.dat', unpack = True, skiprows=1)
mod21112_1005 = np.sqrt(R_21112_1005**2 + I_21112_1005**2)
import sys, os
import numpy as np
from keras.preprocessing.image import transform_matrix_offset_center, apply_transform, Iterator,random_channel_shift, flip_axis
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
import cv2
import random
import pdb
from skimage.io import imsave, imread
from skimage.transform import rotate
from skimage import transform
from skimage.transform import resize
from params import *
import json
import math
#import matplotlib.pyplot as plt
def clip(img, dtype, maxval):
return np.clip(img, 0, maxval).astype(dtype)
def RandomLight(img,img_right):
lights = random.choice(["Rfilter","Rbright","Rcontr", "RSat","RhueSat"])
#print(lights)
if lights=="Rfilter":
alpha = 0.5 * random.uniform(0, 1)
kernel = np.ones((3, 3), np.float32)/9 * 0.2
colored = img[..., :3]
colored = alpha * cv2.filter2D(colored, -1, kernel) + (1-alpha) * colored
maxval = np.max(img[..., :3])
dtype = img.dtype
img[..., :3] = clip(colored, dtype, maxval)
#right image
colored = img_right[..., :3]
colored = alpha * cv2.filter2D(colored, -1, kernel) + (1-alpha) * colored
maxval = np.max(img_right[..., :3])
dtype = img_right.dtype
img_right[..., :3] = clip(colored, dtype, maxval)
if lights=="Rbright":
alpha = 1.0 + 0.1*random.uniform(-1, 1)
maxval = np.max(img[..., :3])
dtype = img.dtype
img[..., :3] = clip(alpha * img[...,:3], dtype, maxval)
#right image
maxval = np.max(img_right[..., :3])
dtype = img_right.dtype
img_right[..., :3] = clip(alpha * img_right[...,:3], dtype, maxval)
if lights=="Rcontr":
alpha = 1.0 + 0.1*random.uniform(-1, 1)
gray = cv2.cvtColor(img[:, :, :3], cv2.COLOR_BGR2GRAY)
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
maxval = np.max(img[..., :3])
dtype = img.dtype
img[:, :, :3] = clip(alpha * img[:, :, :3] + gray, dtype, maxval)
#right image
gray = cv2.cvtColor(img_right[:, :, :3], cv2.COLOR_BGR2GRAY)
gray = (3.0 * (1.0 - alpha) / gray.size) * np.sum(gray)
maxval = np.max(img_right[..., :3])
dtype = img.dtype
img_right[:, :, :3] = clip(alpha * img_right[:, :, :3] + gray, dtype, maxval)
if lights=="RSat":
maxval = np.max(img[..., :3])
dtype = img.dtype
alpha = 1.0 + random.uniform(-0.1, 0.1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
img[..., :3] = alpha * img[..., :3] + (1.0 - alpha) * gray
img[..., :3] = clip(img[..., :3], dtype, maxval)
        #right image
maxval = np.max(img_right[..., :3])
dtype = img_right.dtype
alpha = 1.0 + random.uniform(-0.1, 0.1)
gray = cv2.cvtColor(img_right, cv2.COLOR_BGR2GRAY)
gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
img_right[..., :3] = alpha * img_right[..., :3] + (1.0 - alpha) * gray
img_right[..., :3] = clip(img_right[..., :3], dtype, maxval)
if lights=="RhueSat":
img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(img)
hue_shift = np.random.uniform(-25,25)
h = cv2.add(h, hue_shift)
sat_shift = np.random.uniform(-25,25)
s = cv2.add(s, sat_shift)
val_shift = np.random.uniform(-25, 25)
v = cv2.add(v, val_shift)
img = cv2.merge((h, s, v))
img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
#right image
img_right = cv2.cvtColor(img_right, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(img_right)
h = cv2.add(h, hue_shift)
s = cv2.add(s, sat_shift)
v = cv2.add(v, val_shift)
img_right = cv2.merge((h, s, v))
img_right = cv2.cvtColor(img_right, cv2.COLOR_HSV2BGR)
return img,img_right
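# Example (sketch): RandomLight applies the same randomly chosen photometric jitter
# to a stereo pair of uint8 BGR images; the file names below are placeholders.
# left = cv2.imread('left.png')
# right = cv2.imread('right.png')
# left_aug, right_aug = RandomLight(left, right)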
def perspectivedist(img, img_right, img_mask, flag='all'):
    # Random perspective (skew) warp. The warp is sampled once and applied with the
    # same homography to the left image, the right image and the mask so the stereo
    # pair and its annotation stay geometrically aligned.
    if flag == 'all':
        magnitude = 3
        skew = random.choice(["TILT", "TILT_LEFT_RIGHT", "TILT_TOP_BOTTOM", "CORNER"])
        # img.shape is (rows, cols, channels): h is the image height, w its width.
        h, w, _ = img.shape
        x1 = 0
        x2 = h
        y1 = 0
        y2 = w
        original_plane = np.array([[(y1, x1), (y2, x1), (y2, x2), (y1, x2)]], dtype=np.float32)
        max_skew_amount = max(w, h)
        max_skew_amount = int(math.ceil(max_skew_amount * magnitude))
        skew_amount = random.randint(1, max_skew_amount)
        if skew in ("TILT", "TILT_LEFT_RIGHT", "TILT_TOP_BOTTOM"):
            if skew == "TILT":
                skew_direction = random.randint(0, 3)
            elif skew == "TILT_LEFT_RIGHT":
                skew_direction = random.randint(0, 1)
            elif skew == "TILT_TOP_BOTTOM":
                skew_direction = random.randint(2, 3)
            if skew_direction == 0:
                # Left Tilt
                new_plane = np.array([(y1, x1 - skew_amount),    # Top Left
                                      (y2, x1),                  # Top Right
                                      (y2, x2),                  # Bottom Right
                                      (y1, x2 + skew_amount)],   # Bottom Left
                                     dtype=np.float32)
            elif skew_direction == 1:
                # Right Tilt
                new_plane = np.array([(y1, x1),                  # Top Left
                                      (y2, x1 - skew_amount),    # Top Right
                                      (y2, x2 + skew_amount),    # Bottom Right
                                      (y1, x2)],                 # Bottom Left
                                     dtype=np.float32)
            elif skew_direction == 2:
                # Forward Tilt
                new_plane = np.array([(y1 - skew_amount, x1),    # Top Left
                                      (y2 + skew_amount, x1),    # Top Right
                                      (y2, x2),                  # Bottom Right
                                      (y1, x2)],                 # Bottom Left
                                     dtype=np.float32)
            elif skew_direction == 3:
                # Backward Tilt
                new_plane = np.array([(y1, x1),                  # Top Left
                                      (y2, x1),                  # Top Right
                                      (y2 + skew_amount, x2),    # Bottom Right
                                      (y1 - skew_amount, x2)],   # Bottom Left
                                     dtype=np.float32)
        if skew == "CORNER":
            # Displace a single corner of the plane by skew_amount.
            skew_direction = random.randint(0, 7)
            if skew_direction == 0:
                new_plane = np.array([(y1 - skew_amount, x1), (y2, x1), (y2, x2), (y1, x2)], dtype=np.float32)
            elif skew_direction == 1:
                new_plane = np.array([(y1, x1 - skew_amount), (y2, x1), (y2, x2), (y1, x2)], dtype=np.float32)
            elif skew_direction == 2:
                new_plane = np.array([(y1, x1), (y2 + skew_amount, x1), (y2, x2), (y1, x2)], dtype=np.float32)
            elif skew_direction == 3:
                new_plane = np.array([(y1, x1), (y2, x1 - skew_amount), (y2, x2), (y1, x2)], dtype=np.float32)
            elif skew_direction == 4:
                new_plane = np.array([(y1, x1), (y2, x1), (y2 + skew_amount, x2), (y1, x2)], dtype=np.float32)
            elif skew_direction == 5:
                new_plane = np.array([(y1, x1), (y2, x1), (y2, x2 + skew_amount), (y1, x2)], dtype=np.float32)
            elif skew_direction == 6:
                new_plane = np.array([(y1, x1), (y2, x1), (y2, x2), (y1 - skew_amount, x2)], dtype=np.float32)
            elif skew_direction == 7:
                new_plane = np.array([(y1, x1), (y2, x1), (y2, x2), (y1, x2 + skew_amount)], dtype=np.float32)
        # Map the original corner plane onto the skewed plane and warp all three
        # images with the same perspective matrix.
        perspective_matrix = cv2.getPerspectiveTransform(original_plane, new_plane)
        img = cv2.warpPerspective(img, perspective_matrix,
                                  (img.shape[1], img.shape[0]),
                                  flags=cv2.INTER_LINEAR)
        img_right = cv2.warpPerspective(img_right, perspective_matrix,
                                        (img.shape[1], img.shape[0]),
                                        flags=cv2.INTER_LINEAR)
        img_mask = cv2.warpPerspective(img_mask, perspective_matrix,
                                       (img.shape[1], img.shape[0]),
                                       flags=cv2.INTER_LINEAR)
    return img, img_right, img_mask
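# Illustrative call (variable names are placeholders, not from the original code):
#   left_aug, right_aug, mask_aug = perspectivedist(left, right, mask, flag='all')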
def apply_clahe(img):
    # Contrast Limited Adaptive Histogram Equalization applied to the lightness
    # channel of the LAB representation; colour channels are left untouched.
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    cl = clahe.apply(l)
    limg = cv2.merge((cl, a, b))
    img = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
    return img
def add_gaussian_noise(X_imgs):
    # Blend a zero-mean Gaussian noise field into the image. The field is sampled
    # once and replicated across the three colour channels.
    row, col, _ = X_imgs.shape
    X_imgs = X_imgs.astype(np.float32)
    # Gaussian distribution parameters
    mean = 0
    var = 0.1
    sigma = var ** 0.5
    gaussian = np.random.normal(mean, sigma, (row, col, 1)).astype(np.float32)
    gaussian = np.concatenate((gaussian, gaussian, gaussian), axis=2)
    gaussian_img = cv2.addWeighted(X_imgs, 0.75, 0.25 * gaussian, 0.25, 0)
    gaussian_img = np.clip(gaussian_img, 0, 255).astype(np.uint8)
    return gaussian_img
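# Illustrative usage (placeholder variable name): noisy = add_gaussian_noise(frame_bgr).
# The 0.75/0.25 blend keeps 75% of the original intensity, so this augmentation also
# darkens the frame slightly in addition to adding noise.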
def random_affine(img,img_right,img_mask):
flat_sum_mask=sum(img_mask.flatten())
(row,col,_)=img_mask.shape
angle=shear_deg=0
zoom=1
center_shift = np.array((1000, 1000)) / 2. - 0.5
tform_center = transform.SimilarityTransform(translation=-center_shift)
tform_uncenter = transform.SimilarityTransform(translation=center_shift)
big_img=np.zeros((1000,1000,3), dtype=np.uint8)
big_img_right=np.zeros((1000,1000,3), dtype=np.uint8)
big_mask=np.zeros((1000,1000,3), dtype=np.uint8)
big_img[190:(190+row),144:(144+col)]=img
big_img_right[190:(190+row),144:(144+col)]=img_right
big_mask[190:(190+row),144:(144+col)]=img_mask
affine = random.choice(["rotate", "zoom", "shear"])
if affine == "rotate":
angle= random.uniform(-90, 90)
if affine == "zoom":
zoom = random.uniform(0.5, 1.5)
if affine=="shear":
shear_deg = random.uniform(-5, 5)
tform_aug = transform.AffineTransform(rotation = np.deg2rad(angle),
scale =(1/zoom, 1/zoom),
shear = np.deg2rad(shear_deg),
translation = (0, 0))
tform = tform_center + tform_aug + tform_uncenter
img_tr=transform.warp((big_img), tform)
img_tr_right=transform.warp((big_img_right), tform)
mask_tr=transform.warp((big_mask), tform)
masktemp = cv2.cvtColor((img_tr*255).astype(np.uint8), cv2.COLOR_BGR2GRAY)>20
img_tr=img_tr[np.ix_(masktemp.any(1),masktemp.any(0))]
mask_tr = mask_tr[np.ix_(masktemp.any(1),masktemp.any(0))]
img_tr_right = img_tr_right[np.ix_(masktemp.any(1),masktemp.any(0))]
return (img_tr*255).astype(np.uint8),(img_tr_right*255).astype(np.uint8),(mask_tr*255).astype(np.uint8)
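# Keras-style iterator that pairs each left frame with its right frame and
# ground-truth mask, delegates augmentation to the generator's random_transform,
# and yields one batch per call.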
class CustomNumpyArrayIterator(Iterator):
def __init__(self, X, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
dim_ordering='th'):
self.X = X
self.y = y
self.image_data_generator = image_data_generator
self.dim_ordering = dim_ordering
self.training=image_data_generator.training
self.img_rows=image_data_generator.netparams.img_rows
self.img_cols=image_data_generator.netparams.img_cols
with open('labels_2017.json') as json_file:
self.Data = json.load(json_file)
super(CustomNumpyArrayIterator, self).__init__(X.shape[0], batch_size, shuffle, seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x_right = np.zeros((len(index_array),self.img_rows,self.img_cols,3), dtype=np.float32)
batch_x_left = np.zeros((len(index_array),self.img_rows,self.img_cols,3), dtype=np.float32)
if self.training:
if self.image_data_generator.netparams.task=='all':
ch_num=11
elif self.image_data_generator.netparams.task=='binary':
ch_num=1
elif self.image_data_generator.netparams.task=='parts':
ch_num=3
elif self.image_data_generator.netparams.task=='instrument':
ch_num=7
else:
ch_num=3
batch_y=np.zeros((len(index_array), self.img_rows,self.img_cols,ch_num), dtype=np.float32)
infos=[]
for i, j in enumerate(index_array):
x_left = imread(self.X[j][0])
x_right =imread(self.X[j][1])
y1 =imread(self.y[j])
y1 = y1[...,[1,2,0]]
infos.append((self.X[j][0], x_left.shape))
_x_left, _x_right, _y1 = self.image_data_generator.random_transform(x_left.astype(np.uint8), x_right.astype(np.uint8),y1.astype(np.uint8),self.Data)
batch_x_left[i]=_x_left
batch_x_right[i]=_x_right
batch_y[i]=_y1
batch_y=np.reshape(batch_y,(-1,self.img_rows,self.img_cols,ch_num))
return batch_x_left,batch_x_right,batch_y,infos
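    # Each batch is (left images, right images, target masks, per-sample info),
    # where each info entry holds the source image path and its original shape.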
def next(self):
with self.lock:
index_array = next(self.index_generator)
return self._get_batches_of_transformed_samples(index_array)
def convert_gray(data, im, tasktype):
    # Convert an RGB-coded annotation image into a stack of binary masks, one
    # channel per instrument class colour listed in the label metadata.
    if tasktype.task == 'all':
        out = np.zeros((im.shape[0], im.shape[1], 11)).astype(np.uint8)
        image = np.squeeze(im[:, :, 0])
        indexc = 0
        for label_info in data['instrument']:
            rgb = label_info['color'][0]
            if rgb == 0:
                continue
            temp_out = np.zeros(im.shape[:2]).astype(np.uint8)
            gray_val = 255
            match_pxls = np.where(image == rgb)
            # Assumed completion of the truncated loop body: write the matched
            # pixels into the current channel and advance to the next one.
            temp_out[match_pxls] = gray_val
            out[:, :, indexc] = temp_out
            indexc += 1
        return out

import sys
import numpy as np
from skimage.measure import label
def getSegType(mid):
m_type = np.uint64
if mid<2**8:
m_type = np.uint8
elif mid<2**16:
m_type = np.uint16
elif mid<2**32:
m_type = np.uint32
return m_type
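# Example: getSegType(300) returns np.uint16, since 2**8 <= 300 < 2**16.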
def seg2Count(seg,do_sort=True,rm_zero=False):
sm = seg.max()
if sm==0:
return None,None
if sm>1:
segIds,segCounts = np.unique(seg,return_counts=True)
if rm_zero:
segCounts = segCounts[segIds>0]
segIds = segIds[segIds>0]
if do_sort:
sort_id = np.argsort(-segCounts)
segIds=segIds[sort_id]
segCounts=segCounts[sort_id]
else:
segIds=np.array([1])
segCounts=np.array([np.count_nonzero(seg)])
return segIds, segCounts
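# seg2Count returns the ids present in `seg` and their pixel counts; do_sort puts
# the largest segments first and rm_zero drops the background id 0.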
def removeSeg(seg, did, invert=False):
    # Relabel `seg`: by default the ids listed in `did` are removed (set to 0);
    # with invert=True only those ids are kept.
    sm = seg.max()
    did = did[did <= sm]
    if invert:
        rl = np.zeros(1 + sm).astype(seg.dtype)
        rl[did] = did
    else:
        rl = np.arange(1 + sm)
        # Assumed completion of the truncated branch: zero out the ids to remove.
        rl[did] = 0
    return rl[seg]
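
# Minimal usage sketch (not part of the original script); the expected outputs in
# the comments assume the completion of removeSeg above.
if __name__ == '__main__':
    toy = np.array([[0, 1, 1],
                    [2, 2, 2],
                    [0, 0, 3]])
    ids, counts = seg2Count(toy, do_sort=True, rm_zero=True)
    print(ids, counts)                    # [2 1 3] [3 2 1]
    print(getSegType(int(toy.max())))     # <class 'numpy.uint8'>
    print(removeSeg(toy, np.array([1])))  # all pixels labelled 1 become 0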