prompt (string lengths 15 to 655k) | completion (string lengths 3 to 32.4k) | api (string lengths 8 to 52)
---|---|---|
# The script performs analysis of photoluminescence spectra of nanoscale Si measured with a
# Perkin Elmer FL 8500 or 6500 using the Spectrum FL Software
# (https://www.perkinelmer.com/product/fl-8500-with-spectrum-fl-software-n4200030).
# The measurement can be done with powder or liquid samples, with different filters used to
# remove excitation light from the emission spectra. The script combines data measured in 3D mode,
# i.e. an emission spectrum is measured for each excitation wavelength. Depending on the excitation
# wavelengths and emission filters used, the script combines the spectra into one graph. For example,
# consider a sample first measured with excitation wavelengths from 300 to 400 nm and an emission
# filter at 430 nm, and then measured with excitation wavelengths from 400 to 500 nm and an emission
# filter at 515 nm. The script will then combine those measurements into one dataset and plot the
# relevant graphs for the combined data.
# The script works by setting the sample id and the folder in which the measurement folders are located.
# These folders must start with the sample id followed by a '_' character and an additional measurement
# description. The folder name must contain the filter wavelength in nm somewhere after the '_' character.
# The folder may end with '_' followed by a measurement index in case the measurement was repeated.
# However, there is no way to select an exact measurement repeat; the selection is determined
# by the directory search function glob.glob().
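# Example folder layout (hypothetical sample id and filter values, for illustration only):
#   <measure_dir>/S01_powder_430/      -> measured with the 430 nm emission filter
#   <measure_dir>/S01_powder_515_2/    -> measured with the 515 nm emission filter, repeat 2
# Each measurement folder is expected to contain an 'Administrator*' csv exported by Spectrum FL.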
import argparse
import chardet
import glob
import os
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xlsxwriter
from matplotlib import cm
from matplotlib.ticker import LinearLocator
def load_csv(meas_folder: str, encoding: str) -> pd.DataFrame:
'''Loads the csv to pandas dataframe.'''
csv_path = glob.glob(os.path.join(meas_folder, 'Administrator*'))[0]
# get file encoding
if encoding == '':
with open(csv_path, 'rb') as raw:
encoding = chardet.detect(raw.read())
encoding = encoding['encoding']
# open file and replace , with .
with open(csv_path, 'r', encoding=encoding) as f:
csv = f.read().replace(',', '.')
with open(csv_path, 'w', encoding=encoding) as f:
f.write(csv)
# get dataframe
meas_df = pd.read_csv(csv_path, sep=';', skiprows=1, encoding=encoding)
meas_df.drop(meas_df.columns[len(meas_df.columns)-1], axis=1, inplace=True)
meas_df = meas_df.astype(float)
return meas_df
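# Note: the in-place rewrite above converts decimal commas to points so the values parse as
# floats, e.g. a raw cell '0,123' (hypothetical value) becomes '0.123'; the column separator is ';'.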
def get_sample_data(measure_dir: str,
sample_id: str,
emission_filters: list,
excitation_wavelengths: list,
encoding: str) -> list:
'''Get sample data for sample with sample_id, excitation wavelengths and emission filters'''
# get all folders whose names start with the specified sample id
all_sample_paths = [x for x in glob.glob(os.path.join(measure_dir, sample_id + '*')) if os.path.isdir(x)]
print(all_sample_paths)
if not all_sample_paths:
print('error: sample with specified id was not found: ' + sample_id)
return
# loop through emission filters and sample paths
# and select one measurement for each filter and excitation range
x_nm = []
sample_data = []
sample_excit_wls = []
if emission_filters:
# if there are emission filters, select measurement for each folder
for i, ef in enumerate(emission_filters):
meas_path = ''
for path in all_sample_paths:
if str(ef) in path:
meas_path = path
if meas_path == '':
# no measurement with such filter found
print('error: no measurement for specified emission filter was found: ' + str(ef) + ' nm')
return
# load the sample data into dataframe
print(f'info: using measurement {meas_path} for emission filter {ef} nm and range {excitation_wavelengths[i]}')
meas_df = load_csv(meas_path, encoding)
# select the first column which is wavelength in nm
x_nm = meas_df.iloc[:, 0].to_numpy()
# get excitation wavelengths from the column
meas_excit_wls = np.array([float(x.strip(')').strip('INT(')) for x in list(meas_df.columns[1:])])
meas_data = meas_df.iloc[:,1:].to_numpy()
excitation_filter_mask = ((meas_excit_wls >= excitation_wavelengths[i][0]) & (meas_excit_wls < excitation_wavelengths[i][1]))
meas_data = meas_data[:, excitation_filter_mask]
meas_excit_wls = meas_excit_wls[excitation_filter_mask]
if len(sample_data) == 0:
# sample data is empty, so initialize it with the measurement data
sample_data = meas_data
sample_excit_wls = meas_excit_wls
else:
# sample data is not empty, so it can be joined with meas data
sample_data = np.concatenate((sample_data, meas_data), axis=1)
sample_excit_wls = np.concatenate((sample_excit_wls, meas_excit_wls))
else:
# select the last folder from all_sample_paths
meas_df = load_csv(all_sample_paths[-1], encoding)
x_nm = meas_df.iloc[:, 0].to_numpy()
sample_excit_wls = np.array([float(x.strip(')').strip('(')) for x in list(meas_df.columns[1:])])
sample_data = meas_df.iloc[:,1:].to_numpy()
return [x_nm, sample_excit_wls, sample_data]
def analyze_sample(args: argparse.Namespace):
'''Performs analysis of the sample spectrum: subtracts background if specified and gets
values for the peak emission.'''
x_nm, excit_wls, spectra = get_sample_data(args.measure_dir, args.sample_id, args.emission_filters,
args.excitation_wavelengths, args.encoding)
if args.bg_id:
bg_x_nm, bg_excit_wls, bg_spectra = get_sample_data(args.measure_dir,
args.bg_id,
args.emission_filters,
args.excitation_wavelengths,
args.encoding)
# manipulate the bg values for each excitation wavelength;
# this is needed because the background peak at ~830 nm is much larger
# than when a sample is in the holder, and thus gives negative values.
bg_max_idx = np.argmax(bg_spectra, axis=0)
bg_max =
| np.amax(bg_spectra, axis=0) | numpy.amax |
"""Logic for alerting the user on possibly problematic patterns in the data (e.g. high number of zeros , constant
values, high correlations)."""
from enum import Enum, auto, unique
from typing import Any, Dict, List, Optional, Set, Union
import numpy as np
import pandas as pd
from pandas_profiling.config import Settings
from pandas_profiling.model.correlations import perform_check_correlation
@unique
class AlertType(Enum):
"""Alert types"""
CONSTANT = auto()
"""This variable has a constant value."""
ZEROS = auto()
"""This variable contains zeros."""
HIGH_CORRELATION = auto()
"""This variable is highly correlated."""
HIGH_CARDINALITY = auto()
"""This variable has a high cardinality."""
UNSUPPORTED = auto()
"""This variable is unsupported."""
DUPLICATES = auto()
"""This variable contains duplicates."""
SKEWED = auto()
"""This variable is highly skewed."""
MISSING = auto()
"""This variable contains missing values."""
INFINITE = auto()
"""This variable contains infinite values."""
TYPE_DATE = auto()
"""This variable is likely a datetime, but treated as categorical."""
UNIQUE = auto()
"""This variable has unique values."""
CONSTANT_LENGTH = auto()
"""This variable has a constant length"""
REJECTED = auto()
"""Variables are rejected if we do not want to consider them for further analysis."""
UNIFORM = auto()
"""The variable is uniformly distributed"""
EMPTY = auto()
"""The DataFrame is empty"""
class Alert:
"""An alert object (type, values, column)."""
_anchor_id: Optional[str] = None
def __init__(
self,
alert_type: AlertType,
values: Optional[Dict] = None,
column_name: Union[str, None] = None,
fields: Optional[Set] = None,
):
if values is None:
values = {}
if fields is None:
fields = set()
self.fields = fields
self.alert_type = alert_type
self.values = values
self.column_name = column_name
@property
def anchor_id(self) -> Optional[str]:
if self._anchor_id is None:
self._anchor_id = str(hash(self.column_name))
return self._anchor_id
def fmt(self) -> str:
# TODO: render in template
name = self.alert_type.name.replace("_", " ")
if name == "HIGH CORRELATION":
num = len(self.values["fields"])
title = ", ".join(self.values["fields"])
name = f'<abbr title="This variable has a high correlation with {num} fields: {title}">HIGH CORRELATION</abbr>'
return name
def __repr__(self):
alert_type = self.alert_type.name
column = self.column_name
return f"[{alert_type}] alert on column {column}"
def check_table_alerts(table: dict) -> List[Alert]:
"""Checks the overall dataset for alerts.
Args:
table: Overall dataset statistics.
Returns:
A list of alerts.
"""
alerts = []
if alert_value(table.get("n_duplicates", np.nan)):
alerts.append(
Alert(
alert_type=AlertType.DUPLICATES,
values=table,
fields={"n_duplicates"},
)
)
if table["n"] == 0:
alerts.append(
Alert(
alert_type=AlertType.EMPTY,
values=table,
fields={"n"},
)
)
return alerts
def numeric_alerts(config: Settings, summary: dict) -> List[Alert]:
alerts = []
# Skewness
if skewness_alert(summary["skewness"], config.vars.num.skewness_threshold):
alerts.append(
Alert(
alert_type=AlertType.SKEWED,
fields={"skewness"},
)
)
# Infinite values
if alert_value(summary["p_infinite"]):
alerts.append(
Alert(
alert_type=AlertType.INFINITE,
fields={"p_infinite", "n_infinite"},
)
)
# Zeros
if alert_value(summary["p_zeros"]):
alerts.append(
Alert(
alert_type=AlertType.ZEROS,
fields={"n_zeros", "p_zeros"},
)
)
if (
"chi_squared" in summary
and summary["chi_squared"]["pvalue"] > config.vars.num.chi_squared_threshold
):
alerts.append(Alert(alert_type=AlertType.UNIFORM))
return alerts
def categorical_alerts(config: Settings, summary: dict) -> List[Alert]:
alerts = []
# High cardinality
if summary.get("n_distinct", np.nan) > config.vars.cat.cardinality_threshold:
alerts.append(
Alert(
alert_type=AlertType.HIGH_CARDINALITY,
fields={"n_distinct"},
)
)
if (
"chi_squared" in summary
and summary["chi_squared"]["pvalue"] > config.vars.cat.chi_squared_threshold
):
alerts.append(Alert(alert_type=AlertType.UNIFORM))
if summary.get("date_warning"):
alerts.append(Alert(alert_type=AlertType.TYPE_DATE))
# Constant length
if "composition" in summary and summary["min_length"] == summary["max_length"]:
alerts.append(
Alert(
alert_type=AlertType.CONSTANT_LENGTH,
fields={"composition_min_length", "composition_max_length"},
)
)
return alerts
def generic_alerts(summary: dict) -> List[Alert]:
alerts = []
# Missing
if alert_value(summary["p_missing"]):
alerts.append(
Alert(
alert_type=AlertType.MISSING,
fields={"p_missing", "n_missing"},
)
)
return alerts
def supported_alerts(summary: dict) -> List[Alert]:
alerts = []
if summary.get("n_distinct", np.nan) == summary["n"]:
alerts.append(
Alert(
alert_type=AlertType.UNIQUE,
fields={"n_distinct", "p_distinct", "n_unique", "p_unique"},
)
)
if summary.get("n_distinct", np.nan) == 1:
summary["mode"] = summary["value_counts_without_nan"].index[0]
alerts.append(
Alert(
alert_type=AlertType.CONSTANT,
fields={"n_distinct"},
)
)
alerts.append(
Alert(
alert_type=AlertType.REJECTED,
fields=set(),
)
)
return alerts
def unsupported_alerts(summary: Dict[str, Any]) -> List[Alert]:
alerts = [
Alert(
alert_type=AlertType.UNSUPPORTED,
fields=set(),
),
Alert(
alert_type=AlertType.REJECTED,
fields=set(),
),
]
return alerts
def check_variable_alerts(config: Settings, col: str, description: dict) -> List[Alert]:
"""Checks individual variables for alerts.
Args:
col: The column name that is checked.
description: The series description.
Returns:
A list of alerts.
"""
alerts = []
alerts += generic_alerts(description)
if description["type"] == "Unsupported":
alerts += unsupported_alerts(description)
else:
alerts += supported_alerts(description)
if description["type"] == "Categorical":
alerts += categorical_alerts(config, description)
if description["type"] == "Numeric":
alerts += numeric_alerts(config, description)
for idx in range(len(alerts)):
alerts[idx].column_name = col
alerts[idx].values = description
return alerts
def check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:
alerts = []
for corr, matrix in correlations.items():
if config.correlations[corr].warn_high_correlations:
threshold = config.correlations[corr].threshold
correlated_mapping = perform_check_correlation(matrix, threshold)
if len(correlated_mapping) > 0:
for k, v in correlated_mapping.items():
alerts.append(
Alert(
column_name=k,
alert_type=AlertType.HIGH_CORRELATION,
values={"corr": corr, "fields": v},
)
)
return alerts
def get_alerts(
config: Settings, table_stats: dict, series_description: dict, correlations: dict
) -> List[Alert]:
alerts = check_table_alerts(table_stats)
for col, description in series_description.items():
alerts += check_variable_alerts(config, col, description)
alerts += check_correlation_alerts(config, correlations)
alerts.sort(key=lambda alert: str(alert.alert_type))
return alerts
def alert_value(value: float) -> bool:
return not
| np.isnan(value) | numpy.isnan |
'''
Differentially private Bayesian learning on distributed data
<NAME> 2016-17
Modified from the original code:
Differentially private Bayesian linear regression
<NAME> 2016-2017
University of Helsinki Department of Computer Science
Helsinki Institute of Information Technology HIIT
GDSC/drug sensitivity data
Various functions and data processing steps used in the tests.
'''
import sys, os, copy
import numpy as np
from scipy.stats import spearmanr
import warnings
# NOTE on normalisation in distributed setting:
# assume centered data (so remove column means)
# row-wise L2-normalization is ok, since it doesn't depend on other rows
# Centers and L2-normalises x-data (removes columnwise mean, normalises rows to norm 1)
def xnormalise(x):
n = x.shape[0]
d = x.shape[1]
if n == 0:
return x
else:
z = x-np.dot(np.ones((n,1),dtype=float),np.nanmean(x,0).reshape(1,d))
return np.divide(z,np.dot(np.sqrt(np.nansum(np.power(z,2.0),1)).reshape(n,1),np.ones((1,d),dtype=float)))
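# Worked example (hypothetical 2x2 input): for x = [[1., 2.], [3., 4.]] the column means are
# [2., 3.], so z = [[-1., -1.], [1., 1.]]; each row then has L2 norm sqrt(2), giving
# approximately [[-0.7071, -0.7071], [0.7071, 0.7071]].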
# Centers y-data (removes columnwise mean, except for columns where all samples have / all but one sample has missing drug response(s))
def ynormalise(y):
n = y.shape[0]
d = y.shape[1]
if n == 0:
return y
else:
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
m = np.nanmean(y,0)
ind = np.where(np.sum(~np.isnan(y),0)<=1)[0]
m[ind] = 0.0 # don't center samples of size <= 1
return y-np.dot(np.ones((n,1),dtype=float),m.reshape(1,d))
# Clip data
def clip(x,y,B_x,B_y):
C = np.multiply(np.sign(x),np.minimum(np.abs(x),B_x))
with np.errstate(invalid='ignore'):
D = np.multiply(np.sign(y),np.minimum(np.abs(y),B_y))
return C,D
# Selects drug based on drugid, removes cell lines with missing drug response
def ignoreNaN(xx,yy,drugid):
ind = np.where(np.isnan(yy[:,drugid]))
y = np.delete(yy[:,drugid],ind,axis=0)
x = np.delete(xx,ind,axis=0)
return x,y
# Non-private sufficient statistics
def nxx(x):
return np.dot(x.T,x)
def nxy(x,y):
return np.dot(x.T,y)
def nyy(y):
return np.dot(y.T,y)
# Precision measure: Spearman's rank correlation coefficient
def precision(y_pred,y_real):
r = spearmanr(y_pred,y_real)[0]
if np.isnan(r):
return 0.0
else:
return r
# Prediction errors (MAE, MSE) helper script
def pred_errors(pred, y, method):
if method == 'mae':
return np.mean(np.absolute(pred-y))
elif method =='mse':
return np.mean((pred-y)**2)
# Choose optimal w_x,w_y for clipping thresholds
def omega(n,d,eps,delta, method='corr',ln=20):
# Precision parameters (correspond to the means of the gamma hyperpriors)
l = 1.0
l0 = 1.0
l1 = ln
l2 = ln
st = np.arange(0.1,2.1,0.1)
lenC1 = len(st)
lenC2 = lenC1
err = np.zeros((lenC1,lenC2),dtype=np.float64)
for i in range(l1):
# Create synthetic data
x = np.random.normal(0.0,1.0,(n,d))
x = xnormalise(x)
sx = np.std(x,ddof=1)
b = np.random.normal(0.0,1.0/
| np.sqrt(l0) | numpy.sqrt |
"""
acr_sess_analys.py
This script runs analyses across sessions using a Session object with data
generated by the Allen Institute OpenScope experiments for the Credit
Assignment Project.
Authors: <NAME>
Date: October, 2019
Note: this code uses python 3.7.
"""
import logging
import warnings
import numpy as np
import scipy.stats as scist
from util import file_util, gen_util, logger_util, math_util, rand_util
from sess_util import sess_gen_util, sess_ntuple_util, sess_str_util
from extra_analysis import quant_analys, signif_grps
from extra_plot_fcts import acr_sess_analysis_plots as acr_sess_plots
logger = logging.getLogger(__name__)
MIN_N = 2 # minimum number of values outside CIs
#############################################
def set_multcomp(permpar, sessions=None, n_linpla=4, n_sess=1, factor=1,
CIs=True):
"""
set_multcomp(permpar)
Returns permpar updated with the number of comparisons computed from the
sessions, if permpar.multcomp is True.
Required args:
- permpar (PermPar) : named tuple containing permutation parameters
Optional args:
- sessions (list): nested list of Session objects
(linpla x mouse x sess) used to infer
n_linpla and n_sess, if provided
default: None
- n_linpla (int) : number of lines/planes, used if sessions is None
default: 4
- n_sess (int) : number of sessions, used if sessions is None
default: 1
- factor (int) : additional factor by which to multiply the number of
comparisons
default: 1
- CIs (bool) : whether confidence interval comparisons are included
default: True
Returns:
- permpar (PermPar): updated permutation parameter named tuple
"""
if permpar.multcomp:
if sessions is not None:
n_linpla = (len(sessions))
n_sess = [len(m_sess) for lp_sess in sessions for m_sess in lp_sess]
if len(list(set(n_sess))) != 1:
raise RuntimeError("There should be the same number of "
"sessions for each mouse.")
n_sess = n_sess[0]
n_comps = 0
# sessions compared to their CIs
if CIs:
n_comps += n_sess
# session pair comparisons
k = 2
if n_sess >= k:
fact = np.math.factorial
n_comps += fact(n_sess) / (fact(k) * fact(n_sess - k))
# for each line/plane, and multiplied by specified factor
n_comps *= n_linpla
# interplane comparisons (approximate if missing planes)
n_comps += n_sess * (n_linpla // 2)
# multiplied by specified factor
n_comps *= factor
permpar = sess_ntuple_util.get_modif_ntuple(
permpar, "multcomp", int(n_comps)
)
return permpar
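# Worked example (hypothetical numbers): with n_sess=3, n_linpla=4, CIs=True and factor=1,
# n_comps = (3 CI comparisons + C(3, 2) = 3 session-pair comparisons) * 4 lines/planes = 24,
# plus 3 * (4 // 2) = 6 interplane comparisons, for 30 comparisons in total.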
#############################################
def split_by_linpla(sessions, rem_empty=False):
"""
split_by_linpla(sessions)
Returns nested list of sessions organized by line/plane.
Required args:
- sessions (list): nested list of Session objects (mouse x sess)
Optional args:
- rem_empty (bool): if True, lines/planes with no sessions are omitted
default: False
Returns:
- linpla_sess (list) : nested list of Session objects
(linpla x mouse x sess) (None for missing
sessions)
- linpla_order (list): line x plane order
"""
lines = ["L2/3", "L5"]
planes = ["dendrites", "soma"]
linpla_order = [f"{lin} {pla[:4]}" for pla in planes for lin in lines]
linpla_strip = [s.replace("/", "") for s in linpla_order]
linpla_sess = [[] for _ in range(len(linpla_order))] # linpla x mice x sess
for mouse_sess in sessions:
line = list(set([sess.line for sess in mouse_sess if sess is not None]))
if len(line) != 1:
raise RuntimeError("Error - why multiple lines? (or None?)")
else:
line = line[0][:3].strip("-")
plas = [sess.plane for sess in mouse_sess if sess is not None]
pla_vals = list(set(plas))
for pla in pla_vals:
sesses = []
for sess in mouse_sess:
if sess is None or sess.plane != pla:
sesses.append(None)
else:
sesses.append(sess)
if list(set(sesses)) != [None]: # only add if it's not all None
idx = linpla_strip.index(f"{line} {pla}")
linpla_sess[idx].append(sesses)
# check for empty lists in any lin/pla
if rem_empty:
rem_idx = []
for l, sessions in enumerate(linpla_sess):
if len(sessions) == 0:
rem_idx.append(l)
linpla_order = gen_util.remove_idx(linpla_order, rem_idx)
linpla_sess = gen_util.remove_idx(linpla_sess, rem_idx)
return linpla_sess, linpla_order
#############################################
def comp_vals_acr_planes(linpla_ord, vals, n_perms=None, normal=True,
stats="mean"):
"""
comp_vals_acr_planes(linpla_ord, vals)
Returns p values for comparisons across planes within lines.
Required args:
- linpla_ord (list): ordered list of planes/lines
- vals (list) : values, structured as
planes/lines x session
Optional args:
- n_perms (int): number of permutations to do if doing a permutation
test. If None, a different test is used
default: None
- stats (str) : stats to use for permutation test
default: "mean"
- normal (bool): whether data is expected to be normal or not
(determines whether a t-test or Mann Whitney test
will be done. Ignored if n_perms is not None.)
default: True
Returns:
- p_vals (2D array): p values, structured as
planes/lines x session
"""
lines = ["L2/3", "L5"]
n_sess = len(vals[0])
p_vals = np.full([len(lines), n_sess], np.nan)
for li, line in enumerate(lines):
idx = [i for i in range(len(linpla_ord)) if line in linpla_ord[i]]
# do comparison
if len(idx) == 2:
for s in range(n_sess):
# check for nans or None
data = [vals[i][s] for i in idx]
skip = False
for d in data:
if d is None or len(d) == 0:
skip = True
if skip:
continue
if n_perms is not None:
p_vals[li, s] = rand_util.get_op_p_val(
data, n_perms, stats=stats, op="diff")
elif normal:
p_vals[li, s] = scist.ttest_ind(
data[0], data[1], axis=None)[1]
else:
p_vals[li, s] = scist.mannwhitneyu(data[0], data[1])[1]
return p_vals
#############################################
def get_n_comps(all_p_vals, n_sess, lin_p_vals=None):
"""
get_n_comps(all_p_vals, n_sess)
Returns number of comparisons done for all lines and planes, as well
as the theoretical max number of comparisons each dataset is included in.
Required args:
- all_p_vals (list): list of p-values, structured as
line/plane x comparison
- n_sess (int) : number of sessions in each line/plane (incl. None)
Optional args:
- lin_p_vals (list): list of p-values, structured as
line x comparison
Returns:
- tot_n_comps (int) : total number of comparisons for all lines and
planes
- max_comps_per (int): maximum number of comparisons for each dataset
(theoretical - based on number of sessions)
"""
theor_tot = np.sum(range(n_sess)[1:])
if theor_tot != len(all_p_vals[0]):
raise RuntimeError("Theoretical number of comparisons within "
f"layer/planes is expected to be {theor_tot}, but is "
f"{len(all_p_vals[0])}.")
p_vals = [p for all_ps in all_p_vals for p in all_ps]
if lin_p_vals is not None:
p_vals = p_vals + [p for all_ps in lin_p_vals for p in all_ps]
tot_n_comps = np.count_nonzero(~np.isnan(p_vals))
# max number of comparisons each dataset is involved in
max_comps_per = n_sess - 1 + (lin_p_vals is not None)
return tot_n_comps, max_comps_per
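# Example (hypothetical): with n_sess=3, theor_tot = 1 + 2 = 3 comparisons per line/plane;
# max_comps_per is n_sess - 1 = 2, or 3 if lin_p_vals is also provided.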
#############################################
def data_from_refs(sess, refs, analyspar, stimpar, datatype="roi",
integ=False, baseline=0.0, base_pre=None, ch_fl=None,
ref_type="segs"):
"""
data_from_refs(sess, segs, analyspar, stimpar)
Returns data for the session.
Required args:
- sess (Session) : Session object
- refs (list) : segments or twop frames
- analyspar (AnalysPar): named tuple containing analysis parameters
- stimpar (StimPar) : named tuple containing stimulus parameters
Optional args:
- datatype (str) : type of data (e.g., "roi", "run")
default: "roi"
- integ (bool) : if True, sequence data is integrated
default: False
- baseline (bool or num): if not False, number of seconds to use for
baseline
default: 0.0
- base_pre (num) : pre value based on which to calculate
baseline. If None, stimpar.pre is used.
default: None
- ch_fl (list) : flanks in sec [pre sec, post sec] around
frames to check for removal if out of bounds
default: None
- ref_type (str) : type of reference provided
("segs", "twop_frs", "stim_frs")
default: "segs"
Returns:
- data_arr (1-3D array): data array structured as
[x ROIs] x seq [x frames]
"""
if analyspar.rem_bad:
nanpol = None
else:
nanpol = "omit"
args = {"rem_bad": analyspar.rem_bad,
"scale" : analyspar.scale}
if ref_type not in ["segs", "twop_frs", "stim_frs"]:
gen_util.accepted_values_error(
"ref_type", ref_type, ["segs", "twop_frs", "stim_frs"]
)
# retrieve stimulus type
if stimpar.stimtype == "both": # use either
if ref_type not in ["twop_frs", "stim_frs"]:
raise ValueError("If stimpar.stimtype is 'both', must provide "
"'twop_frs' or 'stim_frs' as ref_type.")
for stimtype in ["gabors", "visflow"]:
if hasattr(sess, stimtype):
stim = sess.get_stim(stimtype)
break
else:
stim = sess.get_stim(stimpar.stimtype)
use_ch_fl = ch_fl
if baseline:
if use_ch_fl is None:
use_ch_fl = [0, 0]
if base_pre is None:
base_pre = stimpar.pre
base_pre, base_post = quant_analys.define_transition_baseline(
stimpar.stimtype, stimpar.gabfr, baseline, base_pre, stimpar.post)
# expand flank checking as needed
use_ch_fl = [np.max([p, b])
for p, b in zip(use_ch_fl, [base_pre, base_post])]
if datatype == "roi":
if ref_type == "segs":
fr_ns = stim.get_fr_by_seg(
refs, start=True, stop=True, ch_fl=use_ch_fl, fr_type="twop"
)[f"start_frame_twop"]
elif ref_type == "twop_frs":
if ch_fl is not None:
fr_ns = stim.sess.check_flanks(refs, ch_fl, fr_type="twop")
else:
raise ValueError("If 'datatype' is 'roi', must provide either "
"'segs' or 'twop_frs' as ref_type.")
if stim.sess.only_tracked_rois != analyspar.tracked:
raise RuntimeError(
"stim.sess.only_tracked_rois should match analyspar.tracked."
)
fct = stim.get_roi_data
col = "roi_traces"
args["fluor"] = analyspar.fluor
elif datatype == "run":
# array: 1 x sequences
if ref_type == "segs":
fr_ns = stim.get_fr_by_seg(
refs, start=True, stop=True, ch_fl=use_ch_fl, fr_type="stim"
)[f"start_frame_stim"]
elif ref_type == "stim_frs":
if ch_fl is not None:
fr_ns = stim.sess.check_flanks(refs, ch_fl, fr_type="stim")
else:
raise ValueError("If 'datatype' is 'run', must provide either "
"'segs' or 'stim_frs' as ref_type.")
fct = stim.get_run_data
col = "run_velocity"
else:
gen_util.accepted_values_error("datatype", datatype, ["run", "roi"])
if len(fr_ns) == 0:
raise RuntimeError("No frames found given flank requirements.")
data_arr = gen_util.reshape_df_data(
fct(fr_ns, stimpar.pre, stimpar.post, **args)[col],
squeeze_cols=True)
if baseline:
base_data = gen_util.reshape_df_data(
fct(fr_ns, base_pre, base_post, **args), squeeze_cols=True)
end_shape = list(base_data.shape)[:-1] + [1]
# (ROI x) sequences x frames
base_data = math_util.mean_med(
base_data, stats=analyspar.stats, axis=-1, nanpol=nanpol)
data_arr = data_arr - base_data.reshape(end_shape)
if integ:
data_arr = math_util.integ(
data_arr, 1. / sess.twop_fps, axis=-1, nanpol=nanpol)
return data_arr
#############################################
def get_stim_onset_offset_frames(sess, stimpar, lock="stim_onset",
frametype="twop_frs"):
"""
get_stim_onset_offset_frames(sess, stimpar)
Returns segment numbers for each stimulus, as well as the pre/post
stimulus timing information
Required args:
- sess (Session) : Session object
- stimpar (StimPar): named tuple containing stimulus parameters
Optional args:
- lock (str) : how to lock the stimulus (onset or offset)
default: "stim_onset"
- frametype (str): type of frames to return
Returns:
- stim (Stim) : stimulus object
- frames (1D array): start frames
"""
if stimpar.stimtype != "both":
raise NotImplementedError("Only implemented when stimpar.stimtype is 'both'.")
stim = None
for stimtype in ["gabors", "visflow"]: # use any stimulus to retrieve data
if hasattr(sess, stimtype):
stim = sess.get_stim(stimtype)
break
fr_type = frametype.replace("_frs", "")
if lock == "stim_onset":
frames = sess.grayscr.get_stop_fr(fr_type=fr_type)[
f"stop_frame_{fr_type}"][:-1]
elif lock == "stim_offset":
frames = sess.grayscr.get_start_fr(fr_type=fr_type)[
f"start_frame_{fr_type}"][1:]
return stim, frames
#############################################
def get_common_oris(stimpar, split="by_exp"):
"""
get_common_oris(stimpar)
Returns Gabor orientations for common orientations, and checks parameters.
Required args:
- stimpar (StimPar):
named tuple containing stimulus parameters
Optional args:
- split (str):
how to split data:
"by_exp" (all exp, all unexp)
default: "by_exp"
Returns:
- gab_oris (list): Gabor orientations common to U frames
"""
if split != "by_exp":
raise NotImplementedError("'common_oris' only implemented "
"with 'split' set to 'by_exp'.")
if stimpar.stimtype != "gabors":
raise ValueError(
"Index analysis with common orientations can only be run on Gabors."
)
gab_oris = sess_gen_util.gab_oris_common_U(stimpar.gab_ori)
return gab_oris
#############################################
def get_seg_info(sess, stimpar, split="by_exp", prog_pos=0, common_oris=False):
"""
get_seg_info(sess, stimpar)
Returns segment information for a specific split type.
Required args:
- sess (Session) : Session object
- stimpar (StimPar): named tuple containing stimulus parameters
Optional args:
- split (str) : how to split data, either
"by_exp": all exp vs all unexp, or
"unexp_lock": unexp, vs preceeding exp, or
"exp_lock": exp, vs preceeding unexp
"prog_unexp": unexp, vs preceeding exp, but not
locked (e.g., U, vs prev D)
(i.e., pre not necessarily equal to post)
"prog_exp": exp, vs preceeding unexp, but not
locked (e.g., D, vs prev U)
(i.e., pre not necessarily equal to post)
default: "by_exp"
- prog_pos (int) : unexpected or expected position to retrieve if
split is "prog_unexp" or "prog_exp"
default: 0
- common_oris (bool): if True, only Gabor stimulus orientations shared
by D and U frames are included
("by_exp" split only)
default: False
Returns:
- segs (list) : segment array for each split
- pre_posts (list): [pre, post] values for each split
"""
locks = ["exp_lock", "unexp_lock"]
progs = ["prog_exp", "prog_unexp"]
split_values = (["by_exp"] + locks + progs)
if split not in split_values:
if split in ["stim_onset", "stim_offset"]:
raise NotImplementedError(
"Cannot retrieve segments for stim_onset/stim_offset."
)
else:
gen_util.accepted_values_error("split", split, split_values)
stim = sess.get_stim(stimpar.stimtype)
# check parameters
if split in locks and stimpar.pre != stimpar.post:
raise ValueError("stimpar.pre must equal stimpar.post for "
"locked analyses.")
if common_oris:
gab_ori = get_common_oris(stimpar, split)
else:
gab_ori = stimpar.gab_ori
# identify info for retrieving segments
if split == "by_exp":
gab_oris = [gab_ori, gab_ori]
if stimpar.stimtype == "gabors":
# if single Gabor orientation value, adjust by shifting orientation for unexp
if stimpar.gabfr in [3, 4] and isinstance(gab_ori, int):
gab_oris[1] = sess_gen_util.get_unexp_gab_ori(gab_ori)
# check if unexpected component is included
if (stimpar.gabfr * 0.3 + stimpar.post) < 0.9:
raise RuntimeError(f"{stimpar.post}s after gaborframe "
f"{stimpar.gabfr} is too short to include unexpected period.")
segs = [stim.get_segs_by_criteria(
gabfr=stimpar.gabfr, gabk=stimpar.gabk, gab_ori=gab_oris[unexp],
visflow_dir=stimpar.visflow_dir, visflow_size=stimpar.visflow_size,
unexp=unexp,
by="seg") for unexp in [0, 1]]
pre_posts = [[stimpar.pre, stimpar.post]] * 2
else:
if split in locks:
remconsec = True
gabfr = "any"
elif split in progs:
remconsec = False
gabfr = stimpar.gabfr
unexp = 1 if ("unexp" in split) else 0
segs = stim.get_segs_by_criteria(gabfr=gabfr, gabk=stimpar.gabk,
gab_ori=stimpar.gab_ori, visflow_dir=stimpar.visflow_dir,
visflow_size=stimpar.visflow_size, unexp=unexp, by="seg",
remconsec=remconsec)
if split in locks:
# shift to correct gabor frame
if (stimpar.stimtype == "gabors" and
stimpar.gabfr not in ["any", "all"]):
segs = [seg + stimpar.gabfr for seg in segs]
segs = [segs] * 2
pre_posts = [[stimpar.pre, 0], [0, stimpar.post]]
elif split in progs:
if not int(prog_pos) == float(prog_pos):
raise ValueError("prog_pos must be of type int.")
prog_pos = int(prog_pos)
# get the shift values for the main and previous segments
base_shift = 4 if stimpar.stimtype == "gabors" else 1
if stimpar.stimtype == "gabors" and gabfr in ["any", "all"]:
raise NotImplementedError("Setting 'stimpar.gabfr' to "
"'any' or 'all' has not been sufficiently tested for "
"'prog' split values.")
main_seg_shift = base_shift * prog_pos
# get the main segment numbers
start_segs, n_consec = gen_util.consec(segs, smallest=True)
if prog_pos == 0:
main_segs = np.asarray(start_segs)
else:
keep_segs_idx = np.where(np.asarray(n_consec) > prog_pos)[0]
main_segs = np.asarray(start_segs)[keep_segs_idx] + \
main_seg_shift
prev_seg_shift = base_shift * (1 + prog_pos)
main_segs = np.asarray(
list(filter(lambda i : i >= prev_seg_shift, main_segs))
).reshape(-1)
if len(main_segs) == 0:
raise RuntimeError("No segments meet the criteria for "
f"'prog_pos' = {prog_pos}.")
# [prev_segs, main_segs]
segs = [main_segs - prev_seg_shift, main_segs]
pre_posts = [[stimpar.pre, stimpar.post]] * 2
return segs, pre_posts
#############################################
def split_data_by_sess(sess, analyspar, stimpar, datatype="roi",
split="by_exp", integ=False, baseline=0.0, prog_pos=0,
common_oris=False):
"""
split_data_by_sess(sess, analyspar, stimpar)
Returns data for the session, split as requested.
Required args:
- sess (Session) : Session object
- analyspar (AnalysPar): named tuple containing analysis parameters
- stimpar (StimPar) : named tuple containing stimulus parameters
Optional args:
- datatype (str) : type of data (e.g., "roi", "run")
default: "roi"
- split (str) : how to split data, either
"by_exp": all exp vs all unexp, or
"unexp_lock": unexp, vs preceeding exp, or
"exp_lock": exp, vs preceeding unexp
"prog_unexp": unexp, vs preceeding exp, but
not locked (e.g., U, vs prev D)
(i.e., pre not necessarily equal to post)
"prog_exp": exp, vs preceeding unexp, but not
locked (e.g., D, vs prev U)
(i.e., pre not necessarily equal to post)
"stim_onset": grayscreen vs stimulus onset
"stim_offset": stimulus offset vs grayscreen
default: "by_exp"
- integ (bool) : if True, sequence data is integrated
default: False
- baseline (bool or num): if not False, number of seconds to use for
baseline
default: 0.0
- prog_pos (int) : unexpected or expected position to retrieve
if split is "prog_unexp" or "prog_exp"
default: 0
- common_oris (bool) : if True, only Gabor stimulus orientations
common to D and U frames are included
("by_exp" split only)
default: False
Returns:
- data_arr (list): list of data arrays structured as
split1, 2 [x ROIs] x seq [x frames]
split1, 2 ordered in time, i.e.
if split in ["by_exp", "unexp_lock", "prog_unexp"]:
split1, 2: exp, unexp
elif split in ["exp_lock", "prog_exp"]:
split1, 2: unexp, exp
elif split is "stim_onset":
split1, 2: grayscr, stim
elif split is "stim_offset:
split1, 2: stim, grayscr
"""
locks = ["exp_lock", "unexp_lock"]
progs = ["prog_exp", "prog_unexp"]
stim_on_offset = ["stim_onset", "stim_offset"]
if split in (["by_exp"] + locks + progs):
stim = sess.get_stim(stimpar.stimtype)
refs, pre_posts = get_seg_info(
sess, stimpar, split=split, prog_pos=prog_pos,
common_oris=common_oris
)
ref_type = "segs"
elif split in stim_on_offset:
pre_posts = [[stimpar.pre, 0], [0, stimpar.post]]
ref_type = "twop_frs" if datatype == "roi" else "stim_frs"
stim, frames = get_stim_onset_offset_frames(
sess, stimpar, lock=split, frametype=ref_type
)
refs = [frames, frames]
if split in progs:
prev_seg_shift = np.unique(refs[1] - refs[0])
if len(prev_seg_shift) != 1:
raise RuntimeError(
"Expected both sets of segs to be equally spaced."
)
prev_seg_shift = prev_seg_shift[0]
data_arr = []
for s, (subrefs, [pre, post]) in enumerate(zip(refs, pre_posts)):
ch_fl = None
base_pre = None
# flank checking and baseline pre are adjusted, since pre and post are split up
if split in (locks + stim_on_offset):
base_pre = stimpar.pre
ch_fl = [stimpar.pre, stimpar.post]
elif split in progs:
base_pre = stimpar.pre
sec_between = prev_seg_shift * stim.seg_len_s
if stimpar.stimtype == "gabors":
n_grayscr_segs = (3 - stimpar.gabfr + prev_seg_shift) // 4
sec_between += n_grayscr_segs * stim.seg_len_s
if s == 0: # for prev segs
ch_fl = [stimpar.pre, stimpar.post + sec_between]
elif s == 1: # for main segs
ch_fl = [stimpar.pre + sec_between, stimpar.post]
stimpar_use = sess_ntuple_util.get_modif_ntuple(
stimpar, ["pre", "post"], [pre, post])
data_arr.append(data_from_refs(
sess, subrefs, analyspar, stimpar_use, datatype, integ=integ,
baseline=baseline, base_pre=base_pre, ch_fl=ch_fl,
ref_type=ref_type))
# very few stim onset/offset sequences, so best to retain all
axis = -1 if integ else -2
if ((split in stim_on_offset) and
(data_arr[s].shape[axis] != len(subrefs))):
raise RuntimeError("Not all sequences could be retained for "
f"{split} with stimpar.pre={stimpar.pre} and "
f"stimpar.post={stimpar.post}.")
return data_arr
#############################################
def dir_data_by_sess(sess, analyspar, stimpar, datatype="roi", integ=False,
baseline=0.0, unexp="any", remconsec=False):
"""
dir_data_by_sess(sess, analyspar, stimpar)
Returns data for the session, split by direction.
Required args:
- sess (Session) : Session object
- analyspar (AnalysPar): named tuple containing analysis parameters
- stimpar (StimPar) : named tuple containing stimulus parameters
Optional args:
- datatype (str) : type of data (e.g., "roi", "run")
default: "roi"
- integ (bool) : if True, sequence data is integrated
default: False
- baseline (bool or num): if not False, number of seconds to use for
baseline
default: 0.0
- unexp (str or int) : unexpected value, e.g. "any", 0, 1
default: "any"
- remconsec (bool) : if True, consecutive segments are removed
default: False
Returns:
- data_arr (list): list of data arrays structured as
nasal, temp [x ROIs] x seq [x frames]
"""
if stimpar.stimtype != "visflow":
raise ValueError("Cannot get direction data for Gabors.")
if not remconsec and not (baseline is None or baseline == 0):
raise NotImplementedError("Baseline not implemented for "
"Visflow direction without 'remconsec'.")
stim = sess.get_stim(stimpar.stimtype)
data_arr = []
for direc in ["nasal", "temp"]:
stimpar_sp = sess_ntuple_util.get_modif_ntuple(
stimpar, "visflow_dir", direc)
segs = stim.get_segs_by_criteria(
gabfr=stimpar_sp.gabfr, gabk=stimpar_sp.gabk,
gab_ori=stimpar_sp.gab_ori, visflow_dir=stimpar_sp.visflow_dir,
visflow_size=stimpar_sp.visflow_size, remconsec=remconsec,
unexp=unexp, by="seg")
data_arr.append(data_from_refs(
sess, segs, analyspar, stimpar, datatype, integ=integ,
baseline=baseline, base_pre=stimpar.pre))
return data_arr
#############################################
def split_diff_by_sess(sess, analyspar, stimpar, n_perms=1000, datatype="roi",
split="by_exp", baseline=0.0):
"""
split_diff_by_sess(sess, analyspar, stimpar)
Returns session statistics for difference between sequence splits as well
as random values obtained from permutations.
Required args:
- sess (Session) : Session object
- analyspar (AnalysPar): named tuple containing analysis parameters
- stimpar (StimPar) : named tuple containing stimulus parameters
Optional args:
- n_perms (int) : number of permutations for CI estimation. If
None, random data is not calculated.
default: 1000
- datatype (str) : type of data (e.g., "roi", "run")
default: "roi"
- split (str) : how to split data, either
"by_exp": all exp vs all unexp, or
"unexp_lock": unexp, vs preceeding exp, or
"exp_lock": exp, vs preceeding unexp
"prog_unexp": unexp, vs preceeding exp, but
not locked (e.g., U, vs prev D)
(i.e., pre not necessarily equal to post)
"prog_exp": exp, vs preceeding unexp, but not
locked (e.g., D, vs prev U)
(i.e., pre not necessarily equal to post)
"stim_onset": grayscreen vs stimulus onset
"stim_offset": stimulus offset vs grayscreen
default: "by_exp"
- baseline (bool or num): if not False, number of seconds to use for
baseline
default: 0.0
Returns:
- diff_st (1D array) : session statistics for difference between
split2 and split1 sequence areas (me, err)
- all_rand (1D array): random value obtained for each permutation
(None, if n_perms is None)
- data_arr (list) : list of data arrays, structured as
split1, 2 [x ROIs] x seq
split1, 2 ordered in time, i.e.
if split in ["by_exp", "unexp_lock",
"prog_unexp"]:
split1, 2: exp, unexp
elif split in ["exp_lock", "prog_exp"]:
split1, 2: unexp, exp
elif split is "stim_onset":
split1, 2: grayscr, stim
elif split is "stim_offset:
split1, 2: stim, grayscr
"""
nanpol = "omit"
if analyspar.rem_bad:
nanpol = None
data_arr = split_data_by_sess(sess, analyspar, stimpar, datatype=datatype,
split=split, integ=True, baseline=baseline)
# take mean/median across sequences
mean_meds = [math_util.mean_med(data, stats=analyspar.stats, axis=-1,
nanpol=nanpol) for data in data_arr]
last_dim = np.sum([sub.shape[-1] for sub in data_arr])
if datatype != "roi":
mean_meds = np.asarray(mean_meds).reshape(2, 1)
targ = (1, last_dim)
else:
targ = (-1, last_dim)
diff_st = math_util.get_stats(mean_meds[1] - mean_meds[0],
stats=analyspar.stats, error=analyspar.error, nanpol=nanpol)
# get CI
div = data_arr[0].shape[-1] # length of exp
all_rand = None
if n_perms is not None:
all_rand = math_util.mean_med(rand_util.permute_diff_ratio(
np.concatenate(data_arr, axis=-1).reshape(targ), div=div,
n_perms=n_perms, stats=analyspar.stats, nanpol=nanpol, op="diff"),
stats=analyspar.stats, axis=0, nanpol=nanpol)
return diff_st, all_rand, data_arr
#############################################
def prog_by_sess(sess, analyspar, stimpar, datatype="roi", unexp="prog_unexp",
position=0, baseline=0):
"""
prog_by_sess(sess, analyspar, stimpar)
Returns differences between unexpected sequences and preceding expected
sequences across a session, as well as the average for the session.
Required args:
- sess (Session) : Session object
- analyspar (AnalysPar): named tuple containing analysis parameters
- stimpar (StimPar) : named tuple containing stimulus parameters
Optional args:
- datatype (str) : type of data (e.g., "roi", "run")
default: "roi"
- unexp (str) : how to split unexpected vs exp data, either
"prog_unexp": unexp, vs preceeding exp,
but not locked (e.g., prog U, vs prev D)
(i.e., pre not necessarily equal to post)
"prog_exp": exp, vs preceeding unexp,
but not locked (e.g., D, vs prev U)
(i.e., pre not necessarily equal to post)
default: "prog_unexp"
- position (int) : unexpected or expected position to retrieve
default: 0
- baseline (bool or num): if not False, number of seconds to use for
baseline
default: 0
Returns:
- data_arr (2 or 3D array): array of data for unexpected sequences and
the preceding expected sequence
(exp, unexp), or v.v. if unexp is
"prog_exp", structured as
(exp, unexp) [x ROIs] x seq
"""
if unexp not in ["prog_unexp", "prog_exp"]:
gen_util.accepted_values_error(
"unexp", unexp, ["prog_unexp", "prog_exp"]
)
data_arr = split_data_by_sess(sess, analyspar, stimpar, datatype=datatype,
split=unexp, integ=True, baseline=baseline,
prog_pos=position)
data_arr = np.asarray(data_arr)
return data_arr
#############################################
def stim_idx_by_sess(sess, analyspar, stimpar, n_perms=1000, datatype="roi",
feature="by_exp", position=0, op="d-prime", baseline=0.0,
common_oris=False, seed=None, run_random=True):
"""
stim_idx_by_sess(sess, analyspar, stimpar)
Returns session item (ROIs or 1 for running) indices for difference between
sequences split by the specified feature, as well as their percentiles
based on random permutations for each item.
Required args:
- sess (Session) : Session object
- analyspar (AnalysPar): named tuple containing analysis parameters
- stimpar (StimPar) : named tuple containing stimulus parameters
Optional args:
- n_perms (int) : number of permutations for CI estimation
default: 1000
- datatype (str) : type of data (e.g., "roi", "run")
default: "roi"
- feature (str) : how to split stimuli, e.g.,
"by_exp": all exp vs all unexp, or
"prog_unexp": first unexp, vs prec. exp, or
"prog_exp": first exp, vs prec. unexp, or
"dir": left v right direction
default: "by_exp"
- position (int) : unexpected or expected position to retrieve
if feature is "prog_unexp" or "prog_exp"
default: 0
- op (str) : operation to use in measuring indices
("diff", "rel_diff", "d-prime")
default: "d-prime"
- baseline (bool or num): if not False, number of seconds to use for
baseline
default: 0.0
- common_oris (bool) : if True, only Gabor stimulus orientations
common to D and U frames are included
("by_exp" feature only)
default: False
- seed (int) : seed value to use. (-1 treated as None)
default: None
- run_random (bool) : if True, randomization is run and results are
returned (item_percs, all_rand)
default: True
Returns:
- item_idxs (1D array) : item (ROIs or 1 for running) indices for the
session
if run_random:
- item_percs (1D array): item (ROIs or 1 for running) index
percentiles for the session, based on
each item's random permutations
- all_rand (2D array) : item (ROIs or 1 for running) indices
calculated through randomized permutation,
structured as item x n_perms
"""
seed = rand_util.seed_all(seed, "cpu", log_seed=False)
nanpol = "omit"
if analyspar.rem_bad:
nanpol = None
if "dir" in feature:
if feature == "dir_exp":
unexp = 0
elif feature == "dir_unexp":
unexp = 1
elif feature == "dir":
unexp = "any"
else:
raise ValueError("If 'dir' in 'feature', must be "
"among 'dir_exp', 'dir_unexp' or 'dir'.")
if common_oris:
raise ValueError("'common_oris' only applies to Gabor analyses.")
data_arr = dir_data_by_sess(sess, analyspar, stimpar,
datatype=datatype, integ=True, baseline=baseline, unexp=unexp,
remconsec=False)
else:
data_arr = split_data_by_sess(sess, analyspar, stimpar,
datatype=datatype, split=feature, integ=True, baseline=baseline,
prog_pos=position, common_oris=common_oris)
if op != "d-prime":
# take statistic across sequences
seq_mes = np.stack([math_util.mean_med(
arr, stats=analyspar.stats, axis=-1, nanpol=nanpol)
for arr in data_arr])
axis = None
else:
seq_mes = data_arr
axis = -1
# take relative difference (index)
item_idxs = math_util.calc_op(seq_mes, op=op, nanpol=nanpol, axis=axis)
if run_random:
last_dim = np.sum([sub.shape[-1] for sub in data_arr])
if datatype != "roi":
item_idxs = np.asarray(item_idxs).reshape(-1)
targ = (1, last_dim)
else:
targ = (-1, last_dim)
# get CI
div = data_arr[0].shape[-1] # length of exp
# perms (items x perms)
all_rand = rand_util.permute_diff_ratio(
np.concatenate(data_arr, axis=-1).reshape(targ), div=div,
n_perms=n_perms, stats=analyspar.stats, nanpol=nanpol, op=op)
item_percs = np.empty(len(item_idxs))
for r, (item_idx, item_rand) in enumerate(zip(item_idxs, all_rand)):
item_percs[r] = scist.percentileofscore(
item_rand, item_idx, kind="mean")
return item_idxs, item_percs, all_rand
else:
return item_idxs
#############################################
def stim_idx_acr_sesses(sessions, analyspar, stimpar, n_perms=1000,
datatype="roi", feature="by_exp", position=0,
op="d-prime", baseline=0.0, common_oris=False,
seed=None, parallel=False):
"""
stim_idx_acr_sesses(sessions, analyspar, stimpar)
Returns item (ROIs or running) indices for difference between
stimulus features (e.g., unexpected v expected, visual flow direction), as
well as their percentiles based on random permutations for each item,
grouped across mice for the session number.
Required args:
- sessions (list) : Session objects for each mouse
- analyspar (AnalysPar): named tuple containing analysis parameters
- stimpar (StimPar) : named tuple containing stimulus parameters
Optional args:
- n_perms (int) : number of permutations for CI estimation
default: 1000
- datatype (str) : type of data (e.g., "roi", "run")
default: "roi"
- feature (str) : how to split stimuli, e.g.,
"by_exp": all exp vs all unexp, or
"prog_unexp": first unexp, vs prec. exp, or
"prog_exp": first exp, vs prec. unexp, or
"dir_exp": left v right direction (exp.),
"dir_unexp": left v right direction (unexp.),
"dir": left v right direction
default: "by_exp"
- position (int) : unexpected or expected position to retrieve
if unexp is "prog_unexp" or "prog_exp"
default: 0
- op (str) : operation to use in measuring indices
("diff", "rel_diff", "d-prime")
default: "d-prime"
- baseline (bool or num): if not False, number of seconds to use for
baseline
default: 0.0
- common_oris (bool) : if True, only Gabor stimulus orientations
common to D and U frames are included
("by_exp" feature only)
default: False
- seed (int) : seed value to use. (-1 treated as None)
default: None
- parallel (bool) : if True, sessions are analysed in parallel
(not implemented)
default: False
Returns:
- all_item_idxs (list) : item (ROIs or running) indices grouped across
all sessions
- all_item_percs (list): item (ROIs or running) index percentiles,
based on each item's random permutations,
grouped across all sessions
- all_rand_idxs (list) : for each session number, random item
(ROIs or running) indices, based on each
item's random permutations, grouped across
all sessions and items (items * n_perms)
"""
all_item_idxs, all_item_percs, all_rand_idxs = [], [], []
for sess in sessions:
if sess is None:
continue
try:
item_idxs, item_percs, rand_idxs = stim_idx_by_sess(
sess, analyspar, stimpar, n_perms, datatype, feature,
position, op, baseline, common_oris, seed)
except Exception as e:
if "dir" in feature and "No segments" in str(e):
continue
else:
raise e
all_item_idxs.extend(item_idxs.tolist())
all_item_percs.extend(item_percs.tolist())
all_rand_idxs.extend(rand_idxs.tolist())
return all_item_idxs, all_item_percs, all_rand_idxs
#############################################
def stim_idx_by_sesses(sessions, analyspar, stimpar, n_perms=1000, p_val=0.05,
datatype="roi", feature="by_exp", op="d-prime",
position=0, baseline=0.0, common_oris=False, seed=None,
parallel=False):
"""
stim_idx_by_sesses(sessions, analyspar, stimpar)
Returns item (ROIs or running) indices for difference between
stimulus features (e.g., unexpected v expected, visual flow direction), as
well as their percentiles based on random permutations for each item,
grouped across mice for each session number.
Required args:
- sessions (list) : nested list of Session objects (mouse x sess)
- analyspar (AnalysPar): named tuple containing analysis parameters
- stimpar (StimPar) : named tuple containing stimulus parameters
Optional args:
- n_perms (int) : number of permutations for CI estimation
default: 1000
- p_val (float) : p-value (used to decide number of bins)
default: 0.05
- datatype (str) : type of data (e.g., "roi", "run")
default: "roi"
- feature (str) : how to split stimuli, e.g.,
"by_exp": all exp vs all unexp, or
"prog_unexp": first unexp, vs prec. exp, or
"prog_exp": first exp, vs prece. unexp, or
"dir_exp": left v right direction (exp.),
"dir_unexp": left v right direction (unexp.),
"dir": left v right direction
default: "by_exp"
- op (str) : operation to use in measuring indices
("diff", "rel_diff", "d-prime")
default: "d-prime"
- position (int) : unexpected or expected position to retrieve
if unexp is "prog_unexp" or "prog_exp"
default: 0
- baseline (bool or num): if not False, number of seconds to use for
baseline
default: 0.0
- common_oris (bool) : if True, only Gabor stimulus orientations
common to D and U frames are included
("by_exp" feature only)
default: False
- seed (int) : seed value to use. (-1 treated as None)
default: None
- parallel (bool) : if True, sessions are analysed in parallel
default: False
Returns:
- all_item_idxs (list) : for each session number, item (ROIs or running)
index bin counts grouped across mice
- all_item_percs (list): for each session number, item (ROIs or running)
index percentile bin counts, based on
each item's random permutations, grouped
across mice
- all_rand_idxs (list) : for each session number, binned random item
(ROIs or running) index bin counts,
based on each item's random permutations,
grouped across mice, structured as
session (item * n_perms)
- all_perc_pos (list) : for each session number, percent ROIs with
positive indices, grouped across mice,
- sess_edges (list) : for each session number, bin edges used for
indices,
session x [min, max]
- sess_info (list) : nested list of dictionaries for each
session number containing information from
each mouse, with None for missing sessions
["mouse_ns"] (list) : mouse numbers
["sess_ns"] (list) : session numbers
["lines"] (list) : mouse lines
["planes"] (list) : imaging planes
["nrois"] (list) : number of ROIs in session
"""
if len(sessions) == 0:
raise ValueError("At least one session must be passed.")
n_sess = list(set([len(m_sess) for m_sess in sessions]))
if len(n_sess) != 1:
raise RuntimeError("There should be the same number of sessions for "
"each mouse.")
sessions_zipped = zip(*sessions)
n_bins = 4 / p_val
if n_bins != int(n_bins):
raise NotImplementedError(f"Analysis not well adapted to binning "
f"with p-value of {p_val}.")
else:
n_bins = int(n_bins)
all_item_idxs, all_item_percs, all_rand_idxs, all_poses = [], [], [], []
sess_edges = []
sess_info = []
for sesses in sessions_zipped:
sesses = list(sesses)
all_items, all_percs, all_rand = stim_idx_acr_sesses(
sesses, analyspar, stimpar, n_perms, datatype,
feature, position, op, baseline, common_oris, seed, parallel)
sess_info.append(sess_gen_util.get_sess_info(
sesses, analyspar.fluor, add_none=True,
incl_roi=(datatype=="roi"), rem_bad=analyspar.rem_bad))
if len(all_rand) == 0:
use_bounds = [-0.5, 0.5]
bin_edges = np.linspace(*use_bounds, n_bins + 1)
all_item_idxs.append(
np.histogram(all_items, bins=bin_edges)[0].tolist())
all_rand_idxs.append(
(np.histogram(all_rand, bins=bin_edges)[0]).tolist())
sess_edges.append([np.min(bin_edges), np.max(bin_edges)])
all_item_percs.append(
np.histogram(
all_percs, bins=n_bins, range=[0, 100])[0].tolist())
all_poses.append(np.nan)
continue
# get edges for histogram
all_rand = np.concatenate(all_rand, axis=0)
div = len(all_rand)/float(len(all_items))
if op in ["diff", "d-prime"]:
use_bounds = [np.min(all_rand), np.max(all_rand)]
elif op == "rel_diff":
# use extrema or outlier bounds, whichever are tighter
rand_outlier_bounds = math_util.outlier_bounds(
all_rand, fences="outer")
use_bounds = [fct([o, fct(all_rand)]) for o, fct in
zip(rand_outlier_bounds, [np.max, np.min])]
# ensure that real data is fully included
use_bounds = [fct([fct(all_items), r])
for r, fct in zip(use_bounds, [np.min, np.max])]
n_out = np.sum(all_rand < use_bounds[0]) + \
np.sum(all_rand > use_bounds[1])
if n_out > 0:
logger.warning(f"{n_out}/{len(all_rand)} random values lie "
"outside histogram bin bounds (outliers).")
bin_edges = np.linspace(*use_bounds, n_bins + 1)
perc_pos = (len(np.where(np.asarray(all_items) > 0)[0]) * 100 /
len(all_items))
all_item_idxs.append(
np.histogram(all_items, bins=bin_edges)[0].tolist())
all_rand_idxs.append(
(np.histogram(all_rand, bins=bin_edges)[0]/div).tolist())
all_item_percs.append(
np.histogram(all_percs, bins=n_bins, range=[0, 100])[0].tolist())
all_poses.append(perc_pos)
sess_edges.append([np.min(bin_edges), np.max(bin_edges)])
return [all_item_idxs, all_item_percs, all_rand_idxs, all_poses,
sess_edges, sess_info]
#############################################
def get_grped_roi_stats(all_roi_vals, analyspar, permpar):
"""
get_grped_roi_stats(all_roi_vals, analyspar, permpar)
Returns difference between sequence data for each split, with ROIs grouped
across mice.
Required args:
- all_roi_vals (list) : sequence areas, split across groups
(e.g., exp, unexp) values for each session,
structured as
session x mice x splits x ROI x seqs
- analyspar (AnalysPar): named tuple containing analysis parameters
- permpar (PermPar) : named tuple containing permutation parameters
Returns:
- all_diff_st (list) : difference stats (split2 - split1) across ROIs
(grouped across mice), structured as
session x stats
- CI_vals (list) : CIs values across ROIs, structured as
session x perc (med, lo, high)
- sign_sess (list) : significant session indices, optionally
structured by tail
- all_diffs (list) : differences, structured as session x ROI
- p_vals_grped (list): p values for each comparison, organized by
session pairs (where the second session is
cycled in the inner loop, e.g., 0-1, 0-2, 1-2,
including empty groups)
- p_vals_sess (list) : p values for each session
"""
# integrate multiple comparisons into p-value
if permpar.multcomp:
permpar = sess_ntuple_util.get_modif_ntuple(
permpar, ["multcomp", "p_val"],
[False, permpar.p_val / permpar.multcomp]
)
# join ROIs across mice
percs = [50.0] + math_util.get_percentiles(
CI = (1.0 - permpar.p_val), tails=permpar.tails
)[0]
st_len = 2 + (analyspar.stats == "median" and analyspar.error == "std")
nanpol = "omit"
if analyspar.rem_bad:
nanpol = None
n_sess = len(all_roi_vals)
all_diff_st = np.empty([n_sess, st_len]) * np.nan
all_rand = np.empty([n_sess, permpar.n_perms]) * np.nan
all_diffs = []
for s, sess_roi_vals in enumerate(all_roi_vals):
# mice x splits x ROIs x seqs
sess_mean_meds = []
sess_rands = []
for mouse_vals in sess_roi_vals: # for each mouse
sess_mean_meds.append([math_util.mean_med(split_vals, axis=-1,
stats=analyspar.stats) for split_vals in mouse_vals])
# get CI
div = mouse_vals[0].shape[-1] # length of exp
# perms
sess_rands.append(rand_util.permute_diff_ratio(
| np.concatenate(mouse_vals, axis=1) | numpy.concatenate |
from pioneer.common import linalg
from enum import Enum
import cv2
import numpy as np
class Pos(Enum):
LEFT = 0
CENTER = 1
RIGHT = 2
class CylindricalProjection():
''' cylindrical projection for 3 cameras
args:
intrinsic_calibrations: list of the 3 cameras' intrinsic calibrations
distortion_coef: list of the 3 cameras' distortion coefficients
extrinsic_calibrations: list of the 3 extrinsic 4x4 matrices; the middle one is the identity
config: configuration dict
radius: cylinder radius (meter)
FOV_h: total horizontal cylinder FOV (rad)
FOV_v: total vertical cylinder FOV (rad)
image_h : horizontal cylinder image size (pixel)
image_v : vertical cylinder image size (pixel)
fusion_overlap_ratio : overlap ratio taken into account for image merging or fusion (0.0 to 1.0)
'''
def __init__(self
, intrinsic_calibrations = None
, distortion_coef = None
, extrinsic_calibrations = None
, config={'radius':50.0, 'FOV_h':np.deg2rad(210), 'FOV_v':np.deg2rad(67.5), 'image_h':2000, 'image_v':int(2000*0.25), 'fusion_overlap_ratio': 0.25}
):
self.__assert_intrinsic_calibrations(intrinsic_calibrations)
self.__assert_distortion_coefficients(distortion_coef)
self.__assert_extrinsic_calibrations(extrinsic_calibrations)
self.radius = config.get('radius', 50.0)
self.FOV_width = config.get('FOV_h', np.deg2rad(210))
self.FOV_height = config.get('FOV_v',
|
np.deg2rad(67.5)
|
numpy.deg2rad
|
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def plot_levels(func, xrange=None, yrange=None, levels=None):
"""
Plotting the contour lines of the function.
Example:
--------
>> oracle = oracles.QuadraticOracle(np.array([[1.0, 2.0], [2.0, 5.0]]), np.zeros(2))
>> plot_levels(oracle.func)
"""
if xrange is None:
xrange = [-6, 6]
if yrange is None:
yrange = [-5, 5]
if levels is None:
levels = [0, 0.25, 1, 4, 9, 16, 25]
x =
|
np.linspace(xrange[0], xrange[1], 100)
|
numpy.linspace
|
import numpy as np
from sklearn.datasets import load_digits
from copy import deepcopy
def H(h_triplets: dict, x, y) -> float:
    """
    Return the fraction of samples (accuracy) for which the weighted committee
    of stumps in h_triplets predicts the correct class (1 vs. -1).
    """
global M
global one
global minus_one
res = []
for i in range(M):
sum = 0
for ind, trip in enumerate(h_triplets.values()):
alpha = trip[2]
sum += alpha * trip[5][i]
res.append(sum > 0)
good_guess = 0
for ind, guess in enumerate(res):
if guess and y[ind] == one: # correct guess, it is class 1
good_guess += 1
elif not guess and y[ind] == minus_one: # correct guess, it is class -1
good_guess += 1
score = good_guess / M
return score
def adaboost10(h: dict, x, y):
global M
global N
global one
global minus_one
eps_vec = np.zeros((N, 1))
W = np.full((M, 1), 1 / M)
T = 10
best_h = {}
wrongs = {}
while len(best_h.keys()) != T:
inp_guess = {}
for ind, triplet in enumerate(h.values()):
inp_guess[ind] = []
wrong_ind = []
bigger_than = (triplet[0] == '>')
thres = triplet[1]
for index, num in enumerate(x[:, ind]): # on collumn ind, attr: n{ind}
if (bigger_than and num > thres) or (not bigger_than and num < thres): # predict that is class 1
inp_guess[ind].append(1)
if y[index] != one:
wrong_ind.append(index) # Wrong input index
else: # predict that is class -1
inp_guess[ind].append(-1)
if y[index] != minus_one:
wrong_ind.append(index)
tmp_eps = 0
for i in wrong_ind:
tmp_eps += W[i]
eps_vec[ind] = tmp_eps
wrongs[ind] = wrong_ind
# updating h and alpha
min_eps = min(eps_vec) # minimal epsilon in time: t
if min_eps == 0:
print('100% success, pick 2 different digits')
return
min_h = np.argmin(eps_vec) # best h in time: t
cur_alpha = float(0.5 *
|
np.log((1 - min_eps) / min_eps)
|
numpy.log
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2019 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Model functions used to describe SQUID oscillations.
"""
from math import pi
import numpy as np
from scipy.constants import h, e
from scipy.interpolate import interp1d
def compute_squid_current(
phase,
cpr1,
parameters1,
cpr2,
parameters2,
positive=True,
aux_res=101,
inductance=0.0,
compute_phase=False,
):
"""Compute the SQUID current from 2 CPRs.
Parameters
----------
phase : np.ndarray
Phase at which to compute the SQUID current flow (1D array at most).
cpr1 : callable
Function used to compute the current in the first junction. The
callable should take the phase as first argument.
parameters1 : tuple
Parameters to use to compute the current in the first junction.
cpr2 : callable
Function used to compute the current in the second junction. The
callable should take the phase as first argument.
parameters2 : tuple
Parameters to use to compute the current in the second junction.
positive : bool, optional
Should the computed current be positive or negative
aux_res : int, optional
Number of points to use when optimizing the phase to get the maximum
current.
inductance : float, optional
Inductance of the loop in H
compute_phase : bool, optional
Compute the phase at which the squid current is maximal instead of the
current.
"""
phi1, *p1 = parameters1
phi2, *p2 = parameters2
if inductance == 0.0:
aux = np.tile(
|
np.linspace(0, 2 * np.pi, aux_res)
|
numpy.linspace
|
#!/usr/bin/env python3
"""
Created on Tue Apr 24 15:48:52 2020
@author: <NAME>
"""
from os.path import splitext
import numpy as np
# import spatialmath as sp
from spatialmath import SE3
from spatialmath.base.argcheck import getvector, verifymatrix
from spatialmath.base import tr2rpy, r2q
from ropy.robot.ELink import ELink
from ropy.backend.PyPlot.functions import \
_plot, _teach, _fellipse, _vellipse, _plot_ellipse, \
_plot2, _teach2
from ropy.backend import xacro
from ropy.backend import URDF
class ETS(object):
"""
The Elementary Transform Sequence (ETS). A superclass which represents the
kinematics of a serial-link manipulator
:param et_list: List of elementary transforms which represent the robot
kinematics
:type et_list: ET list
:param name: Name of the robot
:type name: str, optional
:param manufacturer: Manufacturer of the robot
:type manufacturer: str, optional
:param base: Location of the base in the world frame
:type base: SE3, optional
:param tool: Offset of the flange of the robot to the end-effector
:type tool: SE3, optional
:param gravity: The gravity vector
:type gravity: ndarray(3)
:references:
- Kinematic Derivatives using the Elementary Transform Sequence,
<NAME> and <NAME>
"""
def __init__(
self,
elinks,
name='noname',
base_link=None,
ee_link=None,
manufacturer='',
base=None,
tool=None,
gravity=np.array([0, 0, 9.81])):
super(ETS, self).__init__()
if base is None:
self.base = SE3()
else:
self.base = base
self.tool = SE3()
self.gravity = gravity
# Verify elinks
if not isinstance(elinks, list):
raise TypeError('The links must be stored in a list.')
self._ets = []
self._n = 0
self._M = 0
self._q_idx = []
# Set up a dictionary for looking up links by name
for link in elinks:
if isinstance(link, ELink):
self._M += 1
else:
raise TypeError("Input can be only ELink")
# Set up references between links, a bi-directional linked list
# Also find the top of the tree
self.root = []
self.end = []
for link in elinks:
for li in link.parent:
li._child.append(link)
if len(link.parent) == 0:
self.root.append(link)
# Find the bottom of the tree
for link in elinks:
if len(link.child) == 0:
self.end.append(link)
# If the base link is not defined, use the root of the tree
if base_link is None:
self._base_link = self.root[0] # Needs to be private attrib
else:
self._base_link = base_link # Needs to be private attrib
# If the ee link is not defined, use the bottom of the tree
if ee_link is None:
self._ee_link = self.end[-1]
else:
self._ee_link = ee_link
def add_links(link, lst, q_idx):
if link.jtype == link.VARIABLE:
q_idx.append(len(lst))
lst.append(link)
# Figure out the order of links with respect to joint variables
self.bfs_link(
lambda link: add_links(link, self._ets, self._q_idx))
self._n = len(self._q_idx)
self._reset_fk_path()
self.name = name
self.manuf = manufacturer
# Current joint angles of the robot
self.q = np.zeros(self.n)
self.qd = np.zeros(self.n)
self.qdd = np.zeros(self.n)
self.control_type = 'v'
def _reset_fk_path(self):
# Pre-calculate the forward kinematics path
self._fkpath = self.dfs_path(self.base_link, self.ee_link)
def bfs_link(self, func):
queue = self.root
for li in queue:
func(li)
def vis_children(link):
for li in link.child:
if li not in queue:
queue.append(li)
func(li)
while len(queue) > 0:
link = queue.pop(0)
vis_children(link)
def dfs_path(self, l1, l2):
path = []
visited = [l1]
def vis_children(link):
visited.append(link)
for li in link.child:
if li not in visited:
if li == l2 or vis_children(li):
path.append(li)
return True
vis_children(l1)
path.append(l1)
path.reverse()
return path
def to_dict(self):
ob = {
'links': [],
'name': self.name,
'n': self.n,
'M': self.M,
'q_idx': self.q_idx
}
self.allfkine()
for link in self.ets:
li = {
'axis': [],
'eta': [],
'q_idx': link.q_idx,
'geometry': [],
't': link._fk.t.tolist(),
'q': r2q(link._fk.R).tolist()
}
for et in link.ets:
li['axis'].append(et.axis)
li['eta'].append(et.eta)
for gi in link.geometry:
g_fk = link._fk * gi.base
if gi.scale is not None:
scale = gi.scale.tolist()
else:
scale = [1, 1, 1]
li['geometry'].append({
'filename': gi.filename,
'scale': scale,
't': g_fk.t.tolist(),
'q': r2q(g_fk.R).tolist()
})
ob['links'].append(li)
return ob
def fk_dict(self):
ob = {
'links': []
}
self.allfkine()
# print(Tall)
for link in self.ets:
li = {
't': link._fk.t.tolist(),
'q': r2q(link._fk.R).tolist()
}
ob['links'].append(li)
return ob
# @classmethod
# def urdf_to_ets(cls, file_path):
# name, ext = splitext(file_path)
# if ext == '.xacro':
# urdf_string = xacro.main(file_path)
# urdf = URDF.loadstr(urdf_string, file_path)
# return ETS(
# urdf.elinks,
# name=urdf.name
# )
@staticmethod
def urdf_to_ets_args(file_path):
name, ext = splitext(file_path)
if ext == '.xacro':
urdf_string = xacro.main(file_path)
urdf = URDF.loadstr(urdf_string, file_path)
return urdf.elinks, urdf.name
# @classmethod
# def dh_to_ets(cls, robot):
# """
# Converts a robot modelled with standard or modified DH parameters to an
# ETS representation
# :param robot: The robot model to be converted
# :type robot: SerialLink
#     :return: ETS representation of the robot
#     :rtype: ETS
# """
# ets = []
# q_idx = []
# M = 0
# for j in range(robot.n):
# L = robot.links[j]
# # Method for modified DH parameters
# if robot.mdh:
# # Append Tx(a)
# if L.a != 0:
# ets.append(ET.Ttx(L.a))
# M += 1
# # Append Rx(alpha)
# if L.alpha != 0:
# ets.append(ET.TRx(L.alpha))
# M += 1
# if L.is_revolute:
# # Append Tz(d)
# if L.d != 0:
# ets.append(ET.Ttz(L.d))
# M += 1
# # Append Rz(q)
# ets.append(ET.TRz(joint=j+1))
# q_idx.append(M)
# M += 1
# else:
# # Append Tz(q)
# ets.append(ET.Ttz(joint=j+1))
# q_idx.append(M)
# M += 1
# # Append Rz(theta)
# if L.theta != 0:
# ets.append(ET.TRz(L.alpha))
# M += 1
# return cls(
# ets,
# q_idx,
# robot.name,
# robot.manuf,
# robot.base,
# robot.tool)
@property
def qlim(self):
v = np.zeros((2, self.n))
j = 0
for i in range(self.M):
if self.ets[i].jtype == self.ets[i].VARIABLE:
v[:, j] = self.ets[i].qlim
j += 1
return v
@property
def base_link(self):
return self._base_link
@property
def ee_link(self):
return self._ee_link
@property
def q(self):
return self._q
@property
def qd(self):
return self._qd
@property
def qdd(self):
return self._qdd
@property
def control_type(self):
return self._control_type
@property
def ets(self):
return self._ets
@property
def name(self):
return self._name
@property
def manuf(self):
return self._manuf
@property
def base(self):
return self._base
@property
def tool(self):
return self._tool
@property
def n(self):
return self._n
@property
def M(self):
return self._M
@property
def q_idx(self):
return self._q_idx
@property
def gravity(self):
return self._gravity
@name.setter
def name(self, name_new):
self._name = name_new
@manuf.setter
def manuf(self, manuf_new):
self._manuf = manuf_new
@gravity.setter
def gravity(self, gravity_new):
self._gravity = getvector(gravity_new, 3, 'col')
@q.setter
def q(self, q_new):
q_new = getvector(q_new, self.n)
self._q = q_new
@qd.setter
def qd(self, qd_new):
self._qd = getvector(qd_new, self.n)
@qdd.setter
def qdd(self, qdd_new):
self._qdd = getvector(qdd_new, self.n)
@control_type.setter
def control_type(self, cn):
if cn == 'p' or cn == 'v' or cn == 'a':
self._control_type = cn
else:
raise ValueError(
'Control type must be one of \'p\', \'v\', or \'a\'')
@base.setter
def base(self, T):
if not isinstance(T, SE3):
T = SE3(T)
self._base = T
@tool.setter
def tool(self, T):
if not isinstance(T, SE3): # pragma nocover
T = SE3(T)
self._tool = T
@base_link.setter
def base_link(self, link):
if isinstance(link, ELink):
self._base_link = link
else:
self._base_link = self.ets[link]
self._reset_fk_path()
@ee_link.setter
def ee_link(self, link):
if isinstance(link, ELink):
self._ee_link = link
else:
self._ee_link = self.ets[link]
self._reset_fk_path()
def fkine(self, q=None):
'''
Evaluates the forward kinematics of a robot based on its ETS and
joint angles q.
T = fkine(q) evaluates forward kinematics for the robot at joint
configuration q.
T = fkine() as above except uses the stored q value of the
robot object.
Trajectory operation:
Calculates fkine for each point on a trajectory of joints q, where
q is (n x m), and returns an SE3 sequence of length m
:param q: The joint angles/configuration of the robot (Optional,
if not supplied will use the stored q values).
:type q: float ndarray(n)
:return: The transformation matrix representing the pose of the
end-effector
:rtype: SE3
:notes:
- The robot's base or tool transform, if present, are incorporated
into the result.
:references:
- Kinematic Derivatives using the Elementary Transform
Sequence, <NAME> and <NAME>
'''
trajn = 1
if q is None:
q = self.q
try:
q = getvector(q, self.n, 'col')
except ValueError:
trajn = q.shape[1]
verifymatrix(q, (self.n, trajn))
for i in range(trajn):
j = 0
tr = self.base
for link in self._fkpath:
if link.jtype == link.VARIABLE:
T = link.A(q[j, i])
j += 1
else:
T = link.A()
tr = tr * T
tr = tr * self.tool
if i == 0:
t = SE3(tr)
else:
t.append(tr)
return t
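# Usage sketch (hypothetical, assuming a built robot model `ets`):
#   T = ets.fkine(np.zeros(ets.n))
# returns the SE3 pose of the end-effector at the zero configuration, with the
# base and tool transforms folded in.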
def allfkine(self, q=None):
'''
Tall = allfkine(q) evaluates fkine for each joint within a robot and
returns a trajectory of poses.
Tall = allfkine() as above except uses the stored q value of the
robot object.
:param q: The joint angles/configuration of the robot (Optional,
if not supplied will use the stored q values).
:type q: float ndarray(n)
:return T: Homogeneous transformation trajectory
:rtype T: SE3 list
:notes:
- The robot's base transform, if present, is incorporated
into the result.
:references:
- Kinematic Derivatives using the Elementary Transform
Sequence, <NAME> and <NAME>
'''
if q is None:
q = np.copy(self.q)
else:
q = getvector(q, self.n)
# t = self.base
# Tall = SE3()
j = 0
if self.ets[0].jtype == self.ets[0].VARIABLE:
self.ets[0]._fk = self.base * self.ets[0].A(q[j])
j += 1
else:
self.ets[0]._fk = self.base * self.ets[0].A()
for i in range(1, self.M):
if self.ets[i].jtype == self.ets[i].VARIABLE:
t = self.ets[i].A(q[j])
j += 1
else:
t = self.ets[i].A()
self.ets[i]._fk = self.ets[i].parent[0]._fk * t
def jacob0(self, q=None):
"""
J0 = jacob0(q) is the manipulator Jacobian matrix which maps joint
velocity to end-effector spatial velocity. v = J0*qd in the
base frame.
J0 = jacob0() as above except uses the stored q value of the
robot object.
:param q: The joint angles/configuration of the robot (Optional,
if not supplied will use the stored q values).
:type q: float ndarray(n)
:return J: The manipulator Jacobian in the base (0) frame
:rtype: float ndarray(6,n)
:references:
- Kinematic Derivatives using the Elementary Transform
Sequence, <NAME> and <NAME>
"""
if q is None:
q = np.copy(self.q)
else:
q = getvector(q, self.n)
T = (self.base.inv() * self.fkine(q)).A
U = np.eye(4)
j = 0
J = np.zeros((6, self.n))
for link in self._fkpath:
for k in range(link.M):
if k != link.q_idx:
U = U @ link.ets[k].T().A
else:
if link.ets[k]._axis == 'Rz':
U = U @ link.ets[k].T(q[j]).A
Tu = np.linalg.inv(U) @ T
n = U[:3, 0]
o = U[:3, 1]
a = U[:3, 2]
y = Tu[1, 3]
x = Tu[0, 3]
J[:3, j] = (o * x) - (n * y)
J[3:, j] = a
j += 1
if link.ets[k]._axis == 'Ry':
U = U @ link.ets[k].T(q[j]).A
Tu = np.linalg.inv(U) @ T
n = U[:3, 0]
o = U[:3, 1]
a = U[:3, 2]
z = Tu[2, 3]
x = Tu[0, 3]
J[:3, j] = (n * z) - (a * x)
J[3:, j] = o
j += 1
if link.ets[k]._axis == 'Rx':
U = U @ link.ets[k].T(q[j]).A
Tu =
|
np.linalg.inv(U)
|
numpy.linalg.inv
|
#! encoding = utf-8
"""
Fit time domain FID signal
"""
import numpy as np
from scipy.signal import butter, sosfiltfilt
import lmfit
from lib import calc_delta_g_coeff, sig2vol
from v1d_theory import run_v1d_theory as run_sim_snr
def load_fid(filename, t1=256, t2=256 + 4096):
""" Load FID from file
:arguments
filename: str data file
t1: int rect window starting time
t2: int rect window end time
:returns
vol: np1darray FID voltage (mV)
"""
raw = np.loadtxt(filename, skiprows=1, dtype='int64')
with open(filename, 'r') as f:
hd_array = f.readline().strip().split('|')
total_avg = int(hd_array[6])
# the raw data is accumulative, take the average of it
vol = sig2vol(raw / total_avg)
# remove baseline drift using the last 100 points
shift = np.average(vol[-100:])
vol -= shift
return vol[t1:t2]
def f2min_tds_simple(lmpar, x, y):
""" Fitting function directly for time domain data, simple form """
v = lmpar.valuesdict()
t01 = v['t01']
t02 = v['t02']
s1 = v['s1']
s2 = v['s2']
a0 = v['a0']
b0 = v['b0']
base = v['base']
phir1 = v['phi1'] / 180 * np.pi # phase in radian
phir2 = v['phi2'] / 180 * np.pi # phase in radian
f1 = v['f1']
f2 = v['f2']
# f_exp = s * np.exp(- a0 * (x + t0)**2 - b0 * (x + t0))
f_exp = np.exp(- a0 * x ** 2 - b0 * x)
f_sin = s1 * np.sin(2 * np.pi * f1 * x + phir1) \
+ s2 *
|
np.sin(2 * np.pi * f2 * x + phir2)
|
numpy.sin
|
# coding: utf-8
# In[1]:
import torch
import pandas as pd
import numpy as np
import sys
import os
mutation = sys.argv[1]
seed = int(sys.argv[2])
use_mean = sys.argv[3] == 'True'
net_arch = map(int, sys.argv[4].split(',')) if len(sys.argv) == 5 else []
def get_data(mutation, seed, mean):
np.random.seed(seed)
SEQ_PATH = '/data/lisa/data/AML-MILA/sequences.npy'
SAMPLES_PATH = '/data/lisa/data/AML-MILA/samples.txt'
LABELS_PATH = '/data/lisa/data/AML-MILA/patients.20170523.txt'
samples = open(SAMPLES_PATH, 'r').read().split('|')
labels = pd.read_csv(LABELS_PATH, sep='\t', index_col=0).T
labels = np.array([int(labels.loc[sample][mutation]) for sample in samples])
data = np.log(np.load(SEQ_PATH) + 1)
idx = np.random.permutation(len(data))
size_train, size_valid = int(len(idx) * 0.6), int(len(idx) * 0.2)
train_idx, valid_idx = idx[:size_train], idx[size_train:size_train+size_valid]
if mean:
train_data = torch.FloatTensor(data[train_idx].mean(axis=1))
train_label = torch.FloatTensor(labels[train_idx])
else:
train_data = torch.FloatTensor(data[train_idx].reshape(-1, data.shape[2]))
train_label = torch.FloatTensor(np.repeat(labels[train_idx], 100))
valid_data = torch.FloatTensor(data[valid_idx].reshape(-1, data.shape[2]))
valid_label = torch.FloatTensor(np.repeat(labels[valid_idx], 100))
return (train_data, train_label), (valid_data, valid_label)
train_loader, valid_loader = get_data(mutation, seed, use_mean)
# In[9]:
def shuffle_data(data):
idx = np.random.permutation(len(data[0]))
train = np.copy(data[0])[idx]
label =
|
np.copy(data[1])
|
numpy.copy
|
import numpy as np
from funcGeneral import max2,argmax2, max3,argmax3, deriv1,deriv1_step2,enhance,argmin3
from scipy import interpolate
import matplotlib.pyplot as plt
### PEAK DETECTION ###
def peakListAll(dProject,keys):
for key in keys:
key1=str('dPeak'+key)
dProject[key1]=fPeakList(dProject['dData'][key],False,False)
return dProject
def fPeakList(dataIn,isDel=False,isAdd=False,repType=None):
## RepTpes: "Cubic", "Poly2"
peakX,peakY=peakDetection(dataIn)
if peakX[0]<3:
peakX=np.delete(peakX,0)
peakY=np.delete(peakY,0)
if peakX[-1]>len(dataIn)-4:
peakX=np.delete(peakX,-1)
peakY=np.delete(peakY,-1)
dPeakList=DPeakList()
dPeakList['NPeak']=len(peakX)
dPeakList['pos']=peakX
dPeakList['amp']=peakY
dPeakList['score']=np.ones(dPeakList['NPeak'])
dPeakList['averW'],dPeakList['stdW'],dPeakList['minW'],dPeakList['maxW']=findAverPeakW(peakX,rate=0.33,minR=0.4,maxR=1.8)
if isDel:
dPeakList=delPeaks(dPeakList, dataIn)
if isAdd:
dPeakList=addPeaks(dPeakList, dataIn)
if repType!=None:
dPeakList['X']=[]
dPeakList['Y']=[]
for i in range(dPeakList['NPeak']):
if repType=="Cubic":
newX,newY,newPos,newAmp=fitSplineToPeak(dataIn,dPeakList['pos'][i],wid=3)
elif repType=="Poly2":
newX,newY,newPos,newAmp=fitPolyToPeak(dataIn,dPeakList['pos'][i],wid=3)
elif repType=="Amp":
newX,newY,newPos,newAmp=peakX[i],peakY[i],dPeakList['pos'][i],peakY[i]
dPeakList['pos'][i]=newPos
dPeakList['amp'][i]=newAmp
dPeakList['X'].append(newX)
dPeakList['Y'].append(newY)
return dPeakList
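# Usage sketch (illustrative): fPeakList(trace, isDel=True, isAdd=True, repType="Cubic")
# returns a dict with peak positions ('pos'), amplitudes ('amp'), width statistics,
# and per-peak refined 'X'/'Y' segments from the spline fit.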
def DPeakList():
dPeakList={}
dPeakList['NPeak']=0
dPeakList['pos']=np.array([],dtype='i4')
dPeakList['amp']=np.array([],dtype='f4')
dPeakList['wid']=np.array([],dtype='f4')
dPeakList['area']=np.array([],dtype='f4')
dPeakList['averW']=np.array([],dtype='f4')
dPeakList['stdW']=np.array([],dtype='f4')
dPeakList['minW']=np.array([],dtype='f4')
dPeakList['maxW']=np.array([],dtype='f4')
return dPeakList
def peakDetection(dataIn,isY=True):
if len(dataIn)<3:
peakX=np.array([])
return peakX
derivData=deriv1(dataIn)
peakX=findPeakX(derivData,dataIn)
if isY:
peakY=findPeakY(dataIn,peakX)
return peakX, peakY
else:
return peakX
def peakDetection_v3(dataIn,isY=True):
if len(dataIn)<3:
peakX=np.array([])
return peakX
derivData1=deriv1(dataIn)
derivData2=deriv1_step2(dataIn)
av_deriv=np.add(derivData1,derivData2)/2
peakX=findPeakX_v5(av_deriv,dataIn)
if isY:
peakY=findPeakY(dataIn,peakX)
return peakX, peakY
else:
return peakX
def troughDetection(dataIn,isY=True):
if len(dataIn)<3:
peakX=np.array([])
return peakX
derivData=deriv1(dataIn)
peakX=findtroughX(derivData,dataIn)
if isY:
peakY=findPeakY(dataIn,peakX)
return peakX, peakY
else:
return peakX
def peakDetection_v2(dataIn,isY=True):
if len(dataIn)<2:
peakX=np.array([])
return peakX
derivData=deriv1(dataIn)
peakX=findPeakX(derivData,dataIn)
peakX=findPeakX(derivData,dataIn)
#peakX=findPeakX_v3(dataIn)
if isY:
peakY=findPeakY(dataIn,peakX)
return peakX, peakY
else:
return peakX
def findPeakX(derivData,dataIn):
peakX=np.array([],dtype='i4')
NData=len(derivData)
i=0
while i<NData-1:
if np.sign(derivData[i]) > np.sign(derivData[i+1]):
peak=argmax3(dataIn[i-1],dataIn[i],dataIn[i+1])
peak=i-1+peak
i=i+1
if peak>2:
peakX=np.append(peakX,peak)
i+=1
return peakX
def findPeakX_v5(derivData,dataIn):
peakX=np.array([],dtype='i4')
NData=len(derivData)
i=0
window_size=10
window_number=NData//window_size  # integer division so range() receives an int
for j in range(window_number):
derivData_w=derivData[j*(window_size):((j+1)*window_size)]
dataIn_w=dataIn[j*(window_size):((j+1)*window_size)]
#print len(derivData_w)
mean_ddw=np.mean(derivData_w)
derivData_w=derivData_w-mean_ddw
#plt.plot(np.arange(window_size),derivData_w)
#plt.plot(np.arange(window_size),dataIn_w)
#plt.show()
for i in range(window_size-1):
if np.sign(derivData_w[i]) > np.sign(derivData_w[i+1]):
peak=argmax3(dataIn_w[i-1],dataIn_w[i],dataIn_w[i+1])
peak=i-1+peak+j*window_size
if peak>2 and peak<NData-1:
peakX=np.append(peakX,peak)
#print peakX
return peakX
def findtroughX(derivData,dataIn):
peakX=np.array([],dtype='i4')
NData=len(derivData)
i=0
while i<NData-1:
if np.sign(derivData[i]) < np.sign(derivData[i+1]):
peak=argmin3(dataIn[i-1],dataIn[i],dataIn[i+1])
peak=i-1+peak
i=i+1
peakX=np.append(peakX,peak)
i+=1
return peakX
def findPeakX_v2(derivData,dataIn):
peakX=np.array([],dtype='i4')
NData=len(derivData)
i=0
while i<NData-2:
diff = derivData[i]-derivData[i+2]
if np.sign(derivData[i])>np.sign(derivData[i+1]) and diff>1:
diff=derivData[i]-derivData[i+1]
peak=argmax3(dataIn[i-1],dataIn[i],dataIn[i+1])
peak=i-1+peak
i=i+1
peakX=np.append(peakX,peak)
i+=1
return peakX
def findPeakX_v3(dataIn):
peakX=np.array([],dtype='i4')
NData=len(dataIn)
i=2
while i<NData-2:
peak_ind=np.argmax(dataIn[i-2:i+2])
peak=np.max(dataIn[i-2:i+2])
trough=np.min(dataIn[i-2:i+2])
mag=peak-trough
if peak_ind == 2 and mag>2:
peak_ind=i-1+peak_ind
i=i+1
peakX=np.append(peakX,peak_ind)
i+=1
return peakX
def findPeakX_v4(derivData,dataIn):
peakX=np.array([],dtype='i4')
NData=len(derivData)
i=0
while i<NData-1:
if np.sign(derivData[i]) > np.sign(derivData[i+1]):
peak=argmax3(dataIn[i-1],dataIn[i],dataIn[i+1])
peak=i-1+peak
i=i+1
if peak>2 and peak<(len(dataIn)-2):
peakX=np.append(peakX,peak)
i+=1
return peakX
def findPeakY(dataIn,peakX):
NPeak=len(peakX)
peakY=np.zeros(NPeak,dtype='f4')
for i in range(NPeak):
peakY[i]= dataIn[peakX[i]]
return peakY
def delPeaks(dPeakList, dataIn):
newPeakX=np.array([dPeakList['pos'][0]],dtype='i4')
i=1
while i<len(dPeakList['pos'])-2:
fark0=dPeakList['pos'][i+1]-dPeakList['pos'][i]
if fark0<dPeakList['minW']:
fark1=dPeakList['pos'][i+2]-dPeakList['pos'][i+1]
fark2=dPeakList['pos'][i]-dPeakList['pos'][i-1]
if fark1>fark2:
newPeakX=np.append(newPeakX,dPeakList['pos'][i+1])
else:
newPeakX=np.append(newPeakX,dPeakList['pos'][i])
i+=1
else:
newPeakX=np.append(newPeakX,dPeakList['pos'][i])
i+=1
newPeakX=np.append(newPeakX,dPeakList['pos'][-2])
newPeakX=np.append(newPeakX,dPeakList['pos'][-1])
dPeakList['NPeak']=len(newPeakX)
newPeakY=np.zeros(dPeakList['NPeak'])
for i in range(dPeakList['NPeak']):
newPeakY[i]=dataIn[newPeakX[i]]
dPeakList['amp']=newPeakY
dPeakList['pos']=newPeakX
dPeakList['score']=np.ones(dPeakList['NPeak'])
return dPeakList
def addPeaks(dPeakList,dataIn):
newPeakX=np.array([dPeakList['pos'][0]],dtype='i4')
newScore=np.array([1],dtype='i4')
i=1
while i<len(dPeakList['pos']):
firstP=newPeakX[-1]
secondP=dPeakList['pos'][i]
fark=secondP-firstP
if fark>dPeakList['maxW']:
s=int(firstP+dPeakList['minW'])
e=int(s+dPeakList['averW'])
partData=enhance(dataIn[s:e])
partX=peakDetection(partData,False)
if len(partX)>0:
newPeakX=np.append(newPeakX,int(partX[0]+s))
else:
m=int(firstP+dPeakList['averW'])
argM=m-1+argmax3(dataIn[m-1],dataIn[m],dataIn[m+1])
newPeakX=np.append(newPeakX,argM)
newScore=np.append(newScore,0)
elif fark>1.2*dPeakList['averW']:
s=int(firstP+dPeakList['minW'])
e=int(secondP-dPeakList['minW']+1)
partData=enhance(dataIn[s:e])
partX=peakDetection(partData,False)
if len(partX)>0:
newPeakX=np.append(newPeakX,(partX[0]+s))
newScore=np.append(newScore,0)
newPeakX=np.append(newPeakX,secondP)
newScore=np.append(newScore,1)
i+=1
else:
newPeakX=np.append(newPeakX,secondP)
newScore=np.append(newScore,1)
i+=1
dPeakList['NPeak']=len(newPeakX)
dPeakList['pos']=np.array(newPeakX,dtype='i4')
dPeakList['amp']=dataIn[newPeakX]
dPeakList['score']=np.array(newScore,dtype='i4')
return dPeakList
def findAverPeakW(peakX,rate=0.33,minR=0.4,maxR=1.5):
NPeak=len(peakX)
diffW=peakX[1:]-peakX[:-1]
diffW=np.sort(diffW)
s=int(NPeak*rate)
e=int(NPeak*(1-rate))
averW=np.average(diffW[s:e])
stdW = np.std(diffW[s:e])
minW=averW*minR
maxW=averW*maxR
return averW,stdW,minW,maxW
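# Worked example (illustrative only): for peakX = np.array([0, 10, 21, 29, 40])
# the successive differences are [10, 11, 8, 11], which sort to [8, 10, 11, 11].
# With rate=0.33 and NPeak=5 the trimmed slice is diffW[1:3] = [10, 11], so
# averW = 10.5, and with minR=0.4, maxR=1.5 the limits become minW = 4.2 and
# maxW = 15.75.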
def fitSplineToPeak(dataIn,peakPos,wid=3):
s=int(peakPos-wid)
e=int(peakPos+wid+1)
X=np.arange(s,e)
Y=dataIn[s:e]
fittedFunc= interpolate.splrep(X,Y,s=0)
newWid=wid*5
newX=np.linspace(s,e-1,newWid)
newY=interpolate.splev(newX,fittedFunc,der=0)
argMax=np.argmax(newY)
newAmp=newY[argMax]
newPos=newX[argMax]
s=newPos-wid
e=newPos+wid+1
newX=np.linspace(s,e-1,newWid)
newY=interpolate.splev(newX,fittedFunc,der=0)
return newX,newY,newPos,newAmp
def fitPolyToPeak(dataIn,peakPos,wid=3):
s=int(peakPos-wid)
e=int(peakPos+wid+1)
X=np.arange(s,e)
Y=dataIn[s:e]
fittedFunc= np.poly1d(np.polyfit(X,Y,2))
newWid=wid*5
newX=np.linspace(s,e-1,newWid)
newY=fittedFunc(newX)#,fittedFunc,der=)
argMax=np.argmax(newY)
newAmp=newY[argMax]
newPos=newX[argMax]
s=newPos-wid
e=newPos+wid+1
newX=np.linspace(s,e-1,newWid)
newY=fittedFunc(newX)#,fittedFunc,der=)
#newY=interpolate.splev(newX,fittedFunc,der=0)
return newX,newY,newPos,newAmp
def noBand(n,m):
band=np.zeros([2,n],dtype='i4')
for i in range(n):
band[0,i] = 0
band[1,i] = m
return band
def SakoeChibaBand(n,m,r):
band=np.zeros([2,n],dtype='i4')
mnf=float(m)/float(n)
for i in range(n):
band[0,i]=np.ceil(mnf*i-r) # int(np.max(np.array([ceil(i * mnf - r), 0.0 ])));
band[1,i]=np.floor(mnf*i+r) #int(np.min(np.array([ceil(i * mnf - r), 0.0 ])));
if band[0,i]<0:
band[0,i]=0
if band[1,i]>m:
band[1,i]=m
return band
## SIMILARITY FUNCTIONS
# Similarity between two numbers, a and b
def simAandB(a,b):
a=float(a)
b=float(b)
sim=1.0-(np.abs(a-b)/(2*max2(np.abs(a),np.abs(b))))
sim=sim*2.0-1.0
return sim
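# Quick check of the scaling (illustrative only): simAandB(3, 4) computes
# 1 - |3 - 4| / (2 * max(3, 4)) = 0.875, which is then mapped from [0, 1] to
# [-1, 1], giving 0.75.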
# Mean similarity between two time series, a1,a2,...,aN, and b1,...,bN
def simMean(A,B):
if len(A)!=len(B):
return False
sum=0
for i in range(len(A)):
sum+=simAandB(A[i],B[i])
sim=sum/len(A)
return sim
def simMeanDeriv(A,B):
A0=deriv1(A)
B0=deriv1(B)
sim=simMean(A0,B0)
return sim
# The root mean square similarity
def simRootMean(A,B):
if len(A)!=len(B):
return False
sum=0
for i in range(len(A)):
sum+=(simAandB(A[i],B[i]))**2
sim=np.sqrt(sum/len(A))
return sim
def simCorr(A,B):
if len(A)!=len(B):
return 0
sim=np.corrcoef(A,B)[0,1]
return sim
def simPeakCorr(A,B,i,j,D=0.02):
corr=simCorr(A,B)
posSim=np.exp(np.abs(i-j)*D)
sim=corr/posSim
return sim
def simCosAngle(A,B):
top = np.dot(A, B)
bot = np.sqrt(np.sum(A**2)*np.sum(B**2)) #mass_spect1_sum*mass_spect2_sum)
if bot > 0:
sim = top/bot
else:
sim = 0
sim=sim*2-1
return sim
def posSim(t1,t2,D=0.03):
posSim0=np.exp(-np.abs(t1-t2)*D)
posSim0=posSim0*2-1
return posSim0
#a=10
#b=np.arange(5,150)
#for i in b:
# print a,i,posSim(a,i)
def simPosAmp(a0,a1,t0,t1,kA=0.1,kT=0.9):
simPos=posSim(t0,t1)
simAmp=simAandB(a0,a1)
sim=simPos*kT+simAmp*kA
sim=sim*2-1
return sim
def timeTol(t1,t2,D=0.05):
rtime = np.exp(-np.abs(t1-t2)*D)
return rtime
simFuncs=['Amplitude','Mean','Derivative','Correlation','Cosine','Gaussian','Position']
def DPeakAlignParams():
dParams={}
dParams['method']='Global'
dParams['simFunc']='Amplitude'
dParams['peakRep']=None
dParams['gap']=-0.20
dParams['timeT']=0.00
dParams['band']=0.20
dParams['minScore']=0.0
dParams['seqType']='pos' # 'pos', 'index'
dParams['isBackTrace']=True
dParams['repType']=None
return dParams
def obtainCostM(dPeakList0,dPeakList1,bandM,dParams):
n=dPeakList0['NPeak']
m=dPeakList1['NPeak']
costM=np.array(np.ones([n,m])*(-9999), dtype='f4')
if dParams['simFunc']=='Amplitude':
for i in range(n):
for j in np.arange(bandM[0,i],bandM[1,i]):
costM[i,j]=simAandB(dPeakList0['amp'][i],dPeakList1['amp'][j])
elif dParams['simFunc']=='Position':
for i in range(n):
for j in np.arange(bandM[0,i],bandM[1,i]):
costM[i,j]=posSim(dPeakList0['pos'][i],dPeakList1['pos'][j])
elif dParams['simFunc']=='Mean':
for i in range(n):
for j in np.arange(bandM[0,i],bandM[1,i]):
costM[i,j]=simMean(dPeakList0['Y'][i],dPeakList1['Y'][j])
elif dParams['simFunc']=='Derivative':
for i in range(n):
for j in np.arange(bandM[0,i],bandM[1,i]):
costM[i,j]=simMeanDeriv(dPeakList0['Y'][i],dPeakList1['Y'][j])
elif dParams['simFunc']=='Correlation':
for i in range(n):
for j in np.arange(bandM[0,i],bandM[1,i]):
costM[i,j]=simCorr(dPeakList0['Y'][i],dPeakList1['Y'][j])
elif dParams['simFunc']=='Gaussian':
for i in range(n):
for j in np.arange(bandM[0,i],bandM[1,i]):
costM[i,j]=simCorr(dPeakList0['Y'][i],dPeakList1['Y'][j])
elif dParams['simFunc']=='Cosine':
for i in range(n):
for j in np.arange(bandM[0,i],bandM[1,i]):
costM[i,j]=simCosAngle(dPeakList0['Y'][i],dPeakList1['Y'][j])
# # Apply time tolerace
# if dParams['method']=='Global' and dParams['timeT']>0:
# for i in range(n):
# for j in np.arange(bandM[0,i],bandM[1,i]):
# costM[i,j]=costM[i,j]*timeTol(dPeakList0['pos'][i],dPeakList1['pos'][j],dParams['timeT'])
#
return costM
def peakScoringGlobal(dPeakList0,dPeakList1,costM,bandM, gap=-1.0, minScr=0.5):
l1, l2 = dPeakList0['NPeak'], dPeakList1['NPeak']
scormat = np.zeros( (l1+1,l2+1), dtype='f4')
arrow = np.zeros( [l1+1,l2+1], int)
arrow[0,:] = 2 # np.ones(NSeq+1)
arrow[:,0] = 1 #np.ones(NSeq+1)
scormat[0,:] = np.arange(l2+1)* gap
scormat[:,0] = np.arange(l1+1)* gap
# arrow[0] = np.ones(l2+1)
for i in range( 1,l1+1 ):
for j in range(bandM[0,i-1]+1,bandM[1,i-1]+1,1): # for j in range( 1, l2+1 ):
s0= scormat[i-1,j-1]+ costM[i-1,j-1]
s1= scormat[i-1,j] + gap
s2= scormat[i,j-1] + gap
scormat[i,j] = max3(s0,s1,s2)
arrow[i,j] = argmax3(s0,s1,s2)
if costM[i-1,j-1]<minScr:
scormat[i,j] = max2(s1,s2)
arrow[i,j] = 1+argmax2(s1,s2)
return scormat, arrow
def peakBacktraceGlobal(seq0,seq1,scormat,arrow,costM,minScr=0.5):
NPeak0=len(seq0)
NPeak1=len(seq1)
st0, st1 = [],[]
v,h = arrow.shape
v-=1
h-=1
ok = 1
while ok:
if arrow[v,h] == 0:
st0.append(seq0[v-1])# += seq1[v-1]
st1.append(seq1[h-1])# += seq2[h-1]
v -= 1
h -= 1
elif arrow[v,h] == 1:
st0.append(seq0[v-1]) #+= seq1[v-1]
st1.append(-1)
v -= 1
elif arrow[v,h] == 2:
st0.append(-1)
st1.append(seq1[h-1])
h -= 1
if v==0 and h==0:
ok = 0
# reverse the strings
st0.reverse()
st1.reverse()
return st0, st1
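# Note on the output (illustrative, hypothetical values): the two returned lists
# are the input sequences padded with -1 wherever a gap was opened, e.g. aligning
# seq0 = [10, 20, 30] against seq1 = [11, 31] could come back as
# st0 = [10, 20, 30], st1 = [11, -1, 31], i.e. the second peak of seq0 has no
# partner in seq1.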
def peakScoringLocal(dPeakList0,dPeakList1,costM, gap=-1.0, minScr=0.0):
l1, l2 = dPeakList0['NPeak'], dPeakList1['NPeak']
scormat = np.zeros( (l1+1,l2+1), dtype='f4')
arrow = np.ones( (l1+1,l2+1), int)
# create first row and first column
arrow[0,:] = 1 # np.ones(NSeq+1)
arrow[:,0] = 2 #np.ones(NSeq+1)
for i in range( 1,l1+1 ):
for j in range(1,l2+1): # for j in range( 1, l2+1 ):
s0= scormat[i-1,j-1]+ costM[i-1,j-1]
s1= scormat[i-1,j] + gap
s2= scormat[i,j-1] + gap
if costM[i-1,j-1]>minScr:
scormat[i,j],arrow[i,j] = maxArg4(s0,s1,s2,0)
else:
scormat[i,j] = max3(s1,s2,0)
arrow[i,j] = 1+argmax3(s1,s2,0)
return scormat, arrow
def peakBackTraceLocal(seq0,seq1,scormat,arrow):
NPeak0=len(seq0)
NPeak1=len(seq1)
st0, st1 = [],[]
ok = 1
v,h = divmod(scormat.argmax(), NPeak1+1)
while ok:
if arrow[v,h] == 0:
st0.append(seq0[v-1])# += seq1[v-1]
st1.append(seq1[h-1])# += seq2[h-1]
v -= 1
h -= 1
elif arrow[v,h] == 1:
st0.append(seq0[v-1]) #+= seq1[v-1]
st1.append(-1)
v -= 1
elif arrow[v,h] == 2:
st0.append(-1)
st1.append(seq1[h-1])
h -= 1
if (v==0 and h==0) or scormat[v,h]==0:
ok = 0
# reverse the strings
st0.reverse()
st1.reverse()
return st0, st1
def peakScoreM(peakXA, peakXB,costM, G=0.1):
l1, l2 = len( peakXA), len(peakXB)
scormat = np.zeros([l1+1,l2+1])
arrow = np.zeros([l1+1,l2+1], int )
# create first row and first column
scormat[0] = np.arange(l2+1)* G
scormat[:,0] = np.arange( l1+1)* G
arrow[0] = np.ones(l2+1)
# fill in the matrix
for i in range(1,l1+1):
for j in range(1,l2+1):
f = np.zeros(3, float )
f[0] = scormat[i-1,j] + G
f[1] = scormat[i,j-1] + G
f[2] = scormat[i-1,j-1] + costM[i-1,j-1] #subvals[i]
scormat[i,j] = np.max(f)
arrow[i,j] = np.argmax(f)
return scormat, arrow
def myPeakAlignment(dPeakList0,dPeakList1,dParams):
if dParams['method']=='Global':
bandM=SakoeChibaBand(dPeakList0["NPeak"],dPeakList1["NPeak"],dPeakList0["NPeak"]*dParams['band'])
if dParams['method']=='Local':
bandM=noBand(dPeakList0["NPeak"],dPeakList1["NPeak"])
costM=obtainCostM(dPeakList0,dPeakList1,bandM,dParams)
if dParams['method']=='Global':
scormat, arrow=peakScoringGlobal(dPeakList0,dPeakList1,costM,bandM, gap=dParams['gap'],minScr=dParams['minScore'])
if dParams['isBackTrace']==False:
return scormat[-1,-1]
if dParams['method']=='Local':
scormat, arrow=peakScoringLocal(dPeakList0,dPeakList1,costM, gap=dParams['gap'],minScr=dParams['minScore'])
seq0=dPeakList0['pos']
seq1=dPeakList1['pos']
if dParams['seqType']=='index':
seq0=np.arange(dPeakList0['NPeak'],dtype='i4')
seq1=
|
np.arange(dPeakList1['NPeak'],dtype='i4')
|
numpy.arange
|
# Generally useful functions. Uses cgs units unless specified otherwise.
import math
import sys
import numpy as np
from scipy.interpolate import interp1d
### physical constants
Lsun = 3.839e33 # solar luminosity in erg/s
G = 6.674e-8 # gravitational constant in cm**3 g**-1 s**-2
Msun = 1.989e33 # mass of the sun in grams
Rsun = 6.9551e10 # solar radius in cm
sigma = 5.6704e-5 # Stefan-Boltzmann constant in erg*cm**-2*s**-1*K**-4
c = 2.99792458e10 # speed of light in cm/s
h = 6.62606885e-27 # planck's constant in erg*s
k = 1.3806504e-16 # Boltzmann constant in erg/K
D10 = 3.085678e+19 # ten parsecs in cm
Tsun = (Lsun / (4*math.pi*sigma*Rsun**2))**(0.25) # temperature of the sun in Kelvins
Zsun = 0.01886 # from Anders and Grevesse, 1989, Geochimica et Cosmochimica Acta, Volume 53, Issue 1, p. 197-214 (used by http://kurucz.harvard.edu/grids/gridm01/)
# Zsun = 0.017 # from <NAME>., & <NAME>., 1998, Space Sci. Rev., 85, 161 (used by Castelli and Kurucz 2004)
Zsun_mist = 0.0142 # bulk solar metallicity from Asplund et al, Annu. Rev. Astron. Astrophys. 2009. 47:481–522 (used by MIST)
# printf() function from O'Reilly's Python Cookbook
def printf(format, *args):
sys.stdout.write(format % args)
# inputs: starting and ending time in seconds
# output: a string with the time in hours, minutes and seconds
def timef(atime):
hours, rem = divmod(atime, 3600)
minutes, seconds = divmod(rem, 60)
res = "{:0>2}:{:0>2}:{:05.2f}".format(int(hours), int(minutes), seconds)
return res
# distance from modulus
def dist(mod):
return D10 * 10**(mod / 5)
# Keplerian limit on the angular velocity,
# for fixed mass in solar masses and equatorial radius in solar radii
def OmegaK(M, Req):
return np.sqrt(G * M * Msun / (Req * Rsun)**3)
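# Rough sanity check (illustrative only): for the Sun, OmegaK(1, 1) is about
# sqrt(G * Msun / Rsun**3) ~ 6.3e-4 rad/s, i.e. a critical rotation period of
# roughly 2*pi / OmegaK ~ 2.8 hours.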
## Conversions between dimensionless omegas in the context of a Roche model;
## see appendix in arXiv:1505.03997
# Omega / Omega_Keplerian as a function of Omega / Omega_critical
def omega(otilde):
def om(ot):
chi = np.arcsin(ot)
om = np.sqrt((6./ot) * np.sin(chi/3) - 2)
return om
lst = hasattr(otilde, "__iter__") # is the input list-like (iterable)?
if lst:
omega = np.empty_like(otilde)
m = (otilde == 0)
omega[m] = 0
omega[~m] = om(otilde[~m])
else:
if otilde == 0:
omega = 0
else:
omega = om(otilde)
return omega
# Omega / Omega_critical as a function of Omega / Omega_Keplerian
def otilde(omega):
otilde = omega * np.sqrt(27./8) * (1 + omega**2 / 2)**(-3./2)
return otilde
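# The two conversions above are analytic inverses of each other; a quick
# numerical check (illustrative only):
#   w = 0.5
#   abs(omega(otilde(w)) - w) < 1e-9   # expected True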
# surface area in squared solar radii
# from luminosity in solar luminosities and effective temperature in Kelvin
def area(L, Teff):
return L * Lsun / (sigma * Teff**4 * Rsun**2)
# pseudo effective temperature in Kelvin
# from luminosity in solar luminosities and equatorial radius in solar radii
def tau(L, Req):
return ( L * Lsun / (4 * np.pi * sigma * (Req * Rsun)**2) )**(1./4)
# luminosity in solar luminosities
# from pseudo effective temperature in Kelvin and equatorial radius in solar radii
def L(tau, Req):
return 4 * np.pi * (Req * Rsun)**2 * sigma * tau**4 / Lsun
# log pseudo effective gravity
def gamma(M, Req):
return np.log10( G * M * Msun / (Req * Rsun)**2 )
# mass in solar masses
def M(gamma, Req):
return 10**gamma * (Req * Rsun)**2 / (G * Msun)
# convert between absolute metallicity Z and logarithmic relative metallicity [M/H],
# as well as between these variables for different solar metallicities
def logZp(Z):
return np.log10(Z / Zsun)
def Z_from_logZp(logZ):
return Zsun * 10**logZ
def logZm(Z):
return np.log10(Z / Zsun_mist)
def logZm_from_logZp(logZ):
return logZ + np.log10(Zsun) - np.log10(Zsun_mist)
def logZp_from_logZm(logZ):
return logZ - np.log10(Zsun) + np.log10(Zsun_mist)
# v sin i from mass
def vsini1(M, R, omega, inc):
return omega * np.sqrt(G * M * Msun / (R * Rsun)) *
|
np.sin(inc)
|
numpy.sin
|
import matplotlib.pyplot as plt
import numpy as np
fig1 = plt.figure();
fig2 = plt.figure();
fig3 = plt.figure();
fig4 = plt.figure();
fig5 = plt.figure();
ax1 = fig1.add_subplot(111)
ax2 = fig2.add_subplot(111)
ax3 = fig3.add_subplot(111)
ax4 = fig4.add_subplot(111)
ax5 = fig5.add_subplot(111)
dom1 = np.arange(-20,20.1,0.1)
dom2 =
|
np.arange(0.1,100,0.1)
|
numpy.arange
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 15 08:53:50 2020
@author: AliHaidar
This package allows using PSO for optimizing Machine Learning algorithms' parameters.
Four algorithms were initiated in the first study: MLP, SVM, XGBoost, GBDT
The class contains various static methods, to allow running separate functions alone.
In some cases, I was forced to follow the hardest way since I didn't want to modify any part of the package
that supports PSO (pyswarms).
"""
import random
import numpy as np
import time
import itertools as it
import lightgbm as lgb
import xgboost as xgb
from sklearn.svm import SVC,SVR
#keras MLP
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint,EarlyStopping,TensorBoard,ReduceLROnPlateau
from keras.layers import Dense
import pyswarms as ps# Import PySwarms
from sklearn.metrics import mean_squared_error,accuracy_score,auc,roc_curve
class pspso:
"""
This class searches for algorithm parameters by using the Particle Swarm Optimization (PSO) algorithm.
"""
best_paricle_cost_ann =None
best_model_ann=None
best_history_ann=None
best_particle_position_ann=None
verbose=0
early_stopping=20
defaultparams= None #contains the default parameters of the algorithm that were not selected for optimization, or can't be selected for optimization
parameters=None #contains the list of the parameters selected for optimization
paramdetails= None #contains the dictionary given as input
rounding=None # contains a list that determines the precision to which each parameter is rounded, e.g. a selected learning rate of 0.342 is rounded to 0.34 if the rounding value is 2 (two digits after the decimal point)
def __init__(self, estimator='xgboost', params=None, task="regression",score= 'rmse'):
"""Construct an istance of the class pspso.
Inputs
------
estimator: a variable that can be 'xgboost', 'gbdt','mlp', or 'svm'. Default 'xgboost'
The name of the estimators whose parameters to be optimized.
params: a dictionary that determines the parameters to be optimized
task: a variable (regression, binary classification, or binary classification -r)
determines the type of the application
score: the fitness evaluation score while selecting the hyper-parameters.
"""
self.estimator = estimator # this variable is used to save the estimator, can be: xgboost, gbdt, mlp, or svm (cnn + catboost are not supported yet)
self.task=task # can be 'regression' or 'classification'
self.score=score # score: can be 'rmse' for regression 'acc' or 'auc' for binary classification. Multi-class classification (not supported yet)
self.cost=None
self.pos=None
self.model=None
self.duration=None
self.rmse=None
self.optimizer=None
pspso.parameters,pspso.defaultparams,self.x_min,self.x_max,pspso.rounding,self.bounds, self.dimensions,pspso.paramdetails=pspso.read_parameters(params,self.estimator,self.task)
@staticmethod
def get_default_search_space(estimator,task):
"""Create a dictionary of default parameters if the user didnt provide parameters.
Inputs
estimator: string value
A string value that determines the estimator: 'mlp','xgboost','svm', or 'gbdt'
task: string value
A string value that determines the task under consideration: 'regression' or 'binary classification'
Returns
params: Dictionary
A dictionary that contains default parameters to be used.
"""
if estimator == 'xgboost':
if task == 'binary classification':
params = {"learning_rate": [0.1,0.3,2],
"max_depth": [1,10,0],
"n_estimators": [2,70,0],
"subsample": [0.7,1,2]}
else:
params = {"objective": ["reg:linear","reg:tweedie","reg:gamma"],
"learning_rate": [0.1,0.3,2],
"max_depth": [1,10,0],
"n_estimators": [2,70,0],
"subsample": [0.7,1,2]}
elif estimator == 'gbdt':
if task == 'binary classification':
params = {"learning_rate": [0.1,0.3,2],
"max_depth": [1,10,0],
"n_estimators": [2,70,0],
"subsample": [0.7,1,2]}
else:
params = {"objective": ["tweedie","regression"],
"learning_rate": [0.1,0.3,2],
"max_depth": [1,10,0],
"n_estimators": [2,70,0],
"subsample": [0.7,1,2]}
elif estimator == 'svm':
params = {"kernel": ["linear", "rbf", "poly"] ,
"gamma": [0.1,10,1],
"C": [0.1,10,1],
"degree": [0,6,0]}
elif estimator == 'mlp':
params = {"optimizer": ["RMSprop", "adam", "sgd",'adamax','nadam','adadelta'] ,
"learning_rate": [0.1,0.3,2],
"neurons": [1,40,0],
"hiddenactivation": ['relu','sigmoid','tanh'],
"activation":['relu','sigmoid','tanh']}
return params
@staticmethod
def get_default_params(estimator, task):
"""Set the default parameters of the estimator.
This function assigns the default parameters for the user.
Each algorithm has a set of parameters. To allow the user to search for some parameters
instead of the supported parameters, this function is used to assign a default value for each parameter.
In addition, it sets other fixed parameters for each algorithm; for example, it returns the number of epochs, batch_size, and loss for the mlp.
Inputs
estimator: string value
A string value that determines the estimator: 'mlp','xgboost','svm', or 'gbdt'
task: string value
A string value that determines the task under consideration: 'regression' or 'binary classification'
Returns
defaultparams: Dictionary
A dictionary that contains default parameters to be used.
"""
defaultparams= {}
if estimator == 'xgboost':
defaultparams.update({'learning_rate':0.01,'max_depth':6,'n_estimators':40,'subsample':0.99})
if task =='binary classification': # default activation
defaultparams.update({'objective':'binary:logistic','eval_metric':["aucpr","auc"]})
elif task =='regression':
defaultparams.update({'objective':'reg:tweedie','eval_metric':["rmse"]})
elif estimator == 'gbdt':
if task =='regression':
defaultparams['objective'] = 'tweedie'
eval_metric ='rmse'
elif task =='binary classification':
defaultparams['objective'] = 'binary'
eval_metric =['auc']
defaultparams.update({'learning_rate':0.01,'max_depth':6,'n_estimators':40,'subsample':0.99,
'boosting_type':'gbdt','eval_metric':eval_metric})
elif estimator == 'mlp':
#steps_per_epoch=4000 // batch_size
defaultparams.update({'batch_size':12,'epochs':50,'shuffle':True,
'neurons':13,'hiddenactivation':'sigmoid',
'activation':'sigmoid','learning_rate':0.01,
'mode':'auto'})#batchsize, epochs, and shuffling default values.
if task =='binary classification': # set the optimizer based on the task
defaultparams.update({'optimizer':'adam','metrics':['binary_accuracy'],'loss':'binary_crossentropy'})
elif task=='regression':
defaultparams.update({'optimizer':'RMSprop','metrics':['mse'],'loss':'mse'})
elif estimator == 'svm':
defaultparams.update({'kernel':'rbf','C':5,'gamma':5})
return defaultparams
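# Note (illustrative, hypothetical values): during training the decoded particle
# overrides these defaults via a dict merge, e.g. with the SVM defaults
# {'kernel': 'rbf', 'C': 5, 'gamma': 5} and a decoded particle
# {'kernel': 'poly', 'C': 2.3}, {**defaults, **decoded} yields
# {'kernel': 'poly', 'C': 2.3, 'gamma': 5}.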
@staticmethod
def read_parameters(params=None,estimator=None, task=None):
"""Read the parameters provided by the user.
Inputs
params: dictionary of key,values added by the user
This dictionary determines the parameters and ranges of parameters the user wants to selection values from.
estimator: string value
A string value that determines the estimator: 'mlp','xgboost','svm', or 'gbdt'
task: string value
A string value that determines the task under consideration: 'regression' or 'binary classification'
Returns
parameters
The parameters selected by the user
defaultparams
Default parameters
x_min: list
The lower bounds of the parameters search space
x_max: list
The upper bounds of the parameters search space
rounding: list
The rounding value in each dimension of the search space
bounds: dict
A dictionary of the lower and upper bounds
dimensions: integer
Dimensions of the search space
params: Dict
The parameter dict given by the user
"""
if params == None:
params=pspso.get_default_search_space(estimator,task)
x_min,x_max,rounding,parameters=[],[],[],[]
for key in params:
if all(isinstance(item, str) for item in params[key]):
of=params[key]
x_min.append(0)
x_max.append(len(of)-1)
parameters.append(key)
rounding.append(0)
else:
thelist=params[key]
x_min.append(thelist[0])
x_max.append(thelist[1])
parameters.append(key)
rounding.append(thelist[2])
bounds = (np.asarray(x_min), np.asarray(x_max))
dimensions=len(x_min)
defaultparams=pspso.get_default_params(estimator, task)
return parameters,defaultparams, x_min,x_max,rounding,bounds, dimensions,params
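# Example of the encoding (illustrative, hypothetical search space): for
# params = {"learning_rate": [0.1, 0.3, 2], "kernel": ["linear", "rbf", "poly"]}
# this yields x_min = [0.1, 0], x_max = [0.3, 2], rounding = [2, 0], i.e.
# categorical parameters are mapped to an integer index over their options.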
@staticmethod
def decode_parameters(particle):
"""Decodes the parameters of a list into a meaningful set of parameters.
To decode a particle, we need the following class-level variables: parameters,
defaultparams, paramdetails, and rounding.
"""
decodeddict={}
# read each value in the particle
for d in range(0,len(particle)):
#get the key of the particle
key=pspso.parameters[d]# expected to save parameter name, like objective, neurons, learning_rate, etc.
#get the value of the particle
particlevalueatd=particle[d]
# if it is a numerical variable, i want to round it
# if it is a categorical variable, i want to check its meaning
# to do that, i can check the first value in the list
if all(isinstance(item, str) for item in pspso.paramdetails[key]):
#this means all the values are string, round the particlevalueatd and get the value
index=int(round(particlevalueatd))
decodeddict[key] = pspso.paramdetails[key][index]
else:
#get the rounding for the parameter
decodeddict[key] =round(particlevalueatd,pspso.rounding[pspso.parameters.index(key)])
if pspso.rounding[pspso.parameters.index(key)] == 0:
decodeddict[key]=int(decodeddict[key])#neurons, max_depth, estimators should be integers
return decodeddict
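# Continuing the example above (hypothetical values): with that search space a
# particle [0.137, 1.6] decodes to {'learning_rate': 0.14, 'kernel': 'poly'},
# since 0.137 is rounded to 2 decimals and round(1.6) = 2 indexes the third
# kernel option.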
@staticmethod
def forward_prop_gbdt(particle,task,score,X_train,Y_train,X_val,Y_val):
#print(pspso.decode_parameters(particle))
"""Train the GBDT after decoding the parameters in variable particle.
The particle is decoded into parameters of the gbdt. Then, The gbdt is trained and the score is sent back to the fitness function.
Inputs
particle: list of values (n dimensions)
A particle in the swarm
task: regression, binary classification
the task to be conducted
score: rmse (regression), auc (binary classification), acc (binary classification)
the type of evaluation
X_train: numpy.ndarray of shape (m, n)
Training dataset
Y_train: numpy.ndarray of shape (m,1)
Training target
X_val: numpy.ndarray of shape (x, y)
Validation dataset
Y_val: numpy.ndarray of shape (x,1)
Validation target
Returns
variable, model
the score of the trained algorithm over the validation dataset, trained model
"""
model=None
eval_set = [(X_val, np.squeeze(Y_val))]#eval set is the same in regression and classification
try:
decodedparams = pspso.decode_parameters(particle)
modelparameters = {**pspso.defaultparams,**decodedparams}
eval_metric=modelparameters['eval_metric']
del modelparameters['eval_metric']
if task !='binary classification':
model = lgb.LGBMRegressor(**modelparameters)
else : # if it is a binary classification task, will use XGBClassifier, note the different decoder since we have objective as fixed this time.
model = lgb.LGBMClassifier(**modelparameters)
model.fit(X_train,np.squeeze(Y_train),
early_stopping_rounds=pspso.early_stopping,
eval_set=eval_set,
eval_metric=eval_metric,
verbose=pspso.verbose )
return pspso.predict(model,'gbdt',task, score,X_val,np.squeeze(Y_val)),model
except Exception as e:
print('An exception occured in GBDT training.')
print(e)
return None,None
@staticmethod
def forward_prop_xgboost(particle,task,score,X_train,Y_train,X_val,Y_val):
"""Train the XGBoost after decoding the parameters in variable particle.
The particle is decoded into parameters of the XGBoost.
This function is similar to forward_prop_gbdt
The gbdt is trained and the score is sent back to the fitness function.
Inputs
particle: list of values (n dimensions)
A particle in the swarm
task: regression, binary classification
the task to be conducted
score: rmse (regression), auc (binary classification), acc (binary classification)
the type of evaluation
X_train: numpy.ndarray of shape (m, n)
Training dataset
Y_train: numpy.ndarray of shape (m,1)
Training target
X_val: numpy.ndarray of shape (x, y)
Validation dataset
Y_val: numpy.ndarray of shape (x,1)
Validation target
Returns
variable, model
the score of the trained algorithm over the validation dataset, trained model
"""
model=None
eval_set = [(X_val, Y_val)]#eval set is the same in regression and classification
try:
decodedparams = pspso.decode_parameters(particle)
modelparameters = {**pspso.defaultparams,**decodedparams}
if task !='binary classification':
model = xgb.XGBRegressor(**modelparameters)
else : # if it is a binary classification task, will use XGBClassifier, note the different decoder since we have objective as fixed this time.
model = xgb.XGBClassifier(**modelparameters)
model.fit(X_train,Y_train,early_stopping_rounds=pspso.early_stopping,eval_set=eval_set,verbose=pspso.verbose )
return pspso.predict(model,'xgboost',task, score,X_val,Y_val),model
except Exception as e:
print('An exception occured in XGBoost training.')
print(e)
return None,None
@staticmethod
def forward_prop_svm(particle,task,score,X_train,Y_train,X_val,Y_val):
"""Train the SVM after decoding the parameters in variable particle.
"""
try:
decodedparams = pspso.decode_parameters(particle)
modelparameters = { **pspso.defaultparams,**decodedparams}
if task == 'regression': # if it is a regression task, use SVR
if modelparameters['kernel']!='poly': # the fourth parameter is only usable with kernel being polynomial : 'poly'
model = SVR(kernel=modelparameters['kernel'], C=modelparameters['C'],gamma=modelparameters['gamma']).fit(X_train, np.squeeze(Y_train))
else:
model = SVR(kernel=modelparameters['kernel'], C=modelparameters['C'],gamma=modelparameters['gamma'],degree=modelparameters['degree']).fit(X_train, np.squeeze(Y_train))
elif task == 'binary classification': # if it is a binary classification task, use SVC
if modelparameters['kernel']!='poly':
model = SVC(kernel=modelparameters['kernel'], C=modelparameters['C'],gamma=modelparameters['gamma'],probability=True).fit(X_train, np.squeeze(Y_train))
else:
model = SVC(kernel=modelparameters['kernel'], C=modelparameters['C'],gamma=modelparameters['gamma'],degree=modelparameters['degree'],probability=True).fit(X_train, np.squeeze(Y_train))
return pspso.predict(model,'svm',task, score,X_val,Y_val),model
except Exception as e:
print(e)
print('An exception occured in SVM training.')
return None,None
@staticmethod
def forward_prop_mlp(particle,task,score,X_train,Y_train,X_val,Y_val):
"""Train the MLP after the decoding the parameters in variable particle.
"""
try:
decodedparams = pspso.decode_parameters(particle)
modelparameters = {**pspso.defaultparams,**decodedparams}
model=Sequential()
model.add(Dense(int(modelparameters['neurons']), input_dim=X_train.shape[1], activation=modelparameters['hiddenactivation']))#particle,task='regression',score='rmse',X_train,Y_train,X_val,Y_val
model.add(Dense(1, activation=modelparameters['activation']))#kernel_initializer='lecun_uniform',bias_initializer='zeros'
model.compile(loss=modelparameters['loss'], optimizer=modelparameters['optimizer'], metrics=modelparameters['metrics'])
model.optimizer.learning_rate=modelparameters['learning_rate']
#checkpoint=ModelCheckpoint('mlp.h5',monitor='val_loss',verbose=pspso.verbose,save_best_only=True,mode=mode)
es = EarlyStopping(monitor='val_loss', mode=modelparameters['mode'], verbose=pspso.verbose,patience=pspso.early_stopping)
#callbacks_list=[checkpoint,es]
callbacks_list=[es]
history=model.fit(X_train,
Y_train,
batch_size=modelparameters['batch_size'],
epochs=modelparameters['epochs'],
shuffle=modelparameters['shuffle'],
validation_data=(X_val,Y_val),
callbacks=callbacks_list,
verbose=pspso.verbose)
#model.load_weights('mlp.h5')
#model.compile(loss=loss, optimizer=modelparameters['optimizer'], metrics=metrics)
return pspso.predict(model,'mlp',task, score,X_val,Y_val),model,history
except Exception as e:
print("An exception occured in MLP training.")
print(e)
return None,None,None # match the arity of the success path (score, model, history)
@staticmethod
def f(q,estimator,task,score,X_train,Y_train,X_val,Y_val):
"""Higher-level method to do forward_prop in the
whole swarm.
Inputs
q: numpy.ndarray of shape (n_particles, dimensions)
The swarm that will perform the search
Returns
numpy.ndarray of shape (n_particles, )
The computed loss for each particle
"""
n_particles = q.shape[0]
if estimator=='xgboost':
e = [pspso.forward_prop_xgboost(q[i],task,score,X_train,Y_train,X_val,Y_val) for i in range(n_particles)]# forward_prop varies based on each classifier
j=[e[i][0] for i in range(n_particles)]
elif estimator == 'gbdt':
e = [pspso.forward_prop_gbdt(q[i],task,score,X_train,Y_train,X_val,Y_val) for i in range(n_particles)]
j=[e[i][0] for i in range(n_particles)]
elif estimator == 'svm':
e = [pspso.forward_prop_svm(q[i],task,score,X_train,Y_train,X_val,Y_val) for i in range(n_particles)]
j=[e[i][0] for i in range(n_particles)]
elif estimator == 'mlp': # as mentioned in paper, the mlp should be treated differently
e = [pspso.forward_prop_mlp(q[i],task,score,X_train,Y_train,X_val,Y_val) for i in range(n_particles)]
j=[e[i][0] for i in range(n_particles)]
if pspso.best_particle_position_ann is not None:
#if a position has been already assigned to this global variable, then it implies that this is not the first iteration.
#if the same particle is found as the global best, check if it has better solution and swap, otherwise, keep the previous one and update the particle best
for i in range(n_particles):
# if it is the same particle as the global best
if pspso.decode_parameters(q[i]) == pspso.decode_parameters(pspso.best_particle_position_ann):
#if error is higher than global best error
if j[i]>pspso.best_paricle_cost_ann:#same parameters, lower accuracy because of initial weights
j[i]=pspso.best_paricle_cost_ann #assign the global best accuracy
#check if there is a better solution in the swarm.
if min(j) <=pspso.best_paricle_cost_ann:
min_loss_index= j.index(min(j)) # get the index of the minimum value
pspso.best_paricle_cost_ann=min(j)
pspso.best_model_ann=e[min_loss_index][1] #get the best model at index 1
pspso.best_history_ann = e[min_loss_index][2]# get the history which is at index 2
pspso.best_particle_position_ann=q[min_loss_index]#get the best particle position from the list of particles
else:
# this case is for the first iteration where no position,cost,model, or history are assigned yet
min_loss_index= j.index(min(j)) # get the index of the minimum value
pspso.best_paricle_cost_ann=min(j) # set the best cost to best_particle_cost_ann
pspso.best_model_ann=e[min_loss_index][1] #set the best model to best_model_ann
pspso.best_history_ann=e[min_loss_index][2] # set the best history in best_history_ann
pspso.best_particle_position_ann=q[min_loss_index]#set the best position to best_particle_position_ann
#return the score of each particle in the population.
return np.array(j)
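# Hedged usage sketch (illustration only; array names and bounds are assumed): the pyswarms
# optimizer calls f once per iteration with the whole swarm as a (n_particles, dimensions) array:
#   q = np.random.uniform(low=x_min, high=x_max, size=(n_particles, dimensions))
#   losses = pspso.f(q, 'xgboost', 'regression', 'rmse', X_train, Y_train, X_val, Y_val)
#   # losses is a numpy.ndarray of shape (n_particles,), one score per particle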
@staticmethod
def rebuildmodel(estimator,pos,task,score,X_train,Y_train,X_val,Y_val):
"""Used to rebuild the model after selecting the parameters.
"""
if estimator=='xgboost':
met,model=pspso.forward_prop_xgboost(pos,task,score,X_train,Y_train,X_val,Y_val)
elif estimator == 'gbdt':
met,model=pspso.forward_prop_gbdt(pos,task,score,X_train,Y_train,X_val,Y_val)
elif estimator == 'svm':
met,model=pspso.forward_prop_svm(pos,task,score,X_train,Y_train,X_val,Y_val)
elif estimator == 'mlp' :# again, if the case is mlp,1dcnn, or 2dcnn we will load the best solution found in global variables of the class
return pspso.best_paricle_cost_ann,pspso.best_model_ann
return met,model
def fitpspso(self, X_train=None, Y_train=None, X_val=None,Y_val=None,psotype='global',number_of_particles=5, number_of_iterations=10, options = {'c1': 1.49618, 'c2': 1.49618, 'w': 0.7298}):
"""Select the algorithm parameters based on PSO.
Inputs
X_train: numpy.ndarray of shape (a,b)
Contains the training input features, a is the number of samples, b is the number of features
Y_train: numpy.ndarray of shape (a,1)
Contains the training target, a is the number of samples
X_train: numpy.ndarray of shape (c,b)
Contains the validation input features, c is the number of samples, b is the number of features
Y_train: numpy.ndarray of shape (c,1)
Contains the training target, c is the number of samples
number_of_particles: integer
number of particles in the PSO search space.
number_of_iterations: integer
number of iterations.
options: dictionary
A key,value dict of PSO parameters c1,c2, and w
Returns
pos: list
The encoded parameters of the best solution
cost: float
The score of the best solution
duration: float
The time taken to conduct random search.
model:
The best model generated via random search
combinations: list of lists
The combinations examined during random search
results: list
The score of each combination in combinations list
"""
print("Running PSO Search .....")
self.selectiontype= "PSO" # selection type
self.number_of_particles=number_of_particles # the number of particles of the PSO
self.number_of_iterations=number_of_iterations # the number of iterations in the pso
self.psotype=psotype
self.options=options # parameters of the PSO
self.number_of_attempts=self.number_of_iterations *self.number_of_particles # max number of attempts to find a solution
self.totalnbofcombinations= len(self.calculatecombinations())
pspso.best_paricle_cost_ann =None
pspso.best_model_ann=None
pspso.best_history_ann=None
pspso.best_particle_position_ann=None
kwargs = {"estimator":self.estimator, "task":self.task, "score":self.score, "X_train" : X_train, "Y_train" : Y_train,
"X_val" : X_val,"Y_val":Y_val}
if psotype =='global':
self.optimizer = ps.single.GlobalBestPSO(n_particles=self.number_of_particles, dimensions=self.dimensions, options=self.options,bounds=self.bounds)
elif psotype =='local':
self.optimizer = ps.single.LocalBestPSO(n_particles=self.number_of_particles, dimensions=self.dimensions, options=self.options,bounds=self.bounds)
start=time.time()
#Perform optimization by using the optimize class
self.cost, self.pos = self.optimizer.optimize(pspso.f, iters=self.number_of_iterations,**kwargs)
end=time.time()
self.duration=end-start
self.met,self.model=pspso.rebuildmodel(self.estimator,self.pos,self.task,self.score,X_train,Y_train,X_val,Y_val)
if self.estimator =='mlp' :# if the estimator is mlp, assign history variable
self.history=pspso.best_history_ann
self.miniopt=self.save_optimizer_details()
return self.pos,self.cost,self.duration,self.model,self.optimizer
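# Hedged usage sketch (illustration only; the constructor arguments are assumed and not shown in
# this excerpt):
#   p = pspso(estimator='xgboost', task='regression', score='rmse')
#   pos, cost, duration, model, optimizer = p.fitpspso(X_train, Y_train, X_val, Y_val,
#                                                      number_of_particles=10, number_of_iterations=20)
#   p.print_results()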
def fitpsgrid(self,X_train=None, Y_train=None, X_val=None,Y_val=None ):
""" Select the algorithm parameters based on Grid search.
Grid search was implemented to match the training process with pspso and for comparison purposes.
I have to traverse each value between x_min, x_max. Create a list seperating rounding value.
"""
print("Running Grid Search .....")
self.selectiontype= "Grid"
self.results=[]
self.model=None
self.pos=None
self.cost=None
self.combinations=self.calculatecombinations()
self.totalnbofcombinations=len(self.combinations)
self.number_of_attempts=self.totalnbofcombinations
start=time.time()
for comb in self.combinations:#for each value, run the function associated with the estimator
#run the combination
if self.estimator=='xgboost':
met,mo=pspso.forward_prop_xgboost(comb,self.task,self.score,X_train,Y_train,X_val,Y_val)
elif self.estimator == 'gbdt':
met,mo=pspso.forward_prop_gbdt(comb,self.task,self.score,X_train,Y_train,X_val,Y_val)
elif self.estimator == 'svm':
met,mo=pspso.forward_prop_svm(comb,self.task,self.score,X_train,Y_train,X_val,Y_val)
elif self.estimator == 'mlp' :
met,mo,hist=pspso.forward_prop_mlp(comb,self.task,self.score,X_train,Y_train,X_val,Y_val)
self.results.append(met)#record results
if self.cost is None: #starting
self.cost=met
self.pos=comb
self.model= mo
else:
if met <self.cost: #everything is treated as a minimization problem
self.cost=met
self.pos=comb
self.model=mo
if self.estimator =='mlp':
self.history =hist
end=time.time()
self.duration=end-start
return self.pos,self.cost,self.duration,self.model,self.combinations,self.results #return pos, cost, duration, model, combinations, results
def fitpsrandom(self,X_train=None, Y_train=None, X_val=None,Y_val=None,number_of_attempts=20 ):
"""Select the algorithm parameters based on radnom search.
With Random search, the process is done for number of times specified by a parameter in the function.
Inputs
X_train: numpy.ndarray of shape (a,b)
Contains the training input features, a is the number of samples, b is the number of features
Y_train: numpy.ndarray of shape (a,1)
Contains the training target, a is the number of samples
X_train: numpy.ndarray of shape (c,b)
Contains the validation input features, c is the number of samples, b is the number of features
Y_train: numpy.ndarray of shape (c,1)
Contains the training target, c is the number of samples
number_of_attempts: integer
The number of times random search to be tried.
Returns
pos: list
The encoded parameters of the best solution
cost: float
The score of the best solution
duration: float
The time taken to conduct random search.
model:
The best model generated via random search
combinations: list of lists
The combinations examined during random search
results: list
The score of each combination in combinations list
"""
print("Running Random Search .....")
self.number_of_attempts=number_of_attempts
self.selectiontype= "Random"
self.combinations=self.calculatecombinations()
self.totalnbofcombinations=len(self.combinations)#check the number of combinations we have
start=time.time()
self.results=[]
self.model=None
self.pos=None
self.cost=None
for z in list(range(0,number_of_attempts)):
#generate a random number between zero and totalnbofcombinations-1
a=random.randint(0,self.totalnbofcombinations-1)
comb=self.combinations[a]
#run the combination
if self.estimator=='xgboost':
met,mo=pspso.forward_prop_xgboost(comb,self.task,self.score,X_train,Y_train,X_val,Y_val)
elif self.estimator == 'gbdt':
met,mo=pspso.forward_prop_gbdt(comb,self.task,self.score,X_train,Y_train,X_val,Y_val)
elif self.estimator == 'svm':
met,mo=pspso.forward_prop_svm(comb,self.task,self.score,X_train,Y_train,X_val,Y_val)
elif self.estimator == 'mlp' :
met,mo,hist=pspso.forward_prop_mlp(comb,self.task,self.score,X_train,Y_train,X_val,Y_val)
self.results.append(met)
if self.cost is None: #starting
self.cost=met
self.pos=comb
self.model= mo
else:
if met <self.cost: #everything is treated as a minimization problem
self.cost=met
self.pos=comb
self.model=mo
if self.estimator =='mlp':
self.history =hist
end=time.time()
self.duration=end-start
return self.pos,self.cost,self.duration,self.model,self.combinations,self.results
def print_results(self):
"""Print the results found in the pspso instance. Expected to print general details
like estimator, task, selection type, number of attempts examined, total number of
combinations, position of the best solution, score of the best solution, parameters,
details about the pso algorithm.
"""
print("Estimator: " + self.estimator)
print("Task: "+ self.task)
print("Selection type: "+ str(self.selectiontype))
print("Number of attempts:" + str(self.number_of_attempts))
print("Total number of combinations: " + str(self.totalnbofcombinations))
print("Parameters:")
print(pspso.decode_parameters(self.pos))
print("Global best position: " + str(self.pos))
print("Global best cost: " +str(round(self.cost,4)))
print("Time taken to find the set of parameters: "+ str(self.duration))
if self.selectiontype == "PSO":
print("Number of particles: " +str(self.number_of_particles))
print("Number of iterations: "+ str(self.number_of_iterations))
def calculatecombinations(self):
"""A function that will generate all the possible combinations in the search space.
Used mainly with grid search
Returns
combinations: list
A list that contains all the possible combinations.
"""
index=0
thedict={}
#Traverse each value between x_min and x_max, creating a list whose step size is derived from the rounding value.
for i,j in zip(self.x_min,self.x_max):
a=np.arange(i, j+0.000001, 10**(-1*self.rounding[index]))
a=np.round(a,self.rounding[index])
# Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import copy
import inspect
import numpy as np
from pandapipes.idx_branch import FROM_NODE, TO_NODE, FROM_NODE_T, TO_NODE_T, VINIT, branch_cols, \
ACTIVE as ACTIVE_BR
from pandapipes.idx_node import NODE_TYPE, P, PINIT, NODE_TYPE_T, T, node_cols, \
ACTIVE as ACTIVE_ND, TABLE_IDX as TABLE_IDX_ND, ELEMENT_IDX as ELEMENT_IDX_ND
from pandapipes.properties.fluids import get_fluid
from scipy.sparse import coo_matrix, csgraph
try:
import pplog as logging
except ImportError:
import logging
logger = logging.getLogger(__name__)
default_options = {"friction_model": "nikuradse", "converged": False, "tol_p": 1e-4, "tol_v": 1e-4,
"tol_T": 1e-3, "tol_res": 1e-3, "iter": 10, "error_flag": False, "alpha": 1,
"nonlinear_method": "constant", "p_scale": 1, "mode": "hydraulics",
"ambient_temperature": 293, "check_connectivity": True,
"only_update_hydraulic_matrix": False,
"reuse_internal_data": False,
"quit_on_inconsistency_connectivity": False}
def get_net_option(net, option_name):
"""
Returns the requested option of the given net. Raises a UserWarning if the option was not found.
:param net: pandapipesNet for which option is requested
:type net: pandapipesNet
:param option_name: Name of requested option
:type option_name: str
:return: option - The value of the option
"""
try:
return net["_options"][option_name]
except KeyError:
raise UserWarning("The option %s is not stored in the pandapipes net." % option_name)
def get_net_options(net, *option_names):
"""
Returns several requested options of the given net. Raises a UserWarning if any of the options
was not found.
:param net: pandapipesNet for which option is requested
:type net: pandapipesNet
:param option_names: Names of requested options (as args)
:type option_names: str
:return: options - Generator with the values of the requested options (in the given order)
"""
return (get_net_option(net, option) for option in list(option_names))
def set_net_option(net, option_name, option_value):
"""
Auxiliary function to set the value of a specific option (options are saved in a dict).
:param net: pandapipesNet for which option shall be set
:type net: pandapipesNet
:param option_name: Name under which the option shall be saved
:type option_name: str
:param option_value: Value that shall be set for the given option
:return: No output
"""
net["_options"][option_name] = option_value
def warn_high_index(element_name, element_length, max_element_index):
if (element_length > 100 and max_element_index > 1000 * element_length) \
or (element_length <= 100 and max_element_index > 50000):
logger.warning("High index in %s table!!!" % element_name)
def add_table_lookup(table_lookup, table_name, table_number):
"""
Auxiliary function to add a lookup between table name in the pandapipes net and table number in
the internal structure (pit).
:param table_lookup: The lookup dictionary from table names to internal number (n2t) and vice \
versa (t2n)
:type table_lookup: dict
:param table_name: Name of the table that shall be mapped to number
:type table_name: str
:param table_number: Number under which the table is saved in the pit
:type table_number: int
:return: No output
"""
table_lookup["n2t"][table_number] = table_name
table_lookup["t2n"][table_name] = table_number
def get_table_number(table_lookup, table_name):
"""
Auxiliary function to retrieve the internal pit number for a given pandapipes net table name from
the table lookup.
:param table_lookup: The lookup dictionary from table names to internal number (n2t) and vice \
versa (t2n)
:type table_lookup: dict
:param table_name: Name of the table for which the internal number shall be retrieved
:type table_name: str
:return: table_number - Internal number of the given table name within the pit
:rtype: int
"""
if table_name not in table_lookup["t2n"]:
return None
return table_lookup["t2n"][table_name]
def get_table_name(table_lookup, table_number):
"""
Auxiliary function to retrieve the pandapipes net table name for a given internal pit number from
the table lookup.
:param table_lookup: The lookup dictionary from table names to internal number (n2t) and vice \
versa (t2n)
:type table_lookup: dict
:param table_number: Internal number of the table for which the name shall be retrieved
:type table_number: int
:return: table_name - pandapipes net table name for the internal pit number
:rtype: str
"""
if table_number not in table_lookup["n2t"]:
return None
return table_lookup["n2t"][table_number]
def get_lookup(net, pit_type="node", lookup_type="index"):
"""
Returns internal lookups which are mostly defined in the function `create_lookups`.
:param net: The pandapipes net for which the lookup is requested
:type net: pandapipesNet
:param pit_type: Identifier which of the two pits ("branch" or "node") the lookup belongs to
:type pit_type: str
:param lookup_type: Name of the lookup type
:type lookup_type: str
:return: lookup - A lookup (mostly a dict with mappings from pandapipesNet to internal
structure)
:rtype: dict, np.array, ....
"""
pit_type = pit_type.lower()
lookup_type = lookup_type.lower()
all_lookup_types = ["index", "table", "from_to", "active", "length", "from_to_active",
"index_active"]
if lookup_type not in all_lookup_types:
type_names = "', '".join(all_lookup_types)
logger.error("No lookup type '%s' exists. Please choose one of '%s'."
% (lookup_type, type_names))
return None
if pit_type not in ["node", "branch"]:
logger.error("No pit type '%s' exists. Please choose one of 'node' and 'branch'."
% pit_type)
return None
return net["_lookups"]["%s_%s" % (pit_type, lookup_type)]
def set_user_pf_options(net, reset=False, **kwargs):
"""
This function sets the "user_pf_options" dictionary for net. These options overrule
net._internal_options once they are added to net. These options are used in configuration of
load flow calculation.
At the same time, user-defined arguments for `pandapipes.pipeflow()` always have a higher
priority. To remove user_pf_options, set "reset = True" and provide no additional arguments.
:param net: pandapipes network for which to create user options
:type net: pandapipesNet
:param reset: Specifies whether the user_pf_options is removed before setting new options
:type reset: bool, default False
:param kwargs: pipeflow options that shall be set, e. g. tol_v = 1e-7
:return: No output
"""
if reset or 'user_pf_options' not in net.keys():
net['user_pf_options'] = dict()
additional_kwargs = set(kwargs.keys()) - set(default_options.keys()) - {"fluid", "hyd_flag"}
if len(additional_kwargs) > 0:
logger.info('parameters %s are not in the list of standard options'
% list(additional_kwargs))
net.user_pf_options.update(kwargs)
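# Hedged usage sketch (assumes an existing pandapipesNet "net"): user options persist on the net
# and overrule the defaults on every subsequent pipeflow call, while keyword arguments passed to
# pipeflow() itself still take precedence:
#   set_user_pf_options(net, tol_p=1e-6, iter=20)     # pipeflow(net) now runs with these values
#   set_user_pf_options(net, reset=True)              # removes all user options again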
def init_options(net, local_parameters):
"""
Initializes physical and mathematical constants included in pandapipes. In addition, options
for the nonlinear and time-dependent solver are also set.
Those are the options that can be set and their default values:
- **iter** (int): 10 - If the simulation is terminated after a certain amount of iterations,\
this is the number of iterations.
- **tol_p** (float): 1e-4 - The relative tolerance for the pressure. A result is accepted \
if the relative error is smaller than this factor.
- **tol_v** (float): 1e-4 - The relative tolerance for the velocity. A result is accepted \
if the relative error is smaller than this factor.
- **tol_T** (float): 1e-3 - The relative tolerance for the temperature. A result is \
accepted if the relative error is smaller than this factor.
- **tol_res** (float): 1e-3 - The relative tolerance for the residual. A result is accepted\
if the relative error is smaller than this factor.
- **ambient_temperature** (float): 293.0 - The assumed ambient temperature for the\
calculation of the barometric formula
- **friction_model** (str): "nikuradse" - The friction model that shall be used to identify\
the value for lambda (can be "nikuradse" or "colebrook")
- **alpha** (float): 1 - The step width for the Newton iterations. If the Newton steps \
shall be damped, **alpha** can be reduced. See also the **nonlinear_method** \
parameter.
- **nonlinear_method** (str): "constant" - The option of how the damping factor **alpha** \
is determined in each iteration. It can be "constant" (i.e. **alpha** is always the\
same in each iteration) or "automatic", in which case **alpha** is adapted \
automatically with respect to the convergence behaviour.
- **gas_impl** (str): "pandapipes" - Implementation of the gas model. It can be set to\
"pandapipes" with calculations according to "Handbuch der Gasversorgungstechnik"\
or to "STANET" with calculations according to the STANET reference.
- **heat_transfer** (bool): False - Flag to determine if the heat transfer shall be\
calculated.
- **only_update_hydraulic_matrix** (bool): False - If True, the system matrix is not \
created in every iteration, but only the data is updated according to a lookup that\
is identified in the first iteration. This speeds up calculation, but has not yet\
been tested extensively.
- **check_connectivity** (bool): True - If True, a connectivity check is performed at the\
beginning of the pipeflow and parts of the net that are not connected to external\
grids are set inactive.
- **quit_on_inconsistency_connectivity** (bool): False - If True, inconsistencies in the\
connectivity check raise an error, otherwise they are handled. Inconsistencies mean\
that out of service nodes are connected to in service branches. If that is the case\
and the flag is set to False, the connected nodes are activated.
:param net: The pandapipesNet for which the options are initialized
:type net: pandapipesNet
:param local_parameters: Dictionary with local parameters that were passed to the pipeflow call.
:type local_parameters: dict
:return: No output
:Example:
>>> init_options(net)
"""
from pandapipes.pipeflow import pipeflow
# the base layer of the options consists of the default options
net["_options"] = copy.deepcopy(default_options)
excluded_params = {"net", "interactive_plotting", "t_start", "sol_vec", "kwargs"}
# the base layer is overwritten and extended by options given by the default parameters of the
# pipeflow function definition
args_pf = inspect.getfullargspec(pipeflow)
pf_func_options = dict(zip(args_pf.args[-len(args_pf.defaults):], args_pf.defaults))
pf_func_options = {k: pf_func_options[k] for k in set(pf_func_options.keys()) - excluded_params}
net["_options"].update(pf_func_options)
# the third layer is the user defined pipeflow options
if "user_pf_options" in net and len(net.user_pf_options) > 0:
net["_options"].update(net.user_pf_options)
# the last layer consists of the parameters passed by the user; it is defined as the set of local
# parameters of the pipeflow call that diverge from the default parameters of the function
# definition in the second layer
params = dict()
for k, v in local_parameters.items():
if k in excluded_params or (k in pf_func_options and pf_func_options[k] == v):
continue
params[k] = v
params.update(local_parameters["kwargs"])
net["_options"].update(params)
net["_options"]["fluid"] = get_fluid(net).name
if not net["_options"]["only_update_hydraulic_matrix"]:
net["_options"]["reuse_internal_data"] = False
def create_internal_results(net):
"""
Initializes a dictionary that shall contain some internal results later.
:param net: pandapipes net to which internal result dict will be added
:type net: pandapipesNet
:return: No output
"""
net["_internal_results"] = dict()
def write_internal_results(net, **kwargs):
"""
Adds specified values to the internal result dictionary of the given pandapipes net. If internal
results are not yet defined for the net, they are created as well.
:param net: pandapipes net for which to update internal result dict
:type net: pandapipesNet
:param kwargs: Additional keyword arguments with the internal result values
:return: No output
"""
if "_internal_results" not in net:
create_internal_results(net)
net["_internal_results"].update(kwargs)
def initialize_pit(net, node_name, NodeComponent, NodeElementComponent, BranchComponent,
BranchWInternalsComponent):
"""
Initializes and fills the internal structure which is called pit (pandapipes internal tables).
The structure is a dictionary which should contain one array for all nodes and one array for all
branches of the net (c.f. also `create_empty_pit`).
:param net: The pandapipes network for which to create and fill the internal structure
:type net: pandapipesNet
:return: (node_pit, branch_pit) - The two internal structure arrays
:rtype: tuple(np.array)
"""
pit = create_empty_pit(net)
for comp in net['component_list']:
if issubclass(comp, NodeComponent) | \
issubclass(comp, BranchWInternalsComponent) | \
issubclass(comp, NodeElementComponent):
comp.create_pit_node_entries(net, pit["node"], node_name)
if issubclass(comp, BranchComponent):
comp.create_pit_branch_entries(net, pit["branch"], node_name)
return pit["node"], pit["branch"]
def create_empty_pit(net):
"""
Creates an empty internal structure which is called pit (pandapipes internal tables). The\
structure is a dictionary which should contain one array for all nodes and one array for all\
branches of the net. It is very often referred to within the pipeflow. So the structure in\
general looks like this:
>>> net["_pit"] = {"node": np.array((no_nodes, col_nodes), dtype=np.float64), "branch": np.array((no_branches, col_branches), dtype=np.float64)}
:param net: The pandapipes net to which to add the empty structure
:type net: pandapipesNet
:return: pit - The dict of arrays with the internal node / branch structure
:rtype: dict
"""
node_length = get_lookup(net, "node", "length")
branch_length = get_lookup(net, "branch", "length")
# init empty pit
pit = {"node": np.empty((node_length, node_cols), dtype=np.float64),
"branch": np.empty((branch_length, branch_cols), dtype=np.float64)}
net["_pit"] = pit
return pit
def extract_all_results(net, node_name):
"""
Extract results from branch pit and node pit and write them to the different tables of the net,
as defined by the component models.
:param net: pandapipes net for which to extract results into net.res_xy
:type net: pandapipesNet
:return: No output
"""
for comp in net['component_list']:
comp.extract_results(net, net["_options"], node_name)
def create_lookups(net, NodeComponent, BranchComponent, BranchWInternalsComponent):
"""
Create all lookups necessary for the pipeflow of the given net.
The lookups are usually:
- node_from_to: The start and end indices of all node component tables within the pit
- branch_from_to: The start and end indices of all branch component tables within the pit
- node_table: Dictionary to determine indices for node component tables (e.g. {"junction": 0}).\
Can be arbitrary and strongly depends on the component order given by\
`get_component_list`.
- branch_table: Dictionary to determine indices for branch component tables (e.g.\
{"pipe": 0, "valve": 1}). Can be arbitrary and strongly depends on the\
component order given by `get_component_list`.
- node_index: Lookup from component index (e.g. junction 2) to pit index (e.g. 0) for nodes.
- branch_index: Lookup from component index (e.g. pipe 1) to pit index (e.g. 5) for branches.
- internal_nodes_lookup: Lookup for internal nodes of branch components that makes result\
extraction a lot easier.
:param net: The pandapipes network for which to create the lookups
:type net: pandapipesNet
:return: No output
"""
node_ft_lookups, node_idx_lookups, node_from, node_table_nr = dict(), dict(), 0, 0
branch_ft_lookups, branch_idx_lookups, branch_from, branch_table_nr = dict(), dict(), 0, 0
branch_table_lookups = {"t2n": dict(), "n2t": dict()}
node_table_lookups = {"t2n": dict(), "n2t": dict()}
internal_nodes_lookup = dict()
for comp in net['component_list']:
if issubclass(comp, BranchComponent):
branch_from, branch_table_nr = comp.create_branch_lookups(
net, branch_ft_lookups, branch_table_lookups, branch_idx_lookups, branch_table_nr,
branch_from)
if issubclass(comp, NodeComponent) | issubclass(comp, BranchWInternalsComponent):
node_from, node_table_nr = comp.create_node_lookups(
net, node_ft_lookups, node_table_lookups, node_idx_lookups, node_from,
node_table_nr, internal_nodes_lookup)
net["_lookups"] = {"node_from_to": node_ft_lookups, "branch_from_to": branch_ft_lookups,
"node_table": node_table_lookups, "branch_table": branch_table_lookups,
"node_index": node_idx_lookups, "branch_index": branch_idx_lookups,
"node_length": node_from, "branch_length": branch_from,
"internal_nodes_lookup": internal_nodes_lookup}
def check_connectivity(net, branch_pit, node_pit, check_heat):
"""
Perform a connectivity check which means that network nodes are identified that don't have any
connection to an external grid component. Quick overview over the steps of this function:
- Build a sparse matrix graph (scipy.sparse.csr_matrix) from all branches that are in_service\
(nodes of this graph are taken from FROM_NODE and TO_NODE column in pit).
- Add a node that represents all external grids and connect all nodes that are connected to\
external grids to that node.
- Perform a breadth first order search to identify all nodes that are reachable from the \
added external grid node.
- Create masks for existing nodes and branches to show if they are reachable from an \
external grid.
- Compare the reachable nodes with the initial in_service nodes.\n
- If nodes are reachable that were set out of service by the user, they are either set \
in_service or an error is raised. The behavior depends on the pipeflow option \
**quit_on_inconsistency_connectivity**.
- If nodes are not reachable that were set in_service by the user, they will be set out of\
service automatically (this is the desired functionality of the connectivity check).
:param net: The pandapipesNet for which to perform the check
:type net: pandapipesNet
:param branch_pit: Internal array with branch entries
:type branch_pit: np.array
:param node_pit: Internal array with node entries
:type node_pit: np.array
:param check_heat: Flag which determines whether to also check for connectivity to heat \
external grids
:type check_heat: bool
:return: (nodes_connected_hyd, branches_connected) - Lookups of np.arrays stating which of the
internal nodes and branches are reachable from any of the hyd_slacks (np mask).
:rtype: tuple(np.array)
"""
active_branch_lookup = branch_pit[:, ACTIVE_BR].astype(np.bool)
active_node_lookup = node_pit[:, ACTIVE_ND].astype(np.bool)
from_nodes = branch_pit[:, FROM_NODE].astype(np.int32)
to_nodes = branch_pit[:, TO_NODE].astype(np.int32)
hyd_slacks = np.where((node_pit[:, NODE_TYPE] == P) & node_pit[:, ACTIVE_ND].astype(np.bool))[0]
nodes_connected, branches_connected = perform_connectivity_search(
net, node_pit, hyd_slacks, from_nodes, to_nodes, active_node_lookup, active_branch_lookup,
mode="hydraulics")
if not check_heat:
return nodes_connected, branches_connected
heat_slacks = np.where((node_pit[:, NODE_TYPE_T] == T) & nodes_connected)[0]
if len(heat_slacks) == len(hyd_slacks) and np.all(heat_slacks == hyd_slacks):
return nodes_connected, branches_connected
nodes_connected, branches_connected = perform_connectivity_search(
net, node_pit, heat_slacks, from_nodes, to_nodes, nodes_connected, branches_connected,
mode="heat transfer")
return nodes_connected, branches_connected
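def _connectivity_toy_example():
    """Hedged sketch (not part of the original module): illustrates the virtual-slack-node trick
    used above on a toy graph with 4 nodes, branches 0->1 and 2->3 and a single slack at node 0.
    Only nodes 0 and 1 should be reachable."""
    from_nodes = np.array([0, 2])
    to_nodes = np.array([1, 3])
    slack_nodes = np.array([0])
    len_nodes = 4
    # connect every slack node to one virtual node with index len_nodes
    fn = np.concatenate([from_nodes, slack_nodes])
    tn = np.concatenate([to_nodes, np.full(len(slack_nodes), len_nodes)])
    adj = coo_matrix((np.ones(len(fn)), (fn, tn)), shape=(len_nodes + 1, len_nodes + 1))
    reachable = csgraph.breadth_first_order(adj, len_nodes, False, False)
    return reachable[reachable != len_nodes]  # -> array containing only 0 and 1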
def perform_connectivity_search(net, node_pit, slack_nodes, from_nodes, to_nodes,
active_node_lookup, active_branch_lookup, mode="hydraulics"):
len_nodes = len(node_pit)
nobranch = np.sum(active_branch_lookup)
active_from_nodes = from_nodes[active_branch_lookup]
active_to_nodes = to_nodes[active_branch_lookup]
# we create a "virtual" node that is connected to all slack nodes and start the connectivity
# search at this node
fn_matrix = np.concatenate([active_from_nodes, slack_nodes])
tn_matrix = np.concatenate([active_to_nodes,
np.full(len(slack_nodes), len_nodes, dtype=np.int32)])
adj_matrix = coo_matrix((np.ones(nobranch + len(slack_nodes)), (fn_matrix, tn_matrix)),
shape=(len_nodes + 1, len_nodes + 1))
# check which nodes are reachable from the virtual heat slack node
reachable_nodes = csgraph.breadth_first_order(adj_matrix, len_nodes, False, False)
# throw out the virtual heat slack node
reachable_nodes = reachable_nodes[reachable_nodes != len_nodes]
nodes_connected = np.zeros(len(active_node_lookup), dtype=np.bool)
nodes_connected[reachable_nodes] = True
if not np.all(nodes_connected[active_from_nodes] == nodes_connected[active_to_nodes]):
raise ValueError(
"An error occured in the %s connectivity check. Please contact the pandapipes development" \
" team!" % mode)
branches_connected = active_branch_lookup & nodes_connected[from_nodes]
oos_nodes = np.where(~nodes_connected & active_node_lookup)[0]
is_nodes = np.where(nodes_connected & ~active_node_lookup)[0]
if len(oos_nodes) > 0:
msg = "\n".join("In table %s: %s" % (tbl, nds) for tbl, nds in
get_table_index_list(net, node_pit, oos_nodes))
logger.info("Setting the following nodes out of service for %s calculation in connectivity"
" check:\n%s" % (mode, msg))
if len(is_nodes) > 0:
node_type_message = "\n".join("In table %s: %s" % (tbl, nds) for tbl, nds in
get_table_index_list(net, node_pit, is_nodes))
if get_net_option(net, "quit_on_inconsistency_connectivity"):
raise UserWarning(
"The following nodes are connected to in_service branches in the %s calculation "
"although being out of service, which leads to an inconsistency in the connectivity"
" check!\n%s" % (mode, node_type_message))
logger.info("Setting the following nodes back in service for %s calculation in connectivity"
" check as they are connected to in_service branches:\n%s"
% (mode, node_type_message))
return nodes_connected, branches_connected
def get_table_index_list(net, pit_array, pit_indices, pit_type="node"):
"""
Auxiliary function to get a list of tables and the table indices that belong to a number of pit
indices.
:param net: pandapipes net for which the list is requested
:type net: pandapipesNet
:param pit_array: Internal structure from which to derive the tables and table indices
:type pit_array: np.array
:param pit_indices: Indices for which the table name and index list are requested
:type pit_indices: list, np.array, ....
:param pit_type: Type of the pit ("node" or "branch")
:type pit_type: str, default "node"
:return: List of table names and table indices belonging to the pit indices
"""
int_pit = pit_array[pit_indices, :]
tables = np.unique(int_pit[:, TABLE_IDX_ND])
table_lookup = get_lookup(net, pit_type, "table")
return [(get_table_name(table_lookup, tbl), list(int_pit[int_pit[:, TABLE_IDX_ND] == tbl,
ELEMENT_IDX_ND].astype(np.int32)))
for tbl in tables]
def reduce_pit(net, node_pit, branch_pit, nodes_connected, branches_connected):
"""
Create an internal ("active") pit with all nodes and branches that are actually in_service. This
is also done for different lookups (e.g. the from_to indices for this pit and the node index
lookup). A specialty that needs to be considered is that from_nodes and to_nodes change to new
indices.
:param net: The pandapipesNet for which the pit shall be reduced
:type net: pandapipesNet
:param node_pit: The internal structure node array
:type node_pit: np.array
:param branch_pit: The internal structure branch array
:type branch_pit: np.array
:param nodes_connected: A mask array stating which nodes are actually connected to the rest of\
the net
:type nodes_connected: np.array
:param branches_connected: A mask array stating which branches are actually connected to the \
rest of the net
:type branches_connected: np.array
:return: No output
"""
active_pit = dict()
els = dict()
reduced_node_lookup = None
if np.alltrue(nodes_connected):
net["_lookups"]["node_from_to_active"] = copy.deepcopy(get_lookup(net, "node", "from_to"))
net["_lookups"]["node_index_active"] = copy.deepcopy(get_lookup(net, "node", "index"))
active_pit["node"] = np.copy(node_pit)
else:
active_pit["node"] = np.copy(node_pit[nodes_connected, :])
reduced_node_lookup = np.cumsum(nodes_connected) - 1
node_idx_lookup = get_lookup(net, "node", "index")
net["_lookups"]["node_index_active"] = {
tbl: reduced_node_lookup[idx_lookup[idx_lookup != -1]]
for tbl, idx_lookup in node_idx_lookup.items()}
els["node"] = nodes_connected
if np.alltrue(branches_connected):
net["_lookups"]["branch_from_to_active"] = copy.deepcopy(get_lookup(net, "branch",
"from_to"))
active_pit["branch"] = np.copy(branch_pit)
net["_lookups"]["branch_index_active"] = copy.deepcopy(get_lookup(net, "branch", "index"))
else:
active_pit["branch"] = np.copy(branch_pit[branches_connected, :])
if reduced_node_lookup is not None:
active_pit["branch"][:, FROM_NODE] = reduced_node_lookup[
branch_pit[branches_connected, FROM_NODE].astype(np.int32)]
active_pit["branch"][:, TO_NODE] = reduced_node_lookup[
branch_pit[branches_connected, TO_NODE].astype(np.int32)]
branch_idx_lookup = get_lookup(net, "branch", "index")
if len(branch_idx_lookup):
reduced_branch_lookup = np.cumsum(branches_connected) - 1
net["_lookups"]["branch_index_active"] = {
tbl: reduced_branch_lookup[idx_lookup[idx_lookup != -1]]
for tbl, idx_lookup in branch_idx_lookup.items()}
else:
net["_lookups"]["branch_index_active"] = dict()
els["branch"] = branches_connected
net["_active_pit"] = active_pit
net["_lookups"]["node_active"] = nodes_connected
net["_lookups"]["branch_active"] = branches_connected
for el, connected_els in els.items():
ft_lookup = get_lookup(net, el, "from_to")
aux_lookup = {table: (ft[0], ft[1], np.sum(connected_els[ft[0]: ft[1]]))
for table, ft in ft_lookup.items() if ft is not None}
from_to_active_lookup = copy.deepcopy(ft_lookup)
count = 0
for table, (f_old, t_old, len_new) in sorted(aux_lookup.items(), key=lambda x: x[1][0]):
from_to_active_lookup[table] = (count, count + len_new)
count += len_new
net["_lookups"]["%s_from_to_active" % el] = from_to_active_lookup
def extract_results_active_pit(net, node_pit, branch_pit, nodes_connected, branches_connected):
"""
Extract the pipeflow results from the internal pit structure ("_active_pit") to the general pit
structure.
:param net: The pandapipes net that the internal structure belongs to
:type net: pandapipesNet
:param node_pit: The internal structure node array
:type node_pit: np.array
:param branch_pit: The internal structure branch array
:type branch_pit: np.array
:param nodes_connected: A mask array stating which nodes are actually connected to the rest of\
the net
:type nodes_connected: np.array
:param branches_connected: A mask array stating which branches are actually connected to the \
rest of the net
:type branches_connected: np.array
:return: No output
"""
if not np.alltrue(nodes_connected):
node_pit[~nodes_connected, PINIT] = np.NaN
node_pit[nodes_connected, :] = net["_active_pit"]["node"]
else:
net["_pit"]["node"] = np.copy(net["_active_pit"]["node"])
if not np.alltrue(branches_connected):
    # mirror the node handling above for the branch results
    branch_pit[~branches_connected, VINIT] = np.NaN
    branch_pit[branches_connected, :] = net["_active_pit"]["branch"]
else:
    net["_pit"]["branch"] = np.copy(net["_active_pit"]["branch"])
import os
import sys
import tqdm
import random
import operator
import tarfile
import zipfile
import logging
import argparse
import fontconfig
import numpy as np
from pathlib import Path
import six.moves.urllib as urllib
import skvideo.io
import skvideo.datasets
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image as PILImage
from PIL import ImageDraw, ImageFont
from fonts.otf import font_files
from IPython.display import display, Image
import tensorflow as tf
from object_detection.utils import ops as utils_ops
from utils import label_map_util
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
system_fonts = fontconfig.query(family='ubuntu', lang='en')
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def load_graph(model_path):
try:
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(model_path.as_posix(), 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
logging.info('Done loading frozen graph from {}'.format(model_path))
return detection_graph
except Exception as err:
logging.error('Error loading frozen graph from {}'.format(model_path))
return None
def build_model(default_graph, session):
tensor_list = ['num_detections', 'detection_boxes', 'detection_scores',
'detection_classes', 'detection_masks']
tensor_dict = {}
with default_graph.as_default():
with session.as_default():
# Get handles to input and output tensors
ops = default_graph.get_operations()
all_tensor_names = {output.name for op in ops for output in op.outputs}
for key in tensor_list:
tensor_name = key + ':0'
if tensor_name in all_tensor_names:
tensor_dict[key] = default_graph.get_tensor_by_name(tensor_name)
image_tensor = default_graph.get_tensor_by_name('image_tensor:0')
return tensor_dict, image_tensor
def load_label_map(label_map_file):
map_loader = label_map_util.create_category_index_from_labelmap
return map_loader(label_map_file.as_posix(), use_display_name=True)
def fetch_images(source_images, ext='.jpg'):
if source_images.is_file():
with source_images.open() as pfile:
test_images = pfile.readlines()
test_images = [t.strip() for t in test_images]
elif source_images.is_dir():
test_images = [img for img in source_images.iterdir()
if img.suffix == ext]
else:
logging.error('Neither an image list'
' nor a directory: {}'.format(source_images))
return None
return test_images
def fetch_frames(vid_path):
try:
vidgen = skvideo.io.vreader(vid_path.as_posix())
return vidgen
except Exception as err:
logging.error('Error parsing video {}, {}'.format(vid_path, err))
return None
def run_inference(sess, output_dict, image_tensor, image):
with sess.as_default():
output_dict = sess.run(output_dict,
feed_dict={image_tensor: np.expand_dims(image,
0)})
output_dict['num_detections'] = int(output_dict['num_detections'][0])
output_dict['detection_classes'] = output_dict[
'detection_classes'][0].astype(np.uint8)
output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
output_dict['detection_scores'] = output_dict['detection_scores'][0]
if 'detection_masks' in output_dict:
output_dict['detection_masks'] = output_dict['detection_masks'][0]
return output_dict
def draw_detections(source_img, bboxes, scores, labels,
label_map, threshold=0.5):
source_img = source_img.convert("RGBA")
draw = ImageDraw.Draw(source_img)
width, height = source_img.size
for bbox, score, label in zip(bboxes, scores, labels):
if score < threshold:
continue
ymin = int(bbox[0] * height)
ymax = int(bbox[2] * height)
xmin = int(bbox[1] * width)
xmax = int(bbox[3] * width)
rect_width = int(min(32, 0.1 * (xmax - xmin)))
font_size = int(min(32, 0.5 * (xmax - xmin)))
draw.rectangle(((xmin, ymin), (xmax, ymax)),
fill=None, outline='red', width=rect_width)
object_string = '{} : {:.1f} %'.format(label_map[label]['name'], 100 * score)
draw.text((xmin, ymax), object_string,
font=ImageFont.truetype(system_fonts[0].file, font_size))
source_img = source_img.convert("RGB")
return source_img
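# Hedged note: detection_boxes follow the TF Object Detection API convention of normalized
# [ymin, xmin, ymax, xmax] coordinates, hence the scaling by height/width above. Worked example:
#   bbox = [0.25, 0.5, 0.75, 1.0] on a 640x480 image -> ymin=120, xmin=320, ymax=360, xmax=640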
def run_detection(video_path, images_path, model_path,
labels_file, destination, im_size, threshold):
is_video = video_path is not None
is_image = images_path is not None
assert operator.xor(is_video, is_image), \
"Señor! Either provide images or video but not both"
if video_path:
writer = skvideo.io.FFmpegWriter(destination.as_posix())
image_gen = fetch_frames(video_path)
if images_path:
image_gen = fetch_images(images_path)
graph = load_graph(model_path)
session = tf.Session(graph=graph)
output_tensors, input_tensor = build_model(graph, session)
labels_map = load_label_map(labels_file)
for image in tqdm.tqdm(image_gen):
if is_image:
pil_image = PILImage.open(image)
pil_image.thumbnail(im_size, PILImage.ANTIALIAS)
np_image = np.array(pil_image).astype(np.uint8)
if is_video:
pil_image = PILImage.fromarray(image)
pil_image.thumbnail(im_size, PILImage.ANTIALIAS)
np_image = np.array(pil_image).astype(np.uint8)
preds = run_inference(session, output_tensors,
input_tensor, np_image)
pil_image = draw_detections(pil_image, preds['detection_boxes'],
preds['detection_scores'],
preds['detection_classes'],
labels_map, threshold=threshold)
if is_video:
writer.writeFrame(
|
np.array(pil_image)
|
numpy.array
|
from __future__ import print_function, division
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import copy
import os
import collections
import string
import matplotlib
# Tommy's code
import jwst_targloc as jtl
# other code
import coords_transform as ct
import least_squares_iterate as lsi
# Header
__author__ = "<NAME>"
__version__ = "1.0"
"""
This script has Target Acquisition functions that are auxiliary to the TA functions in the jwst_targloc.py
script.
* Functions are ordered alphabetically.
** Functions specific to test if the averaging of centroids in pixel space, sky, or individually returns the best
results are located at the end of this file.
"""
# FUNCTIONS AUXILIARY TO JWST CENTROID ALGORITHM
def bg_correction(img, bg_method=None, bg_value=None, bg_frac=None, verbose=True, debug=False):
"""
Subtract a background value from every pixel in the image, based on
the background method (None, Fixed, or Fraction):
- If None, the image is used as-is.
- If Fixed, the given background level (bg_value) is the value
to be subtracted from each pixel in the image.
- If Fraction, the given background level is the fraction (e.g. if
bg_fraction = 0.5, the background is set to the median pixel value
in the image; if bg_fraction = 0.4, 40% of the pixels have data
values less than background, while 60% have data values larger than
background, and implicitly, the top 20% of the data values are
assumed to contain significant counts from astronomical sources,
cosmic rays, or hot pixels. See code).
Args:
img -- Image
bg_method -- Either None value or string: "fixed" or "frac"
bg_value -- Float, fixed value to subtract from each pixel (this has to
be set if bg_method = "fixed")
bg_frac -- Float, fractional value to subtract from image (this has to
be set if bg_method = "frac")
Returns:
img_bgcorr -- The background-subtracted image
Example usage:
>> img_bgcorr = bg_correction(master_img, bg_method='frac', bg_frac=0.4)
"""
# Make sure to return the image as is if None is selected
if bg_method is None:
return img
elif "fix" in bg_method:
# Check that bg_value is defined
if bg_value is None:
print ("(bg_correction): ERROR - Background_method set to 'fixed': bg_value needs to be a float number, got None.")
exit()
master_img_bgcorr = img - bg_value
return master_img_bgcorr
elif "frac" in bg_method:
# Check that bg_value is defined
if bg_frac is None:
print ("(bg_correction): ERROR - Background_method set to 'fractional': bg_frac needs to be a float number, got None.")
exit()
# Find the pixel value (bg) that represents that fraction of the population
img_original = copy.deepcopy(img)
sorted_img = np.sort(np.ravel(img)) # flatten the image and sort it
xsize = np.shape(img)[1]
ysize = np.shape(img)[0]
idx_bg = int(np.floor(bg_frac * xsize * ysize))
# If at the edge, correct
if idx_bg == np.shape(sorted_img)[0]:
idx_bg -= 1
bg = sorted_img[idx_bg]
img_bgcorr = img_original - bg
# Debugging messages
if debug:
print("(bg_correction): xsize = {}, ysize= {}".format(xsize, ysize))
print("(bg_correction): sorted_img = {}".format(sorted_img))
print("(bg_correction): idx_bg = {}".format(idx_bg))
print("(bg_correction): bg = {}".format(bg))
return img_bgcorr
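def _bg_frac_toy_example():
    """Hedged sketch (not part of the original script): illustrates how the 'frac' background
    level is picked in bg_correction, on a tiny 2x2 image."""
    img = np.array([[1., 2.], [3., 4.]])
    bg_frac = 0.5
    sorted_img = np.sort(np.ravel(img))          # [1., 2., 3., 4.]
    idx_bg = int(np.floor(bg_frac * img.size))   # 2
    bg = sorted_img[idx_bg]                      # 3.0, i.e. ~50% of the pixels lie below it
    return img - bg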
def centroid2fulldetector(cb_centroid_list, true_center, detector, perform_avgcorr=True):
"""
Transform centroid coordinates into full detector coordinates.
Args:
cb_centroid_list -- List, centroid window based centroid determined by TA algorithm in
terms of 32 by 32 pixels for centroid window sizes 3, 5, and 7
true_center -- List, actual (true) position of star in terms of full detector
detector -- integer, either 491 or 492
perform_avgcorr -- True or False, perform average Pierre's correction on measurements
Returns:
cb_centroid_list_fulldetector -- List of centroid locations determined with the TA algorithm in
terms of full detector. List is for positions determined with
3, 5, and 7 centroid window sizes.
loleftcoords -- List, Coordinates of the lower left corner of the 32x32 pixel box
true_center32x32 -- List, true center given in coordinates of 32x32 pix
differences_true_TA -- List, difference of true-observed positions
"""
# Get the lower left corner coordinates in terms of full detector. We subtract 16.0 because indexing
# from centroid function starts with 1
corrected_x = true_center[0]
corrected_y = true_center[1]
loleft_x = np.floor(corrected_x) - 16.0
loleft_y = np.floor(corrected_y) - 16.0
loleftcoords = [loleft_x, loleft_y]
# get center in terms of 32x32 cutout
true_center32x32 = [corrected_x-loleft_x, corrected_y-loleft_y]
# Add lower left corner to centroid location to get it in terms of full detector
cb_centroid_list_fulldetector = []
for centroid_location in cb_centroid_list:
centroid_fulldetector_x = centroid_location[0] + loleft_x
centroid_fulldetector_y = centroid_location[1] + loleft_y
centroid_fulldetector = [centroid_fulldetector_x, centroid_fulldetector_y]
cb_centroid_list_fulldetector.append(centroid_fulldetector)
corr_cb_centroid_list = cb_centroid_list_fulldetector
# Correct true centers for average value given by Pier
if perform_avgcorr:
corr_cb_centroid_list = do_Piers_correction(detector, corr_cb_centroid_list)
# Determine difference between center locations
differences_true_TA = []
d3_x = true_center[0] - corr_cb_centroid_list[0][0]
d3_y = true_center[1] - corr_cb_centroid_list[0][1]
d3 = [d3_x, d3_y]
if len(corr_cb_centroid_list) != 1: # make sure this function works even for one centroid window
d5_x = true_center[0] - corr_cb_centroid_list[1][0]
d5_y = true_center[1] - corr_cb_centroid_list[1][1]
d7_x = true_center[0] - corr_cb_centroid_list[2][0]
d7_y = true_center[1] - corr_cb_centroid_list[2][1]
d5 = [d5_x, d5_y]
d7 = [d7_x, d7_y]
diffs = [d3, d5, d7]
else:
diffs = d3
differences_true_TA.append(diffs)
return corr_cb_centroid_list, loleftcoords, true_center32x32, differences_true_TA
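def _centroid2fulldetector_toy_example():
    """Hedged sketch (not part of the original script): a worked example of the cutout-to-full-
    detector transform used in centroid2fulldetector, without Pier's correction."""
    true_center = [1000.3, 897.8]                      # star position in full-detector pixels
    loleft_x = np.floor(true_center[0]) - 16.0         # 984.0
    loleft_y = np.floor(true_center[1]) - 16.0         # 881.0
    cb_centroid_32x32 = [16.4, 16.9]                   # centroid measured inside the 32x32 cutout
    return [cb_centroid_32x32[0] + loleft_x,
            cb_centroid_32x32[1] + loleft_y]           # -> [1000.4, 897.9]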
def compare2ref(case, bench_stars, benchV2, benchV3, stars, V2in, V3in):
"""
This function obtains the differences of the input arrays with the reference or benchmark data.
Args:
case -- string, for example 'Scene2_rapid_real_bgFrac'
bench_stars -- numpy array of the star numbers being used
benchV2 -- numpy array of the benchmark V2s
benchV3 -- numpy array of the benchmark V3s
stars -- list of the star numbers being studied
V2in -- numpy array of measured V2s
V3in -- numpy array of measured V3s
Returns:
4 lists: diffV2, diffV3, bench_V2_list, bench_V3_list
diffV2 = benchmark V2 - measured V2
diffV3 = benchmark V3 - measured V3
bench_V2_list = benchmark V2 converted in same units as input
bench_V3_list = benchmark V3 converted in same units as input
"""
# calculate the differences with respect to the benchmark data
if len(stars) == len(bench_stars): # for the fixed and None background case
diffV2 = benchV2 - V2in
diffV3 = benchV3 - V3in
bench_V2_list = benchV2.tolist()
bench_V3_list = benchV3.tolist()
else: # for the fractional background case
bench_V2_list, bench_V3_list = [], []
diffV2, diffV3 = [], []
for i, s in enumerate(stars):
if s in bench_stars:
j = bench_stars.tolist().index(s)
dsV2 = benchV2[j] - V2in[i]
dsV3 = benchV3[j] - V3in[i]
diffV2.append(dsV2)
diffV3.append(dsV3)
bench_V2_list.append(benchV2[j])
bench_V3_list.append(benchV3[j])
diffV2 = np.array(diffV2)
diffV3 = np.array(diffV3)
return diffV2, diffV3, bench_V2_list, bench_V3_list
def convert2MSAcenter(xin, yin, xtin, ytin, arcsec):
"""
This function is a Python translation of Tony's equivalent IDL function. It converts the
measured coordinates of each star into the frame relative to the center of the MSA.
Args:
xin: numpy array of the measured V2s
yin: numpy array of the measured V3s
xtin: numpy array of true V2s
ytin: numpy array of true V3s
Returns:
4 numpy arrays: x, y, xt, yt - Corrected arrays
"""
# Center coordinates of NIRSpec V2, V3
x0_XAN = 376.769 # V2 in arcsec
y0_YAN = -428.453 # V3 in arcsec
# measured V2 V3 in degrees
if arcsec:
x0 = x0_XAN # V2 to XAN, in arcsec
y0_YANd = y0_YAN # intermediate conversion: V3 in arcsec
y0 = -y0_YANd -468.0 # convert V3 arcsec to YAN
else:
x0 = x0_XAN/3600. # conversion of V2 to XAN in degrees
y0_YANd = y0_YAN/3600. # intermediate conversion: V3 arcsec to V3 degrees=-0.119015
y0 = -y0_YANd -0.13 # convert V3 degrees to YAN=+0.249015
# convert inputs to MSA center
x = xin - x0
y = yin - y0
xt = xtin - x0
yt = ytin - y0
return x, y, xt, yt
def display_centroids(detector, st, case, psf, corr_true_center_centroid,
corr_cb_centroid_list, show_disp, vlims=None, savefile=False,
fig_name=None, redos=False, display_master_img=False):
"""
This function displays the centroids for the 32x32 pixel cutout images, showing
the true position as well as the measured centroids.
Args:
detector -- integer, either 491 or 492
st -- integer, star number
case -- string, for example 'Scene2_rapid_real_bgFrac'
psf -- numpy array of shape (3, 32, 32) -- cutout of 3 ramped images
corr_true_center_centroid -- list of x and y true pixel positions
corr_cb_centroid_list -- list of 3 lists, x and y pixel positions for centroiding window 3, 5, and 7
show_disp -- True or False, show the 32x32 image with true center and measured positions
vlims -- tuple, example: (10.0, 50.0)
savefile -- True or False, save or not the image as a .jpg
fig_name -- string, name for the figure
redos -- True or False, use or not the directories with _redo
display_master_img -- True or False, show the initial image (before background subtraction)
Returns:
Nothing.
"""
if isinstance(st, int):
fig_title = "star_"+str(st)+"_"+case
else:
fig_title = st
if vlims is None:
vlims = (10, 50)
if display_master_img is not False:
# Display original image.
_, ax = plt.subplots(figsize=(8, 8))
ax.set_title(fig_title+"_original")
ax.autoscale(enable=False, axis='both')
ax.imshow(display_master_img, cmap='gray', interpolation='nearest')
ax.set_ylim(0.0, np.shape(display_master_img)[0])
ax.set_xlim(0.0, np.shape(display_master_img)[1])
ax.imshow(display_master_img, cmap='gray', interpolation='nearest', vmin=vlims[0], vmax=vlims[1])
# Add plot of measured centroids
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 12}
matplotlib.rc('font', **font)
fig, ax = plt.subplots(figsize=(12, 12))
ax.set_title(fig_title)
ax.autoscale(enable=False, axis='both')
ax.imshow(psf, cmap='gray', interpolation='nearest')
ax.set_ylim(-1.0, np.shape(psf)[0])
ax.set_xlim(-1.0, np.shape(psf)[1])
ax.set_ylabel("Pixel y-position")
ax.set_xlabel("Pixel x-position")
# the -1.0 in all the measurements and true positions is to bring back numbers to python index
ax.plot(corr_cb_centroid_list[0][0]-1.0, corr_cb_centroid_list[0][1]-1.0, marker='^', ms=19, mec='cyan', mfc='blue', ls='', label='CentroidWin=3')
plt.vlines(15.0, 0.0, 31.5, colors='y', linestyles='dashed')
plt.hlines(15.0, 0.0, 31.5, colors='y', linestyles='dashed')
if len(corr_cb_centroid_list) != 1:
ax.plot(corr_cb_centroid_list[1][0]-1.0, corr_cb_centroid_list[1][1]-1.0, marker='o', ms=17, mec='black', mfc='green', ls='', label='CentroidWin=5')
ax.plot(corr_cb_centroid_list[2][0]-1.0, corr_cb_centroid_list[2][1]-1.0, marker='*', ms=19, mec='black', mfc='red', ls='', label='CentroidWin=7')
if corr_true_center_centroid != [0.0, 0.0]: # plot only if center is defined
ax.plot(corr_true_center_centroid[0]-1.0, corr_true_center_centroid[1]-1.0, marker='o', ms=12, mec='black', mfc='yellow', ls='', label='True Centroid')
# Shrink current axis by 10%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.9, box.height])
ax.legend(loc='upper right', bbox_to_anchor=(1.26, 1.0), prop={"size":"small"}) # put legend out of the plot box
ax.imshow(psf, cmap='gray', interpolation='nearest', vmin=vlims[0], vmax=vlims[1])
if show_disp:
plt.show()
else:
plt.close('all')
if savefile:
if fig_name is None:
# define the path for the simulated data
path4fig = "../PFforMaria/detector_"+str(detector)+"_centroid_figs"
if "scene1" in fig_title:
if "slow" in fig_title:
if "real" in fig_title:
in_dir = "Scene1_slow_real"
else:
in_dir = "Scene1_slow_nonoise"
elif "rapid" in fig_title:
if "real" in fig_title:
in_dir = "Scene1_rapid_real"
else:
in_dir = "Scene1_rapid_nonoise"
if "scene2" in fig_title:
if "slow" in fig_title:
if "real" in fig_title:
in_dir = "Scene2_slow_real"
else:
in_dir = "Scene2_slow_nonoise"
elif "rapid" in fig_title:
if "real" in fig_title:
in_dir = "Scene2_rapid_real"
else:
in_dir = "Scene2_rapid_nonoise"
fig_name = path4fig+in_dir+"/"+fig_title+".jpg"
if redos:
fig_name = path4fig+"_redo/"+in_dir+"_redo/"+fig_title+"_redo.jpg"
# if the name is defined then use it
fig.savefig(fig_name)
print ("Figure ", fig_name, " was saved!")
def display_ns_psf(image, vlim=(), fsize=(8, 8), interp='nearest', title='',
cmap='gray', extent=None, savefile=None, cb=False):
"""
Custom display a PSF generated with WEBBPSF or similar tool.
A quick tool for displaying NIRSpec images in native size
(2048x2048) with additional options for display.
Args:
image -- A 2D image to display
vlim -- The image range (in terms of image counts) to display.
Defaults to empty (), displaying full spectrum.
fsize -- Figure image size (in cm?)
interp -- Interpolation type. Defaults to 'nearest'.
title -- Title for plot. Defaults to ''.
cmap -- Color map for plot. Defaults to 'gray'.
cb -- Color bar toggle. Defaults to 'False'.
savefile -- Figure save toggle. Defaults to 'None'. A filename
(with directory structure) can be entered to save to
given filename.
Returns:
Nothing.
"""
# Display PSF (oversampled and detector levels)
fig, ax = plt.subplots(figsize=fsize)
ax.set_title(title)
ax.set_aspect('equal')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_ylim(0.0, np.shape(image)[0])
if vlim == ():
vlim = (image.min(), image.max())
if extent is not None:
cax = ax.imshow(image, cmap=cmap, interpolation=interp, vmin=vlim[0], \
extent=extent-.5, vmax=vlim[1])
else:
cax = ax.imshow(image, cmap=cmap, interpolation=interp, vmin=vlim[0], vmax=vlim[1])
if cb: fig.colorbar(cax, ax=ax, shrink=0.8)
# See plots when not in Notebook environment
plt.show()
if savefile is not None:
fig.savefig(savefile)
def do_Piers_correction(detector, cb_centroid_list):
""" This function performs the average correction found by Pierre in each of
the x and y measured centroids.
Args:
detector -- integer, either 491 or 492
cb_centroid_list -- list of 3 lists (measured centroids for centroid window sizes 3, 5, and 7)
Returns:
List of 3 lists corresponding to corrected centroids for centroid window sizes 3, 5, and 7
"""
xy3, xy5, xy7 = cb_centroid_list
xy3corr = Pier_correction(detector, xy3)
xy5corr = Pier_correction(detector, xy5)
xy7corr = Pier_correction(detector, xy7)
corr_cb_centroid_list = [xy3corr, xy5corr, xy7corr]
return corr_cb_centroid_list
def find_centroid(fits_file, bg_corr_info, recursive_centroids_info, display_centroids_info, x_centroids, y_centroids,
fits_names, output_file_path, centroids_info, verbose=True):
""" This function reads the image, finds the centroid, and displays the result.
It returns the centroid values.
Args:
fits_file -- name of the fits file being studied
bg_corr_info -- list of the information concerning background subtraction
recursive_centroids_info -- list of information for running centroid algorithm
display_centroids_info -- list of information to show and display the centroids
x_centroids -- list of centroids for centroid window sizes of 3, 5, and 7 for x position
y_centroids -- list of centroids for centroid window sizes of 3, 5, and 7 for y position
fits_names -- list to which the studied files are appended (so that it ends up being the same
length as the list of the measured centroids -- in case of fractional background)
output_file_path -- path for the output file
centroids_info -- list of the information concerning the true centroids, the output in full
detector coordinates, and the on-screen measured centroids
Returns:
x_centroids = list of 3 lists corresponding to pixel x-positions for centroid window sizes 3, 5, and 7
y_centroids = list of 3 lists corresponding to pixel y-positions for centroid window sizes 3, 5, and 7
"""
# unfold information
backgnd_subtraction_method, background_method, bg_value, bg_frac, debug = bg_corr_info
xwidth_list, ywidth_list, centroid_win_size, max_iter, threshold, determine_moments, display_master_img, vlim = recursive_centroids_info
true_center, output_full_detector, show_centroids, perform_avgcorr = centroids_info
case, show_disp, save_centroid_disp = display_centroids_info
x_centroids3, y_centroids3 = x_centroids[0], y_centroids[0]
x_centroids5, y_centroids5 = x_centroids[1], y_centroids[1]
x_centroids7, y_centroids7 = x_centroids[2], y_centroids[2]
# get detector and name of base name of each fits file
ff = os.path.basename(fits_file)
ff1 = ff.split("_")
detector = ff1[2]
#fits_trial = ff1[1]
#fits_base = ff1[0]
# Read FITS image
#img = fits.open(fits_file)
#img.info()
#input()
#hdr = fits.getheader(fits_file, 0)
#print("** HEADER:", hdr)
master_img = fits.getdata(fits_file, 0)
if verbose:
print ('Master image shape: ', np.shape(master_img))
# Obtain the combined FITS image that combines all frames into one image AND
# check whether the image is all zeros; if so, take the frame that still has a max value
psf = readimage(master_img, backgnd_subtraction_method, bg_method=background_method,
bg_value=bg_value, bg_frac=bg_frac, debug=debug)
cb_centroid_list_in32x32pix = run_recursive_centroids(psf, bg_frac, xwidth_list, ywidth_list,
centroid_win_size, max_iter, threshold,
determine_moments, verbose, debug)
cb_centroid_list, loleftcoords, true_center32x32, differences_true_TA = centroid2fulldetector(cb_centroid_list_in32x32pix,
true_center, detector, perform_avgcorr)
if not output_full_detector:
cb_centroid_list = cb_centroid_list_in32x32pix
if show_centroids:
print ('***** Measured centroids:')
print (' cb_centroid_list = ', cb_centroid_list)
#print (' True center = ', true_center)
x_centroids3.append(cb_centroid_list[0][0])
y_centroids3.append(cb_centroid_list[0][1])
if len(xwidth_list) != 1:
x_centroids5.append(cb_centroid_list[1][0])
y_centroids5.append(cb_centroid_list[1][1])
x_centroids7.append(cb_centroid_list[2][0])
y_centroids7.append(cb_centroid_list[2][1])
x_centroids = [x_centroids3, x_centroids5, x_centroids7]
y_centroids = [y_centroids3, y_centroids5, y_centroids7]
# Show the display with the measured and true positions
ff = ff.replace(".fits", "")
fits_names.append(ff)
fig_name = os.path.join(output_file_path, ff+".jpg")
# Display the combined FITS image that combines all frames into one image
m_img = display_master_img
if display_master_img:
m_img = readimage(master_img, backgnd_subtraction_method=None, bg_method=None,
bg_value=None, bg_frac=None, debug=False)
if true_center == [0.0, 0.0]:
true_center32x32 = [0.0, 0.0]
display_centroids(detector, ff, case, psf, true_center32x32, cb_centroid_list_in32x32pix,
show_disp, vlim, savefile=save_centroid_disp, fig_name=fig_name, display_master_img=m_img)
return x_centroids, y_centroids
def find_std(arr):
"""
This function determines the standard deviation of the given array.
Args:
arr = numpy array for which the standard deviation and means are to be determined
Returns:
std = standard deviation of the given array
mean = mean value of the given array
Usage:
import TA_functions as taf
std, mean = taf.find_std(y_positions)
"""
N = float(len(arr))
mean = sum(arr) / N
diff2meansq_list = []
for a in arr:
diff = a - mean
diffsq = diff * diff
diff2meansq_list.append(diffsq)
std = (1.0 / N * sum(diff2meansq_list)) ** 0.5
#print ('sigma = ', std, ' mean = ', mean)
return std, mean
def get_frac_stdevs(frac_data):
"""
This function obtains the standard deviation and means for centroid windows 3, 5, and 7 in the case
of running a fractional background study.
Args:
frac_data: list of numpy arrays (corresponding to each fractional value)
Returns:
standard deviations and means
"""
sig3, mean3 = [], []
sig5, mean5 = [], []
sig7, mean7 = [], []
for f in frac_data:
s3, m3 = find_std(f[1])
s5, m5 = find_std(f[3])
s7, m7 = find_std(f[5])
sig3.append(s3)
sig5.append(s5)
sig7.append(s7)
mean3.append(m3)
mean5.append(m5)
mean7.append(m7)
return sig3, mean3, sig5, mean5, sig7, mean7
def get_mindiff(d1, d2, d3):
"""
This function determines the minimum difference from centroid window sizes 3, 5, and 7,
and counts the number of repetitions.
Args:
d1: list of differences of measured values with respect to true for centroid window size 3
d2: list of differences of measured values with respect to true for centroid window size 5
d3: list of differences of measured values with respect to true for centroid window size 7
Returns:
min_diff = the minimum difference centroid window
counter = a collections.Counter mapping centroid window sizes to their repetitions in order of most
repeated to least, example: Counter({7: 13, 5: 8, 3: 2})
"""
min_diff = []
for i, _ in enumerate(d1):
diffs_list = [d1[i], d2[i], d3[i]]
md = min(diffs_list)
if md == d1[i]:
m_diff = 3
elif md == d2[i]:
m_diff = 5
elif md == d3[i]:
m_diff = 7
min_diff.append(m_diff)
counter=collections.Counter(min_diff)
return min_diff, counter
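# Illustrative sketch of get_mindiff with hypothetical difference lists:
# d1, d2, d3 = [0.10, 0.02], [0.05, 0.04], [0.01, 0.08]
# min_diff, counter = get_mindiff(d1, d2, d3)
# gives min_diff = [7, 3] and counter = Counter({7: 1, 3: 1})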
def get_raw_star_directory(path4starfiles, scene, shutters, noise, redo=True):
"""
This function returns a list of the directories (positions 1 and 2) to be studied.
Possible paths to Scenes 1 and 2 directories in directory sim_scenes:
path_scene1_rapid = "Scene_1_AB23/NRSRAPID_TA_real_Sim_2017222173523/postage_redo10"
path_scene1_rapid_shifted = "Scene_1_AB23/NRSRAPID_TA_real_Sim_2017222173639/postage_redo_shifted"
Args:
path4starfiles -- string, path to get to the files
scene -- integer, either 1 or 2
shutters -- string, shutter velocity: 'rapid' or 'slow'
noise -- string, noise level: 'nonoise' or 'real'
redo -- True or False, go to (or not) to the directories that have a _redo at the end
Returns:
dir2test_list = A list of strings with the paths to position files 1 and 2
"""
"""
### This section will be commented out since the case has been narrowed down to NRSRAPID real for the new TA scripts.
# define shutter velocity to be used
shutter_vel = "NRS" # for slow case
if shutters == "rapid":
shutter_vel = "NRSRAPID"
# define noise level
noise_level = " no_noise"
if noise == "real":
noise_level = ""
"""
# define directory path for scenario 1
position1 = path4starfiles+"Scene_"+repr(scene)+"_AB23/NRSRAPID_TA_real_Sim_2017222173523/postage_redo10"
position2 = path4starfiles+"Scene_"+repr(scene)+"_AB23/NRSRAPID_TA_real_Sim_2017222173639/postage_redo10_shifted"
if scene == 2:
print("Scene 2 is not being used for this exercise. Exiting.")
exit()
"""
### This section will be commented out since the case has been narrowed down to NRSRAPID real for the new TA scripts.
position1 = path4starfiles+"Scene_"+repr(scene)+"_AB1823/NIRSpec_TA_Sim_AB1823 first "+shutter_vel+noise_level+"/postage"
position2 = path4starfiles+"Scene_"+repr(scene)+"_AB1823/NIRSpec_TA_Sim_AB1823 shifted "+shutter_vel+noise_level+"/postage"
if redo:
position1 += "_redo"
position2 += "_redo"
"""
dir2test_list = [position1, position2]
return dir2test_list
def Pier_correction(detector, XandYarr):
""" This function corrects the measured centroids for the average values.
Args:
Pier_corr -- Perform average correction suggested by Pier: True or False
Returns:
cb_centroid_list -- List, values corrected for Pier's values
"""
# Corrections for offsets in positions (see section 2.5 of Technical Notes in Documentation directory)
offset_491 = (-0.086, -0.077)
offset_492 = (0.086, 0.077)
corrected_x = XandYarr[0]
corrected_y = XandYarr[1]
if detector == 491:
corrected_x = XandYarr[0] + offset_491[0]
corrected_y = XandYarr[1] + offset_491[1]
elif detector == 492:
corrected_x = XandYarr[0] + offset_492[0]
corrected_y = XandYarr[1] + offset_492[1]
corr_XandYarr = [corrected_x, corrected_y]
return corr_XandYarr
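# Worked example of the correction arithmetic (uses the offset_491 values defined above):
# Pier_correction(491, [16.0, 16.0]) returns [16.0 - 0.086, 16.0 - 0.077] = [15.914, 15.923]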
def plot_offsets(plot_title, offsets, sigmas, means, bench_star, destination,
plot_type='.jpg', save_plot=False, show_plot=False, xlims=None, ylims=None,
Nsigma=None):
"""
This function plots the x and y-offsets in pixel space.
Args:
plot_title -- string, plot title
offsets -- numpy array of 6 columns corresponding to x and y of centroid windows 3, 5, and 7
sigmas -- list, standard deviations in the y-direction for centroid windows 3, 5, and 7
means -- list, means in the y-direction for centroid windows 3, 5, and 7
bench_star -- list, star numbers being analyzed
destination -- string, path to save the figure
plot_type -- string, type of image to be saved (jpg has best resolution)
save_plot -- True or False, save the plot in given destination
show_plot -- True or False, display the plot on screen
xlims -- list, min and max x-axis values for plot
ylims -- list, min and max y-axis values for plot
Nsigma -- float or integer, number of sigmas to reject
Returns:
Statement that plot has been saved or nothing.
"""
fig1 = plt.figure(1, figsize=(12, 10))
ax1 = fig1.add_subplot(111)
plt.title(plot_title)
plt.xlabel('Residual offset in X [pixels]')
plt.ylabel('Residual offset in Y [pixels]')
plt.plot(offsets[0], offsets[1], 'b^', ms=8, alpha=0.7, label='Centroid window=3')
plt.plot(offsets[2], offsets[3], 'go', ms=8, alpha=0.7, label='Centroid window=5')
plt.plot(offsets[4], offsets[5], 'r*', ms=10, alpha=0.7, label='Centroid window=7')
if xlims is None:
xmin, xmax = ax1.get_xlim()
else:
xmin, xmax = xlims[0], xlims[1]
plt.xlim(xmin, xmax)
plt.hlines(0.0, xmin, xmax*2, colors='k', linestyles='dashed')
if ylims is None:
ymin, ymax = ax1.get_ylim()
else:
ymin, ymax = ylims[0], ylims[1]
plt.ylim(ymin, ymax)
plt.vlines(0.0, ymin, ymax*2, colors='k', linestyles='dashed')
# Shrink current axis by 10%
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.85, box.height])
ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # put legend out of the plot box
sigx3, sigx5, sigx7, sigy3, sigy5, sigy7 = sigmas
meanx3, meanx5, meanx7, meany3, meany5, meany7 = means
textinfig3x = r'x$\sigma3$ = %0.2f x$\mu3$ = %0.2f' % (sigx3, meanx3)
textinfig5x = r'x$\sigma5$ = %0.2f x$\mu5$ = %0.2f' % (sigx5, meanx5)
textinfig7x = r'x$\sigma7$ = %0.2f x$\mu7$ = %0.2f' % (sigx7, meanx7)
textinfig3y = r'y$\sigma3$ = %0.2f y$\mu3$ = %0.2f' % (sigy3, meany3)
textinfig5y = r'y$\sigma5$ = %0.2f y$\mu5$ = %0.2f' % (sigy5, meany5)
textinfig7y = r'y$\sigma7$ = %0.2f y$\mu7$ = %0.2f' % (sigy7, meany7)
ax1.annotate(textinfig3x, xy=(1.02, 0.35), xycoords='axes fraction' )
ax1.annotate(textinfig5x, xy=(1.02, 0.32), xycoords='axes fraction' )
ax1.annotate(textinfig7x, xy=(1.02, 0.29), xycoords='axes fraction' )
ax1.annotate(textinfig3y, xy=(1.02, 0.24), xycoords='axes fraction' )
ax1.annotate(textinfig5y, xy=(1.02, 0.21), xycoords='axes fraction' )
ax1.annotate(textinfig7y, xy=(1.02, 0.18), xycoords='axes fraction' )
y_reject = [-1.0, 1.0]
x_reject = [-1.0, 1.0]
if Nsigma is not None:
# perform sigma-clipping
y_reject = [-Nsigma, Nsigma]
x_reject = [-Nsigma, Nsigma]
for si,xi,yi in zip(bench_star, offsets[0], offsets[1]):
if yi>=y_reject[1] or yi<=y_reject[0] or xi>=x_reject[1] or xi<=x_reject[0]:
si = int(si)
subxcoord = 5
subycoord = 0
side = 'left'
plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')
if save_plot:
fig1.savefig(destination)
print ("\n Plot saved: ", destination)
if show_plot:
plt.show()
else:
plt.close('all')
def plot_offsets_frac(plot_title, frac_bgs, frac_data, sigmas, means, bench_star, destination,
save_plot=False, show_plot=False, xlims=None, ylims=None):
"""
This function plots the x and y-offsets in pixel space for fractional background case.
Args:
plot_title -- string, plot title
frac_bgs -- list, fractional backgrounds to be plotted
frac_data -- list of lists, each containing x and y of centroid windows 3, 5, and 7
sigmas -- list, standard deviations in the y-direction for centroid windows 3, 5, and 7
means -- list, means in the y-direction for centroid windows 3, 5, and 7
bench_star -- list, star numbers being analyzed
destination -- string, path to save the figure
save_plot -- True or False, save the plot in given destination
show_plot -- True or False, display the plot on screen
xlims -- list, min and max x-axis values for plot
ylims -- list, min and max y-axis values for plot
Returns:
Statement that plot has been saved or nothing.
"""
# unfold variables
frac00, frac01, frac02, frac03, frac04, frac05, frac06, frac07, frac08, frac09, frac10 = frac_data
sig3, sig5, sig7 = sigmas
mean3, mean5, mean7 = means
# create the plot for centroid window 3
fig2 = plt.figure(1, figsize=(12, 10))
fig2.subplots_adjust(hspace=0.30)
ax1 = fig2.add_subplot(311)
ax1.set_title(plot_title)
ax1.set_xlabel('Offset in X: Centroid window=3')
ax1.set_ylabel('Offset in Y: Centroid window=3')
ax1.plot(frac00[0], frac00[1], 'bo', ms=6, alpha=0.7, label='bg_frac=0.0')
ax1.plot(frac01[0], frac01[1], 'g^', ms=8, alpha=0.7, label='bg_frac=0.1')
ax1.plot(frac02[0], frac02[1], 'mo', ms=8, alpha=0.7, label='bg_frac=0.2')
ax1.plot(frac03[0], frac03[1], 'r*', ms=10, alpha=0.7, label='bg_frac=0.3')
ax1.plot(frac04[0], frac04[1], 'ks', ms=6, alpha=0.7, label='bg_frac=0.4')
ax1.plot(frac05[0], frac05[1], 'y<', ms=8, alpha=0.7, label='bg_frac=0.5')
ax1.plot(frac06[0], frac06[1], 'c>', ms=8, alpha=0.7, label='bg_frac=0.6')
ax1.plot(frac07[0], frac07[1], 'b+', ms=10, alpha=0.7, label='bg_frac=0.7')
ax1.plot(frac08[0], frac08[1], 'rd', ms=8, alpha=0.7, label='bg_frac=0.8')
ax1.plot(frac09[0], frac09[1], 'm*', ms=5, alpha=0.7, label='bg_frac=0.9')
ax1.plot(frac10[0], frac10[1], 'kx', ms=5, alpha=0.7, label='bg_frac=1.0')
if xlims is None:
xmin, xmax = ax1.get_xlim()
else:
xmin, xmax = xlims[0], xlims[1]
plt.hlines(0.0, xmin, xmax, colors='k', linestyles='dashed')
if ylims is None:
ymin, ymax = ax1.get_ylim()
else:
ymin, ymax = ylims[0], ylims[1]
plt.vlines(0.0, ymin, ymax, colors='k', linestyles='dashed')
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
for si,xi,yi in zip(bench_star, frac00[0], frac00[1]):
if yi>=1.0 or yi<=-1.0 or xi>=1.0 or xi<=-1.0:
si = int(si)
subxcoord = 5
subycoord = 0
side = 'left'
plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')
# Shrink current axis by 10%
box = ax1.get_position()
ax1.set_position([box.x0, box.y0, box.width * 0.9, box.height])
ax1.legend(loc='center left', bbox_to_anchor=(1, 0.5)) # put legend out of the plot box
# create the plot for centroid window 5
ax2 = fig2.add_subplot(312)
ax2.set_xlabel('Offset in X: Centroid window=5')
ax2.set_ylabel('Offset in Y: Centroid window=5')
ax2.plot(frac00[2], frac00[3], 'bo', ms=6, alpha=0.7, label='bg_frac=0.0')
ax2.plot(frac01[2], frac01[3], 'g^', ms=8, alpha=0.7, label='bg_frac=0.1')
ax2.plot(frac02[2], frac02[3], 'mo', ms=8, alpha=0.7, label='bg_frac=0.2')
ax2.plot(frac03[2], frac03[3], 'r*', ms=10, alpha=0.7, label='bg_frac=0.3')
ax2.plot(frac04[2], frac04[3], 'ks', ms=6, alpha=0.7, label='bg_frac=0.4')
ax2.plot(frac05[2], frac05[3], 'y<', ms=8, alpha=0.7, label='bg_frac=0.5')
ax2.plot(frac06[2], frac06[3], 'c>', ms=8, alpha=0.7, label='bg_frac=0.6')
ax2.plot(frac07[2], frac07[3], 'b+', ms=10, alpha=0.7, label='bg_frac=0.7')
ax2.plot(frac08[2], frac08[3], 'rd', ms=8, alpha=0.7, label='bg_frac=0.8')
ax2.plot(frac09[2], frac09[3], 'm*', ms=5, alpha=0.7, label='bg_frac=0.9')
ax2.plot(frac10[2], frac10[3], 'kx', ms=5, alpha=0.7, label='bg_frac=1.0')
plt.hlines(0.0, xmin, xmax, colors='k', linestyles='dashed')
plt.vlines(0.0, ymin, ymax, colors='k', linestyles='dashed')
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
textinfig = r'BG y$\sigma$3 y$\sigma$5 y$\sigma$7'
ax2.annotate(textinfig, xy=(1.02, 0.90), xycoords='axes fraction' )
sigx = 1.02
sigy = 0.9
for fbg, s3, s5, s7 in zip(frac_bgs, sig3, sig5, sig7):
line = ('{:<7} {:<6.2f} {:<6.2f} {:<6.2f}'.format(fbg, s3, s5, s7))
sigy -= 0.08
ax2.annotate(line, xy=(sigx, sigy), xycoords='axes fraction' )
for si,xi,yi in zip(bench_star, frac00[0], frac00[1]):
if yi>=1.0 or yi<=-1.0 or xi>=1.0 or xi<=-1.0:
si = int(si)
subxcoord = 5
subycoord = 0
side = 'left'
plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')
# Shrink current axis by 10%
box = ax2.get_position()
ax2.set_position([box.x0, box.y0, box.width * 0.9, box.height])
# create the plot for centroid window 7
ax3 = fig2.add_subplot(313)
ax3.set_xlabel('Offset in X: Centroid window=7')
ax3.set_ylabel('Offset in Y: Centroid window=7')
ax3.plot(frac00[4], frac00[5], 'bo', ms=6, alpha=0.7, label='bg_frac=0.0')
ax3.plot(frac01[4], frac01[5], 'g^', ms=8, alpha=0.7, label='bg_frac=0.1')
ax3.plot(frac02[4], frac02[5], 'mo', ms=8, alpha=0.7, label='bg_frac=0.2')
ax3.plot(frac03[4], frac03[5], 'r*', ms=10, alpha=0.7, label='bg_frac=0.3')
ax3.plot(frac04[4], frac04[5], 'ks', ms=6, alpha=0.7, label='bg_frac=0.4')
ax3.plot(frac05[4], frac05[5], 'y<', ms=8, alpha=0.7, label='bg_frac=0.5')
ax3.plot(frac06[4], frac06[5], 'c>', ms=8, alpha=0.7, label='bg_frac=0.6')
ax3.plot(frac07[4], frac07[5], 'b+', ms=10, alpha=0.7, label='bg_frac=0.7')
ax3.plot(frac08[4], frac08[5], 'rd', ms=8, alpha=0.7, label='bg_frac=0.8')
ax3.plot(frac09[4], frac09[5], 'm*', ms=5, alpha=0.7, label='bg_frac=0.9')
ax3.plot(frac10[4], frac10[5], 'kx', ms=5, alpha=0.7, label='bg_frac=1.0')
plt.hlines(0.0, xmin, xmax, colors='k', linestyles='dashed')
plt.vlines(0.0, ymin, ymax, colors='k', linestyles='dashed')
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
for si,xi,yi in zip(bench_star, frac00[0], frac00[1]):
if yi>=1.0 or yi<=-1.0 or xi>=1.0 or xi<=-1.0:
si = int(si)
subxcoord = 5
subycoord = 0
side = 'left'
plt.annotate('{}'.format(si), xy=(xi,yi), xytext=(subxcoord, subycoord), ha=side, textcoords='offset points')
# Shrink current axis by 10%
textinfig = r'BG y$\mu$3 y$\mu$5 y$\mu$7'
ax3.annotate(textinfig, xy=(1.02, 0.90), xycoords='axes fraction' )
sigx = 1.02
sigy = 0.9
for fbg, m3, m5, m7 in zip(frac_bgs, mean3, mean5, mean7):
line = ('{:<7} {:<6.2f} {:<6.2f} {:<6.2f}'.format(fbg, m3, m5, m7))
sigy -= 0.08
ax3.annotate(line, xy=(sigx, sigy), xycoords='axes fraction' )
box = ax3.get_position()
ax3.set_position([box.x0, box.y0, box.width * 0.9, box.height])
if save_plot:
fig2.savefig(destination)
print ("\n Plot saved: ", destination)
if show_plot:
plt.show()
else:
plt.close('all')
def plot_zoomin(plot_title, offsets_list, bench_star, destination,
plot_type='.jpg', save_plot=False, show_plot=False, xlims=[-1., 1.], ylims=[-1., 1.],
Nsigma=None):
"""
This function plots a zoom-in region of the offsets, keeping only 'good' stars.
Args:
bench_star -- list, star numbers being analyzed
offsets_list -- list of 6 columns corresponding to x and y of centroid windows 3, 5, and 7
plot_title -- string, plot title
destination -- string, path to save the figure
plot_type -- string, type of image to be saved (jpg has best resolution)
save_plot -- True or False, save the plot in given destination
show_plot -- True or False, display the plot on screen
xlims -- list, min and max x-axis values for plot
ylims -- list, min and max y-axis values for plot
Returns:
Statement that plot has been saved or nothing.
"""
# Copy all stars and offsets in order to remove 'bad' stars
if Nsigma is not None:
Nsigma_results = Nsigma_rejection(Nsigma, np.array(offsets_list[0]), np.array(offsets_list[1]), max_iterations=10)
sigx3, meanx3, sigy3, meany3, x_new3, y_new3, _, _, _ = Nsigma_results
Nsigma_results = Nsigma_rejection(Nsigma, np.array(offsets_list[2]), np.array(offsets_list[3]), max_iterations=10)
sigx5, meanx5, sigy5, meany5, x_new5, y_new5, _, _, _ = Nsigma_results
Nsigma_results = Nsigma_rejection(Nsigma, np.array(offsets_list[4]), np.array(offsets_list[5]), max_iterations=10)
sigx7, meanx7, sigy7, meany7, x_new7, y_new7, _, _, _ = Nsigma_results
good_offsets = [x_new3, y_new3, x_new5, y_new5, x_new7, y_new7]
good_stars_only = []
for i, xi in enumerate(offsets_list[0]):
if xi in x_new3:
good_stars_only.append(bench_star[i])
#xlims, ylims = [-0.15, 0.15], [-0.15, 0.15]
Nsigma = sigy3*Nsigma
else:
good_stars_only = copy.deepcopy(bench_star)
good_offsets_list = copy.deepcopy(offsets_list)
for i, s in enumerate(bench_star):
if offsets_list[0][i]>=1.1 or offsets_list[0][i]<=-1.1 or offsets_list[1][i]>=1.1 or offsets_list[1][i]<=-1.1:
idx2remove = good_stars_only.index(s)
# items must be removed from all columns at the same time to avoid removing wrong item
good_stars_only.pop(idx2remove)
good_offsets_list[0].pop(idx2remove)
good_offsets_list[1].pop(idx2remove)
good_offsets_list[2].pop(idx2remove)
good_offsets_list[3].pop(idx2remove)
good_offsets_list[4].pop(idx2remove)
good_offsets_list[5].pop(idx2remove)
good_offsets = np.array(good_offsets_list)
sigx3, meanx3 = find_std(good_offsets[0])
sigx5, meanx5 = find_std(good_offsets[2])
sigx7, meanx7 = find_std(good_offsets[4])
sigy3, meany3 = find_std(good_offsets[1])
sigy5, meany5 = find_std(good_offsets[3])
sigy7, meany7 = find_std(good_offsets[5])
sigmas = [sigx3, sigx5, sigx7, sigy3, sigy5, sigy7]
means = [meanx3, meanx5, meanx7, meany3, meany5, meany7]
plot_title = plot_title+'_zoomin'
plot_offsets(plot_title, good_offsets, sigmas, means, good_stars_only, destination,
plot_type='.jpg', save_plot=save_plot, show_plot=show_plot, xlims=xlims, ylims=ylims,
Nsigma=Nsigma)
def plot_zoomin_frac(plot_title, frac_bgs, frac_data, bench_star, destination,
save_plot=False, show_plot=False, xlims=[-1., 1.], ylims=[-1., 1.]):
"""
This function plots zoom-in of the x and y-offsets in pixel space for fractional background case.
Args:
plot_title -- string, plot title
frac_bgs -- list, fractional backgrounds to be plotted
frac_data -- list of lists, each containing x and y of centroid windows 3, 5, and 7
bench_star -- list, star numbers being analyzed
destination -- string, path to save the figure
save_plot -- True or False, save the plot in given destination
show_plot -- True or False, display the plot on screen
xlims -- list, min and max x-axis values for plot
ylims -- list, min and max y-axis values for plot
Returns:
Statement that plot has been saved or nothing.
"""
frac00, frac01, frac02, frac03, frac04, frac05, frac06, frac07, frac08, frac09, frac10 = frac_data
frac00 = frac00.tolist()
frac01 = frac01.tolist()
frac02 = frac02.tolist()
frac03 = frac03.tolist()
frac04 = frac04.tolist()
frac05 = frac05.tolist()
frac06 = frac06.tolist()
frac07 = frac07.tolist()
frac08 = frac08.tolist()
frac09 = frac09.tolist()
frac10 = frac10.tolist()
frac_data = [frac00, frac01, frac02, frac03, frac04, frac05, frac06, frac07, frac08, frac09, frac10]
# Copy all stars and offsets in order to remove 'bad' stars
good_stars_only = copy.deepcopy(bench_star.tolist())
idx2remove_list = []
for i, s in enumerate(bench_star):
if frac03[0][i]>=1.1 or frac03[0][i]<=-1.1 or frac03[1][i]>=1.1 or frac03[1][i]<=-1.1:
idx2remove = good_stars_only.index(s)
good_stars_only.pop(idx2remove)
idx2remove_list.append(idx2remove)
for idx in idx2remove_list:
for j, _ in enumerate(frac_data):
frac_data[j][0].pop(idx)
frac_data[j][1].pop(idx)
frac_data[j][2].pop(idx)
frac_data[j][3].pop(idx)
frac_data[j][4].pop(idx)
frac_data[j][5].pop(idx)
sig3, mean3, sig5, mean5, sig7, mean7 = get_frac_stdevs(frac_data)
sigmas = [sig3, sig5, sig7]
means = [mean3, mean5, mean7]
plot_title = plot_title+'_zoomin'
frac_bgs = ['0.0', '0.1', '0.2', '0.3', '0.4', '0.5' ,'0.6' ,'0.7', '0.8', '0.9', '1.0']
plot_offsets_frac(plot_title, frac_bgs, frac_data, sigmas, means, bench_star, destination,
save_plot=save_plot, show_plot=show_plot, xlims=xlims, ylims=ylims)
def print_file_lines(output_file, save_text_file, xwidth_list, ff, background2use,
i, x_centroids, y_centroids, verbose=True):
""" This function prints the info on screen AND in a text file. It expects that the output file
already exists (it appends information to it). Columns are fits file name, background value used
and method (None, fixed, or fractional), x and y coordinates for centroid window sizes of 3, 5, and 7
pixels. Eight columns in total.
Args:
output_file -- name of the output file = string
save_text_file -- do you want to save the file = True or False
xwidth_list -- list of x width size = list can have 1 to 3 elements
ff -- name of the fits file the info corresponds to = string
background2use -- value of the background to use for the background method = float
i -- the index of line to append to the file = integer
x_centroids -- list of the x measured centroids for centroid window sizes 3, 5, and 7
y_centroids -- list of the y measured centroids for centroid window sizes 3, 5, and 7
Returns:
Nothing.
"""
x_centroids3, y_centroids3 = x_centroids[0], y_centroids[0]
x_centroids5, y_centroids5 = x_centroids[1], y_centroids[1]
x_centroids7, y_centroids7 = x_centroids[2], y_centroids[2]
if len(xwidth_list)==1:
line1 = "{:<40} {:>4} {:>16} {:>16}".format(
ff, background2use, x_centroids3[i], y_centroids3[i])
else:
line1 = "{:<40} {:>4} {:>16} {:>14} {:>18} {:>14} {:>18} {:>14}".format(
ff, background2use,
x_centroids3[i], y_centroids3[i],
x_centroids5[i], y_centroids5[i],
x_centroids7[i], y_centroids7[i])
if verbose:
print (line1)
if save_text_file:
f = open(output_file, "a")
f.write(line1+"\n")
f.close()
def Nsigma_rejection(N, x, y, max_iterations=10, verbose=True):
""" This function will reject any residuals that are not within N*sigma in EITHER coordinate.
Args:
- x and y must be the numpy arrays of the differences with respect to true values: True-Measured
- N is the factor (integer or float) by which sigma will be multiplied
- max_iterations is the maximum integer allowed iterations
Returns:
- sigma_x = the standard deviation of the new array x
- mean_x = the mean of the new array x
- sigma_y = the standard deviation of the new array y
- mean_y = the mean of the new array y
- x_new = the new array x (with rejections)
- y_new = the new array y (with rejections)
- niter = the number of iterations to reach a convergence (no more rejections)
Usage:
import TA_functions as taf
Nsigma_results = taf.Nsigma_rejection(N, x, y, max_iterations=10)
sigma_x, mean_x, sigma_y, mean_y, x_new, y_new, niter, lines2print, rejected_elements_idx = Nsigma_results
"""
N = float(N)
or_sigma_x, or_mean_x = find_std(x)
or_sigma_y, or_mean_y = find_std(y)
x_new = copy.deepcopy(x)
y_new = copy.deepcopy(y)
original_diffs = copy.deepcopy(x)
for nit in range(max_iterations):
# Determine the standard deviation for each array
sigma_x, mean_x = find_std(x_new)
sigma_y, mean_y = find_std(y_new)
thres_x = N*sigma_x
thres_y = N*sigma_y
xdiff = np.abs(x_new - mean_x)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
# TODO: not sure if we want to expose these as part of the public Gammapy API
# For now keeping as-is, but hiding from the docs by uncommenting here.
# 'Gauss2DPDF',
# 'MultiGauss2D',
# 'gaussian_sum_moments',
__all__ = [
]
__doctest_requires__ = {('gaussian_sum_moments'): ['uncertainties']}
class Gauss2DPDF(object):
"""2D symmetric Gaussian PDF.
Reference: http://en.wikipedia.org/wiki/Multivariate_normal_distribution#Bivariate_case
Parameters
----------
sigma : float
Gaussian width.
"""
def __init__(self, sigma=1):
self.sigma = np.asarray(sigma, np.float64)
@property
def _sigma2(self):
"""Sigma squared (float)"""
return self.sigma * self.sigma
@property
def amplitude(self):
"""PDF amplitude at the center (float)"""
return self(0, 0)
def __call__(self, x, y=0):
"""dp / (dx dy) at position (x, y)
Parameters
----------
x : `~numpy.ndarray`
x coordinate
y : `~numpy.ndarray`, optional
y coordinate
Returns
-------
dpdxdy : `~numpy.ndarray`
dp / (dx dy)
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
theta2 = x * x + y * y
amplitude = 1 / (2 * np.pi * self._sigma2)
exponent = -0.5 * theta2 / self._sigma2
return amplitude * np.exp(exponent)
def dpdtheta2(self, theta2):
"""dp / dtheta2 at position theta2 = theta ^ 2
Parameters
----------
theta2 : `~numpy.ndarray`
Offset squared
Returns
-------
dpdtheta2 : `~numpy.ndarray`
dp / dtheta2
"""
theta2 = np.asarray(theta2, dtype=np.float64)
amplitude = 1 / (2 * self._sigma2)
exponent = -0.5 * theta2 / self._sigma2
return amplitude * np.exp(exponent)
def containment_fraction(self, theta):
"""Containment fraction.
Parameters
----------
theta : `~numpy.ndarray`
Offset
Returns
-------
containment_fraction : `~numpy.ndarray`
Containment fraction
"""
theta = np.asarray(theta, dtype=np.float64)
return 1 - np.exp(-0.5 * theta ** 2 / self._sigma2)
def containment_radius(self, containment_fraction):
"""Containment angle for a given containment fraction.
Parameters
----------
containment_fraction : `~numpy.ndarray`
Containment fraction
Returns
-------
containment_radius : `~numpy.ndarray`
Containment radius
"""
containment_fraction = np.asarray(containment_fraction, dtype=np.float64)
return self.sigma * np.sqrt(-2 * np.log(1 - containment_fraction))
def gauss_convolve(self, sigma):
"""Convolve with another Gaussian 2D PDF.
Parameters
----------
sigma : `~numpy.ndarray` or float
Gaussian width of the new Gaussian 2D PDF to convolve with.
Returns
-------
gauss_convolve : `~gammapy.image.models.Gauss2DPDF`
Convolution of both Gaussians.
"""
sigma = np.asarray(sigma, dtype=np.float64)
new_sigma = np.sqrt(self._sigma2 + sigma ** 2)
return Gauss2DPDF(new_sigma)
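# Quick self-consistency sketch for Gauss2DPDF (illustrative example, not part of the original module):
# containment_radius inverts containment_fraction, so round-tripping a fraction recovers it.
# >>> g = Gauss2DPDF(sigma=2.0)
# >>> r = g.containment_radius(0.68)
# >>> round(float(g.containment_fraction(r)), 2)
# 0.68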
class MultiGauss2D(object):
"""Sum of multiple 2D Gaussians.
Parameters
----------
sigmas : `~numpy.ndarray`
widths of the Gaussians to add
norms : `~numpy.ndarray`, optional
normalizations of the Gaussians to add
Notes
-----
* This sum is no longer a PDF, it is not normalized to 1.
* The "norm" of each component represents the 2D integral,
not the amplitude at the origin.
"""
def __init__(self, sigmas, norms=None):
# If no norms are given, you have a PDF.
sigmas = np.asarray(sigmas, dtype=np.float64)
self.components = [Gauss2DPDF(sigma) for sigma in sigmas]
if norms is None:
self.norms = np.ones(len(self.components))
else:
self.norms = np.asarray(norms, dtype=np.float64)
def __call__(self, x, y=0):
"""dp / (dx dy) at position (x, y)
Parameters
----------
x : `~numpy.ndarray`
x coordinate
y : `~numpy.ndarray`, optional
y coordinate
Returns
-------
total : `~numpy.ndarray`
dp / (dx dy)
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
total = np.zeros_like(x)
for norm, component in zip(self.norms, self.components):
total += norm * component(x, y)
return total
@property
def n_components(self):
"""Number of components (int)"""
return len(self.components)
@property
def sigmas(self):
"""Array of Gaussian widths (`~numpy.ndarray`)"""
return np.array([_.sigma for _ in self.components])
@property
def integral(self):
"""Integral as sum of norms (`~numpy.ndarray`)"""
return np.nansum(self.norms)
@property
def amplitude(self):
"""Amplitude at the center (float)"""
return self.__call__(0, 0)
@property
def max_sigma(self):
"""Largest Gaussian width (float)"""
return self.sigmas.max()
@property
def eff_sigma(self):
r"""Effective Gaussian width for single-Gauss approximation (float)
Notes
-----
The effective Gaussian width is given by:
.. math:: \sigma_\mathrm{eff} = \sqrt{\sum_i N_i \sigma_i^2}
where ``N`` is normalization and ``sigma`` is width.
"""
sigma2s = np.array([component._sigma2 for component
in self.components])
return np.sqrt(np.sum(self.norms * sigma2s))
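# Illustrative use of MultiGauss2D as defined above (the numbers are arbitrary assumptions):
# >>> mg = MultiGauss2D(sigmas=[0.5, 1.0], norms=[0.7, 0.3])
# >>> mg.n_components
# 2
# >>> float(mg.integral)
# 1.0
# >>> float(mg.eff_sigma)  # sqrt(0.7*0.5**2 + 0.3*1.0**2)
# approximately 0.689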
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 16 11:57:09 2019
@author: smrak
"""
from pyGnss import pyGnss, scintillation
from pyGnss import gnssUtils as gu
from datetime import datetime, timedelta
import georinex as gr
import numpy as np
import subprocess
from glob import glob
from dateutil import parser
import yaml
import os
import h5py
from argparse import ArgumentParser
import matplotlib.pyplot as plt
from matplotlib import dates
import warnings
import platform
warnings.simplefilter('ignore', np.RankWarning)
if platform.system() == 'Linux':
separator = '/'
else:
separator = '\\'
def getIntervals(y, maxgap=3, maxjump=2):
r = np.arange(y.size)
import numpy as np
import matplotlib.pyplot as plt
import h5py
import matplotlib as mpl
import os
import sys
from glob import glob
from datetime import datetime
plt.close('all')
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['font.size'] = 12
mpl.rcParams['axes.linewidth'] = 2
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['ytick.major.width'] = 2
if sys.platform == 'linux':
datapath = '/mnt/llmStorage203/Danny/freqent/spinOsc/190709/'
savepath = '/media/daniel/storage11/Dropbox/LLM_Danny/freqent/spinOsc/'
elif sys.platform == 'darwin':
datapath = '/Volumes/Storage/Danny/freqent/spinOsc/190709/'
savepath = '/Users/Danny/Dropbox/LLM_Danny/freqent/spinOsc/'
alphas = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
ndims = [2, 3, 4]
nsim = 64
s_array = np.zeros((len(ndims), len(alphas), nsim))
for file in glob(os.path.join(datapath, '*.hdf5')):
with h5py.File(file, 'r') as d:
dim = d['params']['ndim'][()]
alpha = d['params']['alpha'][()]
s_array[ndims.index(dim), alphas.index(alpha)] = d['data']['s'][:]
fig2, ax2 = plt.subplots(figsize=(5, 5))
# ax2.errorbar(alphas, np.mean(s_array[0], axis=1), yerr=np.std(s_array[0], axis=1),
# fmt='ko', capsize=5, lw=2, label='data')
ax2.plot(alphas, np.mean(s_array[0], axis=1), 'ko', label=r'$\dot{S}_{spectral}$')
ax2.fill_between(alphas, np.mean(s_array[0], axis=1) + np.std(s_array[0], axis=1),
np.mean(s_array[0], axis=1) - np.std(s_array[0], axis=1),
color='k', alpha=0.5)
# ax2.plot(alphas, s_array[0], 'k.')
ax2.plot(np.arange(0, 10, 0.01), 2 * np.arange(0, 10, 0.01)**2, 'r-',
lw=2, label=r'$\dot{S}_{thry} = 2 \alpha^2$')
ax2.tick_params(which='both', direction='in')
ax2.set(xlabel=r'$\alpha$', ylabel=r'$\dot{S}$', title='2 dimensions', ylim=[-14, 260])
ax2.set_aspect(np.diff(ax2.set_xlim())[0] / np.diff(ax2.set_ylim())[0])
ax2.legend()
plt.tight_layout()
fig2.savefig(os.path.join(savepath, datetime.now().strftime('%y%m%d') + '_eprPlot_2dim.pdf'), format='pdf')
fig3, ax3 = plt.subplots(figsize=(5, 5))
# ax3.errorbar(alphas, np.mean(s_array[1], axis=1), yerr=np.std(s_array[1], axis=1),
# fmt='k^', capsize=5, lw=2, label='data')
ax3.plot(alphas, np.mean(s_array[1], axis=1), 'k^', label=r'$\dot{S}_{spectral}$')
ax3.fill_between(alphas, np.mean(s_array[1], axis=1) + np.std(s_array[1], axis=1),
np.mean(s_array[1], axis=1) - np.std(s_array[1], axis=1),
color='k', alpha=0.5)
# Required imports
import os
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
import pylab
import scipy
import random
import datetime
import re
import time
from math import sqrt
import matplotlib.dates as mdates
from matplotlib.dates import date2num, num2date
get_ipython().run_line_magic('matplotlib', 'inline')
from sklearn import preprocessing
pd.set_option('display.max_columns', None) # to view all columns
from scipy.optimize import curve_fit
from supersmoother import SuperSmoother
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.gaussian_process.kernels import Matern, WhiteKernel
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge, Lasso, RidgeCV, LassoCV
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
import scipy.stats as stats
import warnings
warnings.filterwarnings("ignore")
from pyproj import Proj, Transformer
from ipyleaflet import (Map, basemaps, WidgetControl, GeoJSON,
LayersControl, Icon, Marker,FullScreenControl,
CircleMarker, Popup, AwesomeIcon)
from ipywidgets import HTML
plt.rcParams["font.family"] = "Times New Roman"
class functions:
def __init__(self, data):
self.setData(data)
self.__jointData = [None, 0]
# DATA VALIDATION
def __isValid_Data(self, data):
if(str(type(data)).lower().find('dataframe') == -1):
return (False, 'Make sure the data is a pandas DataFrame.\n')
if(not self.__hasColumns_Data(data)):
return (False, 'Make sure that ALL of the columns specified in the REQUIREMENTS are present.\n')
else:
return (True, None)
def __isValid_Construction_Data(self, data):
if(str(type(data)).lower().find('dataframe') == -1):
return (False, 'Make sure the data is a pandas DataFrame.\n')
if(not self.__hasColumns_Construction_Data(data)):
return (False, 'Make sure that ALL of the columns specified in the REQUIREMENTS are present.\n')
else:
return (True, None)
# COLUMN VALIDATION
def __hasColumns_Data(self, data):
find = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']
cols = list(data.columns)
cols = [x.upper() for x in cols]
hasCols = all(item in cols for item in find)
return hasCols
def __hasColumns_Construction_Data(self, data):
find = ['STATION_ID', 'AQUIFER', 'WELL_USE', 'LATITUDE', 'LONGITUDE', 'GROUND_ELEVATION', 'TOTAL_DEPTH']
cols = list(data.columns)
cols = [x.upper() for x in cols]
hasCols = all(item in cols for item in find)
return hasCols
# SETTING DATA
def setData(self, data, verbose=True):
validation = self.__isValid_Data(data)
if(validation[0]):
# Make all columns all caps
cols_upper = [x.upper() for x in list(data.columns)]
data.columns = cols_upper
self.data = data
if(verbose):
print('Successfully imported the data!\n')
self.__set_units()
else:
print('ERROR: {}'.format(validation[1]))
return self.REQUIREMENTS_DATA()
def setConstructionData(self, construction_data, verbose=True):
validation = self.__isValid_Construction_Data(construction_data)
if(validation[0]):
# Make all columns all caps
cols_upper = [x.upper() for x in list(construction_data.columns)]
construction_data.columns = cols_upper
self.construction_data = construction_data.set_index(['STATION_ID'])
if(verbose):
print('Successfully imported the construction data!\n')
else:
print('ERROR: {}'.format(validation[1]))
return self.REQUIREMENTS_CONSTRUCTION_DATA()
def jointData_is_set(self, lag):
if(str(type(self.__jointData[0])).lower().find('dataframe') == -1):
return False
else:
if(self.__jointData[1]==lag):
return True
else:
return False
def set_jointData(self, data, lag):
self.__jointData[0] = data
self.__jointData[1] = lag
# GETTING DATA
def getData(self):
return self.data
def get_Construction_Data(self):
return self.construction_data
# MESSAGES FOR INVALID DATA
def REQUIREMENTS_DATA(self):
print('PYLENM DATA REQUIREMENTS:\nThe imported data needs to meet ALL of the following conditions to have a successful import:')
print(' 1) Data should be a pandas dataframe.')
print(" 2) Data must have these column names: \n ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']")
def REQUIREMENTS_CONSTRUCTION_DATA(self):
print('PYLENM CONSTRUCTION REQUIREMENTS:\nThe imported construction data needs to meet ALL of the following conditions to have a successful import:')
print(' 1) Data should be a pandas dataframe.')
print(" 2) Data must have these column names: \n ['station_id', 'aquifer', 'well_use', 'latitude', 'longitude', 'ground_elevation', 'total_depth']")
# Helper function for plot_correlation
# Sorts analytes in a specific order: 'TRITIUM', 'URANIUM-238','IODINE-129','SPECIFIC CONDUCTANCE', 'PH', 'DEPTH_TO_WATER'
def __custom_analyte_sort(self, analytes):
my_order = 'TURISPDABCEFGHJKLMNOQVWXYZ-_abcdefghijklmnopqrstuvwxyz135790 2468'
return sorted(analytes, key=lambda word: [my_order.index(c) for c in word])
def __plotUpperHalf(self, *args, **kwargs):
corr_r = args[0].corr(args[1], 'pearson')
corr_text = f"{corr_r:2.2f}"
ax = plt.gca()
ax.set_axis_off()
marker_size = abs(corr_r) * 10000
ax.scatter([.5], [.5], marker_size, [corr_r], alpha=0.6, cmap="coolwarm",
vmin=-1, vmax=1, transform=ax.transAxes)
font_size = abs(corr_r) * 40 + 5
ax.annotate(corr_text, [.5, .48,], xycoords="axes fraction", # [.5, .48,]
ha='center', va='center', fontsize=font_size, fontweight='bold')
# Description:
# Removes all columns except 'COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME', 'RESULT', and 'RESULT_UNITS'.
# If the user specifies additional columns in addition to the ones listed above, those columns will be kept.
# The function returns a dataframe and has an optional parameter to be able to save the dataframe to a csv file.
# Parameters:
# data (dataframe): data to simplify
# inplace (bool): save data to current working dataset
# columns (list of strings): list of any additional columns on top of ['COLLECTION_DATE', 'STATION_ID', 'ANALYTE_NAME', 'RESULT', and 'RESULT_UNITS'] to be kept in the dataframe.
# save_csv (bool): flag to determine whether or not to save the dataframe to a csv file.
# file_name (string): name of the csv file you want to save
# save_dir (string): name of the directory you want to save the csv file to
def simplify_data(self, data=None, inplace=False, columns=None, save_csv=False, file_name= 'data_simplified', save_dir='data/'):
if(str(type(data)).lower().find('dataframe') == -1):
data = self.data
else:
data = data
if(columns==None):
sel_cols = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS']
else:
hasColumns = all(item in list(data.columns) for item in columns)
if(hasColumns):
sel_cols = ['COLLECTION_DATE','STATION_ID','ANALYTE_NAME','RESULT','RESULT_UNITS'] + columns
else:
print('ERROR: specified column(s) do not exist in the data')
return None
data = data[sel_cols]
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
data = data.sort_values(by="COLLECTION_DATE")
dup = data[data.duplicated(['COLLECTION_DATE', 'STATION_ID','ANALYTE_NAME', 'RESULT'])]
data = data.drop(dup.index)
data = data.reset_index().drop('index', axis=1)
if(save_csv):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
data.to_csv(save_dir + file_name + '.csv')
print('Successfully saved "' + file_name +'.csv" in ' + save_dir)
if(inplace):
self.setData(data, verbose=False)
return data
# Description:
# Returns the Maximum Concentration Limit value for the specified analyte.
# Example: 'TRITIUM' returns 1.3
# Parameters:
# analyte_name (string): name of the analyte to be processed
def get_MCL(self, analyte_name):
mcl_dictionary = {'TRITIUM': 1.3, 'URANIUM-238': 1.31, 'NITRATE-NITRITE AS NITROGEN': 1,
'TECHNETIUM-99': 2.95, 'IODINE-129': 0, 'STRONTIUM-90': 0.9
}
return mcl_dictionary[analyte_name]
def __set_units(self):
analytes = list(np.unique(self.data[['ANALYTE_NAME']]))
mask1 = ~self.data[['ANALYTE_NAME','RESULT_UNITS']].duplicated()
res = self.data[['ANALYTE_NAME','RESULT_UNITS']][mask1]
mask2 = ~self.data[['ANALYTE_NAME']].duplicated()
res = res[mask2]
unit_dictionary = pd.Series(res.RESULT_UNITS.values,index=res.ANALYTE_NAME).to_dict()
self.unit_dictionary = unit_dictionary
# Description:
# Returns the unit of the analyte you specify.
# Example: 'DEPTH_TO_WATER' returns 'ft'
# Parameters:
# analyte_name (string): name of the analyte to be processed
def get_unit(self, analyte_name):
return self.unit_dictionary[analyte_name]
# Description:
# Filters construction data based on one column. You only specify ONE column to filter by, but can selected MANY values for the entry.
# Parameters:
# data (dataframe): dataframe to filter
# col (string): column to filter. Example: col='STATION_ID'
# equals (list of strings): values to filter col by. Examples: equals=['FAI001A', 'FAI001B']
def filter_by_column(self, data=None, col=None, equals=[]):
if(data is None):
return 'ERROR: DataFrame was not provided to this function.'
else:
if(str(type(data)).lower().find('dataframe') == -1):
return 'ERROR: Data provided is not a pandas DataFrame.'
else:
data = data
# DATA VALIDATION
if(col==None):
return 'ERROR: Specify a column name to filter by.'
data_cols = list(data.columns)
if((col in data_cols)==False): # Make sure column name exists
return 'Error: Column name "{}" does not exist'.format(col)
if(equals==[]):
return 'ERROR: Specify a value that "{}" should equal to'.format(col)
data_val = list(data[col])
for value in equals:
if((value in data_val)==False):
return 'ERROR: No value equal to "{}" in "{}".'.format(value, col)
# QUERY
final_data = pd.DataFrame()
for value in equals:
current_data = data[data[col]==value]
final_data = pd.concat([final_data, current_data])
return final_data
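# Illustrative call (pylenm_obj is a placeholder name for an instance of this class; it assumes
# setConstructionData() was run, and the column value below is a hypothetical example):
# monitoring_wells = pylenm_obj.filter_by_column(data=pylenm_obj.get_Construction_Data(),
# col='WELL_USE', equals=['MONITORING'])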
# Description:
# Returns a list of the well names filtered by the unit(s) specified.
# Parameters:
# units (list of strings): Letter of the well to be filtered (e.g. [‘A’] or [‘A’, ‘D’])
def filter_wells(self, units):
data = self.data
if(units==None):
units= ['A', 'B', 'C', 'D']
def getUnits():
wells = list(np.unique(data.STATION_ID))
wells = pd.DataFrame(wells, columns=['STATION_ID'])
for index, row in wells.iterrows():
mo = re.match('.+([0-9])[^0-9]*$', row.STATION_ID)
last_index = mo.start(1)
wells.at[index, 'unit'] = row.STATION_ID[last_index+1:]
u = wells.unit.iloc[index]
if(len(u)==0): # if has no letter, use D
wells.at[index, 'unit'] = 'D'
if(len(u)>1): # if has more than 1 letter, remove the extra letter
if(u.find('R')>0):
wells.at[index, 'unit'] = u[:-1]
else:
wells.at[index, 'unit'] = u[1:]
u = wells.unit.iloc[index]
if(u=='A' or u=='B' or u=='C' or u=='D'):
pass
else:
wells.at[index, 'unit'] = 'D'
return wells
df = getUnits()
res = df.loc[df.unit.isin(units)]
return list(res.STATION_ID)
# Description:
# Removes outliers from a dataframe based on the z_scores and returns the new dataframe.
# Parameters:
# data (dataframe): data for the outliers to removed from
# z_threshold (float): z_score threshold to eliminate.
def remove_outliers(self, data, z_threshold=4):
z = np.abs(stats.zscore(data))
row_loc = np.unique(np.where(z > z_threshold)[0])
data = data.drop(data.index[row_loc])
return data
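# Minimal sketch of remove_outliers (numeric_df is a hypothetical all-numeric DataFrame,
# and pylenm_obj a placeholder instance of this class):
# cleaned = pylenm_obj.remove_outliers(numeric_df, z_threshold=3)
# rows whose absolute z-score exceeds 3 in any column are dropped before further analysis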
# Description:
# Returns a csv file saved to save_dir with details pertaining to the specified analyte.
# Details include the well names, the date ranges and the number of unique samples.
# Parameters:
# analyte_name (string): name of the analyte to be processed
# save_dir (string): name of the directory you want to save the csv file to
def get_analyte_details(self, analyte_name, filter=False, col=None, equals=[], save_to_file = False, save_dir='analyte_details'):
data = self.data
data = data[data.ANALYTE_NAME == analyte_name].reset_index().drop('index', axis=1)
data = data[~data.RESULT.isna()]
data = data.drop(['ANALYTE_NAME', 'RESULT', 'RESULT_UNITS'], axis=1)
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
info = []
wells = np.unique(data.STATION_ID.values)
for well in wells:
current = data[data.STATION_ID == well]
startDate = current.COLLECTION_DATE.min().date()
endDate = current.COLLECTION_DATE.max().date()
numSamples = current.duplicated().value_counts()[0]
info.append({'Well Name': well, 'Start Date': startDate, 'End Date': endDate,
'Date Range (days)': endDate-startDate ,
'Unique samples': numSamples})
details = pd.DataFrame(info)
details.index = details['Well Name']
details = details.drop('Well Name', axis=1)
details = details.sort_values(by=['Start Date', 'End Date'])
details['Date Range (days)'] = (details['Date Range (days)']/ np.timedelta64(1, 'D')).astype(int)
if(save_to_file):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
details.to_csv(save_dir + '/' + analyte_name + '_details.csv')
return details
# Description:
# Returns a dataframe with a summary of the data for certain analytes.
# Summary includes the date ranges and the number of unique samples and other statistics for the analyte results.
# Parameters:
# analytes (list of strings): list of analyte names to be processed. If left empty, a list of all the analytes in the data will be used.
# sort_by (string): {‘date’, ‘samples’, ‘wells’} sorts the data by either the dates by entering: ‘date’, the samples by entering: ‘samples’, or by unique well locations by entering ‘wells’.
# ascending (bool): flag to sort in ascending order.
def get_data_summary(self, analytes=None, sort_by='date', ascending=False, filter=False, col=None, equals=[]):
data = self.data
if(analytes == None):
analytes = data.ANALYTE_NAME.unique()
data = data.loc[data.ANALYTE_NAME.isin(analytes)].drop(['RESULT_UNITS'], axis=1)
data = data[~data.duplicated()] # remove duplicates
data.COLLECTION_DATE = pd.to_datetime(data.COLLECTION_DATE)
data = data[~data.RESULT.isna()]
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
info = []
for analyte_name in analytes:
query = data[data.ANALYTE_NAME == analyte_name]
startDate = min(query.COLLECTION_DATE)
endDate = max(query.COLLECTION_DATE)
numSamples = query.shape[0]
wellCount = len(query.STATION_ID.unique())
stats = query.RESULT.describe().drop('count', axis=0)
stats = pd.DataFrame(stats).T
stats_col = [x for x in stats.columns]
result = {'Analyte Name': analyte_name, 'Start Date': startDate, 'End Date': endDate,
'Date Range (days)':endDate-startDate, '# unique wells': wellCount,'# samples': numSamples,
'Unit': self.get_unit(analyte_name) }
for num in range(len(stats_col)):
result[stats_col[num]] = stats.iloc[0][num]
info.append(result)
details = pd.DataFrame(info)
details.index = details['Analyte Name']
details = details.drop('Analyte Name', axis=1)
if(sort_by.lower() == 'date'):
details = details.sort_values(by=['Start Date', 'End Date', 'Date Range (days)'], ascending=ascending)
elif(sort_by.lower() == 'samples'):
details = details.sort_values(by=['# samples'], ascending=ascending)
elif(sort_by.lower() == 'wells'):
details = details.sort_values(by=['# unique wells'], ascending=ascending)
return details
# Description:
# Displays the analyte names available at given well locations.
# Parameters:
# well_name (string): name of the well. If left empty, all wells are returned.
# filter (bool): flag to indicate filtering
# col (string): column to filter results
# equals (list of strings): value to match column name. Multiple values are accepted.
def get_well_analytes(self, well_name=None, filter=False, col=None, equals=[]):
data = self.data
bb = "\033[1m"
be = "\033[0m"
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(data.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
return 'ERROR: No results for this query with the specified filter parameters.'
data = data[data['STATION_ID'].isin(intersect_wells)]
if(well_name==None):
wells = list(data.STATION_ID.unique())
else:
wells = [well_name]
for well in wells:
print("{}{}{}".format(bb,str(well), be))
analytes = sorted(list(data[data.STATION_ID==well].ANALYTE_NAME.unique()))
print(str(analytes) +'\n')
# Description:
# Filters data by passing the data and specifying the well_name and analyte_name
# Parameters:
# well_name (string): name of the well to be processed
# analyte_name (string): name of the analyte to be processed
def query_data(self, well_name, analyte_name):
data = self.data
query = data[data.STATION_ID == well_name]
query = query[query.ANALYTE_NAME == analyte_name]
if(query.shape[0] == 0):
return 0
else:
return query
# Description:
# Plot concentrations over time of a specified well and analyte with a smoothed curve on interpolated data points.
# Parameters:
# well_name (string): name of the well to be processed
# analyte_name (string): name of the analyte to be processed
# log_transform (bool): choose whether or not the data should be transformed to log base 10 values
# alpha (int): value between 0 and 10 for line smoothing
# year_interval (int): plot by how many years to appear in the axis e.g.(1 = every year, 5 = every 5 years, ...)
# plot_inline (bool): choose whether or not to show plot inline
# save_dir (string): name of the directory you want to save the plot to
def plot_data(self, well_name, analyte_name, log_transform=True, alpha=0,
plot_inline=True, year_interval=2, x_label='Years', y_label='', save_dir='plot_data', filter=False, col=None, equals=[]):
# Gets appropriate data (well_name and analyte_name)
query = self.query_data(well_name, analyte_name)
query = self.simplify_data(data=query)
if(type(query)==int and query == 0):
return 'No results found for {} and {}'.format(well_name, analyte_name)
else:
if(filter):
filter_res = self.filter_by_column(data=self.construction_data, col=col, equals=equals)
if('ERROR:' in str(filter_res)):
return filter_res
query_wells = list(query.STATION_ID.unique())
filter_wells = list(filter_res.index.unique())
intersect_wells = list(set(query_wells) & set(filter_wells))
if(len(intersect_wells)<=0):
return 'ERROR: No results for this query with the specified filter parameters.'
query = query[query['STATION_ID'].isin(intersect_wells)]
x_data = query.COLLECTION_DATE
x_data = pd.to_datetime(x_data)
y_data = query.RESULT
if(log_transform):
y_data = np.log10(y_data)
# Remove any NaN as a result of the log transformation
nans = ~np.isnan(y_data)
x_data = x_data[nans]
y_data = y_data[nans]
x_RR = x_data.astype(int).to_numpy()
# Remove any duplicate dates
unique = ~pd.Series(x_data).duplicated()
x_data = x_data[unique]
y_data = y_data[unique]
unique = ~pd.Series(y_data).duplicated()
x_data = x_data[unique]
y_data = y_data[unique]
x_RR = x_data.astype(int).to_numpy()
nu = x_data.shape[0]
result = None
while result is None:
if(nu < 5):
return 'ERROR: Could not plot {}, {}'.format(well_name, analyte_name)
break
nu = nu - 1
x_data = x_data[:nu]
x_RR = x_RR[:nu]
y_data = y_data[:nu]
try:
# fit the supersmoother model
model = SuperSmoother(alpha=alpha)
model.fit(x_RR, y_data)
y_pred = model.predict(x_RR)
r = model.cv_residuals()
out = abs(r) > 2.2*np.std(r)
out_x = x_data[out]
out_y = y_data[out]
plt.figure(figsize=(8,8))
ax = plt.axes()
years = mdates.YearLocator(year_interval) # every year
months = mdates.MonthLocator() # every month
yearsFmt = mdates.DateFormatter('%Y')
for label in ax.get_xticklabels():
label.set_rotation(30)
label.set_horizontalalignment('center')
ax = plt.gca()
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.autoscale_view()
unit = query.RESULT_UNITS.values[0]
ax.set_title(well_name + ' - ' + analyte_name, fontweight='bold')
ttl = ax.title
ttl.set_position([.5, 1.05])
if(y_label==''):
if(log_transform):
ax.set_ylabel('log-Concentration (' + unit + ')')
else:
ax.set_ylabel('Concentration (' + unit + ')')
else:
ax.set_ylabel(y_label)
ax.set_xlabel(x_label)
small_fontSize = 15
large_fontSize = 20
plt.rc('axes', titlesize=large_fontSize)
plt.rc('axes', labelsize=large_fontSize)
plt.rc('legend', fontsize=small_fontSize)
plt.rc('xtick', labelsize=small_fontSize)
plt.rc('ytick', labelsize=small_fontSize)
ax.plot(x_data, y_data, ls='', marker='o', ms=5, color='black', alpha=1)
ax.plot(x_data, y_pred, ls='-', marker='', ms=5, color='black', alpha=0.5, label="Super Smoother")
ax.plot(out_x , out_y, ls='', marker='o', ms=5, color='red', alpha=1, label="Outliers")
ax.legend(bbox_to_anchor=(1.04, 1), loc='upper left', borderaxespad=0.)
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.05, 0.85, 'Samples: {}'.format(nu), transform=ax.transAxes,
fontsize=small_fontSize,
fontweight='bold',
verticalalignment='top',
bbox=props)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig(save_dir + '/' + well_name + '-' + analyte_name +'.png', bbox_inches="tight")
if(plot_inline):
plt.show()
plt.clf()
plt.cla()
plt.close()
result = 1
except Exception:
    # fit/plot failed; drop the last point and retry on the next loop iteration
    pass
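# Usage sketch (hypothetical; the class and instance names below are illustrative only,
# assuming the surrounding class has been constructed with a loaded dataframe):
#   analysis = WellAnalysis(data)   # hypothetical constructor
#   analysis.plot_data('FSB 95DR', 'TRITIUM', log_transform=True,
#                      alpha=2, year_interval=5, save_dir='plot_data')
# The call queries the well/analyte pair, fits a SuperSmoother to the (optionally
# log10-transformed) series, marks points whose cross-validated residuals exceed
# 2.2 standard deviations as outliers, and saves the figure under save_dir.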
# Description:
# Plot concentrations over time for every well and analyte with a smoothed curve on interpolated data points.
# Parameters:
# log_transform (bool): choose whether or not the data should be transformed to log base 10 values
# alpha (int): value between 0 and 10 for line smoothing
# year_interval (int): spacing in years between x-axis ticks, e.g. 1 = every year, 5 = every 5 years
# plot_inline (bool): choose whether or not to show plot inline
# save_dir (string): name of the directory you want to save the plot to
def plot_all_data(self, log_transform=True, alpha=0, year_interval=2, plot_inline=True, save_dir='plot_data'):
analytes = ['TRITIUM','URANIUM-238','IODINE-129','SPECIFIC CONDUCTANCE', 'PH', 'DEPTH_TO_WATER']
wells = np.array(self.data.STATION_ID.values)
wells = np.unique(wells)
success = 0
errors = 0
for well in wells:
for analyte in analytes:
plot = self.plot_data(well, analyte,
log_transform=log_transform,
alpha=alpha,
year_interval=year_interval,
plot_inline=plot_inline,
save_dir=save_dir)
if 'ERROR:' in str(plot):
errors = errors + 1
else:
success = success + 1
print("Success: ", success)
print("Errors: ", errors)
# Description:
# Plots a heatmap of the correlations of the important analytes over time for a specified well.
# Parameters:
# well_name (string): name of the well to be processed
# show_symmetry (bool): choose whether or not the heatmap should show the same information twice over the diagonal
# color (bool): choose whether or not the plot should be in color or in greyscale
# save_dir (string): name of the directory you want to save the plot to
def plot_correlation_heatmap(self, well_name, show_symmetry=True, color=True, save_dir='plot_correlation_heatmap'):
data = self.data
query = data[data.STATION_ID == well_name]
a = list(np.unique(query.ANALYTE_NAME.values))
b = ['TRITIUM','IODINE-129','SPECIFIC CONDUCTANCE', 'PH','URANIUM-238', 'DEPTH_TO_WATER']
analytes = self.__custom_analyte_sort(list(set(a) & set(b)))
query = query.loc[query.ANALYTE_NAME.isin(analytes)]
analytes = self.__custom_analyte_sort(np.unique(query.ANALYTE_NAME.values))
x = query[['COLLECTION_DATE', 'ANALYTE_NAME']]
unique = ~x.duplicated()
query = query[unique]
piv = query.reset_index().pivot(index='COLLECTION_DATE',columns='ANALYTE_NAME', values='RESULT')
piv = piv[analytes]
totalSamples = piv.shape[0]
piv = piv.dropna()
samples = piv.shape[0]
if(samples < 5):
return 'ERROR: {} does not have enough samples to plot.'.format(well_name)
else:
scaler = StandardScaler()
pivScaled = scaler.fit_transform(piv)
pivScaled = pd.DataFrame(pivScaled, columns=piv.columns)
pivScaled.index = piv.index
corr_matrix = pivScaled.corr()
if(show_symmetry):
mask = None
else:
mask = np.triu(corr_matrix)
if(color):
cmap = 'RdBu'
else:
cmap = 'binary'
fig, ax = plt.subplots(figsize=(8,6))
ax.set_title(well_name + '_correlation', fontweight='bold')
ttl = ax.title
ttl.set_position([.5, 1.05])
props = dict(boxstyle='round', facecolor='grey', alpha=0.15)
ax.text(1.3, 1.05, 'Start date: {}\nEnd date: {}\n\nSamples: {} of {}'.format(piv.index[0], piv.index[-1], samples, totalSamples), transform=ax.transAxes, fontsize=15, fontweight='bold', verticalalignment='bottom', bbox=props)
ax = sns.heatmap(corr_matrix,
ax=ax,
mask=mask,
vmin=-1, vmax=1,
xticklabels=corr_matrix.columns,
yticklabels=corr_matrix.columns,
cmap=cmap,
annot=True,
linewidths=1,
cbar_kws={'orientation': 'vertical'})
if not os.path.exists(save_dir):
os.makedirs(save_dir)
fig.savefig(save_dir + '/' + well_name + '_correlation.png', bbox_inches="tight")
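# Usage sketch (hypothetical instance name):
#   analysis.plot_correlation_heatmap('FSB 95DR', show_symmetry=False, color=True)
# The method pivots the well's analytes into a date-indexed table, drops dates with
# missing analytes, standardizes each column, and renders the correlation matrix with seaborn.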
# Description:
# Plots a heatmap of the correlations of the important analytes over time for each well in the dataset.
# Parameters:
# show_symmetry (bool): choose whether or not the heatmap should show the same information twice over the diagonal
# color (bool): choose whether or not the plot should be in color or in greyscale
# save_dir (string): name of the directory you want to save the plot to
def plot_all_correlation_heatmap(self, show_symmetry=True, color=True, save_dir='plot_correlation_heatmap'):
data = self.data
wells = np.array(data.STATION_ID.values)
#!/usr/bin/env python
import os, sys, re, copy
import numpy as np
try:
import indigo
except:
pass
import aqml.cheminfo as co
import aqml.cheminfo.graph as cg
import networkx as nx
import aqml.cheminfo.molecule.geometry as cmg
from aqml.cheminfo.molecule.elements import Elements
#__all__ = [ 'rawmol_indigo' ]
T,F = True,False
class Graph(object):
def __init__(self, g):
g1 = (g > 0).astype(int)
np.fill_diagonal(g1, 0)
self.g1 = g1
self.nb = g1.sum() // 2 # num of bonds (i.e., edges)
self.bonds = [ list(edge) for edge in \
np.array( list( np.where(np.triu(g)>0) ) ).T ]
self.gnx = nx.from_numpy_array(g1)
@property
def is_connected(self):
if not hasattr(self, '_ic'):
self._ic = nx.is_connected(self.gnx)
return self._ic
def get_pls(self):
""" calc shortest path lengths """
_na = self.g1.shape[0] # number_of_nodes
pls = -1 * np.ones((_na, _na))
np.fill_diagonal(pls,[0]*_na)
for i in range(_na):
for j in range(i+1,_na):
if nx.has_path(self.gnx,i,j):
pls[i,j]=pls[j,i]=nx.shortest_path_length(self.gnx,i,j)
return pls
@property
def pls(self):
if not hasattr(self, '_pls'):
self._pls = self.get_pls()
return self._pls
def get_shortest_path(self, i, j):
return list( nx.shortest_path(self.gnx, i, j) )
def get_paths(self, i, j):
""" return shortest paths connecting tow nodes i & j """
paths = []
for p in nx.all_shortest_paths(self.gnx, source=i, target=j):
paths += list(p)
return paths
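# Minimal sketch (not part of the original module) exercising the Graph helper above on a
# toy 4-atom chain; the function name is illustrative only.
def _demo_graph():
    adj = np.array([[0, 1, 0, 0],
                    [1, 0, 1, 0],
                    [0, 1, 0, 1],
                    [0, 0, 1, 0]])
    G = Graph(adj)
    print(G.is_connected)             # True: the chain is one connected component
    print(G.pls[0, 3])                # shortest path length between the end atoms -> 3
    print(G.get_shortest_path(0, 3))  # [0, 1, 2, 3]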
class RawMol(Graph):
def __init__(self, obj, ivdw=False, scale=1.0, iprt=F):
""" initialize a molecule"""
if isinstance(obj,(tuple,list)):
zs, coords = obj
else:
sa = obj.__str__()
if ('atoms' in sa) or ('molecule' in sa):
zs, coords = obj.zs, obj.coords
else:
raise Exception('#ERROR: input obj not supported')
self.obj = obj
self.iprt = iprt
self.scale = scale
self._coords = np.array(coords)
self._zs = np.array(zs, int)
self.symbols = [ co.chemical_symbols[zi] for zi in zs ]
self._na = len(zs)
self._ias = np.arange(self._na)
self.pt = Elements( list(zs) )
self.cns0 = np.array([ co.cnsr[zi] for zi in self._zs ], dtype=int)
self.connect()
#if (not self.is_connected) or ivdw:
# self.connect_vdw_inter(scale=scale)
def connect(self):
"""
establish connectivity between atoms from geometry __ONLY__
"""
ps = self._coords
rs = self.pt.rcs
rmax = rs.max()
ds = np.sqrt((np.square(ps[:,np.newaxis]-ps).sum(axis=2)))
self.ds = ds
ds2 = ds * ds
rs1, rs2 = np.meshgrid(rs,rs)
ds2r = (rs1 + rs2 + 0.45)**2
# step 1) get a preliminary connectivity table g
g0 = np.logical_and( ds2 > 0.16, ds2 <= ds2r )
self.g0 = np.array(g0, int)
g = g0.astype(int)
cns = g.sum(axis=0)
#print('cns=',cns)
# step 2) refine the g
maxnbs = self.pt.maxnbs
for ia in range(self._na):
zi = self._zs[ia]
if zi == 1:
if g[ia].sum() > 1:
jas = self._ias[g[ia]>0]
if 1 in self._zs[jas]:
ja = jas[ self._zs[jas]==1 ]
else:
# remove the longer bond
ds2i = ds2[ia,jas]
maxd = ds2i.max()
ja = jas[ds2i==maxd]
g[ia,ja] = 0
g[ja,ia] = 0
cns = g.sum(axis=0)
else:
if cns[ia] == 1: continue
while 1:
gg = cmg.GraphGeometry(self.obj, g)
mbs3 = gg.get_angles([ia], 'degree')
angs = mbs3.values()
#print('angs=', angs)
angmin = 180.0 if len(angs) == 0 else np.min([ min(angs_i) for angs_i in angs ])
# for "C1=C2CN=C1NC2", min(angs) can reach 46.0 degree
#print( 'angmin=',angmin, angs)
if (cns[ia] > maxnbs[ia] or angmin < 45): # 50.0):
#some bond exceeds max valence
#now remove the bond with max bond length
jas = self._ias[g[ia]>0]
dsj = ds[ia,jas]
ja = jas[dsj==np.max(dsj)][0]
g[ia,ja] = g[ja,ia] = 0
cns = g.sum(axis=0)
#print ' * ia,ja = ', ia,ja
#print ia, zi, cns[ia],maxnbs[ia], np.concatenate(angs).min()
#assert cns[ia] <= maxnbs[ia], '#ERROR: still more valence than allowed'
else:
break
self.g = g
#gnx = nx.from_numpy_matrix(g)
#self.gnx = gnx
#self.is_connected = nx.is_connected(gnx)
Graph.__init__(self, g)
@property
def nscu(self):
if not hasattr(self, '_nscu'):
self._nscu = self.get_nscu()
return self._nscu
def get_nscu(self):
cns = self.g.sum(axis=0)
zs = self._zs
scus = {}
ias2 = self._ias[ np.logical_and(cns==2, zs==6) ] # now ...=C=C=...
g1 = self.g[ias2][:,ias2]
clqs = []
if len(g1)>0:
raise Exception('Todo: cannot distinguish between -C#C-C#C- and >C=C=C=C< ...')
cns1 = g1.sum(axis=0)
for cq in cg.Graph(g1).find_cliques():
ias_end = ias2[cns1==1]
ias_others = ias2[cns1>1]
for ie in ias_end:
jas = []
vis1 = set() # visited nodes
ias1 = self._ias[ np.logical_and(cns==1, zs>1) ]
for ia1 in ias1:
if ia1 in vis1: continue
z1 = zs[ia1]
jas = self._ias[self.g[ia1]>0]
if z1 in [8,16,34,52]:
assert len(jas)==1
ja = jas[0]
nnbr1 = self.g[ja,ias1].sum()
#nbrsj = self._ias[self.g[ja]>0]
if nnbr1 == 1:
t = [ co.chemical_symbols[zi] for zi in [zs[ia1],zs[ja]] ]
seti = [ia1,ja]
scu = '{s[0]}={s[1]}'.format(s=t)
elif nnbr1 == 2:
nbrsj = ias1[ self.g[ja,ias1]>0 ]
seti = [ja, nbrsj[0], nbrsj[1]]
zsj = zs[nbrsj]; zsj.sort()
zsi = [zsj[0], zs[ja], zsj[1]]
t = [ co.chemical_symbols[zi] for zi in zsi ]
scu = '{s[0]}={s[1]}={s[2]}'.format(s=t)
else:
raise Exception('Todo... Possibly ClO4 group?')
elif z1 in [6,]: # -[N+]#[C-]
assert len(jas)==1
seti = [ia1, jas[0]]
scu = '[{}+]#[C-]'.format( co.chemical_symbols[zs[jas[0]]] )
elif z1 in [1,9,17,35,53]:
seti = [ia1]
scu = co.chemical_symbols[z1]
else:
raise Exception('#ERROR: what the hell happend?')
vis1.update(seti)
## now update counts
if scu not in scus:
scus[scu] = [ set(seti) ]
else:
scus[scu].append( set(seti) )
iasr = np.setdiff1d(self._ias, list(vis1))
for ia in iasr:
scu = '%s%d'%( co.chemical_symbols[zs[ia]], cns[ia] )
seti = [ia]
if scu not in scus:
scus[scu] = [set(seti)]
else:
scus[scu].append( set(seti) )
return scus
def get_fragments(self):
mols = []
if self.is_connected:
mols = [ co.atoms(self._zs, self._coords) ]
else:
for comp in nx.connected_components(self.gnx):
    idx = list(comp)
mols.append( co.atoms(self._zs[idx], self._coords[idx]) )
return mols
@property
def fragments(self):
if not hasattr(self, '_fragments'):
self._fragments = self.get_fragments()
return self._fragments
def connect_vdw_inter(self): #,scale=1.0):
"""
add vdw bond between standalone submols in the system
"""
g2 = np.zeros((self._na, self._na))
#!/usr/bin/env python3
try:
import matplotlib
matplotlib.verbose = True
# matplotlib.use('Gtk3Agg')
# matplotlib.use('QtAgg')
# matplotlib.use('TkAgg')
# matplotlib.use('Agg')
import matplotlib.pyplot as plt
# plt.switch_backend('Agg')
# print(matplotlib.get_backend())
except:
pass
import os, sys, time, \
random, \
subprocess, glob, re, \
numpy as np, \
multiprocessing as mp, \
logging, \
collections, \
functools, signal, shutil
from os import path as osp
from IPython import embed
from easydict import EasyDict as edict
import cv2, cvbase as cvb, copy, pandas as pd, math
import collections, collections.abc, h5py
from sklearn.preprocessing import normalize
from scipy.spatial.distance import cdist
# import redis, networkx as nx, \
# yaml, subprocess, pprint, json, \
# csv, argparse, string, colorlog, \
# shutil, itertools,pathlib,
# from IPython import embed
# from tensorboardX import SummaryWriter
glvars = {}
root_path = osp.normpath(
osp.join(osp.abspath(osp.dirname(__file__)), )
) + '/'
home_path = os.environ['HOME'] + '/'
work_path = home_path + '/work/'
share_path = '/data1/share/'
share_path3 = '/home/share/'
share_path2 = '/data2/share/'
sys.path.insert(0, root_path)
os.environ.setdefault('log', '1')
os.environ.setdefault('pytorch', '1')
os.environ.setdefault('tensorflow', '0')
os.environ.setdefault('chainer', '0')
# os.environ['MXNET_CPU_WORKER_NTHREADS'] = '3'
# os.environ['MXNET_ENGINE_TYPE'] = 'ThreadedEnginePerDevice'
timer = cvb.Timer()
stream_handler = None
def set_stream_logger(log_level=logging.INFO):
# return None
global stream_handler
import colorlog
sh = colorlog.StreamHandler()
sh.setLevel(log_level)
sh.setFormatter(
colorlog.ColoredFormatter(
' %(asctime)s %(filename)s [line:%(lineno)d] %(log_color)s%(levelname)s%(reset)s %(message)s'))
# if stream_handler is not None:
# logging.root.removeHandler(stream_handler)
logging.root.addHandler(sh)
return sh
file_hander = None
def set_file_logger(work_dir=None, log_level=logging.INFO):
# return None
global file_hander
work_dir = work_dir or root_path
if not osp.exists(work_dir):
os.system(f"mkdir -p '{work_dir}'")
fh = logging.FileHandler(os.path.join(work_dir, 'log-ing'))
fh.setLevel(log_level)
fh.setFormatter(
logging.Formatter('%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s'))
# if file_hander is not None:
# logging.root.removeHandler(file_hander)
logging.root.addHandler(fh)
return fh
if os.environ.get('log', '0') == '1':
logging.root.setLevel(logging.INFO)
stream_handler = set_stream_logger(logging.INFO)
file_hander = set_file_logger(log_level=logging.INFO)
if os.environ.get('chainer', "1") == "1":
import chainer
from chainer import cuda
# xp = cuda.get_array_module( )
old_repr = chainer.Variable.__repr__
chainer.Variable.__str__ = lambda obj: (f'ch {tuple(obj.shape)} {obj.dtype} '
f'{old_repr(obj)} '
f'type: {obj.dtype} shape: {obj.shape} ch')
chainer.Variable.__repr__ = chainer.Variable.__str__
logging.info(f'import chainer {timer.since_last_check()}')
if os.environ.get('pytorch', "1") == "1":
tic = time.time()
# os.environ["MKL_NUM_THREADS"] = "4"
# os.environ["OMP_NUM_THREADS"] = "4"
os.environ["NCCL_DEBUG"] = "INFO"
# os.environ["NCCL_DEBUG_SUBSYS"] = "ALL"
import torch
import torchvision
import torch.utils.data
from torch import nn
import torch.nn.functional as F
old_repr = torch.Tensor.__repr__
torch.Tensor.__repr__ = lambda obj: (f'th {tuple(obj.shape)} {obj.type()} '
f'{old_repr(obj)} '
f'type: {obj.type()} shape: {obj.shape} th') if obj.is_contiguous() else (
f'{tuple(obj.shape)} {obj.type()} '
f'{old_repr(obj.contiguous())} '
f'type: {obj.type()} shape: {obj.shape}')
logging.info(f'import pytorch {time.time() - tic}')
def allow_growth():
import tensorflow as tf
oldinit = tf.Session.__init__
def myinit(session_object, target='', graph=None, config=None):
if config is None:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
oldinit(session_object, target, graph, config)
tf.Session.__init__ = myinit
return oldinit
if os.environ.get('tensorflow', '0') == '1':
tic = time.time()
import tensorflow as tf
# import tensorflow.contrib
oldinit = allow_growth()
print('import tf', time.time() - tic)
'''
%load_ext autoreload
%autoreload 2
%matplotlib inline
import matplotlib
matplotlib.style.use('ggplot')
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
'''
# torch.set_default_tensor_type(torch.cuda.DoubleTensor)
# ori_np_err = np.seterr(all='raise') # 1/100000=0 will be error
## ndarray will be pretty
np.set_string_function(lambda arr: f'np {arr.shape} {arr.dtype} '
f'{arr.__str__()} '
f'dtype:{arr.dtype} shape:{arr.shape} np', repr=True)
## print(ndarray) will be pretty (and pycharm dbg)
# np.set_string_function(lambda arr: f'np {arr.shape} {arr.dtype} \n'
# f'{arr.__repr__()} \n'
# f'dtype:{arr.dtype} shape:{arr.shape} np', repr=False)
## fail
# old_np_repr = np.ndarray.__repr__
# np.ndarray.__repr__ = lambda arr: (f'{arr.shape} {arr.dtype} \n'
# f'{old_np_repr(arr)} \n'
# f'dtype:{arr.dtype} shape:{arr.shape}')
logging.info('import lz')
def simplify_conf(conf):
conf2 = {k: v for k, v in conf.items() if not isinstance(v, (dict, np.ndarray))}
logging.info(f'training conf is {conf2}')
return conf2
def swa_bn_update(loader, model, device=0):
logging.info('update bn ')
def _check_bn_apply(module, flag):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
flag[0] = True
def _check_bn(model):
flag = [False]
model.apply(lambda module: _check_bn_apply(module, flag))
return flag[0]
def _reset_bn(module):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
module.running_mean = torch.zeros_like(module.running_mean)
module.running_var = torch.ones_like(module.running_var)
def _get_momenta(module, momenta):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
momenta[module] = module.momentum
def _set_momenta(module, momenta):
if issubclass(module.__class__, torch.nn.modules.batchnorm._BatchNorm):
module.momentum = momenta[module]
if not _check_bn(model):
return
was_training = model.training
model.train()
momenta = {}
model.apply(_reset_bn)
model.apply(lambda module: _get_momenta(module, momenta))
n = 0
for idx, input in enumerate(loader):
if idx % 999==9:
print(idx, len(loader))
if isinstance(input, (list, tuple)):
input = input[0]
if isinstance(input, (dict,)):
input =input['imgs']
b = input.size(0)
momentum = b / float(n + b)
for module in momenta.keys():
module.momentum = momentum
if device is not None:
input = input.to(device)
model(input)
n += b
model.apply(lambda module: _set_momenta(module, momenta))
model.train(was_training)
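# Hedged usage sketch for the BatchNorm refresh above (loader/model names are illustrative;
# any torch DataLoader yielding image batches, or dicts with an 'imgs' key, works):
#   swa_model = torch.optim.swa_utils.AveragedModel(model)
#   ...                                         # average weights during training
#   swa_bn_update(train_loader, swa_model, device=0)
# The helper resets every BatchNorm running_mean/running_var, then streams one pass of the
# loader through the model so the averaged weights get matching normalization statistics.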
def init_dev(n=(0,)):
import os
import logging
if not isinstance(n, collections.abc.Sequence):
n = (n,)
logging.info('use gpu {}'.format(n))
home = os.environ['HOME']
if isinstance(n, int) or n is None:
n = (n,)
devs = ''
for n_ in n:
devs += str(n_) + ','
devs = devs.strip(',')
os.environ["CUDA_VISIBLE_DEVICES"] = devs
set_env('PATH', home + '/local/cuda/bin')
set_env('LD_LIBRARY_PATH', home + '/local/cuda/lib64:' +
home + '/local/cuda/extras/CUPTI/lib64')
def set_env(key, value):
if key in os.environ:
os.environ[key] = value + ':' + os.environ[key]
else:
os.environ[key] = value
# todo occupy and release
def occupy(dev=range(8)):
import tensorflow as tf
init_dev(dev)
newinit = tf.Session.__init__
if 'oldinit' in locals():
tf.Session.__init__ = oldinit
var = tf.constant(1)
with tf.Session() as sess:
sess.run([var])
while True:
time.sleep(10)
# tf.Session.__init__ = newinit
# if something like Runtime Error : an illegal memory access was encountered occur
# os.environ['CUDA_LAUNCH_BLOCKING'] = "1"
'''
oldinit = Session.__init__
def myinit(session_object, target='', graph=None, config=None):
if config is None:
config = ConfigProto()
config.gpu_options.allow_growth = True
oldinit(session_object, target, graph, config)
Session.__init__ = myinit
'''
def allow_growth_conf():
import tensorflow as tf
_sess_config = tf.ConfigProto(allow_soft_placement=True)
_sess_config.gpu_options.allow_growth = True
return _sess_config
def allow_growth_sess():
import tensorflow as tf
tf_graph = tf.get_default_graph()
_sess_config = tf.ConfigProto(allow_soft_placement=True)
_sess_config.gpu_options.allow_growth = True
sess = tf.Session(config=_sess_config, graph=tf_graph)
return sess
def allow_growth_keras():
import keras
keras.backend.set_session(allow_growth_sess())
def judgenan(x):
return not not torch.isnan(x).any().item() or not not torch.isinf(x).any().item()
def get_mem():
import psutil
while True:
try:
mem = psutil.virtual_memory()
break
except:
pass
free = mem.free / 1024 ** 3
available = mem.available / 1024 ** 3
return available
import gpustat
ndevs = len(gpustat.GPUStatCollection.new_query().gpus)
def get_gpu_mem(ind=0):
gpus = gpustat.GPUStatCollection.new_query().gpus
return gpus[ind].entry['memory.used'] / gpus[ind].entry['memory.total'] * 100
def get_utility(ind=0):
import gpustat
gpus = gpustat.GPUStatCollection.new_query().gpus
return gpus[ind].entry['utilization.gpu']
def show_dev():
res = []
for ind in range(ndevs):
mem = get_gpu_mem(ind)
print(ind, mem)
res.append(mem)
return res
def get_dev(n=1, ok=range(ndevs), mem_thresh=(0.1, 0.15), sleep=23.3): # 0.3: now occupy smaller than 0.3
if not isinstance(mem_thresh, collections.abc.Sequence):
mem_thresh = (mem_thresh,)
def get_poss_dev():
mems = [get_gpu_mem(ind) for ind in ok]
inds, mems = cosort(ok, mems, return_val=True)
devs = [ind for ind, mem in zip(inds, mems) if mem < mem_thresh[0] * 100]
return devs
devs = get_poss_dev()
logging.info('Auto select gpu')
# gpustat.print_gpustat()
show_dev()
while len(devs) < n:
devs = get_poss_dev()
print('not enough devices available')
# gpustat.print_gpustat()
show_dev()
sleep = int(sleep)
time.sleep(random.randint(max(0, sleep - 20), sleep + 20))
return devs[:n]
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
def cpu_priority(level=19):
import psutil
p = psutil.Process(os.getpid())
p.nice(level)
def cloud_normpath(path):
path = str(path)
path = osp.normpath(path)
path = path.replace('s3:/', 's3://')
return path
def mkdir_p(path, delete=True, verbose=True):
path = str(path)
if path == '':
return
if delete and osp.exists(path):
rm(path)
if not osp.exists(path):
os.makedirs(path, exist_ok=True)
path = cloud_normpath(path)
if not osp.exists(path):
os.makedirs(path, exist_ok=True)
class Logger(object):
def __init__(self, fpath=None, console=sys.stdout):
self.console = console
self.file = None
if fpath is not None:
mkdir_p(os.path.dirname(fpath), delete=False)
# rm(fpath)
self.file = open(fpath, 'a')
def __del__(self):
self.close()
def __enter__(self):
pass
def __exit__(self, *args):
self.close()
def write(self, msg):
self.console.write(msg)
if self.file is not None:
self.file.write(msg)
def flush(self):
self.console.flush()
if self.file is not None:
self.file.flush()
os.fsync(self.file.fileno())
def close(self):
self.console.close()
if self.file is not None:
self.file.close()
def set_file_logger_prt(path=root_path):
# todo
path = str(path) + '/'
# sys.stdout = Logger(path + 'log-prt')
# sys.stderr = Logger(path + 'log-prt-err')
if os.environ.get('log', '0') == '1':
set_file_logger_prt()
class Timer(object):
def __init__(self, print_tmpl=None, start=True, ):
self._is_running = False
self.print_tmpl = print_tmpl if print_tmpl else '{:.3f}'
if start:
self.start()
@property
def is_running(self):
"""bool: indicate whether the timer is running"""
return self._is_running
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
print(self.print_tmpl.format(self.since_last_check()))
self._is_running = False
def start(self):
"""Start the timer."""
if not self._is_running:
self._t_start = time.time()
self._is_running = True
self._t_last = time.time()
def since_start(self, aux=''):
"""Total time since the timer is started.
Returns(float): the time in seconds
"""
if not self._is_running:
raise ValueError('timer is not running')
self._t_last = time.time()
logging.info(f'{aux} time {self.print_tmpl.format(self._t_last - self._t_start)}')
return self._t_last - self._t_start
def since_last_check(self, aux='', verbose=True):
"""Time since the last checking.
Either :func:`since_start` or :func:`since_last_check` is a checking operation.
Returns(float): the time in seconds
"""
if not self._is_running:
raise ValueError('timer is not running')
dur = time.time() - self._t_last
self._t_last = time.time()
if verbose:
logging.info(f'{aux} time {self.print_tmpl.format(dur)}')
return dur
timer = Timer()
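# Small usage sketch for the Timer above (illustrative only): it works both as a context
# manager and through explicit checkpoints.
def _demo_timer():
    with Timer('block took {:.3f}s'):
        time.sleep(0.1)                 # elapsed time is printed on exit
    t = Timer()
    time.sleep(0.05)
    t.since_last_check('step 1')        # logs time since the timer was started
    time.sleep(0.05)
    t.since_start('total')              # logs time since start()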
def get_md5(url):
if isinstance(url, str):
url = url.encode('utf-8')
import hashlib
m = hashlib.md5()
m.update(url)
return m.hexdigest()
def load_cfg(cfg_file):
from importlib import import_module
sys.path.append(osp.dirname(cfg_file))
module_name = osp.splitext(osp.basename(cfg_file))[0]
cfg = import_module(module_name)
return cfg
# Based on an original idea by https://gist.github.com/nonZero/2907502 and heavily modified.
class Uninterrupt(object):
"""
Use as:
with Uninterrupt() as u:
while not u.interrupted:
# train
"""
def __init__(self, sigs=(signal.SIGINT,), verbose=False):
self.sigs = sigs
self.verbose = verbose
self.interrupted = False
self.orig_handlers = None
def __enter__(self):
if self.orig_handlers is not None:
raise ValueError("Can only enter `Uninterrupt` once!")
self.interrupted = False
self.orig_handlers = [signal.getsignal(sig) for sig in self.sigs]
def handler(signum, frame):
self.release()
self.interrupted = True
if self.verbose:
print("Interruption scheduled...", )
for sig in self.sigs:
signal.signal(sig, handler)
return self
def __exit__(self, type_, value, tb):
self.release()
def release(self):
if self.orig_handlers is not None:
for sig, orig in zip(self.sigs, self.orig_handlers):
signal.signal(sig, orig)
self.orig_handlers = None
def mail(content, to_mail=('<EMAIL>',)):
import datetime, collections
user_passes = json_load(home_path + 'Dropbox/mail.json')
user_pass = user_passes[0]
time_str = datetime.datetime.now().strftime('%m-%d %H:%M')
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
s = smtplib.SMTP(host=user_pass['host'], port=user_pass['port'], timeout=10)
s.starttls()
s.login(user_pass['username'], user_pass['password'])
title = 'ps: ' + content.split('\r\n')[0]
title = title[:20]
content = time_str + '\r\n' + content
if isinstance(to_mail, collections.abc.Sequence):
to_mail = ', '.join(to_mail)
msg = MIMEMultipart('alternative')
msg['Subject'] = title
msg['From'] = user_pass['username']
msg['To'] = to_mail
# msg['Cc'] = to_mail
msg.attach(MIMEText(content, 'plain'))
s.sendmail(msg['From'], msg['To'], msg.as_string())
s.quit()
def df2md(df1):
import tabulate
return tabulate.tabulate(df1, headers="keys", tablefmt="pipe")
def stat(arr):
def stat_np(array):
array = np.asarray(array)
return dict(zip(
['min', 'mean', 'median', 'max', 'shape'],
[np.min(array), np.mean(array), np.median(array), np.max(array), np.shape(array)]
))
def stat_th(tensor):
return dict(zip(
['min', 'mean', 'median', 'max', ],
[torch.min(tensor).item(), torch.mean(tensor).item(), torch.median(tensor).item(), torch.max(
tensor).item()]
))
if type(arr).__module__ == 'torch':
return stat_th(arr)
else:
return stat_np(arr)
def sel_np(A):
import json
dtype = str(A.dtype)
shape = A.shape
A = A.ravel().tolist()
sav = {'shape': shape, 'dtype': dtype,
'A': A}
return json.dumps(sav)
def desel_np(s):
import json
sav = json.loads(s)
A = sav['A']
A = np.array(A, dtype=sav['dtype']).reshape(sav['shape'])
return A
def to_image(arr):
from PIL import Image
if type(arr).__module__ == 'PIL.Image':
return arr
if type(arr).__module__ == 'numpy':
return Image.fromarray(arr)
def to_numpy(tensor):
import PIL
if isinstance(tensor, torch.autograd.Variable):
tensor = tensor.detach()
if torch.is_tensor(tensor):
if tensor.shape == ():
tensor = tensor.item()
tensor = np.asarray([tensor])
elif np.prod(tensor.shape) == 1:
tensor = tensor.item()
tensor = np.asarray([tensor])
else:
tensor = tensor.cpu().numpy()
tensor = np.asarray(tensor)
if type(tensor).__module__ == 'PIL.Image':
tensor = np.asarray(tensor)
# elif type(tensor).__module__ != 'numpy':
# raise ValueError("Cannot convert {} to numpy array"
# .format(type(tensor)))
return tensor
def to_torch(ndarray):
if ndarray is None:
return None
if isinstance(ndarray, collections.abc.Sequence):
return [to_torch(ndarray_) for ndarray_ in ndarray if ndarray_ is not None]
# if isinstance(ndarray, torch.autograd.Variable):
# ndarray = ndarray.data
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def norm_np(tensor):
min, max = tensor.min(), tensor.max()
tensor -= min
tensor /= (max - min)
tensor *= 255
return tensor
def norm_th(tensor):
min, max = tensor.min(), tensor.max()
return tensor.sub_(min).div_(max - min)
def load_state_dict(model, state_dict, prefix='', de_prefix=''):
own_state = model.state_dict()
success = []
if prefix != '':
state_dict = {prefix + name: param for name, param in state_dict.items()}
elif de_prefix != '':
state_dict = {name.replace(de_prefix, ''): param for name, param in state_dict.items()}
for name, param in state_dict.items():
if name not in own_state:
print('ignore key "{}" in the loaded state_dict'.format(name))
continue
if isinstance(param, nn.Parameter):
param = param.clone()
if own_state[name].size() == param.size():
own_state[name].copy_(param)
# print('{} {} is ok '.format(name, param.size()))
success.append(name)
else:
logging.error('dimension mismatch for param "{}", in the model are {}'
' and in the checkpoint are {}, ...'.format(
name, own_state[name].size(), param.size()))
missing = set(own_state.keys()) - set(success)
if len(missing) > 0:
print('missing keys in my state_dict: "{}"'.format(missing))
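# Hedged example for the tolerant checkpoint loader above (paths are illustrative):
#   ckpt = torch.load('model.pth', map_location='cpu')
#   load_state_dict(model, ckpt, de_prefix='module.')   # strip a DataParallel prefix
# Keys absent from the model or with mismatching shapes are reported instead of raising.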
def grid_iter(*args):
import itertools
res = list(itertools.product(*args))
np.random.shuffle(res)
for arg in res:
if len(arg) == 1:
yield arg[0]
else:
yield arg
def cross_iter(*args):
start = [t[0] for t in args]
yield start
for ind, arg in enumerate(args):
if len(arg) > 1:
bak = start[ind]
for ar in arg[1:]:
start[ind] = ar
yield start
start[ind] = bak
def shuffle_iter(iter):
iter = list(iter)
np.random.shuffle(iter)
for iter_ in iter:
yield iter_
def optional_arg_decorator(fn):
def wrapped_decorator(*args):
if len(args) == 1 and callable(args[0]):
return fn(args[0])
else:
def real_decorator(decoratee):
return fn(decoratee, *args)
return real_decorator
return wrapped_decorator
def randomword(length=9, ):
import random
import string
return ''.join(random.choice(string.ascii_letters + string.digits + '_') for _ in range(length))
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
def cosort(ind, val, return_val=False):
ind = np.asarray(ind)
val = np.asarray(val)
comb = zip(ind, val)
comb_sorted = sorted(comb, key=lambda x: x[1])
if not return_val:
return np.array([comb_[0] for comb_ in comb_sorted])
else:
return np.array([comb_[0] for comb_ in comb_sorted]), np.array([comb_[1] for comb_ in
comb_sorted])
@optional_arg_decorator
def timeit(fn, info=''):
def wrapped_fn(*arg, **kwargs):
start = time.time()
res = fn(*arg, **kwargs)
diff = time.time() - start
logging.info((info + 'takes time {}').format(diff))
return res
return wrapped_fn
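# Illustrative example of the optional-argument decorator above: @timeit can be applied
# bare or with an info prefix (the decorated function names are made up).
@timeit
def _demo_timed_plain():
    time.sleep(0.01)

@timeit('heavy step ')
def _demo_timed_labeled():
    time.sleep(0.01)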
class Database(object):
def __init__(self, file, mode='a'):
import h5py
if mode == 'r':
try:
self.fid = h5py.File(file, mode)
except OSError as inst:
logging.error(f'{inst}')
cp(file, file + f'.{randomword()}')
self.fid = h5py.File(file, mode)
else:
self.fid = h5py.File(file, mode)
# rm(file)
# self.fid = h5py.File(file, 'w')
# logging.error(f'{file} is delete and write !!')
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.fid.close()
def __getitem__(self, keys):
if isinstance(keys, (tuple, list)):
return [self._get_single_item(k) for k in keys]
return self._get_single_item(keys)
def _get_single_item(self, key):
return np.asarray(self.fid[key])
def __setitem__(self, key, value):
value = np.asarray(value)
if key in self.fid:
if self.fid[key].shape == value.shape and \
self.fid[key].dtype == value.dtype:
logging.debug('shape type same, old is updated, {} {} '.format(value, np.count_nonzero(value == -1))
)
self.fid[key][...] = value
else:
logging.debug('old shape {} new shape {} updated'.format(
self.fid[key].shape, value.shape))
del self.fid[key]
self.fid.create_dataset(key, data=value)
else:
self.fid.create_dataset(key, data=value)
def __delitem__(self, key):
del self.fid[key]
def __len__(self):
return len(self.fid)
def __iter__(self):
return iter(self.fid)
def flush(self):
self.fid.flush()
def close(self):
self.flush()
self.fid.close()
def keys(self):
return self.fid.keys()
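# Minimal sketch of the h5py-backed Database wrapper above; the file path and keys are
# illustrative only.
def _demo_database(path='/tmp/feat.h5'):
    with Database(path, mode='a') as db:
        db['feat/img1'] = np.random.rand(512).astype('float32')   # creates the dataset
        db['feat/img1'] = np.zeros(512, dtype='float32')           # same shape/dtype: updated in place
        print(list(db.keys()))
        return db['feat/img1']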
def pickle_dump(data, file, **kwargs):
import pickle, pathlib
# python2 can read 2
kwargs.setdefault('protocol', pickle.HIGHEST_PROTOCOL)
if isinstance(file, str) or isinstance(file, pathlib.Path):
mkdir_p(osp.dirname(file), delete=False)
print('pickle into', file)
with open(file, 'wb') as f:
pickle.dump(data, f, **kwargs)
elif hasattr(file, 'write'):
pickle.dump(data, file, **kwargs)
else:
raise TypeError("file must be str of file-object")
def get_img_size(img='/data1/xinglu/prj/test.jpg', verbose=True):
try:
out, err = shell(f'convert "{img}" -print "(%w, %h)" ', verbose=verbose)
out = eval(out)
out = (out[1], out[0])
except:
out = cv2.imread(img).shape[:2]
return out
def pickle_load(file, **kwargs):
import pickle
if isinstance(file, str):
with open(file, 'rb') as f:
data = pickle.load(f, **kwargs)
elif hasattr(file, 'read'):
data = pickle.load(file, **kwargs)
return data
def df_dump(df, path, name='df'):
df.to_hdf(path, name, mode='w')
def df_load(path, name='df'):
import pandas as pd
return pd.read_hdf(path, name)
import struct
cv_type_to_dtype = {
5: np.dtype('float32'),
7: np.dtype('float16')
}
dtype_to_cv_type = {v: k for k, v in cv_type_to_dtype.items()}
def read_mat(f):
"""
Reads an OpenCV mat from the given file opened in binary mode
"""
rows, cols, stride, type_ = struct.unpack('iiii', f.read(4 * 4))
mat = np.frombuffer(f.read(rows * stride), dtype=cv_type_to_dtype[type_])
return mat.reshape(rows, cols)
def load_mat(filename):
"""
Reads a OpenCV Mat from the given filename
"""
return read_mat(open(filename, 'rb'))
def write_mat(f, m):
"""Write mat m to file f"""
import struct
if len(m.shape) == 1:
rows = m.shape[0]
cols = 1
else:
rows, cols = m.shape
header = struct.pack('iiii', rows, cols, cols * 4, dtype_to_cv_type[m.dtype])
f.write(header)
f.write(m.data)
def save_mat(filename, m):
"""Saves mat m to the given filename"""
return write_mat(open(filename, 'wb'), m)
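# Round-trip sketch for the raw OpenCV-style mat (de)serializers above; the file path is
# illustrative. Only dtypes present in cv_type_to_dtype (float32/float16) are supported.
def _demo_mat_roundtrip(path='/tmp/feat.mat'):
    m = np.random.rand(4, 3).astype(np.float32)
    save_mat(path, m)        # writes a (rows, cols, stride, type) header plus raw data
    m2 = load_mat(path)
    assert m2.shape == (4, 3) and m2.dtype == np.float32
    return m2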
def yaml_load(file, **kwargs):
from yaml import Loader
import yaml
kwargs.setdefault('Loader', Loader)
if isinstance(file, str):
with open(file, 'r') as f:
obj = yaml.load(f, **kwargs)
elif hasattr(file, 'read'):
obj = yaml.load(file, **kwargs)
else:
raise TypeError('"file" must be a filename str or a file-object')
return obj
def yaml_dump(obj, file=None, **kwargs):
import yaml
from yaml import Dumper
kwargs.setdefault('Dumper', Dumper)
if file is None:
return yaml.dump(obj, **kwargs)
elif isinstance(file, str):
with open(file, 'w') as f:
yaml.dump(obj, f, **kwargs)
elif hasattr(file, 'write'):
yaml.dump(obj, file, **kwargs)
else:
raise TypeError('"file" must be a filename str or a file-object')
# torch.nn.utils.clip_grad_value_(self.model.parameters(), 5)
def clip_grad_value_(parameters, clip_value=5):
r"""Clips gradient of an iterable of parameters at specified value.
Gradients are modified in-place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
clip_value (float or int): maximum allowed value of the gradients.
The gradients are clipped in the range
:math:`\left[\text{-clip\_value}, \text{clip\_value}\right]`
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
clip_value = float(clip_value)
for p in filter(lambda p: p.grad is not None, parameters):
if torch.isnan(p.grad.data).any().item():
print('nan ', p.shape)
if torch.isinf(p.grad.data).any().item():
print('nan ', p.shape)
p.grad.data.clamp_(min=-clip_value, max=clip_value)
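# Hedged usage sketch for the gradient clipper above (model/optimizer names are illustrative):
#   loss.backward()
#   clip_grad_value_(model.parameters(), clip_value=5)   # clamp grads to [-5, 5], print on NaN/Inf
#   optimizer.step()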
def json_dump(obj, file, mode='a'): # write not append!
# import codecs
import json
if isinstance(file, str):
# with codecs.open(file, mode, encoding='utf-8') as fp:
with open(file, 'w') as fp:
json.dump(obj, fp, sort_keys=True, indent=4
# ensure_ascii=False
)
elif hasattr(file, 'write'):
json.dump(obj, file)
def json_load(file):
import json
if isinstance(file, str):
with open(file, 'r') as f:
obj = json.load(f)
elif hasattr(file, 'read'):
obj = json.load(file)
else:
raise TypeError('"file" must be a filename str or a file-object')
return obj
def msgpack_dump(obj, file, **kwargs):
file = str(file)
import msgpack, msgpack_numpy as m
kwargs.setdefault('allow_np', True)
allow_np = kwargs.pop('allow_np')
if allow_np:
kwargs.setdefault('default', m.encode)
kwargs.setdefault('use_bin_type', True)
try:
with open(file, 'wb') as fp:
msgpack.pack(obj, fp, **kwargs)
except Exception as e:
logging.warning(f'{e}')
logging.warning('cannot dump')
obj = copy.deepcopy(obj)
obj2 = to_json_format(obj)
with open(file, 'wb') as fp:
msgpack.pack(obj2, fp, **kwargs)
logging.warning('dump success')
def msgpack_dumps(obj, **kwargs):
import msgpack, msgpack_numpy as m
kwargs.setdefault('allow_np', True)
allow_np = kwargs.pop('allow_np')
if allow_np:
kwargs.setdefault('default', m.encode)
kwargs.setdefault('use_bin_type', True)
return msgpack.packb(obj, **kwargs)
def msgpack_load(file, **kwargs):
assert osp.exists(file)
import msgpack, gc, msgpack_numpy as m
gc.disable()
kwargs.setdefault('allow_np', True)
allow_np = kwargs.pop('allow_np')
if allow_np:
kwargs.setdefault('object_hook', m.decode)
kwargs.setdefault('use_list', False)
kwargs.setdefault('raw', False)
with open(file, 'rb') as f:
res = msgpack.unpack(f, **kwargs)
gc.enable()
return res
def msgpack_loads(file, **kwargs):
import msgpack, gc, msgpack_numpy as m
gc.disable()
kwargs.setdefault('use_list', False)
kwargs.setdefault('raw', False)
# todo support numpy
obj = msgpack.unpackb(file, **kwargs)
gc.enable()
return obj
def append_file(line, file=None):
file = file or 'append.txt'
with open(file, 'a') as f:
f.writelines(line + '\n')
def write_list(file, l, sort=False, delimiter=' ', fmt='%.18e'):
l = np.array(l)
if sort:
l = np.sort(l, axis=0)
np.savetxt(file, l, delimiter=delimiter, fmt=fmt)
class AsyncDumper(mp.Process):
def __init__(self):
self.queue = mp.Queue()
super(AsyncDumper, self).__init__()
def run(self):
while True:
data, out_file = self.queue.get()
if data is None:
break
pickle_dump(data, out_file)
def dump(self, obj, filename):
self.queue.put((obj, filename))
def aria(url, dir_, fn):
return shell(f'aria2c -c -s16 -k1M -x16 "{url}" -o "{fn}" -d "{dir_}"', )
def hostname():
msg = shell('hostname')[0]
return msg.strip('\n')
def shell(cmd, block=True, return_msg=True, verbose=True, timeout=None):
import os
my_env = os.environ.copy()
home = os.path.expanduser('~')
my_env['PATH'] = home + "/anaconda3/bin/:" + my_env['PATH']
my_env['http_proxy'] = ''
my_env['https_proxy'] = ''
if verbose:
logging.info('cmd is ' + cmd)
if block:
# subprocess.call(cmd.split())
task = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=my_env,
preexec_fn=os.setsid
)
if return_msg:
msg = task.communicate(timeout=timeout)
msg = [msg_.decode('utf-8') for msg_ in msg]
if msg[0] != '' and verbose:
logging.info('stdout {}'.format(msg[0]))
if msg[1] != '' and verbose:
logging.error(f'stderr {msg[1]}, cmd {cmd}')
return msg
else:
return task
else:
logging.debug('Non-block!')
task = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=my_env,
preexec_fn=os.setsid
)
return task
def ln(path, to_path):
if osp.exists(to_path):
print('error! exist ' + to_path)
path = osp.abspath(path)
cmd = "ln -s " + path + " " + to_path
# print(cmd)
proc = subprocess.Popen(cmd, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return proc
def tar(path, to_path=None):
if not osp.exists(path):
return
if not osp.exists(to_path):
mkdir_p(to_path)
if os.path.exists(to_path) and not len(os.listdir(to_path)) == 0:
rm(path)
return
if to_path is not None:
cmd = "tar xf " + path + " -C " + to_path
print(cmd)
else:
cmd = "tar xf " + path
shell(cmd, block=True)
if os.path.exists(path):
rm(path)
def rm(path, block=True, remove=False):
path = osp.abspath(path)
if not osp.exists(path):
    logging.info(f'no need rm {path}')
    return
stdout, _ = shell('which trash', verbose=False)
if remove:
return shell(f'rm -rf "{path}"', block=block)
if 'trash' not in stdout:
dst = glob.glob('{}.bak*'.format(path))
parsr = re.compile(r'{}.bak(\d+?)'.format(path))
used = [0, ]
for d in dst:
m = re.match(parsr, d)
if not m:
used.append(0)
elif m.groups()[0] == '':
used.append(0)
else:
used.append(int(m.groups()[0]))
dst_path = '{}.bak{}'.format(path, max(used) + 1)
cmd = 'mv {} {} '.format(path, dst_path)
return shell(cmd, block=block)
else:
return shell(f'trash -r "{path}"', block=block)
def show_img(path):
from IPython.display import Image
fig = Image(filename=path)
return fig
def plt_imshow(img, ax=None, keep_ori_size=False, inp_mode='rgb'):
img = to_img(img)
if inp_mode == 'bgr':
img = img[..., ::-1]
if ax is None:
h, w, = img.shape[0], img.shape[1]
inchh = h / 100
inchw = w / 100
if keep_ori_size:
plt.figure(figsize=(inchw, inchh,))
else:
plt.figure()
plt.imshow(img)
plt.axis('off')
else:
ax.imshow(img)
ax.set_yticks([])
ax.set_xticks([])
ax.axis('off')
def plt_imshow_board(img, ax=None, color=None):
img = to_img(img)
if ax is None:
plt.figure()
plt.imshow(img)
plt.axis('off')
else:
ax.imshow(img)
import matplotlib.patches as patches
M, N = img.shape[0], img.shape[1]
line = [(0, 0), (0, M),
(N, M), (N, 0),
]
path = patches.Polygon(line, facecolor='none', edgecolor=color,
linewidth=5, closed=True, joinstyle='round')
ax.add_patch(path)
ax.axis('off')
ax.set_yticks([])
ax.set_xticks([])
margin = 2
ax.set_xlim(-margin, N + margin)
ax.set_ylim(M + margin, -margin)
def plt_imshow_tensor(imgs, ncol=10, limit=None):
import torchvision
if isinstance(imgs, list):
imgs = np.asarray(imgs)
if imgs.shape[-1] == 3:
imgs = np.transpose(imgs, (0, 3, 1, 2))
imgs_thumb = torchvision.utils.make_grid(
to_torch(imgs), normalize=False, scale_each=True,
nrow=ncol, ).numpy()
imgs_thumb = to_img(imgs_thumb)
maxlen = max(imgs_thumb.shape)
if limit is not None:
imgs_thumb = cvb.resize_keep_ar(imgs_thumb, limit, limit, )
# print(imgs_thumb.shape)
plt_imshow(imgs_thumb, keep_ori_size=True)
def plt2tensor():
import io
from torchvision import transforms as trans
from PIL import Image
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
roc_curve = Image.open(buf)
roc_curve_tensor = trans.ToTensor()(roc_curve)
return roc_curve_tensor
def to_img(img, target_shape=None):
from PIL import Image
img = np.asarray(img)
img = img.copy()
shape = img.shape
if len(shape) == 3 and shape[-1] == 4:
img = img[..., :3]
if len(shape) == 3 and shape[0] == 3:
img = img.transpose(1, 2, 0)
img = np.array(img, order='C')
# if img.dtype == np.float32 or img.dtype == np.float64:
img -= img.min()
img = img / (img.max() + 1e-6)
img *= 255
img = np.array(img, dtype=np.uint8)
if len(shape) == 3 and shape[-1] == 1:
img = img[..., 0]
if target_shape:
# img = np.uint8(Image.fromarray(img).resize(target_shape, Image.ANTIALIAS)) # 128,256
img = img.astype('float32')
img = to_torch(img).unsqueeze(0).unsqueeze(0)
img = F.interpolate(img, size=(256, 128), mode='bilinear', align_corners=True)
img = img.squeeze(0).squeeze(0)
img = to_numpy(img).astype('uint8')
return img.copy()
def plt_matshow(mat, figsize=(6, 6)):
fig, ax = plt.subplots(figsize=figsize)
ax.matshow(mat)
ax.axis('off')
# plt.figure(figsize=(6,6))
# plt.matshow(mat, fignum=1)
# plt.axis('off')
# plt.colorbar()
def apply_colormap_on_image(org_im, activation, colormap_name='viridis', alpha=.4, thresh=30):
"""
Apply heatmap on image
Args:
org_img (PIL img): Original image
activation_map (numpy arr): Activation map (grayscale) 0-255
colormap_name (str): Name of the colormap
"""
import matplotlib.cm as mpl_color_map
from PIL import Image
org_im = Image.fromarray(to_img(org_im))
# Get colormap
color_map = mpl_color_map.get_cmap(colormap_name)
no_trans_heatmap = color_map(activation)
# Change alpha channel in colormap to make sure original image is displayed
heatmap = copy.copy(no_trans_heatmap)
heatmap[:, :, 3] = alpha
heatmap[:, :, 3][activation < thresh] = 0
heatmap = Image.fromarray((heatmap * 255).astype(np.uint8))
no_trans_heatmap = Image.fromarray((no_trans_heatmap * 255).astype(np.uint8))
# Apply heatmap on image
heatmap_on_image = Image.new("RGBA", org_im.size)
heatmap_on_image = Image.alpha_composite(heatmap_on_image, org_im.convert('RGBA'))
heatmap_on_image = Image.alpha_composite(heatmap_on_image, heatmap)
no_trans_heatmap = to_img(no_trans_heatmap)
heatmap_on_image = to_img(heatmap_on_image)
return no_trans_heatmap, heatmap_on_image
def show_pdf(path):
from IPython.display import IFrame
path = osp.relpath(path)
return IFrame(path, width=600, height=300)
def print_graph_info():
import tensorflow as tf
graph = tf.get_default_graph()
graph.get_tensor_by_name("Placeholder:0")
layers = [op.name for op in graph.get_operations() if op.type ==
"Placeholder"]
print([graph.get_tensor_by_name(layer + ":0") for layer in layers])
print([op.type for op in graph.get_operations()])
print([n.name for n in tf.get_default_graph().as_graph_def().node])
print([v.name for v in tf.global_variables()])
print(graph.get_operations()[20])
def chdir_to_root(fn):
def wrapped_fn(*args, **kwargs):
restore_path = os.getcwd()
os.chdir(root_path)
res = fn(*args, **kwargs)
os.chdir(restore_path)
return res
return wrapped_fn
def scp(src, dest, dry_run=False):
cmd = ('scp -r ' + src + ' ' + dest)
print(cmd)
if dry_run:
return
return shell(cmd, block=False)
def read_list(file, delimi=" "):
if osp.exists(file):
lines = np.genfromtxt(file, dtype='str', delimiter=delimi)
return lines
else:
return []
def cp(from_path, to):
dst_dir = osp.dirname(to)
if not osp.exists(dst_dir):
mkdir_p(dst_dir)
shell('cp -r ' + from_path + ' ' + to)
def mv(from_path, to):
if isinstance(from_path, list):
for from_ in from_path:
mv(from_, to)
elif isinstance(to, list):
for to_ in to:
mv(from_path, to_)
else:
shell(f'''mv "{from_path}" "{to}"''')
def dict_concat(d_l):
d1 = d_l[0].copy()
for d in d_l[1:]:
d1.update(d)
return d1
def dict_update(to, from_dict, must_exist=True):
to = to.copy()
from_dict = from_dict.copy()
to = edict(to)
from_dict = edict(from_dict)
for k, v in from_dict.items():
if k not in to:
if not must_exist:
logging.debug('ori dict do not have key {}'.format(k))
else:
raise ValueError('ori dict do not have key {}'.format(k))
try:
assert to[k] == v
except Exception as inst:
logging.debug(
'update ori key {} from {} to {}'.format(k, to.get(k, None), v))
to[k] = v
return to
def face_detect(path='/data1/xinglu/prj/test.jpg'):
cmd = f'''
curl -X POST "https://api-cn.faceplusplus.com/facepp/v3/detect" \
-F "api_key=<KEY>" \
-F "api_secret=<KEY>" \
-F "image_file=@{path}" \
-F "return_landmark=1" \
-F "return_attributes=gender,age,headpose,facequality"
'''
out, err = shell(cmd)
return out
def clean_name(name):
if isinstance(name, list):
return [clean_name(n) for n in name]
import re
name = re.findall('([a-zA-Z0-9/-]+)(?::\d+)?', name)[0]
name = re.findall('([a-zA-Z0-9/-]+)(?:_\d+)?', name)[0]
return name
class Struct(object):
def __init__(self, entries):
self.__dict__.update(entries)
def __getitem__(self, item):
return self.__dict__[item]
def dict2obj(d):
return Struct(d)
def dict2str(others):
name = ''
for key, val in others.items():
name += '_' + str(key)
if isinstance(val, dict):
name += '_' + dict2str(val)
elif isinstance(val, list):
for val_ in val:
name += '-' + str(val_)
else:
name += '_' + str(val)
return name
def list2str(li, delimier=''):
name = ''
for name_ in li:
name += (str(name_) + delimier)
return name
def rsync(from_, to):
cmd = ('rsync -avzP ' + from_ + ' ' + to)
print(cmd)
return shell(cmd, block=False)
def i_vis_graph(graph_def, max_const_size=32):
"""Visualize TensorFlow graph."""
import tensorflow as tf
from IPython.display import display, HTML, SVG
import os
def strip_consts(graph_def, max_const_size=32):
"""Strip large constant values from graph_def."""
import tensorflow as tf
strip_def = tf.GraphDef()
for n0 in graph_def.node:
n = strip_def.node.add()
n.MergeFrom(n0)
if n.op == 'Const':
tensor = n.attr['value'].tensor
size = len(tensor.tensor_content)
if size > max_const_size:
tensor.tensor_content = tf.compat.as_bytes(
"<stripped %d bytes>" % size)
return strip_def
if hasattr(graph_def, 'as_graph_def'):
graph_def = graph_def.as_graph_def()
strip_def = strip_consts(graph_def, max_const_size=max_const_size)
code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)), id='graph' + str(np.random.rand()))
iframe = """
<iframe seamless style="width:800px;height:620px;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '"'))
display(HTML(iframe))
def my_wget(fid, fname):
shell('rm -rf /tmp/cookies.txt')
task = shell(
f"wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={fid}' -O- ",
return_msg=False
)
out, err = task.communicate()
out = out.decode('utf-8')
print(out)
if len(re.findall(r'.*confirm=([0-9a-zA-Z_]+).*', out)) == 0:
print('no confirm continue')
return 100
confirm = re.findall(r'.*confirm=([0-9a-zA-Z_]+).*', out)[0]
if task.poll() != 0:
print(confirm)
raise ValueError('fail')
task = shell(
f"wget -c --load-cookies /tmp/cookies.txt 'https://docs.google.com/uc?export=download&confirm={confirm}&id={fid}' -O {fname}",
block=False)
return task
# caution: may be shallow!
def to_json_format(obj, allow_np=True):
import collections, torch
if isinstance(obj, np.ndarray):
if obj.dtype == object:
return obj.tolist()
else:
if allow_np:
return np.asarray(obj, order="C")
else:
return to_json_format(obj.tolist())
elif isinstance(obj, (list, tuple, collections.deque)):
return [to_json_format(subobj, allow_np) for subobj in obj]
elif isinstance(obj, dict):
for key in obj.keys():
obj[key] = to_json_format(obj[key], allow_np)
return obj
elif isinstance(obj, (int, str, float)):
return obj
elif isinstance(obj, torch.Tensor):
return obj.cpu().numpy()
elif isinstance(obj, np.int64):
return int(obj)
elif isinstance(obj, np.float32):
return float(obj)
elif obj is None:
return obj
else:
raise ValueError(f'unkown {type(obj)}')
# return obj
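# Quick sketch of to_json_format on a nested record (values are made up); with
# allow_np=False everything is converted to plain Python types suitable for json_dump.
def _demo_to_json_format():
    rec = {'idx': np.int64(3),
           'score': np.float32(0.5),
           'feat': np.arange(4, dtype=np.float32),
           'tags': ('a', 'b')}
    return to_json_format(rec, allow_np=False)
    # -> {'idx': 3, 'score': 0.5, 'feat': [0.0, 1.0, 2.0, 3.0], 'tags': ['a', 'b']}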
# todo imitate this
# def default_collate(batch):
# "Puts each data field into a tensor with outer dimension batch size"
# if torch.is_tensor(batch[0]):
# out = None
# if _use_shared_memory:
# # If we're in a background process, concatenate directly into a
# # shared memory tensor to avoid an extra copy
# # batch
# numel = sum([x.numel() for x in batch])
# storage = batch[0].storage()._new_shared(numel)
# out = batch[0].new(storage)
# return torch.stack(batch, 0, out=out)
# elif type(batch[0]).__module__ == 'numpy':
# elem = batch[0]
# if type(elem).__name__ == 'ndarray':
# return torch.stack([torch.from_numpy(b) for b in batch], 0)
# if elem.shape == (): # scalars
# py_type = float if elem.dtype.name.startswith('float') else int
# return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
# elif isinstance(batch[0], int):
# return torch.LongTensor(batch)
# elif isinstance(batch[0], float):
# return torch.DoubleTensor(batch)
# elif isinstance(batch[0], string_classes):
# return batch
# elif isinstance(batch[0], collections.Mapping):
# return {key: default_collate([d[key] for d in batch]) for key in batch[0]}
# elif isinstance(batch[0], collections.Sequence):
# transposed = zip(*batch)
# return [default_collate(samples) for samples in transposed]
#
# raise TypeError(("batch must contain tensors, numbers, dicts or lists; found {}"
# .format(type(batch[0]))))
def preprocess(img, landmark, **kwargs):
from skimage import transform as trans
if isinstance(img, str):
img = cvb.read_img(img, **kwargs)
assert img is not None
img = img.copy()
# image_size = []
# str_image_size = kwargs.get('image_size', '')
# if len(str_image_size) > 0:
# image_size = [int(x) for x in str_image_size.split(',')]
# if len(image_size) == 1:
# image_size = [image_size[0], image_size[0]]
# assert len(image_size) == 2
# assert image_size[0] == 112
# assert image_size[0] == 112 or image_size[1] == 96
image_size = [112, 112]
assert len(image_size) == 2
src = np.array([
[30.2946, 51.6963],
[65.5318, 51.5014],
[48.0252, 71.7366],
[33.5493, 92.3655],
[62.7299, 92.2041]], dtype=np.float32)
if image_size[1] == 112:
src[:, 0] += 8.0
dst = landmark.astype(np.float32)
dst = dst.reshape(-1, 2) # todo, this means dst must be 5 rows
if dst.shape[0] == 3:
src = src[[0, 1, 2], :]
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2, :]
warped = cv2.warpAffine(img, M, (image_size[1], image_size[0]), borderValue=0.0)
# tform3 = trans.ProjectiveTransform()
# tform3.estimate(src, dst)
# warped = trans.warp(img, tform3, output_shape=_shape)
return warped
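# Hypothetical call sketch for the 5-point alignment above: `landmark` is five (x, y)
# pixel coordinates (two eyes, nose tip, two mouth corners) in the source image.
#   lmk = np.array([[98, 118], [156, 116], [128, 152], [106, 184], [150, 183]], dtype=np.float32)
#   aligned = preprocess('face.jpg', lmk)   # returns a 112x112 aligned crop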
def face_orientation(frame, landmarks):
size = frame.shape # (height, width, color_channel)
image_points = np.array([
(landmarks[4], landmarks[5]), # Nose tip
# (landmarks[10], landmarks[11]), # Chin
(landmarks[0], landmarks[1]), # Left eye left corner
(landmarks[2], landmarks[3]), # Right eye right corne
(landmarks[6], landmarks[7]), # Left Mouth corner
(landmarks[8], landmarks[9]) # Right mouth corner
], dtype="double")
model_points = np.array([
(0.0, 0.0, 0.0), # Nose tip
# (0.0, -330.0, -65.0), # Chin
(-165.0, 170.0, -135.0), # Left eye left corner
(165.0, 170.0, -135.0), # Right eye right corne
(-150.0, -150.0, -125.0), # Left Mouth corner
(150.0, -150.0, -125.0) # Right mouth corner
])
# Camera internals
center = (size[1] / 2, size[0] / 2)
focal_length = center[0] / np.tan(60 / 2 * np.pi / 180)
camera_matrix = np.array(
[[focal_length, 0, center[0]],
[0, focal_length, center[1]],
[0, 0, 1]], dtype="double"
)
dist_coeffs = np.zeros((4, 1)) # Assuming no lens distortion
(success, rotation_vector, translation_vector) = cv2.solvePnP(
model_points, image_points, camera_matrix,
dist_coeffs,
# flags=cv2.SOLVEPNP_ITERATIVE
)
axis = np.float32([[500, 0, 0],
[0, 500, 0],
[0, 0, 500]])
imgpts, jac = cv2.projectPoints(axis, rotation_vector, translation_vector, camera_matrix, dist_coeffs)
modelpts, jac2 = cv2.projectPoints(model_points, rotation_vector, translation_vector, camera_matrix,
dist_coeffs)
rvec_matrix = cv2.Rodrigues(rotation_vector)[0]
proj_matrix = np.hstack((rvec_matrix, translation_vector))
eulerAngles = cv2.decomposeProjectionMatrix(proj_matrix)[6]
pitch, yaw, roll = [math.radians(_) for _ in eulerAngles]
pitch = math.degrees(math.asin(math.sin(pitch)))
roll = -math.degrees(math.asin(math.sin(roll)))
yaw = math.degrees(math.asin(math.sin(yaw)))
return imgpts, modelpts, (str(int(roll)), str(int(pitch)), str(int(yaw))), (landmarks[4], landmarks[5])
def cal_sim(yyfea, yy2fea):
from scipy.spatial.distance import cdist
dist = cdist(yyfea, yy2fea, metric='cosine')
cossim = 1 - dist
return cossim
def get_normalized_pnt(nose, pnt):
nose = np.asarray(nose).reshape(2, )
pnt = np.asarray(pnt).reshape(2, )
dir = pnt - nose
norm = np.sqrt((dir ** 2).sum())
dir /= norm
pnt = nose + dir * 50
return pnt
# random_colors = [ tuple(np.random.random_integers(0, 255, size=3)) for i in range(19) ]
random_colors = [(0, 255, 0), (255, 0, 0), (0, 0, 255),
(171, 46, 62),
(105, 246, 7),
(19, 73, 138),
(31, 210, 138),
(35, 125, 76),
(86, 6, 147),
(249, 24, 45),
(241, 214, 87),
(102, 255, 173),
(202, 146, 236),
(163, 196, 242),
(24, 48, 244),
(187, 142, 60),
(20, 146, 34),
(226, 97, 210),
(184, 40, 125),
(208, 152, 12),
(108, 158, 78),
(91, 145, 136),
]
def df_unique(df):
def is_all_same(lst):
lst = [lsti if not isinstance(lsti, np.ndarray) else lsti.tolist() for lsti in lst]
res = [lsti == lst[0] for lsti in lst]
try:
return np.asarray(res)
"""!
@brief A dataset creation which is compatible with pytorch framework
and much faster in loading time depending on the new version of
loading only the appropriate files that might be needed
@author <NAME> {<EMAIL>}
@copyright University of Illinois at Urbana-Champaign
"""
import torch
import argparse
import os
import sys
import glob2
import numpy as np
import joblib
import scipy.io.wavfile as wavfile
from torch.utils.data import Dataset, DataLoader
from pprint import pprint
root_dir = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../../')
sys.path.insert(0, root_dir)
import spatial_two_mics.utils.audio_mixture_constructor as \
mixture_creator
import spatial_two_mics.config as config
import spatial_two_mics.data_generator.dataset_storage as \
dataset_storage
class PytorchMixtureDataset(Dataset):
"""
This is a general compatible class for pytorch datasets.
@note Each instance of the dataset should be stored using
joblib.dump() and this is the way that it would be returned.
After some transformations.
The path of all datasets should be defined inside config.
    All datasets should be organized into train / test / val
    subfolders, each containing all the available files.
"""
def __init__(self,
dataset='timit',
partition='train',
n_samples=[512, 128, 256],
n_sources=2,
genders=['f', 'm'],
n_fft=512,
win_len=512,
hop_length=128,
mixture_duration=2.0,
force_delays=[-1, 1],
get_top=None,
labels_mask='duet',
**kwargs):
self.dataset_params = {
'dataset': dataset,
'n_samples': n_samples,
'n_sources': n_sources,
'genders': genders,
'force_delays': force_delays
}
if labels_mask == 'duet' or labels_mask == 'ground_truth':
self.selected_mask = labels_mask
else:
raise NotImplementedError("There is no available mask "
"called: {}".format(labels_mask))
self.partition = partition
dataset_name = dataset_storage.create_dataset_name(
self.dataset_params)
self.dataset_dirpath = os.path.join(
config.DATASETS_DIR,
dataset_name,
partition)
self.dataset_stats_path = self.dataset_dirpath + '_stats'
if not os.path.isdir(self.dataset_dirpath):
raise IOError("Dataset folder {} not found!".format(
self.dataset_dirpath))
else:
print("Loading files from {} ...".format(
self.dataset_dirpath))
self.mixture_folders = glob2.glob(os.path.join(
self.dataset_dirpath, '*'))
if get_top is not None:
self.mixture_folders = self.mixture_folders[:get_top]
self.n_samples = len(self.mixture_folders)
# preprocess -- store all absolute spectra values for faster
# loading during run time
self.store_directly_abs_spectra()
def __len__(self):
return self.n_samples
def __getitem__(self, idx):
"""!
Depending on the selected partition it returns accordingly
the following objects:
if self.partition == 'train':
(abs_tfs, selected_mask)
else if partition == 'test' or 'val'
(abs_tfs, selected_mask, wavs_list, real_tfs, imag_tfs)"""
mix_folder = self.mixture_folders[idx]
try:
abs_tfs = joblib.load(os.path.join(mix_folder, 'abs_tfs'))
except:
raise IOError("Failed to load data from path: {} "
"for absolute spectra.".format(mix_folder))
try:
if self.selected_mask == 'duet':
mask = joblib.load(os.path.join(mix_folder,
'soft_labeled_mask'))
else:
mask = joblib.load(os.path.join(mix_folder,
'ground_truth_mask'))
except:
raise IOError("Failed to load data from path: {} "
"for tf label masks".format(mix_folder))
if self.partition == 'train':
return abs_tfs, mask
try:
real_p = os.path.join(mix_folder, 'real_tfs')
imag_p = os.path.join(mix_folder, 'imag_tfs')
wavs_p= os.path.join(mix_folder, 'wavs')
real_tfs = joblib.load(real_p)
imag_tfs = joblib.load(imag_p)
wavs_list = joblib.load(wavs_p)
            wavs_list = np.array(wavs_list)
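# A hedged usage sketch for PytorchMixtureDataset (assumes the mixture folders
# described above already exist under spatial_two_mics.config.DATASETS_DIR;
# all parameter values are only examples):
#   train_set = PytorchMixtureDataset(dataset='timit', partition='train',
#                                     n_sources=2, labels_mask='duet', get_top=16)
#   loader = DataLoader(train_set, batch_size=4, shuffle=True)
#   for abs_tfs, mask in loader:   # per __getitem__, 'train' yields (abs_tfs, mask)
#       break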
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 06 08:39:25 2016
@author: shilei
Loader and checker for M3D-C1 RMP output files
"""
import numpy as np
from scipy.io.netcdf import netcdf_file
from scipy.interpolate import interp1d, RectBivariateSpline, UnivariateSpline
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from sdp.math.smooth import smooth
class RmpLoader(object):
"""Loader for M3D-C1 RMP output files
Initialization
****************
__init__(self, filename, mode=r'full', scale=1, debug=False)
Initialize the loader
filename should contain the full path to the netcdf file generated by
M3DC1.
:param string filename: full or relative path to the netcdf file
generated by M3DC1
:param string mode: loading mode. mode='full' will automatically read
all the desired data in the file, and is the
default mode. mode='eq_only' reads only the
equilibrium, no RMP variables. mode='least' only
initialize the object, nothing will be read.
:param float scale: scale factor for RMP perturbations.
Optional, default to be 1.
:param bool debug:
If True, alpha_mn will also be calculated by loaded dB_mn, and
additional interpolators will be created. All debug quantities are
        hidden with a preceding '_' in front of the names.
        Optional, default to be False.
Attributes
**********
Equilibrium attributes:
npsi: grid number in psi
mpol: grid number in theta
Example
*******
"""
def __init__(self, filename, mode=r'full', scale=1, debug=False,
boundary_inner=0, boundary_outer=0):
"""Initialize the loader
filename should contain the full path to the netcdf file generated by
M3DC1.
:param string filename: full or relative path to the netcdf file
generated by M3DC1
:param string mode: loading mode. mode='full' will automatically read
all the desired data in the file, and is the
default mode. mode='eq_only' reads only the
equilibrium, no RMP variables. mode='least' only
initialize the object, nothing will be read.
:param float scale: scale factor for RMP perturbations.
Optional, default to be 1.
:param bool debug:
If True, alpha_mn will also be calculated by loaded dB_mn, and
additional interpolators will be created. All debug quantities are
            hidden with a preceding '_' in front of the names.
Optional, default to be False.
:param float boundary_inner:
Artificial linear decay boundary inner location in the unit of normalized psi_p
:param float boundary_outer:
Artificial linear decay boundary outer location in the unit of normalized psi_p
"""
# check validity of the input parameters
# inner boundary must be on the right of the outer boundary
assert boundary_inner >= boundary_outer
self.filename = filename
self.scale = scale
self._debug = debug
self._boundary_inner = boundary_inner
self._boundary_outer = boundary_outer
if mode == 'least':
return
else:
self.load_equilibrium(self.filename)
if mode == 'full':
self.load_rmp(self.filename)
self._apply_boundary_decay()
self.generate_interpolators()
def load_equilibrium(self, filename):
"""load the equilibrium data from M3DC1 output netcdf file
:param string filename: full or relative path to the netcdf file
generated by M3DC1
"""
m3d_raw = netcdf_file(filename, 'r')
# npsi is the number of grid points in psi
# mpol is the number of grid points in theta
self.npsi = m3d_raw.dimensions['npsi']
self.mpol = m3d_raw.dimensions['mpol']
# 1D quantities are function of psi only
# we have
# poloidal flux, axis value zero, always increase, in weber
self.poloidal_flux = np.copy(m3d_raw.variables['flux_pol'].data)
# poloidal flux, in weber/radian
self.psi_p = np.copy(m3d_raw.variables['psi'].data)
self.psi_p -= self.psi_p[0]
if (self.psi_p[-1] < 0):
self.isign = -1
else:
self.isign = 1
# psi_abs is the flipped psi_p so it's increasing.
# WARNING: SPECIAL CARE IS NEEDED WHEN CALCULATING MAGNETIC FIELD LINE
self.psi_abs = self.psi_p*self.isign
# normalized psi, normalized to psi_wall
self.psi_n = np.copy(m3d_raw.variables['psi_norm'].data)
# uniform theta is generated, including the end point at theta=2pi
self.theta = np.linspace(0, 2*np.pi, self.mpol+1)
# toroidal current enclosed in a flux surface
self.I = np.copy(m3d_raw.variables['current'].data)
self.I *= 2e-7
# R B_phi is also a flux function
self.F = np.copy(m3d_raw.variables['F'].data)
# equilibrium electron density
self.ne = np.copy(m3d_raw.variables['ne'].data)
# safety factor
self.q = np.copy(m3d_raw.variables['q'].data)
# total pressure
self.p = np.copy(m3d_raw.variables['p'].data)
# electron pressure
self.pe = np.copy(m3d_raw.variables['pe'].data)
# 2D quantities will depend on theta, we'll add one end point at
# theta=2pi, and assign periodic value there
# R in (R, PHI, Z) coordinates
self.R = np.empty((self.npsi, self.mpol+1))
self.R[:, :-1] = m3d_raw.variables['rpath'][:,:]
self.R[:, -1] = self.R[:, 0]
# Z in (R, PHI, Z)
self.Z = np.empty((self.npsi, self.mpol+1))
self.Z[:, :-1] = m3d_raw.variables['zpath'][:,:]
self.Z[:, -1] = self.Z[:, 0]
# poloidal magnetic field
self.B_p = np.empty((self.npsi, self.mpol+1))
self.B_p[:, :-1] = m3d_raw.variables['Bp'][:,:]
self.B_p[:, -1] = self.B_p[:, 0]
# Jacobian
self.Jacobian = np.empty((self.npsi, self.mpol+1))
self.Jacobian[:, :-1] = m3d_raw.variables['jacobian'][:,:]
self.Jacobian[:, -1] = self.Jacobian[:, 0]
m3d_raw.close()
def load_rmp(self, filename):
"""load the resonant magnetic perturbations
:param string filename: full or relative path to the netcdf file
generated by M3DC1
"""
#todo coordinates convention needs to be sorted out
m3d_raw = netcdf_file(filename, 'r')
# mode numbers in m
self.m = np.copy(m3d_raw.variables['m'].data)
# mode number in n
self.n = m3d_raw.ntor
# In our convention, alpha is a complex number, and the resonant form
# has cos and sin part on real and imaginary parts respectively
self.alpha_m = np.copy(m3d_raw.variables['alpha_real'].data) + \
1j*np.copy(m3d_raw.variables['alpha_imag'].data)
# Convert the unit of alpha_m to be consistent with SI units
# Note that the B_mn quantities are in Gauss while the equilibrium
# B field quantities are in Tesla and meter
self.alpha_m *= 1e-4
# rescale the perturbation to desired level
self.alpha_m *= self.scale
# dB_m is the perpendicular component of perturbed magnetic field
# Fourier decomposition in theta
self.dB_m = np.copy(m3d_raw.variables['bmn_real'].data) + \
1j* np.copy(m3d_raw.variables['bmn_imag'].data)
# dB_m is in Gauss, we need to convert it into Tesla
self.dB_m *= 1e-4
self.dB_m *= self.scale
self.A = np.copy(m3d_raw.variables['area'].data)
m3d_raw.close()
# check if the mode number is inversed, if so, change it back to
# increasing order
if self.m[0]>self.m[-1]:
self.m = np.fft.ifftshift(self.m[::-1])
self.alpha_m = np.fft.ifftshift(self.alpha_m[:, ::-1], axes=-1)
self.dB_m = np.fft.ifftshift(self.dB_m[:,::-1], axes=-1)
else:
self.m = np.fft.ifftshift(self.m)
self.alpha_m = np.fft.ifftshift(self.alpha_m[:, :], axes=-1)
self.dB_m = np.fft.ifftshift(self.dB_m[:,:], axes=-1)
# for alpha and dalpha_dtheta values in real space, we add the
# theta=2pi end point, and assign periodic values
self.alpha = np.empty((self.npsi, self.mpol+1), dtype=np.complex)
self.dalpha_dtheta = np.empty((self.npsi, self.mpol+1),
dtype=np.complex)
self.dB = np.empty((self.npsi, self.mpol+1), dtype=np.complex)
# Then, the real space alpha can be obtained by FFT. Check Nate's note
# on the normalization convention, as well as scipy's FFT
# documentation.
self.alpha[:, :-1] = np.fft.fft(self.alpha_m, axis=-1)
self.alpha[:, -1] = self.alpha[:, 0]
# The derivatives respect to theta can also be calculated by FFT
self.dalpha_dtheta[:, :-1] = np.fft.fft(-1j*self.m*self.alpha_m,
axis=-1)
self.dalpha_dtheta[:, -1] = self.dalpha_dtheta[:, 0]
# Smooth the derivative for 2 passes of 121
# smooth(self.dalpha_dtheta, periodic=1, passes=2)
# delta_B is also calculated by FFT
self.dB[:, :-1] = np.fft.fft(self.dB_m, axis=-1)
self.dB[:, -1] = self.dB[:, 0]
if self._debug:
# calculate alpha_m from dB_m and get alpha
self._calc_alpha_m()
self._alpha_c = np.empty((self.npsi, self.mpol+1),
dtype=np.complex)
self._dalpha_dtheta_c = np.empty((self.npsi, self.mpol+1),
dtype=np.complex)
self._alpha_c[:,:-1] = np.fft.fft(self._alpha_mc, axis=-1)
self._alpha_c[:,-1] = self._alpha_c[:,0]
self._dalpha_dtheta_c[:, :-1] = np.fft.fft(-1j*self.m*\
self._alpha_mc,
axis=-1)
self._dalpha_dtheta_c[:, -1] = self._dalpha_dtheta_c[:, 0]
@property
def _linear_boundary(self):
"""evaluate linear boundary function on the loaded psi_n locations
The function is symmetric. In the center, (inner, 1-inner), it's 1;
outer part, (0, outer) & (1-outer, 1), it's 0; and linearly connected
between 0 and 1 in the transition regions, (outer, inner) & (1-inner,
1-outer)
It can be evaluated as the multiplication of two 1-side step-like
functions, each of which is a linear function whose value is bounded
within (0,1).
:param array(float) psi_n: evaluation locations in normalized psi_p
unit
"""
boundary_width = self._boundary_inner - self._boundary_outer
f_left = np.ones_like(self.psi_n)
if boundary_width>0:
f_left = (self.psi_n-self._boundary_outer)/boundary_width
f_left[self.psi_n < self._boundary_outer] = 0
f_left[self.psi_n > self._boundary_inner] = 1
f_right = np.ones_like(self.psi_n)
if boundary_width>0:
f_right = (1-self.psi_n-self._boundary_outer)/boundary_width
f_right[self.psi_n > 1-self._boundary_outer] = 0
f_right[self.psi_n < 1-self._boundary_inner] = 1
return f_left*f_right
def _apply_boundary_decay(self):
"""Linear decay boundary is applied to alpha and dalpha_dtheta values"""
_boundary = self._linear_boundary
self.alpha = self.alpha * _boundary[:,np.newaxis]
self.dalpha_dtheta = self.dalpha_dtheta * _boundary[:,np.newaxis]
def _calc_alpha_m(self):
""" Calculate alpha_mn based on the load in B_mn, F, I, A, and a given
n.
"""
res = self.m*self.F[:, np.newaxis]+self.n*self.I[:, np.newaxis]
self._alpha_mc = 1j*self.A[:, np.newaxis]*self.dB_m/ ((2*np.pi)**2*res)
def rescale(self, scale=1):
"""Rescale the perturbations"""
ratio = scale/self.scale
self.scale = scale
self.alpha *= ratio
self.alpha_m *= ratio
self.dalpha_dtheta *= ratio
self.dB_m *= ratio
if self._debug:
self._alpha_mc *= ratio
self._alpha_c *= ratio
self._dalpha_dtheta_c *= ratio
self.generate_interpolators()
def generate_interpolators(self):
""" Create the interpolators for the loaded quantities
"""
# 1-D quantities are directly interpolated on psi
self.q_interp = UnivariateSpline(self.psi_abs, self.q)
self.I_interp = UnivariateSpline(self.psi_abs, self.I)
self.F_interp = UnivariateSpline(self.psi_abs, self.F)
# 2-D quantities are interpolated on psi-theta plane
# the periodicity along theta in the values is guaranteed
self.R_interp = RectBivariateSpline(self.psi_abs, self.theta, self.R)
self.Z_interp = RectBivariateSpline(self.psi_abs, self.theta, self.Z)
self.Bp_interp = RectBivariateSpline(self.psi_abs,self.theta,self.B_p)
self.alpha_re_interp = RectBivariateSpline(self.psi_abs, self.theta,
np.real(self.alpha))
self.alpha_im_interp = RectBivariateSpline(self.psi_abs, self.theta,
np.imag(self.alpha))
self.dadt_re_interp = RectBivariateSpline(self.psi_abs,self.theta,
np.real(self.dalpha_dtheta))
self.dadt_im_interp = RectBivariateSpline(self.psi_abs,self.theta,
np.imag(self.dalpha_dtheta))
if self._debug:
self._alpha_c_re_interp = RectBivariateSpline(self.psi_abs,
self.theta,
np.real(self._alpha_c))
self._alpha_c_im_interp = RectBivariateSpline(self.psi_abs,
self.theta,
np.imag(self._alpha_c))
self._dadt_c_re_interp = RectBivariateSpline(self.psi_abs,
self.theta,
np.real(self._dalpha_dtheta_c))
self._dadt_c_im_interp = RectBivariateSpline(self.psi_abs,
self.theta,
np.imag(self._dalpha_dtheta_c))
def choose_harmonics(self, m='all'):
"""Calculate and return an alpha that only uses a chosen set of poloidal
harmonics.
:param m: the chosen poloidal harmonics, default to be 'all'
:type m: array of int, or 'all'
"""
if m=='all':
self.m_chosen = np.copy(self.m)
self.alpha[:, :-1] = np.fft.fft(self.alpha_m, axis=-1)
self.alpha[:, -1] = self.alpha[:,0]
# The derivatives respect to theta can also be calculated by FFT
self.dalpha_dtheta[:, :-1] = np.fft.fft(-1j*self.m*self.alpha_m,
axis=-1)
self.dalpha_dtheta[:, -1] = self.dalpha_dtheta[:, 0]
self._apply_boundary_decay()
self.generate_interpolators()
else:
self.m_chosen = np.array([m]).flatten()
# pick the chosen m numbers using a logical mask
chosen_mask = np.zeros_like(self.m)
for m_i in self.m_chosen:
                chosen_mask = np.logical_or(chosen_mask, np.abs(self.m) == m_i)
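# A hedged usage sketch for RmpLoader (the file name and numbers are only
# examples; boundary_inner must be >= boundary_outer, as asserted in __init__):
#   loader = RmpLoader('rmp_output.nc', mode='full', scale=0.5,
#                      boundary_inner=0.1, boundary_outer=0.05)
#   q_axis = loader.q_interp(loader.psi_abs[0])                   # 1-D spline in psi
#   alpha_r = loader.alpha_re_interp(loader.psi_abs[10], np.pi)   # 2-D spline in (psi, theta)
#   loader.choose_harmonics(m=[2, 3])   # keep only the m = 2, 3 poloidal harmonics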
import logging
import numpy as np
from scipy.special import expit
from scipy.stats import zscore
logger = logging.getLogger('pyglmnet')
def set_log_level(verbose):
"""Convenience function for setting the log level.
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
"""
if isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, str):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
logger.setLevel(verbose)
# output log to console
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter(" %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def softmax(w):
"""
Softmax function of given array of number w
"""
w = np.array(w)
maxes = np.amax(w, axis=1)
maxes = maxes.reshape(maxes.shape[0], 1)
e = np.exp(w - maxes)
dist = e / np.sum(e, axis=1, keepdims=True)
return dist
class GLM:
"""Generalized Linear Model (GLM)
    This class implements elastic-net regularized generalized linear models.
    The core algorithm is defined in the article
Parameters
----------
distr: str, distribution family in this following
'poisson' or 'normal' or 'binomial' or 'multinomial'
default: 'poisson'
alpha: float, the weighting between L1 and L2 norm in penalty term
loss function i.e.
P(beta) = 0.5*(1-alpha)*|beta|_2^2 + alpha*|beta|_1
default: 0.5
reg_lambda: array or list, array of regularized parameters of penalty term i.e.
(1/2*N) sum(y - beta*X) + lambda*P
where lambda is number in reg_lambda list
default: np.logspace(np.log(0.5), np.log(0.01), 10, base=np.exp(1))
learning_rate: float, learning rate for gradient descent,
default: 1e-4
max_iter: int, maximum iteration for the model, default: 100
threshold: float, threshold for convergence. Optimization loop will stop
below setting threshold, default: 1e-3
verbose: boolean, if True it will print output while iterating
Reference
---------
Friedman, <NAME> (2010). Regularization Paths for Generalized Linear
Models via Coordinate Descent, J Statistical Software.
https://core.ac.uk/download/files/153/6287975.pdf
"""
def __init__(self, distr='poisson', alpha=0.05,
reg_lambda=np.logspace(np.log(0.5), np.log(0.01), 10, base=np.exp(1)),
learning_rate=1e-4, max_iter=100, verbose=False):
self.distr = distr
self.alpha = alpha
self.reg_lambda = reg_lambda
self.learning_rate = learning_rate
self.max_iter = max_iter
self.fit_params = None
self.threshold = 1e-3
set_log_level(verbose)
def qu(self, z):
"""The non-linearity."""
eps = np.spacing(1)
qu = dict(poisson=np.log(1 + eps + np.exp(z)),
normal=z, binomial=expit(z),
multinomial=softmax(z))
return qu[self.distr]
def lmb(self, beta0, beta, X):
"""Conditional intensity function."""
        z = beta0 + np.dot(X, beta)
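# A hedged usage sketch for the helpers above (fit()/predict() appear later in
# the original module; all shapes and values are only illustrative):
#   glm = GLM(distr='poisson')
#   z = np.array([[-1.0, 0.0, 2.0]])
#   rates = glm.qu(z)      # softplus-like non-linearity, log(1 + eps + exp(z))
#   probs = softmax(z)     # each row sums to 1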
import numpy as np
from .._shared._geometry import polygon_clip
from ._draw import (_coords_inside_image, _line, _line_aa,
_polygon, _ellipse_perimeter,
_circle_perimeter, _circle_perimeter_aa,
_bezier_curve)
def _ellipse_in_shape(shape, center, radii, rotation=0.):
"""Generate coordinates of points within ellipse bounded by shape.
Parameters
----------
shape : iterable of ints
Shape of the input image. Must be length 2.
center : iterable of floats
(row, column) position of center inside the given shape.
radii : iterable of floats
Size of two half axes (for row and column)
rotation : float, optional
Rotation of the ellipse defined by the above, in radians
        in range (-PI, PI), in counter-clockwise direction,
with respect to the column-axis.
Returns
-------
rows : iterable of ints
Row coordinates representing values within the ellipse.
cols : iterable of ints
Corresponding column coordinates representing values within the ellipse.
"""
r_lim, c_lim = np.ogrid[0:float(shape[0]), 0:float(shape[1])]
r_org, c_org = center
r_rad, c_rad = radii
rotation %= np.pi
sin_alpha, cos_alpha = np.sin(rotation), np.cos(rotation)
r, c = (r_lim - r_org), (c_lim - c_org)
distances = ((r * cos_alpha + c * sin_alpha) / r_rad) ** 2 \
+ ((r * sin_alpha - c * cos_alpha) / c_rad) ** 2
return np.nonzero(distances < 1)
def ellipse(r, c, r_radius, c_radius, shape=None, rotation=0.):
"""Generate coordinates of pixels within ellipse.
Parameters
----------
r, c : double
Centre coordinate of ellipse.
r_radius, c_radius : double
Minor and major semi-axes. ``(r/r_radius)**2 + (c/c_radius)**2 = 1``.
shape : tuple, optional
Image shape which is used to determine the maximum extent of output pixel
coordinates. This is useful for ellipses which exceed the image size.
        By default the full extent of the ellipse is used.
rotation : float, optional (default 0.)
        Set the ellipse rotation in range (-PI, PI),
        in counter-clockwise direction, so a rotation of PI/2 swaps the ellipse axes
Returns
-------
rr, cc : ndarray of int
Pixel coordinates of ellipse.
May be used to directly index into an array, e.g.
``img[rr, cc] = 1``.
Examples
--------
>>> from skimage.draw import ellipse
>>> img = np.zeros((10, 12), dtype=np.uint8)
>>> rr, cc = ellipse(5, 6, 3, 5, rotation=np.deg2rad(30))
>>> img[rr, cc] = 1
>>> img
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
Notes
-----
The ellipse equation::
((x * cos(alpha) + y * sin(alpha)) / x_radius) ** 2 +
((x * sin(alpha) - y * cos(alpha)) / y_radius) ** 2 = 1
    Note that the positions of `ellipse` without a specified `shape` can also
    have negative values, as this is correct on the plane. On the other hand,
    using these positions to index into an image afterwards may make the ellipse
    appear on the other side of the image, because ``image[-1, -1] = image[end-1, end-1]``
>>> rr, cc = ellipse(1, 2, 3, 6)
>>> img = np.zeros((6, 12), dtype=np.uint8)
>>> img[rr, cc] = 1
>>> img
array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1]], dtype=uint8)
"""
center = np.array([r, c])
radii = np.array([r_radius, c_radius])
# allow just rotation with in range +/- 180 degree
rotation %= np.pi
# compute rotated radii by given rotation
r_radius_rot = abs(r_radius * np.cos(rotation)) \
+ c_radius * np.sin(rotation)
c_radius_rot = r_radius * np.sin(rotation) \
+ abs(c_radius * np.cos(rotation))
# The upper_left and lower_right corners of the smallest rectangle
# containing the ellipse.
radii_rot = np.array([r_radius_rot, c_radius_rot])
    upper_left = np.ceil(center - radii_rot)
#
# plate localization
#
# for sunny days
#import tensorflow as tf
import cv2
import imutils
import numpy as np
from imutils import paths
#import RDetectPlates as detplt
from imutils import perspective
import matplotlib.pyplot as plt
#import RLpPreprocess as prpr
#
# trying different images by addressing different paths
#
path = '/path to image data/'
imgs = sorted(list(paths.list_images(path)), reverse=True)
rnd = np.random.randint(0, len(imgs)-1, 1)[0]
#rnd = 39
# testing the detector:
det_1 = 'fh'
det_2 = 'cmp'
# convex hull extraction: merging nearby contours
# the measure for evaluating closeness of the contours
def find_if_close(cnt1,cnt2):
row1,row2 = cnt1.shape[0],cnt2.shape[0]
for i in range(row1):
for j in range(row2):
dist = np.linalg.norm(cnt1[i]-cnt2[j])
if abs(dist) < 25:
return True
elif i==row1-1 and j==row2-1:
return False
# convex hull drawing system
def convexhull(contours):
LENGTH = len(contours)
status = np.zeros((LENGTH,1))
for i,cnt1 in enumerate(contours):
x = i
if i != LENGTH-1:
for j,cnt2 in enumerate(contours[i+1:]):
x = x+1
dist = find_if_close(cnt1,cnt2)
if dist == True:
val = min(status[i],status[x])
status[x] = status[i] = val
else:
if status[x]==status[i]:
status[x] = i+1
unified = []
maximum = int(status.max())+1
for i in range(maximum):
pos = np.where(status==i)[0]
if pos.size != 0:
            cont = np.vstack([contours[i] for i in pos])
hull = cv2.convexHull(cont)
unified.append(hull)
return unified
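# A hedged usage sketch for the two helpers above (assumes a binary edge or
# threshold image "edges"; the OpenCV 4.x findContours signature is used):
#   cnts, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#   merged = convexhull(list(cnts))          # merge contours closer than ~25 px
#   cv2.drawContours(imgOrg, merged, -1, (0, 255, 0), 2)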
run = True
# the goal of this part is to find plate!!
while(run):
run = False
# for first path
#imgOrg = cv2.imread(path) #imgOrg = np.asarray(img)
# for second path
imgOrg = cv2.imread(imgs[rnd]) # imgs[rnd]
#prpr.preprocess(img)
#s_x, s_y, ch = img.shape
#intface = paths.list_images(path) # list()
#imgOrg = sorted(intface, reverse=True)
plt.imshow(imgOrg)
plt.close()
try:
gimg = cv2.cvtColor(imgOrg, cv2.COLOR_BGR2GRAY)
except:
print('there is an error in making img gray')
#plt.imshow(gimg)
#plt.close()
#
# working on Fatihs
# this part should be checked once again
#
detector = 'fh'
if detector == det_1:
retRegions = [] # this will be the return value
retCoords = [] # this will be the return value
# # defining kernels
#
# Vertical Kernels
vertKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 5))
pKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
#
# Horizontal Kernels
bKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (2, 1))
b2Kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 2))
smallKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (4, 3))
HKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 3))
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 4)) # the rectangle kernel
superKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 3)) # 27,3 check 29 also
#
# poss_plate = []
# det_plate = []
# bigpics = []
# # poss_plate.append(gray)
# to_end = False
# max_lim = 10
# tolerance = 100
#
# # for i in range(max_lim):
# # convert the image to grayscale, and apply the blackhat operation
# img_gray = cv2.cvtColor(self.imgOriginal, cv2.COLOR_BGR2GRAY)
# # step one
#
# gradX = np.absolute(cv2.Sobel(img_gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1))
# (minVal, maxVal) = (np.min(gradX), np.max(gradX))
# gradX = (255 * ((gradX - minVal) / (maxVal - minVal))).astype("uint8")
#
# # I used one name for readability and memory usage
# # step two
# gray = cv2.medianBlur(cv2.blur(cv2.GaussianBlur(gradX, (15, 15), 10), (15, 15)), 15)
# gray = cv2.erode(gray, superKernel, iterations=1)
# gray = cv2.erode(gray, rectKernel, iterations=3)
# gray = cv2.dilate(gray, rectKernel, iterations=2)
# gray = cv2.GaussianBlur(gray, (5, 5), 10)
# mx_v = np.amax(gray)
# _, gray = cv2.threshold(gray, 0.3 * mx_v, mx_v, cv2.THRESH_BINARY)
# gray = cv2.dilate(gray, smallKernel, iterations=10)
#
# _, cnts, _ = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#
# #
# for cnt in cnts:
# #
# RecPnt = np.int0(cv2.boxPoints(cv2.minAreaRect(cnt)))
# bigpics.append(RecPnt)
#
bigpics = [] # this will be the return value
#retCoords = [] # this will be the return value
# initialize the rectangular and square kernels to be applied to the image,
# then initialize the list of license plate regions
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 5))
squareKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (11, 5))
# convert the image to grayscale, and apply the blackhat operation
#img_gray = cv2.cvtColor(self.imgOriginal, cv2.COLOR_BGR2GRAY)
blackhat = cv2.morphologyEx(gimg, cv2.MORPH_BLACKHAT, rectKernel)
# find regions in the image that are light
light = cv2.morphologyEx(gimg, cv2.MORPH_CLOSE, rectKernel)
light = cv2.threshold(light, 0, 255, cv2.THRESH_BINARY)[1]
# compute the Scharr gradient representation of the blackhat image and scale the
# resulting image into the range [0, 255]
gradX = cv2.Sobel(blackhat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
        (minVal, maxVal) = (np.min(gradX), np.max(gradX))
import numpy as np
import pytest
from amodem import dsp
from amodem import recv
from amodem import detect
from amodem import equalizer
from amodem import sampling
from amodem import config
from amodem import common
config = config.fastest()
def test_detect():
P = sum(equalizer.prefix)
t = np.arange(P * config.Nsym) * config.Ts
x = np.cos(2 * np.pi * config.Fc * t)
detector = detect.Detector(config, pylab=common.Dummy())
samples, amp, freq_err = detector.run(x)
assert abs(1 - amp) < 1e-12
assert abs(freq_err) < 1e-12
x = np.cos(2 * np.pi * (2*config.Fc) * t)
with pytest.raises(ValueError):
detector.run(x)
with pytest.raises(ValueError):
detector.max_offset = 0
detector.run(x)
def test_prefix():
omega = 2 * np.pi * config.Fc / config.Fs
symbol = np.cos(omega * np.arange(config.Nsym))
signal = np.concatenate([c * symbol for c in equalizer.prefix])
def symbols_stream(signal):
sampler = sampling.Sampler(signal)
return dsp.Demux(sampler=sampler, omegas=[omega], Nsym=config.Nsym)
r = recv.Receiver(config, pylab=common.Dummy())
r._prefix(symbols_stream(signal))
with pytest.raises(ValueError):
silence = 0 * signal
r._prefix(symbols_stream(silence))
def test_find_start():
sym = np.cos(2 * np.pi * config.Fc * np.arange(config.Nsym) * config.Ts)
detector = detect.Detector(config, pylab=common.Dummy())
length = 200
prefix = postfix = np.tile(0 * sym, 50)
    carrier = np.tile(sym, length)
"""
Utilities to preprocess and visualize 2D MRI liver data: Multiecho and IDEAL
Useful for training segmentation and classification models
"""
import itertools
import os
import json
import nibabel as nib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import random
import scipy.ndimage.morphology
import scipy.spatial.distance as distance
import skimage
import sklearn
import sklearn.metrics
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from scipy.stats import rankdata
from absl import logging
def histogram_equalize(img):
img_cdf, bin_centers = skimage.exposure.cumulative_distribution(img, nbins=1024)
return np.interp(img, bin_centers, img_cdf)
def load_image(image_path, equalize=True, return_nifti=False):
if os.path.exists(image_path):
image_nii = nib.load(image_path)
image_data = image_nii.get_fdata()
image_slice = image_data[:, :, 0, :]
if equalize:
image_slice = histogram_equalize(image_slice)
else:
logging.warn("{} does not exist".format(image_path))
image_slice, image_nii = None, None
if return_nifti:
return image_slice, image_nii
else:
return image_slice
def load_mask(mask_path):
"""
Load mask nifti file from mask_path. Data should be [H, W, D=1, C]
Use dilation-fill-erode to remove holes / concavities from annotation artifacts.
returns a [H, W] shaped mask
Reduces away the channel dimension with a max.
"""
if os.path.exists(mask_path):
mask_nii = nib.load(mask_path)
mask_data = np.max(mask_nii.get_fdata()[:, :, 0, :], axis=-1)
mask_data = scipy.ndimage.binary_dilation(mask_data, iterations=2)
mask_data = scipy.ndimage.morphology.binary_fill_holes(mask_data.astype(np.int)).astype(np.float64)
mask_data = scipy.ndimage.morphology.binary_erosion(mask_data, iterations=2)
mask_slice = np.stack([1 - mask_data[:, :, ],
mask_data[:, :, ]], axis=2)
else:
logging.warn("{} does not exist".format(mask_path))
mask_slice = None
return mask_slice
def preprocess_magnitude_phase(magnitude_raw, phase_raw, magnitude_mult,
phase_mult, name=None):
if magnitude_raw is None:
return None
if phase_raw is None:
return None
magnitude = magnitude_mult * magnitude_raw
mM = magnitude.max()
if mM > 10.:
logging.warn("({}) final magnitude has large max magnitude: {}".format(name, mM))
if mM < .1:
logging.warn("({}) final magnitude has small max magnitude: {}".format(name, mM))
phase = phase_mult * phase_raw
pM, pm = phase.max(), phase.min()
if not np.isclose(pM - pm, np.pi * 2, rtol=1e-02, atol=1e-02):
logging.warn("processed phase has support not close to 2pi: {}, {}".format(pm, pM))
phase_sin = np.sin(phase)
phase_cos = np.cos(phase)
try:
magnt_psin_pcos = np.concatenate([magnitude, phase_sin, phase_cos], axis=-1)
except ValueError:
logging.error("({}) failed to concat magnitude, phase_sin, phase_cos. {} {} {}".format(
name, repr(magnitude.shape), repr(phase_sin.shape), repr(phase_cos.shape)))
return None
return magnt_psin_pcos
def plot_mask_image(image_slice, mask_slice):
"""
visual diagnostics combining an image with its liver mask
"""
if (image_slice is not None) and (mask_slice is not None):
fig, axes = plt.subplots(1, 4)
plt.axis('off')
both = np.concatenate([image_slice, mask_slice], axis=2)
axes[0].imshow(both[:, :, [3, 5, 7]], origin="lower")
axes[1].imshow(both[:, :, [3, 11, 7]], origin="lower")
axes[2].imshow(both[:, :, [0, 11, 9]], origin="lower")
axes[3].imshow(both[:, :, [4, 11, 6]], origin="lower")
return image_slice, mask_slice
def image_sanity_fail(image, shape, description):
"""
Sanity check on images: training and testing; shape needs to match.
description affects the logging, on failure.
"""
if image is None:
logging.error("{} : image is None".format(description))
return True
elif image.shape != shape:
logging.error("{} : shape is {}, (expecting {})".format(
description, repr(image.shape), repr(shape)))
return True
else:
return False
def mask_sanity_fail(mask, shape, description):
"""
Sanity check on training masks; shape needs to match.
description affects the logging, on failure.
"""
if mask is None:
logging.warn("{} : mask is None".format(description))
return True
if mask.shape != shape:
logging.warn("{} : shape is {}, (expecting {})".format(
description, repr(mask.shape), repr(shape)))
return True
mm = mask[..., 1].mean()
if mm > .5 or mm < .02: # mostly should be .07 to .12
logging.warn("{} : foreground mean {}".format(description, mm))
return True
def np_rescale(_x, axis):
M = np.max(_x, axis=axis, keepdims=True)
m = np.min(_x, axis=axis, keepdims=True)
d = M - m + 1e-5
return (_x - m) / d
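# Hedged example: np_rescale(np.array([2., 4., 6.]), axis=0) gives approximately
# [0., 0.5, 1.] (slightly less, because 1e-5 is added to the denominator).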
def rankdata_each(x_hwc):
h, w, c = x_hwc.shape
z_hwc = np.stack([np_rescale(rankdata(x_hwc[..., _].flatten()), axis=0)
for _ in range(c)], axis=1).reshape(list(x_hwc.shape))
return z_hwc
def _pca_rank_scale_rgb_tiles(z_img, npc=7, triples=((0, 1, 2), (0, 3, 4), (0, 5, 6), None)):
"""
z_img is of shape nhwc. nhw dimensions are flattened and the result is fit by a pca.
triples specified
"""
pca = PCA(n_components=npc)
z = z_img.reshape([-1, z_img.shape[-1]])
z = StandardScaler().fit_transform(z)
pixel_pca_components = pca.fit_transform(z)
pixel_pca_df = pd.DataFrame(data=pixel_pca_components, columns=['pc{}'.format(_) for _ in range(npc)])
pixel_pca_arr = pixel_pca_df.to_numpy()
tiles = []
for t in triples:
if t is None: # top 3 principal components, without percentile-normalization
arr = np_rescale(pixel_pca_arr[:, :3], axis=1).reshape(list(z_img.shape)[:-1] + [3, ])
else:
assert len(t) == 3
arr = np.stack([np_rescale(rankdata(pixel_pca_arr[:, _]), axis=0)
for _ in t], axis=1).reshape(list(z_img.shape)[:-1] + [3, ])
tiles.append(arr)
return tiles
def visualize_head(r, c, vis_dict, image_name, indices=None, titles=None, suptitle=None):
"""
Creates a figure of r by c subplots. The images are selected from vis_dict.
The images to plot are keyed by a list of indices.
    titles are keyed in the same manner as vis_dict. Save the resulting figure as image_name.
"""
if indices is None:
indices = range(min(r * c, len(vis_dict)))
plt.figure(figsize=(16, 16))
for j, i in enumerate(indices):
plt.subplot(r, c, j + 1)
plt.axis('off')
if titles is not None:
plt.title(titles[i])
plt.imshow(vis_dict[i])
plt.axis('off')
if suptitle is not None:
plt.suptitle(suptitle)
parent_dir, _ = os.path.split(image_name)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
plt.savefig(image_name)
plt.close()
def get_srnp(gt, pred):
tn, fp, fn, tp = sklearn.metrics.confusion_matrix(gt, pred).ravel()
return {'specificity': tn / (tn + fp),
'recall': tp / (tp + fn),
'negativepv': tn / (tn + fn),
'precision': tp / (tp + fp)}
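# Hedged worked example for get_srnp: gt=[0, 0, 1, 1], pred=[0, 1, 1, 1] gives
# tn=1, fp=1, fn=0, tp=2, so specificity=0.5, recall=1.0, negativepv=1.0 and
# precision=2/3.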
def evaluate_srnpdj(gt, pred, K=25):
"""
Parameters:
gt: np.array([N, ...])
ground truth array of binary labels (integers)
pred: np.array([, ...])
same shape as ground truth
Returns:
srnpdj_N6: np.array(shape=[N, 6])
            for each paired item in (gt, pred), return 6 evaluation numbers:
            specificity, recall, npv, ppv (precision), dice-sim, jaccard.
        idx_low_metric_dict: dict(str: [int])
            dictionary of indices with the lowest specificity, recall, npv, ppv,
            dice-sim, and jaccard
"""
dicesim = []
jaccard = []
specificity = []
recall = []
negativepv = []
precision = []
for _y_true, _y_pred in zip(gt, pred):
_y_true, _y_pred = _y_true.flatten(), _y_pred.flatten()
srnp = get_srnp(_y_true, _y_pred)
specificity.append(srnp['specificity'])
recall.append(srnp['recall'])
negativepv.append(srnp['negativepv'])
precision.append(srnp['precision'])
dicesim.append(1. - distance.dice(_y_true, _y_pred))
jaccard.append(sklearn.metrics.jaccard_score(_y_true, _y_pred))
specificity = np.array(specificity)
recall = np.array(recall)
negativepv = np.array(negativepv)
precision = np.array(precision)
dicesim = np.array(dicesim)
jaccard = np.array(jaccard)
logging.info("dicesim shape: {}".format(repr(dicesim.shape)))
lowest_specificity = specificity.argsort()[:K]
lowest_recall = recall.argsort()[:K]
lowest_npv = negativepv.argsort()[:K]
lowest_ppv = precision.argsort()[:K]
lowest_dicesim = dicesim.argsort()[:K]
lowest_jaccard = jaccard.argsort()[:K]
srnpdj_N6 = np.stack([specificity, recall, negativepv, precision, dicesim, jaccard], axis=1)
idx_low_metric_dict = {
'specificity': lowest_specificity,
'recall': lowest_recall,
'npv': lowest_npv,
'ppv': lowest_ppv,
'dicesim': lowest_dicesim,
'jaccard': lowest_jaccard,
}
return srnpdj_N6, idx_low_metric_dict
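# A hedged usage sketch for evaluate_srnpdj (binary masks with a leading batch
# dimension; shapes and values are only illustrative):
#   gt = np.random.randint(0, 2, size=(8, 64, 64))
#   pred = np.random.randint(0, 2, size=(8, 64, 64))
#   srnpdj, low_idx = evaluate_srnpdj(gt, pred, K=3)   # srnpdj has shape (8, 6)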
def visualize_predictions(x_img, gt, pred, names, prefix, img_dir, r=None, c=5, rot90=0):
"""
Creates several figure of r by c subplots. The images are saved into img_dir. rot90 is an
integer indicating the number of right angle ccw rotations to do. 4 means do nothing.
prefix is used in the file names of images saved to disk.
x_img is a bunch of multi channel raw images. We use rankorder and pca to visualize the
pixels.
gt and pred are ground truth and prediction masks. We visualize them overlayed on one of the input channels.
We compute the ids of x_img with the lowest specificity, recall, npv, ppv, and dice.
We plot the worst cases for each.
Returns:
srnpdj_N6: np.array(shape=[N, 6])
            for each paired item in (gt, pred), return 6 evaluation numbers:
            specificity, recall, npv, ppv (precision), dice-sim, jaccard.
"""
if r is None:
r = min(5, int(len(gt) / c))
K = r * c
srnpdj_N6, idx_low_metric_dict = evaluate_srnpdj(gt, pred, K=K)
blue_cidx = 0 # pick any channel
blue = np_rescale(x_img[:, :, :, blue_cidx:blue_cidx + 1], axis=None)
red, green = gt, pred
logging.info("rgb: {} {} {}".format(red.shape, green.shape, blue.shape))
rgb = np.concatenate([red, green, blue], axis=3)
vis_idx = set(itertools.chain(*idx_low_metric_dict.values()))
vis_idx = vis_idx.union(range(r * c))
visuals_dict = dict() # compute only the images that score badly on some metric
for i in vis_idx:
tiles = _pca_rank_scale_rgb_tiles(x_img[i, :, :, :], triples=((0, 1, 2), (0, 0, 0), None))
tiles_A = np.concatenate([tiles[0], tiles[1]], axis=0)
tiles_B = np.concatenate([rgb[i], tiles[2]], axis=0)
tiles_AB = np.concatenate([tiles_A, tiles_B], axis=1)
tiles_AB = np.rot90(tiles_AB, k=rot90)
visuals_dict[i] = tiles_AB
visualize_head(r, c, visuals_dict, os.path.join(img_dir, 'sseg',
'{}_sseg.png'.format(prefix)),
indices=range(r * c), titles=names, suptitle='{}_{}'.format(prefix, 'samples'))
for metric_name, low_idx in idx_low_metric_dict.items():
visualize_head(r, c, visuals_dict, os.path.join(img_dir, 'low_{}'.format(metric_name),
'{}_low_{}.png'.format(prefix, metric_name)),
indices=low_idx, titles=names, suptitle='{}_low_{}'.format(prefix, metric_name))
low_metric_ids = {k: [names[_] for _ in v] for k, v in idx_low_metric_dict.items()}
with open(os.path.join(img_dir, '{}_low_metric_ids.json'.format(prefix)), 'w') as json_file:
json.dump(low_metric_ids, json_file, indent=4)
plt.figure(figsize=(16, 16))
for i in range(4 * 5):
tiles = _pca_rank_scale_rgb_tiles(x_img[i, :, :, :], triples=((0, 1, 2), (0, 3, 4), (0, 0, 0), None))
tiles_A = np.concatenate([tiles[0], tiles[1]], axis=0)
tiles_B = np.concatenate([tiles[2], tiles[3]], axis=0)
tiles_AB = np.concatenate([tiles_A, tiles_B], axis=1)
tiles_AB = np.rot90(tiles_AB, k=rot90)
plt.subplot(4, 5, i + 1)
plt.axis('off')
plt.title(names[i])
plt.imshow(tiles_AB[:, :, :])
plt.savefig(os.path.join(img_dir, '{}_visual.png'.format(prefix)))
plt.close()
return srnpdj_N6
def make_thresh_conservative(_x, _t, q=75): # q = 75: keep 1 quarter of the mask.
# takes in probabilities _x and float threshold _t. returns _t2, a more stringent threshold.
_flat = _x.flatten()
_idx = _flat >= _t
_t2 = np.percentile(_flat[_idx], q)
return _t2
# Use for scalar regression models
class ImageMaskNiftiReader(object):
def __init__(self, magnitude_path_dict, phase_path_dict, mask_path_dict):
self.magnitude_path_dict = magnitude_path_dict
self.phase_path_dict = phase_path_dict
self.mask_path_dict = mask_path_dict
def get_combined_image_mask(self, subject_id):
magnitude_path = self.magnitude_path_dict[subject_id]
phase_path = self.phase_path_dict[subject_id]
mask_path = self.mask_path_dict[subject_id]
loaded_magnitude, magnitude_nifti = load_image(magnitude_path, equalize=False, return_nifti=True)
loaded_phase = load_image(phase_path, equalize=False)
loaded_magn_sin_cos = preprocess_magnitude_phase(loaded_magnitude, loaded_phase, magnitude_mult=1 / 200.,
phase_mult=np.pi / 4096., name=subject_id)
loaded_mask = load_mask(mask_path)
return combine_image_mask(loaded_magn_sin_cos, loaded_mask, method="concat")
def combine_image_mask(image, mask, method="concat"):
if method == 'concat':
return np.concatenate([image, mask], axis=-1)
elif method == 'fg_only':
raise NotImplementedError()
elif method == 'bg_fg':
bg, fg = np.split(mask, 2, axis=-1)
bg_im = bg * image
fg_im = fg * image
return np.concatenate([bg_im, fg_im], axis=-1)
else:
raise ValueError("unknown combine_image_mask method: {}".format(method))
def yield_supervision(ids, im_mask_reader, float_df, isna_df=None, batch_size=4,
rand_rot_deg=0, rand_translate=0, skip_all_na=True,
shuffle=True, num_loops=None, skip_partial_batch=True,
HWC_check=(256, 232, 20), yield_format=None):
if num_loops is None:
loop_iter = iter(int, 1) # infinite loop
else:
loop_iter = range(num_loops)
for _loop in loop_iter:
im_mask_list, float_list, isna_list, batch_ids = [], [], [], []
idx = list(range(len(ids)))
if shuffle:
random.shuffle(idx)
for _i in idx:
curr_id = ids[_i]
try:
curr_float = float_df.loc[curr_id].values
if isna_df is not None:
curr_isna = isna_df.loc[curr_id].values
else:
curr_isna = ~np.isfinite(curr_float)
if np.all(curr_isna) and skip_all_na:
# this entry does not have any valid supervision
# logging.info('skipping {} -- all supervision na'.format(curr_id))
continue
else:
curr_float[curr_isna] = 0.
im_mask = im_mask_reader.get_combined_image_mask(curr_id)
if im_mask.shape != HWC_check:
logging.warn('invalid im_mask shape for id={} (is {}. expected {})-- skipping item and continuing in generator'.format(
curr_id, repr(im_mask.shape), repr(HWC_check)))
continue
# simple data augmentation
if rand_rot_deg:
deg = np.random.uniform(-rand_rot_deg, rand_rot_deg)
im_mask = scipy.ndimage.rotate(im_mask, deg, reshape=False)
if rand_translate:
padded = np.pad(im_mask,
((rand_translate, rand_translate),
(rand_translate, rand_translate), (0, 0)), mode='constant')
sh = np.random.randint(0, rand_translate * 2)
sw = np.random.randint(0, rand_translate * 2)
eh = sh + im_mask.shape[0]
ew = sw + im_mask.shape[1]
im_mask = padded[sh:eh, sw:ew, :]
except (FileNotFoundError, ValueError, KeyError) as e:
logging.warn(repr(e))
logging.warn('unable to read data for id={} -- skipping item and continuing in generator'.format(curr_id))
continue
im_mask_list.append(im_mask)
float_list.append(curr_float)
isna_list.append(curr_isna)
batch_ids.append(curr_id)
if len(im_mask_list) == batch_size:
try:
                    batch_im_mask = np.stack(im_mask_list, axis=0)
# Created by <NAME>
# All right reserved
# Department of Computer Science
# the University of Warwick
# <EMAIL>
import itertools as it
import math
import random
import sys
from concurrent import futures
from copy import deepcopy
from os import remove
from os.path import abspath
import category_encoders as ce
import dill
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy.stats as stats
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib.widgets import Slider
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import OneHotEncoder
from torch.autograd import Variable
from torch.distributions import Categorical
from torch.multiprocessing import Pool
from dbestclient.ml.integral import approx_count, prepare_reg_density_data
from dbestclient.ml.embedding import columns2sentences,WordEmbedding
# https://www.katnoria.com/mdn/
# https://github.com/sagelywizard/pytorch-mdn
"""A module for a mixture density network layer
For more info on MDNs, see _Mixture Density Networks_ by Bishop, 1994.
"""
class MDN(nn.Module):
"""A mixture density network layer
The input maps to the parameters of a MoG probability distribution, where
each Gaussian has O dimensions and diagonal covariance.
Arguments:
in_features (int): the number of dimensions in the input
out_features (int): the number of dimensions in the output
num_gaussians (int): the number of Gaussians per output dimensions
Input:
minibatch (BxD): B is the batch size and D is the number of input
dimensions.
Output:
(pi, sigma, mu) (BxG, BxGxO, BxGxO): B is the batch size, G is the
number of Gaussians, and O is the number of dimensions for each
Gaussian. Pi is a multinomial distribution of the Gaussians. Sigma
is the standard deviation of each Gaussian. Mu is the mean of each
Gaussian.
"""
def __init__(self, in_features, out_features, num_gaussians, device):
super(MDN, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.num_gaussians = num_gaussians
self.pi = nn.Sequential(
nn.Linear(in_features, num_gaussians),
nn.Softmax(dim=1)
)
self.sigma = nn.Linear(in_features, out_features * num_gaussians)
self.mu = nn.Linear(in_features, out_features * num_gaussians)
self.pi = self.pi.to(device)
self.mu = self.mu.to(device)
self.sigma = self.sigma.to(device)
def forward(self, minibatch):
pi = self.pi(minibatch)
sigma = torch.exp(self.sigma(minibatch))
sigma = sigma.view(-1, self.num_gaussians, self.out_features)
mu = self.mu(minibatch)
mu = mu.view(-1, self.num_gaussians, self.out_features)
return pi, sigma, mu
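# A hedged usage sketch for the MDN layer (shapes follow the class docstring;
# 'cpu' and the sizes are only examples):
#   mdn = MDN(in_features=16, out_features=1, num_gaussians=5, device='cpu')
#   pi, sigma, mu = mdn(torch.randn(8, 16))   # pi: (8, 5), sigma/mu: (8, 5, 1)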
# ONEOVERSQRT2PI = 1.0 / math.sqrt(2 * math.pi)
def gaussian_probability(sigma, mu, data):
"""Returns the probability of `data` given MoG parameters `sigma` and `mu`.
Arguments:
sigma (BxGxO): The standard deviation of the Gaussians. B is the batch
size, G is the number of Gaussians, and O is the number of
dimensions per Gaussian.
mu (BxGxO): The means of the Gaussians. B is the batch size, G is the
number of Gaussians, and O is the number of dimensions per Gaussian.
data (BxI): A batch of data. B is the batch size and I is the number of
input dimensions.
Returns:
probabilities (BxG): The probability of each point in the probability
of the distribution in the corresponding sigma/mu index.
"""
data = data.unsqueeze(1).expand_as(sigma)
ret = 1.0 / math.sqrt(2 * math.pi) * torch.exp(-0.5 *
((data - mu) / sigma) ** 2) / sigma
return torch.prod(ret, 2)
def mdn_loss(pi, sigma, mu, target, device):
"""Calculates the error, given the MoG parameters and the target
The loss is the negative log likelihood of the data given the MoG
parameters.
"""
prob = pi * gaussian_probability(sigma, mu, target)
nll = -torch.log(torch.sum(prob, dim=1)).to(device)
return torch.mean(nll)
def sample(pi, sigma, mu):
"""Draw samples from a MoG.
"""
categorical = Categorical(pi)
pis = list(categorical.sample().data)
sample = Variable(sigma.data.new(sigma.size(0), sigma.size(2)).normal_())
for i, idx in enumerate(pis):
sample[i] = sample[i].mul(sigma[i, idx]).add(mu[i, idx])
return sample
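# Hedged follow-up to the sketch above: sample(pi, sigma, mu) draws one value per
# batch row from the mixture, giving shape (8, 1) for the example MDN output.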
def gaussion_predict(weights: list, mus: list, sigmas: list, xs: list, n_jobs=1):
if n_jobs == 1:
result = np.array([np.multiply(stats.norm(mus, sigmas).pdf(x),
weights).sum(axis=1).tolist() for x in xs]).transpose()
else:
with Pool(processes=n_jobs) as pool:
instances = []
results = []
for x in xs:
i = pool.apply_async(
gaussion_predict, (weights, mus, sigmas, [x], 1))
instances.append(i)
for i in instances:
result = i.get()
# print("partial result", result)
results.append(result)
result = np.concatenate(results, axis=1)
# with futures.ThreadPoolExecutor() as executor:
# for x in xs:
# future = executor.submit(
# gaussion_predict, weights, mus, sigmas, [x], 1)
# results.append(future.result())
# result = np.concatenate(results, axis=1)
return result
def gm(weights: list, mus: list, vars: list, x: list, b_plot=False, n_division=100):
""" given a list of points, calculate the gaussian mixture probability
Args:
weights (list): weights
        mus (list): the centroids of gaussians.
vars (list): the variances.
x (list): the targeting points.
b_plot (bool, optional): whether return the value for plotting. Defaults to False.
n_division (int, optional): number of division, if b_plot=True. Defaults to 100.
Returns:
float: the pdf of a gaussian mixture.
"""
if not b_plot:
result = [stats.norm(mu_i, vars_i).pdf(
x)*weights_i for mu_i, vars_i, weights_i in zip(mus, vars, weights)]
result = sum(result)
# result = 0
# for index in range(len(weights)):
# result += stats.norm(mus[index], vars[index]
# ).pdf(x) * weights[index]
# print(result)
return result
else:
xs = np.linspace(-1, 1, n_division)
# ys = [gm(weights, mus, vars, xi, b_plot=False) for xi in xs]
ys = gm(weights, mus, vars, xs, b_plot=False)
return xs, ys
# plt.plot(xs, ys)
# plt.show()
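# A hedged usage sketch for gm (note that `vars` is passed to stats.norm as its
# scale argument; the numbers below are only illustrative):
#   pdf_vals = gm([0.3, 0.7], [-0.5, 0.5], [0.1, 0.2], [0.0, 0.25])
#   xs, ys = gm([0.3, 0.7], [-0.5, 0.5], [0.1, 0.2], None, b_plot=True)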
def normalize(x_point: float, mean: float, width: float) -> float:
"""normalize the data point
Args:
x (float): the data point
mean (float): the mean value
width (float): the width
Returns:
float: the normalized value
"""
return (x_point - mean) / width * 2
def denormalize(x_point: float, mean: float, width: float) -> float:
"""de-normalize the data point
Args:
x (float): the data point
mean (float): the mean value
width (float): the width
Returns:
float: the de-normalized value
"""
return 0.5 * width * x_point + mean
def de_serialize(file: str):
"""de-serialize the model from a file.
Args:
file (str): the file path.
Returns:
Callable: the model.
"""
with open(file, 'rb') as f:
return dill.load(f)
class GenericMdn:
def __init__(self, config):
self.meanx = None
self.widthx = None
self.config = config
def fit(self, runtime_config):
raise NotImplementedError("Method fit() is not implemented.")
def fit_grid_search(self, runtime_config):
raise NotImplementedError(
"Method fit_grid_search() is not implemented.")
def predict(self, runtime_config):
raise NotImplementedError("Method predict() is not implemented.")
def normalize(self, xs: np.array):
"""normalize the data
Args:
x (list): the data points to be normalized.
mean (float): the mean value of x.
width (float): the range of x.
Returns:
list: the normalized data.
"""
return (xs - self.meanx) / self.widthx * 2
def denormalize(self, xs):
"""de-normalize the data
Args:
x (list): the data points to be de-normalized.
mean (float): the mean value of x.
width (float): the range of x.
Returns:
list: the de-normalized data.
"""
return 0.5 * self.widthx * xs + self.meanx
class RegMdnGroupBy():
""" This class implements the regression using mixture density network for group by queries.
"""
def __init__(self, config, b_store_training_data=False, b_normalize_data=True):
if b_store_training_data:
self.x_points = None # query range
self.y_points = None # aggregate value
            self.z_points = None # group by value
self.sample_x = None # used in the score() function
self.sample_g = None
self.sample_average_y = None
self.b_store_training_data = b_store_training_data
self.meanx = None
self.widthx = None
self.meany = None
self.widthy = None
self.model = None
self.last_xs = None
self.last_pi = None
self.last_mu = None
self.last_sigma = None
self.config = config
self.b_normalize_data = b_normalize_data
self.enc = None
def fit(self, z_group: list, x_points: list, y_points: list, runtime_config, lr: float = 0.001, n_workers=0):
"""fit the MDN regression model.
Args:
z_group (list): group by values
x_points (list): x points
y_points (list): y points
n_epoch (int, optional): number of epochs for training. Defaults to 100.
            n_gaussians (int, optional): the number of gaussians. Defaults to 5.
n_hidden_layer (int, optional): the number of hidden layers. Defaults to 1.
n_mdn_layer_node (int, optional): the node number in the hidden layer. Defaults to 10.
lr (float, optional): the learning rate of the MDN network for training. Defaults to 0.001.
Raises:
ValueError: The hidden layer should be 1 or 2.
Returns:
RegMdnGroupBy: The regression model.
"""
n_epoch = self.config.config["n_epoch"]
n_gaussians = self.config.config["n_gaussians_reg"]
n_hidden_layer = self.config.config["n_hidden_layer"]
n_mdn_layer_node = self.config.config["n_mdn_layer_node_reg"]
b_grid_search = self.config.config["b_grid_search"]
encoder = self.config.config["encoder"]
device = runtime_config["device"]
if not b_grid_search:
if encoder == "onehot":
self.enc = OneHotEncoder(handle_unknown='ignore')
zs_encoded = z_group
zs_encoded = self.enc.fit_transform(zs_encoded).toarray()
elif encoder == "binary":
# print(z_group)
# prepare column names for binary encoding
columns = list(range(len(z_group[0])))
self.enc = ce.BinaryEncoder(cols=columns)
zs_encoded = self.enc.fit_transform(z_group).to_numpy()
elif encoder == "embedding":
sentences = columns2sentences(z_group, x_points, y_points)
self.enc = WordEmbedding()
self.enc.fit(sentences, gbs=["gb"],dim=self.config.config["n_embedding_dim"])
gbs_data = z_group.reshape(1,-1)[0]
zs_encoded = self.enc.predicts(gbs_data)
# raise TypeError("embedding is not supported yet.")
if self.b_normalize_data:
self.meanx = (np.max(x_points) + np.min(x_points)) / 2
self.widthx = np.max(x_points) - np.min(x_points)
self.meany = (np.max(y_points) + np.min(y_points)) / 2
self.widthy = np.max(y_points) - np.min(y_points)
x_points = np.array([normalize(i, self.meanx, self.widthx)
for i in x_points])
y_points = np.array([normalize(i, self.meany, self.widthy)
for i in y_points])
if self.b_store_training_data:
self.x_points = x_points
self.y_points = y_points
self.z_points = z_group
else:
# delete the previous stored data in grid search, to save space.
self.x_points = None
self.y_points = None
self.z_points = None
if encoder in ["onehot", "binary", "embedding"]:
xs_encoded = x_points[:, np.newaxis]
xzs_encoded = np.concatenate(
[xs_encoded, zs_encoded], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_encoded])
else:
xzs = [[x_point, z_point]
for x_point, z_point in zip(x_points, z_group)]
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs]) # transform to torch tensors
y_points = y_points[:, np.newaxis]
tensor_ys = torch.stack([torch.Tensor(i) for i in y_points])
# move variables to cuda
tensor_xzs = tensor_xzs.to(device)
tensor_ys = tensor_ys.to(device)
my_dataset = torch.utils.data.TensorDataset(
tensor_xzs, tensor_ys) # create your dataloader
my_dataloader = torch.utils.data.DataLoader(
my_dataset, batch_size=self.config.config["batch_size"], shuffle=True, num_workers=n_workers)
if encoder == "onehot":
input_dim = sum([len(i) for i in self.enc.categories_]) + 1
elif encoder == "binary":
input_dim = len(self.enc.base_n_encoder.feature_names) + 1
elif encoder == "embedding":
input_dim = self.enc.dim + 1
else:
raise ValueError("Encoding should be binary or onehot")
# initialize the model
if n_hidden_layer == 1:
self.model = nn.Sequential(
nn.Linear(input_dim, n_mdn_layer_node),
nn.Tanh(),
nn.Dropout(0.1),
MDN(n_mdn_layer_node, 1, n_gaussians, device)
)
elif n_hidden_layer == 2:
self.model = nn.Sequential(
nn.Linear(input_dim, n_mdn_layer_node),
nn.Tanh(),
nn.Linear(n_mdn_layer_node, n_mdn_layer_node),
nn.Tanh(),
nn.Dropout(0.1),
MDN(n_mdn_layer_node, 1, n_gaussians, device)
)
else:
raise ValueError(
"The hidden layer should be 1 or 2, but you provided "+str(n_hidden_layer))
self.model = self.model.to(device)
optimizer = optim.Adam(self.model.parameters(), lr=lr)
decay_rate = 0.96
my_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer=optimizer, gamma=decay_rate)
for epoch in range(n_epoch):
if runtime_config["v"]:
if epoch % 1 == 0:
print("< Epoch {}".format(epoch))
# train the model
for minibatch, labels in my_dataloader:
minibatch.to(device)
labels.to(device)
self.model.zero_grad()
pi, sigma, mu = self.model(minibatch)
loss = mdn_loss(pi, sigma, mu, labels, device)
loss.backward()
optimizer.step()
my_lr_scheduler.step()
self.model.eval()
print("Finish regression training.")
return self
else:
return self.fit_grid_search(z_group, x_points, y_points, runtime_config)
def fit_grid_search(self, z_group: list, x_points: list, y_points: list, runtime_config):
"""use grid search to tune the hyper parameters.
Args:
z_group (list): group by values
x_points (list): independent values
y_points (list): dependent values
Returns:
RegMdnGroupBy: the fitted model
"""
param_grid = {'epoch': [5], 'lr': [0.001], 'node': [
5, 10, 20], 'hidden': [1, 2], 'gaussian_reg': [3, 5]}
# param_grid = {'epoch': [5], 'lr': [0.001], 'node': [
# 5], 'hidden': [1], 'gaussian': [3]}
errors = []
combinations = it.product(*(param_grid[Name] for Name in param_grid))
combinations = list(combinations)
combs = []
for combination in combinations:
idx = 0
comb = {}
# print(combination)
for key in param_grid:
comb[key] = combination[idx]
idx += 1
combs.append(comb)
self.b_store_training_data = True
for para in combs:
print("Grid search for parameter set :", para)
config = self.config.copy()
config.config["n_gaussians_reg"] = para['gaussian_reg']
# config.config["n_gaussians_density"] = para['gaussian_density']
config.config["n_epoch"] = para['epoch']
config.config["n_hidden_layer"] = para['hidden']
config.config["n_mdn_layer_node_reg"] = para['node']
config.config["b_grid_search"] = False
instance = RegMdnGroupBy(config, b_store_training_data=True).fit(z_group, x_points, y_points,
runtime_config, lr=para['lr'])
errors.append(instance.score(runtime_config))
print("errors for grid search ", errors)
index = errors.index(min(errors))
para = combs[index]
print("Finding the best configuration for the network", para)
self.b_store_training_data = False
# release space
self.x_points = None
self.y_points = None
self.z_points = None
self.sample_x = None
self.sample_g = None
self.sample_average_y = None
config = self.config.copy()
config.config["n_gaussians_reg"] = para['gaussian_reg']
# config.config["n_gaussians_density"] = para['gaussian_density']
# config.config["n_epoch"] = para['epoch']
config.config["n_hidden_layer"] = para['hidden']
config.config["n_mdn_layer_node_reg"] = para['node']
config.config["b_grid_search"] = False
instance = RegMdnGroupBy(config).fit(z_group, x_points, y_points,
runtime_config, lr=para['lr'])
print("-"*80)
return instance
def predict(self, z_group: list, x_points: list, runtime_config, b_plot=False) -> list:
"""provide predictions for given groups and points.
Args:
z_group (list): the group by values
x_points (list): the corresponding x points
            b_plot (bool, optional): whether to plot the data. Defaults to False.
Raises:
            Exception: if the group-by values cannot be converted to float.
Returns:
list: the predictions.
"""
# torch.set_num_threads(4)
# check input data type, and convert to np.array
if type(z_group) is list:
z_group = np.array(z_group)
if type(x_points) is list:
x_points = np.array(x_points)
encoder = self.config.config["encoder"]
device = runtime_config["device"]
if encoder == 'no':
convert2float = True
if convert2float:
try:
zs_float = []
for item in z_group:
if item[0] == "":
zs_float.append([0.0])
else:
zs_float.append([(float)(item[0])])
z_group = zs_float
except:
raise Exception
if self.b_normalize_data:
x_points = normalize(x_points, self.meanx, self.widthx)
if encoder == "onehot":
# zs_encoded = z_group # [:, np.newaxis]
zs_encoded = self.enc.transform(z_group).toarray()
x_points = x_points[:, np.newaxis]
xzs_encoded = np.concatenate(
[x_points, zs_encoded], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_encoded])
elif encoder == "binary":
zs_encoded = self.enc.transform(z_group).to_numpy()
x_points = x_points[:, np.newaxis]
xzs_encoded = np.concatenate(
[x_points, zs_encoded], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_encoded])
elif encoder == "embedding":
zs_transformed = z_group.reshape(1,-1)[0]
zs_encoded = self.enc.predicts(zs_transformed)
x_points = x_points[:, np.newaxis]
xzs_encoded = np.concatenate(
[x_points, zs_encoded], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_encoded])
else:
xzs = [[x_point, z_point]
for x_point, z_point in zip(x_points, z_group)]
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs])
tensor_xzs = tensor_xzs.to(device)
self.model = self.model.to(device)
pis, sigmas, mus = self.model(tensor_xzs)
if not b_plot:
pis = pis.cpu().detach().numpy() # [0]
# sigmas = sigmas.detach().numpy().reshape(len(sigmas), -1)[0]
mus = mus.cpu().detach().numpy().reshape(len(z_group), -1) # [0]
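            # the point prediction is the mixture mean: E[y|x,z] = sum_k pi_k * mu_k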
predictions = np.sum(np.multiply(pis, mus), axis=1)
if self.b_normalize_data:
predictions = [denormalize(pred, self.meany, self.widthy)
for pred in predictions]
return predictions
else:
samples = sample(pis, sigmas, mus).data.numpy().reshape(-1)
if self.b_normalize_data:
samples = [denormalize(pred, self.meany, self.widthy)
for pred in samples]
# plt.scatter(z_group, x_points, samples)
# plt.show()
# return samples
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
if len(self.x_points) > 2000:
idx = np.random.randint(0, len(self.x_points), 2000)
if self.b_normalize_data:
x_samples = [denormalize(i, self.meanx, self.widthx)
for i in self.x_points[idx]]
y_samples = [denormalize(i, self.meany, self.widthy)
for i in self.y_points[idx]]
ax.scatter(x_samples,
self.z_points[idx], y_samples)
else:
ax.scatter(self.x_points, self.z_points, self.y_points)
if self.b_normalize_data:
x_points = denormalize(x_points, self.meanx, self.widthx)
if len(samples) > 2000:
idx = np.random.randint(0, len(x_points), 2000)
ax.scatter(np.array(x_points)[idx], np.array(
z_group)[idx], np.array(samples)[idx])
else:
ax.scatter(x_points, z_group, samples)
ax.set_xlabel('query range attribute')
ax.set_ylabel('group by attribute')
ax.set_zlabel('aggregate attribute')
plt.show()
return samples
def score(self, runtime_config) -> float:
""" evaluate the error for this model. currenltly,
it is the sum of all absolute errors, for a random sample of points.
Raises:
ValueError: b_store_training_data must be set to True to enable the score() function.
Returns:
float: the absolute error
"""
gs = ["g1", "g2", "g3", "g4", "g5"]
if not self.b_store_training_data:
raise ValueError(
"b_store_training_data must be set to True to enable the score() function.")
else:
# groups = self.enc.categories_[0]
if self.sample_x is None:
# process group by values
data = {gs[i]: [row[i] for row in self.z_points]
for i in range(len(self.z_points[0]))}
# append x y values
data['x'] = denormalize(self.x_points, self.meanx, self.widthx)
data['y'] = denormalize(self.y_points, self.meany, self.widthy)
df = pd.DataFrame(data)
columns = list(df.columns.values)
columns.remove("y")
# df = pd.DataFrame(
# {'g': self.z_points, 'x': denormalize(self.x_points, self.meanx, self.widthx), 'y': denormalize(self.y_points, self.meany, self.widthy)})
# mean_y = df.groupby(['g', 'x'])['y'].mean() # .reset_index()
mean_y = df.groupby(columns)['y'].mean() # .reset_index()
# print(df)
# raise Exception
# make the same index here
df = df.set_index(columns) # df = df.set_index(['g', 'x'])
df['mean_y'] = mean_y
# print(df)
df = df.reset_index() # to take the hierarchical index off again
df = df.sample(
n=min(1000, len(self.x_points)), random_state=1, replace=False)
self.sample_x = df["x"].values
# for g in columns[:-1]:
# self.sample_g = df["g"].values
self.sample_g = df[columns[:-1]].values
# raise Exception
self.sample_average_y = df["mean_y"].values
predictions = self.predict(
self.sample_g, self.sample_x, runtime_config)
errors = [abs(pred-tru)
for pred, tru in zip(predictions, self.sample_average_y)]
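            # trimmed sum: drop the 10 smallest and 10 largest absolute errors to damp outliers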
errors = sum(sorted(errors)[10:-10])
return errors
class RegMdn():
""" This class implements the regression using mixture density network.
"""
# , n_mdn_layer_node=20, b_one_hot=True
def __init__(self, config, dim_input, b_store_training_data=False):
if b_store_training_data:
self.xs = None # query range
self.ys = None # aggregate value
            self.zs = None  # group by value
self.b_store_training_data = b_store_training_data
self.meanx = None
self.widthx = None
self.meany = None
self.widthy = None
self.meanz = None
self.widthz = None
self.model = None
self.is_normalized = False
self.dim_input = dim_input
self.is_training_data_denormalized = False
self.last_xs = None
self.last_pi = None
self.last_mu = None
self.last_sigma = None
self.enc = None
self.config = config
# num_epoch=400, num_gaussians=5
def fit(self, xs, ys, runtime_config, b_show_plot=False, b_normalize=True):
""" fit a regression y= R(x)"""
if len(xs.shape) != 2:
raise Exception("xs should be 2-d, but got unexpected shape.")
if self.dim_input == 1:
return self.fit2d(xs, ys, runtime_config, b_show_reg_plot=b_show_plot,
b_normalize=b_normalize, )
elif self.dim_input == 2:
return self.fit3d(xs[:, 0], xs[:, 1], ys, runtime_config, b_show_plot=b_show_plot,
b_normalize=b_normalize, )
else:
print("dimension mismatch")
sys.exit(0)
def predict(self, xs, runtime_config, b_show_plot=False):
""" make predictions"""
if self.dim_input == 1:
return self.predict2d(xs, runtime_config, b_show_plot=b_show_plot)
elif self.dim_input == 2:
return self.predict3d(xs[:, 0], xs[:, 1], runtime_config, b_show_plot=b_show_plot)
else:
print("dimension mismatch")
sys.exit(0)
def fit3d(self, xs, zs, ys, runtime_config, b_show_plot=False, b_normalize=True, n_workers=0):
""" fit a regression y = R(x,z)
Args:
xs ([float]): query range attribute
zs ([float]): group by attribute
ys ([float]): aggregate attribute
b_show_plot (bool, optional): whether to show the plot. Defaults to True.
"""
b_one_hot = True
device = runtime_config["device"]
n_mdn_layer_node = self.config.config["n_mdn_layer_node"]
num_gaussians = self.config.config["n_gaussions"]
num_epoch = self.config.config["n_epoch"]
if b_one_hot:
self.enc = OneHotEncoder(handle_unknown='ignore')
zs_onehot = zs[:, np.newaxis]
zs_onehot = self.enc.fit_transform(zs_onehot).toarray()
if b_normalize:
self.meanx = (np.max(xs) + np.min(xs)) / 2
self.widthx = np.max(xs) - np.min(xs)
self.meany = (np.max(ys) + np.min(ys)) / 2
self.widthy = np.max(ys) - np.min(ys)
# self.meanz = np.mean(zs)
# self.widthz = np.max(zs)-np.min(zs)
# s= [(i-meanx)/1 for i in x]
xs = np.array([self.normalize(i, self.meanx, self.widthx)
for i in xs])
ys = np.array([self.normalize(i, self.meany, self.widthy)
for i in ys])
# zs = np.array([self.normalize(i, self.meanz, self.widthz)
# for i in zs])
self.is_normalized = True
if self.b_store_training_data:
self.xs = xs
self.ys = ys
self.zs = zs
if b_show_plot:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xs, zs, ys)
ax.set_xlabel('query range attribute')
ax.set_ylabel('group by attribute')
ax.set_zlabel('aggregate attribute')
plt.show()
if b_one_hot:
xs_onehot = xs[:, np.newaxis]
xzs_onehot = np.concatenate(
[xs_onehot, zs_onehot], axis=1).tolist()
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs_onehot]) # transform to torch tensors
else:
xzs = [[xs[i], zs[i]] for i in range(len(xs))]
tensor_xzs = torch.stack([torch.Tensor(i)
for i in xzs]) # transform to torch tensors
ys = ys[:, np.newaxis]
tensor_ys = torch.stack([torch.Tensor(i) for i in ys])
# move variables to cuda
tensor_xzs = tensor_xzs.to(device)
tensor_ys = tensor_ys.to(device)
my_dataset = torch.utils.data.TensorDataset(
            tensor_xzs, tensor_ys)  # create your dataset
# , num_workers=8) # create your dataloader
my_dataloader = torch.utils.data.DataLoader(
my_dataset, batch_size=self.config.config["batch_size"], shuffle=False, num_workers=n_workers)
input_dim = len(self.enc.categories_[0]) + 1
# initialize the model
self.model = nn.Sequential(
nn.Linear(input_dim, n_mdn_layer_node), # self.dim_input
nn.Tanh(),
nn.Dropout(0.01),
MDN(n_mdn_layer_node, 1, num_gaussians, device)
)
self.model = self.model.to(device)
optimizer = optim.Adam(self.model.parameters())
for epoch in range(num_epoch):
if epoch % 100 == 0:
print("< Epoch {}".format(epoch))
# train the model
for minibatch, labels in my_dataloader:
minibatch.to(device)
labels.to(device)
self.model.zero_grad()
pi, sigma, mu = self.model(minibatch)
loss = mdn_loss(pi, sigma, mu, labels, device)
loss.backward()
optimizer.step()
return self
def fit3d_grid_search(self, xs: list, zs: list, ys: list, runtime_config, b_normalize=True):
""" fit the regression, using grid search to find the optimal parameters.
Args:
xs (list): x points.
zs (list): group by attributes
ys (list): y values.
b_normalize (bool, optional): whether the values should be normalized
for training. Defaults to True.
Returns:
RegMdn: the model.
"""
param_grid = {'epoch': [5], 'lr': [0.001, 0.0001], 'node': [
5, 10, 20], 'hidden': [1, 2], 'gaussian': [2, 4]}
# param_grid = {'epoch': [2], 'lr': [0.001], 'node': [4, 12], 'hidden': [1, 2], 'gaussian': [10]}
errors = []
combinations = it.product(*(param_grid[Name] for Name in param_grid))
combinations = list(combinations)
combs = []
for combination in combinations:
idx = 0
comb = {}
for key in param_grid:
comb[key] = combination[idx]
idx += 1
combs.append(comb)
self.b_store_training_data = True
# for para in combs:
# print("Grid search for parameter set :", para)
# instance = self.fit(zs, xs, b_normalize=b_normalize, num_gaussians=para['gaussian'], num_epoch=para['epoch'],
# n_mdn_layer_node=para['node'], lr=para['lr'], hidden=para['hidden'], b_grid_search=False)
# errors.append(instance.score())
# index = errors.index(min(errors))
# para = combs[index]
# print("Finding the best configuration for the network", para)
# self.b_store_training_data = False
# instance = self.fit(zs, xs, b_normalize=True, num_gaussians=para['gaussian'], num_epoch=20,
# n_mdn_layer_node=para['node'], lr=para['lr'], hidden=para['hidden'], b_grid_search=False)
# return instance
def fit2d(self, xs, ys, runtime_config, b_show_reg_plot=False, b_normalize=True,
b_show_density_plot=False, n_workers=0):
""" fit a regression y = R(x)
Args:
xs([float]): query range attribute
ys([float]): aggregate attribute
b_show_plot(bool, optional): whether to show the plot. Defaults to True.
"""
n_mdn_layer_node = self.config.config["n_mdn_layer_node"]
num_epoch = self.config.config["n_epoch"]
if b_normalize:
self.meanx = (np.max(xs) + np.min(xs)) / 2
self.widthx = np.max(xs) - np.min(xs)
self.meany = (np.max(ys) + np.min(ys)) / 2
            self.widthy = np.max(ys) - np.min(ys)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: <NAME>
"""
Boundary
========
To do:
* Define boundary conditions for hipims model
-----------
"""
# Created on Tue Mar 31 16:05:27 2020
import warnings
import numpy as np
import pandas as pd
import matplotlib.patches as mplP
from . import spatial_analysis as sp
#%% boundary class definition
class Boundary(object):
"""
Class for boundary conditions
Attributes:
num_of_bound: number of boundaries
type: a list of string 'open', 'rigid', 'fall'
'open': timeseries of boundary depth/discharge/velocity can
                be given for this type. If no timeseries data is given,
water will flow out flatly
'rigid': no outlet
            'fall': water flows out like a fall; a fixed zero water depth and
velocities will be given
extent: (2-col numpy array) poly points to define the extent of a
IO boundary. If extent is not given, then the boundary is
the domain outline
hSources: a two-col numpy array. The 1st col is time(s). The 2nd
col is water depth(m)
hUSources: a two-col numpy array. The 1st col is time(s). The 2nd
col is discharge(m3/s) or a three-col numpy array, the 2nd
col and the 3rd col are velocities(m/s) in x and y
direction, respectively.
h_code: 3-element int to define the type of depth boundary
        hU_code: 3-element int to define the type of velocity boundary
description: (str) description of a boundary
"""
def __init__(self, boundary_list=None, outline_boundary='fall'):
"""Initialise the object
Args:
            boundary_list: (list of dicts), each dict contains keys (polyPoints,
                type, h, hU) to define an IO boundary's position, type, and
                Input-Output (IO) source timeseries. Keys include:
1.polyPoints is a numpy array giving X(1st col) and Y(2nd col)
coordinates of points to define the position of a boundary.
                A boundary without polyPoints is regarded as the outline_boundary.
2.type: string, type of the boundary
'open': timeseries of boundary depth/discharge/velocity
                    can be given for this type. If no timeseries data is
                    given, water will flow out flatly
'rigid': water cannot flow in or out
                'fall': water flows out like a fall; a fixed zero water depth
and velocities will be given
3.h: a two-col numpy array. The 1st col is time(s). The 2nd col
is water depth(m)
4.hU: a two-col numpy array. The 1st col is time(s). The 2nd
col is discharge(m3/s) or a three-col numpy array, the 2nd
col and the 3rd col are velocities(m/s) in x and y
direction, respectively.
outline_boundary: (str) 'open'|'rigid', default outline boundary is
open and both h and hU are set as zero
if h or hU is given, then the boundary type is set as 'open' in
function _setup_boundary_data_table
"""
data_table = _setup_boundary_data_table(boundary_list, outline_boundary)
data_table = _get_boundary_code(data_table)
num_of_bound = data_table.shape[0]
self.data_table = data_table
self.num_of_bound = num_of_bound
self.h_sources = data_table['hSources']
self.hU_sources = data_table['hUSources']
self.boundary_list = boundary_list
self.outline_boundary = outline_boundary
self.cell_subs = None
self.cell_id = None
def print_summary(self):
"""Print the summary information
"""
print('Number of boundaries: '+str(self.num_of_bound))
for n in range(self.num_of_bound):
if self.cell_subs is not None:
num_cells = self.cell_subs[n][0].size
description = self.data_table.description[n] \
+ ', number of cells: '+str(num_cells)
print(str(n)+'. '+description)
def get_summary(self):
""" Get summary information strings
"""
summary_dict = {}
summary_dict['Number of boundaries'] = str(self.num_of_bound)
summary_str = []
for n in np.arange(self.num_of_bound):
if self.cell_subs is not None:
num_cells = self.cell_subs[n][0].size
description = self.data_table.description[n] \
+ ', number of cells: '+str(num_cells)
summary_str.append(str(n)+'. '+description)
summary_dict['Boundary details'] = summary_str
return summary_dict
def _fetch_boundary_cells(self, valid_subs, outline_subs, dem_header):
""" To get the subsripts and id of boundary cells on the domain grid
valid_subs, outline_subs, dem_header are from hipims object
_valid_cell_subs, _outline_cell_subs
cell_subs: (tuple)subscripts of outline boundary cells
cell_id: (numpy vector)valid id of outline boundary cells
"""
# to get outline cell id based on _outline_cell_subs
vector_id = np.arange(valid_subs[0].size)
nrows = dem_header['nrows']
ncols = dem_header['ncols']
cellsize = dem_header['cellsize']
xllcorner = dem_header['xllcorner']
yllcorner = dem_header['yllcorner']
grid_cell_id = np.zeros((nrows, ncols))
grid_cell_id[valid_subs] = vector_id
outline_id = grid_cell_id[outline_subs]
outline_id = outline_id.astype('int64')
# to get boundary cells based on the spatial extent of each boundary
bound_cell_x = xllcorner+(outline_subs[1]+0.5)*cellsize
bound_cell_y = yllcorner+(nrows-outline_subs[0]-0.5) *cellsize
n = 1 # sequence number of boundaries
data_table = self.data_table
cell_subs = []
cell_id = []
for n in range(data_table.shape[0]):
if data_table.extent[n] is None: #outline boundary
dem_extent = sp.header2extent(dem_header)
polyPoints = sp.extent2shape_points(dem_extent)
elif len(data_table.extent[n]) == 2:
xyv = data_table.extent[n]
                polyPoints = sp.extent2shape_points(
                    [np.min(xyv[:, 0]), np.max(xyv[:, 0]),
                     np.min(xyv[:, 1]), np.max(xyv[:, 1])])
import cv2
import numpy as np
import torch
from simplecv.data._th_preprocess import _th_resize_to_range
from simplecv.data._th_preprocess import _th_mean_std_normalize
from simplecv.data._np_preprocess import _np_resize_to_range
from simplecv.data._np_preprocess import _np_mean_std_normalize
from simplecv.data._np_preprocess import _np_random_crop
from simplecv.data._np_preprocess import _np_im_random_scale
from simplecv.data._np_preprocess import _np_im_scale
from simplecv.data._np_preprocess import sliding_window
from simplecv.data._th_preprocess import _th_divisible_pad as th_divisible_pad
def random_crop(image, crop_size):
if isinstance(image, np.ndarray):
return _np_random_crop(image, crop_size)
else:
raise ValueError('The type {} is not support'.format(type(image)))
def divisible_pad(image_list, size_divisor=128, to_tensor=True):
"""
Args:
image_list: a list of images with shape [channel, height, width]
size_divisor: int
to_tensor: whether to convert to tensor
Returns:
        blob: 4-D ndarray of shape [batch, channel, divisible_max_height, divisible_max_width]
"""
max_shape = np.array([im.shape for im in image_list]).max(axis=0)
    max_shape[1] = int(np.ceil(max_shape[1] / size_divisor) * size_divisor)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import copy
from functools import reduce
import numpy
import scipy.linalg
from pyscf import lib
from pyscf.gto import mole
from pyscf.lib import logger
from pyscf.scf import hf
from pyscf import __config__
LINEAR_DEP_THRESHOLD = getattr(__config__, 'scf_addons_remove_linear_dep_threshold', 1e-8)
CHOLESKY_THRESHOLD = getattr(__config__, 'scf_addons_cholesky_threshold', 1e-10)
LINEAR_DEP_TRIGGER = getattr(__config__, 'scf_addons_remove_linear_dep_trigger', 1e-10)
def frac_occ_(mf, tol=1e-3):
'''
    Addons for SCF methods to assign fractional occupancy for degenerate
    occupied HOMOs.
Examples::
>>> mf = gto.M(atom='O 0 0 0; O 0 0 1', verbose=4).RHF()
>>> mf = scf.addons.frac_occ(mf)
>>> mf.run()
'''
from pyscf.scf import uhf, rohf
old_get_occ = mf.get_occ
mol = mf.mol
def guess_occ(mo_energy, nocc):
sorted_idx = numpy.argsort(mo_energy)
homo = mo_energy[sorted_idx[nocc-1]]
lumo = mo_energy[sorted_idx[nocc]]
frac_occ_lst = abs(mo_energy - homo) < tol
integer_occ_lst = (mo_energy <= homo) & (~frac_occ_lst)
mo_occ = numpy.zeros_like(mo_energy)
mo_occ[integer_occ_lst] = 1
degen = numpy.count_nonzero(frac_occ_lst)
frac = nocc - numpy.count_nonzero(integer_occ_lst)
mo_occ[frac_occ_lst] = float(frac) / degen
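        # e.g. 2 leftover electrons shared over 3 degenerate orbitals gives each an occupancy of 2/3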
return mo_occ, numpy.where(frac_occ_lst)[0], homo, lumo
get_grad = None
if isinstance(mf, uhf.UHF):
def get_occ(mo_energy, mo_coeff=None):
nocca, noccb = mol.nelec
mo_occa, frac_lsta, homoa, lumoa = guess_occ(mo_energy[0], nocca)
mo_occb, frac_lstb, homob, lumob = guess_occ(mo_energy[1], noccb)
if abs(homoa - lumoa) < tol or abs(homob - lumob) < tol:
mo_occ = numpy.array([mo_occa, mo_occb])
logger.warn(mf, 'fraction occ = %6g for alpha orbitals %s '
'%6g for beta orbitals %s',
mo_occa[frac_lsta[0]], frac_lsta,
mo_occb[frac_lstb[0]], frac_lstb)
logger.info(mf, ' alpha HOMO = %.12g LUMO = %.12g', homoa, lumoa)
logger.info(mf, ' beta HOMO = %.12g LUMO = %.12g', homob, lumob)
logger.debug(mf, ' alpha mo_energy = %s', mo_energy[0])
logger.debug(mf, ' beta mo_energy = %s', mo_energy[1])
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
elif isinstance(mf, rohf.ROHF):
def get_occ(mo_energy, mo_coeff=None):
nocca, noccb = mol.nelec
mo_occa, frac_lsta, homoa, lumoa = guess_occ(mo_energy, nocca)
mo_occb, frac_lstb, homob, lumob = guess_occ(mo_energy, noccb)
if abs(homoa - lumoa) < tol or abs(homob - lumob) < tol:
mo_occ = mo_occa + mo_occb
logger.warn(mf, 'fraction occ = %6g for alpha orbitals %s '
'%6g for beta orbitals %s',
mo_occa[frac_lsta[0]], frac_lsta,
mo_occb[frac_lstb[0]], frac_lstb)
logger.info(mf, ' HOMO = %.12g LUMO = %.12g', homoa, lumoa)
logger.debug(mf, ' mo_energy = %s', mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
def get_grad(mo_coeff, mo_occ, fock):
occidxa = mo_occ > 0
occidxb = mo_occ > 1
viridxa = ~occidxa
viridxb = ~occidxb
uniq_var_a = viridxa.reshape(-1,1) & occidxa
uniq_var_b = viridxb.reshape(-1,1) & occidxb
if getattr(fock, 'focka', None) is not None:
focka = fock.focka
fockb = fock.fockb
elif getattr(fock, 'ndim', None) == 3:
focka, fockb = fock
else:
focka = fockb = fock
focka = reduce(numpy.dot, (mo_coeff.T.conj(), focka, mo_coeff))
fockb = reduce(numpy.dot, (mo_coeff.T.conj(), fockb, mo_coeff))
g = numpy.zeros_like(focka)
g[uniq_var_a] = focka[uniq_var_a]
g[uniq_var_b] += fockb[uniq_var_b]
return g[uniq_var_a | uniq_var_b]
else: # RHF
def get_occ(mo_energy, mo_coeff=None):
nocc = (mol.nelectron+1) // 2 # n_docc + n_socc
mo_occ, frac_lst, homo, lumo = guess_occ(mo_energy, nocc)
n_docc = mol.nelectron // 2
n_socc = nocc - n_docc
if abs(homo - lumo) < tol or n_socc:
mo_occ *= 2
degen = len(frac_lst)
mo_occ[frac_lst] -= float(n_socc) / degen
logger.warn(mf, 'fraction occ = %6g for orbitals %s',
mo_occ[frac_lst[0]], frac_lst)
logger.info(mf, 'HOMO = %.12g LUMO = %.12g', homo, lumo)
logger.debug(mf, ' mo_energy = %s', mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
mf.get_occ = get_occ
if get_grad is not None:
mf.get_grad = get_grad
return mf
frac_occ = frac_occ_
def dynamic_occ_(mf, tol=1e-3):
assert(isinstance(mf, hf.RHF))
old_get_occ = mf.get_occ
def get_occ(mo_energy, mo_coeff=None):
mol = mf.mol
nocc = mol.nelectron // 2
sort_mo_energy = numpy.sort(mo_energy)
lumo = sort_mo_energy[nocc]
if abs(sort_mo_energy[nocc-1] - lumo) < tol:
mo_occ = numpy.zeros_like(mo_energy)
mo_occ[mo_energy<lumo] = 2
lst = abs(mo_energy - lumo) < tol
mo_occ[lst] = 0
logger.warn(mf, 'set charge = %d', mol.charge+int(lst.sum())*2)
logger.info(mf, 'HOMO = %.12g LUMO = %.12g',
sort_mo_energy[nocc-1], sort_mo_energy[nocc])
logger.debug(mf, ' mo_energy = %s', sort_mo_energy)
else:
mo_occ = old_get_occ(mo_energy, mo_coeff)
return mo_occ
mf.get_occ = get_occ
return mf
dynamic_occ = dynamic_occ_
def dynamic_level_shift_(mf, factor=1.):
'''Dynamically change the level shift in each SCF cycle. The level shift
value is set to (HF energy change * factor)
'''
old_get_fock = mf.get_fock
last_e = [None]
def get_fock(h1e, s1e, vhf, dm, cycle=-1, diis=None,
diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
if cycle >= 0 or diis is not None:
ehf =(numpy.einsum('ij,ji', h1e, dm) +
numpy.einsum('ij,ji', vhf, dm) * .5)
if last_e[0] is not None:
level_shift_factor = abs(ehf-last_e[0]) * factor
logger.info(mf, 'Set level shift to %g', level_shift_factor)
last_e[0] = ehf
return old_get_fock(h1e, s1e, vhf, dm, cycle, diis, diis_start_cycle,
level_shift_factor, damp_factor)
mf.get_fock = get_fock
return mf
dynamic_level_shift = dynamic_level_shift_
def float_occ_(mf):
'''
    For UHF, allow the Sz value to change during the SCF iterations.
Determine occupation of alpha and beta electrons based on energy spectrum
'''
from pyscf.scf import uhf
assert(isinstance(mf, uhf.UHF))
def get_occ(mo_energy, mo_coeff=None):
mol = mf.mol
ee = numpy.sort(numpy.hstack(mo_energy))
n_a = numpy.count_nonzero(mo_energy[0]<(ee[mol.nelectron-1]+1e-3))
n_b = mol.nelectron - n_a
if mf.nelec is None:
nelec = mf.mol.nelec
else:
nelec = mf.nelec
if n_a != nelec[0]:
logger.info(mf, 'change num. alpha/beta electrons '
' %d / %d -> %d / %d',
nelec[0], nelec[1], n_a, n_b)
mf.nelec = (n_a, n_b)
return uhf.UHF.get_occ(mf, mo_energy, mo_coeff)
mf.get_occ = get_occ
return mf
dynamic_sz_ = float_occ = float_occ_
def follow_state_(mf, occorb=None):
occstat = [occorb]
old_get_occ = mf.get_occ
def get_occ(mo_energy, mo_coeff=None):
if occstat[0] is None:
mo_occ = old_get_occ(mo_energy, mo_coeff)
else:
mo_occ = numpy.zeros_like(mo_energy)
s = reduce(numpy.dot, (occstat[0].T, mf.get_ovlp(), mo_coeff))
nocc = mf.mol.nelectron // 2
#choose a subset of mo_coeff, which maximizes <old|now>
idx = numpy.argsort(numpy.einsum('ij,ij->j', s, s))
mo_occ[idx[-nocc:]] = 2
logger.debug(mf, ' mo_occ = %s', mo_occ)
logger.debug(mf, ' mo_energy = %s', mo_energy)
occstat[0] = mo_coeff[:,mo_occ>0]
return mo_occ
mf.get_occ = get_occ
return mf
follow_state = follow_state_
def mom_occ_(mf, occorb, setocc):
'''Use maximum overlap method to determine occupation number for each orbital in every
iteration. It can be applied to unrestricted HF/KS and restricted open-shell
HF/KS.'''
from pyscf.scf import uhf, rohf
if isinstance(mf, uhf.UHF):
coef_occ_a = occorb[0][:, setocc[0]>0]
coef_occ_b = occorb[1][:, setocc[1]>0]
elif isinstance(mf, rohf.ROHF):
if mf.mol.spin != (numpy.sum(setocc[0]) - numpy.sum(setocc[1])):
raise ValueError('Wrong occupation setting for restricted open-shell calculation.')
coef_occ_a = occorb[:, setocc[0]>0]
coef_occ_b = occorb[:, setocc[1]>0]
else:
raise RuntimeError('Cannot support this class of instance %s' % mf)
log = logger.Logger(mf.stdout, mf.verbose)
def get_occ(mo_energy=None, mo_coeff=None):
if mo_energy is None: mo_energy = mf.mo_energy
if mo_coeff is None: mo_coeff = mf.mo_coeff
if isinstance(mf, rohf.ROHF): mo_coeff = numpy.array([mo_coeff, mo_coeff])
mo_occ = numpy.zeros_like(setocc)
nocc_a = int(numpy.sum(setocc[0]))
nocc_b = int(numpy.sum(setocc[1]))
s_a = reduce(numpy.dot, (coef_occ_a.T, mf.get_ovlp(), mo_coeff[0]))
s_b = reduce(numpy.dot, (coef_occ_b.T, mf.get_ovlp(), mo_coeff[1]))
#choose a subset of mo_coeff, which maximizes <old|now>
idx_a = numpy.argsort(numpy.einsum('ij,ij->j', s_a, s_a))[::-1]
idx_b = numpy.argsort(numpy.einsum('ij,ij->j', s_b, s_b))[::-1]
mo_occ[0][idx_a[:nocc_a]] = 1.
mo_occ[1][idx_b[:nocc_b]] = 1.
log.debug(' New alpha occ pattern: %s', mo_occ[0])
log.debug(' New beta occ pattern: %s', mo_occ[1])
if isinstance(mf.mo_energy, numpy.ndarray) and mf.mo_energy.ndim == 1:
log.debug1(' Current mo_energy(sorted) = %s', mo_energy)
else:
log.debug1(' Current alpha mo_energy(sorted) = %s', mo_energy[0])
log.debug1(' Current beta mo_energy(sorted) = %s', mo_energy[1])
if (int(numpy.sum(mo_occ[0])) != nocc_a):
log.error('mom alpha electron occupation numbers do not match: %d, %d',
nocc_a, int(numpy.sum(mo_occ[0])))
        if (int(numpy.sum(mo_occ[1])) != nocc_b):
            log.error('mom beta electron occupation numbers do not match: %d, %d',
                      nocc_b, int(numpy.sum(mo_occ[1])))
import os, sys
cmd_folder = "../../../vis"
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from get_boxlib import ReadBoxLib, get_files
import numpy as np
from scipy.special import jv, hankel2
import pylab as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
import pickle
from scipy.optimize import fmin
def check():
plt.rc("font", family="serif")
plt.rc("font", size=8)
plt.rc("mathtext", fontset="cm")
# matplotlib.rc('text', usetex = True)
params= {'text.latex.preamble' : [r'\usepackage{amsmath}']}
plt.rcParams.update(params)
gs = gridspec.GridSpec(nrows=3, ncols=3) #, width_ratios=[0.35, 1], wspace=0.05, bottom=0.14, top=0.97, left=0.1, right=0.85)
#==============================================================================
# Simulation results
#==============================================================================
# get a list of all the files in this directory
files = get_files('.', include=['plt'], exclude=["temp"], get_all=True)
f = files[-1]
fig = plt.figure(figsize=(8,5))
data = ReadBoxLib(f)
time = data.time
data = ReadBoxLib(f, max_level=-1, limits=[[-5,-5],[8,5]])
# ---
xn, rho_e = data.get("rho-electron1", grid="node")
xn, rho_i = data.get("rho-ion1", grid="node")
xn, m_e = data.get("mass-electron1", grid="node")
xn, m_i = data.get("mass-ion1", grid="node")
xn, q_e = data.get("charge-electron1", grid="node")
xn, q_i = data.get("charge-ion1", grid="node")
nd_e1 = rho_e/m_e
nd_i1 = rho_i/m_i
cd1 = (q_e*rho_e/m_e + q_i*rho_i/m_i)
# ---
xn, rho_e = data.get("rho-electron2", grid="node")
xn, rho_i = data.get("rho-ion2", grid="node")
xn, m_e = data.get("mass-electron2", grid="node")
xn, m_i = data.get("mass-ion2", grid="node")
xn, q_e = data.get("charge-electron2", grid="node")
xn, q_i = data.get("charge-ion2", grid="node")
nd_e2 = rho_e/m_e
nd_i2 = rho_i/m_i
cd2 = (q_e*rho_e/m_e + q_i*rho_i/m_i)
# ---
xn, Bx = data.get("x_B-field1", grid="node")
xn, By = data.get("y_B-field1", grid="node")
xn, Bz = data.get("z_B-field1", grid="node")
B1 = np.sqrt(Bx**2 + By**2 + Bz**2)
xn, Dx = data.get("x_D-field1", grid="node")
xn, Dy = data.get("y_D-field1", grid="node")
xn, Dz = data.get("z_D-field1", grid="node")
D1 = np.sqrt(Dx**2 + Dy**2 + Dz**2)
# ---
xn, Bx = data.get("x_B-field2", grid="node")
xn, By = data.get("y_B-field2", grid="node")
xn, Bz = data.get("z_B-field2", grid="node")
B2 = np.sqrt(Bx**2 + By**2 + Bz**2)
xn, Dx = data.get("x_D-field2", grid="node")
xn, Dy = data.get("y_D-field2", grid="node")
xn, Dz = data.get("z_D-field2", grid="node")
D2 = np.sqrt(Dx**2 + Dy**2 + Dz**2)
# ---
xc, vf_fluid = data.get("vfrac-ion1")
xc, vf_field = data.get("vfrac-field1")
data.close()
yn, xn = np.meshgrid(xn[1], xn[0])
yc, xc = np.meshgrid(xc[1], xc[0])
plot = [
{"data":[cd1,cd2], "label":r"$\varrho_c$", "loc":[0,0], "eb":vf_fluid, "cmap":"bwr"},
{"data":[B1, B2], "label":r"$\left|\mathbf{B}\right|$", "loc":[1,0], "eb":vf_field, "cmap":"viridis"},
{"data":[D1,D2], "label":r"$\left|\mathbf{D}\right|$", "loc":[2,0], "eb":vf_field, "cmap":"viridis"},
{"data":[nd_e1,nd_e2], "label":r"$n_e$", "loc":[0,1], "eb":vf_fluid, "cmap":"viridis"},
{"data":[nd_i1,nd_i2], "label":r"$n_i$", "loc":[1,1], "eb":vf_fluid, "cmap":"viridis"},
]
axes = []
for p in plot:
ax = fig.add_subplot(gs[p["loc"][0], p["loc"][1]]); axes.append(ax)
ni, nj = xc.shape
plot_data = np.hstack((p["data"][0][:,0:int(nj/2)], p["data"][1][:,int(nj/2)::]))
color = "w"
if p["cmap"] == "bwr":
            big = np.max(np.abs(plot_data))
import numpy as np
from price_simulator.src.utils.storage import Storage
def test_set_up():
storage = Storage()
storage.set_up(3, 1000, 10)
assert storage.update_steps == 100
assert len(storage.running_rewards) == 3
assert len(storage.running_rewards) == 3
assert len(storage.running_rewards) == 3
storage = Storage()
storage.set_up(3, 10, 20)
assert storage.update_steps == 1
def test_incremental_update():
avg = np.array([0])
for cnt in range(100):
cnt += 1
avg = Storage().incremental_update(np.array([10, 20]), avg, cnt)
assert np.all(avg == np.array([10, 20]))
def test_observe():
n_periods = 100
desired_length = 10
n_agents = 2
storage = Storage()
storage.set_up(n_agents, n_periods, desired_length)
for _ in range(n_periods):
storage.observe(np.array([10, 20]), np.array([30, 40]), np.array([50, 0]))
assert storage.average_rewards.shape == (desired_length, n_agents)
assert storage.average_actions.shape == (desired_length, n_agents)
assert storage.average_quantities.shape == (desired_length, n_agents)
assert np.all(storage.average_rewards == np.repeat(np.array([[10, 20]]), repeats=desired_length, axis=0))
assert np.all(storage.average_actions == np.repeat(np.array([[30, 40]]), repeats=desired_length, axis=0))
    assert np.all(storage.average_quantities == np.repeat(np.array([[50, 0]]), repeats=desired_length, axis=0))
import numpy as np
import pybullet as p
from PhysicalEngine.utils import stability_func, macro_const, utils
import pickle
import os
from multiprocessing import Pool
const = macro_const.Const()
def adjust_parameter_threshold(param, stab, targ_stab = 0.5, rate = 0.03):
"""
"""
param_update = param + (stab - targ_stab)*rate
param_diff= param - param_update
return param_update, param_diff
def find_stability_parameters(box_configs, param_init=0.08, targ_stab=0.50, iteration=500, stab_tol=0.02, isshow=False):
"""
    Iteratively find the position-displacement parameter that yields a target stability for a configuration.
    The stability of each candidate parameter is estimated over many simulation runs, and the parameter is then updated with a gradient-descent-style rule.
    -------------------------------
    box_configs[string]: pickle file recording the positions of boxes.
    param_init[float]: initial displacement parameter.
    targ_stab[float]: target stability of the configuration.
    iteration[int]: number of simulation runs used to evaluate the stability of each parameter.
    stab_tol[float]: stability tolerance. The parameter is considered converged when the difference between its stability and the target stability is smaller than stab_tol.
    Return:
    --------------
    stab_all[float]: stabilities corresponding to each parameter across the whole optimization process.
    param_all[float]: parameter values tried across the whole optimization process.
"""
# Link Physical Agent
if not isshow:
AgentID = p.connect(p.DIRECT)
else:
AgentID = p.connect(p.GUI, options='--width=512 --height=768 --background_color_red=0 --background_color_green=0 --background_color_blue=0')
# Load Plane
floorID = stability_func.create_floor(color=const.BLACK, friction=1, client=AgentID)
# Do not render images during building
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 0)
p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER, 0)
p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, 0)
# Load box
box_size = [[0.4, 0.4, 3*0.4], [0.4, 3*0.4, 0.4], [3*0.4, 0.4, 0.4]]
# Load boxes
# Initialize box positions
with open(box_configs, 'rb') as f:
params = pickle.load(f)
pos_list = np.array(params['pos_list'])
box_list = np.array(params['box_list'])
boxIDs = []
for i in range(len(params['box_list'])):
boxIDs.append(stability_func.create_box(
pos_list[i,:],
box_size[box_list[i]],
mass=0.2,
friction=1))
box_pos_ori_all = []
box_ori_ori_all = []
for i, boxID in enumerate(boxIDs):
pos_ori, ori_ori = p.getBasePositionAndOrientation(boxID)
box_pos_ori_all.append(pos_ori)
box_ori_ori_all.append(ori_ori)
stab_all = []
param_all = []
param = 1.0*param_init
stab = 1.0
while True:
        # Adjust the update rate: the larger the stability error, the larger the rate
if round(np.abs(stab-targ_stab),2) >= 0.10:
rate = 0.10
elif round(np.abs(stab-targ_stab),2) >= 0.07:
rate = 0.05
elif round(np.abs(stab-targ_stab),2) >= 0.03:
rate = 0.03
elif round(np.abs(stab-targ_stab),2) > stab_tol:
rate = 0.01
else:
break
print('Iteration time {}'.format(iteration))
actual_list = []
# Rendering
if isshow:
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
for ite in range(iteration):
# First Prepare initial stimulus
for i, boxID in enumerate(boxIDs):
p.resetBasePositionAndOrientation(boxID, box_pos_ori_all[i], box_ori_ori_all[i])
# Adjust Config
box_pos_adj, box_ori_adj = stability_func.adjust_confg_position_fixdistance(boxIDs, param)
for i, boxID in enumerate(boxIDs):
p.resetBasePositionAndOrientation(boxID, box_pos_adj[i], box_ori_adj[i])
# Provide gravity
p.setGravity(0,0,-9.8)
p.setTimeStep(const.TIME_STEP)
for i in range(500):
p.stepSimulation()
p.setGravity(0,0,0)
box_pos_fin_all = []
for i, boxID in enumerate(boxIDs):
box_pos_fin, _ = p.getBasePositionAndOrientation(boxID)
box_pos_fin_all.append(box_pos_fin)
isstable = stability_func.examine_stability(box_pos_adj, box_pos_fin_all, tol=1e-3)
if True in isstable:
# print(' Actual: Fall')
actual_list.append(False)
else:
# print(' Actual: Stable')
actual_list.append(True)
actual_list = np.array(actual_list)
stab = sum(actual_list)/len(actual_list)
# Adjust parameter
param, _ = adjust_parameter_threshold(param, stab, targ_stab=targ_stab, rate=rate)
        print('Target Stability: {}; Stability: {}; Updated Parameter: {}'.format(targ_stab, stab, param))
# An unstable initialized configuration could cause parameters smaller than 0.
# -1 means invalid value
if param < 0:
stab_all.append(-1)
param_all.append(-1)
break
else:
stab_all.append(stab)
param_all.append(param)
stab_all = np.array(stab_all)
    param_all = np.array(param_all)
import os
import csv
import cv2
import copy
import random
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
STEERING_CORRECTION_LEFT = 0.2
STEERING_CORRECTION_RIGHT = 0.2
USE_SIDE_CAMERAS = True
FLIP_IMAGES = False
## Customize data structure
class SteeringData:
def __init__(self, image_path, steer, flipped_flag):
self.image_path = image_path
self.steer = steer
self.flipped_flag = flipped_flag
self.shadow_flag = 0
self.bright_flag = 0
self.blur_flag = 0
## Reading data from csv file
# saving it as the customized data structure
# The flipped_flag and the label of each flipped image should be saved
# if we use flipped images.
def csv_read(csv_path):
print("Reading data from csv file...")
data = []
with open(csv_path) as csv_file:
reader = csv.reader(csv_file)
for line in reader:
center_image_path = line[0]
if os.path.isfile(center_image_path):
center_steer = float(line[3])
# the flipped_flag is zero
data.append(SteeringData(center_image_path, center_steer, 0))
if FLIP_IMAGES:
                    # set the flipped_flag to one for flipping the image when generating data
data.append(SteeringData(center_image_path, -center_steer,1))
if USE_SIDE_CAMERAS:
left_image_path = line[1]
if os.path.isfile(left_image_path):
left_steer = center_steer + STEERING_CORRECTION_LEFT
data.append(SteeringData(left_image_path, left_steer, 0))
if FLIP_IMAGES:
data.append(SteeringData(left_image_path, -left_steer, 1))
right_image_path = line[2]
if os.path.isfile(right_image_path):
right_steer = center_steer - STEERING_CORRECTION_RIGHT
data.append(SteeringData(right_image_path, right_steer, 0))
if FLIP_IMAGES:
data.append(SteeringData(right_image_path, -right_steer, 1))
print("Reading is done.")
return shuffle(data)
## Spliting data to training data and validation data
def load_data_sets(csv_path, split=0.2):
data = csv_read(csv_path)
train, valid = train_test_split(data, test_size=split)
return train, valid
## Getting bin counts of a data set
def get_bin_counts(x):
steers = [item.steer for item in x]
bin_count = 25
max_bin = np.max(steers)
min_bin = np.min(steers)
spread = max_bin - min_bin
bin_size = spread / bin_count
bins = [min_bin + i*bin_size for i in range(bin_count)]
bins.append(max_bin + 0.1)
hist, bin_edges = np.histogram(steers, bins)
# desired_count_per_bin = int(np.mean(bin_counts)) * 2
desired_per_bin = int(np.mean(hist)*1)
return bin_edges, hist, desired_per_bin
# This method takes the dataset supplied by x
# and adds images.
# Existing images in the dataset are copied
# and augmented by a random blur, random
# shadows, and/or random brightness changes.
def augment_dataset(x, fix_dist):
bin_edges, hist, desired_per_bin = get_bin_counts(x)
copy_times = np.float32((desired_per_bin-hist)/hist)
copy_times_accum = np.zeros_like(copy_times)
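    # copy_times[i] is the (possibly fractional) number of extra copies needed per existing
    # sample in bin i; the accumulator below carries the fractional remainder across samples
    # so that the per-bin totals come out approximately right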
augmented = []
for i in range(len(x)):
data = x[i]
index = np.digitize(data.steer, bin_edges) -1
copy_times_accum[index] += copy_times[index]
copy_times_integer = np.int32(copy_times_accum[index])
copy_times_accum[index] -= copy_times_integer
for j in range(copy_times_integer):
new_data = copy.deepcopy(data)
new_data.shadow_flag = int(np.random.uniform(0,1) + 0.5)
new_data.blur_flag = int(np.random.uniform(0,1) + 0.5)
new_data.bright_flag = int(np.random.uniform(0,1) + 0.5)
augmented.append(new_data)
if (fix_dist):
return fix_distribution(x + augmented, bin_edges, hist, desired_per_bin)
else:
return x + augmented
##
def fix_distribution(training_set, bin_edges, hist, desired_per_bin):
# ensure we don't divide by zero
non_zero_hist = np.array(hist)
non_zero_hist[non_zero_hist==0] = desired_per_bin
keep_percentage = np.float32(desired_per_bin/non_zero_hist)
def should_keep(item):
prob_to_keep = keep_percentage[np.digitize(item.steer, bin_edges)-1]
random_prob = np.random.uniform(0,1)
return (random_prob <= prob_to_keep)
trimmed_training_set = [item for item in training_set if should_keep(item)]
return trimmed_training_set
## images are loaded in BGR colorspace by OpenCV with the shape of
# (160, 320, 3). (H, W, C)
# But drive.py loads image in RGB.
# Thus the training images will be converted to RGB to ensure colorspace consistency.
## Crop 50 pixels off the top of the image,
# and 20 pixels off the bottom.
# Then resize to original size since the test image saved by drive.py is (320,160,3)
def preprocess_image(img):
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#cropped = bgr[50:140, :] # height, width
#resized = cv2.resize(cropped, (128,128))
#resized = cv2.resize(cropped, (320,160)) # width, height
return rgb
##
def random_blur(image):
# Generate a random odd number for our
# kernel size between 3 and 9
kernel_size = (np.random.randint(1, 5) * 2) + 1
# Blur and return
return cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
##
def random_shadow(image):
height, width = image.shape[:2]
number_of_shadows = np.random.randint(1, 6)
list_of_shadows = []
    # define every shadow by randomly determining
# the number and positions of a polygon's vertices
for i in range(number_of_shadows):
shadow_vertices = []
number_of_vertices = np.random.randint(3,26)
for j in range(number_of_vertices):
position_x = width * np.random.uniform()
position_y = height * np.random.uniform()
shadow_vertices.append((position_x,position_y))
list_of_shadows.append(np.array([shadow_vertices], dtype=np.int32))
# create a mask with the same dimensions as the original image
mask = np.zeros((height,width))
# fill all the shadow polygon
for shadow in list_of_shadows:
cv2.fillPoly(mask, shadow, 255)
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
v_channel = hsv[:,:,2]
    # randomly choose a darkness of the shadows
# lower numbers result in darker shadows
random_darkness = np.random.randint(45, 75) /100.0
v_channel[mask==255] = v_channel[mask==255]*random_darkness
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return bgr
##
def random_brightness(image):
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
v_channel = image_hsv[:,:,2]
# get a random number to represent the change in brightness
# note that all the v channel values should add a same number
'''
int8: -128~127, int16: -32768~32767, uint8: 0~255
    different types of data cannot operate with each other
'''
brightness_change = np.random.randint(0,100,dtype=np.uint8)
    # apply brightness change
    # be sure that v_channel values will not go below 0 or above 255 while adding or subtracting!
if (np.random.uniform(0,1) > 0.5):
v_channel[v_channel>(255-brightness_change)] = 255
v_channel[v_channel<=(255-brightness_change)] += brightness_change
else:
v_channel[v_channel<(brightness_change)] = 0
v_channel[v_channel>=(brightness_change)] -= brightness_change
# using v[v >= make_positive] + brightness_change can avoid this problem!
# put the changed v channel back to hsv and covert to rgb
    # this line could be deleted because v_channel is a view into image_hsv
image_hsv[:,:,2] = v_channel
return cv2.cvtColor(image_hsv, cv2.COLOR_HSV2BGR)
## Reading data from local files
## process images by their flags
def get_generator(images, batch_size):
while True:
        # grab a random sample of size "batch_size"
# from the "images" array
batch = np.random.choice(a=images, size=batch_size)
X = []
y = []
for index in range(len(batch)):
# instance a custom class
image_data = batch[index]
image_path = image_data.image_path
if os.path.isfile(image_path):
# Read the image, apply data augmentation
# and add to the batch
image = cv2.imread(image_path)
steer = image_data.steer
if image is not None:
if image_data.flipped_flag == 1:
image = cv2.flip(image, 1)
if image_data.blur_flag == 1:
image = random_blur(image)
if image_data.bright_flag == 1:
image = random_brightness(image)
if image_data.shadow_flag == 1:
image = random_shadow(image)
                        # convert to RGB
image = preprocess_image(image)
X.append(image)
y.append(steer)
        # convert to numpy arrays
X = np.array(X)
        y = np.array(y)
# This model is developed for Oxford Martin School Transboundary Resource Management
# Jordan River Basin Project Energy Model
# First created by <NAME>, 3/5/2020, Istanbul
#libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import time
#keeps the simulation time
start_time = time.time()
#reads inputs such as hourly consumption, and renewable (solar and wind) generation for 1MW installed capacity
df2 = pd.read_csv("data/EnergyModel_Inputs2.csv", sep = ",")
hours = np.array(list(range(1,len(df2)+1)))
demand = np.array(df2.loc[:, 'Hourly Consumption'])
generated_solar = np.array(df2.loc[:, 'Generated Solar'])
generated_wind = np.array(df2.loc[:, 'Generated Wind'])
water_rel_demand = np.zeros(len(df2)) # if there will be time series for that it can be replaced. Currently none
num_hours = len(hours)
#installed capacities
base_load_capacity = 4800.0
naturalgas_capacity = 12000.0
diesel_capacity = 0.0
solar_capacity = 9500.0
wind_capacity = 21.0
#coeffs
fac_reserve = 0.2
base_load_fac = 0.5
energy_loss_grid = 0.0
energy_loss_storage = 0.0
max_ramp_rate = 1000.0 #MWh
grid_connection = 1000.0 #MW
storage_capacity = 5000.0 #MWh
#production capacities depending on the installed capacity
base_load = np.ones(len(df2)) * base_load_capacity * base_load_fac
naturalgas = np.ones(len(df2)) * naturalgas_capacity
diesel = np.ones(len(df2)) * diesel_capacity
generated_solar = generated_solar * solar_capacity
generated_wind = generated_wind * wind_capacity
#derived series
generated_renewable = np.zeros(num_hours)
used_baseload = np.zeros(num_hours)
used_wind = np.zeros(num_hours)
used_solar = np.zeros(num_hours)
used_renewable = np.zeros(num_hours)
used_naturalgas = np.zeros(num_hours)
import legacy_code.simple_siamese.hyperparameters as hp
import numpy as np
def sigmoid(z):
return 1. / (1. + np.exp(-z))
def binary_cross_entropy(h, y):
    return -(y * np.log(h) + (1 - y) * np.log(1 - h))
def activation(prev, weights, bias):
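    # prepend a row of ones to the activations and the bias column to the weights (bias
    # stored as a column vector), so that weights_copy @ prev_copy == weights @ prev + bias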
prev_copy = np.r_[np.ones(prev.shape[1])[np.newaxis], prev]
weights_copy = np.c_[bias, weights]
return sigmoid(np.matmul(weights_copy, prev_copy))
def numpy_feedforward(x_1, x_2, twin_weights, joined_weights,
twin_bias, joined_bias):
# activation value matrices of the two twin networks and the joined network
a_1 = np.ndarray(hp.TWIN_L, dtype=np.matrix)
a_2 = np.ndarray(hp.TWIN_L, dtype=np.matrix)
a_d = np.ndarray(hp.JOINED_L, dtype=np.matrix)
# transposing horizontal input vectors (or matrices) into feature vectors
if len(x_1.shape) == 1:
a_1[0] = x_1[np.newaxis].T
a_2[0] = x_2[np.newaxis].T
else:
a_1[0] = x_1.T
a_2[0] = x_2.T
# forward propagation of twins
for i in range(1, hp.TWIN_L):
a_1[i] = activation(a_1[i - 1], twin_weights[i - 1], twin_bias[i - 1])
a_2[i] = activation(a_2[i - 1], twin_weights[i - 1], twin_bias[i - 1])
    # element-wise squared difference of the two twin networks becomes the joined input
a_d[0] = np.square(a_1[hp.TWIN_L - 1] - a_2[hp.TWIN_L - 1])
# forward propagation of the joined network
for i in range(1, hp.JOINED_L):
a_d[i] = activation(a_d[i - 1], joined_weights[i - 1], joined_bias[i - 1])
return a_1, a_2, a_d
def regularize(weights, bias, gradients, layers):
for n in range(1, layers):
regularization_offset = hp.REG_CONST \
* np.concatenate((bias[n - 1], weights[n - 1]), axis=1)
gradients[n - 1] += regularization_offset
gradients[n - 1][0] -= regularization_offset[0]
def cost_derivatives(x_1, x_2, y, twin_weights, twin_bias,
joined_weights, joined_bias):
# zero initializes cost and gradients
modelcost = np.float(0)
twin1_transformations_derivatives = np.ndarray(hp.TWIN_L - 1, dtype=np.ndarray)
twin2_transformations_derivatives = np.ndarray(hp.TWIN_L - 1, dtype=np.ndarray)
twin_weights_gradients = np.ndarray(hp.TWIN_L - 1, dtype=np.matrix)
joined_transformations_derivatives = np.ndarray(hp.JOINED_L - 1, dtype=np.ndarray)
joined_weights_gradients = np.ndarray(hp.JOINED_L - 1, dtype=np.matrix)
for i in range(1, hp.TWIN_L):
twin_weights_gradients[i - 1] = np.matrix(
np.zeros((hp.TWIN_NET[i], hp.TWIN_NET[i-1] + 1)))
for i in range(1, hp.JOINED_L):
joined_weights_gradients[i - 1] = np.matrix(
np.zeros((hp.JOINED_NET[i], hp.JOINED_NET[i - 1] + 1)))
# sum up the derivatives of cost for each sample
for i in range(0, hp.SAMPLE_SIZE):
(a_1, a_2, a_d) = numpy_feedforward(x_1[i], x_2[i], twin_weights,
joined_weights, twin_bias, joined_bias)
modelcost += binary_cross_entropy(a_d[hp.JOINED_L - 1], y[i])
# backpropagate through joined network
joined_transformations_derivatives[hp.JOINED_L - 2] = \
a_d[hp.JOINED_L - 1] - y[i]
for n in reversed(range(0, hp.JOINED_L - 2)):
# n is the n + 1 layer in the network
next_layer_transforms_gradients = joined_transformations_derivatives[n + 1]
next_layer_weights = joined_weights[n + 1]
this_layer_activations_gradients = a_d[n + 1] * (1 - a_d[n + 1])
joined_transformations_derivatives[n] = \
np.matmul(next_layer_weights.T, next_layer_transforms_gradients) \
* this_layer_activations_gradients
joined_input_derivatives = np.matmul(joined_weights[0].T, \
joined_transformations_derivatives[0])
# backpropagate through twin networks
outlayer = hp.TWIN_L - 1
a_1_out = a_1[outlayer]
a_2_out = a_2[outlayer]
twin1_transformations_derivatives[hp.TWIN_L - 2] = \
2 * (a_1_out - a_2_out) * a_1_out * (1 - a_1_out) \
* joined_input_derivatives
twin2_transformations_derivatives[hp.TWIN_L - 2] = \
2 * (a_1_out - a_2_out) * a_2_out * (1 - a_2_out) \
* joined_input_derivatives
for n in reversed(range(0, hp.TWIN_L - 2)):
twin1_transformations_derivatives[n] = \
np.matmul(twin_weights[n + 1].T, twin1_transformations_derivatives[n + 1]) \
* (a_1[n + 1] * (1 - a_1[n + 1]))
twin2_transformations_derivatives[n] = \
np.matmul(twin_weights[n + 1].T, twin2_transformations_derivatives[n + 1]) \
* (a_2[n + 1] * (1 - a_2[n + 1]))
# calculate gradients of weights in relation to their transformations
for n in range(1, hp.JOINED_L):
ad_concat_1 = np.r_[np.ones(a_d[n - 1].shape[1])[np.newaxis], a_d[n - 1]]
joined_weights_gradients[n - 1] += \
np.matmul(joined_transformations_derivatives[n - 1], ad_concat_1.T)
for n in range(1, hp.TWIN_L):
a1_concat_1 = np.r_[np.ones(a_1[n - 1].shape[1])[np.newaxis], a_1[n - 1]]
a2_concat_1 = np.r_[np.ones(a_2[n - 1].shape[1])[np.newaxis], a_2[n - 1]]
twin_weights_gradients[n - 1] += \
            np.add(np.matmul(twin1_transformations_derivatives[n - 1], a1_concat_1.T),
                   np.matmul(twin2_transformations_derivatives[n - 1], a2_concat_1.T))
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
"""Base class for defining preprocessing, as well as two concrete examples."""
from abc import ABCMeta, abstractmethod
from collections import deque
import numpy as np
from PIL import Image
class Preprocessing(object):
"""Base class for defining preprocessing.
All subclass constructors will take input_shape as the first argument.
"""
__metaclass__ = ABCMeta
def __init__(self, input_shape):
"""Constructor for base Preprocessing class."""
self._input_shape = input_shape
@abstractmethod
def output_shape(self):
"""Return shape of preprocessed observation."""
pass
@abstractmethod
def reset(self):
"""Reset preprocessing pipeline for new episode."""
pass
@abstractmethod
def preprocess(self, observation):
"""Return preprocessed observation."""
pass
class AtariPreprocessing(Preprocessing):
"""Preprocess screen images from Atari 2600 games.
The image is represented by an array of shape (210, 160, 3). See
https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf
for more details.
"""
def __init__(self, input_shape, history_len=4):
super(AtariPreprocessing, self).__init__(input_shape)
self.__history_len = history_len
self.__processed_image_seq = deque(maxlen=history_len)
self.reset()
def output_shape(self):
"""Return shape of preprocessed Atari images."""
return (self.__history_len, 84, 84)
def reset(self):
"""Reset preprocessing pipeline for new episode."""
        self.__previous_raw_image = np.zeros(self._input_shape, dtype=np.uint8)
import numpy as np
import numba
import time
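# note: numba.prange only runs in parallel inside a function compiled with
# @numba.njit(parallel=True); called as plain Python it behaves like the built-in range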
def add(x, y, z):
for i in numba.prange(x.shape[0]):
z[i] = np.sin(x[i]) + np.cos(y[i])
N = 1000000
x = np.random.rand(N)
y = np.random.rand(N)
z1 = np.empty(N)
import logging
import numpy as np
import scipy.sparse as sps
from scipy.linalg import qr
logger = logging.getLogger(__name__)
def pca_y(x, k, num_iters=2):
"""
PCA using QR factorization.
See:
An algorithm for the principal component analysis of large data sets.
Halko, Martinsson, Shkolnisky, Tygert , SIAM 2011.
:param x: Data matrix
:param k: Number of estimated Principal Components.
:param num_iters: Number of dot product applications.
:return: (left Singular Vectors, Singular Values, right Singular Vectors)
"""
m, n = x.shape
def operator(mat):
return x.dot(mat)
def operator_transpose(mat):
return np.conj(x.T).dot(mat)
flag = False
if m < n:
flag = True
operator_transpose, operator = operator, operator_transpose
m, n = n, m
ones = np.ones((n, k + 2))
if x.dtype == np.dtype("complex"):
h = operator(
(2 * np.random.random((k + 2, n)).T - ones)
+ 1j * (2 * np.random.random((k + 2, n)).T - ones)
)
else:
h = operator(2 * np.random.random((k + 2, n)).T - ones)
f = [h]
for _ in range(num_iters):
h = operator_transpose(h)
h = operator(h)
f.append(h)
f = np.concatenate(f, axis=1)
# f has e-16 error, q has e-13
q, _, _ = qr(f, mode="economic", pivoting=True)
b = np.conj(operator_transpose(q)).T
u, s, v = np.linalg.svd(b, full_matrices=False)
# not sure how to fix the signs but it seems like I dont need to
# TODO use fix_svd, here and matlab
# u, v = fix_svd(u, v)
v = v.conj()
u = np.dot(q, u)
u = u[:, :k]
v = v[:k]
s = s[:k]
if flag:
u, v = v.T, u.T
return u, s, v
def bispec_operator_1(freqs):
max_freq = np.max(freqs)
count = 0
for i in range(2, max_freq):
for j in range(1, min(i, max_freq - i + 1)):
k = i + j
id1 = np.where(freqs == i)[0]
id2 = np.where(freqs == j)[0]
id3 = np.where(freqs == k)[0]
nd1 = len(id1)
nd2 = len(id2)
nd3 = len(id3)
count += nd1 * nd2 * nd3
full_list =
|
np.zeros((count, 3), dtype="int")
|
numpy.zeros
|
"""
Module for extracting values of raster sample data at locations given by a vector dataset.
"""
import numpy as np
import geopandas as gpd
import os
from pathlib import Path
import numpy as np
import pandas as pd
import rasterio
from tqdm import tqdm
from ..raster.gdalutils import rasterize
from ..raster.rasterprocessing import create_distance_to_raster_border
from ..vector import calc_distance_to_border
def extract(src_vector: str,
burn_attribute: str,
src_raster: list,
dst_names: list,
dst_dir: str,
dist2pb: bool = False,
dist2rb: bool = False,
src_raster_template: str = None,
gdal_dtype: int = 4,
n_jobs: int = 1) -> int:
"""Extract pixel values of a list of single-band raster files overlaying with a vector dataset.
This function does not return the extracted values but stores them in the ``dst_dir`` directory.
The extracted values of each raster will be stored as a separate *NumPy* binary file as well as
the values of the ``burn_attribute``.
Additionally, the folder will contain one or more intermediate GeoTIFF files, e.g., the
rasterized ``burn_attribute`` and, if selected, the ``dist2pb`` and/or ``dist2rb`` layer.
Note that also the pixel coordinates will be extracted and stored as ``aux_coord_y`` and
``aux_coord_x``. Therefore these names should be avoided in ``dst_names``.
The function ``add_vector_data_attributes_to_extracted`` can be used to add other attributes
from ``src_vector`` to the store of extracted values such that they can be loaded easily
together with the other data.
With ``load_extracted`` the data can then be loaded conveniently.
If a file with a given name already exists the raster will be skipped.
Arguments:
src_vector {str} -- Filename of the vector dataset. Currently, it must have the same CRS as the raster.
burn_attribute {str} -- Name of the attribute column in the ``src_vector`` dataset to be
stored with the extracted data. This should usually be a unique ID for the features
(points, lines, polygons) in the vector dataset. Note that this attribute should not contain zeros
since this value is internally used for pixels that should not be extracted, or, in other words,
that do not overlap with the vector data.
src_raster {list} -- List of file paths of the single-band raster files from which to extract the pixel
values.
dst_names {list} -- List of names corresponding to ``src_raster``, used to store and later
identify the extracted data.
dst_dir {str} -- Directory to store the data to.
Keyword Arguments:
dist2pb {bool} -- Create an additional auxiliary layer containing the distance to the closest
polygon border for each extracted pixel. Defaults to ``False``.
dist2rb {bool} -- Create an additional auxiliary layer containing the distance to the closest
raster border for each extracted pixel. Defaults to ``False``.
src_raster_template {str} -- A template raster to be used for rasterizing the vector file.
Usually the first element of ``src_raster``. (default: {None})
gdal_dtype {int} -- Numeric GDAL data type, defaults to 4 which is UInt32.
See https://github.com/mapbox/rasterio/blob/master/rasterio/dtypes.py for useful look-up
tables.
n_jobs {int} -- Number of parallel processors to be used for extraction. -1 uses all processors.
Defaults to 1.
Returns:
[int] -- If successful the function returns 0 as an exit code and 1 otherwise.
"""
if src_raster_template is None:
src_raster_template = src_raster[0]
path_rasterized = os.path.join(dst_dir, f"burn_attribute_rasterized_{burn_attribute}.tif")
paths_extracted_aux = {ele: os.path.join(dst_dir, f"{ele}.npy") \
for ele in [f"aux_vector_{burn_attribute}",
"aux_coord_x",
"aux_coord_y"]}
if dist2pb:
path_dist2pb = os.path.join(dst_dir, f"aux_vector_dist2pb.tif")
paths_extracted_aux["aux_vector_dist2pb"] = os.path.join(dst_dir, f"aux_vector_dist2pb.npy")
if dist2rb:
path_dist2rb = os.path.join(dst_dir, f"aux_raster_dist2rb.tif")
paths_extracted_aux["aux_raster_dist2rb"] = os.path.join(dst_dir, f"aux_raster_dist2rb.npy")
paths_extracted_raster = {}
for path, name in zip(src_raster, dst_names):
dst = f"{os.path.join(dst_dir, name)}.npy"
if not os.path.exists(dst):
paths_extracted_raster[path] = dst
if not os.path.exists(dst_dir):
os.makedirs(dst_dir)
# if it does not already exist, here we first create the rasterized data
if not os.path.exists(path_rasterized):
if src_raster_template is None:
src_raster_template = src_raster[0]
# print("Rasterizing vector attribute.")
rasterize(src_vector=src_vector,
burn_attribute=burn_attribute,
src_raster_template=src_raster_template,
dst_rasterized=path_rasterized,
gdal_dtype=gdal_dtype)
# if any of the destination files do not exist we need the locations of the pixels to be
# extracted in form of a numpy array bool (mask_arr) that fits the rasters from which we will
# extract below
if not (all([os.path.exists(path) for path in paths_extracted_aux.values()]) and \
all([os.path.exists(path) for path in paths_extracted_raster.values()])):
# print("Creating mask array for pixels to be extracted.")
mask_arr = _get_mask_array(path_rasterized, paths_extracted_aux, burn_attribute)
else:
return 0
# create the pixel coordinates if they do not exist
if not all([os.path.exists(paths_extracted_aux["aux_coord_x"]),
os.path.exists(paths_extracted_aux["aux_coord_y"])]):
_create_and_save_coords(path_rasterized, paths_extracted_aux, mask_arr)
if dist2pb and not os.path.exists(paths_extracted_aux["aux_vector_dist2pb"]):
calc_distance_to_border(polygons=src_vector,
template_raster=path_rasterized,
dst_raster=path_dist2pb,
overwrite=True,
keep_interim_files=False)
_extract_and_save_one_layer(path_dist2pb,
paths_extracted_aux["aux_vector_dist2pb"],
mask_arr)
if dist2rb and not os.path.exists(paths_extracted_aux["aux_raster_dist2rb"]):
create_distance_to_raster_border(src_raster = Path(path_rasterized),
dst_raster = Path(path_dist2rb),
maxdist=None, # None means we calculate distances for all pixels
overwrite=True)
_extract_and_save_one_layer(path_dist2rb,
paths_extracted_aux["aux_raster_dist2rb"],
mask_arr)
# lets extract the raster values in case of sequential processing
# or remove existing raster layers to prepare parallel processing
if n_jobs == 1:
for path_src, path_dst in tqdm(paths_extracted_raster.items(),
total=len(paths_extracted_raster)):
_extract_and_save_one_layer(path_src, path_dst, mask_arr)
else:
import multiprocessing as mp
if n_jobs == -1:
n_jobs = mp.cpu_count()
pool = mp.Pool(processes=n_jobs)
_ = [pool.apply_async(_extract_and_save_one_layer,
args=(src, dst, mask_arr)) for \
src, dst in paths_extracted_raster.items()]
pool.close()
pool.join()
return 0
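# Hedged usage sketch of ``extract`` -- the file names and attribute below are
# hypothetical placeholders, not taken from the original project:
#
#     exit_code = extract(src_vector="parcels.gpkg",
#                         burn_attribute="parcel_id",
#                         src_raster=["b02.tif", "b03.tif", "b04.tif"],
#                         dst_names=["blue", "green", "red"],
#                         dst_dir="extracted",
#                         dist2pb=True,
#                         n_jobs=-1)
#
# Afterwards the destination directory would hold one .npy file per raster plus the
# auxiliary coordinate and attribute arrays described in the docstring.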
def _get_mask_array(path_rasterized, paths_extracted_aux, burn_attribute):
with rasterio.open(path_rasterized) as src:
fids_arr = src.read()
mask_arr = fids_arr > 0
if not os.path.exists(paths_extracted_aux[f"aux_vector_{burn_attribute}"]):
fids = fids_arr[mask_arr]
del fids_arr
np.save(paths_extracted_aux[f"aux_vector_{burn_attribute}"], fids)
del fids
return mask_arr
def _create_and_save_coords(path_rasterized, paths_extracted_aux, mask_arr):
src = rasterio.open(path_rasterized)
coords = {"x": rasterio.transform.xy(src.meta["transform"],
rows=[0] * src.meta["width"],
cols=np.arange(src.meta["width"]),
offset='center')[0],
"y": rasterio.transform.xy(src.meta["transform"],
rows=np.arange(src.meta["height"]),
cols=[0] * src.meta["height"],
offset='center')[1]}
coords_2d_array_x, coords_2d_array_y = np.meshgrid(coords["x"], coords["y"])
del coords
np.save(paths_extracted_aux["aux_coord_x"],
np.expand_dims(coords_2d_array_x, axis=0)[mask_arr])
del coords_2d_array_x
np.save(paths_extracted_aux["aux_coord_y"],
np.expand_dims(coords_2d_array_y, axis=0)[mask_arr])
del coords_2d_array_y
def _extract_and_save_one_layer(path_src, path_dst, mask_arr):
with rasterio.open(path_src) as src:
raster_vals = src.read()[mask_arr]
|
np.save(path_dst, raster_vals)
|
numpy.save
|
#!/usr/bin/env python
# coding: utf-8
import pymesh
from IPython.core.debugger import set_trace
from scipy.spatial import cKDTree
import time
import os
import numpy as np
import os
import matplotlib.pyplot as plt
import glob
from Bio.PDB import *
import copy
import scipy.sparse as spio
import sys
# import the right version of open3d
from masif.geometry.open3d_import import PointCloud, read_point_cloud, \
Vector3dVector, Feature, registration_ransac_based_on_feature_matching, \
TransformationEstimationPointToPoint, CorrespondenceCheckerBasedOnEdgeLength, \
CorrespondenceCheckerBasedOnDistance, CorrespondenceCheckerBasedOnNormal, \
RANSACConvergenceCriteria
# Local imports
from masif.default_config.masif_opts import masif_opts
from masif.masif_ppi_search.alignment_utils_masif_search import get_patch_geo, multidock, \
subsample_patch_coords, compute_nn_score, get_target_vix
from masif.masif_ppi_search.transformation_training_data.score_nn import ScoreNN
"""
Hard-coded configuration, change accordingly!
Target name - hard-coded and tested with PD-L1 (PDB id: 4ZQK). You can change to your own target and test.
In general this will work well with targets where MaSIF-site labels the site well and where there is a high
amount of shape complementarity
"""
target_name = "4ZQK_A"
target_ppi_pair_id = "4ZQK_A_B"
"""
Descriptor cutoff: This is the key parameter for the speed of the method. The lower the value,
the faster the method, but also the higher the number of false negatives. Values ABOVE
this cutoff are discarded. Recommended values: 1.7-2.2.
"""
DESC_DIST_CUTOFF=1.7
"""
Iface cutoff: Patches are also filtered by their MaSIF-site score. Patches whose center
point has a value BELOW this score are discarded.
The higher the value, the faster the method, but also the higher the number of false negatives.
Recommended value: 0.8
"""
IFACE_CUTOFF=0.8
def blockPrint():
sys.stdout = open(os.devnull, "w")
sys.stderr = open(os.devnull, "w")
def enablePrint():
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
nn_model = ScoreNN()
start_time = time.time()
"""
pdl1_benchmark.py: Scan a large database of proteins for binders of PD-L1. The ground truth is PD-L1 in the bound state (chain A of PDB id: 4ZQK)
<NAME> and <NAME> - LPDI STI EPFL 2019
Released under an Apache License 2.0
"""
masif_root = os.environ["masif_root"]
top_dir = os.path.join(masif_root, "data/masif_pdl1_benchmark/")
surf_dir = os.path.join(top_dir, masif_opts["ply_chain_dir"])
iface_dir = os.path.join(
top_dir, masif_opts["site"]["out_pred_dir"]
)
ply_iface_dir = os.path.join(
top_dir, masif_opts["site"]["out_surf_dir"]
)
desc_dir = os.path.join(masif_opts["ppi_search"]["desc_dir"])
pdb_dir = os.path.join(top_dir, masif_opts["pdb_chain_dir"])
precomp_dir = os.path.join(
top_dir, masif_opts["site"]["masif_precomputation_dir"]
)
# Go through every 9A patch in top_dir -- get the one with the highest iface mean 12A around it.
target_ply_fn = os.path.join(ply_iface_dir, target_name + ".ply")
mesh = pymesh.load_mesh(target_ply_fn)
iface = mesh.get_attribute("vertex_iface")
target_coord = subsample_patch_coords(target_ppi_pair_id, "p1", precomp_dir)
target_vix = get_target_vix(target_coord, iface)
target_pcd = read_point_cloud(target_ply_fn)
target_desc = np.load(os.path.join(desc_dir, target_ppi_pair_id, "p1_desc_flipped.npy"))
# Get the geodesic patch and descriptor patch for the target.
target_patch, target_patch_descs = get_patch_geo(
target_pcd, target_coord, target_vix, target_desc, flip=True, outward_shift=0.25
)
out_patch = open("target.vert", "w+")
for point in target_patch.points:
out_patch.write("{}, {}, {}\n".format(point[0], point[1], point[2]))
out_patch.close()
# Match descriptors that have a descriptor distance less than K
def match_descriptors(
in_desc_dir, in_iface_dir, pids, target_desc, desc_dist_cutoff=2.2, iface_cutoff=0.8
):
all_matched_names = []
all_matched_vix = []
all_matched_desc_dist = []
count_proteins = 0
for ppi_pair_id in os.listdir(in_desc_dir):
if ".npy" in ppi_pair_id or ".txt" in ppi_pair_id:
continue
mydescdir = os.path.join(in_desc_dir, ppi_pair_id)
for pid in pids:
try:
fields = ppi_pair_id.split("_")
if pid == "p1":
pdb_chain_id = fields[0] + "_" + fields[1]
elif pid == "p2":
pdb_chain_id = fields[0] + "_" + fields[2]
iface = np.load(in_iface_dir + "/pred_" + pdb_chain_id + ".npy")[0]
descs = np.load(mydescdir + "/" + pid + "_desc_straight.npy")
except:
continue
print(pdb_chain_id)
name = (ppi_pair_id, pid)
count_proteins += 1
diff = np.sqrt(np.sum(np.square(descs - target_desc), axis=1))
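# The line above is a per-patch Euclidean distance between descriptors; an
# equivalent (illustrative) formulation would be
#     diff = np.linalg.norm(descs - target_desc, axis=1)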
true_iface = np.where(iface > iface_cutoff)[0]
near_points = np.where(diff < desc_dist_cutoff)[0]
selected = np.intersect1d(true_iface, near_points)
if len(selected) > 0:
all_matched_names.append([name] * len(selected))
all_matched_vix.append(selected)
all_matched_desc_dist.append(diff[selected])
print("Matched {}".format(ppi_pair_id))
print("Scores: {} {}".format(iface[selected], diff[selected]))
print("Iterated over {} proteins.".format(count_proteins))
return all_matched_names, all_matched_vix, all_matched_desc_dist, count_proteins
def align_and_save(
out_filename_base,
patch,
transformation,
source_structure,
):
structure_atoms = [atom for atom in source_structure.get_atoms()]
structure_coords = [x.get_coord() for x in structure_atoms]
structure_coord_pcd = PointCloud()
structure_coord_pcd.points = Vector3dVector(structure_coords)
structure_coord_pcd.transform(transformation)
for ix, v in enumerate(structure_coord_pcd.points):
structure_atoms[ix].set_coord(v)
io = PDBIO()
io.set_structure(source_structure)
io.save(out_filename_base + ".pdb")
# Save patch
out_patch = open(out_filename_base + ".vert", "w+")
for point in patch.points:
out_patch.write("{}, {}, {}\n".format(point[0], point[1], point[2]))
out_patch.close()
return 0
## Load the structures of the target
target_pdb_id = "4ZQK"
target_chain = "A"
target_pdb_dir = pdb_dir
parser = PDBParser()
target_struct = parser.get_structure(
"{}_{}".format(target_pdb_id, target_chain),
os.path.join(target_pdb_dir, "{}_{}.pdb".format(target_pdb_id, target_chain)),
)
# Make a ckdtree with the target.
target_ckdtree = cKDTree(target_patch.points)
desc_scores = []
desc_pos = []
inlier_scores = []
inlier_pos = []
(matched_names, matched_vix, matched_desc_dist, count_proteins) = match_descriptors(
desc_dir, iface_dir, ["p1", "p2"], target_desc[target_vix],
desc_dist_cutoff=DESC_DIST_CUTOFF, iface_cutoff=IFACE_CUTOFF
)
matched_names = np.concatenate(matched_names, axis=0)
matched_vix = np.concatenate(matched_vix, axis=0)
matched_desc_dist =
|
np.concatenate(matched_desc_dist, axis=0)
|
numpy.concatenate
|
import h5py
import numpy as np
import constants as ct
import shape as sh
import potential as pt
from scipy.optimize import least_squares, minimize
from scipy.spatial import cKDTree
from astropy.cosmology import FlatLambdaCDM
# Dictionary of file output names
fname = {
"L205n2500TNG": "TNG300_L1",
"L205n1250TNG": "TNG300_L2",
"L205n625TNG": "TNG300_L3",
"L205n2500TNG_DM": "TNG300DM_L1",
"L205n1250TNG_DM": "TNG300DM_L2",
"L205n625TNG_DM": "TNG300DM_L3",
"L75n1820TNG": "TNG100_L1",
"L75n910TNG": "TNG100_L2",
"L75n455TNG": "TNG100_L3",
"L75n1820TNG_DM": "TNG100DM_L1",
"L75n910TNG_DM": "TNG100DM_L2",
"L75n455TNG_DM": "TNG100DM_L3",
}
# Dictionary of gravitational softening values used -- [DM or STAR, GAS / h]
# NOTE: I have converted the GAS values to h-free units (no little-h factor); the values were not
# consistent in the TNG tables (for some reason!)
# NOTE: Values here are in kpc
soften = {
"L205n2500TNG": [0.15, 0.369],
"L205n1250TNG": [2.95, 0.738],
"L205n625TNG": [5.90, 1.476],
"L75n1820TNG": [0.74, 0.185],
"L75n910TNG": [0.15, 0.369],
"L75n455TNG": [2.95, 0.738],
}
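# Illustrative look-up (variable names are assumptions): each entry stores the
# DM/stellar softening first and the gas softening second, both in kpc.
# dm_star_soft_kpc, gas_soft_kpc = soften["L75n1820TNG"]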
"""
This class computes which simulation is associated with a
given halo, stores it and then computes various quantities. There
are a couple of external routines, but most functionality is consolidated here.
It takes the volume class as input, which has read a snapshot.
"""
class halo:
def __init__(self, volume, Nbins=25):
"""
Take cosmology from the volume class instance
Arguments:
-volume : An instance of the entire_snapshot_read class
-Nbins : Number of bins in the radial profile [INTEGER]
"""
# Boxsize, cosmology, simulation to cgs unit conversions
self.boxsize = volume.BoxSize
self.axp = volume.axp
self.hubp = volume.hub
self.redshift = volume.redshift
self.OmegaB = volume.omega_b
self.OmegaM = volume.omega_m
self.OmegaL = volume.omega_l
self.Ulength = volume.Ulength
self.Umass = volume.Umass
self.Uvelc = volume.Uvelc
# Set tags for output
self.path = volume.path
self.simtag = self.path.split("/")[-2]
self.fname = fname[self.simtag]
self.snap = volume.snap
# Create radial bins
self.Nbins = Nbins
self.set_up_radial_profile()
return
def halo_data_store(self, mpi, subfind_table, volume, Extent=5.0, R200scale=False):
"""
Find all particles within given sphere for every halo of interest
then send particles to desired task and store
Arguments:
-mpi : An instance of the mpi class
-subfind_table : An instance of the build_table class
-volume : An instance of the entire_snapshot_read class
-Extent : Halocentric radial extent to extract particles to [FLOAT]
-R200scale : BOOLEAN, if TRUE rescale the extent by halo's R200 value
"""
if not mpi.Rank:
print(" > Distributing particles", flush=True)
# Set Extent of cut sphere
self.Extent = Extent
self.halo_data = {}
self.Nhalos = len(subfind_table.tags)
dims = np.array([self.boxsize, self.boxsize, self.boxsize])
# Loop over haloes of interest
Ntask_per_node = int(np.rint(mpi.NProcs / mpi.NNodes))
offset = 0
for j in range(0, self.Nhalos, 1):
# Scale extraction range
if R200scale:
Extent = self.Extent * subfind_table.R200[j]
else:
Extent = self.Extent * ct.Mpc_cm
# Select task to send particle data to
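# Round-robin placement: j % NNodes picks the node, while ``offset`` (incremented
# once per full pass over the nodes) picks the task within that node, so
# consecutive haloes land on different nodes.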
destination = (j % mpi.NNodes) * Ntask_per_node + (offset % Ntask_per_node)
if destination >= mpi.NProcs:
destination -= mpi.NProcs
if j > 0 and j % mpi.NNodes == mpi.NNodes - 1:
offset += 1
if not mpi.Rank:
print(" -{0:04d} {1:03d}".format(j, destination), flush=True)
if destination == mpi.Rank:
htag = subfind_table.tags[j]
self.halo_data[htag] = {}
# Find contributing cells/particles -- centering on halo
vkey = sorted(volume.__dict__.keys())
if "pos" in vkey:
Grad = volume.pos - subfind_table.CoP[j]
Grad = np.where(Grad > 0.5 * dims, Grad - dims, Grad)
Grad = np.where(Grad < -0.5 * dims, Grad + dims, Grad)
Grad = np.sqrt((Grad ** 2.0).sum(axis=-1))
gdx = np.where(Grad <= Extent)[0]
del Grad
if "DMpos" in vkey:
DMrad = volume.DMpos - subfind_table.CoP[j]
DMrad = np.where(DMrad > 0.5 * dims, DMrad - dims, DMrad)
DMrad = np.where(DMrad < -0.5 * dims, DMrad + dims, DMrad)
DMrad = np.sqrt((DMrad ** 2.0).sum(axis=-1))
ddx =
|
np.where(DMrad <= Extent)
|
numpy.where
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
class Planet():
"""
The class called Planet is initialised with constants appropriate
for the given target planet, including the atmospheric density profile
and other constants
"""
def __init__(self, atmos_func='exponential',
atmos_filename='./armageddon/resources/' +
'AltitudeDensityTable.csv',
Cd=1., Ch=0.1, Q=1e7, Cl=1e-3, alpha=0.3, Rp=6371e3,
g=9.81, H=8000., rho0=1.2):
"""
Set up the initial parameters and constants for the target planet
Parameters
----------
atmos_func : string, optional
Function which computes atmospheric density, rho, at altitude, z.
Default is the exponential function rho = rho0 exp(-z/H).
Options are 'exponential', 'tabular' and 'constant'
atmos_filename : string, optional
Name of the filename to use with the tabular atmos_func option
Cd : float, optional
The drag coefficient
Ch : float, optional
The heat transfer coefficient
Q : float, optional
The heat of ablation (J/kg)
Cl : float, optional
Lift coefficient
alpha : float, optional
Dispersion coefficient
Rp : float, optional
Planet radius (m)
rho0 : float, optional
Air density at zero altitude (kg/m^3)
g : float, optional
Surface gravity (m/s^2)
H : float, optional
Atmospheric scale height (m)
"""
# Input constants
self.Cd = Cd
self.Ch = Ch
self.Q = Q
self.Cl = Cl
self.alpha = alpha
self.Rp = Rp
self.g = g
self.H = H
self.rho0 = rho0
self.atmos_filename = atmos_filename
self.tabular_dict = {}
try:
# set function to define atmospheric density
if atmos_func == 'exponential':
# rhoa will change as z change
self.rhoa = lambda z: self.rho0 *
|
np.exp(-z / self.H)
|
numpy.exp
|
from unittest import TestCase
from tempfile import TemporaryDirectory
from pathlib import Path
from giant.camera_models import PinholeModel, OwenModel, BrownModel, OpenCVModel, save, load
import numpy as np
import giant.rotations as at
import lxml.etree as etree
class TestPinholeModel(TestCase):
def setUp(self):
self.Class = PinholeModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
estimation_parameters=['focal_length', 'px'])
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 0, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
def test_estimation_parameters(self):
model = self.Class()
model.estimation_parameters = 'kx'
self.assertEqual(model.estimation_parameters, ['kx'])
model.estimate_multiple_misalignments = False
model.estimation_parameters = ['px', 'py', 'Multiple misalignments']
self.assertEqual(model.estimation_parameters, ['px', 'py', 'multiple misalignments'])
self.assertTrue(model.estimate_multiple_misalignments)
def test_kx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_ky(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 3, 0]]))
self.assertEqual(model.ky, 3)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_px(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 20], [0, 3, 0]]))
self.assertEqual(model.px, 20)
model.px = 100
self.assertEqual(model.px, 100)
self.assertEqual(model.intrinsic_matrix[0, 2], 100)
def test_py(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 0, 10]]))
self.assertEqual(model.py, 10)
model.py = 100
self.assertEqual(model.py, 100)
self.assertEqual(model.intrinsic_matrix[1, 2], 100)
def test_a1(self):
model = self.Class(temperature_coefficients=np.array([10, 0, 0]))
self.assertEqual(model.a1, 10)
model.a1 = 100
self.assertEqual(model.a1, 100)
self.assertEqual(model.temperature_coefficients[0], 100)
def test_a2(self):
model = self.Class(temperature_coefficients=np.array([0, 10, 0]))
self.assertEqual(model.a2, 10)
model.a2 = 100
self.assertEqual(model.a2, 100)
self.assertEqual(model.temperature_coefficients[1], 100)
def test_a3(self):
model = self.Class(temperature_coefficients=np.array([0, 0, 10]))
self.assertEqual(model.a3, 10)
model.a3 = 100
self.assertEqual(model.a3, 100)
self.assertEqual(model.temperature_coefficients[2], 100)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, px=100, py=-5)
np.testing.assert_array_almost_equal(model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix,
[0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_get_temperature_scale(self):
model = self.Class(temperature_coefficients=[1, 2, 3.])
self.assertEqual(model.get_temperature_scale(1), 7)
np.testing.assert_array_equal(model.get_temperature_scale([1, 2]), [7, 35])
np.testing.assert_array_equal(model.get_temperature_scale([-1, 2.]), [-1, 35])
def test_apply_distortion(self):
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class()
for inp in inputs:
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, inp)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=1, a2=2, a3=3)
with self.subTest(misalignment=None):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
gnom, _, pix = model.get_projections(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(gnom, gnom_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
gnom, _, pix = model.get_projections(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
gnom, _, pix = model.get_projections(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(gnom, gnom_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5, a1=-1e-3, a2=1e-6, a3=-7e-8)
with self.subTest(misalignment=None):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=1):
for point in points:
pix = model.project_onto_image(point, temperature=1)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(1)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
with self.subTest(temperature=-10.5):
for point in points:
pix = model.project_onto_image(point, temperature=-10.5)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true *= model.get_temperature_scale(-10.5)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[0] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point[:2]) / point[2]
gnom_true[1] *= -1
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pix = model.project_onto_image(point)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, px=1500, py=1500.5,
misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
point_new = rot_mat @ point
pix = model.project_onto_image(point, image=0)
gnom_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
pix = model.project_onto_image(point, image=1)
gnom_true = -model.focal_length * np.array(point[:2]) / point[2]
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], gnom_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pix, pix_true)
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40,
"px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f -cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f -cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fpa
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "kx": 300, "ky": 400,
"px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dmisalignment(self):
def num_deriv(loc, dtheta, delta=1e-10) -> np.ndarray:
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [delta, 0, 0]).squeeze()
point_pert_x_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, delta, 0]).squeeze()
point_pert_y_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) + [0, 0, delta]).squeeze()
point_pert_z_f = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [delta, 0, 0]).squeeze()
point_pert_x_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, delta, 0]).squeeze()
point_pert_y_b = mis_pert @ loc
mis_pert = at.rotvec_to_rotmat(np.array(dtheta) - [0, 0, delta]).squeeze()
point_pert_z_b = mis_pert @ loc
return np.array([(point_pert_x_f - point_pert_x_b) / (2 * delta),
(point_pert_y_f - point_pert_y_b) / (2 * delta),
(point_pert_z_f - point_pert_z_b) / (2 * delta)]).T
inputs = [[1, 0, 0], [0, 1, 0], [0, 0, 1], [np.sqrt(3), np.sqrt(3), np.sqrt(3)],
[-1, 0, 0], [0, -1, 0], [0, 0, -1], [-np.sqrt(3), -np.sqrt(3), -np.sqrt(3)],
[1, 0, 100], [0, 0.5, 1]]
misalignment = [[1e-8, 0, 0], [0, 1e-8, 0], [0, 0, 1e-8], [1e-9, 1e-9, 1e-9],
[-1e-8, 0, 0], [0, -1e-8, 0], [0, 0, -1e-8], [-1e-9, -1e-9, -1e-9],
[1e-9, 2.3e-9, -0.5e-9]]
for mis in misalignment:
with self.subTest(misalignment=mis):
for inp in inputs:
num = num_deriv(inp, mis)
# noinspection PyTypeChecker
ana = self.Class._compute_dcamera_point_dmisalignment(inp)
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-4)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dgnomic_dcamera_point(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0, 0]
gnom_pert_x_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, delta, 0]
gnom_pert_y_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) + [0, 0, delta]
gnom_pert_z_f = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [delta, 0, 0]
gnom_pert_x_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, delta, 0]
gnom_pert_y_b = cmodel.get_projections(loc_pert)[0]
loc_pert = np.array(loc) - [0, 0, delta]
gnom_pert_z_b = cmodel.get_projections(loc_pert)[0]
return np.array([(gnom_pert_x_f - gnom_pert_x_b) / (2 * delta),
(gnom_pert_y_f - gnom_pert_y_b) / (2 * delta),
(gnom_pert_z_f - gnom_pert_z_b) / (2 * delta)]).T
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [0.5, 1e-14, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dcamera_point(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-9, rtol=1e-5)
def test__compute_dgnomic_dfocal_length(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
gnom_pert_f = model_pert.get_projections(loc)[0]
model_pert = cmodel.copy()
model_pert.focal_length -= delta
gnom_pert_b = model_pert.get_projections(loc)[0]
# noinspection PyTypeChecker
return np.asarray((gnom_pert_f - gnom_pert_b) / (2 * delta))
intrins_coefs = [{"focal_length": 1},
{"focal_length": 2.5},
{"focal_length": 1000},
{"focal_length": 0.1}]
inputs = [[0, 0, 1], [0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dgnomic_dfocal_length(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10, rtol=1e-5)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dtemperature_coeffs(self):
def num_deriv(loc, cmodel, delta=1e-6, temperature=0) -> np.ndarray:
loc = np.array(loc)
model_pert = cmodel.copy()
model_pert.a1 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 += delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_f = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a1 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a1_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a2 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a2_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.a3 -= delta
loc_copy = loc * model_pert.get_temperature_scale(temperature)
pix_pert_a3_b = model_pert.intrinsic_matrix[:, :2] @ loc_copy + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_a1_f - pix_pert_a1_b) / (2 * delta),
(pix_pert_a2_f - pix_pert_a2_b) / (2 * delta),
(pix_pert_a3_f - pix_pert_a3_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 1.5, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 1.5, "py": 0, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 1.5, 'a1': 0, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 1.5, 'a2': 0, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 1.5, 'a3': 0},
{"kx": 0, "ky": 0, "px": 0, "py": 0, 'a1': 0, 'a2': 0, 'a3': 1.5},
{"kx": 1.5, "ky": 1.5, "px": 1.5, "py": 1.5, 'a1': 1.5, 'a2': 1.5, 'a3': 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temperatures = [0, 1, -1, -10.5, 10.5]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
for temp in temperatures:
with self.subTest(temp=temp, **intrins_coef):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_dtemperature_coeffs(inp, temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test_get_jacobian_row(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2)]).T
# TODO: investigate why this fails with slightly larger misalignments and temperature coefficients
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-4, "a2": 2e-7, "a3": 3e-8,
"misalignment": [[2e-15, -1.2e-14, 5e-16], [-1e-14, 2e-14, -1e-15]]}
inputs = [[0.5, 0, 1], [0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1], [0, -0.5, 1], [-0.5, -0.5, 1],
[5, 10, 1000.23], [[10], [-22], [1200.23]]]
temperatures = [0, -1, 1, -10.5, 10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for inp in inputs:
for temp in temperatures:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-2, atol=1e-10)
num = num_deriv(inp, model, delta=1, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-2)
def test_compute_jacobian(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_misalignment = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_misalignment
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_misalignment
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_misalignment
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_misalignment
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_misalignment
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_misalignment
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_misalignment * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_misalignment * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_misalignment * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model_param = {"focal_length": 100.75, "kx": 30, "ky": 40, "px": 4005.23, "py": 4005.23,
"a1": 1e-5, "a2": 1e-6, "a3": 1e-7,
"misalignment": [[0, 0, 1e-15], [0, 2e-15, 0], [3e-15, 0, 0]]}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1000]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-2, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1, image=ind, nimages=numim, temperature=temp))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-2, atol=1e-10)
def test_remove_jacobian_columns(self):
jac = np.arange(30).reshape(1, -1)
model = self.Class()
for est_param, vals in model.element_dict.items():
model.estimation_parameters = [est_param]
expected = jac[0, vals]
np.testing.assert_array_equal(model._remove_jacobian_columns(jac), [expected])
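# Applying an update vector should write each entry into the matching model parameter,
# with the trailing entries interpreted as rotation vectors for the per-image misalignments.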
def test_apply_update(self):
model_param = {"focal_length": 0, "kx": 0, "ky": 0,
"px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]]}
model = self.Class(**model_param, estimation_parameters=['intrinsic',
'temperature dependence',
'multiple misalignments'])
update_vec = np.arange(14)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[8:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
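# Manually distort and temperature-scale each gnomic point, map it to pixels with the
# intrinsic matrix, and confirm pixels_to_gnomic inverts the chain back to the original point.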
def test_pixels_to_gnomic(self):
gnomic = [[1, 0], [0, 1], [-1, 0], [0, -1],
[0.5, 0], [0, 0.5], [-0.5, 0], [0, -0.5],
[0.5, 0.5], [-0.5, -0.5], [0.5, -0.5], [-0.5, 0.5],
[[1, 0, 0.5], [0, 1.5, -0.5]]]
model = self.Class(kx=2000, ky=-3000.2, px=1025, py=937.567,
a1=1e-3, a2=2e-6, a3=-5.5e-8)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnoms in gnomic:
for temp in temperatures:
with self.subTest(gnoms=gnoms, temp=temp):
dis_gnoms = np.asarray(model.apply_distortion(gnoms)).astype(float)
dis_gnoms *= model.get_temperature_scale(temp)
pixels = ((model.intrinsic_matrix[:, :2] @ dis_gnoms).T + model.intrinsic_matrix[:, 2]).T
gnoms_solved = model.pixels_to_gnomic(pixels, temperature=temp)
np.testing.assert_allclose(gnoms_solved, gnoms)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "px": 4005.23, 'py': 2000.33, 'a1': 1e-6, 'a2': 1e-5, 'a3': 2e-5}
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
model = self.Class(**intrins_param)
temperatures = [0, 1, -1, 10.5, -10.5]
for gnom in pinhole:
gnom = np.asarray(gnom).astype(float)
for temp in temperatures:
with self.subTest(gnom=gnom, temp=temp):
mm_dist = model.apply_distortion(np.array(gnom))
temp_scale = model.get_temperature_scale(temp)
mm_dist *= temp_scale
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnom *= temp_scale
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnom).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
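# Project camera vectors onto the image and check that pixels_to_unit recovers the
# normalized direction for each image, misalignment, and temperature.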
def test_pixels_to_unit(self):
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000,
"px": 4005.23, 'py': 2000.33, 'a1': 1e-5, 'a2': -1e-10, 'a3': 2e-4,
'misalignment': [[1e-10, 2e-13, -3e-12], [4e-8, -5.3e-9, 9e-15]]}
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**intrins_param)
# TODO: consider adjusting so this isn't needed
model.estimate_multiple_misalignments = True
for vec in camera_vecs:
for image in [0, 1]:
for temp in temperatures:
with self.subTest(vec=vec, image=image, temp=temp):
pixel_loc = model.project_onto_image(vec, image=image, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=image, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
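# overwrite should copy every setting (field of view, a priori flag, intrinsic matrix,
# misalignments, estimation parameters) from the other model, in both directions.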
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 0, 3], [0, 5, 6]]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'])
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 0, 13], [0, 15, 16]]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'])
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(model2.field_of_view, modeltest.field_of_view)
self.assertEqual(model2.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model2.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model2.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model2.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model2.estimation_parameters, modeltest.estimation_parameters)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(model1.field_of_view, modeltest.field_of_view)
self.assertEqual(model1.use_a_priori, modeltest.use_a_priori)
self.assertEqual(model1.estimate_multiple_misalignments, modeltest.estimate_multiple_misalignments)
np.testing.assert_array_equal(model1.intrinsic_matrix, modeltest.intrinsic_matrix)
np.testing.assert_array_equal(model1.misalignment, modeltest.misalignment)
np.testing.assert_array_equal(model1.estimation_parameters, modeltest.estimation_parameters)
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix)
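# With no distortion coefficients set, the distortion map should be identically zero on the
# requested row/column grid.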
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
# noinspection PyTypeChecker
np.testing.assert_allclose(dist, 0, atol=1e-10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
def test_undistort_image(self):
# not sure how best to do this test...
pass
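# Mutating the original after copy() must not change the copy, confirming a deep copy of
# every parameter, flag, and the misalignment list.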
def test_copy(self):
model = self.Class()
model_copy = model.copy()
model.kx = 1000
model.ky = 999
model.px = 100
model.py = -20
model.a1 = 5
model.a2 = 6
model.a3 = 7
model._focal_length = 11231
model.field_of_view = 1231231
model.use_a_priori = True
model.estimation_parameters = ['a1', 'kx', 'ky']
model.estimate_multiple_misalignments = True
model.misalignment = [1231241, 123124, .12]
self.assertNotEqual(model.kx, model_copy.kx)
self.assertNotEqual(model.ky, model_copy.ky)
self.assertNotEqual(model.px, model_copy.px)
self.assertNotEqual(model.py, model_copy.py)
self.assertNotEqual(model.a1, model_copy.a1)
self.assertNotEqual(model.a2, model_copy.a2)
self.assertNotEqual(model.a3, model_copy.a3)
self.assertNotEqual(model.focal_length, model_copy.focal_length)
self.assertNotEqual(model.field_of_view, model_copy.field_of_view)
self.assertNotEqual(model.use_a_priori, model_copy.use_a_priori)
self.assertNotEqual(model.estimate_multiple_misalignments, model_copy.estimate_multiple_misalignments)
self.assertNotEqual(model.estimation_parameters, model_copy.estimation_parameters)
self.assertTrue((model.misalignment != model_copy.misalignment).all())
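# Round-trip the model through an XML element with and without the misalignment data; when
# misalignment is omitted, the reconstructed model falls back to a single zero misalignment.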
def test_to_from_elem(self):
element = etree.Element(self.Class.__name__)
model = self.Class(focal_length=20, field_of_view=5, use_a_priori=True,
misalignment=[1, 2, 3], kx=2, ky=200, px=50, py=300,
a1=37, a2=1, a3=-1230,
estimation_parameters=['a1', 'multiple misalignments'], n_rows=20, n_cols=30)
model_copy = model.copy()
with self.subTest(misalignment=True):
element = model.to_elem(element, misalignment=True)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
self.assertEqual(model, model_new)
with self.subTest(misalignment=False):
element = model.to_elem(element, misalignment=False)
self.assertEqual(model, model_copy)
model_new = self.Class.from_elem(element)
model.estimation_parameters[-1] = 'single misalignment'
model.estimate_multiple_misalignments = False
model.misalignment = np.zeros(3)
self.assertEqual(model, model_new)
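# The Owen model tests reuse the pinhole suite above and add coverage for the extra
# intrinsic terms (kxy, kyx) and the Owen distortion coefficients (radial, tangential,
# and pinwheel).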
class TestOwenModel(TestPinholeModel):
def setUp(self):
self.Class = OwenModel
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 7))
model = self.Class(kx=1, ky=2, px=4, py=5, focal_length=10.5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80, kyx=90,
estimation_parameters=['focal_length', 'px'], n_rows=500, n_cols=600,
e1=1, radial2=2, pinwheel2=3, e4=4, tangential_x=6, e5=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [90, 2, 5]])
self.assertEqual(model.focal_length, 10.5)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['focal_length', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, [2, 4, 5, 6, 1, 3])
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_kyx(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [3, 0, 0]]))
self.assertEqual(model.kyx, 3)
model.kyx = 100
self.assertEqual(model.kyx, 100)
self.assertEqual(model.intrinsic_matrix[1, 0], 100)
def test_e1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.e1, 1)
model.e1 = 100
self.assertEqual(model.e1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_e2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.e2, 1)
model.e2 = 100
self.assertEqual(model.e2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_e3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.e3, 1)
model.e3 = 100
self.assertEqual(model.e3, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_e4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.e4, 1)
model.e4 = 100
self.assertEqual(model.e4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_e5(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.e5, 1)
model.e5 = 100
self.assertEqual(model.e5, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_e6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.e6, 1)
model.e6 = 100
self.assertEqual(model.e6, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_pinwheel1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1, 0]))
self.assertEqual(model.pinwheel1, 1)
model.pinwheel1 = 100
self.assertEqual(model.pinwheel1, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_pinwheel2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 0, 1]))
self.assertEqual(model.pinwheel2, 1)
model.pinwheel2 = 100
self.assertEqual(model.pinwheel2, 100)
self.assertEqual(model.distortion_coefficients[5], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_tangential_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0, 0]))
self.assertEqual(model.tangential_y, 1)
model.tangential_y = 100
self.assertEqual(model.tangential_y, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tangential_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0, 0]))
self.assertEqual(model.tangential_x, 1)
model.tangential_x = 100
self.assertEqual(model.tangential_x, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_apply_distortion(self):
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [0.5, 0], [(1.5 + 1.5 ** 3), 0], [-1.5 + 1.5 ** 3, 0],
[[(1.5 + 1.5 ** 3)], [0]], [[(1.5 + 1.5 ** 3), 0.5], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [2.5, 2.5]],
[[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 2.5], [0, 0.5], [0, 1.5 + 1.5 ** 3], [0, -1.5 + 1.5 ** 3], [[0], [1.5 + 1.5 ** 3]],
[[0, 0], [1.5 + 1.5 ** 3, 0.5]], [2.5, 2.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 3], [-1.5, -1.5 ** 3],
[[1.5], [1.5 ** 3]], [[1.5, -1], [1.5 ** 3, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[-1.5 ** 3], [1.5]],
[[-1.5 ** 3, 1.5], [1.5, -1]],
[1 - np.sqrt(2) * 1.5, 1 + np.sqrt(2) * 1.5]],
[[0, 0], [1, 1.5], [-1, -1.5], [1.5, 1.5 ** 5], [-1.5, -1.5 ** 5],
[[1.5], [1.5 ** 5]], [[1.5, -1], [1.5 ** 5, -1.5]],
[-1.5, 1], [1.5, -1], [-1.5 ** 5, 1.5], [1.5 ** 5, -1.5], [[-1.5 ** 5], [1.5]],
[[-1.5 ** 5, 1.5], [1.5, -1]],
[1 - 2 * np.sqrt(2) * 1.5, 1 + 2 * np.sqrt(2) * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
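# Check each stage returned by get_projections (pinhole, distorted, pixel) against a manual
# composition of the projection, including 180-degree and general misalignment rotations
# and per-image misalignments.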
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1e-1, a2=1e-6, a3=-3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, 0, np.pi])

with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[np.pi, 0, 0])
with self.subTest(misalignment=[np.pi, 0, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[0] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[0, np.pi, 0])
with self.subTest(misalignment=[0, np.pi, 0]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point[:2]) / point[2]
pin_true[1] *= -1
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[1, 0.2, 0.3])
with self.subTest(misalignment=[1, 0.2, 0.3]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
rot_mat = at.rotvec_to_rotmat([1, 0.2, 0.3]).squeeze()
for point in points:
point_new = rot_mat @ point
pin, dist, pix = model.get_projections(point, image=0)
pin_true = model.focal_length * np.array(point_new[:2]) / np.array(point_new[2])
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
pin, dist, pix = model.get_projections(point, image=1)
pin_true = -model.focal_length * np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
np.testing.assert_array_almost_equal(pix, pix_true)
def test_project_onto_image(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, a1=1, a2=2, a3=-3)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
_, __, pix = model.get_projections(point, temperature=temp)
pix_proj = model.project_onto_image(point, temperature=temp)
np.testing.assert_array_equal(pix, pix_proj)
model = self.Class(focal_length=8.7, kx=500, ky=500.5, kxy=1.5, kyx=-1.5, px=1500, py=1500.5,
radial2=1e-3, radial4=-2.2e-5, tangential_y=1e-3, tangential_x=1e-6,
pinwheel1=1e-6, pinwheel2=-2.23e-8, misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]])
model.estimate_multiple_misalignments = True
with self.subTest(misalignment=[[1, 0.2, 0.3], [0, 0, np.pi]]):
for point in points:
_, __, pix = model.get_projections(point, image=0)
pix_proj = model.project_onto_image(point, image=0)
np.testing.assert_array_equal(pix, pix_proj)
_, __, pix = model.get_projections(point, image=1)
pix_proj = model.project_onto_image(point, image=1)
np.testing.assert_array_equal(pix, pix_proj)
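# Compare the analytic Jacobian of the pixel location with respect to the camera-frame
# vector against central differences of project_onto_image.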
def test_compute_pixel_jacobian(self):
def num_deriv(uvec, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
uvec = np.array(uvec).reshape(3, -1)
pix_true = cmodel.project_onto_image(uvec, image=image, temperature=temperature)
uvec_pert = uvec + [[delta], [0], [0]]
pix_pert_x_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [delta], [0]]
pix_pert_y_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec + [[0], [0], [delta]]
pix_pert_z_f = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[delta], [0], [0]]
pix_pert_x_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [delta], [0]]
pix_pert_y_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
uvec_pert = uvec - [[0], [0], [delta]]
pix_pert_z_b = cmodel.project_onto_image(uvec_pert, image=image, temperature=temperature)
return np.array([(pix_pert_x_f-pix_pert_x_b)/(2*delta),
(pix_pert_y_f-pix_pert_y_b)/(2*delta),
(pix_pert_z_f-pix_pert_z_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_pixel_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-6)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test__compute_dcamera_point_dgnomic(self):
def num_deriv(gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def g2u(g):
v = np.vstack([g, cmodel.focal_length*np.ones(g.shape[-1])])
return v/np.linalg.norm(v, axis=0, keepdims=True)
gnomic_locations = np.asarray(gnomic_locations).reshape(2, -1)
gnom_pert = gnomic_locations + [[delta], [0]]
cam_loc_pert_x_f = g2u(gnom_pert)
gnom_pert = gnomic_locations + [[0], [delta]]
cam_loc_pert_y_f = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[delta], [0]]
cam_loc_pert_x_b = g2u(gnom_pert)
gnom_pert = gnomic_locations - [[0], [delta]]
cam_loc_pert_y_b = g2u(gnom_pert)
return np.array([(cam_loc_pert_x_f - cam_loc_pert_x_b)/(2*delta),
(cam_loc_pert_y_f - cam_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for gnom in input.T:
jac_ana.append(
model._compute_dcamera_point_dgnomic(gnom, np.sqrt(np.sum(gnom*gnom) + model.focal_length**2)))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
def test__compute_dgnomic_ddist_gnomic(self):
def num_deriv(dist_gnomic_locations, cmodel, delta=1e-6) -> np.ndarray:
def dg2g(dg):
gnomic_guess = dg.copy()
# perform the fpa
for _ in np.arange(20):
# get the distorted location assuming the current guess is correct
gnomic_guess_distorted = cmodel.apply_distortion(gnomic_guess)
# subtract off the residual distortion from the gnomic guess
gnomic_guess += dg - gnomic_guess_distorted
# check for convergence
if np.all(np.linalg.norm(gnomic_guess_distorted - dg, axis=0) <= 1e-15):
break
return gnomic_guess
dist_gnomic_locations = np.asarray(dist_gnomic_locations).reshape(2, -1)
dist_gnom_pert = dist_gnomic_locations + [[delta], [0]]
gnom_loc_pert_x_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations + [[0], [delta]]
gnom_loc_pert_y_f = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[delta], [0]]
gnom_loc_pert_x_b = dg2g(dist_gnom_pert)
dist_gnom_pert = dist_gnomic_locations - [[0], [delta]]
gnom_loc_pert_y_b = dg2g(dist_gnom_pert)
return np.array([(gnom_loc_pert_x_f - gnom_loc_pert_x_b)/(2*delta),
(gnom_loc_pert_y_f - gnom_loc_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 0.1], [0.1, 0], [0.1, 0.1]]).T,
np.array([[-0.1, 0], [0, -0.1], [-0.1, -0.1], [0.1, -0.1], [-0.1, 0.1]]).T]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for input in inputs:
with self.subTest(input=input):
jac_ana = []
for dist_gnom in input.T:
jac_ana.append(model._compute_dgnomic_ddist_gnomic(dist_gnom))
jac_ana = np.array(jac_ana)
jac_num = num_deriv(input, model)
np.testing.assert_almost_equal(jac_ana, jac_num)
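# Compare the analytic Jacobian of the unit vector with respect to the pixel location
# against central differences of pixels_to_unit.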
def test_compute_unit_vector_jacobian(self):
def num_deriv(pixels, cmodel, delta=1e-6, image=0, temperature=0) -> np.ndarray:
pixels = np.array(pixels).reshape(2, -1)
pix_pert = pixels + [[delta], [0]]
uvec_pert_x_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels + [[0], [delta]]
uvec_pert_y_f = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[delta], [0]]
uvec_pert_x_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
pix_pert = pixels - [[0], [delta]]
uvec_pert_y_b = cmodel.pixels_to_unit(pix_pert, image=image, temperature=temperature)
return np.array([(uvec_pert_x_f-uvec_pert_x_b)/(2*delta),
(uvec_pert_y_f-uvec_pert_y_b)/(2*delta)]).swapaxes(0, -1)
model_param = {"focal_length": 100.75, "radial2": 1.5e-8, "radial4": 1.5e-8, "tangential_x": 1.5e-8,
"tangential_y": 1.5e-8, "pinwheel1": 1.5e-8, "pinwheel2": 1.5e-8, "kx": 300, "ky": 400,
"kxy": 0.5, "kyx": -0.8, "px": 1005.23, "py": 1005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0, 0]]).T,
np.array([[0, 2000], [2000, 0], [2000, 2000]]).T,
np.array([[1000, 1000], [1000, 2000], [2000, 1000], [0, 1000], [1000, 0]]).T]
temperatures = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temperatures:
for input in inputs:
for image in range(3):
with self.subTest(image=image, temp=temp, input=input):
jac_ana = model.compute_unit_vector_jacobian(input, image=image, temperature=temp)
jac_num = num_deriv(input, model, image=image, temperature=temp, delta=1e-2)
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
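# The partial of the distortion (distorted minus pinhole location) with respect to the
# gnomic point is checked against central differences for each coefficient in isolation
# and for all of them together.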
def test__compute_ddistortion_dgnomic(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
dist_pert_x_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) + [0, delta]
dist_pert_y_f = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [delta, 0]
dist_pert_x_b = cmodel.apply_distortion(loc_pert) - loc_pert
loc_pert = np.array(loc) - [0, delta]
dist_pert_y_b = cmodel.apply_distortion(loc_pert) - loc_pert
return np.array(
[(dist_pert_x_f - dist_pert_x_b) / (2 * delta), (dist_pert_y_f - dist_pert_y_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5},
{"e1": -1.5, "e2": -1.5, "e3": -1.5, "e4": -1.5, "e5": -1.5, "e6": -1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistortion_dgnomic(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_ddistorted_gnomic(self):
def num_deriv(loc, cmodel, delta=1e-8, temperature=0) -> np.ndarray:
loc_pert = np.array(loc) + [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) + [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_f = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [delta, 0]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_x_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
loc_pert = np.array(loc) - [0, delta]
loc_pert *= cmodel.get_temperature_scale(temperature)
pix_pert_y_b = cmodel.intrinsic_matrix[:, :2] @ loc_pert + cmodel.intrinsic_matrix[:, 2]
return np.array(
[(pix_pert_x_f - pix_pert_x_b) / (2 * delta), (pix_pert_y_f - pix_pert_y_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 1.5, "a2": 0, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 1.5, "a3": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0, "a1": 0, "a2": 0, "a3": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5,
"a1": 1.5, "a2": 1.5, "a3": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
temps = [0, 1, -1, 10.5, -10.5]
for temp in temps:
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef, temp=temp):
for inp in inputs:
num = num_deriv(inp, model, temperature=temp)
ana = model._compute_dpixel_ddistorted_gnomic(temperature=temp)
np.testing.assert_allclose(num, ana, atol=1e-10)
def test__compute_dpixel_dintrinsic(self):
def num_deriv(loc, cmodel, delta=1e-6) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.intrinsic_matrix[:, :2] @ loc + model_pert.intrinsic_matrix[:, 2]
return np.array([(pix_pert_kx_f - pix_pert_kx_b) / (2 * delta),
(pix_pert_kxy_f - pix_pert_kxy_b) / (2 * delta),
(pix_pert_kyx_f - pix_pert_kyx_b) / (2 * delta),
(pix_pert_ky_f - pix_pert_ky_b) / (2 * delta),
(pix_pert_px_f - pix_pert_px_b) / (2 * delta),
(pix_pert_py_f - pix_pert_py_b) / (2 * delta)]).T
intrins_coefs = [{"kx": 1.5, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 1.5, "ky": 0, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 1.5, "kyx": 0, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 1.5, "px": 0, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 1.5, "py": 0},
{"kx": 0, "kxy": 0, "ky": 0, "kyx": 0, "px": 0, "py": 1.5},
{"kx": 1.5, "kxy": 1.5, "ky": 1.5, "kyx": 1.5, "px": 1.5, "py": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for intrins_coef in intrins_coefs:
model = self.Class(**intrins_coef)
with self.subTest(**intrins_coef):
for inp in inputs:
num = num_deriv(inp, model)
ana = model._compute_dpixel_dintrinsic(np.array(inp))
np.testing.assert_allclose(num, ana, atol=1e-10)
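# The partial of the distorted gnomic location with respect to the distortion coefficients
# is checked against central differences, one coefficient at a time.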
def test__compute_ddistorted_gnomic_ddistortion(self):
def num_deriv(loc, cmodel, delta=1e-8) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.radial2 += delta
loc_pert_r2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 += delta
loc_pert_r4_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y += delta
loc_pert_ty_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x += delta
loc_pert_tx_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
loc_pert_p1_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
loc_pert_p2_f = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial2 -= delta
loc_pert_r2_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.radial4 -= delta
loc_pert_r4_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
loc_pert_ty_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
loc_pert_tx_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
loc_pert_p1_b = model_pert.apply_distortion(loc)
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
loc_pert_p2_b = model_pert.apply_distortion(loc)
return np.array([(loc_pert_r2_f - loc_pert_r2_b) / (2 * delta),
(loc_pert_r4_f - loc_pert_r4_b) / (2 * delta),
(loc_pert_ty_f - loc_pert_ty_b) / (2 * delta),
(loc_pert_tx_f - loc_pert_tx_b) / (2 * delta),
(loc_pert_p1_f - loc_pert_p1_b) / (2 * delta),
(loc_pert_p2_f - loc_pert_p2_b) / (2 * delta)]).T
dist_coefs = [{"radial2": 1.5, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5}]
inputs = [[1e-6, 1e-6], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [0, 1], [0, -1], [0, 1.5], [0, -1.5], [1, 1]]
for dist_coef in dist_coefs:
model = self.Class(**dist_coef)
with self.subTest(**dist_coef):
for inp in inputs:
r = np.sqrt(inp[0] ** 2 + inp[1] ** 2)
r2 = r ** 2
r3 = r ** 3
r4 = r ** 4
num = num_deriv(inp, model)
ana = model._compute_ddistorted_gnomic_ddistortion(np.array(inp), r, r2, r3, r4)
np.testing.assert_allclose(num, ana, atol=1e-10)
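# Single-row Jacobian check for the Owen model: the numerical helper perturbs every
# estimable parameter (including the extra intrinsic and distortion terms) and the
# misalignment of the requested image, using a smaller step for the misalignment.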
def test_get_jacobian_row(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
delta_m = 1e-6
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta_m
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta_m
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta_m
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta_m
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta_m
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta_m
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta_m * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta_m * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta_m * 2)]).T
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [[0.1, 0, 1], [0, 0.1, 1], [0.1, 0.1, 1], [-0.1, 0, 1], [0, -0.1, 1], [-0.1, -0.1, 1],
[5, 10, 1000.23], [[1], [2], [1200.23]]]
temps = [0, 1, -1, 10.5, -10.5]
model = self.Class(**model_param)
model.estimate_multiple_misalignments = True
for temp in temps:
for inp in inputs:
with self.subTest(temp=temp, inp=inp):
num = num_deriv(inp, model, delta=1e-3, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 0, 1, temperature=temp)
np.testing.assert_allclose(ana, num, rtol=1e-3, atol=1e-10)
num = num_deriv(inp, model, delta=1e-3, image=1, temperature=temp)
ana = model._get_jacobian_row(np.array(inp), 1, 2, temperature=temp)
np.testing.assert_allclose(ana, num, atol=1e-10, rtol=1e-3)
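# Full-Jacobian check for the Owen model over multiple images, including a case where a
# separate temperature is supplied per image, with and without the a priori rows.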
def test_compute_jacobian(self):
def num_deriv(loc, cmodel, delta=1e-8, image=0, nimages=1, temperature=0) -> np.ndarray:
model_pert = cmodel.copy()
model_pert.focal_length += delta
pix_pert_f_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.focal_length -= delta
pix_pert_f_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx += delta
pix_pert_kx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy += delta
pix_pert_kxy_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx += delta
pix_pert_kyx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky += delta
pix_pert_ky_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px += delta
pix_pert_px_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py += delta
pix_pert_py_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kx -= delta
pix_pert_kx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kxy -= delta
pix_pert_kxy_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.kyx -= delta
pix_pert_kyx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.ky -= delta
pix_pert_ky_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.px -= delta
pix_pert_px_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.py -= delta
pix_pert_py_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 += delta
pix_pert_r2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 += delta
pix_pert_r4_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y += delta
pix_pert_ty_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x += delta
pix_pert_tx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 += delta
pix_pert_p1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 += delta
pix_pert_p2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial2 -= delta
pix_pert_r2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.radial4 -= delta
pix_pert_r4_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_y -= delta
pix_pert_ty_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.tangential_x -= delta
pix_pert_tx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel1 -= delta
pix_pert_p1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.pinwheel2 -= delta
pix_pert_p2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 += delta
pix_pert_a1_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 += delta
pix_pert_a2_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 += delta
pix_pert_a3_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a1 -= delta
pix_pert_a1_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a2 -= delta
pix_pert_a2_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.a3 -= delta
pix_pert_a3_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] += delta
pix_pert_mx_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] += delta
pix_pert_my_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] += delta
pix_pert_mz_f = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][0] -= delta
pix_pert_mx_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][1] -= delta
pix_pert_my_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
model_pert = cmodel.copy()
model_pert.misalignment[image][2] -= delta
pix_pert_mz_b = model_pert.project_onto_image(loc, image=image, temperature=temperature).flatten()
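# stack the central differences (f(p + delta) - f(p - delta)) / (2 * delta) for each
# parameter into the numerical Jacobian, with zero columns for the misalignment
# parameters that belong to the other images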
return np.vstack([(pix_pert_f_f - pix_pert_f_b) / (delta * 2),
(pix_pert_kx_f - pix_pert_kx_b) / (delta * 2),
(pix_pert_kxy_f - pix_pert_kxy_b) / (delta * 2),
(pix_pert_kyx_f - pix_pert_kyx_b) / (delta * 2),
(pix_pert_ky_f - pix_pert_ky_b) / (delta * 2),
(pix_pert_px_f - pix_pert_px_b) / (delta * 2),
(pix_pert_py_f - pix_pert_py_b) / (delta * 2),
(pix_pert_r2_f - pix_pert_r2_b) / (delta * 2),
(pix_pert_r4_f - pix_pert_r4_b) / (delta * 2),
(pix_pert_ty_f - pix_pert_ty_b) / (delta * 2),
(pix_pert_tx_f - pix_pert_tx_b) / (delta * 2),
(pix_pert_p1_f - pix_pert_p1_b) / (delta * 2),
(pix_pert_p2_f - pix_pert_p2_b) / (delta * 2),
(pix_pert_a1_f - pix_pert_a1_b) / (delta * 2),
(pix_pert_a2_f - pix_pert_a2_b) / (delta * 2),
(pix_pert_a3_f - pix_pert_a3_b) / (delta * 2),
np.zeros((image * 3, 2)),
(pix_pert_mx_f - pix_pert_mx_b) / (delta * 2),
(pix_pert_my_f - pix_pert_my_b) / (delta * 2),
(pix_pert_mz_f - pix_pert_mz_b) / (delta * 2),
np.zeros(((nimages - image - 1) * 3, 2))]).T
model_param = {"focal_length": 100.75, "radial2": 1.5e-5, "radial4": 1.5e-5, "tangential_x": 1.5e-5,
"tangential_y": 1.5e-5, "pinwheel1": 1.5e-5, "pinwheel2": 1.5e-5, "kx": 30, "ky": 40,
"kxy": 0.5, "kyx": -0.8, "px": 4005.23, "py": 4005.23,
"misalignment": [[1e-8, 1e-9, 1e-10], [-1e-8, 2e-9, -1e-11], [2e-10, -5e-12, 1e-9]],
"a1": 1e-6, "a2": 2e-7, "a3": 3e-8}
inputs = [np.array([[0.5, 0, 1]]).T,
np.array([[0, 0.5, 1], [0.5, 0.5, 1], [-0.5, 0, 1]]).T,
np.array([[0.1, -0.5, 1], [-0.5, -0.5, 1], [5, 10, 1000.23], [1, 2, 1200.23]]).T]
temperatures = [0, 1, -1, 10.5, -10.5, [1, -10, 10]]
model = self.Class(**model_param, estimation_parameters=['intrinsic', 'temperature dependence',
'multiple misalignments'])
for temp in temperatures:
with self.subTest(temp=temp):
model.use_a_priori = False
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
np.testing.assert_allclose(jac_ana, np.vstack(jac_num), rtol=1e-3, atol=1e-10)
model.use_a_priori = True
jac_ana = model.compute_jacobian(inputs, temperature=temp)
jac_num = []
numim = len(inputs)
for ind, inp in enumerate(inputs):
if isinstance(temp, list):
templ = temp[ind]
else:
templ = temp
for vec in inp.T:
jac_num.append(num_deriv(vec.T, model, delta=1e-4, image=ind, nimages=numim, temperature=templ))
jac_num = np.vstack(jac_num)
jac_num = np.pad(jac_num, [(0, jac_num.shape[1]), (0, 0)], 'constant', constant_values=0)
jac_num[-jac_num.shape[1]:] = np.eye(jac_num.shape[1])
np.testing.assert_allclose(jac_ana, jac_num, rtol=1e-3, atol=1e-10)
def test_apply_update(self):
model_param = {"focal_length": 0, "radial2": 0, "radial4": 0, "tangential_x": 0,
"tangential_y": 0, "pinwheel1": 0, "pinwheel2": 0, "kx": 0, "ky": 0,
"kxy": 0, "kyx": 0, "px": 0,
"misalignment": [[0, 0, 0], [0, 0, 0]],
"a1": 0, "a2": 0, "a3": 0}
model = self.Class(**model_param, estimation_parameters=['intrinsic', "temperature dependence",
'multiple misalignments'])
update_vec = np.arange(22)
model.apply_update(update_vec)
keys = list(model_param.keys())
keys.remove('misalignment')
for key in keys:
self.assertEqual(getattr(model, key), update_vec[model.element_dict[key][0]])
for ind, vec in enumerate(update_vec[16:].reshape(-1, 3)):
np.testing.assert_array_almost_equal(at.Rotation(vec).q, at.Rotation(model.misalignment[ind]).q)
def test_pixels_to_gnomic(self):
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
'a1': 1e-6, 'a2': 1e-7, 'a3': 1e-8}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
mm_undist = model.pixels_to_gnomic(pix_dist, temperature=temp)
np.testing.assert_allclose(mm_undist, gnoms, atol=1e-13)
def test_undistort_pixels(self):
intrins_param = {"kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8, "px": 4005.23, 'py': 2000.33,
"a1": 1e-3, "a2": 1e-4, "a3": 1e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3}]
pinhole = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for gnoms in pinhole:
with self.subTest(**dist, temp=temp, gnoms=gnoms):
gnoms = np.array(gnoms).astype(np.float64)
mm_dist = model.apply_distortion(np.array(gnoms))
mm_dist *= model.get_temperature_scale(temp)
pix_dist = ((model.intrinsic_matrix[:, :2] @ mm_dist).T + model.intrinsic_matrix[:, 2]).T
pix_undist = model.undistort_pixels(pix_dist, temperature=temp)
gnoms *= model.get_temperature_scale(temp)
pix_pinhole = ((model.intrinsic_matrix[:, :2] @ gnoms).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_allclose(pix_undist, pix_pinhole, atol=1e-13)
def test_pixels_to_unit(self):
intrins_param = {"focal_length": 32.7, "kx": 3000, "ky": 4000, "kxy": 0.5, "kyx": -0.8,
"px": 4005.23, 'py': 2000.33, "a1": 1e-6, "a2": 1e-7, "a3": -3e-5}
dist_coefs = [{"radial2": 1.5e-3, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 1.5e-3, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 1.5e-3, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 1.5e-3,
"pinwheel1": 0, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 1.5e-3, "pinwheel2": 0},
{"radial2": 0, "radial4": 0, "tangential_x": 0, "tangential_y": 0,
"pinwheel1": 0, "pinwheel2": 1.5e-3},
{"radial2": 1.5e-3, "radial4": 1.5e-3, "tangential_x": 1.5e-3, "tangential_y": 1.5e-3,
"pinwheel1": 1.5e-3, "pinwheel2": 1.5e-3},
{"misalignment": np.array([1e-11, 2e-12, -1e-10])},
{"misalignment": np.array([[1e-11, 2e-12, -1e-10], [-1e-13, 1e-11, 2e-12]]),
"estimation_parameters": "multiple misalignments"}]
camera_vecs = [[0, 0, 1], [0.01, 0, 1], [-0.01, 0, 1], [0, 0.01, 1], [0, -0.01, 1], [0.01, 0.01, 1],
[-0.01, -0.01, 1], [[0.01, -0.01], [-0.01, 0.01], [1, 1]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for temp in temperatures:
for dist in dist_coefs:
model = self.Class(**dist, **intrins_param)
for vec in camera_vecs:
with self.subTest(**dist, temp=temp, vec=vec):
pixel_loc = model.project_onto_image(vec, image=-1, temperature=temp)
unit_vec = model.pixels_to_unit(pixel_loc, image=-1, temperature=temp)
unit_true = np.array(vec).astype(np.float64)
unit_true /= np.linalg.norm(unit_true, axis=0, keepdims=True)
np.testing.assert_allclose(unit_vec, unit_true, atol=1e-13)
def test_overwrite(self):
model1 = self.Class(field_of_view=10, intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]),
distortion_coefficients=np.array([1, 2, 3, 4, 5, 6]), focal_length=60,
misalignment=[[1, 2, 3], [4, 5, 6]], use_a_priori=False,
estimation_parameters=['multiple misalignments'], a1=0, a2=3, a3=5)
model2 = self.Class(field_of_view=20, intrinsic_matrix=np.array([[11, 12, 13], [14, 15, 16]]),
distortion_coefficients=np.array([11, 12, 13, 14, 15, 16]), focal_length=160,
misalignment=[[11, 12, 13], [14, 15, 16]], use_a_priori=True,
estimation_parameters=['single misalignment'], a1=-100, a2=-200, a3=-300)
modeltest = model1.copy()
modeltest.overwrite(model2)
self.assertEqual(modeltest, model2)
modeltest = model2.copy()
modeltest.overwrite(model1)
self.assertEqual(modeltest, model1)
def test_intrinsic_matrix_inv(self):
model = self.Class(kx=5, ky=10, kxy=20, kyx=-30.4, px=100, py=-5)
np.testing.assert_array_almost_equal(
model.intrinsic_matrix @ np.vstack([model.intrinsic_matrix_inv, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
np.testing.assert_array_almost_equal(
model.intrinsic_matrix_inv @ np.vstack([model.intrinsic_matrix, [0, 0, 1]]),
[[1, 0, 0], [0, 1, 0]])
def test_distort_pixels(self):
model = self.Class(kx=1000, ky=-950.5, px=4500, py=139.32, a1=1e-3, a2=1e-4, a3=1e-5,
kxy=0.5, kyx=-8, radial2=1e-5, radial4=1e-5, pinwheel2=1e-7, pinwheel1=-1e-12,
tangential_x=1e-6, tangential_y=2e-12)
pixels = [[0, 1], [1, 0], [-1, 0], [0, -1], [9000., 200.2],
[[4500, 100, 10.98], [0, 139.23, 200.3]]]
temperatures = [0, 1, -1, 10.5, -10.5]
for pix in pixels:
for temp in temperatures:
with self.subTest(pix=pix, temp=temp):
undist_pix = model.undistort_pixels(pix, temperature=temp)
dist_pix = model.distort_pixels(undist_pix, temperature=temp)
np.testing.assert_allclose(dist_pix, pix, atol=1e-10)
def test_distortion_map(self):
model = self.Class(kx=100, ky=-985.234, px=1000, py=1095, kxy=10, kyx=-5,
e1=1e-6, e2=1e-12, e3=-4e-10, e5=6e-7, e6=-1e-5, e4=1e-7,
a1=1e-6, a2=-1e-7, a3=4e-12)
rows, cols, dist = model.distortion_map((2000, 250), step=10)
rl, cl = np.arange(0, 2000, 10), np.arange(0, 250, 10)
rs, cs = np.meshgrid(rl, cl, indexing='ij')
np.testing.assert_array_equal(rows, rs)
np.testing.assert_array_equal(cols, cs)
distl = model.distort_pixels(np.vstack([cs.flatten(), rs.flatten()]))
np.testing.assert_array_equal(distl - np.vstack([cs.flatten(), rs.flatten()]), dist)
class TestBrownModel(TestPinholeModel):
def setUp(self):
self.Class = BrownModel
# Not supported for this model
test__compute_dgnomic_dfocal_length = None
def test___init__(self):
model = self.Class(intrinsic_matrix=np.array([[1, 2, 3], [4, 5, 6]]), field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)],
distortion_coefficients=np.array([1, 2, 3, 4, 5]),
estimation_parameters='basic intrinsic',
a1=1, a2=2, a3=3)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 2, 3], [4, 5, 6]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['basic intrinsic'])
self.assertEqual(model.a1, 1)
self.assertEqual(model.a2, 2)
self.assertEqual(model.a3, 3)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
model = self.Class(kx=1, fy=2, px=4, py=5, field_of_view=20.5,
use_a_priori=True, misalignment=[np.zeros(3), np.ones(3)], kxy=80,
estimation_parameters=['kx', 'px'], n_rows=500, n_cols=600,
radial2=1, radial4=2, k3=3, p1=4, tiptilt_x=5)
np.testing.assert_array_equal(model.intrinsic_matrix, [[1, 80, 4], [0, 2, 5]])
self.assertEqual(model.focal_length, 1)
self.assertEqual(model.field_of_view, 20.5)
self.assertTrue(model.use_a_priori)
self.assertEqual(model.estimation_parameters, ['kx', 'px'])
self.assertEqual(model.n_rows, 500)
self.assertEqual(model.n_cols, 600)
np.testing.assert_array_equal(model.distortion_coefficients, np.arange(1, 6))
def test_fx(self):
model = self.Class(intrinsic_matrix=np.array([[1, 0, 0], [0, 0, 0]]))
self.assertEqual(model.kx, 1)
model.kx = 100
self.assertEqual(model.kx, 100)
self.assertEqual(model.intrinsic_matrix[0, 0], 100)
def test_fy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 0, 0], [0, 1, 0]]))
self.assertEqual(model.ky, 1)
model.ky = 100
self.assertEqual(model.ky, 100)
self.assertEqual(model.intrinsic_matrix[1, 1], 100)
def test_kxy(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.kxy, 1)
model.kxy = 100
self.assertEqual(model.kxy, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_alpha(self):
model = self.Class(intrinsic_matrix=np.array([[0, 1, 0], [0, 0, 0]]))
self.assertEqual(model.alpha, 1)
model.alpha = 100
self.assertEqual(model.alpha, 100)
self.assertEqual(model.intrinsic_matrix[0, 1], 100)
def test_k1(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.k1, 1)
model.k1 = 100
self.assertEqual(model.k1, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_k2(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.k2, 1)
model.k2 = 100
self.assertEqual(model.k2, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_k3(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.k3, 1)
model.k3 = 100
self.assertEqual(model.k3, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_p1(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.p1, 1)
model.p1 = 100
self.assertEqual(model.p1, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_p2(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.p2, 1)
model.p2 = 100
self.assertEqual(model.p2, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_radial2(self):
model = self.Class(distortion_coefficients=np.array([1, 0, 0, 0, 0]))
self.assertEqual(model.radial2, 1)
model.radial2 = 100
self.assertEqual(model.radial2, 100)
self.assertEqual(model.distortion_coefficients[0], 100)
def test_radial4(self):
model = self.Class(distortion_coefficients=np.array([0, 1, 0, 0, 0]))
self.assertEqual(model.radial4, 1)
model.radial4 = 100
self.assertEqual(model.radial4, 100)
self.assertEqual(model.distortion_coefficients[1], 100)
def test_radial6(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 1, 0, 0]))
self.assertEqual(model.radial6, 1)
model.radial6 = 100
self.assertEqual(model.radial6, 100)
self.assertEqual(model.distortion_coefficients[2], 100)
def test_tiptilt_y(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 1, 0]))
self.assertEqual(model.tiptilt_y, 1)
model.tiptilt_y = 100
self.assertEqual(model.tiptilt_y, 100)
self.assertEqual(model.distortion_coefficients[3], 100)
def test_tiptilt_x(self):
model = self.Class(distortion_coefficients=np.array([0, 0, 0, 0, 1]))
self.assertEqual(model.tiptilt_x, 1)
model.tiptilt_x = 100
self.assertEqual(model.tiptilt_x, 100)
self.assertEqual(model.distortion_coefficients[4], 100)
def test_apply_distortion(self):
dist_coefs = [{"k1": 1.5, "k2": 0, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 1.5, "k3": 0, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 1.5, "p1": 0, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 1.5, "p2": 0},
{"k1": 0, "k2": 0, "k3": 0, "p1": 0, "p2": 1.5}]
inputs = [[0, 0], [1, 0], [-1, 0], [1.5, 0], [-1.5, 0], [[1.5], [0]], [[1.5, -1], [0, 0]],
[0, 1], [0, -1], [0, 1.5], [0, -1.5], [[0], [1.5]], [[0, 0], [1.5, -1]], [1, 1]]
solus = [[[0, 0], [2.5, 0], [-2.5, 0], [1.5 + 1.5 ** 4, 0], [-(1.5 + 1.5 ** 4), 0],
[[(1.5 + 1.5 ** 4)], [0]], [[(1.5 + 1.5 ** 4), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 4)], [0, -(1.5 + 1.5 ** 4)], [[0], [(1.5 + 1.5 ** 4)]],
[[0, 0], [(1.5 + 1.5 ** 4), -2.5]], [1 + 2 * 1.5, 1 + 2 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 6), 0], [-(1.5 + 1.5 ** 6), 0],
[[(1.5 + 1.5 ** 6)], [0]], [[(1.5 + 1.5 ** 6), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 6)], [0, -(1.5 + 1.5 ** 6)], [[0], [(1.5 + 1.5 ** 6)]],
[[0, 0], [(1.5 + 1.5 ** 6), -2.5]], [1 + 4 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [2.5, 0], [-2.5, 0], [(1.5 + 1.5 ** 8), 0], [-(1.5 + 1.5 ** 8), 0],
[[(1.5 + 1.5 ** 8)], [0]], [[(1.5 + 1.5 ** 8), -2.5], [0, 0]],
[0, 2.5], [0, -2.5], [0, (1.5 + 1.5 ** 8)], [0, -(1.5 + 1.5 ** 8)], [[0], [(1.5 + 1.5 ** 8)]],
[[0, 0], [(1.5 + 1.5 ** 8), -2.5]], [1 + 8 * 1.5, 1 + 8 * 1.5]],
[[0, 0], [1, 1.5], [-1, 1.5], [1.5, 1.5 ** 3], [-1.5, 1.5 ** 3], [[1.5], [1.5 ** 3]],
[[1.5, -1], [1.5 ** 3, 1.5]],
[0, 1 + 3 * 1.5], [0, -1 + 3 * 1.5], [0, 1.5 + 3 * 1.5 ** 3], [0, -1.5 + 3 * 1.5 ** 3],
[[0], [1.5 + 3 * 1.5 ** 3]],
[[0, 0], [1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5]], [1 + 2 * 1.5, 1 + 4 * 1.5]],
[[0, 0], [1 + 3 * 1.5, 0], [-1 + 3 * 1.5, 0], [1.5 + 3 * 1.5 ** 3, 0], [-1.5 + 3 * 1.5 ** 3, 0],
[[1.5 + 3 * 1.5 ** 3], [0]], [[1.5 + 3 * 1.5 ** 3, -1 + 3 * 1.5], [0, 0]],
[1.5, 1], [1.5, -1], [1.5 ** 3, 1.5], [1.5 ** 3, -1.5], [[1.5 ** 3], [1.5]],
[[1.5 ** 3, 1.5], [1.5, -1]],
[1 + 4 * 1.5, 1 + 2 * 1.5]]]
for dist, sols in zip(dist_coefs, solus):
with self.subTest(**dist):
model = self.Class(**dist)
for inp, solu in zip(inputs, sols):
gnom_dist = model.apply_distortion(np.array(inp))
np.testing.assert_array_almost_equal(gnom_dist, solu)
def test_get_projections(self):
points = [[0, 0, 1], [-0.1, 0.2, 2.2], [[-0.1], [0.2], [2.2]], [[-0.1, 0], [0.2, 0], [2.2, 1]]]
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, a1=1e-1, a2=-1e-6, a3=3e-7)
temps = [0, 1, -1, 10, -10]
for temp in temps:
with self.subTest(misalignment=None, temp=temp):
for point in points:
pin, dist, pix = model.get_projections(point, temperature=temp)
pin_true = np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
dist_true *= model.get_temperature_scale(temp)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_equal(pin, pin_true)
np.testing.assert_array_equal(dist, dist_true)
np.testing.assert_array_equal(pix, pix_true)
model = self.Class(fx=4050.5, fy=3050.25, alpha=1.5, px=1500, py=1500.5,
k1=0.5, k2=-0.3, k3=0.15, p1=1e-7, p2=1e-6, misalignment=[0, 0, np.pi])
with self.subTest(misalignment=[0, 0, np.pi]):
for point in points:
pin, dist, pix = model.get_projections(point)
pin_true = -np.array(point[:2]) / point[2]
dist_true = model.apply_distortion(pin_true)
pix_true = (np.matmul(model.intrinsic_matrix[:, :2], dist_true).T + model.intrinsic_matrix[:, 2]).T
np.testing.assert_array_almost_equal(pin, pin_true)
np.testing.assert_array_almost_equal(dist, dist_true)
|
np.testing.assert_array_almost_equal(pix, pix_true)
|
numpy.testing.assert_array_almost_equal
|
import numpy as np
import math
import cv2
from scipy.spatial.transform import Rotation
# Calculate transformation matrices and average them
def getTransformations(n_id, tvec_m, tvec_n, rvec_m, rvec_n, tvec_orig_m, tvec_orig_n, rvec_orig_m,
rvec_orig_n, dRot_m, dRot_n, allow_use, ALLOW_LIMIT, tvec_max_n, tvec_min_n):
if allow_use < ALLOW_LIMIT:
tvec_m = np.transpose(tvec_m) # tvec of 'm' marker
tvec_n = np.transpose(tvec_n) # tvec of 'n' marker
tvec_orig_m = np.transpose(tvec_orig_m) # origin of 'm' in global coordinates
tvec_orig_n = np.transpose(tvec_orig_n) # origin-to-be of 'n' in global coordinates
dtvec = tvec_m - tvec_n # vector from the 'n' marker to the 'm' marker in the camera's coordinate system
# get the markers' rotation matrices respectively
R_m = cv2.Rodrigues(rvec_m)[0]
R_n = cv2.Rodrigues(rvec_n)[0]
# for filtering out min and max values
tvec_temp = tvec_orig_m + dRot_m.dot(-R_m.T.dot(dtvec))
if np.linalg.norm(tvec_temp) > np.linalg.norm(tvec_max_n):
tvec_max_n = tvec_temp
if
|
np.linalg.norm(tvec_temp)
|
numpy.linalg.norm
|
from torch.nn.utils.prune import L1Unstructured, RandomUnstructured
from copy import deepcopy
import numpy as np
import torch
from collections import OrderedDict
from torch.nn.utils.prune import RandomStructured, LnStructured, identity
def init_for_pruning(smodel, to_be_pruned: [str], last_layer_index=-1):
# don't prune the output layer
for layer in list(smodel.children())[:last_layer_index]:
for pname in to_be_pruned:
identity(layer, pname)
def calculate_mask_sparsity(model):
'''
returns a tuple (mask sparsity for the whole model, [mask sparsity by layer])'''
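# note: each child layer is assumed to expose its 2-D pruning mask as `layer.mask`
# (mask entries equal to 1 mark weights that are kept)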
params_l, remain_l = [], []
for layer in model.children():
mask = layer.mask
n_params = mask.shape[0] * mask.shape[1]
params_l.append(n_params)
remain_l.append(np.sum(mask.numpy()))
ms = np.sum(remain_l) / np.sum(params_l)
ms_l = np.array(remain_l) / np.array(params_l)
return ms, ms_l
def infer_one_shot_prs(sparse_models):
return list(sparse_models['lenet'].keys())
# pruning with pytorch
# PRE-DEFINED SPARSITY
def prune_with_predefined_sparsity(model, density, random_mask=True, prune_all_layers=False, unstructured=True):
amount = 1-density
last_layer_index = len(list(model.children())) if prune_all_layers else -1
masks = pruning_masks(model,
unstructured=unstructured,
amount=amount,
random_mask=random_mask,
style='nodes',
last_layer_index=last_layer_index)
init_for_pruning(model, ['weight'],
last_layer_index=last_layer_index)
for j, layer in enumerate(list(model.children())[:last_layer_index]):
del layer._buffers['weight_mask']
layer.register_buffer('weight_mask', masks[j])
return model
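# Hypothetical usage sketch (the toy model and numbers below are illustrative only,
# not part of the original code): prune a small fully connected model whose children
# are nn.Linear layers to 20 % density, then inspect the fraction of weights kept in
# each pruned layer via its 'weight_mask' buffer:
# toy = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.Linear(16, 16), torch.nn.Linear(16, 4))
# sparse_toy = prune_with_predefined_sparsity(toy, density=0.2, random_mask=True)
# kept = [l._buffers['weight_mask'].float().mean().item() for l in list(sparse_toy.children())[:-1]]
# 'kept' is roughly [0.2, 0.2]; the output layer stays dense because prune_all_layers defaults to False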
def pruning_masks(dmodel, unstructured: bool, amount: float, random_mask=False, n_norm=1, style='nodes', last_layer_index=-1):
'''
get pruning masks for weights (not biases), pruning the same amount in every layer
up to last_layer_index (by default the output layer is excluded);
returns the list of masks to be applied
'''
masks = []
# decide the pruning method 1) unstructured/structured and 2) random/magnitude
if unstructured:
pruning_method = RandomUnstructured(
amount=amount) if random_mask else L1Unstructured(amount=amount)
else: # structured pruning, should we prune nodes or channels?
dim = 0
if style == 'channels':
dim = -1
pruning_method = RandomStructured(amount=amount, dim=dim) if random_mask else LnStructured(
amount=amount, n=n_norm, dim=dim)
for layer in list(dmodel.children())[:last_layer_index]:
trained_weight = getattr(layer, 'weight')
masks.append(pruning_method.compute_mask(
trained_weight, torch.ones_like(trained_weight)))
return masks
def prune_models(models_org, models_trained, unstructured: bool, amount: float, random_mask: bool, random_init: bool, prune_all_layers: bool):
'''
models_org and models_trained are lists of dense (original and trained) models for one architecture
'''
sms = []
for i, model in enumerate(models_trained):
# create the sparse model
sparse_m = deepcopy(models_org[i])
last_layer_index = len(list(sparse_m.children())
) if prune_all_layers else -1
# get the pruning masks
dmodel = deepcopy(model)
masks = pruning_masks(dmodel,
unstructured=unstructured,
amount=amount,
random_mask=random_mask,
style='nodes',
last_layer_index=last_layer_index)
init_for_pruning(sparse_m, ['weight'],
last_layer_index=last_layer_index)
for j, layer in enumerate(list(sparse_m.children())[:last_layer_index]):
del layer._buffers['weight_mask']
layer.register_buffer('weight_mask', masks[j])
# if random init, re-initialize the params
if random_init:
sparse_m.init_params()
sms.append(sparse_m)
return sms
# PRUNE NETWORKS
def create_random_init_models(dense_models_org, dense_models, one_shot_pruning_rates,
layer_wise_pruning, prune_weights,
n_models, input_features, output_dim,
random_init=True, random_mask=True,
names=['lenet', 'deepfc'],
prune_all_layers=False):
random_init_models = OrderedDict()
for name in names:
rim = OrderedDict()
for pr in one_shot_pruning_rates:
print(f'{name}: pr {pr}', end='\r')
rim_pr = prune_models(models_org=dense_models_org[name],
models_trained=dense_models[name],
unstructured=prune_weights,
amount=1-pr,
random_mask=random_mask, random_init=random_init,
prune_all_layers=prune_all_layers)
rim[pr] = rim_pr
random_init_models[name] = rim
print("", end='\r')
return random_init_models
def create_winning_tickets(dense_models_org, dense_models_trained, prune_weights,
one_shot_pruning_rates, layer_wise_pruning,
n_models, input_features, output_dim, names=[
'lenet', 'deepfc'],
prune_all_layers=False):
original_init_models = OrderedDict()
for name in names:
oim = OrderedDict()
for pr in one_shot_pruning_rates:
oim_pr = prune_models(dense_models_org[name], dense_models_trained[name],
unstructured=prune_weights, amount=1-pr,
random_mask=False, random_init=False,
prune_all_layers=prune_all_layers)
oim[pr] = oim_pr
original_init_models[name] = oim
return original_init_models
# OLD NODE WISE PRUNING
def get_pruned_dim(pruning_rate, original_dim, round=False):
r = 0.5 if round else 0
dim = int(r + pruning_rate * original_dim)
return dim if dim > 0 else 1
def get_dim(pruning_rate, layer):
return get_pruned_dim(pruning_rate, layer.out_features)
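# Worked example (numbers illustrative only): get_pruned_dim(0.25, 300) -> 75 kept nodes,
# while get_pruned_dim(0.001, 300) -> 1 because the result is floored at one node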
def original_weights_for_pruned_lenet(original_weights, pruned_nodes_idx):
l1 = original_weights[0][pruned_nodes_idx[0], :]
l2 = original_weights[1][pruned_nodes_idx[1], :][:, pruned_nodes_idx[0]]
out = original_weights[2][:, pruned_nodes_idx[1]]
return [l1, l2, out]
def apply_node_wise_pruning_to_weights(original_weights, pruned_nodes_idx):
pruned = []
n_layers = len(original_weights)
for n, w in enumerate(original_weights):
if n == 0: # input layer
pruned.append(w[pruned_nodes_idx[n], :])
elif n == n_layers - 1: # output layer
pruned.append(w[:, pruned_nodes_idx[n-1]])
else: # hidden layers
pruned.append(w[pruned_nodes_idx[n], :][:, pruned_nodes_idx[n-1]])
return pruned
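# Worked example (shapes illustrative only): for a 4-3-3-2 network with weight matrices
# of shapes (3, 4), (3, 3) and (2, 3) and pruned_nodes_idx = [[0, 2], [0, 2]], the returned
# matrices have shapes (2, 4), (2, 2) and (2, 2): rows are kept for a layer's own surviving
# nodes and columns for the surviving nodes of the previous layer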
def prune_nodes_large_final(trained_weights, pruning_rate: float):
'''
trained weights is a list of numpy arrays
pruning rate e [0,1]
'''
target_dims = [int(trained_weights[n].shape[0] * (1-pruning_rate))
for n in range(len(trained_weights))]
pruned_nodes_idx = []
# don't prune the output layer
for i, w in enumerate(trained_weights[:-1]):
w_mean = np.mean(
|
np.abs(w)
|
numpy.abs
|
import numpy as np
from scipy import stats as sps
class TSBandit:
"""
Implements a Thompson-Sampling Multiarmed Bandit algorithm for the item recommendation problem
See (insert link) for details
"""
def __init__(self, M, l):
"""
Creates a new instance of the TS Bandit for the item recommendation problem
Inits Beta params [a, b] to ones, i.e. a uniform Beta(1, 1) prior on [0, 1]
:param M: number of actions
:param l: return this number of recommendations
"""
self.l = l
self.M = M
self.params = np.ones(shape=(self.M, 2))
def predict(self):
"""
Get the next prediction from the bandit
:return: an np array of sampled per-action success probabilities, an np array of the l selected actions
"""
pr = sps.beta.rvs(a=self.params[:,0], b=self.params[:,1])
rec = np.argsort(-pr)[:self.l]
return pr, rec
def update(self, actions, response):
"""
Updates the bandit with responses to previously given actions
:param actions: the actions to which the updates apply
:param response: the rewards for the actions
:return: None; the params are updated in place
"""
self.params[actions] +=
|
np.vstack([response, 1 - response])
|
numpy.vstack
|
## @ingroupMethods-Noise-Fidelity_One-Noise_Tools
# generate_microphone_points.py
#
# Created: Sep 2021, <NAME>
# ----------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------
import numpy as np
# ----------------------------------------------------------------------
# Compute Microphone Points
# ---------------------------------------------------------------------
## @ingroupMethods-Noise-Fidelity_One-Noise_Tools
def generate_ground_microphone_points(min_x,max_x,min_y,max_y,N_x,N_y):
"""This computes the absolute microphone/observer locations on level ground.
Assumptions:
None
Source:
N/A
Inputs:
min_x - minimum x coordinate of noise evaluation plane [meters]
max_x - maximum x coordinate of noise evaluation plane [meters]
min_y - minimum y coordinate of noise evaluation plane [meters]
max_y - maximum y coordinate of noise evaluation plane [meters]
N_x - number of microphones on x-axis
N_y - number of microphones on y-axis
Outputs:
gm_mic_locations - cartesian coordinates of all microphones defined [meters]
Properties Used:
N/A
"""
num_gm = N_x*N_y
gm_mic_locations = np.zeros((num_gm,3))
x_coords_0 = np.repeat(np.linspace(min_x,max_x,N_x)[:,np.newaxis],N_y, axis = 1)
y_coords_0 = np.repeat(np.linspace(min_y,max_y,N_y)[:,np.newaxis],N_x, axis = 1).T
z_coords_0 = np.zeros_like(x_coords_0)
gm_mic_locations[:,0] = x_coords_0.reshape(num_gm)
gm_mic_locations[:,1] = y_coords_0.reshape(num_gm)
gm_mic_locations[:,2] = z_coords_0.reshape(num_gm)
return gm_mic_locations
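# Hypothetical usage sketch (values illustrative only): a 3 x 2 grid of ground microphones
# over a 100 m x 50 m patch,
# gm_mics = generate_ground_microphone_points(0., 100., 0., 50., 3, 2)
# returns an array of shape (6, 3) whose z coordinates are all zero (level ground)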
## @ingroupMethods-Noise-Fidelity_One-Noise_Tools
def generate_building_microphone_points(building_locations,building_dimensions,N_x,N_y,N_z):
"""This computes the absolute microphone/observer locations on the surface of rectilinear buildinsg.
Assumptions:
Microhpone locations are uniformly distributed on the surface
Source:
N/A
Inputs:
building_locations - cartesian coordinates of the base of buildings [meters]
building_dimensions - dimensions of buildings [length,width,height] [meters]
N_x - number of discretization points in the x dimension on each building surface [unitless]
N_y - number of discretization points in the y dimension on each building surface [unitless]
N_z - number of discretization points in the z dimension on each building surface [unitless]
Outputs:
building_mic_locations - cartesian coordinates of all microphones defined on buildings [meters]
Properties Used:
N/A
"""
building_locations = np.array(building_locations)
building_dimensions = np.array(building_dimensions)
N_b = len(building_locations)
num_mics_on_xz_surface = N_x*N_z
num_mics_on_yz_surface = N_y*N_z
num_mics_on_xy_surface = N_x*N_y
num_mics_per_building = 2*(num_mics_on_xz_surface +num_mics_on_yz_surface) + num_mics_on_xy_surface
b_mic_locations = np.empty((N_b,num_mics_per_building,3))
x0 = building_locations[:,0]
y0 = building_locations[:,1]
z0 = building_locations[:,2]
l = building_dimensions[:,0]
w = building_dimensions[:,1]
h = building_dimensions[:,2]
# surface 1 (front)
x_coords_1 = np.repeat(np.repeat(np.atleast_2d(x0-l/2).T,N_y,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
Y_1 = np.repeat(np.repeat(np.atleast_2d(y0).T,N_y,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
YW_1 = np.repeat(np.repeat(np.atleast_2d(w/2).T,N_y,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
non_dim_y_1 = np.repeat(np.repeat(np.linspace(-1,1,N_y)[:,np.newaxis],N_z, axis = 1)[np.newaxis,:,:],N_b, axis = 0)
y_coords_1 = non_dim_y_1*YW_1 + Y_1
Z_1 = np.repeat(np.repeat(np.atleast_2d(h).T,N_y,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
non_dim_z_1 = np.repeat(np.repeat(np.linspace(0,1,N_z)[:,np.newaxis],N_y, axis = 1).T[np.newaxis,:,:],N_b, axis = 0)
z_coords_1 = non_dim_z_1*Z_1
start_idx_1 = 0
end_idx_1 = num_mics_on_yz_surface
b_mic_locations[:,start_idx_1:end_idx_1 ,0] = x_coords_1.reshape(N_b,N_y*N_z)
b_mic_locations[:,start_idx_1:end_idx_1 ,1] = y_coords_1.reshape(N_b,N_y*N_z)
b_mic_locations[:,start_idx_1:end_idx_1 ,2] = z_coords_1.reshape(N_b,N_y*N_z)
# surface 2 (right)
X_2 = np.repeat(np.repeat(np.atleast_2d(x0).T,N_x,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
XW_2 = np.repeat(np.repeat(np.atleast_2d(l/2).T,N_x,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
non_dim_x_2 = np.repeat(np.repeat(np.linspace(-1,1,N_x)[:,np.newaxis],N_z, axis = 1)[np.newaxis,:,:],N_b, axis = 0)
x_coords_2 = non_dim_x_2*XW_2 + X_2
y_coords_2 = np.repeat(np.repeat(np.atleast_2d(y0+w/2).T,N_x,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
Z_2 = np.repeat(np.repeat(np.atleast_2d(h).T,N_x,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
non_dim_z_2 = np.repeat(np.repeat(np.linspace(0,1,N_z)[:,np.newaxis],N_x, axis = 1).T[np.newaxis,:,:],N_b, axis = 0)
z_coords_2 = non_dim_z_2*Z_2
start_idx_2 = end_idx_1
end_idx_2 = start_idx_2 + num_mics_on_xz_surface
b_mic_locations[:,start_idx_2:end_idx_2 ,0] = x_coords_2.reshape(N_b,N_x*N_z)
b_mic_locations[:,start_idx_2:end_idx_2 ,1] = y_coords_2.reshape(N_b,N_x*N_z)
b_mic_locations[:,start_idx_2:end_idx_2 ,2] = z_coords_2.reshape(N_b,N_x*N_z)
# surface 3 (back)
x_coords_3 = np.repeat(np.repeat(np.atleast_2d(x0+l/2).T,N_y,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
Y_3 = np.repeat(np.repeat(np.atleast_2d(y0).T,N_y,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
YW_3 = np.repeat(np.repeat(np.atleast_2d(w/2).T,N_y,axis = 1)[:,:,np.newaxis],N_z,axis = 2)
non_dim_y_3 = np.repeat(np.repeat(np.linspace(-1,1,N_y)[:,np.newaxis],N_z, axis = 1)[np.newaxis,:,:],N_b, axis = 0)
y_coords_3 = non_dim_y_3*YW_3 + Y_3
Z_3 = np.repeat(np.repeat(
|
np.atleast_2d(h)
|
numpy.atleast_2d
|
# standard modules
import ast
import datetime
import os
import logging
import sys
# 3rd party modules
import numpy
import matplotlib.dates as mdt
import xlrd
# PFP modules
import constants as c
import qcio
import qcts
import qcutils
logger = logging.getLogger("pfp_log")
# GapFillParseControlFile parses the L4 control file
def GapFillParseControlFile(cf, ds, series, ds_alt):
# find the section containing the series
section = qcutils.get_cfsection(cf, series=series, mode="quiet")
# return empty handed if the series is not in a section
if len(section) == 0:
return
if "GapFillFromAlternate" in cf[section][series].keys():
# create the alternate dictionary in ds
gfalternate_createdict(cf, ds, series, ds_alt)
if "GapFillUsingSOLO" in cf[section][series].keys():
# create the SOLO dictionary in ds
gfSOLO_createdict(cf, ds, series)
if "GapFillUsingMDS" in cf[section][series].keys():
# create the MDS dictionary in ds
gfMDS_createdict(cf, ds, series)
if "GapFillFromClimatology" in cf[section][series].keys():
# create the climatology dictionary in the data structure
gfClimatology_createdict(cf, ds, series)
if "MergeSeries" in cf[section][series].keys():
# create the merge series dictionary in the data structure
gfMergeSeries_createdict(cf, ds, series)
def gfalternate_createdict(cf, ds, series, ds_alt):
"""
Purpose:
Creates a dictionary in ds to hold information about the alternate data used to gap fill the tower data.
Usage:
Side effects:
Author: PRI
Date: August 2014
"""
# get the section of the control file containing the series
section = qcutils.get_cfsection(cf, series=series, mode="quiet")
# return without doing anything if the series isn't in a control file section
if len(section)==0:
logger.error("GapFillFromAlternate: Series %s not found in control file, skipping ...", series)
return
# create the alternate directory in the data structure
if "alternate" not in dir(ds):
ds.alternate = {}
# name of alternate output series in ds
output_list = cf[section][series]["GapFillFromAlternate"].keys()
# loop over the outputs listed in the control file
for output in output_list:
# create the dictionary keys for this output
ds.alternate[output] = {}
ds.alternate[output]["label_tower"] = series
# source name
ds.alternate[output]["source"] = cf[section][series]["GapFillFromAlternate"][output]["source"]
# site name
ds.alternate[output]["site_name"] = ds.globalattributes["site_name"]
# alternate data file name
# first, look in the [Files] section for a generic file name
file_list = cf["Files"].keys()
lower_file_list = [item.lower() for item in file_list]
if ds.alternate[output]["source"].lower() in lower_file_list:
# found a generic file name
i = lower_file_list.index(ds.alternate[output]["source"].lower())
ds.alternate[output]["file_name"] = cf["Files"][file_list[i]]
else:
# no generic file name found, look for a file name in the variable section
ds.alternate[output]["file_name"] = cf[section][series]["GapFillFromAlternate"][output]["file_name"]
# if the file has not already been read, do it now
if ds.alternate[output]["file_name"] not in ds_alt:
ds_alternate = qcio.nc_read_series(ds.alternate[output]["file_name"],fixtimestepmethod="round")
gfalternate_matchstartendtimes(ds,ds_alternate)
ds_alt[ds.alternate[output]["file_name"]] = ds_alternate
# get the type of fit
ds.alternate[output]["fit_type"] = "OLS"
if "fit" in cf[section][series]["GapFillFromAlternate"][output]:
if cf[section][series]["GapFillFromAlternate"][output]["fit"].lower() in ["ols","ols_thru0","mrev","replace","rma","odr"]:
ds.alternate[output]["fit_type"] = cf[section][series]["GapFillFromAlternate"][output]["fit"]
else:
logger.info("gfAlternate: unrecognised fit option for series %s, used OLS", output)
# correct for lag?
if "lag" in cf[section][series]["GapFillFromAlternate"][output]:
if cf[section][series]["GapFillFromAlternate"][output]["lag"].lower() in ["no","false"]:
ds.alternate[output]["lag"] = "no"
elif cf[section][series]["GapFillFromAlternate"][output]["lag"].lower() in ["yes","true"]:
ds.alternate[output]["lag"] = "yes"
else:
logger.info("gfAlternate: unrecognised lag option for series %s", output)
else:
ds.alternate[output]["lag"] = "yes"
# choose specific alternate variable?
if "usevars" in cf[section][series]["GapFillFromAlternate"][output]:
ds.alternate[output]["usevars"] = ast.literal_eval(cf[section][series]["GapFillFromAlternate"][output]["usevars"])
# alternate data variable name if different from name used in control file
if "alternate_name" in cf[section][series]["GapFillFromAlternate"][output]:
ds.alternate[output]["alternate_name"] = cf[section][series]["GapFillFromAlternate"][output]["alternate_name"]
else:
ds.alternate[output]["alternate_name"] = series
# results of best fit for plotting later on
ds.alternate[output]["results"] = {"startdate":[],"enddate":[],"No. points":[],"No. filled":[],
"r":[],"Bias":[],"RMSE":[],"Frac Bias":[],"NMSE":[],
"Avg (Tower)":[],"Avg (Alt)":[],
"Var (Tower)":[],"Var (Alt)":[],"Var ratio":[]}
# create an empty series in ds if the alternate output series doesn't exist yet
if output not in ds.series.keys():
data,flag,attr = qcutils.MakeEmptySeries(ds,output)
qcutils.CreateSeries(ds,output,data,flag,attr)
qcutils.CreateSeries(ds,series+"_composite",data,flag,attr)
def gfalternate_matchstartendtimes(ds,ds_alternate):
"""
Purpose:
Match the start and end times of the alternate and tower data.
The logic is as follows:
- if there is no overlap between the alternate and tower data then
dummy series with missing data are created for the alternate data
for the period of the tower data
- if the alternate and tower data overlap then truncate or pad (with
missing values) the alternate data series so that the periods of the
tower data and alternate data match.
Usage:
gfalternate_matchstartendtimes(ds,ds_alternate)
where ds is the data structure containing the tower data
ds_alternate is the data structure containing the alternate data
Author: PRI
Date: July 2015
"""
# check the time steps are the same
ts_tower = int(ds.globalattributes["time_step"])
ts_alternate = int(ds_alternate.globalattributes["time_step"])
if ts_tower!=ts_alternate:
msg = " GapFillFromAlternate: time step for tower and alternate data are different, returning ..."
logger.error(msg)
ds.returncodes["GapFillFromAlternate"] = "error"
return
# get the start and end times of the tower and the alternate data and see if they overlap
ldt_alternate = ds_alternate.series["DateTime"]["Data"]
start_alternate = ldt_alternate[0]
ldt_tower = ds.series["DateTime"]["Data"]
end_tower = ldt_tower[-1]
# since the datetime is monotonically increasing we need only check the start datetime
overlap = start_alternate<=end_tower
# do the alternate and tower data overlap?
if overlap:
# index of alternate datetimes that are also in tower datetimes
#alternate_index = qcutils.FindIndicesOfBInA(ldt_tower,ldt_alternate)
#alternate_index = [qcutils.find_nearest_value(ldt_tower, dt) for dt in ldt_alternate]
# index of tower datetimes that are also in alternate datetimes
#tower_index = qcutils.FindIndicesOfBInA(ldt_alternate,ldt_tower)
#tower_index = [qcutils.find_nearest_value(ldt_alternate, dt) for dt in ldt_tower]
tower_index, alternate_index = qcutils.FindMatchingIndices(ldt_tower, ldt_alternate)
# check that the indices point to the same times
ldta = [ldt_alternate[i] for i in alternate_index]
ldtt = [ldt_tower[i] for i in tower_index]
if ldta!=ldtt:
# and exit with a helpful message if they don't
logger.error(" Something went badly wrong and I'm giving up")
sys.exit()
# get a list of alternate series
alternate_series_list = [item for item in ds_alternate.series.keys() if "_QCFlag" not in item]
# number of records in truncated or padded alternate data
nRecs_tower = len(ldt_tower)
# force the alternate datetime to be the tower datetime
ds_alternate.series["DateTime"] = ds.series["DateTime"]
# loop over the alternate series and truncate or pad as required
# truncation or padding is handled by the indices
for series in alternate_series_list:
if series in ["DateTime","DateTime_UTC"]: continue
# get the alternate data
data,flag,attr = qcutils.GetSeriesasMA(ds_alternate,series)
# create an array of missing data of the required length
data_overlap = numpy.full(nRecs_tower,c.missing_value,dtype=numpy.float64)
flag_overlap = numpy.ones(nRecs_tower,dtype=numpy.int32)
# replace missing data with alternate data where times match
data_overlap[tower_index] = data[alternate_index]
flag_overlap[tower_index] = flag[alternate_index]
# write the truncated or padded series back into the alternate data structure
qcutils.CreateSeries(ds_alternate,series,data_overlap,flag_overlap,attr)
# update the number of records in the file
ds_alternate.globalattributes["nc_nrecs"] = nRecs_tower
else:
# there is no overlap between the alternate and tower data, create dummy series
nRecs = len(ldt_tower)
ds_alternate.globalattributes["nc_nrecs"] = nRecs
ds_alternate.series["DateTime"] = ds.series["DateTime"]
alternate_series_list = [item for item in ds_alternate.series.keys() if "_QCFlag" not in item]
for series in alternate_series_list:
if series in ["DateTime","DateTime_UTC"]:
continue
_, _, attr = qcutils.GetSeriesasMA(ds_alternate, series)
data = numpy.full(nRecs, c.missing_value, dtype=numpy.float64)
flag = numpy.ones(nRecs, dtype=numpy.int32)
qcutils.CreateSeries(ds_alternate, series, data, flag, attr)
ds.returncodes["GapFillFromAlternate"] = "normal"
def gfClimatology_createdict(cf, ds, series):
""" Creates a dictionary in ds to hold information about the climatological data used
to gap fill the tower data."""
# get the section of the control file containing the series
section = qcutils.get_cfsection(cf, series=series,mode="quiet")
# return without doing anything if the series isn't in a control file section
if len(section) == 0:
logger.error("GapFillFromClimatology: Series %s not found in control file, skipping ...", series)
return
# create the climatology directory in the data structure
if "climatology" not in dir(ds):
ds.climatology = {}
# name of alternate output series in ds
output_list = cf[section][series]["GapFillFromClimatology"].keys()
# loop over the outputs listed in the control file
for output in output_list:
# create the dictionary keys for this output
ds.climatology[output] = {}
ds.climatology[output]["label_tower"] = series
# site name
ds.climatology[output]["site_name"] = ds.globalattributes["site_name"]
# Climatology file name
file_list = cf["Files"].keys()
lower_file_list = [item.lower() for item in file_list]
# first, look in the [Files] section for a generic file name
if "climatology" in lower_file_list:
# found a generic file name
i = lower_file_list.index("climatology")
ds.climatology[output]["file_name"] = cf["Files"][file_list[i]]
else:
# no generic file name found, look for a file name in the variable section
ds.climatology[output]["file_name"] = cf[section][series]["GapFillFromClimatology"][output]["file_name"]
# climatology variable name if different from name used in control file
if "climatology_name" in cf[section][series]["GapFillFromClimatology"][output]:
ds.climatology[output]["climatology_name"] = cf[section][series]["GapFillFromClimatology"][output]["climatology_name"]
else:
ds.climatology[output]["climatology_name"] = series
# climatology gap filling method
if "method" not in cf[section][series]["GapFillFromClimatology"][output].keys():
# default if "method" missing is "interpolated_daily"
ds.climatology[output]["method"] = "interpolated_daily"
else:
ds.climatology[output]["method"] = cf[section][series]["GapFillFromClimatology"][output]["method"]
# create an empty series in ds if the climatology output series doesn't exist yet
if output not in ds.series.keys():
data, flag, attr = qcutils.MakeEmptySeries(ds, output)
qcutils.CreateSeries(ds, output, data, flag, attr)
def gfMDS_createdict(cf, ds, series):
"""
Purpose:
Create an information dictionary for MDS gap filling from the contents
of the control file.
Usage:
info["MDS"] = gfMDS_createdict(cf)
Author: PRI
Date: May 2018
"""
# get the section of the control file containing the series
section = qcutils.get_cfsection(cf, series=series, mode="quiet")
# return without doing anything if the series isn't in a control file section
if len(section)==0:
logger.error("GapFillUsingMDS: Series %s not found in control file, skipping ...", series)
return
# create the MDS attribute (a dictionary) in ds, this will hold all MDS settings
if "mds" not in dir(ds):
ds.mds = {}
# name of MDS output series in ds
output_list = cf[section][series]["GapFillUsingMDS"].keys()
# loop over the outputs listed in the control file
for output in output_list:
# create the dictionary keys for this series
ds.mds[output] = {}
# get the target
if "target" in cf[section][series]["GapFillUsingMDS"][output]:
ds.mds[output]["target"] = cf[section][series]["GapFillUsingMDS"][output]["target"]
else:
ds.mds[output]["target"] = series
# site name
ds.mds[output]["site_name"] = ds.globalattributes["site_name"]
# list of MDS settings
if "mds_settings" in cf[section][series]["GapFillUsingMDS"][output]:
mdss_list = ast.literal_eval(cf[section][series]["GapFillUsingMDS"][output]["mds_settings"])
# list of drivers
ds.mds[output]["drivers"] = ast.literal_eval(cf[section][series]["GapFillUsingMDS"][output]["drivers"])
# list of tolerances
ds.mds[output]["tolerances"] = ast.literal_eval(cf[section][series]["GapFillUsingMDS"][output]["tolerances"])
# get the ustar filter option
opt = qcutils.get_keyvaluefromcf(cf, [section, series, "GapFillUsingMDS", output], "turbulence_filter", default="")
ds.mds[output]["turbulence_filter"] = opt
# get the day/night filter option
opt = qcutils.get_keyvaluefromcf(cf, [section, series, "GapFillUsingMDS", output], "daynight_filter", default="")
ds.mds[output]["daynight_filter"] = opt
# check that all requested targets and drivers have a mapping to
# a FluxNet label, remove if they don't
fluxnet_label_map = {"Fc":"NEE", "Fe":"LE", "Fh":"H",
"Fsd":"SW_IN", "Ta":"TA", "VPD":"VPD"}
for mds_label in ds.mds:
ds.mds[mds_label]["mds_label"] = mds_label
pfp_target = ds.mds[mds_label]["target"]
if pfp_target not in fluxnet_label_map:
msg = " Target ("+pfp_target+") not supported for MDS gap filling"
logger.warning(msg)
del ds.mds[mds_label]
else:
ds.mds[mds_label]["target_mds"] = fluxnet_label_map[pfp_target]
pfp_drivers = ds.mds[mds_label]["drivers"]
for pfp_driver in pfp_drivers:
if pfp_driver not in fluxnet_label_map:
msg = "Driver ("+pfp_driver+") not supported for MDS gap filling"
logger.warning(msg)
ds.mds[mds_label]["drivers"].remove(pfp_driver)
else:
if "drivers_mds" not in ds.mds[mds_label]:
ds.mds[mds_label]["drivers_mds"] = []
ds.mds[mds_label]["drivers_mds"].append(fluxnet_label_map[pfp_driver])
if len(ds.mds[mds_label]["drivers"]) == 0:
del ds.mds[mds_label]
return
def gfMergeSeries_createdict(cf,ds,series):
""" Creates a dictionary in ds to hold information about the merging of gap filled
and tower data."""
merge_prereq_list = ["Fsd","Fsu","Fld","Flu","Ts","Sws"]
# get the section of the control file containing the series
section = qcutils.get_cfsection(cf,series=series,mode="quiet")
# create the merge directory in the data structure
if "merge" not in dir(ds): ds.merge = {}
# check to see if this series is in the "merge first" list
# series in the "merge first" list get merged first so they can be used with existing tower
# data to re-calculate Fg, Fn and Fa
merge_order = "standard"
if series in merge_prereq_list: merge_order = "prerequisite"
if merge_order not in ds.merge.keys(): ds.merge[merge_order] = {}
# create the dictionary keys for this series
ds.merge[merge_order][series] = {}
# output series name
ds.merge[merge_order][series]["output"] = series
# list of source series to be merged
ds.merge[merge_order][series]["source"] = ast.literal_eval(cf[section][series]["MergeSeries"]["Source"])
# create an empty series in ds if the output series doesn't exist yet
if ds.merge[merge_order][series]["output"] not in ds.series.keys():
data,flag,attr = qcutils.MakeEmptySeries(ds,ds.merge[merge_order][series]["output"])
qcutils.CreateSeries(ds,ds.merge[merge_order][series]["output"],data,flag,attr)
def gfSOLO_createdict(cf,ds,series):
""" Creates a dictionary in ds to hold information about the SOLO data used
to gap fill the tower data."""
# get the section of the control file containing the series
section = qcutils.get_cfsection(cf,series=series,mode="quiet")
# return without doing anything if the series isn't in a control file section
if len(section)==0:
logger.error("GapFillUsingSOLO: Series %s not found in control file, skipping ...", series)
return
# create the solo directory in the data structure
if "solo" not in dir(ds): ds.solo = {}
# name of SOLO output series in ds
output_list = cf[section][series]["GapFillUsingSOLO"].keys()
# loop over the outputs listed in the control file
for output in output_list:
# create the dictionary keys for this series
ds.solo[output] = {}
# get the target
if "target" in cf[section][series]["GapFillUsingSOLO"][output]:
ds.solo[output]["label_tower"] = cf[section][series]["GapFillUsingSOLO"][output]["target"]
else:
ds.solo[output]["label_tower"] = series
# site name
ds.solo[output]["site_name"] = ds.globalattributes["site_name"]
# list of SOLO settings
if "solo_settings" in cf[section][series]["GapFillUsingSOLO"][output]:
ss_list = ast.literal_eval(cf[section][series]["GapFillUsingSOLO"][output]["solo_settings"])
ds.solo[output]["solo_settings"] = {}
ds.solo[output]["solo_settings"]["nodes_target"] = int(ss_list[0])
ds.solo[output]["solo_settings"]["training"] = int(ss_list[1])
ds.solo[output]["solo_settings"]["factor"] = int(ss_list[2])
ds.solo[output]["solo_settings"]["learningrate"] = float(ss_list[3])
ds.solo[output]["solo_settings"]["iterations"] = int(ss_list[4])
# list of drivers
ds.solo[output]["drivers"] = ast.literal_eval(cf[section][series]["GapFillUsingSOLO"][output]["drivers"])
# get the turbulence (ustar) filter option
opt = qcutils.get_keyvaluefromcf(cf,[section,series,"GapFillUsingSOLO",output],
"turbulence_filter",default="")
ds.solo[output]["turbulence_filter"] = opt
opt = qcutils.get_keyvaluefromcf(cf,[section,series,"GapFillUsingSOLO",output],
"daynight_filter",default="")
ds.solo[output]["daynight_filter"] = opt
# results of best fit for plotting later on
ds.solo[output]["results"] = {"startdate":[],"enddate":[],"No. points":[],"r":[],
"Bias":[],"RMSE":[],"Frac Bias":[],"NMSE":[],
"Avg (obs)":[],"Avg (SOLO)":[],
"Var (obs)":[],"Var (SOLO)":[],"Var ratio":[],
"m_ols":[],"b_ols":[]}
# create an empty series in ds if the SOLO output series doesn't exist yet
if output not in ds.series.keys():
data,flag,attr = qcutils.MakeEmptySeries(ds,output)
qcutils.CreateSeries(ds,output,data,flag,attr)
# functions for GapFillUsingMDS: not implemented yet
def GapFillFluxUsingMDS(cf, ds, series=""):
section = qcutils.get_cfsection(cf, series=series, mode="quiet")
if len(section)==0:
return
if "GapFillFluxUsingMDS" in cf[section][series].keys():
logger.info(" GapFillFluxUsingMDS: not implemented yet")
return
# functions for GapFillFromClimatology
def GapFillFromClimatology(ds):
'''
Gap fill missing data using data from the climatology spreadsheet produced by
the climatology.py script.
'''
if "climatology" not in dir(ds): return
# tell the user what we are going to do
msg = " Reading climatology file and creating climatology series"
logger.info(msg)
# loop over the series to be gap filled using climatology
cli_xlbooks = {}
for output in ds.climatology.keys():
# check to see if there are any gaps in "series"
#index = numpy.where(abs(ds.series[label]['Data']-float(c.missing_value))<c.eps)[0]
#if len(index)==0: continue # no gaps found in "series"
cli_filename = ds.climatology[output]["file_name"]
if not os.path.exists(cli_filename):
logger.error(" GapFillFromClimatology: Climatology file %s doesn't exist", cli_filename)
continue
if cli_filename not in cli_xlbooks: cli_xlbooks[cli_filename] = xlrd.open_workbook(cli_filename)
# local pointers to the series name and climatology method
label = ds.climatology[output]["label_tower"]
method = ds.climatology[output]["method"]
# do the gap filling
# choose the gap filling method
if method=="monthly":
gfClimatology_monthly(ds,label,output,cli_xlbooks)
elif method=="interpolated daily":
gfClimatology_interpolateddaily(ds,label,output,cli_xlbooks)
else:
logger.error(" GapFillFromClimatology: unrecognised method option for %s", label)
continue
if 'GapFillFromClimatology' not in ds.globalattributes['Functions']:
ds.globalattributes['Functions'] = ds.globalattributes['Functions']+', GapFillFromClimatology'
# remove the "climatology" attribute from ds
#del ds.climatology
def gfClimatology_interpolateddaily(ds,series,output,xlbooks):
"""
Gap fill using data interpolated over a 2D array where the days are
the rows and the time of day is the columns.
"""
# gap fill from interpolated 30 minute data
xlfilename = ds.climatology[output]["file_name"]
sheet_name = series+'i(day)'
if sheet_name not in xlbooks[xlfilename].sheet_names():
msg = " gfClimatology: sheet "+sheet_name+" not found, skipping ..."
logger.warning(msg)
return
ldt = ds.series["DateTime"]["Data"]
thissheet = xlbooks[xlfilename].sheet_by_name(sheet_name)
datemode = xlbooks[xlfilename].datemode
basedate = datetime.datetime(1899, 12, 30)
nts = thissheet.ncols - 1
ndays = thissheet.nrows - 2
# read the time stamp values from the climatology worksheet
tsteps = thissheet.row_values(1,start_colx=1,end_colx=nts+1)
# read the data from the climatology workbook
val1d = numpy.ma.zeros(ndays*nts,dtype=numpy.float64)
# initialise an array for the datetime of the climatological values
cdt = [None]*nts*ndays
# loop over the rows (days) of data
for xlRow in range(ndays):
# get the Excel datetime value
xldatenumber = int(thissheet.cell_value(xlRow+2,0))
# convert this to a Python Datetime
xldatetime = basedate+datetime.timedelta(days=xldatenumber+1462*datemode)
# fill the climatology datetime array
cdt[xlRow*nts:(xlRow+1)*nts] = [xldatetime+datetime.timedelta(hours=hh) for hh in tsteps]
# fill the climatological value array
val1d[xlRow*nts:(xlRow+1)*nts] = thissheet.row_values(xlRow+2,start_colx=1,end_colx=nts+1)
# get the data to be filled with climatological values
data,flag,attr = qcutils.GetSeriesasMA(ds,series)
# get an index of missing values
idx = numpy.where(numpy.ma.getmaskarray(data)==True)[0]
#idx = numpy.ma.where(numpy.ma.getmaskarray(data)==True)[0]
    # Simply using the index (idx) to set a slice of the data array to the gap-filled values in val1d
    # does not work (the mask stays True on the replaced values in data). The workaround is to step
    # through the indices, find the time of each missing value in data, find the same time in the
    # gap-filled values val1d and set the missing element of data to that element of val1d.
    # This may not be the fastest approach, but it is robust because it matches the dates of missing
    # data to the dates in the climatology file (a simplified sketch follows this function).
for ii in idx:
try:
jj = qcutils.find_nearest_value(cdt, ldt[ii])
data[ii] = val1d[jj]
flag[ii] = numpy.int32(40)
except ValueError:
data[ii] = numpy.float64(c.missing_value)
flag[ii] = numpy.int32(41)
# put the gap filled data back into the data structure
qcutils.CreateSeries(ds,output,data,flag,attr)
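# Illustrative sketch (not part of the original module): the date-matching idea used in
# gfClimatology_interpolateddaily above, reduced to plain Python lists. For each missing
# index we take the climatology value whose timestamp is closest to the tower timestamp,
# instead of assuming the two series share the same index space.
def _example_fill_by_nearest_date(ldt, missing_idx, data, cdt, val1d):
    """ldt/cdt are datetime lists; data is filled in place at the indices in missing_idx."""
    for ii in missing_idx:
        # index of the climatology timestamp closest to the tower timestamp
        jj = min(range(len(cdt)), key=lambda j: abs(cdt[j] - ldt[ii]))
        data[ii] = val1d[jj]
    return data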
def gfClimatology_monthly(ds,series,output,xlbook):
""" Gap fill using monthly climatology."""
thissheet = xlbook.sheet_by_name(series)
val1d = numpy.zeros_like(ds.series[series]['Data'])
values = numpy.zeros([48,12])
for month in range(1,13):
xlCol = (month-1)*5 + 2
values[:,month-1] = thissheet.col_values(xlCol)[2:50]
for i in range(len(ds.series[series]['Data'])):
h = numpy.int(2*ds.series['Hdh']['Data'][i])
m = numpy.int(ds.series['Month']['Data'][i])
val1d[i] = values[h,m-1]
index = numpy.where(abs(ds.series[output]['Data']-c.missing_value)<c.eps)[0]
ds.series[output]['Data'][index] = val1d[index]
ds.series[output]['Flag'][index] = numpy.int32(40)
# functions for GapFillUsingInterpolation
def GapFillUsingInterpolation(cf,ds):
"""
Purpose:
Gap fill variables in the data structure using interpolation.
     All variables in the [Variables], [Drivers] and [Fluxes] sections
are processed.
Usage:
qcgf.GapFillUsingInterpolation(cf,ds)
where cf is a control file object
ds is a data structure
Author: PRI
Date: September 2016
"""
label_list = qcutils.get_label_list_from_cf(cf)
maxlen = int(qcutils.get_keyvaluefromcf(cf,["Options"],"MaxGapInterpolate",default=2))
if maxlen==0:
msg = " Gap fill by interpolation disabled in control file"
logger.info(msg)
return
for label in label_list:
section = qcutils.get_cfsection(cf, series=label)
if "MaxGapInterpolate" in cf[section][label]:
maxlen = int(qcutils.get_keyvaluefromcf(cf,[section,label],"MaxGapInterpolate",default=2))
if maxlen==0:
msg = " Gap fill by interpolation disabled for "+label
logger.info(msg)
continue
        qcts.InterpolateOverMissing(ds,series=label,maxlen=maxlen)
# miscellaneous L4 routines
def gf_getdiurnalstats(DecHour,Data,ts):
nInts = 24*int((60/ts)+0.5)
Num =
|
numpy.ma.zeros(nInts,dtype=int)
|
numpy.ma.zeros
|
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.morphology import binary_erosion, distance_transform_edt
from scipy.ndimage import find_objects
import random
from random import uniform, random, randint, getrandbits
from scipy import interpolate
import copy
from scipy.ndimage.filters import generic_filter
try:
import edt
except Exception:
pass
try:
import dataset_iterator.helpers as dih
except:
dih=None
from .helpers import ensure_multiplicity
def batch_wise_fun(fun):
#return lambda batch : np.stack([fun(batch[i]) for i in range(batch.shape[0])], 0)
def func(batch):
for b in range(batch.shape[0]):
batch[b] = fun(batch[b])
return batch
return func
def apply_and_stack_channel(*funcs):
return lambda batch : np.concatenate([fun(batch) for fun in funcs], -1)
def identity(batch):
return batch
def level_set(label_img, max_distance=None, dtype=np.float32):
if not np.any(label_img): # empty image
baseline = np.ones_like(label_img, dtype=dtype)
if max_distance is not None:
return baseline * max_distance # base line = max possible distance value
else:
return baseline * max(label_img.shape)
inside = distance_transform_edt(label_img).astype(dtype, copy=False) # edm inside
outside = distance_transform_edt(np.where(label_img, False, True)).astype(dtype, copy=False)
if max_distance is not None:
inside[inside<-max_distance] = -max_distance
outside[outside>max_distance] = max_distance
return outside - inside
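# Illustrative sketch (not part of the original module): level_set on a tiny labelled
# image. Background pixels carry their (positive) distance to the nearest object pixel,
# pixels inside an object carry a negative distance to the background, so the zero
# crossing of the returned map traces the object boundary.
def _example_level_set():
    lbl = np.zeros((5, 5), dtype=np.int32)
    lbl[1:4, 1:4] = 1                       # a single 3x3 square object
    ls = level_set(lbl)
    assert ls[2, 2] < 0 and ls[0, 0] > 0    # negative inside, positive outside
    return ls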
def unet_weight_map(batch, wo=10, sigma=5, max_background_ratio=0, set_contours_to_zero=False, dtype=np.float32):
"""Implementation of Unet weight map as in <NAME>., <NAME>., & <NAME>. (2015, October).
U-net: Convolutional networks for biomedical image segmentation.
Parameters
----------
    batch : ndarray
        ND array of shape (batch, Y, X, nchan) of labeled images
        if nchan>1 the function is applied separately to each channel
wo : float
cf Unet paper
sigma : float
cf Unet paper
    max_background_ratio : float
        limits the ratio (background volume / foreground volume).
        useful when foreground is rare, in which case the weight of foreground will be: max_background_ratio / (1 + max_background_ratio)
        if 0, no limit
set_contours_to_zero : bool
if true, weight of object contours is set to zero
dtype : numpy.dtype
weight map data type
Returns
-------
    ndarray
        numpy nd array of the same shape as batch
"""
if batch.shape[-1]>1:
wms = [unet_weight_map(batch[...,i:i+1], wo, sigma, max_background_ratio, True, dtype) for i in range(batch.shape[-1])]
return np.concatenate(wms, axis=-1)
else:
s2 = sigma * sigma * 2
wm = weight_map_mask_class_balance(batch, max_background_ratio, True, dtype)
if wo>0 or set_contours_to_zero:
for i in range(batch.shape[0]):
im = batch[i]
labels = np.unique(im)
labels = labels[labels!=0]
if labels.shape[0]>1 and wo>0:
edms=[distance_transform_edt(np.invert(im==l)) for l in labels]
edm = np.concatenate(edms, axis=-1)
edm = np.partition(edm, 1)[...,:2] # get the 2 min values
edm = np.sum(edm, axis=-1, keepdims=True)
bckg_wm = 1 + wo * np.exp(- edm * edm / s2)
bckg_subset = im==0
wm[i][bckg_subset] = bckg_wm[bckg_subset]
if labels.shape[0]>0 and set_contours_to_zero:
contours = get_contour_mask(im[...,0], fun=_get_contours_binary_2d)
wm[i,...,0][contours] = 0
return wm
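# Editorial note: on background pixels (and only when at least two labelled objects are
# present) the loop above reproduces the U-Net border weight
#     w(x) = 1 + wo * exp(-(d1(x) + d2(x))**2 / (2 * sigma**2)),
# where d1 and d2 are the distances from x to the two nearest labelled objects.
# Illustrative sketch (not part of the original module) of a typical call:
def _example_unet_weight_map():
    batch = np.zeros((1, 32, 32, 1), dtype=np.int32)
    batch[0, 4:12, 4:12, 0] = 1     # object 1
    batch[0, 4:12, 16:24, 0] = 2    # object 2
    wm = unet_weight_map(batch, wo=10, sigma=5)
    return wm                       # float32 weights, same shape as batch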
def weight_map_mask_class_balance(batch, max_background_ratio=0, set_background_to_one=False, dtype=np.float32):
wm = np.ones(shape = batch.shape, dtype=dtype)
if max_background_ratio<0:
return wm
n_nonzeros = np.count_nonzero(batch)
if n_nonzeros!=0:
n_tot = np.prod(batch.shape)
p_back = (n_tot - n_nonzeros) / n_tot
background_ratio = (n_tot - n_nonzeros) / n_nonzeros
if max_background_ratio>0 and background_ratio>max_background_ratio:
p_back = max_background_ratio / (1 + max_background_ratio)
if set_background_to_one:
wm[batch!=0] = p_back / (1 - p_back)
else:
wm[batch!=0] = p_back
wm[batch==0] = 1-p_back
return wm
def multilabel_edt(label_img, closed_end=True):
'''
    Multi-label EDT; requires the edt package.
    Along the y-axis (1st axis), out-of-bounds is treated as foreground at both the upper and lower ends if closed_end=False, otherwise only at the lower end.
'''
y_up = 1 if closed_end else 0
if len(label_img.shape)==3:
squeeze = True
label_img = np.squeeze(label_img, -1)
else:
squeeze=False
label_img = edt.edt(np.pad(label_img, pad_width=((y_up, 0),(1, 1)), mode='constant', constant_values=0), black_border=False)[y_up:,1:-1]
if squeeze:
label_img = np.expand_dims(label_img, -1)
return label_img
def binarize(img, dtype=np.float32):
return np.where(img, dtype(1), dtype(0))
def binary_erode_labelwise(label_img):
'''
in-place erosion of square 8-connectivity, label by label, with border value = 1
'''
# todo: set structure as argument, but adapt region dilatation to this parameter
regDilSize = 1
regions = find_objects(label_img)
shape = label_img.shape
for val, region in enumerate(regions, start=1):
if region is not None:
# extend region in order to avoid border effect when set border_value = 1
region = list(region)
for i, s in enumerate(region):
region[i] = slice(max(0, s.start-regDilSize), min(s.stop+regDilSize, shape[i]), None)
region = tuple(region)
subregion = label_img[region]
eroded = binary_erosion(subregion == val, border_value = 1)
subregion[(subregion == val) *np.logical_not(eroded)] = 0 # erase eroded region only within object
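# Illustrative sketch (not part of the original module): binary_erode_labelwise erodes
# each label in place, so touching objects shrink away from each other, while objects
# touching the image border are not eroded there (border_value=1 counts as foreground).
def _example_binary_erode_labelwise():
    lbl = np.zeros((6, 6), dtype=np.int32)
    lbl[:, :3] = 1                  # object 1, touches object 2 along a column
    lbl[:, 3:] = 2                  # object 2
    binary_erode_labelwise(lbl)     # modifies lbl in place
    return lbl                      # the touching columns 2 and 3 are now background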
def _get_contours_2d(element):
v = element[4]
if v==0:
return False
else:
for vv in element:
if vv!=v:
return True
return False
def _get_contours_binary_2d(element):
if element[4]==0:
return False
else:
for vv in element:
if vv==0:
return True
return False
def _get_touching_contours(element):
v = element[4]
if v==0:
return False
else:
for vv in element:
if vv!=v and vv!=0:
return True
return False
def get_contour_mask(labeled_image, output=None, fun=_get_contours_2d):
shape = labeled_image.shape
if len(shape)==3:
assert shape[2] == 1, "only valid for 2D images"
output = np.zeros(shape=shape, dtype=np.bool_)
get_contour_mask(labeled_image[...,0], output[...,0], fun)
return output
elif len(shape)>3:
raise ValueError("only valid for 2D images")
if output is None:
output =
|
np.zeros(shape=labeled_image.shape, dtype=np.bool_)
|
numpy.zeros
|
import gym
import numpy as np
import time
import highway_env
import torch
from torch.utils.data import Dataset
# from scripts.utils import record_videos, show_videos
envs = {
0: "myenv-c1-v0", # 直线
1: "myenv-c2-v0", # 中度弯曲
2: "myenv-c3-v0", # 大量弯曲
}
N = 50
# lon: 0 decelerate, 1 keep, 2 accelerate
# lateral: -1 left, 0 keep, 1 right
"""
label = labels_index[lateral + 1, lon]:
    | 0  1  2
 -1 | 0  1  2
  0 | 3  4  5
  1 | 6  7  8
"""
labels_index = np.arange(9).reshape(3, 3)
def anchor_selector():
"""
    Select a different anchor (road, initial state, target lane and speed).
    :return:
    """
    # select a different road
env_lucky = envs[np.random.choice(np.arange(3))]
# print("env is {}".format(env_lucky))
env = gym.make(env_lucky)
# env = record_videos(env)
    # select a different initial state
lanes_count = env.config["lanes_count"]
lane_id = np.random.choice(np.arange(lanes_count))
# print("v lane id is {}".format(lane_id))
if lane_id == 0:
target_lane_id = np.random.choice([0, 1])
elif lane_id == lanes_count - 1:
target_lane_id = np.random.choice([lanes_count - 1, lanes_count - 2])
else:
target_lane_id = np.random.choice([lane_id - 1, lane_id, lane_id + 1])
# print("target lane id is {}".format(target_lane_id))
    lon_operation = np.random.choice([0, 1, 2])  # 0 decelerate, 1 keep, 2 accelerate
    # print("lon operation (0 decelerate, 1 keep, 2 accelerate) is {}".format(lon_operation))
v_lane_id = ("a", "b", lane_id)
target_lane_id2 = ("a", "b", target_lane_id)
v_target_s = (lon_operation - 1) * 5 + env.vehicle.speed
    v_target_s = np.clip(v_target_s, 0, 30)  # np.clip(a, a_min, a_max)
positon_x = np.random.choice(np.arange(0, env.road.network.get_lane(v_lane_id).length, 5))
positon_y = np.random.choice(np.arange(-2, 2.1, 0.5))
heading = np.random.choice(
env.road.network.get_lane(v_lane_id).heading_at(positon_x) + np.arange(-np.pi / 12, np.pi / 12, 10))
speed = np.random.choice(np.arange(0, 25, 2))
position = env.road.network.get_lane(v_lane_id).position(positon_x, positon_y)
inital_state = [position, heading, speed]
env.config["v_lane_id"] = v_lane_id
env.config["v_target_id"] = target_lane_id2
env.config["v_x"] = positon_x
env.config["v_y"] = positon_y
env.config["v_h"] = heading
env.config["v_s"] = speed
env.config["v_target_s"] = v_target_s
env.reset()
p = env.vehicle.position
i_h = env.vehicle.heading
i_s = env.vehicle.speed
temp = [p[0], p[1], i_h, i_s]
x_road, y_road = env.vehicle.target_lane_position(p)
# temp = temp.extend(x_road)
# temp = temp.extend(y_road)
action_his_omega = []
action_his_accel = []
action = 1
x_his, y_his, h_his, s_his = [], [], [], []
for _ in range(N):
if env.vehicle.on_road is False:
print("出去了")
break
env.step(action)
action_his_omega.append(env.vehicle.action["steering"])
action_his_accel.append(env.vehicle.action["acceleration"])
x_his.append(env.vehicle.position[0])
y_his.append(env.vehicle.position[1])
h_his.append(env.vehicle.heading)
s_his.append(env.vehicle.speed)
# env.render()
# time.sleep(0.1)
env.close()
# temp = temp.extend(action_his_omega)
# temp = temp.extend(action_his_accel)
tt = temp + x_road + y_road + action_his_omega + action_his_accel
# pp = np.vstack((
# np.array(x_his),
# np.array(y_his),
# np.array(h_his),
# np.array(s_his),
# ))
pp = x_his + y_his + h_his + s_his
lane_change = target_lane_id - lane_id
label = labels_index[lane_change + 1, lon_operation]
# s0 = np.array(temp)
s0 = temp
# ss = np.vstack((
# np.array(x_road),
# np.array(y_road),
# ))
ss = x_road + y_road
# aa = np.vstack((
# np.array(action_his_omega),
# np.array(action_his_accel)
# ))
aa = action_his_omega + action_his_accel
return s0, ss, aa, tt, pp, label
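# Illustrative sketch (not part of the original script): decoding a label back into
# (lane_change, lon_operation), the inverse of labels_index[lane_change + 1, lon_operation]
# used in anchor_selector above.
def _decode_label(label):
    lane_change = label // 3 - 1    # -1 left, 0 keep, 1 right
    lon_operation = label % 3       # 0 decelerate, 1 keep, 2 accelerate
    return lane_change, lon_operation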
def generator(num_data):
datas = []
labels = []
s0_b = []
ss_b = []
aa_b = []
pp_b = []
for i in range(num_data):
print("第 {} 个样本".format(i + 1))
s0, ss, aa, tt, pp, label = anchor_selector()
datas.append(tt)
labels.append(label)
s0_b.append(s0)
ss_b.append(ss)
aa_b.append(aa)
pp_b.append(pp)
d = np.array(datas)
l = np.array(labels)
s0_b = np.array(s0_b)
ss_b = np.array(ss_b)
aa_b = np.array(aa_b)
pp_b = np.array(pp_b)
np.save("data.npy", d)
np.save("label.npy", l)
|
np.save("s0_b.npy", s0_b)
|
numpy.save
|
import collections
import numpy as np
from matplotlib import pyplot as plt
class GCSPlaneLocation(object):
def __init__(self, longitude, latitude):
self.longitude = np.array(longitude)
self.latitude = np.array(latitude)
@property
def shape(self):
if list(np.shape(self.longitude)) == list(np.shape(self.latitude)):
return np.shape(self.longitude)
raise ValueError('Unmatched shape.')
class GCSLocation(GCSPlaneLocation):
def __init__(self, longitude, latitude, altitude):
super(GCSLocation, self).__init__(longitude, latitude)
self.altitude = np.array(altitude)
class Airspace(collections.namedtuple('Airspace', ('centre', 'radius'))):
def generate_airline(self, distance, angle, height, reverse=False):
phy = np.arccos(distance / self.radius)
location_1 = GCSLocation(
self.centre.longitude + distance * np.cos(angle - phy),
self.centre.latitude + distance * np.sin(angle - phy),
height)
location_2 = GCSLocation(
self.centre.longitude + distance * np.cos(angle + phy),
self.centre.latitude + distance * np.sin(angle + phy),
height)
if reverse:
return location_2, location_1
return location_1, location_2
def random_airlines(self, num):
airlines = []
while len(airlines) <= num:
r0 = np.random.normal(0.5, 0.3) #
if 0 < r0 < 1:
r0 = r0 * self.radius
theta0 = np.random.uniform(0., 2 * np.pi)
phy0 = (np.random.normal(theta0 + 0.5 * np.pi, 0.1))
a = 1
b = 2 * r0 * np.cos(theta0 - phy0)
c = r0 ** 2 - self.radius ** 2
delta = b ** 2 - 4. * a * c
t1 = (- b + np.sqrt(delta)) / (2. * a)
t2 = (- b - np.sqrt(delta)) / (2. * a)
boundaries = GCSPlaneLocation(
[self.centre.longitude + r0 * np.cos(theta0) + t1 * np.cos(phy0),
self.centre.longitude + r0 * np.cos(theta0) + t2 * np.cos(phy0)],
[self.centre.latitude + r0 * np.sin(theta0) + t1 * np.sin(phy0),
self.centre.latitude + r0 *
|
np.sin(theta0)
|
numpy.sin
|
import logging
import subprocess
import sys
import numpy as np
import os
import scipy.integrate
from scipy.special import erf
from scipy.interpolate import UnivariateSpline
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import maximum_bipartite_matching
from copy import deepcopy
import matplotlib as mpl
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import NullFormatter
import matplotlib.pyplot as plt
from plotbin import sauron_colormap as pb_sauron_colormap
from plotbin import display_pixels
# from loess.loess_2d import loess_2d
from dynamite import kinematics
from dynamite import weight_solvers
from dynamite import physical_system as physys
class ReorderLOSVDError(Exception):
pass
class Plotter():
"""Class to hold plotting routines
Class containing methods for plotting results. Each plotting method saves a
plot in the `outplot/plots` directory, and returns a `matplotlib` `figure`
object.
Parameters
----------
config : a ``dyn.config_reader.Configuration`` object
"""
def __init__(self, config=None):
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
if config is None:
text = f'{__class__.__name__} needs configuration object, ' \
'None provided.'
self.logger.error(text)
raise ValueError(text)
self.config = config
self.system = config.system
self.settings = config.settings
self.all_models = config.all_models
self.input_directory = config.settings.io_settings['input_directory']
self.plotdir = config.settings.io_settings['plot_directory']
self.modeldir = config.settings.io_settings['model_directory']
pb_sauron_colormap.register_sauron_colormap()
def make_chi2_vs_model_id_plot(self, which_chi2=None, figtype=None):
"""
Generates a (kin)chi2 vs. model id plot
Parameters
----------
which_chi2 : STR, optional
Determines whether chi2 or kinchi2 is used. If None, the setting
in the configuration file's parameter settings is used.
Must be None, 'chi2', or 'kinchi2'. The default is None.
figtype : STR, optional
Determines the file extension to use when saving the figure.
If None, the default setting is used ('.png').
Raises
------
ValueError
If which_chi2 is not one of None, 'chi2', or 'kinchi2'.
Returns
-------
fig : matplotlib.pyplot.figure
Figure instance.
"""
if figtype is None:
figtype = '.png'
if which_chi2 is None:
which_chi2 = self.settings.parameter_space_settings['which_chi2']
if which_chi2 not in ('chi2', 'kinchi2'):
text = 'which_chi2 needs to be chi2 or kinchi2, ' \
f'but it is {which_chi2}'
self.logger.error(text)
raise ValueError(text)
n_models = len(self.all_models.table)
fig = plt.figure()
plt.plot([i for i in range(n_models)],
self.all_models.table[which_chi2],
'rx')
plt.gca().set_title(f'{which_chi2} vs. model id')
plt.xlabel('model id')
plt.ylabel(which_chi2)
fig.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
self.logger.info(f'{which_chi2} vs. model id plot created '
f'({n_models} models).')
figname = self.plotdir + which_chi2 + '_progress_plot' + figtype
fig.savefig(figname)
self.logger.info(f'Plot {figname} saved in {self.plotdir}')
return fig
def make_chi2_plot(self, which_chi2=None, n_excl=0, figtype=None):
"""
Generates a chisquare plot
The models generated are shown on a grid of parameter space.
The best-fit model is marked with a black cross.
The coloured circles represent models within 3 sigma
confidence level (light colours and larger circles
indicate smaller values of the chisquare). The small
black dots indicate the models outside this confidence region.
Parameters
----------
which_chi2 : STR, optional
Determines whether chi2 or kinchi2 is used. If None, the setting
in the configuration file's parameter settings is used.
Must be None, 'chi2', or 'kinchi2'. The default is None.
        n_excl : integer, optional
Determines how many models (in the initial burn-in phase of
the fit) to exclude from the plot. Must be an integer number.
Default is 0 (all models are shown). Use this with caution!
figtype : STR, optional
Determines the file extension to use when saving the figure.
If None, the default setting is used ('.png').
Raises
------
ValueError
If which_chi2 is not one of None, 'chi2', or 'kinchi2'.
Returns
-------
fig : matplotlib.pyplot.figure
Figure instance.
"""
if figtype is None:
figtype = '.png'
if which_chi2 is None:
which_chi2 = self.settings.parameter_space_settings['which_chi2']
if which_chi2 not in ('chi2', 'kinchi2'):
text = 'which_chi2 needs to be chi2 or kinchi2, ' \
f'but it is {which_chi2}'
self.logger.error(text)
raise ValueError(text)
self.logger.info(f'Making chi2 plot scaled according to {which_chi2}')
pars = self.config.parspace
val = deepcopy(self.all_models.table)
        # exclude the first n_excl models (specified by the user), e.g. the
        # initial burn-in phase, in case the values were really off there
        # (or alternatively based on too big a Delta chi2)
val = val[n_excl:]
#only use models that are finished
val=val[val['all_done']==True]
# add black hole scaling
scale_factor = np.zeros(len(val))
for i in range(len(val)):
chi2val = val[which_chi2][i]
model_id=np.where(self.all_models.table[which_chi2]==chi2val)[0][0]
scale_factor[i] = \
self.all_models.get_model_velocity_scaling_factor( \
model_id=model_id)
dh = self.system.get_all_dark_non_plummer_components()
dh = dh[0] # take the first as there should only be one of these
if type(dh) is physys.NFW:
val[f'c-{dh.name}'] = val[f'c-{dh.name}']
val[f'f-{dh.name}'] = val[f'f-{dh.name}']
elif type(dh) is physys.NFW_m200_c:
pass
elif type(dh) is physys.Hernquist:
val[f'rhoc-{dh.name}']= val[f'rhoc-{dh.name}']*scale_factor**2
elif type(dh) is physys.TriaxialCoredLogPotential:
val[f'Vc-{dh.name}'] = val[f'Vc-{dh.name}']*scale_factor
elif type(dh) is physys.GeneralisedNFW:
val[f'Mvir-{dh.name}'] = val[f'Mvir-{dh.name}']*scale_factor**2
else:
text = f'unknown dark halo type component'
self.logger.error(text)
raise ValueError(text)
# get the plummer component i.e. black hole
bh = self.system.get_component_from_class(physys.Plummer)
val[f'm-{bh.name}'] = np.log10(val[f'm-{bh.name}']*scale_factor**2)
#get number and names of parameters that are not fixed
nofix_sel=[]
nofix_name=[]
nofix_latex=[]
nofix_islog=[]
for i in np.arange(len(pars)):
if pars[i].fixed==False:
pars[i].name
nofix_sel.append(i)
if pars[i].name == 'ml':
nofix_name.insert(0, 'ml')
nofix_latex.insert(0, pars[i].LaTeX)
nofix_islog.insert(0, pars[i].logarithmic)
else:
nofix_name.append(pars[i].name)
nofix_latex.append(pars[i].LaTeX)
nofix_islog.append(pars[i].logarithmic)
nnofix=len(nofix_sel)
nf=len(val)
## 1 sigma confidence level
chlim = np.sqrt(self.config.get_2n_obs())
chi2pmin=np.min(val[which_chi2])
chi2t = val[which_chi2] - chi2pmin
val.add_column(chi2t, name='chi2t')
val.sort(['chi2t'])
#start of the plotting
figname = self.plotdir + which_chi2 + '_plot' + figtype
colormap_orig = mpl.cm.viridis
colormap = mpl.cm.get_cmap('viridis_r')
fig = plt.figure(figsize=(10, 10))
for i in range(0, nnofix - 1):
for j in range(nnofix-1, i, -1):
xtit = ''
ytit = ''
if i==0 : ytit = nofix_latex[j]
xtit = nofix_latex[i]
pltnum = (nnofix-1-j) * (nnofix-1) + i+1
ax = plt.subplot(nnofix-1, nnofix-1, pltnum)
plt.plot(val[nofix_name[i]],val[nofix_name[j]], 'D',
color='black', markersize=2)
for k in range(nf - 1, -1, -1):
if val['chi2t'][k]/chlim<=3: #only significant chi2 values
color = colormap(val['chi2t'][k]/chlim)
                        # colours the significant chi2 values
markersize = 10-3*(val['chi2t'][k]/(chlim))
#smaller chi2 become bigger :)
plt.plot((val[nofix_name[i]])[k],
(val[nofix_name[j]])[k], 'o',
markersize=markersize, color=color)
if val['chi2t'][k]==0:
plt.plot((val[nofix_name[i]])[k],
(val[nofix_name[j]])[k], 'x',
markersize=10, color='k')
if nofix_islog[i]:
ax.set_xscale('log')
if nofix_islog[j]:
ax.set_yscale('log')
if j==i+1:
ax.set_xlabel(xtit, fontsize=12)
ax.set_xmargin(0.5)
nbins = len(ax.get_xticklabels())
ax.xaxis.set_major_locator(MaxNLocator(nbins=nbins, prune='lower'))
else:
ax.set_xticks([])
if i==0:
ax.set_ylabel(ytit, fontsize=12)
else:
ax.yaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_minor_formatter(NullFormatter())
plt.subplots_adjust(hspace=0)
plt.subplots_adjust(wspace=0)
axcb = fig.add_axes([0.75, 0.07, 0.2, 0.02])
cb = mpl.colorbar.ColorbarBase(axcb,
cmap=plt.get_cmap('viridis_r'),
norm=mpl.colors.Normalize(vmin=0., vmax=3),
orientation='horizontal')
plt.subplots_adjust(top=0.99, right=0.99, bottom=0.07, left=0.1)
fig.savefig(figname)
self.logger.info(f'Plot {figname} saved in {self.plotdir}')
return fig
def make_contour_plot(self):
# first version written by sabine, will add in the weekend
#
pass
def plot_kinematic_maps(self,
model=None,
kin_set=0,
cbar_lims='default',
figtype=None,
**kwargs):
"""
Generates a kinematic map of a model with v, sigma, h3, h4...
Maps of the surface brightness, mean line-of-sight velocity,
velocity dispersion, and higher order Gauss–Hermite moments
are shown. The first row are data, the second row the best-fit
model, and the third row the residuals.
Parameters
----------
model : model, optional
Determines which model is used for the plot.
If model = None, the model corresponding to the minimum
chisquare (so far) is used; the setting in the configuration
file's parameter settings is used to determine which chisquare
to consider. The default is None.
kin_set : integer or 'all'
Determines which kinematic set to use for the plot.
The value of this parameter should be the index of the data
set (e.g. kin_set=0 , kin_set=1). The default is kin_set=0.
If kin_set='all', several kinematic maps are produced, one
for each kinematic dataset. A list of (fig,kin_set_name) is
returned where fig are figure objects and kin_set_name are
the names of the kinematics sets.
cbar_lims : STR
Determines which set of values is used to determine the
limiting values defining the colorbar used in the plots.
Accepted values: 'model', 'data', 'combined', 'default'.
The default is 'data' for GaussHermite kinematics, and [0,3] for
BayesLOSVD kinematics where reduced chi2 values are plotted.
figtype : STR, optional
Determines the file extension to use when saving the figure.
If None, the default setting is used ('.png').
Raises
------
ValueError
If kin_set is not smaller than the number of kinematic sets.
ValueError
If cbar_lims is not one of 'model', 'data', or 'combined'.
Returns
-------
list or `matplotlib.pyplot.figure`
            if kin_set == 'all', returns a list of `(matplotlib.pyplot.figure, string)`
            tuples, i.e. figure instances along with the kinematics set names;
            else, returns a `matplotlib.pyplot.figure`
"""
# Taken from schw_kin.py.
if figtype is None:
figtype = '.png'
stars = \
self.system.get_component_from_class(physys.TriaxialVisibleComponent)
n_kin = len(stars.kinematic_data)
#########################################
if kin_set == 'all':
self.logger.info(f'Plotting kinematic maps for {n_kin} kin_sets.')
figures = []
for i in range(n_kin):
fig = self.plot_kinematic_maps(model=model,
kin_set=i,
cbar_lims=cbar_lims)
figures.append((fig, stars.kinematic_data[i].name))
return figures # returns a list of (fig,kin_name) tuples
#########################################
if kin_set >= n_kin:
text = f'kin_set must be < {n_kin}, but it is {kin_set}'
self.logger.error(text)
raise ValueError(text)
kin_name = stars.kinematic_data[kin_set].name
self.logger.info(f'Plotting kinematic maps for kin_set no {kin_set}: '
f'{kin_name}')
if model is None:
which_chi2 = self.settings.parameter_space_settings['which_chi2']
models_done = np.where(self.all_models.table['all_done'])
min_chi2 = min(m[which_chi2]
for m in self.all_models.table[models_done])
t = self.all_models.table.copy(copy_data=True) # deep copy!
t.add_index(which_chi2)
model_id = t.loc_indices[min_chi2]
model = self.all_models.get_model_from_row(model_id)
kin_type = type(stars.kinematic_data[kin_set])
ws_type = self.settings.weight_solver_settings['type']
if kin_type is kinematics.GaussHermite:
if ws_type == 'LegacyWeightSolver':
if cbar_lims=='default':
cbar_lims = 'data'
fig = self._plot_kinematic_maps_gaussherm(
model,
kin_set,
cbar_lims=cbar_lims,
**kwargs)
else:
self.logger.info(f'Gauss Hermite kinematic maps can only be '
                                 'plotted if LegacyWeightSolver is used')
fig = plt.figure(figsize=(27, 12))
elif kin_type is kinematics.BayesLOSVD:
if cbar_lims=='default':
cbar_lims = [0,3]
fig = self._plot_kinematic_maps_bayeslosvd(
model,
kin_set,
cbar_lims=cbar_lims,
**kwargs)
figname = self.plotdir + f'kinematic_map_{kin_name}' + figtype
fig.savefig(figname, dpi=300)
return fig
def _plot_kinematic_maps_bayeslosvd(self,
model,
kin_set,
cmap=None,
cbar_lims=[0,3],
color_dat='0.3',
color_mod='C2'):
"""Short summary.
Parameters
----------
model : type
Description of parameter `model`.
kin_set : type
Description of parameter `kin_set`.
cmap : type
Description of parameter `cmap`.
cbar_lims : type
Description of parameter `cbar_lims`.
color_dat : type
Description of parameter `color_dat`.
color_mod : type
Description of parameter `color_mod`.
Returns
-------
type
Description of returned object.
"""
# get the data
stars = \
self.system.get_component_from_class(physys.TriaxialVisibleComponent)
kin_set = stars.kinematic_data[kin_set]
# helper function to decide which losvds to plot
def dissimilar_subset_greedy_search(distance_matrix, target_size):
"""Greedy algorithm to find dissimilar subsets
Args:
distance_matrix (array): 2D matrix of pairwise distances.
target_size (int): Desired size of subset.
Returns:
tuple: (list of index values of subset in distance_matrix,
minimum pairwise distance in this subset)
"""
n = distance_matrix.shape[0]
idx = np.unravel_index(np.argmax(distance_matrix), distance_matrix.shape)
idx = list(idx)
tmp = distance_matrix[idx][:,idx]
for n0 in range(3, target_size+1):
iii = list(range(n))
for idx0 in idx:
iii.remove(idx0)
ttt = []
for i in iii:
idx_tmp = idx + [i]
tmp = distance_matrix[idx_tmp][:,idx_tmp]
ttt += [np.min(tmp[np.triu_indices(n0, k=1)])]
idx += [iii[np.argmax(ttt)]]
tmp = distance_matrix[idx][:,idx]
min_pairwise_dist = np.min(tmp[np.triu_indices(target_size, k=1)])
return idx, min_pairwise_dist
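        # For example (illustrative): dissimilar_subset_greedy_search(dist, 9) starts
        # from the most distant pair of LOSVDs and then repeatedly adds the LOSVD that
        # maximises the minimum pairwise distance within the subset, until 9 are chosen.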
# helper function to get positions of a regular 3x3 grid on the map
def get_coords_of_regular_3by3_grid():
# get range of x and y values
minx = np.min(kin_set.data['xbin'])
maxx = np.max(kin_set.data['xbin'])
x = np.array([minx, maxx])
miny = np.min(kin_set.data['ybin'])
maxy = np.max(kin_set.data['ybin'])
y = np.array([miny, maxy])
x, y = kin_set.convert_to_plot_coords(x, y)
# get 3 evenly spaced coords in x and y
# taking every other element from 7 points i.e - 0 1 0 1 0 1 0
xgrid = np.linspace(*x, 7)[1::2]
ygrid = np.linspace(*y, 7)[1::2]
xgrid = np.sort(xgrid) # sort left to right
ygrid = np.sort(ygrid)[::-1] # sort top to bottom
xgrid, ygrid = np.meshgrid(xgrid, ygrid, indexing='xy')
xgrid, ygrid = np.ravel(xgrid), np.ravel(ygrid)
return xgrid, ygrid
# helper function to map positions of chosen LOSVDs to regular 3x3 grid
def find_min_dist_matching_of_bipartite_graph(dist,
n_iter_binary_search=15):
# dist = distance matrix between 2 sets of points (a1, a2, ..., aN)
# and (b1, b2, ..., bN). We want to find a 1-1 matching between a's
# and b's so that distances between matched pairs are not too big.
# The decision problem version of this optimization problem is to
# find the smallest distance threshold t such that the bipartite
# graph G has a matching, where G has nodes connected iff they are
# within distance t. This function finds this threshold using a
# binary search.
def has_complete_matching(threshold):
G = csr_matrix(dist<threshold)
matching = maximum_bipartite_matching(G, perm_type='columns')
return -1 not in matching
lo, hi = np.min(dist), np.max(dist)
test = (has_complete_matching(lo), has_complete_matching(hi))
if test==(False,True):
pass
else:
raise ReorderLOSVDError('Error Reordering LOSVDs')
for i in range(n_iter_binary_search):
med = np.mean([lo, hi])
med_has_matching = has_complete_matching(med)
                if med_has_matching:
                    hi = med
                else:
                    lo = med
if has_complete_matching(lo):
threshold = lo
elif has_complete_matching(hi):
threshold = hi
else:
raise ReorderLOSVDError('Error Reordering LOSVDs')
return threshold
# helper function to reorder the plotted LOSVDs into a sensible order
def reorder_losvds(idx_to_plot):
x = kin_set.data['xbin'][idx_to_plot]
y = kin_set.data['ybin'][idx_to_plot]
x, y = kin_set.convert_to_plot_coords(x, y)
xg, yg = get_coords_of_regular_3by3_grid()
# get distance between plot positions and regular grid
dist = (x[:,np.newaxis] - xg[np.newaxis,:])**2
dist += (y[:,np.newaxis] - yg[np.newaxis,:])**2
dist = dist**0.5
threshold = find_min_dist_matching_of_bipartite_graph(dist)
graph = csr_matrix(dist<threshold)
idx_reorder = maximum_bipartite_matching(graph, perm_type='columns')
idx_to_plot = np.array(idx_to_plot)[idx_reorder]
return idx_to_plot
# get the model LOSVDs
orblib = model.get_orblib()
weight_solver = model.get_weights(orblib)
orblib.read_losvd_histograms()
losvd_orblib = kin_set.transform_orblib_to_observables(
orblib.losvd_histograms[0],
None)
losvd_model = np.einsum('ijk,i->jk', losvd_orblib, model.weights)
        # normalise LOSVDs to the same scale as the data, i.e. summing to 1
losvd_model = (losvd_model.T/np.sum(losvd_model, 1)).T
# get chi2's
chi2_per_losvd_bin = losvd_model - kin_set.data['losvd']
chi2_per_losvd_bin = chi2_per_losvd_bin/kin_set.data['dlosvd']
chi2_per_losvd_bin = chi2_per_losvd_bin**2.
chi2_per_apertur = np.sum(chi2_per_losvd_bin, 1)
reduced_chi2_per_apertur = chi2_per_apertur/kin_set.data.meta['nvbins']
# pick a subset of 9 LOSVDs to plot which are not similar to one another
dist = 1.*kin_set.data['losvd']
dist = dist[:,np.newaxis,:] - dist[np.newaxis,:,:]
dist = np.sum(dist**2., 2)**0.5
idx_to_plot, _ = dissimilar_subset_greedy_search(dist, 9)
# reorder losvds so they increase like "reading direction" i.e. from
# left to right, then from top to bottom
try:
idx_to_plot = reorder_losvds(idx_to_plot)
except ReorderLOSVDError:
txt = 'Failed to reorder LOSVDs in a sensible way. '
            txt += 'Using an arbitrary order instead.'
self.logger.info(txt)
pass
# setup the figure
fig = plt.figure(figsize=(7.5, 3.5))
gs = fig.add_gridspec(ncols=2, nrows=1, top=0.9)
# add axis for chi map
ax_chi2 = fig.add_subplot(gs[1])
ax_chi2.minorticks_off()
ax_chi2.tick_params(length=3)
ax_chi2.set_xlabel('x [arcsec]')
ax_chi2.set_ylabel('y [arcsec]')
pos = ax_chi2.get_position()
dx = 0.04
pos = [pos.x0+dx, pos.y0, pos.width-dx, pos.height]
ax_chi2.set_position(pos)
# add axes for losvds
gs2 = gs[0].subgridspec(3, 3, hspace=0, wspace=0)
ax_losvds = []
for i in range(3):
for j in range(3):
ax = fig.add_subplot(gs2[i,j])
ax_losvds += [ax]
if i<2:
ax.set_xticks([])
else:
ax.set_xlabel('$v_\mathrm{LOS}$ [km/s]')
ax.set_yticks([])
# get cmap
vmin, vmax = cbar_lims
if cmap is None:
if (vmin<1) and (1<vmax):
cmap = self.shiftedColorMap(
plt.cm.RdYlBu_r,
start=0,
midpoint=(1.-vmin)/(vmax-vmin),
stop=1.0)
else:
cmap = plt.cm.RdYlBu_r
# plot the chi2 map
map_plotter = kin_set.get_map_plotter()
plt.sca(ax_chi2)
map_plotter(reduced_chi2_per_apertur,
label='$\chi^2_r$',
colorbar=True,
vmin=vmin,
vmax=vmax,
cmap=cmap)
mean_chi2r = np.mean(reduced_chi2_per_apertur)
ax_chi2.set_title(f'$\chi^2_r={mean_chi2r:.2f}$')
# plot locations of LOSVDs
x = kin_set.data['xbin'][idx_to_plot]
y = kin_set.data['ybin'][idx_to_plot]
x, y = kin_set.convert_to_plot_coords(x, y)
ax_chi2.plot(x, y, 'o', ms=15, c='none', mec='0.2')
for i, (x0,y0) in enumerate(zip(x,y)):
ax_chi2.text(x0, y0, f'{i+1}', ha='center', va='center')
# plot LOSVDs
varr = kin_set.data.meta['vcent']
for i, (idx0, ax0) in enumerate(zip(idx_to_plot, ax_losvds)):
col = (reduced_chi2_per_apertur[idx0]-vmin)/(vmax-vmin)
col = cmap(col)
ax0.text(0.05, 0.95, f'{i+1}',
transform=ax0.transAxes, ha='left', va='top',
bbox = dict(boxstyle=f"circle", fc=col, alpha=0.5)
)
dat_line, = ax0.plot(varr,
kin_set.data['losvd'][idx0],
ls=':',
color=color_dat)
dat_band = ax0.fill_between(
varr,
kin_set.data['losvd'][idx0]-kin_set.data['dlosvd'][idx0],
kin_set.data['losvd'][idx0]+kin_set.data['dlosvd'][idx0],
alpha=0.2,
color=color_dat,
)
mod_line, = ax0.plot(varr,
losvd_model[idx0],
'-',
color=color_mod)
ylim = ax0.get_ylim()
ax0.set_ylim(0, ylim[1])
# add legend()
pos = ax_losvds[1].get_position()
x0 = pos.x0+pos.width/2.
fig.legend([(dat_line, dat_band), mod_line],
['data', 'model'],
bbox_to_anchor=[x0, 0.95],
loc='center',
ncol=2)
return fig
def _plot_kinematic_maps_gaussherm(self, model, kin_set, cbar_lims='data'):
stars = \
self.system.get_component_from_class(physys.TriaxialVisibleComponent)
kinem_fname = model.directory + 'nn_kinem.out'
body_kinem = np.genfromtxt(kinem_fname, skip_header=1)
first_bin = sum(k.n_apertures for k in stars.kinematic_data[:kin_set])
n_bins = stars.kinematic_data[kin_set].n_apertures
body_kinem = body_kinem[first_bin:first_bin+n_bins]
self.logger.debug(f'kin_set={kin_set}, plotting bins '
f'{first_bin} through {first_bin+n_bins-1}')
# if kin_set==0:
# n_bins=stars.kinematic_data[0].n_apertures
# body_kinem=body_kinem[0:n_bins,:]
# self.logger.info(f'first_bin=0, last_bin={n_bins}')
# elif kin_set==1:
# n_bins1=stars.kinematic_data[0].n_apertures
# n_bins2=stars.kinematic_data[1].n_apertures
# body_kinem=body_kinem[n_bins1:n_bins1+n_bins2,:]
# self.logger.info(f'first_bin={n_bins1}, last_bin={n_bins1+n_bins2}')
# else:
# text = f'kin_set must be 0 or 1, not {kin_set}'
# self.logger.error(text)
# raise ValueError(text)
if self.settings.weight_solver_settings['number_GH'] == 2:
id_num, flux, fluxm, velm, vel, dvel, sigm, sig, dsig = body_kinem.T
            # higher moments are set to 0 so the plotting routine below need not change
h3m, h3, dh3, h4m, h4, dh4 = vel*0, vel*0, vel*0+0.4, vel*0, vel*0, vel*0+0.4
if self.settings.weight_solver_settings['number_GH'] == 4:
id_num, flux, fluxm, velm, vel, dvel, sigm, sig, dsig, h3m, h3, dh3, h4m, h4, dh4 = body_kinem.T
if self.settings.weight_solver_settings['number_GH'] == 6:
id_num, flux, fluxm, velm, vel, dvel, sigm, sig, dsig, h3m, h3, dh3, h4m, h4, dh4, h5m, h5, dh5, h6m, h6, dh6 = body_kinem.T
#still ToDO: Add the kinematic map plots for h5 and h6
text = '`cbar_lims` must be one of `model`, `data` or `combined`'
if not cbar_lims in ['model', 'data', 'combined']:
self.logger.error(text)
raise AssertionError(text)
if cbar_lims=='model':
vmax = np.max(np.abs(velm))
smax, smin = np.max(sigm), np.min(sigm)
h3max, h3min = 0.15, -0.15
h4max, h4min = 0.15, -0.15
elif cbar_lims=='data':
vmax = np.max(np.abs(vel))
smax, smin = np.max(sig), np.min(sig)
h3max, h3min = 0.15, -0.15
h4max, h4min = 0.15, -0.15
if h4max == h4min:
h4max, h4min = np.max(h4m), np.min(h4m)
elif cbar_lims=='combined':
tmp = np.hstack((velm, vel))
vmax = np.max(np.abs(tmp))
tmp = np.hstack((sigm, sig))
smax, smin = np.max(tmp), np.min(tmp)
tmp = np.hstack((h3m, h3))
h3max, h3min = np.max(tmp), np.min(tmp)
tmp = np.hstack((h4m, h4))
h4max, h4min = np.max(tmp), np.min(tmp)
else:
self.logger.error('unknown choice of `cbar_lims`')
# Read aperture.dat
# The angle that is saved in this file is measured counter clock-wise
# from the galaxy major axis to the X-axis of the input data.
aperture_fname = stars.kinematic_data[kin_set].aperturefile
aperture_fname = self.input_directory + aperture_fname
lines = [line.rstrip('\n').split() for line in open(aperture_fname)]
minx = np.float(lines[1][0])
miny = np.float(lines[1][1])
sx = np.float(lines[2][0])
sy = np.float(lines[2][1])
sy = sy + miny
angle_deg = np.float(lines[3][0])
nx = np.int(lines[4][0])
ny = np.int(lines[4][1])
dx = sx / nx
self.logger.debug(f"Pixel grid dimension is dx={dx},nx={nx},ny={ny}")
grid = np.zeros((nx, ny), dtype=int)
xr = np.arange(nx, dtype=float) * dx + minx + 0.5 * dx
yc = np.arange(ny, dtype=float) * dx + miny + 0.5 * dx
xi = np.outer(xr, (yc * 0 + 1))
xt = xi.T.flatten()
yi = np.outer((xr * 0 + 1), yc)
yt = yi.T.flatten()
self.logger.debug(f'PA: {angle_deg}')
xi = xt
yi = yt
# read bins.dat
bin_fname = stars.kinematic_data[kin_set].binfile
bin_fname = self.input_directory + bin_fname
lines_bins = [line.rstrip('\n').split() for line in open(bin_fname)]
i = 0
str_head = []
i_var = []
grid = []
while i < len(lines_bins):
for x in lines_bins[i]:
if i == 0:
str_head.append(str(x))
if i == 1:
i_var.append(np.int(x))
if i > 1:
grid.append(np.int(x))
i += 1
str_head = str(str_head[0])
i_var = int(i_var[0])
grid = np.ravel(np.array(grid))
# bins start counting at 1 in fortran and at 0 in idl:
grid = grid - 1
# Only select the pixels that have a bin associated with them.
s = np.ravel(np.where((grid >= 0)))
fhist, fbinedge = np.histogram(grid[s], bins=len(flux))
flux = flux / fhist
fluxm = fluxm / fhist
### plot settings
minf = min(np.array(list(map(np.log10, flux[grid[s]] / max(flux)))))
maxf = max(np.array(list(map(np.log10, flux[grid[s]] / max(flux)))))
minfm = min(np.array(list(map(np.log10, fluxm[grid[s]] / max(fluxm)))))
maxfm = max(np.array(list(map(np.log10, fluxm[grid[s]] / max(fluxm)))))
        # The galaxy has NOT already been rotated by the PA to align the major axis with x
fig = plt.figure(figsize=(27, 12))
plt.subplots_adjust(hspace=0.7,
wspace=0.01,
left=0.01,
bottom=0.05,
top=0.99,
right=0.99)
sauron_colormap = plt.get_cmap('sauron')
sauron_r_colormap = plt.get_cmap('sauron_r')
#colormapname = plt.get_cmap('cmr.ember')
kw_display_pixels = dict(pixelsize=dx,
angle=angle_deg,
colorbar=True,
nticks=7,
cmap='sauron')
#cmap='cmr.ember')
x, y = xi[s], yi[s]
### PLOT THE REAL DATA
ax1 = plt.subplot(3, 5, 1)
c = np.array(list(map(np.log10, flux[grid[s]] / max(flux))))
display_pixels.display_pixels(x, y, c,
vmin=minf, vmax=maxf,
**kw_display_pixels)
ax1.set_title('surface brightness (log)',fontsize=20, pad=20)
ax2 = plt.subplot(3, 5, 2)
display_pixels.display_pixels(x, y, vel[grid[s]],
vmin=-1.0 * vmax, vmax=vmax,
**kw_display_pixels)
ax2.set_title('velocity',fontsize=20, pad=20)
ax3 = plt.subplot(3, 5, 3)
display_pixels.display_pixels(x, y, sig[grid[s]],
vmin=smin, vmax=smax,
**kw_display_pixels)
ax3.set_title('velocity dispersion',fontsize=20, pad=20)
ax4 = plt.subplot(3, 5, 4)
display_pixels.display_pixels(x, y, h3[grid[s]],
vmin=h3min, vmax=h3max,
**kw_display_pixels)
ax4.set_title(r'$h_{3}$ moment',fontsize=20, pad=20)
ax5 = plt.subplot(3, 5, 5)
display_pixels.display_pixels(x, y, h4[grid[s]],
vmin=h4min, vmax=h4max,
**kw_display_pixels)
ax5.set_title(r'$h_{4}$ moment',fontsize=20, pad=20)
### PLOT THE MODEL DATA
plt.subplot(3, 5, 6)
c = np.array(list(map(np.log10, fluxm[grid[s]] / max(fluxm))))
display_pixels.display_pixels(x, y, c,
vmin=minfm, vmax=maxfm,
**kw_display_pixels)
plt.subplot(3, 5, 7)
display_pixels.display_pixels(x, y, velm[grid[s]],
vmin=-1.0 * vmax, vmax=vmax,
**kw_display_pixels)
plt.subplot(3, 5, 8)
display_pixels.display_pixels(x, y, sigm[grid[s]],
vmin=smin, vmax=smax,
**kw_display_pixels)
plt.subplot(3, 5, 9)
display_pixels.display_pixels(x, y, h3m[grid[s]],
vmin=h3min, vmax=h3max,
**kw_display_pixels)
plt.subplot(3, 5, 10)
display_pixels.display_pixels(x, y, h4m[grid[s]],
vmin=h4min, vmax=h4max,
**kw_display_pixels)
kw_display_pixels = dict(pixelsize=dx,
angle=angle_deg,
colorbar=True,
nticks=7,
cmap='bwr')
### PLOT THE ERROR NORMALISED RESIDUALS
plt.subplot(3, 5, 11)
c = (fluxm[grid[s]] - flux[grid[s]]) / flux[grid[s]]
display_pixels.display_pixels(x, y, c,
vmin=-0.05, vmax=0.05,
**kw_display_pixels)
plt.subplot(3, 5, 12)
c = (velm[grid[s]] - vel[grid[s]]) / dvel[grid[s]]
display_pixels.display_pixels(x, y, c,
vmin=-10, vmax=10,
**kw_display_pixels)
plt.subplot(3, 5, 13)
c = (sigm[grid[s]] - sig[grid[s]]) / dsig[grid[s]]
display_pixels.display_pixels(x, y, c,
vmin=-10, vmax=10,
**kw_display_pixels)
plt.subplot(3, 5, 14)
c = (h3m[grid[s]] - h3[grid[s]]) / dh3[grid[s]]
display_pixels.display_pixels(x, y, c,
vmin=-10, vmax=10,
**kw_display_pixels)
plt.subplot(3, 5, 15)
c = (h4m[grid[s]] - h4[grid[s]]) / dh4[grid[s]]
display_pixels.display_pixels(x, y, c,
vmin=-10, vmax=10,
**kw_display_pixels)
fig.subplots_adjust(left=0.04, wspace=0.3,
hspace=0.01, right=0.97)
kwtext = dict(size=20, ha='center', va='center', rotation=90.)
fig.text(0.015, 0.83, 'data', **kwtext)
fig.text(0.015, 0.53, 'model', **kwtext)
fig.text(0.015, 0.2, 'residual', **kwtext)
return fig
#############################################################################
######## Routines from schw_mass.py, necessary for mass_plot ################
#############################################################################
def intg2_trimge_intrmass(self, phi, theta, Fxyparm):
rr = Fxyparm[4,0]
den_pot_pc = Fxyparm[0,:]
sig_pot_pc = Fxyparm[1,:]
q_pot = Fxyparm[2,:]
p_pot = Fxyparm[3,:]
sth = np.sin(theta)
cth =
|
np.cos(theta)
|
numpy.cos
|
import copy
from math import atan2, cos, hypot, radians, sin
import numpy
class Vector(object):
"""
Vector data for a :class:`FlowSolution`, also the base for
:class:`GridCoordinates`.
In Cartesian coordinates, array indexing order is x,y,z;
so an 'i-face' is a y,z surface.
In cylindrical coordinates, array indexing order is z,r,t;
so an 'i-face' is an r,t surface.
"""
def __init__(self):
self.x = None
self.y = None
self.z = None
self.r = None
self.t = None
self._ghosts = (0, 0, 0, 0, 0, 0)
def _get_ghosts(self):
return self._ghosts
def _set_ghosts(self, ghosts):
if len(ghosts) < 2*len(self.shape):
raise ValueError('ghosts must be a %d-element array'
% (2*len(self.shape)))
for i in ghosts:
if i < 0:
raise ValueError('All ghost values must be >= 0')
self._ghosts = ghosts
ghosts = property(_get_ghosts, _set_ghosts,
doc='Number of ghost/rind planes for each index direction.')
@property
def shape(self):
""" Data index limits, not including 'ghost/rind' planes. """
ijk = self.real_shape
if len(ijk) < 1:
return ()
ghosts = self._ghosts
imax = ijk[0] - (ghosts[0] + ghosts[1])
if len(ijk) < 2:
return (imax,)
jmax = ijk[1] - (ghosts[2] + ghosts[3])
if len(ijk) < 3:
return (imax, jmax)
kmax = ijk[2] - (ghosts[4] + ghosts[5])
return (imax, jmax, kmax)
@property
def real_shape(self):
""" Data index limits, including any 'ghost/rind' planes. """
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
return arr.shape
return ()
def is_equivalent(self, other, name, logger, tolerance=0.):
"""
Test if self and `other` are equivalent.
other: :class:`Vector`
The vector to check against.
name: string
Name of this vector, used for reporting differences.
logger: :class:`Logger` or None
Used to log debug messages that will indicate what if anything is
not equivalent.
tolerance: float
The maximum relative difference in array values to be considered
equivalent.
"""
if not isinstance(other, Vector):
logger.debug('other is not a Vector object.')
return False
for component in ('x', 'y', 'z', 'r', 't'):
if not self._check_equivalent(other, name, component, logger,
tolerance):
return False
if other.ghosts != self.ghosts:
logger.debug('ghost cell counts are not equal: %s vs. %s.',
other.ghosts, self.ghosts)
return False
return True
def _check_equivalent(self, other, name, component, logger, tolerance):
""" Check equivalence to a component array. """
arr = getattr(self, component)
other_arr = getattr(other, component)
if arr is None:
if other_arr is not None:
logger.debug("%s has no %s component but 'other' does.", name,
component.upper())
return False
else:
if tolerance > 0.:
if not numpy.allclose(other_arr, arr, tolerance, tolerance):
logger.debug("%s %s values are not 'close'.", name,
component.upper())
return False
else:
try:
if (other_arr != arr).any():
logger.debug('%s %s values are not equal.', name,
component.upper())
return False
except Exception as exc:
logger.debug('%s %s: %r vs. %r: %s', name, component.upper(),
other_arr, arr, exc)
logger.debug('!=: %r', other_arr != arr)
return False
return True
def extract(self, imin, imax, jmin=None, jmax=None, kmin=None, kmax=None,
ghosts=None):
"""
Construct a new :class:`Vector` from data extracted from the
specified region.
imin, imax, jmin, jmax, kmin, kmax: int
Specifies the region to extract.
Negative values are relative to the size in that dimension,
so -1 refers to the last element. For 2D zones omit kmin and kmax.
For 1D zones omit jmin, jmax, kmin, and kmax.
ghosts: int[]
            Number of ghost/rind planes for the new zone.
If ``None`` the existing specification is used.
"""
ghosts = ghosts or self._ghosts
i = len(self.shape)
if i == 3:
if kmin is None or kmax is None or jmin is None or jmax is None:
raise ValueError('3D extract requires jmin, jmax, kmin, and kmax')
return self._extract_3d(imin, imax, jmin, jmax, kmin, kmax, ghosts)
elif i == 2:
if kmin is not None or kmax is not None:
raise ValueError('2D extract undefined for kmin or kmax')
if jmin is None or jmax is None:
raise ValueError('2D extract requires jmin and jmax')
return self._extract_2d(imin, imax, jmin, jmax, ghosts)
elif i == 1:
if kmin is not None or kmax is not None:
raise ValueError('1D extract undefined for jmin, jmax, kmin, or kmax')
return self._extract_1d(imin, imax, ghosts)
else:
raise RuntimeError('Vector is empty!')
def _extract_3d(self, imin, imax, jmin, jmax, kmin, kmax, new_ghosts):
""" 3D (index space) extraction. """
ghosts = self._ghosts
# Support end-relative indexing and adjust for existing ghost planes.
vec_imax, vec_jmax, vec_kmax = self.shape
if imin < 0:
imin += vec_imax
imin += ghosts[0]
if imax < 0:
imax += vec_imax
imax += ghosts[0]
if jmin < 0:
jmin += vec_jmax
jmin += ghosts[2]
if jmax < 0:
jmax += vec_jmax
jmax += ghosts[2]
if kmin < 0:
kmin += vec_kmax
kmin += ghosts[4]
if kmax < 0:
kmax += vec_kmax
kmax += ghosts[4]
# Adjust for new ghost/rind planes.
imin -= new_ghosts[0]
imax += new_ghosts[1]
jmin -= new_ghosts[2]
jmax += new_ghosts[3]
kmin -= new_ghosts[4]
kmax += new_ghosts[5]
# Check limits.
if imin < 0 or imax > vec_imax+ghosts[1] or \
jmin < 0 or jmax > vec_jmax+ghosts[3] or \
kmin < 0 or kmax > vec_kmax+ghosts[5]:
region = (imin, imax, jmin, jmax, kmin, kmax)
original = (0, vec_imax+ghosts[1], 0, vec_jmax+ghosts[3],
0, vec_kmax+ghosts[5])
raise ValueError('Extraction region %s exceeds original %s'
% (region, original))
# Extract.
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
setattr(vec, component,
arr[imin:imax+1, jmin:jmax+1, kmin:kmax+1])
return vec
def _extract_2d(self, imin, imax, jmin, jmax, new_ghosts):
""" 2D (index space) extraction. """
ghosts = self._ghosts
# Support end-relative indexing and adjust for existing ghost planes.
vec_imax, vec_jmax = self.shape
if imin < 0:
imin += vec_imax
imin += ghosts[0]
if imax < 0:
imax += vec_imax
imax += ghosts[0]
if jmin < 0:
jmin += vec_jmax
jmin += ghosts[2]
if jmax < 0:
jmax += vec_jmax
jmax += ghosts[2]
# Check limits.
if imin < 0 or imax > vec_imax+ghosts[1] or \
jmin < 0 or jmax > vec_jmax+ghosts[3]:
region = (imin, imax, jmin, jmax)
original = (0, vec_imax+ghosts[1], 0, vec_jmax+ghosts[3])
raise ValueError('Extraction region %s exceeds original %s'
% (region, original))
# Extract.
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
setattr(vec, component,
arr[imin:imax+1, jmin:jmax+1])
return vec
def _extract_1d(self, imin, imax, new_ghosts):
""" 1D (index space) extraction. """
ghosts = self._ghosts
# Support end-relative indexing and adjust for existing ghost planes.
vec_imax, = self.shape
if imin < 0:
imin += vec_imax
imin += ghosts[0]
if imax < 0:
imax += vec_imax
imax += ghosts[0]
# Check limits.
if imin < 0 or imax > vec_imax+ghosts[1]:
region = (imin, imax)
original = (0, vec_imax+ghosts[1])
raise ValueError('Extraction region %s exceeds original %s'
% (region, original))
# Extract.
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
setattr(vec, component, arr[imin:imax+1])
return vec
def extend(self, axis, delta, npoints):
"""
Construct a new :class:`Vector` by replication.
axis: 'i', 'j', or 'k'
Index axis to extend.
delta: float.
Direction. A negative value adds points before the current
zero-index of `axis`.
npoints: int > 0
Number of points to add in `axis` dimension.
"""
if not delta:
raise ValueError('delta must be non-zero')
if npoints < 1:
raise ValueError('npoints must be >= 1')
i = len(self.shape)
if i == 3:
if axis not in ('i', 'j', 'k'):
raise ValueError('axis must be i, j, or k')
return self._extend_3d(axis, delta, npoints)
elif i == 2:
if axis not in ('i', 'j'):
raise ValueError('axis must be i or j')
return self._extend_2d(axis, delta, npoints)
elif i == 1:
if axis != 'i':
raise ValueError('axis must be i')
return self._extend_1d(delta, npoints)
else:
raise RuntimeError('Vector is empty!')
def _extend_3d(self, axis, delta, npoints):
""" 3D (index space) extension. """
imax, jmax, kmax = self.real_shape
if axis == 'i':
new_shape = (imax + npoints, jmax, kmax)
indx = imax if delta > 0 else npoints
elif axis == 'j':
new_shape = (imax, jmax + npoints, kmax)
indx = jmax if delta > 0 else npoints
else:
new_shape = (imax, jmax, kmax + npoints)
indx = kmax if delta > 0 else npoints
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
new_arr = numpy.zeros(new_shape)
if axis == 'i':
if delta > 0:
new_arr[0:indx, :, :] = arr
for i in range(npoints):
new_arr[indx+i, :, :] = arr[-1, :, :]
else:
new_arr[indx:, :, :] = arr
for i in range(npoints):
new_arr[i, :, :] = arr[0, :, :]
elif axis == 'j':
if delta > 0:
new_arr[:, 0:indx, :] = arr
for j in range(npoints):
new_arr[:, indx+j, :] = arr[:, -1, :]
else:
new_arr[:, indx:, :] = arr
for j in range(npoints):
new_arr[:, j, :] = arr[:, 0, :]
else:
if delta > 0:
new_arr[:, :, 0:indx] = arr
for k in range(npoints):
new_arr[:, :, indx+k] = arr[:, :, -1]
else:
new_arr[:, :, indx:] = arr
for k in range(npoints):
new_arr[:, :, k] = arr[:, :, 0]
setattr(vec, component, new_arr)
vec.ghosts = copy.copy(self._ghosts)
return vec
def _extend_2d(self, axis, delta, npoints):
""" 2D (index space) extension. """
imax, jmax = self.real_shape
if axis == 'i':
new_shape = (imax + npoints, jmax)
indx = imax if delta > 0 else npoints
else:
new_shape = (imax, jmax + npoints)
indx = jmax if delta > 0 else npoints
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
new_arr = numpy.zeros(new_shape)
if axis == 'i':
if delta > 0:
new_arr[0:indx, :] = arr
for i in range(npoints):
new_arr[indx+i, :] = arr[-1, :]
else:
new_arr[indx:, :] = arr
for i in range(npoints):
new_arr[i, :] = arr[0, :]
else:
if delta > 0:
new_arr[:, 0:indx] = arr
for j in range(npoints):
new_arr[:, indx+j] = arr[:, -1]
else:
new_arr[:, indx:] = arr
for j in range(npoints):
new_arr[:, j] = arr[:, 0]
setattr(vec, component, new_arr)
vec.ghosts = copy.copy(self._ghosts)
return vec
def _extend_1d(self, delta, npoints):
""" 1D (index space) extension. """
imax, = self.real_shape
new_shape = (imax + npoints,)
indx = imax if delta > 0 else npoints
vec = Vector()
for component in ('x', 'y', 'z', 'r', 't'):
arr = getattr(self, component)
if arr is not None:
new_arr = numpy.zeros(new_shape)
if delta > 0:
new_arr[0:indx] = arr
for i in range(npoints):
new_arr[indx+i] = arr[-1]
else:
new_arr[indx:] = arr
for i in range(npoints):
new_arr[i] = arr[0]
setattr(vec, component, new_arr)
vec.ghosts = copy.copy(self._ghosts)
return vec
def flip_z(self):
""" Convert to other-handed coordinate system. """
if self.z is None:
raise AttributeError('flip_z: no Z component')
self.z *= -1.
def make_cartesian(self, grid, axis='z'):
"""
Convert to Cartesian coordinate system.
grid: :class:`GridCoordinates`
Must be in cylindrical form.
axis: string
Specifies which is the cylinder axis ('z' or 'x').
"""
if grid.shape != self.shape:
raise NotImplementedError('make_cartesian: grid shape mismatch'
' not supported')
gt_flat = grid.t.flat
r_flat = self.r.flat
t_flat = self.t.flat
if axis == 'z' or self.z is None:
self.x = self.r.copy()
self.y = self.r.copy()
x_flat = self.x.flat
y_flat = self.y.flat
for i in range(self.r.size):
gt = gt_flat[i]
sine = sin(gt)
cosine = cos(gt)
r = r_flat[i]
t = t_flat[i]
x_flat[i] = r*cosine - t*sine
y_flat[i] = r*sine + t*cosine
self.r = None
self.t = None
elif axis == 'x':
self.x = self.z
self.y = self.r.copy()
self.z = self.r.copy()
y_flat = self.y.flat
z_flat = self.z.flat
for i in range(self.r.size):
gt = gt_flat[i]
sine = sin(gt)
cosine = cos(gt)
r = r_flat[i]
t = t_flat[i]
y_flat[i] = r*cosine - t*sine
z_flat[i] = r*sine + t*cosine
self.r = None
self.t = None
else:
raise ValueError("axis must be 'z' or 'x'")
def make_cylindrical(self, grid, axis='z'):
"""
Convert to cylindrical coordinate system.
grid: :class:`GridCoordinates`
Must be in cylindrical form.
axis: string
Specifies which is the cylinder axis ('z' or 'x').
"""
if grid.shape != self.shape:
raise NotImplementedError('make_cylindrical: grid shape mismatch'
' not supported')
gt_flat = grid.t.flat
self.r = self.x.copy()
self.t = self.x.copy()
r_flat = self.r.flat
t_flat = self.t.flat
if axis == 'z' or self.z is None:
x_flat = self.x.flat
y_flat = self.y.flat
for i in range(self.x.size):
gt = gt_flat[i]
x = x_flat[i]
y = y_flat[i]
magnitude = hypot(x, y)
rel_theta = atan2(y, x) - gt
r_flat[i] = magnitude * cos(rel_theta)
t_flat[i] = magnitude * sin(rel_theta)
self.x = None
self.y = None
elif axis == 'x':
y_flat = self.y.flat
z_flat = self.z.flat
for i in range(self.y.size):
gt = gt_flat[i]
y = y_flat[i]
z = z_flat[i]
magnitude = hypot(y, z)
rel_theta = atan2(z, y) - gt
r_flat[i] = magnitude * cos(rel_theta)
t_flat[i] = magnitude * sin(rel_theta)
self.z = self.x
self.x = None
self.y = None
else:
raise ValueError("axis must be 'z' or 'x'")
def rotate_about_x(self, deg):
"""
Rotate about the X axis.
deg: float (degrees)
Amount of rotation.
"""
if self.y is None:
raise AttributeError('rotate_about_x: no Y component')
if self.z is None:
raise AttributeError('rotate_about_x: no Z component')
sine = sin(radians(deg))
cosine = cos(radians(deg))
y_new = self.y*cosine - self.z*sine
self.z = self.z*cosine + self.y*sine
self.y = y_new
def rotate_about_y(self, deg):
"""
Rotate about the Y axis.
deg: float (degrees)
Amount of rotation.
"""
if self.x is None:
raise AttributeError('rotate_about_y: no X component')
if self.z is None:
raise AttributeError('rotate_about_y: no Z component')
sine = sin(radians(deg))
cosine = cos(radians(deg))
x_new = self.x*cosine - self.z*sine
self.z = self.z*cosine + self.x*sine
self.x = x_new
def rotate_about_z(self, deg):
"""
Rotate about the Z axis.
deg: float (degrees)
Amount of rotation.
"""
if self.x is None:
raise AttributeError('rotate_about_z: no X component')
if self.y is None:
raise AttributeError('rotate_about_z: no Y component')
sine = sin(radians(deg))
cosine = cos(radians(deg))
x_new = self.x*cosine - self.y*sine
self.y = self.y*cosine + self.x*sine
self.x = x_new
def promote(self):
""" Promote from N-dimensional to N+1 dimensional index space. """
shape = self.real_shape
if len(shape) > 2:
raise RuntimeError('Vector is 3D')
elif len(shape) > 1:
imax, jmax = shape
if self.x is not None: # x,y -> x,y,z
new_arr = numpy.zeros((imax, jmax, 1))
new_arr[:, :, 0] = self.x[:, :]
self.x = new_arr
new_arr = numpy.zeros((imax, jmax, 1))
new_arr[:, :, 0] = self.y[:, :]
self.y = new_arr
if self.z is not None:
                    new_arr = numpy.zeros((imax, jmax, 1))
new_arr[:, :, 0] = self.z[:, :]
self.z = new_arr
else:
self.z = numpy.zeros((imax, jmax, 1))
else: # r,t -> z,r,t (note index order change!)
new_arr = numpy.zeros((1, imax, jmax))
new_arr[0, :, :] = self.r[:, :]
self.r = new_arr
new_arr = numpy.zeros((1, imax, jmax))
new_arr[0, :, :] = self.t[:, :]
self.t = new_arr
if self.z is not None:
new_arr = numpy.zeros((1, imax, jmax))
new_arr[0, :, :] = self.z[:, :]
self.z = new_arr
else:
self.z = numpy.zeros((1, imax, jmax))
elif len(shape) > 0:
imax = shape[0]
if self.x is not None: # x -> x,y[,z]
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.x[:]
self.x = new_arr
if self.y is not None:
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.y[:]
self.y = new_arr
if self.z is not None:
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.z[:]
self.z = new_arr
else:
self.y = numpy.zeros((imax, 1))
else: # r,t -> r,t[,z]
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.r[:]
self.r = new_arr
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.t[:]
self.t = new_arr
if self.z is not None:
new_arr = numpy.zeros((imax, 1))
new_arr[:, 0] = self.z[:]
self.z = new_arr
else:
raise RuntimeError('Vector is empty!')
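    # Illustrative example (an assumption, not part of the original class): for a
    # 1-D vector with x = numpy.zeros(5) and y = numpy.zeros(5), calling promote()
    # reshapes both components to shape (5, 1); z stays None unless it was already
    # set, in which case it is reshaped to (5, 1) as well.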
def demote(self):
""" Demote from N-dimensional to N-1 dimensional index space. """
shape = self.real_shape
ghosts = self._ghosts
if len(shape) > 2:
imax, jmax, kmax = shape
imx = imax - (ghosts[0] + ghosts[1])
jmx = jmax - (ghosts[2] + ghosts[3])
kmx = kmax - (ghosts[4] + ghosts[5])
if imx == 1:
if self.x is not None:
new_arr =
|
numpy.zeros((jmax, kmax))
|
numpy.zeros
|
##########################################################################
# QN-S3VM BFGS optimizer for semi-supervised support vector machines.
#
# This implementation provides an L-BFGS optimization scheme
# for semi-supervised support vector machines. Details can be found in:
#
# <NAME>, <NAME>, <NAME>, <NAME>, Sparse quasi-
# Newton optimization for semi-supervised support vector ma-
# chines, in: Proc. of the 1st Int. Conf. on Pattern Recognition
# Applications and Methods, 2012, pp. 45-54.
#
# Version: 0.1 (September, 2012)
#
# Bugs: Please send any bugs to "f DOT gieseke AT uni-oldenburg.de"
#
#
# Copyright (C) 2012 <NAME>, <NAME>, <NAME>, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# INSTALLATION and DEPENDENCIES
#
# The module should work out of the box, given Python and Numpy (http://numpy.scipy.org/)
# and Scipy (http://scipy.org/) installed correctly.
#
# We have tested the code on Ubuntu 12.04 (32 Bit) with Python 2.7.3, Numpy 1.6.1,
# and Scipy 0.9.0. Installing these packages on a Ubuntu- or Debian-based systems
# can be done via "sudo apt-get install python python-numpy python-scipy".
#
#
# RUNNING THE EXAMPLES
#
# For a description of the data sets, see the paper mentioned above and the references
# therein. Running the command "python qns3vm.py" should yield an output similar to:
#
# Sparse text data set instance
# Number of labeled patterns: 48
# Number of unlabeled patterns: 924
# Number of test patterns: 974
# Time needed to compute the model: 0.775886058807 seconds
# Classification error of QN-S3VM: 0.0667351129363
#
# Dense gaussian data set instance
# Number of labeled patterns: 25
# Number of unlabeled patterns: 225
# Number of test patterns: 250
# Time needed to compute the model: 0.464584112167 seconds
# Classification error of QN-S3VM: 0.012
#
# Dense moons data set instance
# Number of labeled patterns: 5
# Number of unlabeled patterns: 495
# Number of test patterns: 500
# Time needed to compute the model: 0.69714307785 seconds
# Classification error of QN-S3VM: 0.0
##########################################################################
import array as arr
import math
import copy as cp
import logging
import numpy as np
from numpy import multiply, zeros, float64, array, newaxis, ndarray, ones
import operator
from time import time
import sys
from scipy import optimize
import scipy.sparse.csc as csc
from scipy import sparse
import scipy
import warnings
from ipdb import set_trace
warnings.simplefilter('error')
__author__ = '<NAME>, <NAME>, <NAME>, <NAME>'
__version__ = '0.1'
class QN_S3VM:
"""
L-BFGS optimizer for semi-supervised support vector machines (S3VM).
"""
def __init__(self, X_l, L_l, X_u, random_generator=None, ** kw):
"""
Initializes the model. Detects automatically if dense or sparse data is provided.
Keyword arguments:
X_l -- patterns of labeled part of the data
L_l -- labels of labeled part of the data
X_u -- patterns of unlabeled part of the data
random_generator -- particular instance of a random_generator (default None)
kw -- additional parameters for the optimizer
lam -- regularization parameter lambda (default 1, must be a float > 0)
lamU -- cost parameter that determines influence of unlabeled patterns (default 1, must be float > 0)
sigma -- kernel width for RBF kernel (default 1.0, must be a float > 0)
kernel_type -- "Linear" or "RBF" (default "Linear")
numR -- implementation of subset of regressors. If None is provided, all patterns are used
(no approximation). Must fulfill 0 <= numR <= len(X_l) + len(X_u) (default None)
            estimate_r -- desired ratio of positive and negative assignments for
                          unlabeled patterns (-1.0 <= estimate_r <= 1.0). If estimate_r=None,
                          then L_l is used to estimate this ratio (in case len(L_l) >=
                          minimum_labeled_patterns_for_estimate_r); otherwise estimate_r = 0.0 is used
                          (default None)
minimum_labeled_patterns_for_estimate_r -- see above (default 0)
BFGS_m -- BFGS parameter (default 50)
BFGS_maxfun -- BFGS parameter, maximum number of function calls (default 500)
BFGS_factr -- BFGS parameter (default 1E12)
BFGS_pgtol -- BFGS parameter (default 1.0000000000000001e-05)
"""
self.__model = None
# Initiate model for sparse data
if isinstance(X_l, csc.csc_matrix):
self.__data_type = "sparse"
self.__model = QN_S3VM_Sparse(
X_l, L_l, X_u, random_generator, ** kw)
# Initiate model for dense data
elif (isinstance(X_l[0], list)) or (isinstance(X_l[0], ndarray)):
self.__data_type = "dense"
self.__model = QN_S3VM_Dense(
X_l, L_l, X_u, random_generator, ** kw)
# Data format unknown
if self.__model is None:
logging.info("Data format for patterns is unknown.")
sys.exit(0)
def train(self):
"""
Training phase.
Returns:
The computed partition for the unlabeled patterns.
"""
return self.__model.train()
def getPredictions(self, X, real_valued=False):
"""
Computes the predicted labels for a given set of patterns
Keyword arguments:
X -- The set of patterns
real_valued -- If True, then the real prediction values are returned
Returns:
The predictions for the list X of patterns.
"""
        return self.__model.getPredictions(X, real_valued=real_valued)
def predict(self, x):
"""
Predicts a label (-1 or +1) for the pattern
Keyword arguments:
x -- The pattern
Returns:
The prediction for x.
"""
return self.__model.predict(x)
def predictValue(self, x):
"""
Computes f(x) for a given pattern (see Representer Theorem)
Keyword arguments:
x -- The pattern
Returns:
The (real) prediction value for x.
"""
return self.__model.predictValue(x)
def getNeededFunctionCalls(self):
"""
Returns the number of function calls needed during
the optimization process.
"""
return self.__model.getNeededFunctionCalls()
def mygetPreds(self, X, real_valued=False):
return self.__model.mygetPreds(X, real_valued)
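# --------------------------------------------------------------------------
# Illustrative usage sketch (an assumption based on the docstrings above, not
# part of the original module); the toy data and the seed are made up:
#
#   import random
#   rng = random.Random(0)
#   X_l = [[0.0, 0.0], [1.0, 1.0]]              # labeled patterns (dense lists)
#   L_l = [-1, 1]                               # labels in {-1, +1}
#   X_u = [[0.1, 0.2], [0.9, 0.8]]              # unlabeled patterns
#   model = QN_S3VM(X_l, L_l, X_u, rng, lam=1.0, lamU=1.0,
#                   kernel_type="RBF", sigma=0.5)
#   partition = model.train()                   # labels assigned to X_u
#   preds = model.getPredictions(X_u)           # -1/+1 predictions for patterns
# --------------------------------------------------------------------------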
##########################################################################
##########################################################################
class QN_S3VM_Dense:
"""
BFGS optimizer for semi-supervised support vector machines (S3VM).
Dense Data
"""
parameters = {
'lam': 1,
'lamU': 1,
'sigma': 1,
'kernel_type': "Linear",
'numR': None,
'estimate_r': None,
'minimum_labeled_patterns_for_estimate_r': 0,
'BFGS_m': 50,
'BFGS_maxfun': 500,
'BFGS_factr': 1E12,
'BFGS_pgtol': 1.0000000000000001e-05,
'BFGS_verbose': -1,
'surrogate_s': 3.0,
'surrogate_gamma': 20.0,
'breakpoint_for_exp': 500
}
def __init__(self, X_l, L_l, X_u, random_generator, ** kw):
"""
        Initializes the S3VM optimizer.
"""
self.__random_generator = random_generator
self.__X_l, self.__X_u, self.__L_l = X_l, X_u, L_l
assert len(X_l) == len(L_l)
self.__X = cp.deepcopy(self.__X_l)
self.__X.extend(cp.deepcopy(self.__X_u))
self.__size_l, self.__size_u, self.__size_n = len(
X_l), len(X_u), len(X_l) + len(X_u)
self.__matrices_initialized = False
self.__setParameters(** kw)
self.__kw = kw
def train(self):
"""
Training phase.
Returns:
The computed partition for the unlabeled patterns.
"""
indi_opt = self.__optimize()
self.__recomputeModel(indi_opt)
predictions = self.__getTrainingPredictions(self.__X)
return predictions
def mygetPreds(self, X, real_valued=False):
KNR = self.__kernel.computeKernelMatrix(X, self.__Xreg)
KNU_bar = self.__kernel.computeKernelMatrix(
X, self.__X_u_subset, symmetric=False)
KNU_bar_horizontal_sum = (
1.0 / len(self.__X_u_subset)) * KNU_bar.sum(axis=1)
KNR = KNR - KNU_bar_horizontal_sum[newaxis].T - \
self.__KU_barR_vertical_sum[newaxis] + self.__KU_barU_bar_sum
preds = KNR @ self.__c[0:self.__dim - 1, :] + \
self.__c[self.__dim - 1, :]
return preds
def getPredictions(self, X, real_valued=False):
"""
Computes the predicted labels for a given set of patterns
Keyword arguments:
X -- The set of patterns
real_valued -- If True, then the real prediction values are returned
Returns:
The predictions for the list X of patterns.
"""
KNR = self.__kernel.computeKernelMatrix(X, self.__Xreg)
KNU_bar = self.__kernel.computeKernelMatrix(
X, self.__X_u_subset, symmetric=False)
KNU_bar_horizontal_sum = (
1.0 / len(self.__X_u_subset)) * KNU_bar.sum(axis=1)
KNR = KNR - KNU_bar_horizontal_sum[newaxis].T - \
self.__KU_barR_vertical_sum[newaxis] + self.__KU_barU_bar_sum
preds = KNR @ self.__c[0:self.__dim - 1, :] + \
self.__c[self.__dim - 1, :]
        if real_valued:
return preds.flatten(order='C').tolist()
else:
return np.sign(np.sign(preds) + 0.1).flatten(order='C').tolist()
def predict(self, x):
"""
Predicts a label for the pattern
Keyword arguments:
x -- The pattern
Returns:
The prediction for x.
"""
return self.getPredictions([x], real_valued=False)[0]
def predictValue(self, x):
"""
Computes f(x) for a given pattern (see Representer Theorem)
Keyword arguments:
x -- The pattern
Returns:
The (real) prediction value for x.
"""
return self.getPredictions([x], real_valued=True)[0]
def getNeededFunctionCalls(self):
"""
Returns the number of function calls needed during
the optimization process.
"""
return self.__needed_function_calls
def __setParameters(self, ** kw):
for attr, val in list(kw.items()):
self.parameters[attr] = val
self.__lam = float(self.parameters['lam'])
assert self.__lam > 0
self.__lamU = float(self.parameters['lamU'])
assert self.__lamU > 0
        self.__lam_Uvec = [float(self.__lamU) * i for i in [0, 0.000001, 0.0001, 0.01, 0.1, 0.5, 1]]
self.__sigma = float(self.parameters['sigma'])
assert self.__sigma > 0
self.__kernel_type = str(self.parameters['kernel_type'])
if self.parameters['numR'] is not None:
self.__numR = int(self.parameters['numR'])
assert (self.__numR <= len(self.__X)) and (self.__numR > 0)
else:
self.__numR = len(self.__X)
self.__regressors_indices = sorted(
self.__random_generator.sample(
list(
range(
0, len(
self.__X))), self.__numR))
self.__dim = self.__numR + 1 # add bias term b
self.__minimum_labeled_patterns_for_estimate_r = float(
self.parameters['minimum_labeled_patterns_for_estimate_r'])
# If reliable estimate is available or can be estimated, use it, otherwise
# assume classes to be balanced (i.e., estimate_r=0.0)
if self.parameters['estimate_r'] is not None:
self.__estimate_r = float(self.parameters['estimate_r'])
elif len(self.__L_l) >= self.__minimum_labeled_patterns_for_estimate_r:
self.__estimate_r = (1.0 / len(self.__L_l)) * np.sum(self.__L_l)
else:
self.__estimate_r = 0.0
self.__BFGS_m = int(self.parameters['BFGS_m'])
self.__BFGS_maxfun = int(self.parameters['BFGS_maxfun'])
self.__BFGS_factr = float(self.parameters['BFGS_factr'])
# This is a hack for 64 bit systems (Linux). The machine precision
# is different for the BFGS optimizer (Fortran code) and we fix this
# by:
is_64bits = sys.maxsize > 2**32
if is_64bits:
logging.debug("64-bit system detected, modifying BFGS_factr!")
self.__BFGS_factr = 0.000488288 * self.__BFGS_factr
self.__BFGS_pgtol = float(self.parameters['BFGS_pgtol'])
self.__BFGS_verbose = int(self.parameters['BFGS_verbose'])
self.__surrogate_gamma = float(self.parameters['surrogate_gamma'])
self.__s = float(self.parameters['surrogate_s'])
self.__breakpoint_for_exp = float(
self.parameters['breakpoint_for_exp'])
self.__b = self.__estimate_r
# size of unlabeled patterns to estimate mean (used for balancing
# constraint)
self.__max_unlabeled_subset_size = 1000
def __optimize(self):
logging.debug("Starting optimization with BFGS ...")
self.__needed_function_calls = 0
self.__initializeMatrices()
# starting point
c_current = zeros(self.__dim, float64)
c_current[self.__dim - 1] = self.__b
# Annealing sequence.
for i in range(len(self.__lam_Uvec)):
self.__lamU = self.__lam_Uvec[i]
# crop one dimension (in case the offset b is fixed)
c_current = c_current[:self.__dim - 1]
c_current = self.__localSearch(c_current)
# reappend it if needed
c_current = np.append(c_current, self.__b)
f_opt = self.__getFitness(c_current)
return c_current, f_opt
def __localSearch(self, start):
c_opt, f_opt, d = optimize.fmin_l_bfgs_b(self.__getFitness, start,
m=self.__BFGS_m, fprime=self.__getFitness_Prime,
maxfun=self.__BFGS_maxfun, factr=self.__BFGS_factr,
pgtol=self.__BFGS_pgtol, iprint=self.__BFGS_verbose)
self.__needed_function_calls += int(d['funcalls'])
return c_opt
def __initializeMatrices(self):
if self.__matrices_initialized == False:
logging.debug("Initializing matrices...")
# Initialize labels
x = arr.array('i')
for l in self.__L_l:
x.append(l)
self.__YL = array(x, dtype=float64)
self.__YL = self.__YL.transpose()
# Initialize kernel matrices
if (self.__kernel_type == "Linear"):
self.__kernel = LinearKernel()
elif (self.__kernel_type == "RBF"):
self.__kernel = RBFKernel(self.__sigma)
self.__Xreg = (
array(
self.__X)[
self.__regressors_indices,
:].tolist())
self.__KLR = self.__kernel.computeKernelMatrix(
self.__X_l, self.__Xreg, symmetric=False)
self.__KUR = self.__kernel.computeKernelMatrix(
self.__X_u, self.__Xreg, symmetric=False)
#self.__KNR = cp.deepcopy(bmat([[self.__KLR], [self.__KUR]]))
self.__KNR = cp.deepcopy(np.vstack((self.__KLR, self.__KUR)))
self.__KRR = self.__KNR[self.__regressors_indices, :]
# Center patterns in feature space (with respect to approximated
# mean of unlabeled patterns in the feature space)
subset_unlabled_indices = sorted(
self.__random_generator.sample(
list(
range(
0, len(
self.__X_u))), min(
self.__max_unlabeled_subset_size, len(
self.__X_u))))
self.__X_u_subset = (
array(
self.__X_u)[
subset_unlabled_indices,
:].tolist())
self.__KNU_bar = self.__kernel.computeKernelMatrix(
self.__X, self.__X_u_subset, symmetric=False)
self.__KNU_bar_horizontal_sum = (
1.0 / len(self.__X_u_subset)) * self.__KNU_bar.sum(axis=1)
self.__KU_barR = self.__kernel.computeKernelMatrix(
self.__X_u_subset, self.__Xreg, symmetric=False)
self.__KU_barR_vertical_sum = (
1.0 / len(self.__X_u_subset)) * self.__KU_barR.sum(axis=0)
self.__KU_barU_bar = self.__kernel.computeKernelMatrix(
self.__X_u_subset, self.__X_u_subset, symmetric=False)
self.__KU_barU_bar_sum = (
1.0 / (len(self.__X_u_subset)))**2 * self.__KU_barU_bar.sum()
self.__KNR = self.__KNR - self.__KNU_bar_horizontal_sum - \
self.__KU_barR_vertical_sum + self.__KU_barU_bar_sum
self.__KRR = self.__KNR[self.__regressors_indices, :]
self.__KLR = self.__KNR[list(range(0, len(self.__X_l))), :]
self.__KUR = self.__KNR[list(
range(len(self.__X_l), len(self.__X))), :]
self.__matrices_initialized = True
def __getFitness(self, c):
# Check whether the function is called from the bfgs solver
# (that does not optimize the offset b) or not
if len(c) == self.__dim - 1:
c = np.append(c, self.__b)
c = array(c)[newaxis]
b = c[:, self.__dim - 1].T
c_new = c[:, 0:self.__dim - 1].T
preds_labeled = self.__surrogate_gamma * \
(1.0 - multiply(self.__YL, self.__KLR @ c_new + b))
preds_unlabeled = self.__KUR @ c_new + b
# This vector has a "one" for each "numerically instable" entry;
# "zeros" for "good ones".
preds_labeled_conflict_indicator = np.sign(
np.sign(preds_labeled / self.__breakpoint_for_exp - 1.0) + 1.0)
# This vector has a one for each good entry and zero otherwise
preds_labeled_good_indicator = (-1) * \
(preds_labeled_conflict_indicator - 1.0)
preds_labeled_for_conflicts = multiply(
preds_labeled_conflict_indicator, preds_labeled)
preds_labeled = multiply(preds_labeled, preds_labeled_good_indicator)
# Compute values for good entries
preds_labeled_log_exp = np.log(1.0 + np.exp(preds_labeled))
# Compute values for instable entries
preds_labeled_log_exp = multiply(
preds_labeled_good_indicator,
preds_labeled_log_exp)
# Replace critical values with values
preds_labeled_final = preds_labeled_log_exp + preds_labeled_for_conflicts
term1 = (1.0 / (self.__surrogate_gamma * self.__size_l)) * \
np.sum(preds_labeled_final)
preds_unlabeled_squared = multiply(preds_unlabeled, preds_unlabeled)
term2 = (float(self.__lamU) / float(self.__size_u)) * \
np.sum(np.exp(-self.__s * preds_unlabeled_squared))
term3 = self.__lam * (c_new.T @ self.__KRR @ c_new)
return (term1 + term2 + term3)[0, 0]
def __getFitness_Prime(self, c):
# Check whether the function is called from the bfgs solver
# (that does not optimize the offset b) or not
if len(c) == self.__dim - 1:
c = np.append(c, self.__b)
c = array(c)[newaxis]
b = c[:, self.__dim - 1].T
c_new = c[:, 0:self.__dim - 1].T
preds_labeled = self.__surrogate_gamma * \
(1.0 - multiply(self.__YL, self.__KLR @ c_new + b))
preds_unlabeled = (self.__KUR @ c_new + b)
# This vector has a "one" for each "numerically instable" entry;
# "zeros" for "good ones".
preds_labeled_conflict_indicator = np.sign(
np.sign(preds_labeled / self.__breakpoint_for_exp - 1.0) + 1.0)
# This vector has a one for each good entry and zero otherwise
preds_labeled_good_indicator = (-1) * \
(preds_labeled_conflict_indicator - 1.0)
preds_labeled =
|
multiply(preds_labeled, preds_labeled_good_indicator)
|
numpy.multiply
|
import math
import cv2
import numpy as np
#-----------------------------#
#   Compute the scale factor for each
#   resize of the original input image
#-----------------------------#
def calculateScales(img):
pr_scale = 1.0
h,w,_ = img.shape
    #--------------------------------------------#
    #   Pin the image to a fixed reference size:
    #   if the shorter side is larger than 500, scale so it becomes 500;
    #   if the longer side is smaller than 500, scale so it becomes 500
    #--------------------------------------------#
if min(w,h)>500:
pr_scale = 500.0/min(h,w)
w = int(w*pr_scale)
h = int(h*pr_scale)
elif max(w,h)<500:
pr_scale = 500.0/max(h,w)
w = int(w*pr_scale)
h = int(h*pr_scale)
    #------------------------------------------------#
    #   Build the scales of the image pyramid, stopping
    #   before the width/height drop below 12
    #------------------------------------------------#
scales = []
factor = 0.709
factor_count = 0
minl = min(h,w)
while minl >= 12:
scales.append(pr_scale*pow(factor, factor_count))
minl *= factor
factor_count += 1
return scales
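#--------------------------------------------------------------#
#   Worked example (an assumption, not part of the original
#   file): for an input image with h, w = 800, 1000,
#   min(w, h) = 800 > 500, so pr_scale = 500/800 = 0.625 and
#   the returned scales are 0.625 * 0.709**k for k = 0, 1, 2, ...
#   until the shorter rescaled side would drop below 12 pixels.
#--------------------------------------------------------------#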
#-----------------------------#
#   Adjust rectangles into squares
#-----------------------------#
def rect2square(rectangles):
w = rectangles[:,2] - rectangles[:,0]
h = rectangles[:,3] - rectangles[:,1]
l =
|
np.maximum(w,h)
|
numpy.maximum
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
------------------------------------------------------------
% File: template_2_printable.py
% Description:
% Author: J.G.Aguado
% Email: <EMAIL>
% Date of creation: 8/19/2021
------------------------------------------------------------
"""
import datetime
import numpy as np
import math
import cairo
import yaml
def frame(config, spice):
surface = cairo.ImageSurface(cairo.FORMAT_RGB24,
config['general']['size'][0],
config['general']['size'][1])
pad = 2
grad = config['general']['color grad']
r, g, b = spice['color']
r0, g0, b0 = r + grad, g + grad, b + grad
r1, g1, b1 = r - grad, g - grad, b - grad
ctx = cairo.Context(surface)
ctx.scale(1, 1)
ctx.set_source_rgb(0, 0, 0)
ctx.rectangle(0, 0, config['general']['size'][0], config['general']['size'][1])
ctx.fill()
ctx.rectangle(pad, pad, config['general']['size'][0]-2*pad, config['general']['size'][1]-2*pad)
pattern = cairo.LinearGradient(0, 0, 0, config['general']['size'][1])
pattern.add_color_stop_rgb(0, r0 / 255, g0 / 255, b0 / 255)
pattern.add_color_stop_rgb(1, r1 / 255, g1 / 255, b1 / 255)
ctx.set_source(pattern)
ctx.fill()
roundrect(ctx, config['general']['pad'], config['general']['pad'],
config['general']['size'][0] - 2 * config['general']['pad'],
config['general']['size'][1] - 2 * config['general']['pad'],
config['general']['radius'])
return ctx, surface
def roundrect(ctx, x, y, width, height, rad=10, thickness=2, color=[255,255,255]):
r,g,b = [x/255 for x in color]
ctx.arc(x + rad, y + rad, rad,
math.pi, 3 * math.pi / 2)
ctx.arc(x + width - rad, y + rad, rad,
3 * math.pi / 2, 0)
ctx.arc(x + width - rad, y + height - rad,
rad, 0, math.pi / 2)
ctx.arc(x + rad, y + height - rad, rad,
math.pi / 2, math.pi)
ctx.close_path()
    ctx.set_source_rgb(r, g, b)
    ctx.fill_preserve()
    ctx.set_source_rgba(r, g, b)
    ctx.set_line_width(thickness)
    ctx.stroke()
def draw_image(ctx, image, xc, yc, width, height, angle=None):
ctx.save()
image_surface = cairo.ImageSurface.create_from_png(image)
w = image_surface.get_width()
h = image_surface.get_height()
scale_xy = min(width/w, height/h)
w = scale_xy * w
h = scale_xy * h
""" Best rotation """
width_rot = []
height_rot = []
comb = []
for theta in np.arange(0,91):
theta = theta*math.pi/180
width = w * abs(np.cos(theta)) + h * abs(np.sin(theta))
height = w * abs(np.sin(theta)) + h * abs(np.cos(theta))
width_rot.append(width)
height_rot.append(height)
comb.append(abs(1-abs(width/ height)))
comb = np.array(comb)
if angle is None:
angle = np.argmin(comb)
print(angle)
""" Computations """
angle = angle * math.pi / 180
alpha = math.atan((h/2)/ (w/2))
r = math.hypot(w/2, h/2)
x, y = xc - r*np.cos(alpha), yc - r*np.sin(alpha)
beta = alpha + angle
xc1 = x + r * np.cos(beta)
yc1 = y + r * np.sin(beta)
x1, y1 = x + xc -xc1, y + yc - yc1
# ****** Aux plotting lines ******
# ctx.move_to(0, 0)
# ctx.line_to(xc, yc)
# ctx.set_source_rgb(1, 0, 0)
# ctx.stroke()
#
# ctx.move_to(0, 0)
# ctx.line_to(x, y)
# ctx.set_source_rgb(0, 1, 0)
# ctx.stroke()
#
# ctx.move_to(x, y)
# ctx.line_to(xc1, yc1)
# ctx.set_source_rgb(0, 0, 1)
# ctx.stroke()
#
# ctx.move_to(xc1, yc1)
# ctx.line_to(x1, y1)
# ctx.set_source_rgb(1, 0, 1)
# ctx.stroke()
""" Translate """
ctx.translate(x1,y1)
""" Rotate """
ctx.rotate(angle)
""" Scale & write """
ctx.scale(scale_xy, scale_xy)
ctx.set_source_surface(image_surface, 0, 0)
ctx.paint()
ctx.restore()
return
def names(ctx, config, spice):
ctx.set_source_rgb(0, 0, 0)
ctx.set_font_size(config['text']['size'])
ctx.select_font_face(config['text']['type'],
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_NORMAL)
xc, yc, w, h = config['text']['x'], config['text']['y'],\
config['text']['width'], config['text']['height']
for string, ii in zip([spice['es'], spice['en'], spice['de']], [-1, 0, 1]):
text_props = ctx.text_extents(string)
ts = w/text_props.width
if ts < 1:
size = config['text']['size'] - ((1-ts)*config['text']['scale'])
elif ts > 1:
size = config['text']['size'] + 2
ctx.set_font_size(size)
text_props = ctx.text_extents(string)
nx = -text_props.width/2
ny = text_props.height/2
x = xc + nx
y = yc + ii* h/3 + ny
ctx.move_to(x, y)
ctx.show_text(string)
def main(path, simulation=True):
with open(path, 'rt', encoding='utf8') as yaml_file:
        config = yaml.safe_load(yaml_file)
""" Individual labels generator """
labels = []
for spice_type in config['spices']:
for spice in config['spices'][spice_type]['items']:
ctx, surface = frame(config, config['spices'][spice_type])
draw_image(ctx, spice['img'],
config['image']['x'],
config['image']['y'],
config['image']['width'],
config['image']['height'])
names(ctx, config, spice)
            output = '.\\labels\\' + spice['es'] + '_label.png'
surface.write_to_png(output)
labels.append(output)
""" Gropu by 24 labels """
pad = 25
w, h = 2480, 3508
width, height = w-2*pad, h-2*pad
a4 = cairo.ImageSurface(cairo.FORMAT_RGB24,w, h)
ctx = cairo.Context(a4)
ctx.scale(1, 1)
ctx.set_source_rgb(1, 1, 1)
ctx.rectangle(0, 0, w, h)
ctx.fill()
idx = 0
for column in np.arange(pad, width, width/3):
for row in
|
np.arange(pad, height, height/8)
|
numpy.arange
|
# -*- coding: utf-8 -*-
# Wrapper for easy use of SciKit learn models
from libs.base import *
import numpy as np
import pickle
## OK, this could just work, but it's not tested
## And we should use some kind of *args to pass multiple arguments...
class SKLModel:
def __init__(self, value = {}, skClass="linear_model", skMethod="SGDRegressor"):
if (value == {}):
skClass = "linear_model"
skM = self._skimport("sklearn."+skClass)
self.Model = getattr(skM, "SGDRegressor")()
else:
self.Model = pickle.loads(value["picklestr"])
def get_dict(self):
""" Return a dictionary with key:"picklestring" and \
value:pickle.dumps(model)
Used to store the model as a pickle object in database.
"""
s = pickle.dumps(self.Model)
return {"picklestr":s}
def update(self, X=np.array([[]]), y=np.array([])):
""" Update the linear model.
:param np.array X: Multi dimensional array (matrix) with n rows of p features.
:param np.array y: Array with n targets
n can be 1 for incremental updates (default)
"""
self.Model.partial_fit(X,y)
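    # Illustrative usage sketch (an assumption, not part of the wrapper):
    #   m = SKLModel()                                           # defaults to an SGDRegressor
    #   m.update(X=np.array([[1.0, 2.0]]), y=np.array([0.5]))    # one incremental fit step
    #   state = m.get_dict()                                     # {"picklestr": ...} for storage
    #   m2 = SKLModel(value=state)                               # restore from the pickle string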
def predict(self, X=
|
np.array([[]])
|
numpy.array
|
# -*- encoding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import logging
import pandas as pd
import numpy as np
import numpy.testing as nptest
import pytest
from pydse.arma import ARMA, ARMAError
from pydse import arma
from pydse import data
__author__ = "<NAME>"
__copyright__ = "Blue Yonder"
__license__ = "new BSD"
logging.basicConfig(level=logging.WARN)
def test_arma_construction():
AR = ([1, .5, .3, 0, .2, .1, 0, .2, .05, 1, .5, .3], [3, 2, 2])
MA = ([1, .2, 0, .1, 0, 0, 1, .3], [2, 2, 2])
X = ([1, 2, 3, 4, 5, 6], [1, 2, 3])
# Check construction
ARMA(A=AR, B=MA)
ARMA(A=AR)
ARMA(A=AR, B=MA, C=X)
MA = ([1, 0.2, 0, .1], [2, 2, 1])
X = ([1, 2, 3, 4, 5, 6], [2, 1, 3])
with pytest.raises(ARMAError):
ARMA(A=AR, B=MA)
ARMA(A=AR, C=X)
AR = ([1, .5, .3, 0, .2, .1, 0, .2, .05, 1, .5, .3], [3, 2, 2])
MA = ([1, 2, 0, .1, 0, 0, 1, .3], [2, 2, 2])
TREND = [1, 2]
ARMA(A=AR, B=MA, TREND=TREND)
TREND = [[1, 2], [3, 4]]
ARMA(A=AR, B=MA, TREND=TREND)
TREND = [[1, 2], [3, 4], [4, 5]]
ARMA(A=AR, B=MA, TREND=TREND)
TREND = [1, 2, 3]
# give a (3,) array while expecting a (2,) array as p = 2
with pytest.raises(ARMAError):
ARMA(A=AR, B=MA, TREND=TREND)
TREND = [[1, 2, 3], [1, 2, 3], [1, 2, 3]]
    # give a (3, 3) array while expecting a (X, 2) array as p = 2
with pytest.raises(ARMAError):
ARMA(A=AR, B=MA, TREND=TREND)
TREND = [[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[1, 2, 3], [1, 2, 3], [1, 2, 3]]]
    # give a (3, 3, 3) array while expecting a 2-d matrix
with pytest.raises(ARMAError):
ARMA(A=AR, B=MA, TREND=TREND)
def test_simulate_arma():
AR = (np.array([1, .5, .3, 0, .2, .1, 0, .2, .05, 1, .5, .3]),
|
np.array([3, 2, 2])
|
numpy.array
|
import argparse
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import GroupShuffleSplit, StratifiedShuffleSplit
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from data import load_wiki_data, load_legal_data, load_glove
from model import Model
def shuffleSplit(ss, X, y, groups):
#encompass both group and stratified shuffle splitting
if groups is not None:
return ss.split(X, y, groups)
else:
return ss.split(X, y)
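# Illustrative call (an assumption, not part of the original script):
#   gss = GroupShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
#   X = np.arange(8).reshape(4, 2); y = np.array([0, 0, 1, 1]); groups = np.array([1, 1, 2, 2])
#   for train_idx, test_idx in shuffleSplit(gss, X, y, groups):
#       print(train_idx, test_idx)          # pass groups=None for StratifiedShuffleSplit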
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataset", help="Use 'legal' (default) or 'wiki' dataset", choices=['legal', 'wiki'], default='legal')
parser.add_argument("--baseline", help="Use baseline classifier", action="store_true")
parser.add_argument("-s", "--splits", help="Number of cross-validations", type=int, default=1)
parser.add_argument("-t", "--test_size", help="Proportion of test split", type=float, default=0.2)
parser.add_argument("-g", "--glove_size", help="GloVe embedding size", type=int, choices=[50, 100, 200, 300], default=50)
parser.add_argument("-n", "--hidden_size", help="RNN hidden weights size", type=int, default=8)
parser.add_argument("-l", "--lr", help="RNN learning rate", type=float, default=0.01)
parser.add_argument("-r", "--dropout", help="RNN dropout rate", type=float, default=0.1)
parser.add_argument("-c", "--cell_type", help="RNN cell type; 'g' for GRU (default) or 'l' for LSTM", choices=['g', 'l'], default='g')
parser.add_argument("-b", "--batch_size", help="Training batch size", type=int, default=512)
parser.add_argument("-e", "--epochs", help="Training epochs", type=int, default=2)
parser.add_argument("-o", "--random_seed", help="Global random seed (cross-validation split, batch shuffling and Tensorflow initializations and operations)", type=int, default=None)
parser.add_argument("-v", "--visualize", help="Visualize output as HTML; use 's' for 0.1%% samples or 'e' for all errors", choices=[None, 's', 'e'], default=None)
args = parser.parse_args()
if args.dataset == 'wiki':
#train_set, dev_set, test_set = load_wiki_data()
X, y, groups = load_wiki_data(mode='cv')
ss = GroupShuffleSplit(n_splits=args.splits, test_size=args.test_size, random_state=args.random_seed)
elif args.dataset == 'legal':
#train_set, dev_set, test_set = load_legal_data()
X, y = load_legal_data(mode='cv')
groups = None
ss = StratifiedShuffleSplit(n_splits=args.splits, test_size=args.test_size, random_state=args.random_seed)
if args.random_seed is not None:
|
np.random.seed(args.random_seed)
|
numpy.random.seed
|
import os, sys
sys.path.insert(0, './')
import random
import numpy as np
from skimage.util import random_noise
from PIL import Image, ImageFilter
import cv2
from data.dataloader import Dataset
from data.dataloader import DataLoader
import copy
from tools import crash_on_ipy
'''
This is a super-resolution data loader that picks the lr_img from a triple
(1M, 2M, 4M) and uses hr_img from screen recordings with USM processing.
'''
class TrainDataset(Dataset):
def __init__(self, tmp1, tmp2, scale=2, crop_size=128, cfg=None):
super(TrainDataset, self).__init__()
self.crop_size = crop_size
self.scale = scale
self.cfg = cfg
hr_path_file = '/workspace/nas_mengdongwei/dataset/lol/my_lol_frames_usm_ps_4k/train_crop_paths.txt'
lr_1M_path_file = '/workspace/nas_mengdongwei/dataset/lol/my_lol_frames_1080_1M/crop_1M_frames_paths.txt'
lr_2M_path_file = '/workspace/nas_mengdongwei/dataset/lol/my_lol_frames_1080_2M/crop_2M_frames_paths.txt'
lr_4M_path_file = '/workspace/nas_mengdongwei/dataset/lol/my_lol_frames_1080_4M/crop_4M_frames_paths.txt'
self.hr_paths = [x.strip() for x in open(hr_path_file, 'r').readlines()]
self.lr_1M_paths = [x.strip() for x in open(lr_1M_path_file, 'r').readlines()]
self.lr_2M_paths = [x.strip() for x in open(lr_2M_path_file, 'r').readlines()]
self.lr_4M_paths = [x.strip() for x in open(lr_4M_path_file, 'r').readlines()]
assert(len(self.hr_paths) == len(self.lr_1M_paths))
assert(len(self.hr_paths) == len(self.lr_2M_paths))
assert(len(self.hr_paths) == len(self.lr_4M_paths))
self.hr_paths.sort()
self.lr_1M_paths.sort()
self.lr_2M_paths.sort()
self.lr_4M_paths.sort()
self.num_sample = len(self.hr_paths)
def _read_pair_img(self, index):
if random.random() > 0.7:
lr_img = Image.open(self.lr_4M_paths[index])
elif random.random() > 0.1:
lr_img = Image.open(self.lr_2M_paths[index])
else:
lr_img = Image.open(self.lr_1M_paths[index])
if index <= 43308:
lr_img = lr_img.resize((240, 240))
hr_img = Image.open(self.hr_paths[index])
return hr_img, lr_img
def _lr_add_noise(self, lr):
if random.random() > 0.7:
lr_mode = lr.mode
lr_np = np.array(lr)
if self.cfg.dataset.add_gaussian_noise:
if random.random() > 0.5:
lr_np = random_noise(lr_np, mode='gaussian', clip=True)
else:
lr_np = random_noise(lr_np, mode='poisson', clip=True)
if self.cfg.dataset.add_sp_noise:
lr_np = random_noise(lr_np, mode='s&p', clip=True)
lr_np = lr_np * 255
lr_np = lr_np.astype(np.uint8)
lr = Image.fromarray(lr_np)
if random.random() > 1:
lr_np = np.array(lr)
lr_np = jpeg_compress(lr_np)
lr_np = lr_np.astype(np.uint8)
lr = Image.fromarray(lr_np)
return lr
def __getitem__(self, index):
while(True):
try:
                index = random.randint(43309, self.num_sample - 1)
hr, lr = self._read_pair_img(index)
#lr = self._lr_add_noise(lr)
lr = lr.convert('YCbCr').split()[0]
lr = np.expand_dims(np.array(lr), -1)
hr = hr.convert('YCbCr').split()[0]
hr = np.expand_dims(np.array(hr), -1)
hr, lr = random_crop(hr, lr, size=self.crop_size, scale=self.scale)
hr, lr = random_flip_and_rotate(hr, lr)
cubic = cv2.resize(lr, (hr.shape[1], hr.shape[0]), interpolation=cv2.INTER_LINEAR)
cubic =
|
np.expand_dims(cubic, -1)
|
numpy.expand_dims
|
import numpy as np
import cv2
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
@tf.custom_gradient
def guidedRelu(x):
def grad(dy):
return tf.cast(dy>0,"float32") * tf.cast(x>0, "float32") * dy
return tf.nn.relu(x), grad
# Reference: https://github.com/eclique/keras-gradcam with adaption to tensorflow 2.0
class GuidedBackprop:
def __init__(self,model, layerName=None):
self.model = model
self.layerName = layerName
        if self.layerName is None:
self.layerName = self.find_target_layer()
self.gbModel = self.build_guided_model()
def find_target_layer(self):
for layer in reversed(self.model.layers):
if len(layer.output_shape) == 4:
return layer.name
raise ValueError("Could not find 4D layer. Cannot apply Guided Backpropagation")
def build_guided_model(self):
gbModel = Model(
inputs = [self.model.inputs],
outputs = [self.model.get_layer(self.layerName).output]
)
layer_dict = [layer for layer in gbModel.layers[1:] if hasattr(layer,"activation")]
for layer in layer_dict:
if layer.activation == tf.keras.activations.relu:
layer.activation = guidedRelu
return gbModel
def guided_backprop(self, images, upsample_size):
"""Guided Backpropagation method for visualizing input saliency."""
with tf.GradientTape() as tape:
inputs = tf.cast(images, tf.float32)
tape.watch(inputs)
outputs = self.gbModel(inputs)
grads = tape.gradient(outputs, inputs)[0]
saliency = cv2.resize(np.asarray(grads), upsample_size)
return saliency
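# Illustrative usage sketch (an assumption, not part of the original snippet);
# the model and input shape are placeholders:
#   model = tf.keras.applications.VGG16(weights=None)        # any CNN with a 4D conv output
#   gb = GuidedBackprop(model)                                # picks the last 4D layer by default
#   batch = np.random.rand(1, 224, 224, 3).astype("float32")
#   saliency = gb.guided_backprop(batch, upsample_size=(224, 224))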
def deprocess_image(x):
"""Same normalization as in:
https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py
"""
# normalize tensor: center on 0., ensure std is 0.25
x = x.copy()
x -= x.mean()
x /= (x.std() + K.epsilon())
x *= 0.25
# clip to [0, 1]
x += 0.5
x = np.clip(x, 0, 1)
# convert to RGB array
x *= 255
if K.image_data_format() == 'channels_first':
x = x.transpose((1, 2, 0))
x =
|
np.clip(x, 0, 255)
|
numpy.clip
|
import pickle
import numpy as np
import matplotlib.pyplot as plt
def plot_graphs(x, y, title, xlabel, ylabel, legend_labels, savefile):
plt.figure()
n = len(x)
for i in range(n):
plt.plot(x[i], y[i], label=legend_labels[i])
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend()
plt.savefig(savefile + ".png")
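# Illustrative call (an assumption, not part of the original script): two curves
# over three epochs, written to "demo.png".
#   plot_graphs([[0, 1, 2], [0, 1, 2]], [[0.1, 0.5, 0.8], [0.2, 0.6, 0.9]],
#               "Demo", "Epoch", "Accuracy", ["run A", "run B"], "demo")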
def mnist_tuning(num_users, direction, lrs, model="mlp"):
dataset = "mnist"
epochs = 100
frac = "1.0"
iid = 1
local_bs = 10
topk = 0.001
topk_d = 0.001
all_experiments = []
for lr in lrs:
experiments = []
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sgd", lr, direction, topk, topk_d, 1)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
all_experiments.append(np.mean(np.array(experiments)[:, 3], axis=0))
filename = '{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}]' \
.format(dataset, model, epochs, num_users, frac, iid, local_bs, "sgd", direction, topk, topk_d, 1)
savefile = '../save/plots/tuning/{}/'.format(dataset) + filename
plot_graphs([list(range(epochs))] * len(lrs), all_experiments, "Tuning Learning Rate", "Epoch", "Accuracy", lrs, savefile)
def mnist_sparse_tuning(num_users, direction, lrs, model="mlp"):
dataset = "mnist"
epochs = 100
frac = "1.0"
iid = 1
local_bs = 10
topk = 0.001
topk_d = 0.001
all_experiments = []
for lr in lrs:
filename = '{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, epochs, num_users, frac, iid, local_bs, "sparsetopk", lr, direction, topk, topk_d, 1)
experiments = []
filepath = '../save/{}-{}/'.format(dataset, model) + filename
with open(filepath, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
all_experiments.append(np.mean(np.array(experiments)[:, 3], axis=0))
filename = '{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}]' \
.format(dataset, model, epochs, num_users, frac, iid, local_bs, "sparsetopk", direction, topk, topk_d, 1)
savefile = '../save/plots/tuning/{}/'.format(dataset) + filename
plot_graphs([list(range(epochs))] * len(lrs), all_experiments, "", "Epoch", "Accuracy", lrs, savefile)
def mnist_comparison(num_users, lrs, model="mlp"):
dataset = "mnist"
epochs = 100
frac = "1.0"
iid = 1
local_bs = 10
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[0], 0, topk, topk_d, 1)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
all_experiments.append(np.mean(np.array(experiments)[:, 3], axis=0))
experiments = []
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", lrs[1], 1, topk, topk_d, 1)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
all_experiments.append(np.mean(np.array(experiments)[:, 3], axis=0))
experiments = []
file_name = '../save/{}-{}/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, dataset, model, epochs, num_users, frac, iid,
local_bs, "sgd", lrs[2], 1, topk, topk_d, 1)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
all_experiments.append(np.mean(np.array(experiments)[:, 3], axis=0))
filename = model + "_"+ str(num_users) + "_comparison"
savefile = '../save/plots/tuning/{}/'.format(dataset) + filename
plot_graphs([list(range(epochs))] * len(lrs), all_experiments, "Number of Workers: " + str(num_users), "Epoch", "Accuracy", ["unidirectional (lr = {})".format(lrs[0]), "bidirectional (lr = {})".format(lrs[1]), "sgd (lr = {})".format(lrs[2])], savefile)
def plot_xi_values():
dataset = "mnist"
model = "mlp"
epochs = 100
num_users = 20
frac = "1.0"
iid = 1
local_bs = 10
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
for number in [1]:
file_name = '../save/mnist-final-v1/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", 0.08, 1, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, 4], axis=0)
print(xi_values)
all_experiments.append(xi_values)
print(np.max(xi_values))
print(np.mean(xi_values))
# print(np.mean(experiments))
experiments = []
for number in [1]:
file_name = '../save/mnist-final-v1/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", 0.05, 0, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, 4], axis=0)
all_experiments.append(xi_values)
print(np.max(xi_values))
print(np.mean(xi_values))
batches = len(all_experiments[0])
plot_graphs([list(range(batches))] * 2, all_experiments, "", "Batch", r"$\xi$", ["bidirectional", "unidirectional"], "xi_comparison_20")
def plot_xi_values_lhs():
dataset = "mnist"
model = "mlp"
epochs = 30
num_users = 50
frac = "1.0"
iid = 1
local_bs = 10
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
for number in [1]:
file_name = '../save/mnist-final-v1/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", 0.09, 1, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, 9], axis=0)
print(xi_values)
all_experiments.append(xi_values)
print(np.max(xi_values))
print(np.mean(xi_values))
print(np.min(xi_values))
# print(np.mean(experiments))
experiments = []
for number in [1]:
file_name = '../save/mnist-final-v1/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", 0.08, 0, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, 9], axis=0)
all_experiments.append(xi_values)
print(np.max(xi_values))
print(np.mean(xi_values))
print(np.min(xi_values))
batches = len(all_experiments[0])
plot_graphs([list(range(batches))] * 2, all_experiments, "", "Batch", "Magnitude", ["bidirectional", "unidirectional"], "xi_comparison_20")
def plot_xi_values_rhs():
dataset = "mnist"
model = "mlp"
epochs = 30
num_users = 10
frac = "1.0"
iid = 1
local_bs = 10
topk = 0.001
topk_d = 0.001
all_experiments = []
experiments = []
for number in [1]:
file_name = '../save/mnist-final-v1/{}_{}_EPOCH[{}]_USERS[{}]_C[{}]_iid[{}]_B[{}]_OPT[{}]_LR[{}]_DIR[{}]_TOPK[{}]_TOPKD[{}]_NUM[{}].pkl' \
.format(dataset, model, epochs, num_users, frac, iid,
local_bs, "sparsetopk", 0.05, 1, topk, topk_d, number)
with open(file_name, 'rb') as pickle_file:
experiments.append(pickle.load(pickle_file))
xi_values = np.mean(np.array(experiments)[:, 10], axis=0)
print(xi_values)
all_experiments.append(xi_values)
print(np.max(xi_values))
print(np.mean(xi_values))
print(
|
np.min(xi_values)
|
numpy.min
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""evaluate example"""
import sys
import os
import time
import numpy as np
from scipy.special import softmax
from lenet5_net import LeNet5
from mindspore import Model
from mindspore import Tensor
from mindspore import context
from mindspore import nn
from mindspore.nn import Cell
from mindspore.ops.operations import TensorAdd
from mindspore.nn import SoftmaxCrossEntropyWithLogits
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindarmour.attacks import FastGradientSignMethod
from mindarmour.attacks import GeneticAttack
from mindarmour.attacks.black.black_model import BlackModel
from mindarmour.defenses import NaturalAdversarialDefense
from mindarmour.evaluations import BlackDefenseEvaluate
from mindarmour.evaluations import DefenseEvaluate
from mindarmour.utils.logger import LogUtil
from mindarmour.detectors.black.similarity_detector import SimilarityDetector
sys.path.append("..")
from data_processing import generate_mnist_dataset
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
LOGGER = LogUtil.get_instance()
TAG = 'Defense_Evaluate_Example'
def get_detector(train_images):
encoder = Model(EncoderNet(encode_dim=256))
detector = SimilarityDetector(max_k_neighbor=50, trans_model=encoder)
detector.fit(inputs=train_images)
return detector
class EncoderNet(Cell):
"""
Similarity encoder for input data
"""
def __init__(self, encode_dim):
super(EncoderNet, self).__init__()
self._encode_dim = encode_dim
self.add = TensorAdd()
def construct(self, inputs):
"""
construct the neural network
Args:
inputs (Tensor): input data to neural network.
Returns:
Tensor, output of neural network.
"""
return self.add(inputs, inputs)
def get_encode_dim(self):
"""
Get the dimension of encoded inputs
Returns:
int, dimension of encoded inputs.
"""
return self._encode_dim
class ModelToBeAttacked(BlackModel):
"""
    Model to be attacked.
"""
def __init__(self, network, defense=False, train_images=None):
super(ModelToBeAttacked, self).__init__()
self._network = network
self._queries = []
self._defense = defense
self._detector = None
self._detected_res = []
if self._defense:
self._detector = get_detector(train_images)
def predict(self, inputs):
"""
predict function
"""
query_num = inputs.shape[0]
results = []
if self._detector:
for i in range(query_num):
query = np.expand_dims(inputs[i].astype(np.float32), axis=0)
result = self._network(Tensor(query)).asnumpy()
det_num = len(self._detector.get_detected_queries())
self._detector.detect([query])
new_det_num = len(self._detector.get_detected_queries())
# If attack query detected, return random predict result
if new_det_num > det_num:
results.append(result + np.random.rand(*result.shape))
self._detected_res.append(True)
else:
results.append(result)
self._detected_res.append(False)
results = np.concatenate(results)
else:
results = self._network(Tensor(inputs.astype(np.float32))).asnumpy()
return results
def get_detected_result(self):
return self._detected_res
def test_black_defense():
# load trained network
current_dir = os.path.dirname(os.path.abspath(__file__))
ckpt_name = os.path.abspath(os.path.join(
current_dir, './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'))
# ckpt_name = './trained_ckpt_file/checkpoint_lenet-10_1875.ckpt'
wb_net = LeNet5()
load_dict = load_checkpoint(ckpt_name)
load_param_into_net(wb_net, load_dict)
# get test data
data_list = "./MNIST_unzip/test"
batch_size = 32
ds_test = generate_mnist_dataset(data_list, batch_size=batch_size,
sparse=False)
inputs = []
labels = []
for data in ds_test.create_tuple_iterator():
inputs.append(data[0].astype(np.float32))
labels.append(data[1])
inputs = np.concatenate(inputs).astype(np.float32)
labels = np.concatenate(labels).astype(np.float32)
labels_sparse = np.argmax(labels, axis=1)
target_label = np.random.randint(0, 10, size=labels_sparse.shape[0])
for idx in range(labels_sparse.shape[0]):
while target_label[idx] == labels_sparse[idx]:
target_label[idx] = np.random.randint(0, 10)
target_label = np.eye(10)[target_label].astype(np.float32)
attacked_size = 50
benign_size = 500
attacked_sample = inputs[:attacked_size]
attacked_true_label = labels[:attacked_size]
benign_sample = inputs[attacked_size:attacked_size + benign_size]
wb_model = ModelToBeAttacked(wb_net)
# gen white-box adversarial examples of test data
wb_attack = FastGradientSignMethod(wb_net, eps=0.3)
wb_adv_sample = wb_attack.generate(attacked_sample,
attacked_true_label)
wb_raw_preds = softmax(wb_model.predict(wb_adv_sample), axis=1)
accuracy_test = np.mean(
np.equal(np.argmax(wb_model.predict(attacked_sample), axis=1),
np.argmax(attacked_true_label, axis=1)))
LOGGER.info(TAG, "prediction accuracy before white-box attack is : %s",
accuracy_test)
accuracy_adv = np.mean(np.equal(np.argmax(wb_raw_preds, axis=1),
np.argmax(attacked_true_label, axis=1)))
LOGGER.info(TAG, "prediction accuracy after white-box attack is : %s",
accuracy_adv)
# improve the robustness of model with white-box adversarial examples
loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=False)
opt = nn.Momentum(wb_net.trainable_params(), 0.01, 0.09)
nad = NaturalAdversarialDefense(wb_net, loss_fn=loss, optimizer=opt,
bounds=(0.0, 1.0), eps=0.3)
wb_net.set_train(False)
nad.batch_defense(inputs[:5000], labels[:5000], batch_size=32, epochs=10)
wb_def_preds = wb_net(Tensor(wb_adv_sample)).asnumpy()
wb_def_preds = softmax(wb_def_preds, axis=1)
accuracy_def = np.mean(np.equal(np.argmax(wb_def_preds, axis=1),
np.argmax(attacked_true_label, axis=1)))
LOGGER.info(TAG, "prediction accuracy after defense is : %s", accuracy_def)
# calculate defense evaluation metrics for defense against white-box attack
wb_def_evaluate = DefenseEvaluate(wb_raw_preds, wb_def_preds,
np.argmax(attacked_true_label, axis=1))
LOGGER.info(TAG, 'defense evaluation for white-box adversarial attack')
LOGGER.info(TAG,
'classification accuracy variance (CAV) is : {:.2f}'.format(
wb_def_evaluate.cav()))
LOGGER.info(TAG, 'classification rectify ratio (CRR) is : {:.2f}'.format(
wb_def_evaluate.crr()))
LOGGER.info(TAG, 'classification sacrifice ratio (CSR) is : {:.2f}'.format(
wb_def_evaluate.csr()))
LOGGER.info(TAG,
'classification confidence variance (CCV) is : {:.2f}'.format(
wb_def_evaluate.ccv()))
LOGGER.info(TAG, 'classification output stability is : {:.2f}'.format(
wb_def_evaluate.cos()))
# calculate defense evaluation metrics for defense against black-box attack
LOGGER.info(TAG, 'defense evaluation for black-box adversarial attack')
bb_raw_preds = []
bb_def_preds = []
raw_query_counts = []
raw_query_time = []
def_query_counts = []
def_query_time = []
def_detection_counts = []
# gen black-box adversarial examples of test data
bb_net = LeNet5()
load_param_into_net(bb_net, load_dict)
bb_model = ModelToBeAttacked(bb_net, defense=False)
attack_rm = GeneticAttack(model=bb_model, pop_size=6, mutation_rate=0.05,
per_bounds=0.1, step_size=0.25, temp=0.1,
sparse=False)
attack_target_label = target_label[:attacked_size]
true_label = labels_sparse[:attacked_size + benign_size]
# evaluate robustness of original model
# gen black-box adversarial examples of test data
for idx in range(attacked_size):
raw_st = time.time()
raw_sl, raw_a, raw_qc = attack_rm.generate(
np.expand_dims(attacked_sample[idx], axis=0),
np.expand_dims(attack_target_label[idx], axis=0))
raw_t = time.time() - raw_st
bb_raw_preds.extend(softmax(bb_model.predict(raw_a), axis=1))
raw_query_counts.extend(raw_qc)
raw_query_time.append(raw_t)
for idx in range(benign_size):
raw_st = time.time()
bb_raw_pred = softmax(
bb_model.predict(
|
np.expand_dims(benign_sample[idx], axis=0)
|
numpy.expand_dims
|
import unittest
from numpy import alltrue, arange, array, ravel, transpose, zeros, inf, isinf
from numpy.testing import assert_equal, assert_
from chaco.api import DataRange2D, GridDataSource, PointDataSource
class DataRange2DTestCase(unittest.TestCase):
def test_empty_range(self):
r = DataRange2D()
assert_ary_(r.low, array([-inf, -inf]))
assert_ary_(r.high, array([inf, inf]))
self.assertTrue(r.low_setting == ('auto', 'auto'))
self.assertTrue(r.high_setting == ('auto', 'auto'))
r.low = array([5.0, 5.0])
r.high = array([10.0, 10.0])
assert_ary_(r.low_setting, array([5.0, 5.0]))
assert_ary_(r.high_setting, array([10.0, 10.0]))
assert_ary_(r.low, array([5.0, 5.0]))
assert_ary_(r.high, array([10.0, 10.0]))
return
def test_single_source(self):
r = DataRange2D()
x = arange(10.)
y = arange(0., 100., 10.)
ds = PointDataSource(transpose(array([x, y])), sort_order="none")
r.add(ds)
assert_ary_(r.low, array([0., 0.]))
assert_ary_(r.high, array([9.0, 90.0]))
r.low = [3.0, 30.0]
r.high = [6.0, 60.0]
assert_ary_(r.low_setting, array([3.0, 30.0]))
assert_ary_(r.high_setting, array([6.0, 60.0]))
assert_ary_(r.low, array([3.0, 30.0]))
assert_ary_(r.high, array([6.0, 60.0]))
r.refresh()
assert_ary_(r.low_setting, array([3.0, 30.0]))
assert_ary_(r.high_setting, array([6.0, 60.0]))
assert_ary_(r.low, array([3.0, 30.0]))
assert_ary_(r.high, array([6.0, 60.0]))
r.low = ('auto', 'auto')
self.assertTrue(r.low_setting == ('auto', 'auto'))
assert_ary_(r.low, array([0.0, 0.0]))
return
def test_constant_values(self):
r = DataRange2D()
ds = PointDataSource(array([[5.0, 5.0]]), sort_order="none")
r.add(ds)
# A constant value > 1.0, by default, gets a range that brackets
# it to the nearest power of ten above and below
assert_ary_(r.low, array([1.0, 1.0]))
assert_ary_(r.high, array([10.0, 10.0]))
r.remove(ds)
ds = PointDataSource(array([[31.4, 9.7]]))
r.add(ds)
assert_ary_(r.low, array([10.0, 1.0]))
assert_ary_(r.high, array([100.0, 10.0]))
r.remove(ds)
ds = PointDataSource(array([[0.125, 0.125]]))
r.add(ds)
assert_ary_(r.low, array([0.0, 0.0]))
assert_ary_(r.high, array([0.25, 0.25]))
r.remove(ds)
ds = PointDataSource(array([[-0.125, -0.125]]))
r.add(ds)
assert_ary_(r.low, array([-0.25, -0.25]))
assert_ary_(r.high, array([0.0, 0.0]))
return
def test_multi_source(self):
x = arange(10.)
y = arange(0., 100., 10.)
foo = transpose(array([x, y]))
bar = transpose(array([y, x]))
ds1 = PointDataSource(foo)
ds2 = PointDataSource(bar)
r = DataRange2D(ds1, ds2)
assert_ary_(r.low, [0.0, 0.0])
assert_ary_(r.high, [90., 90.])
return
def test_grid_source(self):
test_xd1 = array([1, 2, 3])
test_yd1 = array([1.5, 0.5, -0.5, -1.5])
test_sort_order1 = ('ascending', 'descending')
test_xd2 = array([0, 50, 100])
test_yd2 = array([0.5, 0.75, 1])
ds1 = GridDataSource(
xdata=test_xd1, ydata=test_yd1, sort_order=test_sort_order1)
ds2 = GridDataSource(xdata=test_xd2, ydata=test_yd2)
r = DataRange2D()
r.add(ds1)
assert_ary_(r.low, array([1, -1.5]))
assert_ary_(r.high, array([3, 1.5]))
r.add(ds2)
assert_ary_(r.low,
|
array([0.0, -1.5])
|
numpy.array
|
import numpy as np
from .boxes import RectengularBox
from .velocities import create_velocities
class RandomBeadsBox(RectengularBox):
def __init__(self, n_atoms, sidelength, mode='mc', mc_r_min=1.0):
"""
Parameters
----------
n_atoms
sidelength
mode : str, optional
mode can be ('mc', 'plain')
'mc' uses Monte-Carlo moves to place the atoms.
'plain' just place them randomly
"""
super().__init__(n_atoms,
a=sidelength,
b=sidelength,
c=sidelength)
if mode == 'mc':
self.place_atoms_via_montecarlo(self.positions, self.box, r_min=mc_r_min)
else:
self.place_atoms_random(self.positions, self.box)
def place_atoms_random(self, positions, box):
print("place atoms randomly in the box")
# Handle box
box_vectors =
|
np.linalg.norm(box, axis=1)
|
numpy.linalg.norm
|
#
"""
Python tools for creating the MIRI MRS 1d extraction reference files
(extract1d and apcor).
This is the ASDF variant
Author: <NAME> (<EMAIL>)
REVISION HISTORY:
12-Feb-2020 First written (D. Law)
27-Sep-2020 Add in extract1d file as well (D. Law)
29-Oct-2020 Convert to asdf output (D. Law)
04-Dec-2020 Change schema names (D. Law)
"""
import asdf
from asdf import AsdfFile
from astropy.io import fits
from astropy.time import Time
import datetime
import os as os
import numpy as np
import pdb
import matplotlib as mpl
from matplotlib import pyplot as plt
import miri3d.cubepar.make_cubepar as mc
from jwst import datamodels
from jwst.datamodels import Extract1dIFUModel
from jwst.datamodels import MirMrsApcorrModel
from jwst.datamodels import util
import miricoord.mrs.makesiaf.makesiaf_mrs as mksiaf
#############################
def make_all():
make_x1dpar()
make_apcorrpar()
#############################
def make_x1dpar():
# Set the output data directory
data_dir=os.path.expandvars('$MIRI3D_DATA_DIR')
outdir=os.path.join(data_dir,'x1d/temp/')
# Set the output filename including an MJD stamp
now=Time.now()
now.format='fits'
mjd=int(now.mjd)
filename='miri-extract1d-'+str(mjd)+'.asdf'
outfile=os.path.join(outdir,filename)
plotname = 'miri-extract1d-'+str(mjd)+'.png'
outplot = os.path.join(outdir,plotname)
thisfile=__file__
_,thisfile=os.path.split(thisfile)
# CDP input directory
cdp_dir=os.path.expandvars('$MIRIBOX')
cdp_dir=os.path.join(cdp_dir,'CDP/CDP-7/MRS_APERCORR/')
# Make the reference file dictionary
ff=make_x1d_fromdict(now,cdp_dir,outplot)
# Add history info
ff.add_history_entry('1D Extraction defaults')
ff.add_history_entry('DOCUMENT: TBD')
ff.add_history_entry('SOFTWARE: https://github.com/STScI-MIRI/miri3d/tree/master/miri3d/x1d/make_x1d.py')
ff.add_history_entry('DATA USED: CDP-7')
# Write out the file
ff.write_to(outfile,all_array_storage='inline')
print('Wrote file ',outfile)
# now validate this with the schema. If it does not validate, an error is returned
# working on how to return true or something that says "YES IT WORKED"
af = asdf.open(outfile, custom_schema="http://stsci.edu/schemas/jwst_datamodel/extract1difu.schema")
af.validate()
#############################
# This routine is just kept around for reference; it's what we'd
# do if we were not using the data model to create the file
def make_x1d_fromdict(now,cdp_dir,outplot):
meta={}
meta['telescope']='JWST'
meta['pedigree']='GROUND'
meta['description']='Default MIRI MRS Extract1d parameters'
meta['date']=now.value
meta['reftype']='EXTRACT1D'
meta['exposure']={
'type': 'MIR_MRS'
}
meta['useafter']='2000-01-01T00:00:00'
meta['version']=int(now.mjd)
meta['author']='<NAME>'
meta['origin']='STSCI'
meta['model_type']='Extract1dIFUModel'
meta['history']='1D Extraction defaults'
meta['history']+=' DOCUMENT: TBD'
meta['history']+=' SOFTWARE: https://github.com/STScI-MIRI/miri3d/tree/master/miri3d/x1d/make_x1d.py'
meta['history']+=' DATA USED: CDP-7'
meta['history']+=' Updated 4/26/21 to decrease background annulus size'
meta['instrument']={
'name': 'MIRI'
}
meta['region_type']='target'
meta['subtract_background']=True
meta['method']='subpixel'
meta['subpixels']=10
print('Figuring out wavelength ranges')
wmin1A,_=mc.waveminmax('1A')
_,wmax4C=mc.waveminmax('4C')
print('Building tables')
# Set up placeholder vectors
waves=np.arange(wmin1A,wmax4C,0.01,dtype='float32')
nwave=len(waves)
radius=np.ones(nwave,dtype='float32')
inbkg=np.zeros(nwave,dtype='float32')
outbkg=np.zeros(nwave,dtype='float32')
axratio=np.ones(nwave,dtype='float32')
axangle=np.zeros(nwave,dtype='float32')
# Populate real values
# Read in the CDP files
files=['MIRI_FM_MIRIFUSHORT_1SHORT_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFUSHORT_1MEDIUM_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFUSHORT_1LONG_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFUSHORT_2SHORT_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFUSHORT_2MEDIUM_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFUSHORT_2LONG_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFULONG_3SHORT_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFULONG_3MEDIUM_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFULONG_3LONG_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFULONG_4SHORT_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFULONG_4MEDIUM_APERCORR_07.00.00.fits',
'MIRI_FM_MIRIFULONG_4LONG_APERCORR_07.00.00.fits']
inwave=[]
inap=[]
for file in files:
fullfile = os.path.join(cdp_dir, file)
hdu=fits.open(fullfile)
data=hdu[1].data
inwave.append(data['wavelength'])
inap.append(data['a_aperture'])
# Compile into big vectors
# Simple polynomial fit to the aperture
thefit=np.polyfit(np.array(inwave).ravel(),np.array(inap).ravel(),1)
poly=np.poly1d(thefit)
radius=poly(waves)
# Background annulus
# Note that Ch1 can be much more generous than Ch4; FWHM increases
# by a factor of 5 from Ch1 to Ch4 but FOV only by a factor of 2.
# We also should not apply any sudden steps in the annulus size
# between channels, otherwise that will manifest as a step in the required
# aperture correction between channels, and we're assuming that it can be
# smooth with wavelength so everything interpolates from the same table.
# Therefore, we'll make annuli that shrink linearly (relative to FWHM)
# with wavelength
in1,in2 = np.min(radius)*2.5, np.max(radius)*1.02
out1,out2 = np.min(radius)*3.0, np.max(radius)*1.5
inbkg=np.float32(np.interp(waves,np.array([np.min(waves),np.max(waves)]),
np.array([in1,in2])))
outbkg=np.float32(np.interp(waves,np.array([
|
np.min(waves)
|
numpy.min
|
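# Minimal sketch of the linear background-annulus interpolation above (endpoint
# values are made up here; the real ones are derived from the fitted aperture radius):
import numpy as np
waves = np.arange(4.9, 28.8, 0.01, dtype='float32')
in1, in2 = 1.0, 2.5      # inner annulus radius at the blue / red wavelength ends (assumed)
inbkg = np.float32(np.interp(waves, [waves.min(), waves.max()], [in1, in2]))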
# -*- coding: utf-8 -*-
import random,os
import config
import subprocess
from random import shuffle
#import numpy as np
# import pandas as pd
import time
import ReadFile
import WriteFileAll
import time,os,sys
import string
import glob
import math
import numpy as np
def q30_m(f,sample):
cov_data=ReadFile.main(f,0,'\t')
# cov_data=random.choices(cov_data, k=1000)
include=0
L=[]
fastq=""
LQ=[]
for k in range (len(cov_data)):
if k%4 == 1:
fastq=cov_data[k][0]
if k%4 == 3:
q=list(cov_data[k][0])
q30_ratio=qscore(q)
LQ.append(q30_ratio)
if k > 100000:
break
q30_mean=np.quantile(
|
np.array(LQ)
|
numpy.array
|
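# Sketch of the np.quantile call being completed above: the median of the collected
# per-read Q30 ratios (the LQ values below are made up for illustration).
import numpy as np
LQ = [0.91, 0.88, 0.95, 0.90]
q30_mean = np.quantile(np.array(LQ), 0.5)    # -> 0.905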
from .myqt import QT
import pyqtgraph as pg
import numpy as np
import pandas as pd
from .base import WidgetBase
import time
from spikeinterface.toolkit.postprocessing.unit_localization import possible_localization_methods
class MyViewBox(pg.ViewBox):
doubleclicked = QT.pyqtSignal(float, float)
def mouseDoubleClickEvent(self, ev):
pos = self.mapToView(ev.pos())
x, y = pos.x(), pos.y()
self.doubleclicked.emit(x, y)
ev.accept()
def raiseContextMenu(self, ev):
# for some reason enableMenu=False is not taken into account (bug?)
pass
class ProbeView(WidgetBase):
_params = [
#~ {'name': 'colormap', 'type': 'list', 'value': 'inferno', 'values': ['inferno', 'summer', 'viridis', 'jet'] },
{'name': 'show_channel_id', 'type': 'bool', 'value': True},
{'name': 'radius', 'type': 'float', 'value': 40.},
{'name': 'change_channel_visibility', 'type': 'bool', 'value': True},
{'name': 'change_unit_visibility', 'type': 'bool', 'value': True},
{'name': 'method_localize_unit', 'type': 'list', 'values': possible_localization_methods},
]
_need_compute = True
def __init__(self, controller=None, parent=None):
WidgetBase.__init__(self, parent=parent, controller=controller)
self.layout = QT.QVBoxLayout()
self.setLayout(self.layout)
self.graphicsview = pg.GraphicsView()
self.layout.addWidget(self.graphicsview)
self.initialize_plot()
def initialize_plot(self):
self.viewBox = MyViewBox()
#~ self.viewBox.doubleclicked.connect(self.open_settings)
self.viewBox.doubleclicked.connect(self.on_pick_unit)
#~ self.viewBox.disableAutoRange()
#~ self.plot = pg.PlotItem(viewBox=self.viewBox)
#~ self.graphicsview.setCentralItem(self.plot)
#~ self.plot.hideButtons()
self.plot = pg.PlotItem(viewBox=self.viewBox)
self.plot.getViewBox().disableAutoRange()
self.graphicsview.setCentralItem(self.plot)
self.plot.getViewBox().setAspectLocked(lock=True, ratio=1)
self.plot.hideButtons()
#~ self.plot.showAxis('left', False)
#~ self.plot.showAxis('bottom', False)
# probe
probe = self.controller.get_probe()
contact_vertices = probe.get_contact_vertices()
planar_contour = probe.probe_planar_contour
self.contact_positions = probe.contact_positions
# small hack to connect to the first point
contact_vertices = [np.concatenate([e, e[:1, :]], axis=0) for e in contact_vertices]
vertices = np.concatenate(contact_vertices)
connect = np.ones(vertices.shape[0], dtype='bool')
pos = 0
for e in contact_vertices[:-1]:
pos += e.shape[0]
connect[pos-1] = False
self.contacts = pg.PlotCurveItem(vertices[:, 0], vertices[:, 1], pen='#7FFF00', fill='#7F7F0C', connect=connect)
self.plot.addItem(self.contacts)
if planar_contour is not None:
self.contour = pg.PlotCurveItem(planar_contour[:, 0], planar_contour[:, 1], pen='#7FFF00')
self.plot.addItem(self.contour)
# ROI
self.channel_labels = []
for i, channel_id in enumerate(self.controller.channel_ids):
#TODO label channels
label = pg.TextItem(f'{channel_id}', color='#FFFFFF', anchor=(0.5, 0.5), border=None)#, fill=pg.mkColor((128,128,128, 180)))
label.setPos(self.contact_positions[i, 0], self.contact_positions[i, 1])
self.plot.addItem(label)
self.channel_labels.append(label)
radius = self.params['radius']
x, y = self.contact_positions.mean(axis=0)
self.roi = pg.CircleROI([x - radius, y - radius], [radius * 2, radius * 2], pen='#7F7F0C') #pen=(4,9),
self.plot.addItem(self.roi)
self.roi.sigRegionChanged.connect(self.on_roi_change)
# units
#~ self.unit_positions
unit_positions = self.controller.unit_positions
brush = [self.controller.qcolors[u] for u in self.controller.unit_ids]
self.scatter = pg.ScatterPlotItem(pos=unit_positions, pxMode=False, size=10, brush=brush)
self.plot.addItem(self.scatter)
# range
xlim0 = np.min(self.contact_positions[:, 0]) - 20
xlim1 = np.max(self.contact_positions[:, 0]) + 20
ylim0 = np.min(self.contact_positions[:, 1]) - 20
ylim1 = np.max(self.contact_positions[:, 1]) + 20
self.plot.setXRange(xlim0, xlim1)
self.plot.setYRange(ylim0, ylim1)
def _refresh(self):
r = self.roi.state['size'][0] / 2
x = self.roi.state['pos'].x() + r
y = self.roi.state['pos'].y() + r
radius = self.params['radius']
self.roi.setSize(radius * 2)
self.roi.setPos(x - radius, y-radius)
if self.params['show_channel_id']:
for label in self.channel_labels:
label.show()
else:
for label in self.channel_labels:
label.hide()
def on_roi_change(self, emit_signals=True):
r = self.roi.state['size'][0] / 2
x = self.roi.state['pos'].x() + r
y = self.roi.state['pos'].y() + r
self.params.blockSignals(True)
self.params['radius'] = r
self.params.blockSignals(False)
if emit_signals:
self.roi.blockSignals(True)
if self.params['change_channel_visibility']:
#~ t0 = time.perf_counter()
dist = np.sqrt(np.sum((self.contact_positions - np.array([[x, y]]))**2, axis=1))
visible_channel_inds, = np.nonzero(dist < r)
order = np.argsort(dist[visible_channel_inds])
visible_channel_inds = visible_channel_inds[order]
self.controller.set_channel_visibility(visible_channel_inds)
self.channel_visibility_changed.emit()
#~ t1 = time.perf_counter()
#~ print(' probe view change_channel_visibility', t1-t0)
if self.params['change_unit_visibility']:
#~ t0 = time.perf_counter()
dist = np.sqrt(np.sum((self.controller.unit_positions - np.array([[x, y]]))**2, axis=1))
for unit_index, unit_id in enumerate(self.controller.unit_ids):
self.controller.unit_visible_dict[unit_id] = (dist[unit_index] < r)
#~ t1 = time.perf_counter()
#~ print(' probe view part1 change_unit_visibility', t1-t0)
self.controller.update_visible_spikes()
self.unit_visibility_changed.emit()
#~ t2 = time.perf_counter()
#~ print(' probe view part2 change_unit_visibility', t2-t0)
self.roi.blockSignals(False)
def on_unit_visibility_changed(self):
# this change the ROI and so change also channel_visibility
visible_mask = list(self.controller.unit_visible_dict.values())
n = np.sum(visible_mask)
if n == 1:
unit_index = np.nonzero(visible_mask)[0][0]
x, y = self.controller.unit_positions[unit_index, :]
radius = self.params['radius']
self.roi.blockSignals(True)
self.roi.setPos(x - radius, y - radius)
self.roi.blockSignals(False)
self.on_roi_change(emit_signals=False)
def on_channel_visibility_changed(self):
pass
def on_pick_unit(self, x, y):
unit_positions = self.controller.unit_positions
pos =
|
np.array([x, y])
|
numpy.array
|
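# Minimal sketch of the distance-based visibility filter in on_roi_change above
# (synthetic positions; the real ones come from the probe layout):
import numpy as np
contact_positions = np.array([[0., 0.], [10., 0.], [50., 50.]])
x, y, r = 5., 0., 20.
dist = np.sqrt(np.sum((contact_positions - np.array([[x, y]])) ** 2, axis=1))
visible, = np.nonzero(dist < r)
visible = visible[np.argsort(dist[visible])]   # nearest contact first -> [0, 1]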
# Copyright (C) 2013 <NAME>
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from phonopy.structure.cells import get_reduced_bases
search_space = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, -1],
[0, 1, 0],
[0, 1, 1],
[1, -1, -1],
[1, -1, 0],
[1, -1, 1],
[1, 0, -1],
[1, 0, 0],
[1, 0, 1],
[1, 1, -1],
[1, 1, 0],
[1, 1, 1],
[-1, -1, -1],
[-1, -1, 0],
[-1, -1, 1],
[-1, 0, -1],
[-1, 0, 0],
[-1, 0, 1],
[-1, 1, -1],
[-1, 1, 0],
[-1, 1, 1],
[0, -1, -1],
[0, -1, 0],
[0, -1, 1],
[0, 0, -1]], dtype='intc')
def get_qpoints_in_Brillouin_zone(primitive_vectors, qpoints):
bz = BrillouinZone(primitive_vectors)
bz.run(qpoints)
return bz.get_shortest_qpoints()
class BrillouinZone(object):
def __init__(self, primitive_vectors):
self._primitive_vectors = primitive_vectors # column vectors
self._tolerance = min(np.sum(primitive_vectors ** 2, axis=0)) * 0.01
self._reduced_bases = get_reduced_bases(primitive_vectors.T)
self._tmat = np.dot(np.linalg.inv(self._primitive_vectors),
self._reduced_bases)
self._tmat_inv = np.linalg.inv(self._tmat)
self._shortest_qpoints = None
def run(self, qpoints):
reduced_qpoints = np.dot(qpoints, self._tmat_inv.T)
self._shortest_qpoints = []
for q in reduced_qpoints:
distances = np.array([(np.dot(self._reduced_bases, q + g) ** 2).sum()
for g in search_space], dtype='double')
min_dist = min(distances)
shortest_indices = [i for i, d in enumerate(distances - min_dist)
if abs(d) < self._tolerance]
self._shortest_qpoints.append(
np.dot(search_space[shortest_indices] + q, self._tmat.T))
def get_shortest_qpoints(self):
return self._shortest_qpoints
if __name__ == '__main__':
from phonopy.interface.vasp import read_vasp
from phonopy.structure.symmetry import Symmetry, get_lattice_vector_equivalence
from phonopy.structure.spglib import get_ir_reciprocal_mesh, relocate_BZ_grid_address
import sys
cell = read_vasp(sys.argv[1])
symmetry = Symmetry(cell)
mesh = [4, 4, 4]
is_shift =
|
np.array([0, 0, 0], dtype='intc')
|
numpy.array
|
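# Hedged sketch of the shortest-|q+G| search done in BrillouinZone.run above, using
# a simple cubic reduced basis and an illustrative q-point (not the phonopy code itself):
import numpy as np
import itertools
reduced_bases = np.eye(3)
search_space = np.array(list(itertools.product([0, 1, -1], repeat=3)))
q = np.array([0.6, 0.0, 0.0])
d2 = np.array([(np.dot(reduced_bases, q + g) ** 2).sum() for g in search_space])
shortest = search_space[np.argmin(d2)] + q     # -> array([-0.4, 0., 0.])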
# this part is copied from Yufeng Shen's code:
#https://github.com/Yufeng-shen/nfHEDMtools/blob/master/Simulation.py
import numpy as np
from fractions import Fraction
from math import floor
from hexomap import utility
# from matplotlib import path
class Detector:
def __init__(self):
self.Norm = np.array([0, 0, 1])
self.CoordOrigin = np.array([0., 0., 0.])
self.Jvector = np.array([1, 0, 0])
self.Kvector = np.array([0, -1, 0])
self.PixelJ = 0.00148
self.PixelK = 0.00148
self.NPixelJ = 2048
self.NPixelK = 2048
def Move(self, J, K, trans, tilt):
self.CoordOrigin -= J * self.Jvector * self.PixelJ + K * self.Kvector * self.PixelK
self.CoordOrigin = tilt.dot(self.CoordOrigin) + trans
self.Norm = tilt.dot(self.Norm)
self.Jvector = tilt.dot(self.Jvector)
self.Kvector = tilt.dot(self.Kvector)
def IntersectionIdx(self, ScatterSrc, TwoTheta, eta, bIdx=True):
#print('eta:{0}'.format(eta))
#self.Print()
dist = self.Norm.dot(self.CoordOrigin - ScatterSrc)
scatterdir = np.array([np.cos(TwoTheta), np.sin(TwoTheta) * np.sin(eta), np.sin(TwoTheta) * np.cos(eta)])
InterPos = dist / (self.Norm.dot(scatterdir)) * scatterdir + ScatterSrc
J = (self.Jvector.dot(InterPos - self.CoordOrigin) / self.PixelJ)
K = (self.Kvector.dot(InterPos - self.CoordOrigin) / self.PixelK)
if 0 <= int(J) < self.NPixelJ and 0 <= int(K) < self.NPixelK:
if bIdx == True:
return int(J), int(K)
else:
return J, K
else:
return -1
def BackProj(self, HitPos, omega, TwoTheta, eta):
"""
HitPos: ndarray (3,)
The position of the hit point in lab coordinates, in mm
"""
scatterdir = np.array([np.cos(TwoTheta), np.sin(TwoTheta) * np.sin(eta), np.sin(TwoTheta) * np.cos(eta)])
t = HitPos[2] / (np.sin(TwoTheta) * np.cos(eta))
x = HitPos[0] - t * np.cos(TwoTheta)
y = HitPos[1] - t * np.sin(TwoTheta) * np.sin(eta)
truex = np.cos(omega) * x + np.sin(omega) * y
truey = -np.sin(omega) * x + np.cos(omega) * y
return np.array([truex, truey])
def Idx2LabCord(self, J, K):
return J * self.PixelJ * self.Jvector + K * self.PixelK * self.Kvector + self.CoordOrigin
def Reset(self):
self.__init__()
def Print(self):
print("Norm: ", self.Norm)
print("CoordOrigin: ", self.CoordOrigin)
print("J vector: ", self.Jvector)
print("K vector: ", self.Kvector)
class CrystalStr:
def __init__(self, material='new'):
self.name = material
self.AtomPos = []
self.AtomZs = []
self.symtype = None
if material == 'gold':
self.symtype = 'Cubic'
self.PrimA = 4.08 * np.array([1, 0, 0])
self.PrimB = 4.08 * np.array([0, 1, 0])
self.PrimC = 4.08 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 79)
self.addAtom([0, 0.5, 0.5], 79)
self.addAtom([0.5, 0, 0.5], 79)
self.addAtom([0.5, 0.5, 0], 79)
elif material == 'copper':
self.symtype = 'Cubic'
self.PrimA = 3.61 * np.array([1, 0, 0])
self.PrimB = 3.61 * np.array([0, 1, 0])
self.PrimC = 3.61 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 29)
self.addAtom([0, 0.5, 0.5], 29)
self.addAtom([0.5, 0, 0.5], 29)
self.addAtom([0.5, 0.5, 0], 29)
elif material == 'copperBCC':
self.symtype = 'Cubic'
self.PrimA = 2.947 * np.array([1, 0, 0])
self.PrimB = 2.947 * np.array([0, 1, 0])
self.PrimC = 2.947 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 29)
self.addAtom([0.5, 0.5, 0.5], 29)
elif material == 'copperFCC':
self.symtype = 'Cubic'
self.PrimA = 3.692 * np.array([1, 0, 0])
self.PrimB = 3.692 * np.array([0, 1, 0])
self.PrimC = 3.692 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 29)
self.addAtom([0, 0.5, 0.5], 29)
self.addAtom([0.5, 0, 0.5], 29)
self.addAtom([0.5, 0.5, 0], 29)
elif material == 'stainless_steel':
self.symtype = 'Cubic'
self.PrimA = 3.59 * np.array([1, 0, 0])
self.PrimB = 3.59 * np.array([0, 1, 0])
self.PrimC = 3.59 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 26)
self.addAtom([0, 0.5, 0.5], 26)
self.addAtom([0.5, 0, 0.5], 26)
self.addAtom([0.5, 0.5, 0], 26)
elif material == 'iron_bcc':
# bcc lattice
self.symtype = 'Cubic'
self.PrimA = 2.856 * np.array([1, 0, 0])
self.PrimB = 2.856 * np.array([0, 1, 0])
self.PrimC = 2.856 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 26)
self.addAtom([0.5, 0.5, 0.5], 26)
elif material == 'iron_fcc':
self.symtype = 'Cubic'
self.PrimA = 2.856 * np.array([1, 0, 0])
self.PrimB = 2.856 * np.array([0, 1, 0])
self.PrimC = 2.856 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 26)
self.addAtom([0, 0.5, 0.5], 26)
self.addAtom([0.5, 0, 0.5], 26)
self.addAtom([0.5, 0.5, 0], 26)
elif material == 'SrTiO3':
self.symtype = 'Cubic'
self.PrimA = 3.9053 * np.array([1, 0, 0])
self.PrimB = 3.9053 * np.array([0, 1, 0])
self.PrimC = 3.9053 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 22)
self.addAtom([0.5, 0.5, 0.5], 38)
self.addAtom([0.5, 0, 0], 8)
self.addAtom([0, 0.5, 0], 8)
self.addAtom([0, 0, 0.5], 8)
elif material == 'SrTiO3_v1':
self.symtype = 'Cubic'
self.PrimA = 3.9053 * np.array([1, 0, 0])
self.PrimB = 3.9053 * np.array([0, 1, 0])
self.PrimC = 3.9053 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 38)
self.addAtom([0.5, 0.5, 0.5], 22)
self.addAtom([0.5, 0.5, 0], 8)
self.addAtom([0, 0.5, 0.5], 8)
self.addAtom([0.5, 0, 0.5], 8)
elif material == 'SrTiO3_v2':
self.symtype = 'Cubic'
self.PrimA = 3.9053 * np.array([1, 0, 0])
self.PrimB = 3.9053 * np.array([0, 1, 0])
self.PrimC = 3.9053 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 38)
#self.addAtom([0.5, 0.5, 0.5], 22)
self.addAtom([0.5, 0.5, 0], 8)
self.addAtom([0, 0.5, 0.5], 8)
self.addAtom([0.5, 0, 0.5], 8)
elif material == 'SrTiO3_v3':
self.symtype = 'Cubic'
self.PrimA = 3.9053 * np.array([1, 0, 0])
self.PrimB = 3.9053 * np.array([0, 1, 0])
self.PrimC = 3.9053 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 38)
#self.addAtom([0.5, 0.5, 0.5], 22)
self.addAtom([0.5, 0.5, 0], 38)
self.addAtom([0, 0.5, 0.5], 38)
self.addAtom([0.5, 0, 0.5], 38)
elif material == 'Ti7':
self.symtype = 'Hexagonal'
self.PrimA = 2.92539 * np.array([1, 0, 0])
self.PrimB = 2.92539 * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.PrimC = 4.67399 * np.array([0, 0, 1])
self.addAtom([1 / 3.0, 2 / 3.0, 1 / 4.0], 22)
self.addAtom([2 / 3.0, 1 / 3.0, 3 / 4.0], 22)
elif material == 'WE43':
# not tested, use Mg to approximate
self.symtype = 'Hexagonal'
a = 3.2094
c = 5.2107
self.PrimA = a * np.array([1, 0, 0])
self.PrimB = a * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.PrimC = c * np.array([0, 0, 1])
self.addAtom([1 / 3.0, 2 / 3.0, 1 / 4.0], 12)
self.addAtom([2 / 3.0, 1 / 3.0, 3 / 4.0], 12)
elif material == 'Ti64_alpha':
self.symtype = 'Hexagonal'
self.PrimA = 2.930 * np.array([1, 0, 0])
self.PrimB = 2.930 * np.array([np.cos(np.pi * 2 / 3), np.sin(np.pi * 2 / 3), 0])
self.PrimC = 4.677 * np.array([0, 0, 1])
self.addAtom([1 / 3.0, 2 / 3.0, 1 / 4.0], 22)
self.addAtom([2 / 3.0, 1 / 3.0, 3 / 4.0], 22)
elif material == 'Ti64_beta':
# bcc lattice
self.symtype = 'Cubic'
self.PrimA = 3.224 * np.array([1, 0, 0])
self.PrimB = 3.224 * np.array([0, 1, 0])
self.PrimC = 3.224 * np.array([0, 0, 1])
self.addAtom([0, 0, 0], 26)
self.addAtom([0.5, 0.5, 0.5], 26)
elif material == 'UO2':
# bcc lattice
self.symtype = 'Cubic'
self.PrimA = 5.471 * np.array([1, 0, 0])
self.PrimB = 5.471 * np.array([0, 1, 0])
self.PrimC = 5.471 *
|
np.array([0, 0, 1])
|
numpy.array
|
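# Hedged sketch of the ray/plane intersection used in Detector.IntersectionIdx above:
# t = n.(p0 - src) / (n.d), intersection = src + t*d (illustrative detector and angles).
import numpy as np
normal, origin = np.array([0., 0., 1.]), np.array([0., 0., 5.])
src = np.array([0., 0., 0.])
two_theta, eta = np.deg2rad(10.), np.deg2rad(30.)
d = np.array([np.cos(two_theta), np.sin(two_theta) * np.sin(eta), np.sin(two_theta) * np.cos(eta)])
t = normal.dot(origin - src) / normal.dot(d)
hit = src + t * d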
import os
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
def get_dataset_from_folder(folder):
imgs = []
labels = []
for root, dirs, files in os.walk(folder):
for file in files:
print(file)
if file[-4:] == ".png":
labels.append(int(file.split("_")[0]))
img = plt.imread(os.path.join(folder,file))
img = np.sum(img, axis=2)
img = img / np.max(img)
print(np.max(img))
imgs.append(img)
return np.reshape(np.array(imgs), [len(imgs),28,28,1]), np.array(labels)
mnist = tf.keras.datasets.mnist
real_imgs, real_labels = get_dataset_from_folder("train_images")
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images / 255.0
test_images = test_images / 255.0
train_images =
|
np.reshape(train_images, [train_images.shape[0], 28, 28, 1])
|
numpy.reshape
|
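# Sketch of the NHWC reshape being completed above (synthetic data standing in for
# the MNIST arrays):
import numpy as np
train_images = np.random.rand(1000, 28, 28)
train_images = np.reshape(train_images, [train_images.shape[0], 28, 28, 1])   # -> (1000, 28, 28, 1)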
""" Block Coordinate Descent (BCD) for signal decomposition
Author: <NAME>
"""
import numpy as np
from time import time
from osd.utilities import calc_obj, progress
def run_bcd(data, components, num_iter=50, use_ix=None, X_init=None,
stopping_tolerance=1e-6, verbose=True):
if use_ix is None:
use_ix = np.ones_like(data, dtype=bool)
y = data
if len(data.shape) == 1:
T = len(data)
p = 1
else:
T, p = data.shape
K = len(components)
rho = 2 / T / p
if X_init is None:
if p == 1:
X = np.zeros((K, T))
else:
X =
|
np.zeros((K, T, p))
|
numpy.zeros
|
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
* http://nghiaho.com/?page_id=846
NB Opposite Sign Convention to GLM
--------------------------------------
* http://planning.cs.uiuc.edu/node102.html
* http://planning.cs.uiuc.edu/node103.html
| r11 r12 r13 |
| r21 r22 r23 |
| r31 r32 r33 |
Rz (yaw) counterclockwise "alpha"
cosZ -sinZ 0
sinZ cosZ 0
0 0 1
Ry (pitch) counterclockwise "beta"
cosY 0 sinY
0 1 0
-sinY 0 cosY
Rx (roll) counterclockwise "gamma"
1 0 0
0 cosX -sinX
0 sinX cosX
yawPitchRoll
Rz Ry Rx
Rzyx = Rz(alpha).Ry(beta).Rx(gamma)
^^^^^^ roll first
First roll Rx, then pitch Ry then finally yaw Rz
11: cosZ cosY 12: cosZ sinY sinX - sinZ cosX 13: cosZ sinY cosX + sinZ sinX
21: sinZ cosY 22: sinZ sinY sinX + cosZ cosX 23: sinZ sinY cosX - cosZ sinX
31: -sinY 32: cosY sinX 33: cosY cosX
r32/r33 = cosY sinX / cosY cosX = tanX
r32^2 + r33^2 = cosY^2 sinX^2 + cosY^2 cosX^2 = cosY^2
-r31/sqrt(r32^2 + r33^2) = sinY / cosY = tanY
r21/r11 = tanZ
r11^2 + r21^2 = cosZ^2 cosY^2 + sinZ^2 cosY^2 = cosY^2 "cosb"^2
-r31/sqrt(r11^2 + r21^2) = sinY / cosY = tanY
cosY->0 => sinY=>1
... DONT FOLLOW THE LEAP TO sinZ = 0, cosZ = 1
-r23/r22 = -(sinZ sinY cosX - cosZ sinX) / (sinZ sinY sinX + cosZ cosX )
how is this meant to yield tanY ??? ... perhaps a mal-assumption made here that sinY->0 ???
cosZ sinX / cosZ cosX -> tanX (if sinY->0, BUT IT DOESNT ???)
* https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2012/07/euler-angles1.pdf
* ~/opticks_refs/Extracting_Euler_Angles.pdf
"""
import numpy as np
def GetAngles(m):
"""
51 G4ThreeVector G4GDMLWriteDefine::GetAngles(const G4RotationMatrix& mtx)
52 {
53 G4double x,y,z;
54 G4RotationMatrix mat = mtx;
55 mat.rectify(); // Rectify matrix from possible roundoff errors
56
57 // Direction of rotation given by left-hand rule; clockwise rotation
58
59 static const G4double kMatrixPrecision = 10E-10;
60 const G4double cosb = std::sqrt(mtx.xx()*mtx.xx()+mtx.yx()*mtx.yx());
.. r11^2 + r21^2
61
62 if (cosb > kMatrixPrecision)
63 {
64 x = std::atan2(mtx.zy(),mtx.zz());
.. r32 r33
65 y = std::atan2(-mtx.zx(),cosb);
.. -r31
66 z = std::atan2(mtx.yx(),mtx.xx());
.. r21 r11
67 }
68 else
69 {
70 x = std::atan2(-mtx.yz(),mtx.yy());
.. -r23 r22
71 y = std::atan2(-mtx.zx(),cosb);
.. huh, division by something very small... unhealthy
.. -r31 sqrt(r11^2 + r21^2)
72 z = 0.0;
73 }
74
75 return G4ThreeVector(x,y,z);
76 }
"""
pass
def extractEulerAnglesXYZ(M, unit=np.pi/180., dtype=np.float32):
"""
https://github.com/jzrake/glm/commit/d3313421c664db5bd1b672d39ba3faec0d430117
https://github.com/g-truc/glm/blob/master/glm/gtx/euler_angles.inl
https://gamedev.stackexchange.com/questions/50963/how-to-extract-euler-angles-from-transformation-matrix
~/opticks_refs/Extracting_Euler_Angles.pdf
::
template<typename T>
GLM_FUNC_DECL void extractEulerAngleXYZ(mat<4, 4, T, defaultp> const & M,
T & t1,
T & t2,
T & t3)
{
float T1 = glm::atan2<T, defaultp>(M[2][1], M[2][2]);
float C2 = glm::sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]);
float T2 = glm::atan2<T, defaultp>(-M[2][0], C2);
float S1 = glm::sin(T1);
float C1 = glm::cos(T1);
float T3 = glm::atan2<T, defaultp>(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2 ]);
t1 = -T1;
t2 = -T2;
t3 = -T3;
}
"""
T1 = np.arctan2(M[2][1], M[2][2]);
C2 = np.sqrt(M[0][0]*M[0][0] + M[1][0]*M[1][0]);
T2 = np.arctan2(-M[2][0], C2);
S1 = np.sin(T1);
C1 = np.cos(T1);
T3 = np.arctan2(S1*M[0][2] - C1*M[0][1], C1*M[1][1] - S1*M[1][2 ]);
t1 = -T1;
t2 = -T2;
t3 = -T3;
return np.array([t1/unit,t2/unit,t3/unit], dtype=dtype)
def yawPitchRoll(yaw, pitch, roll, dtype=np.float32):
"""
yaw: Z
https://github.com/g-truc/glm/blob/master/glm/gtx/euler_angles.inl
::
template<typename T>
GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> yawPitchRoll
(
T const & yaw,
T const & pitch,
T const & roll
)
{
T tmp_ch = glm::cos(yaw);
T tmp_sh = glm::sin(yaw);
T tmp_cp = glm::cos(pitch);
T tmp_sp = glm::sin(pitch);
T tmp_cb = glm::cos(roll);
T tmp_sb = glm::sin(roll);
mat<4, 4, T, defaultp> Result;
Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb;
Result[0][1] = tmp_sb * tmp_cp;
Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb;
Result[0][3] = static_cast<T>(0);
Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb;
Result[1][1] = tmp_cb * tmp_cp;
Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb;
Result[1][3] = static_cast<T>(0);
Result[2][0] = tmp_sh * tmp_cp;
Result[2][1] = -tmp_sp;
Result[2][2] = tmp_ch * tmp_cp;
Result[2][3] = static_cast<T>(0);
Result[3][0] = static_cast<T>(0);
Result[3][1] = static_cast<T>(0);
Result[3][2] = static_cast<T>(0);
Result[3][3] = static_cast<T>(1);
return Result;
}
"""
tmp_ch = np.cos(yaw);
tmp_sh = np.sin(yaw);
tmp_cp = np.cos(pitch);
tmp_sp = np.sin(pitch);
tmp_cb = np.cos(roll);
tmp_sb = np.sin(roll);
Result = np.eye(4, dtype=dtype)
Result[0][0] = tmp_ch * tmp_cb + tmp_sh * tmp_sp * tmp_sb;
Result[0][1] = tmp_sb * tmp_cp;
Result[0][2] = -tmp_sh * tmp_cb + tmp_ch * tmp_sp * tmp_sb;
Result[0][3] = 0;
Result[1][0] = -tmp_ch * tmp_sb + tmp_sh * tmp_sp * tmp_cb;
Result[1][1] = tmp_cb * tmp_cp;
Result[1][2] = tmp_sb * tmp_sh + tmp_ch * tmp_sp * tmp_cb;
Result[1][3] = 0;
Result[2][0] = tmp_sh * tmp_cp;
Result[2][1] = -tmp_sp;
Result[2][2] = tmp_ch * tmp_cp;
Result[2][3] = 0;
Result[3][0] = 0;
Result[3][1] = 0;
Result[3][2] = 0;
Result[3][3] = 1;
return Result;
def eulerAngleX(angleX, dtype=np.float32):
"""
* opposite sign to *roll* of http://planning.cs.uiuc.edu/node102.html
/usr/local/opticks/externals/glm/glm-0.9.6.3/glm/gtx/euler_angles.inl::
35 template <typename T>
36 GLM_FUNC_QUALIFIER tmat4x4<T, defaultp> eulerAngleX
37 (
38 T const & angleX
39 )
40 {
41 T cosX = glm::cos(angleX);
42 T sinX = glm::sin(angleX);
43
44 return tmat4x4<T, defaultp>(
45 T(1), T(0), T(0), T(0),
46 T(0), cosX, sinX, T(0),
47 T(0),-sinX, cosX, T(0),
48 T(0), T(0), T(0), T(1));
49 }
50
"""
m = np.eye(4, dtype=dtype)
cosX = np.cos(angleX);
sinX = np.sin(angleX);
m[0] = [1., 0., 0., 0.]
m[1] = [0., cosX, sinX, 0.]
m[2] = [0., -sinX, cosX, 0.]
m[3] = [0., 0., 0., 1.]
return m
def eulerAngleY(angleY, dtype=np.float32):
"""
* opposite sign to *pitch* of http://planning.cs.uiuc.edu/node102.html
/usr/local/opticks/externals/glm/glm-0.9.6.3/glm/gtx/euler_angles.inl
::
51 template <typename T>
52 GLM_FUNC_QUALIFIER tmat4x4<T, defaultp> eulerAngleY
53 (
54 T const & angleY
55 )
56 {
57 T cosY = glm::cos(angleY);
58 T sinY = glm::sin(angleY);
59
60 return tmat4x4<T, defaultp>(
61 cosY, T(0), -sinY, T(0),
62 T(0), T(1), T(0), T(0),
63 sinY, T(0), cosY, T(0),
64 T(0), T(0), T(0), T(1));
65 }
"""
m =
|
np.eye(4, dtype=dtype)
|
numpy.eye
|
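# A quick numeric check (editorial, with illustrative angles; assumes the yawPitchRoll
# defined above is in scope): the 3x3 rotation block it builds should be orthonormal.
import numpy as np
R = yawPitchRoll(0.3, 0.2, 0.1)
assert np.allclose(R[:3, :3] @ R[:3, :3].T, np.eye(3))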
import numpy as np
import pandas as pd
from anndata import AnnData
import igraph
import matplotlib.pyplot as plt
from scipy import sparse
from typing import Union, Optional
from scanpy.plotting._utils import savefig_or_show
from ..tools.utils import getpath
from .trajectory import trajectory as plot_trajectory
from .utils import setup_axes
from ..tools.graph_operations import subset_tree
from .. import settings
import scanpy as sc
def modules(
adata: AnnData,
root_milestone,
milestones,
color: str = "milestones",
show_traj: bool = False,
layer: Optional[str] = None,
smooth: bool = False,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
**kwargs,
):
"""\
Plot the mean expression of the early and late modules.
Parameters
----------
adata
Annotated data matrix.
root_milestone
tip defining progenitor branch.
milestones
tips defining the progenies branches.
color
color the cells with variable from adata.obs.
show_traj
show trajectory on the early module plot.
layer
layer to use to compute mean of module.
show
show the plot.
save
save the plot.
kwargs
arguments to pass to :func:`scFates.pl.trajectory` if `show_traj=True`, else to :func:`scanpy.pl.embedding`
Returns
-------
If `show==False` a tuple of :class:`~matplotlib.axes.Axes`
"""
plt.rcParams["axes.grid"] = False
X_early, X_late = get_modules(adata, root_milestone, milestones, layer)
cells = X_early.index
verb = settings.verbosity
settings.verbosity = 1
nmil = len(adata.uns["graph"]["milestones"])
if nmil > 4:
adata_c = subset_tree(adata, root_milestone, milestones, copy=True)
adata_c.obsm["X_early"] = X_early.loc[adata_c.obs_names].values
adata_c.obsm["X_late"] = X_late.loc[adata_c.obs_names].values
else:
adata_c = AnnData(
X_early.values,
obs=adata.obs,
uns=adata.uns,
obsm={"X_early": X_early.values, "X_late": X_late.values},
obsp=adata.obsp,
)
settings.verbosity = verb
if smooth:
adata_c.obsm["X_early"] = adata_c.obsp["connectivities"].dot(
adata_c.obsm["X_early"]
)
adata_c.obsm["X_late"] = adata_c.obsp["connectivities"].dot(
adata_c.obsm["X_late"]
)
axs, _, _, _ = setup_axes(panels=[0, 1])
color = "old_milestones" if ((color == "milestones") & (nmil > 4)) else color
if show_traj:
plot_trajectory(
adata_c,
basis="early",
root_milestone=root_milestone,
milestones=milestones,
color_cells=color,
show=False,
title="",
legend_loc="none",
ax=axs[0],
**kwargs,
)
else:
sc.pl.embedding(
adata_c[cells],
basis="early",
color=color,
legend_loc="none",
title="",
show=False,
ax=axs[0],
**kwargs,
)
sc.pl.embedding(
adata_c[cells],
basis="late",
color=color,
legend_loc="none",
show=False,
title="",
ax=axs[1],
**kwargs,
)
axs[0].set_xlabel("early " + milestones[0])
axs[0].set_ylabel("early " + milestones[1])
axs[1].set_xlabel("late " + milestones[0])
axs[1].set_ylabel("late " + milestones[1])
savefig_or_show("modules", show=show, save=save)
def get_modules(adata, root_milestone, milestones, layer):
graph = adata.uns["graph"]
dct = graph["milestones"]
leaves = list(map(lambda leave: dct[leave], milestones))
root = dct[root_milestone]
name = root_milestone + "->" + "<>".join(milestones)
stats = adata.uns[name]["fork"]
if "milestones_colors" not in adata.uns or len(adata.uns["milestones_colors"]) == 1:
from . import palette_tools
palette_tools._set_default_colors_for_categorical_obs(adata, "milestones")
mlsc = adata.uns["milestones_colors"].copy()
mls = adata.obs.milestones.cat.categories.tolist()
dct = dict(zip(mls, mlsc))
df = adata.obs.copy(deep=True)
edges = graph["pp_seg"][["from", "to"]].astype(str).apply(tuple, axis=1).values
img = igraph.Graph()
img.add_vertices(
np.unique(graph["pp_seg"][["from", "to"]].values.flatten().astype(str))
)
img.add_edges(edges)
cells = np.unique(
np.concatenate(
[
getpath(
img, root, adata.uns["graph"]["tips"], leaves[0], graph, df
).index,
getpath(
img, root, adata.uns["graph"]["tips"], leaves[1], graph, df
).index,
]
)
)
if layer is None:
if sparse.issparse(adata.X):
X = pd.DataFrame(
np.array(adata[cells, stats.index].X.A),
index=cells,
columns=stats.index,
)
else:
X = pd.DataFrame(
np.array(adata[cells, stats.index].X),
index=cells,
columns=stats.index,
)
else:
if sparse.issparse(adata.layers[layer]):
X = pd.DataFrame(
|
np.array(adata[cells, stats.index].layers[layer].A)
|
numpy.array
|
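# Sketch of the sparse-to-dense step being completed above: on a scipy sparse matrix,
# `.A` is shorthand for `.toarray()` (tiny synthetic example, not the scFates data):
import numpy as np
from scipy import sparse
m = sparse.csr_matrix(np.array([[0., 1.], [2., 0.]]))
dense = np.array(m.A)                        # same result as np.array(m.toarray())
assert np.array_equal(dense, m.toarray())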
import time
import sys
import math
import random
import george
import numpy as np
import os
#acq function
from trimtuner.acquisition_functions.constrained_entropy_search import Constrained_EntropySearch
from trimtuner.acquisition_functions.marginalization import MarginalizationGPMCMC, MarginalizationDT
from robo.acquisition_functions.ei import *
#heuristics to filter
from trimtuner.maximizers.random_sampling import RandomSampling
from trimtuner.maximizers.cea import CEA
#from trimtuner.maximizers.direct import Direct
#from trimtuner.maximizers.cmaes import CMAES
#models
from trimtuner.models.trimtuner_dt import EnsembleDTs
from trimtuner.models.trimtuner_gp import EnsembleGPs
from robo.priors.env_priors import EnvPrior
#bootstrapping
from trimtuner.trimtuner.initial_sampling import initial_sampling_trimtuner
#incumbent estimation
from trimtuner.trimtuner.incumbent_estimation import incumbent_estimation_cea, incumbent_estimation
def transform(s, s_min, s_max):
s_transform = (np.log2(s) - np.log2(s_min)) / (np.log2(s_max) - np.log2(s_min))
return s_transform
def retransform(s_transform, s_min, s_max):
s = np.rint(2 ** (s_transform * (np.log2(s_max) - np.log2(s_min)) + np.log2(s_min)))
return int(s)
class Logs():
#class to print log files
def __init__(self, seed, initSamples, model, heuristic):
dir = os.path.abspath(os.getcwd())
path = dir + "/runLogs"
self.initSamples = initSamples
self.seed = seed
if not os.path.isdir(path):
try:
os.mkdir(path) #create runLogs folder
except OSError:
print("Creation of the directory %s failed" % path)
else:
print("Successfully created the directory %s " % path)
filename_orig = path + "/trimtuner_logs_seed" + str(seed) + "_initSamples" + str(initSamples) + "_model_" + model + "_heuristic_" + heuristic
filename = filename_orig + ".txt"
counter = 1
while os.path.isfile(filename):
filename = filename_orig + "_" + str(counter) + ".txt"
counter += 1
if counter >= 10000:
print("ERROR createing the log files!!! Check folder " + path)
sys.stdout.flush()
sys.exit(0)
#filename += ".txt"
self.file_logs = open(filename, "w")
self.file_logs.write("runID;initSamples;explorationNumber;incumbent;incTime;incAcc;incCost;configTested;Time;Acc;Cost;Overhead;CumulativeCost;\n")
def printLogs(self, it, inc, incTime, incAcc, incCost, conf, confTime, confAcc, confCost, overhead, CumulativeCost):
strWrite = str(self.seed) + ";" + str(self.initSamples) + ";" + str(it) + ";" + str(inc) + ";" + str(incTime) + ";" + str(incAcc) + ";" + str(incCost) + ";" + str(conf) + ";" + str(confTime) + ";" + str(confAcc) + ";" + str(confCost) + ";" + str(overhead) + ";" + str(CumulativeCost) + "\n"
self.file_logs.write(strWrite)
def close(self):
self.file_logs.close()
##################################################################################
# TrimTuner:
# Efficient Optimization of Machine Learning Jobs in the Cloud via Sub-Sampling
#
##################################################################################
def trimtuner(objective_function, all_configs, constraints, seed, filterHeuristic, model,
lower, upper, s_min, s_max, n_init=30, num_iterations=100, subsets=[60, 10, 4, 2]):
# internal parameters
burnin=100
chain_length=100
n_hypers=12
#percentage of unexplored configs to test in the acquisition function
per = 0.1
np.random.seed(seed)
rng = np.random.RandomState(np.random.randint(0, 10000))
#assert n_init * len(
assert n_init <= num_iterations, "Number of initial points (n_init) has to be smaller than the number of iterations"
assert lower.shape[0] == upper.shape[0], "Dimension mismatch between upper and lower bound"
assert model == "gp" or model == "dt", "ERROR: wrong model technique. Choose 'gp' for Gaussian Processes or 'dt' for an ensemble of decision trees"
assert filterHeuristic == "cea" or filterHeuristic == "random" or filterHeuristic == "nofilter", "ERROR: wrong filtering heuristic. Choose 'cea', 'random', or 'nofilter'!"
costCumulative = 0
n_dims = lower.shape[0]
# Bookkeeping logs
logs = Logs(seed, n_init, model, filterHeuristic)
unexplored_Set = all_configs # list with all possible configurations
training_Set = [] # training set
X = []
y = []
c = []
if model == "dt":
#ensemble of decision trees
number_trees = 10
model_objective = EnsembleDTs(number_trees, seed)
model_cost = EnsembleDTs(number_trees, seed)
elif model == "gp":
#Gaussian Processes
#kernels functions based on FABOLAS
# Define model for the objective function
cov_amp = 1 # Covariance amplitude
kernel = cov_amp
for d in range(n_dims):
kernel *= george.kernels.Matern52Kernel(np.ones([1])*0.01, ndim=n_dims+1, axes=d)
# Kernel for the environmental variable
# We use (1-s)**2 as basis function for the Bayesian linear kernel
env_kernel = george.kernels.BayesianLinearRegressionKernel(log_a=0.1,log_b=0.1,ndim=n_dims + 1,axes=n_dims)
kernel *= env_kernel
# Take 3 times more samples than we have hyperparameters
if n_hypers < 2 * len(kernel):
n_hypers = 3 * len(kernel)
if n_hypers % 2 == 1:
n_hypers += 1
prior = EnvPrior(len(kernel)+1, n_ls=n_dims, n_lr=2, rng=rng)
quadratic_bf = lambda x: (1 - x) ** 2
linear_bf = lambda x: x
#model for accuracy
model_objective = EnsembleGPs(kernel,
prior=prior,
burnin_steps=burnin,
chain_length=chain_length,
n_hypers=n_hypers,
normalize_output=False,
basis_func=quadratic_bf,
lower=lower,
upper=upper,
rng=rng)
# Define model for the cost function
cost_cov_amp = 1
cost_kernel = cost_cov_amp
for d in range(n_dims):
cost_kernel *= george.kernels.Matern52Kernel(np.ones([1])*0.01, ndim=n_dims+1, axes=d)
cost_env_kernel = george.kernels.BayesianLinearRegressionKernel(log_a=0.1,log_b=0.1,ndim=n_dims+1,axes=n_dims)
cost_kernel *= cost_env_kernel
cost_prior = EnvPrior(len(cost_kernel)+1, n_ls=n_dims, n_lr=2, rng=rng)
#model for cost
model_cost = EnsembleGPs(cost_kernel,
prior=cost_prior,
burnin_steps=burnin,
chain_length=chain_length,
n_hypers=n_hypers,
basis_func=linear_bf,
normalize_output=False,
lower=lower,
upper=upper,
rng=rng)
# Extend input space by task variable
extend_lower = np.append(lower, 0)
extend_upper =
|
np.append(upper, 1)
|
numpy.append
|
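# Sketch of the extended search space set up above: the dataset-size variable s is
# mapped to [0, 1] in log2 space (the transform defined at the top of the file) and
# appended to the bounds as an extra task dimension (bounds and sizes are illustrative).
import numpy as np
lower, upper = np.array([1., 0.1]), np.array([64., 0.9])
s_min, s_max = 1000, 60000
extend_lower = np.append(lower, 0)           # task dimension lower bound
extend_upper = np.append(upper, 1)           # task dimension upper bound
s = 15000
s_t = (np.log2(s) - np.log2(s_min)) / (np.log2(s_max) - np.log2(s_min))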
import numpy as np
from r_support import matrix, logger
def get_num_batches(n, batch_size):
return int(np.ceil(n * 1.0 / batch_size))
def get_sgd_batch(x, y, i, batch_size, shuffled_idxs=None):
s = i * batch_size
e = min(x.shape[0], (i + 1) * batch_size)
if shuffled_idxs is None:
idxs = np.arange(s, e)
else:
idxs = shuffled_idxs[np.arange(s, e)]
return matrix(x[idxs, :], ncol=x.shape[1]), y[idxs]
def avg_loss_check(losses, epoch, n=20, eps=1e-6):
if epoch < n + 1:
return False
avg1 = np.mean(losses[(epoch-1-n):(epoch-1)])
avg2 = np.mean(losses[(epoch-n):(epoch)])
if np.abs(avg1 - avg2) < eps:
return True
return False
def debug_log_sgd_losses(sgd_type, losses, epoch, n=20):
if False:
# disable logging -- should be used in PRODUCTION
return
elif True:
# minimal info
logger.debug("[%s] epochs: %d; avg last %d losses:%f" %
(sgd_type, epoch, n, np.mean(losses[(epoch-min(n, epoch)):(epoch)])))
else:
# maximum info
logger.debug("[%s] epochs: %d; avg last %d losses:%f\n%s\n%s" %
(sgd_type, epoch, n, np.mean(losses[(epoch-min(n, epoch)):(epoch)]),
str(list(losses[0:min(n, epoch)])),
str(list(losses[(epoch-min(n, epoch)):(epoch)]))))
def sgd(w0, x, y, f, grad, learning_rate=0.01,
batch_size=100, max_epochs=1000, eps=1e-6, shuffle=False, rng=None):
n = x.shape[0]
n_batches = get_num_batches(n, batch_size)
w = np.copy(w0)
epoch_losses = np.zeros(max_epochs, dtype=float)
epoch = 0
w_best = np.copy(w0)
loss_best = np.inf
if n <= batch_size:
shuffle = False # no need to shuffle since all instances will be used up in one batch
if shuffle:
shuffled_idxs = np.arange(n)
if rng is None:
np.random.shuffle(shuffled_idxs)
else:
rng.shuffle(shuffled_idxs)
else:
shuffled_idxs = None
while epoch < max_epochs:
losses = np.zeros(n_batches, dtype=float)
for i in range(n_batches):
xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
if xi.shape[0] == 0:
raise ValueError("Batch size of 0")
g = grad(w, xi, yi)
w -= learning_rate * g
losses[i] = f(w, xi, yi)
if False:
g_norm = g.dot(g)
if np.isnan(g_norm) or np.isinf(g_norm):
logger.debug("|grad|=%f, i=%d/%d, epoch:%d" % (g.dot(g), i+1, n_batches, epoch))
logger.debug("|w0|=%f" % w0.dot(w0))
raise ArithmeticError("grad is nan/inf in sgd")
loss = np.mean(losses)
if np.isnan(loss):
logger.debug("loss is nan")
logger.debug("|w|=%f" % w.dot(w))
raise ArithmeticError("loss is nan in sgd")
epoch_losses[epoch] = loss
if loss < loss_best:
# pocket algorithm
np.copyto(w_best, w)
loss_best = loss
epoch += 1
if loss < eps:
break
debug_log_sgd_losses("sgd", epoch_losses, epoch, n=20)
# logger.debug("epochs: %d" % epoch)
# logger.debug("net losses:")
# logger.debug("epoch losses:\n%s" % str(epoch_losses[0:epoch]))
# logger.debug("best loss: %f" % loss_best)
return w_best
def sgdRMSProp(w0, x, y, f, grad, learning_rate=0.01,
batch_size=100, max_epochs=1000, delta=1e-6, ro=0.9, eps=1e-6,
shuffle=False, rng=None):
n = x.shape[0]
n_batches = get_num_batches(n, batch_size)
w = np.copy(w0)
r = np.zeros(len(w0), dtype=w0.dtype) # gradient accumulation variable
epoch_losses = np.zeros(max_epochs, dtype=float)
epoch = 0
w_best = np.copy(w0)
loss_best = np.inf
if n <= batch_size:
# no need to shuffle since all instances will be used up in one batch
shuffle = False
if shuffle:
shuffled_idxs = np.arange(n)
if rng is None:
np.random.shuffle(shuffled_idxs)
else:
rng.shuffle(shuffled_idxs)
else:
shuffled_idxs = None
prev_loss = np.inf
while epoch < max_epochs:
losses = np.zeros(n_batches, dtype=float)
for i in range(n_batches):
xi, yi = get_sgd_batch(x, y, i, batch_size, shuffled_idxs=shuffled_idxs)
g = grad(w, xi, yi)
r[:] = ro * r + (1 - ro) * np.multiply(g, g)
dw_scale = (learning_rate / (np.sqrt(delta + r)))
dw = np.multiply(dw_scale, g)
w[:] = w - dw
losses[i] = f(w, xi, yi)
loss =
|
np.mean(losses)
|
numpy.mean
|
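# Isolated sketch of the RMSProp step used in the loop above: accumulate squared
# gradients, then scale the learning rate by 1/sqrt(delta + r) (toy gradient values).
import numpy as np
w = np.zeros(3)
r = np.zeros(3)
lr, ro, delta = 0.01, 0.9, 1e-6
g = np.array([0.5, -1.0, 0.2])               # gradient for the current batch
r = ro * r + (1 - ro) * g * g
w = w - (lr / np.sqrt(delta + r)) * g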
import csv
import json
import cv2
from scipy import ndimage
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from scipy.misc import imresize
import pydot
###### Load Udacity data ###############
data_path ='/opt/carnd_p3/data/'
samples = []
with open(data_path+'driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader, None) # skip the headers
for line in reader:
samples.append(line)
#### split train and validation samples
shuffle(samples)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
### Resize Image for fast training ###
img_height = 70
img_width = 224
def resize_img(img):
crop_img = img[53:153,:,:]
resized_img=imresize(crop_img, (img_height, img_width), interp='bilinear')
return resized_img
### Image Augmentation ###########
def augment_image(img):
# random translate vertically
cols = img.shape[1]
rows = img.shape[0]
transY = float(np.random.randint(-10, 10)) # scalar shift; a length-1 array here would make the 2x3 affine matrix ragged
M = np.float32([[1,0,0],[0,1,transY]])
img = cv2.warpAffine(img,M,(cols,rows))
image = cv2.cvtColor(img,cv2.COLOR_RGB2YUV)
# random brightness
random_bright = 1+np.random.uniform(-0.3,0.3)
image[:,:,0] = image[:,:,0]*random_bright
image[:,:,0][image[:,:,0]>255] = 255
# random shadow
mid =np.random.randint(0,rows)
shadow_factor = np.random.uniform(0.7, 0.9)
if np.random.randint(2)==0:
image[:,0:mid,0] = image[:,0:mid,0]*shadow_factor
else:
image[:,mid:,0] = image[:,mid:,0]*shadow_factor
image = cv2.cvtColor(image,cv2.COLOR_YUV2RGB)
return image
# create adjusted steering measurements for the side camera images
correction = 0.25 # this is a parameter to tune
### Python generator function
def generator(samples, batch_size=64, valid_flag=False):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
center_image = ndimage.imread(data_path+'IMG/'+batch_sample[0].split('/')[-1])
center_image = resize_img(center_image)
if not valid_flag:
center_image = augment_image(center_image)
center_angle = float(batch_sample[3])
# Append center image to the dataset
images.append(center_image)
angles.append(center_angle)
# Append flipped center image to the dataset only if the steering angle>0.3
if abs(center_angle)>0.3:
images.append(np.fliplr(center_image))
angles.append(-center_angle)
# Append left image to the dataset
left_image = ndimage.imread(data_path+'IMG/'+batch_sample[1].split('/')[-1])
left_image = resize_img(left_image)
if not valid_flag:
left_image = augment_image(left_image)
left_angle=center_angle+correction
images.append(left_image)
angles.append(left_angle)
# Append flipped left image to the dataset only if the steering angle>0.3
if abs(left_angle)>0.3:
images.append(np.fliplr(left_image))
angles.append(-left_angle)
# Append right image to the dataset
right_image = ndimage.imread(data_path+'IMG/'+batch_sample[2].split('/')[-1])
right_image = resize_img(right_image)
if not valid_flag:
right_image = augment_image(right_image)
right_angle=center_angle-correction
images.append(right_image)
angles.append(right_angle)
# Append flipped right image to the dataset only if the steering angle>0.3
if abs(right_angle)>0.3:
images.append(np.fliplr(right_image))
angles.append(-right_angle)
# trim image to only see section with road
X_train =
|
np.array(images)
|
numpy.array
|
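# Sketch of the flip augmentation used in the generator above: mirror the image
# left/right and negate the steering angle (synthetic image and angle; the real code
# only adds the flipped copy when abs(angle) > 0.3).
import numpy as np
image, angle = np.random.rand(70, 224, 3), 0.4
flipped, flipped_angle = np.fliplr(image), -angle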
import numpy as np
from .qoperator import asOperator
class QuantumStateError(Exception):
pass
class QuantumState:
"""base class"""
_state = None
_is_ensemble = None
_is_pure = None
@property
def N(self):
return len(self._state)
@property
def is_ensemble(self):
return self._is_ensemble
@property
def is_pure(self):
return self._is_pure
def as_vector(self):
return self._state.flatten()
def __repr__(self):
return str(self._state)
@property
def probabilities(self):
raise NotImplementedError
@property
def fidelity(self):
raise NotImplementedError
class StateVector(QuantumState):
"""vector dynamics -- quantum state as normalized vector"""
def __init__(self, vector):
self._state = vector
self._is_pure = True
self._is_ensemble = False
def asDensityMatrix(self):
"""ensemble description"""
return DensityMatrix(np.outer(self.coeffs, self.coeffs.conj()))
@property
def coeffs(self):
return self._state
@property
def probabilities(self):
return np.power(np.abs(self.coeffs), 2)
@property
def fidelity(self):
return np.sum(self.probabilities) # equals np.linalg.norm(self._state) ** 2; an unreachable second return of the norm was removed
@property
def is_normalized(self):
return np.isclose(self.fidelity, 1)
def ev(self, operator):
op = asOperator(operator)
if not op.N == self.N:
raise QuantumStateError
# bug fix: use the stored state vector; 'op' above only validates the operator dimension
return np.vdot(self._state, np.dot(operator, self._state))
class DensityMatrix(QuantumState):
"""matrix dynamics -- quantum state as statistical ensemble"""
def __init__(self, matrix):
if not np.isclose(matrix.trace(), 1):
raise QuantumStateError('No statistical probability conservation')
if not np.linalg.norm(matrix) <= 1:
if not is_idempotent(matrix):
raise QuantumStateError('Neither pure nor mixed state')
self._state = matrix
self._is_pure = np.isclose(self.fidelity, 1)
self._is_ensemble = True
@property
def entropy(self):
"""shannon entropy"""
w, v =
|
np.linalg.eigh(self._state)
|
numpy.linalg.eigh
|
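# Hedged sketch of the entropy computed from the eigenvalues of the density matrix
# above (a maximally mixed qubit gives S = log 2; zero eigenvalues are dropped before
# taking the log):
import numpy as np
rho = np.eye(2) / 2
w, v = np.linalg.eigh(rho)
w = w[w > 0]
S = -np.sum(w * np.log(w))                   # -> log(2) ~= 0.693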
#! /usr/env/python
"""
Python implementation of ModelGrid, a base class used to create and manage
grids for 2D numerical models.
Do NOT add new documentation here. Grid documentation is now built in a semi-
automated fashion. To modify the text seen on the web, edit the files
`docs/text_for_[gridfile].py.txt`.
"""
import numpy
import numpy as np
import warnings
from time import time
import six
from six.moves import range
from landlab.testing.decorators import track_this_method
from landlab.utils import count_repeated_values
from landlab.core.utils import argsort_points_by_x_then_y
from landlab.utils.decorators import make_return_array_immutable, deprecated
from landlab.field import ModelDataFields, ModelDataFieldsMixIn
from landlab.field.scalar_data_fields import FieldError
from . import grid_funcs as gfuncs
from ..core.utils import as_id_array
from ..core.utils import add_module_functions_to_class
from .decorators import (override_array_setitem_and_reset, return_id_array,
return_readonly_id_array)
from ..utils.decorators import cache_result_in_object
from ..layers.eventlayers import EventLayersMixIn
from .nodestatus import (CORE_NODE, FIXED_VALUE_BOUNDARY,
FIXED_GRADIENT_BOUNDARY, LOOPED_BOUNDARY,
CLOSED_BOUNDARY)
from .linkstatus import ACTIVE_LINK, FIXED_LINK, INACTIVE_LINK
from .linkstatus import set_status_at_link
#: Indicates an index is, in some way, *bad*.
BAD_INDEX_VALUE = -1
# DEJH thinks the user should be able to override this value if they want
# Map names grid elements to the ModelGrid attribute that contains the count
# of that element in the grid.
_ARRAY_LENGTH_ATTRIBUTES = {
'node': 'number_of_nodes',
'patch': 'number_of_patches',
'link': 'number_of_links',
'corner': 'number_of_corners',
'face': 'number_of_faces',
'cell': 'number_of_cells',
'active_link': 'number_of_active_links',
'active_face': 'number_of_active_faces',
'core_node': 'number_of_core_nodes',
'core_cell': 'number_of_core_cells',
}
# Fields whose sizes can not change.
_SIZED_FIELDS = {'node', 'link', 'patch', 'corner', 'face', 'cell', }
def _sort_points_into_quadrants(x, y, nodes):
"""Divide x, y points into quadrants.
Divide points with locations given in the *x*, and *y* arrays into north,
south, east, and west quadrants. Returns nodes contained in quadrants
(west, east, north, south).
Parameters
----------
x : array_like
X-coordinates of points.
y : array_like
Y-coordinates of points.
nodes : array_like
Nodes associated with points.
Returns
-------
tuple of array_like
Tuple of nodes in each coordinate. Nodes are grouped as
(*east*, *north*, *west*, *south*).
Examples
--------
>>> import numpy as np
>>> from landlab.grid.base import _sort_points_into_quadrants
>>> x = np.array([0, 1, 0, -1])
>>> y = np.array([1, 0, -1, 0])
>>> nodes = np.array([1, 2, 3, 4])
>>> _sort_points_into_quadrants(x, y, nodes)
(array([2]), array([1]), array([4]), array([3]))
"""
above_x_axis = y > 0
right_of_y_axis = x > 0
closer_to_y_axis = numpy.abs(y) >= numpy.abs(x)
north_nodes = nodes[above_x_axis & closer_to_y_axis]
south_nodes = nodes[(~ above_x_axis) & closer_to_y_axis]
east_nodes = nodes[right_of_y_axis & (~ closer_to_y_axis)]
west_nodes = nodes[(~ right_of_y_axis) & (~ closer_to_y_axis)]
return (east_nodes, north_nodes, west_nodes, south_nodes)
def _default_axis_names(n_dims):
"""Name of each axis.
Parameters
----------
n_dims : int
Number of spatial dimensions.
Returns
-------
tuple of str
Name of each axis.
Examples
--------
>>> from landlab.grid.base import _default_axis_names
>>> _default_axis_names(1)
('x',)
>>> _default_axis_names(2)
('y', 'x')
>>> _default_axis_names(3)
('z', 'y', 'x')
"""
_DEFAULT_NAMES = ('z', 'y', 'x')
return _DEFAULT_NAMES[- n_dims:]
def _default_axis_units(n_dims):
"""Unit names for each axis.
Parameters
----------
n_dims : int
Number of spatial dimensions.
Returns
-------
tuple of str
Units of each axis.
Examples
--------
>>> from landlab.grid.base import _default_axis_units
>>> _default_axis_units(1)
('-',)
>>> _default_axis_units(2)
('-', '-')
>>> _default_axis_units(3)
('-', '-', '-')
"""
return ('-', ) * n_dims
def find_true_vector_from_link_vector_pair(L1, L2, b1x, b1y, b2x, b2y):
r"""Separate a pair of links with vector values into x and y components.
The concept here is that a pair of adjacent links attached to a node are
projections of a 'true' but unknown vector. This function finds and returns
the x and y components of this true vector. The trivial case is the
situation in which the two links are orthogonal and aligned with the grid
axes, in which case the vectors of these two links *are* the x and y
components.
Parameters
----------
L1, L2 : float
Values (magnitudes) associated with the two links
b1x, b1y, b2x, b2y : float
Unit vectors of the two links
Returns
-------
ax, ay : float
x and y components of the 'true' vector
Notes
-----
The function does an inverse vector projection. Suppose we have a given
'true' vector :math:`a`, and we want to project it onto two other lines
with unit vectors (b1x,b1y) and (b2x,b2y). In the context of Landlab,
the 'true' vector is some unknown vector quantity, which might for
example represent the local water flow velocity. The lines represent two
adjacent links in the grid.
Let :math:`\mathbf{a}` be the true vector, :math:`\mathbf{B}` be a
different vector with unit vector :math:`\mathbf{b}`, and :math:`L`
be the scalar projection of *a* onto *B*. Then,
..math::
L = \mathbf{a} \dot \mathbf{b} = a_x b_x + a_y b_y,
where :math:`(a_x,a_y)` are the components of **a** and :math:`(b_x,b_y)`
are the components of the unit vector **b**.
In this case, we know *b* (the link unit vector), and we want to know the
*x* and *y* components of **a**. The problem is that we have one equation
and two unknowns (:math:`a_x` and :math:`a_y`). But we can solve this if
we have *two* vectors, both of which are projections of **a**. Using the
subscripts 1 and 2 to denote the two vectors, we can obtain equations for
both :math:`a_x` and :math:`a_y`:
..math::
a_x = L_1 / b_{1x} - a_y b_{1y} / b_{1x}
a_y = L_2 / b_{2y} - a_x b_{2x} / b_{2y}
Substituting the second into the first,
..math::
a_x = [L_1/b_{1x}-L_2 b_{1y}/(b_{1x} b_{2y})] / [1-b_{1y} b_{2x}/(b_{1x} b_{2y})]
Hence, we find the original vector :math:`(a_x,a_y)` from two links with
unit vectors :math:`(b_{1x},b_{1y})` and :math:`(b_{2x},b_{2y})` and
associated values :math:`L_1` and :math:`L_2`.
Note that the above equations require that :math:`b_{1x}>0` and
:math:`b_{2y}>0`. If this isn't the case, we invert the order of the two
links, which requires :math:`b_{2x}>0` and :math:`b_{1y}>0`. If none of
these conditions is met, then we have a degenerate case.
Examples
--------
The following example represents the active links in a 7-node hexagonal
grid, with just one core node. The 'true' vector has a magnitude of 5 units
and an orientation of 30 degrees, pointing up and to the right (i.e., the
postive-x and postive-y quadrant), so that its vector components are 4 (x)
and 3 (y) (in other words, it is a 3-4-5 triangle). The values assigned to
L below are the projection of that true vector onto the six link
vectors. The algorithm should recover the correct vector component
values of 4 and 3. The FOR loop examines each pair of links in turn.
>>> import numpy as np
>>> from landlab.grid.base import find_true_vector_from_link_vector_pair
>>> bx = np.array([0.5, -0.5, -1., -0.5, 1., 0.5])
>>> by = np.array([0.866, 0.866, 0., -0.866, 0., -0.866])
>>> L = np.array([4.6, 0.6, -4., -4.6, 4., -0.6])
>>> for i in range(5):
... ax, ay = find_true_vector_from_link_vector_pair(
... L[i], L[i+1], bx[i], by[i], bx[i+1], by[i+1])
... round(ax,1), round(ay,1)
(4.0, 3.0)
(4.0, 3.0)
(4.0, 3.0)
(4.0, 3.0)
(4.0, 3.0)
"""
assert ((b1x != 0 and b2y != 0) or (b2x != 0 and b1y != 0)), \
'Improper unit vectors'
if b1x != 0. and b2y != 0.:
ax = (L1 / b1x - L2 * (b1y / (b1x * b2y))) / \
(1. - (b1y * b2x) / (b1x * b2y))
ay = L2 / b2y - ax * (b2x / b2y)
elif b2x != 0. and b1y != 0.:
ax = (L2 / b2x - L1 * (b2y / (b2x * b1y))) / \
(1. - (b2y * b1x) / (b2x * b1y))
ay = L1 / b1y - ax * (b1x / b1y)
return ax, ay
class ModelGrid(ModelDataFieldsMixIn, EventLayersMixIn):
"""Base class for 2D structured or unstructured grids for numerical models.
The idea is to have at least two inherited
classes, RasterModelGrid and DelaunayModelGrid, that can create and
manage grids. To this might be added a GenericModelGrid, which would
be an unstructured polygonal grid that doesn't necessarily obey or
understand the Delaunay triangulation, but rather simply accepts
an input grid from the user. There is also a :class:`~.HexModelGrid` for hexagonal grids.
Attributes
----------
at_node : dict-like
Values at nodes.
at_cell : dict-like
Values at cells.
at_link : dict-like
Values at links.
at_face : dict-like
Values at faces.
at_grid: dict-like
Global values
Other Parameters
----------------
axis_name : tuple, optional
Name of axes
axis_units : tuple, optional
Units of coordinates
"""
BC_NODE_IS_CORE = CORE_NODE
BC_NODE_IS_FIXED_VALUE = FIXED_VALUE_BOUNDARY
BC_NODE_IS_FIXED_GRADIENT = FIXED_GRADIENT_BOUNDARY
BC_NODE_IS_LOOPED = LOOPED_BOUNDARY
BC_NODE_IS_CLOSED = CLOSED_BOUNDARY
BC_LINK_IS_ACTIVE = ACTIVE_LINK
BC_LINK_IS_FIXED = FIXED_LINK
BC_LINK_IS_INACTIVE = INACTIVE_LINK
# Debugging flags (if True, activates some output statements)
_DEBUG_VERBOSE = False
_DEBUG_TRACK_METHODS = False
at_node = {} # : Values defined at nodes
at_link = {} # : Values defined at links
at_patch = {} # : Values defined at patches
at_corner = {} # : Values defined at corners
at_face = {} # : Values defined at faces
at_cell = {} # : Values defined at cells
def __init__(self, **kwds):
super(ModelGrid, self).__init__()
self.axis_name = kwds.get('axis_name', _default_axis_names(self.ndim))
self.axis_units = kwds.get(
'axis_units', _default_axis_units(self.ndim))
self._link_length = None
self._all_node_distances_map = None
self._all_node_azimuths_map = None
self.bc_set_code = 0
# Sort links according to the x and y coordinates of their midpoints.
# Assumes 1) node_at_link_tail and node_at_link_head have been
# created, and 2) so have node_x and node_y.
# self._sort_links_by_midpoint()
for loc in _SIZED_FIELDS:
size = self.number_of_elements(loc)
ModelDataFields.new_field_location(self, loc, size=size)
ModelDataFields.new_field_location(self, 'grid', size=1)
# for loc in _UNSIZED_FIELDS:
# ModelDataFields.new_field_location(self, loc, size=None)
ModelDataFields.set_default_group(self, 'node')
def _create_neighbor_list(self, **kwds):
"""Create list of neighbor node IDs.
Creates a list of IDs of neighbor nodes for each node, as a
2D array. Only record neighbor nodes that are on the other end of an
*active* link. Nodes attached to *inactive* links or neighbor nodes
that would be outside of the grid are given an ID of
:const:`~landlab.grid.base.BAD_INDEX_VALUE`.
Neighbors are ordered as [*right*, *top*, *left*, *bottom*].
"""
self._active_neighbor_nodes = self.adjacent_nodes_at_node.copy()
self._active_neighbor_nodes[
self.active_link_dirs_at_node == 0] = BAD_INDEX_VALUE
self.neighbor_list_created = True
return self._active_neighbor_nodes
@classmethod
def from_file(cls, file_like):
params = load_params(file_like)
return cls.from_dict(params)
@classmethod
def from_dict(cls, params):
raise NotImplementedError('from_dict')
def _initialize(self):
raise NotImplementedError('_initialize')
@property
def ndim(self):
"""Number of spatial dimensions of the grid.
LLCATS: GINF
"""
return 2
def _setup_nodes(self):
"""Set up the node id array."""
self._nodes = np.arange(self.number_of_nodes, dtype=int)
return self._nodes
@property
@make_return_array_immutable
def nodes(self):
"""Get node ids for the grid.
Examples
--------
>>> from landlab import RadialModelGrid
>>> mg = RadialModelGrid(num_shells=1)
>>> mg.nodes
array([0, 1, 2, 3, 4, 5, 6])
LLCATS: NINF
"""
try:
return self._nodes
except AttributeError:
return self._setup_nodes()
@property
@override_array_setitem_and_reset('reset_status_at_node')
def status_at_node(self):
"""Get array of the boundary status for each node.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab import FIXED_GRADIENT_BOUNDARY, FIXED_LINK
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node.reshape((4, 5))
array([[1, 1, 1, 1, 1],
[1, 0, 0, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 1, 1, 1]], dtype=uint8)
>>> np.any(mg.status_at_link == FIXED_LINK)
False
>>> mg.status_at_node[mg.nodes_at_left_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.status_at_node.reshape((4, 5))
array([[2, 1, 1, 1, 1],
[2, 0, 0, 0, 1],
[2, 0, 0, 0, 1],
[2, 1, 1, 1, 1]], dtype=uint8)
>>> np.any(mg.status_at_link == FIXED_LINK) # links auto-update
True
LLCATS: NINF BC
"""
return self._node_status
@status_at_node.setter
def status_at_node(self, new_status):
"""Set the array of node boundary statuses."""
self._node_status[:] = new_status[:]
self.reset_status_at_node()
@property
@deprecated(use='adjacent_nodes_at_node', version=1.2)
@make_return_array_immutable
def neighbors_at_node(self):
"""Get neighboring nodes.
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> grid = RasterModelGrid((4, 3))
>>> neighbors = grid.adjacent_nodes_at_node.copy()
>>> neighbors[neighbors == BAD_INDEX_VALUE] = -1
>>> neighbors # doctest: +NORMALIZE_WHITESPACE
array([[ 1, 3, -1, -1], [ 2, 4, 0, -1], [-1, 5, 1, -1],
[ 4, 6, -1, 0], [ 5, 7, 3, 1], [-1, 8, 4, 2],
[ 7, 9, -1, 3], [ 8, 10, 6, 4], [-1, 11, 7, 5],
[10, -1, -1, 6], [11, -1, 9, 7], [-1, -1, 10, 8]])
LLCATS: NINF CONN
"""
return self.adjacent_nodes_at_node
@property
@cache_result_in_object()
@make_return_array_immutable
def adjacent_nodes_at_node(self):
"""Get adjacent nodes.
Examples
--------
>>> from landlab import HexModelGrid
>>> grid = HexModelGrid(3, 3)
>>> grid.adjacent_nodes_at_node
array([[ 1, 4, 3, -1, -1, -1],
[ 2, 5, 4, 0, -1, -1],
[ 6, 5, 1, -1, -1, -1],
[ 4, 7, 0, -1, -1, -1],
[ 5, 8, 7, 3, 0, 1],
[ 6, 9, 8, 4, 1, 2],
[ 9, 5, 2, -1, -1, -1],
[ 8, 3, 4, -1, -1, -1],
[ 9, 7, 4, 5, -1, -1],
[ 8, 5, 6, -1, -1, -1]])
LLCATS: NINF CONN
"""
node_is_at_tail = np.choose(self.link_dirs_at_node + 1,
np.array((1, -1, 0), dtype=np.int8))
out = self.nodes_at_link[self.links_at_node, node_is_at_tail]
out[node_is_at_tail == -1] = -1
return out
@property
@cache_result_in_object()
@return_readonly_id_array
def active_adjacent_nodes_at_node(self):
"""Adjacent nodes for each grid node.
For each grid node, get the adjacent nodes ordered
counterclockwise starting from the positive x axis.
Examples
--------
>>> from landlab import RasterModelGrid, HexModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.active_adjacent_nodes_at_node[(-1, 6, 2), ]
array([[-1, -1, -1, -1],
[ 7, 11, 5, 1],
[-1, 7, -1, -1]])
Setting a node to closed causes all links touching it to
be inactive.
>>> grid.status_at_node[6] = grid.BC_NODE_IS_CLOSED
>>> grid.active_adjacent_nodes_at_node[(-1, 6, 2), ]
array([[-1, -1, -1, -1],
[-1, -1, -1, -1],
[-1, 7, -1, -1]])
>>> grid.active_adjacent_nodes_at_node[7]
array([ 8, 12, -1, 2])
>>> grid.active_adjacent_nodes_at_node[2]
array([-1, 7, -1, -1])
>>> grid = HexModelGrid(3, 2)
>>> grid.status_at_node[0] = grid.BC_NODE_IS_CLOSED
>>> grid.active_adjacent_nodes_at_node
array([[-1, -1, -1, -1, -1, -1],
[-1, 3, -1, -1, -1, -1],
[ 3, -1, -1, -1, -1, -1],
[ 4, 6, 5, 2, -1, 1],
[-1, 3, -1, -1, -1, -1],
[-1, -1, 3, -1, -1, -1],
[-1, 3, -1, -1, -1, -1]])
LLCATS: NINF CONN BC
"""
return np.choose(
self.status_at_link[self.links_at_node] == ACTIVE_LINK,
(-1, self.adjacent_nodes_at_node))
@property
@deprecated(use='active_adjacent_nodes_at_node', version=1.2)
@cache_result_in_object()
@return_readonly_id_array
def active_neighbors_at_node(self):
"""Get list of neighbor node IDs.
Return lists of neighbor nodes, where the neighbor is connected by an
active link. For each node, the list gives neighbor ids as [right, top,
left, bottom]. Nodes at the end of inactive links or nodes in missing
positions get BAD_INDEX_VALUE.
Examples
--------
>>> from landlab import RasterModelGrid, HexModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.active_adjacent_nodes_at_node[(-1, 6, 2), ]
array([[-1, -1, -1, -1],
[ 7, 11, 5, 1],
[-1, 7, -1, -1]])
Setting a node to closed causes all links touching it to
be inactive.
>>> grid.status_at_node[6] = grid.BC_NODE_IS_CLOSED
>>> grid.active_adjacent_nodes_at_node[(-1, 6, 2), ]
array([[-1, -1, -1, -1],
[-1, -1, -1, -1],
[-1, 7, -1, -1]])
>>> grid.active_adjacent_nodes_at_node[7]
array([ 8, 12, -1, 2])
>>> grid.active_adjacent_nodes_at_node[2]
array([-1, 7, -1, -1])
>>> grid = HexModelGrid(3, 2)
>>> grid.status_at_node[0] = grid.BC_NODE_IS_CLOSED
>>> grid.active_adjacent_nodes_at_node
array([[-1, -1, -1, -1, -1, -1],
[-1, 3, -1, -1, -1, -1],
[ 3, -1, -1, -1, -1, -1],
[ 4, 6, 5, 2, -1, 1],
[-1, 3, -1, -1, -1, -1],
[-1, -1, 3, -1, -1, -1],
[-1, 3, -1, -1, -1, -1]])
LLCATS: NINF CONN BC
"""
return self.active_adjacent_nodes_at_node
@property
@make_return_array_immutable
def links_at_node(self):
"""Get links of nodes.
Returns
-------
(NODES, LINKS) ndarray of int
Link for the nodes of a grid. The shape of the matrix will be
number of nodes rows by max number of links per node. Order is
anticlockwise from east.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 3))
>>> grid.links_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 2, -1, -1], [ 1, 3, 0, -1], [-1, 4, 1, -1],
[ 5, 7, -1, 2], [ 6, 8, 5, 3], [-1, 9, 6, 4],
[10, 12, -1, 7], [11, 13, 10, 8], [-1, 14, 11, 9],
[15, -1, -1, 12], [16, -1, 15, 13], [-1, -1, 16, 14]])
>>> grid.links_at_node[4]
array([6, 8, 5, 3])
>>> grid.links_at_node[(4, 7), :]
array([[ 6, 8, 5, 3], [11, 13, 10, 8]])
LLCATS: NINF LINF CONN
"""
return self._links_at_node
@property
@make_return_array_immutable
def link_dirs_at_node(self):
"""Link directions at each node: 1=incoming, -1=outgoing, 0=none.
Returns
-------
(NODES, LINKS) ndarray of int
Link directions relative to the nodes of a grid. The shape of the
matrix will be number of nodes rows by max number of links per
node. A zero indicates no link at this position.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 3))
>>> grid.link_dirs_at_node # doctest: +NORMALIZE_WHITESPACE
array([[-1, -1, 0, 0], [-1, -1, 1, 0], [ 0, -1, 1, 0],
[-1, -1, 0, 1], [-1, -1, 1, 1], [ 0, -1, 1, 1],
[-1, -1, 0, 1], [-1, -1, 1, 1], [ 0, -1, 1, 1],
[-1, 0, 0, 1], [-1, 0, 1, 1], [ 0, 0, 1, 1]],
dtype=int8)
>>> grid.link_dirs_at_node[4]
array([-1, -1, 1, 1], dtype=int8)
>>> grid.link_dirs_at_node[(4, 7), :]
array([[-1, -1, 1, 1],
[-1, -1, 1, 1]], dtype=int8)
LLCATS: NINF LINF CONN
"""
return self._link_dirs_at_node
@property
@make_return_array_immutable
@cache_result_in_object()
def active_link_dirs_at_node(self):
"""
Link flux directions at each node: 1=incoming flux, -1=outgoing
flux, 0=no flux. Note that inactive links receive zero, but active
and fixed links are both reported normally.
Returns
-------
(NODES, LINKS) ndarray of int
Link directions relative to the nodes of a grid. The shape of the
matrix will be number of nodes rows by max number of links per
node. A zero indicates no link at this position.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 3))
>>> grid.status_at_node[grid.nodes_at_left_edge] = CLOSED_BOUNDARY
>>> grid.active_link_dirs_at_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 0, 0, 0], [ 0, -1, 0, 0], [ 0, 0, 0, 0],
[ 0, 0, 0, 0], [-1, -1, 0, 1], [ 0, 0, 1, 0],
[ 0, 0, 0, 0], [-1, -1, 0, 1], [ 0, 0, 1, 0],
[ 0, 0, 0, 0], [ 0, 0, 0, 1], [ 0, 0, 0, 0]],
dtype=int8)
LLCATS: NINF LINF CONN
"""
return np.choose(self.link_status_at_node == ACTIVE_LINK,
(0, self.link_dirs_at_node))
@property
@make_return_array_immutable
@cache_result_in_object()
def link_status_at_node(self):
return self.status_at_link[self.links_at_node]
@property
def node_at_cell(self):
"""Node ID associated with grid cells.
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> grid = RasterModelGrid((4, 5))
>>> grid.node_at_cell # doctest: +NORMALIZE_WHITESPACE
array([ 6, 7, 8,
11, 12, 13])
LLCATS: NINF CINF CONN
"""
return self._node_at_cell
@property
def cell_at_node(self):
"""Node ID associated with grid cells.
Examples
--------
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> grid = RasterModelGrid((4, 5))
>>> ids = grid.cell_at_node
>>> ids[ids == BAD_INDEX_VALUE] = -1
>>> ids # doctest: +NORMALIZE_WHITESPACE
array([-1, -1, -1, -1, -1,
-1, 0, 1, 2, -1,
-1, 3, 4, 5, -1,
-1, -1, -1, -1, -1])
LLCATS: CINF NINF CONN
"""
return self._cell_at_node
@property
@return_readonly_id_array
@cache_result_in_object()
def core_nodes(self):
"""Get array of core nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.core_nodes
array([ 6, 7, 8, 11, 12, 13])
LLCATS: NINF BC
"""
return numpy.where(self.status_at_node == CORE_NODE)[0]
@property
@return_readonly_id_array
def boundary_nodes(self):
"""Get array of boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.boundary_nodes
array([ 0, 1, 2, 3, 4, 5, 9, 10, 14, 15, 16, 17, 18, 19])
LLCATS: NINF BC
"""
try:
return self._boundary_nodes
except AttributeError:
(boundary_node_ids, ) = numpy.where(self._node_status != CORE_NODE)
return boundary_node_ids
@property
@return_readonly_id_array
def open_boundary_nodes(self):
"""Get array of open boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> for edge in (mg.nodes_at_left_edge, mg.nodes_at_right_edge,
... mg.nodes_at_bottom_edge):
... mg.status_at_node[edge] = CLOSED_BOUNDARY
>>> mg.open_boundary_nodes
array([16, 17, 18])
LLCATS: NINF BC
"""
(open_boundary_node_ids, ) = numpy.where(
(self._node_status != CLOSED_BOUNDARY) &
(self._node_status != CORE_NODE))
return open_boundary_node_ids
@property
@return_readonly_id_array
def closed_boundary_nodes(self):
"""Get array of closed boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node[mg.nodes_at_top_edge] = CLOSED_BOUNDARY
>>> mg.closed_boundary_nodes
array([15, 16, 17, 18, 19])
LLCATS: NINF BC
"""
(closed_boundary_node_ids, ) = numpy.where(
self._node_status == CLOSED_BOUNDARY)
return closed_boundary_node_ids
@property
@return_readonly_id_array
def fixed_gradient_boundary_nodes(self):
"""Get array of fixed gradient boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid, FIXED_GRADIENT_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node[mg.nodes_at_top_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.fixed_gradient_boundary_nodes
array([15, 16, 17, 18, 19])
LLCATS: NINF BC
"""
(fixed_gradient_boundary_node_ids, ) = numpy.where(
self._node_status == FIXED_GRADIENT_BOUNDARY)
return fixed_gradient_boundary_node_ids
@property
@return_readonly_id_array
def fixed_gradient_boundary_node_fixed_link(self):
"""
An array of the fixed_links connected to fixed gradient boundary nodes.
Note that on a raster, some nodes (notably the corners) can be
FIXED_GRADIENT_BOUNDARY, but not have a true FIXED_LINK neighboring
link. In such cases, the link returned will be a closed link joining
the corner node to a neighboring FIXED_GRADIENT_BOUNDARY node (see
example).
An AssertionError will be raised if for some reason a
FIXED_GRADIENT_BOUNDARY node exists which has neither a
FIXED_GRADIENT_BOUNDARY neighbor nor a FIXED_LINK.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab import FIXED_GRADIENT_BOUNDARY
>>> grid = RasterModelGrid((3, 4))
>>> leftedge = grid.nodes_at_left_edge
>>> grid.status_at_node[leftedge] = FIXED_GRADIENT_BOUNDARY
>>> grid.fixed_gradient_boundary_nodes
array([0, 4, 8])
>>> grid.fixed_gradient_boundary_node_fixed_link
array([ 3, 7, 10])
"""
try:
return self._fixed_gradient_boundary_node_links
except AttributeError:
self._create_fixed_gradient_boundary_node_links()
return self._fixed_gradient_boundary_node_links
@property
@return_readonly_id_array
def fixed_gradient_boundary_node_anchor_node(self):
"""
Returns the node at the other end of the fixed link for a fixed
gradient boundary node.
Degenerate FIXED_GRADIENT_BOUNDARY nodes (e.g., corners) are handled as
in :func:`fixed_gradient_boundary_node_fixed_link`, by pointing to a
neighboring FIXED_GRADIENT_BOUNDARY node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab import FIXED_GRADIENT_BOUNDARY
>>> grid = RasterModelGrid((3, 4))
>>> leftedge = grid.nodes_at_left_edge
>>> grid.status_at_node[leftedge] = FIXED_GRADIENT_BOUNDARY
>>> grid.fixed_gradient_boundary_nodes
array([0, 4, 8])
>>> grid.fixed_gradient_boundary_node_fixed_link
array([ 3, 7, 10])
>>> grid.fixed_gradient_boundary_node_anchor_node
array([4, 5, 4])
"""
try:
return self._fixed_gradient_boundary_node_anchor_node
except AttributeError:
self._create_fixed_gradient_boundary_node_anchor_node()
return self._fixed_gradient_boundary_node_anchor_node
def _create_fixed_gradient_boundary_node_links(self):
"""
Builds a data structure to hold the fixed_links which control the
values of any FIXED_GRADIENT_BOUNDARY nodes in the grid.
An AssertionError will be raised if for some reason a
FIXED_GRADIENT_BOUNDARY node exists which has neither a
FIXED_GRADIENT_BOUNDARY neighbor nor a FIXED_LINK.
"""
self._fixed_grad_links_created = True
self._fixed_gradient_boundary_node_links = np.empty_like(
self.fixed_gradient_boundary_nodes, dtype=int)
fix_nodes = self.fixed_gradient_boundary_nodes
neighbor_links = self.links_at_node[fix_nodes] # -1s
boundary_exists = self.link_dirs_at_node[fix_nodes]
# next line retains -1 indexes
link_stat_badind = self.status_at_link[neighbor_links] == FIXED_LINK
true_connection = np.logical_and(link_stat_badind, boundary_exists)
true_fix_nodes = true_connection.sum(axis=1).astype(bool)
self._fixed_gradient_boundary_node_links[true_fix_nodes] = (
neighbor_links[true_connection])
# resolve any corner nodes
neighbor_nodes = self.adjacent_nodes_at_node[fix_nodes] # BAD_INDEX_VALUEs
neighbor_nodes[neighbor_nodes == BAD_INDEX_VALUE] = -1
fixed_grad_neighbor = np.logical_and((self.status_at_node[
neighbor_nodes] == FIXED_GRADIENT_BOUNDARY), boundary_exists)
# ^True when FIXED_GRADIENT_BOUNDARY for real
# winnow it down to only one possibility for fixed_grad neighbor:
which_neighbor = np.argmax(fixed_grad_neighbor, axis=1)
indexing_range = np.arange(fixed_grad_neighbor.shape[0])
a_link_to_fixed_grad = neighbor_links[indexing_range, which_neighbor]
corners = np.logical_not(true_fix_nodes)
assert np.all(
fixed_grad_neighbor[indexing_range, which_neighbor][corners])
self._fixed_gradient_boundary_node_links[
corners] = a_link_to_fixed_grad[corners]
def _create_fixed_gradient_boundary_node_anchor_node(self):
"""
Builds a data structure to hold the nodes which anchor the
values of any FIXED_GRADIENT_BOUNDARY nodes in the grid, i.e., those
at the other ends of the FIXED_LINKS.
An AssertionError will be raised if for some reason a
FIXED_GRADIENT_BOUNDARY node exists which has neither a
FIXED_GRADIENT_BOUNDARY neighbor nor a FIXED_LINK.
"""
self._fixed_grad_links_created = True
fix_grad_nodes = self.fixed_gradient_boundary_nodes
self._fixed_gradient_boundary_node_anchor_node = np.empty_like(
fix_grad_nodes)
heads_and_tails = np.empty((fix_grad_nodes.size, 2))
which_one = np.empty_like(heads_and_tails, dtype=bool)
heads_and_tails[:, 0] = self.node_at_link_head[
self.fixed_gradient_boundary_node_fixed_link]
heads_and_tails[:, 1] = self.node_at_link_tail[
self.fixed_gradient_boundary_node_fixed_link]
which_one[:, 0] = heads_and_tails[:, 0] == fix_grad_nodes
which_one[:, 1] = heads_and_tails[:, 1] == fix_grad_nodes
assert np.all(which_one.sum(axis=1) == 1)
self._fixed_gradient_boundary_node_anchor_node = heads_and_tails[
np.logical_not(which_one)]
@property
@return_readonly_id_array
@cache_result_in_object()
def fixed_value_boundary_nodes(self):
"""Get array of fixed value boundary nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5), 1.)
Initially all the perimeter nodes are fixed value boundary.
>>> grid.fixed_value_boundary_nodes
array([ 0, 1, 2, 3, 4, 5, 9, 10, 14, 15, 16, 17, 18, 19])
Set left, right, and bottom edges to closed.
>>> for edge in (grid.nodes_at_left_edge, grid.nodes_at_right_edge,
... grid.nodes_at_bottom_edge):
... grid.status_at_node[edge] = grid.BC_NODE_IS_CLOSED
Now nodes on just the top edge are fixed.
>>> grid.fixed_value_boundary_nodes
array([16, 17, 18])
LLCATS: NINF BC
"""
return numpy.where(self._node_status == FIXED_VALUE_BOUNDARY)[0]
@property
@return_readonly_id_array
@cache_result_in_object()
def active_faces(self):
"""Get array of active faces.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.active_faces
array([0, 1, 2, 3, 4, 5, 6])
>>> from landlab import CLOSED_BOUNDARY
>>> grid.status_at_node[6] = CLOSED_BOUNDARY
>>> grid.active_faces
array([0, 2, 5])
LLCATS: FINF BC
"""
return self.face_at_link[self.active_links]
@property
@return_readonly_id_array
@cache_result_in_object()
def active_links(self):
"""Get array of active links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.active_links
array([ 4, 5, 7, 8, 9, 11, 12])
LLCATS: LINF BC
"""
return np.where(self.status_at_link == ACTIVE_LINK)[0]
@property
@return_readonly_id_array
@cache_result_in_object()
def fixed_links(self):
"""Get array of fixed links.
Examples
--------
>>> from landlab import RasterModelGrid, FIXED_GRADIENT_BOUNDARY
>>> grid = RasterModelGrid((3, 4))
>>> grid.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([1, 1, 1, 1,
1, 0, 0, 1,
1, 1, 1, 1], dtype=uint8)
>>> grid.fixed_links.size
0
>>> grid.status_at_node[:4] = FIXED_GRADIENT_BOUNDARY
>>> grid.status_at_node # doctest: +NORMALIZE_WHITESPACE
array([2, 2, 2, 2,
1, 0, 0, 1,
1, 1, 1, 1], dtype=uint8)
>>> grid.fixed_links
array([4, 5])
LLCATS: LINF BC
"""
return np.where(self.status_at_link == FIXED_LINK)[0]
@property
@cache_result_in_object()
@return_readonly_id_array
def node_at_core_cell(self):
"""Get array of nodes associated with core cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5), 1.)
Initially each cell's node is core.
>>> grid.node_at_core_cell
array([ 6, 7, 8,
11, 12, 13])
Setting a node to closed means its cell is also
"closed".
>>> grid.status_at_node[8] = grid.BC_NODE_IS_CLOSED
>>> grid.node_at_core_cell
array([ 6, 7, 11, 12, 13])
LLCATS: NINF CINF BC CONN
"""
return numpy.where(self.status_at_node == CORE_NODE)[0]
@property
@make_return_array_immutable
@cache_result_in_object()
def core_cells(self):
"""Get array of core cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5), 1.)
Initially all of the cells are "core".
>>> grid.core_cells
array([0, 1, 2,
3, 4, 5])
Setting a node to closed causes its cell to no longer be core.
>>> grid.status_at_node[8] = grid.BC_NODE_IS_CLOSED
>>> grid.core_cells
array([0, 1, 3, 4, 5])
LLCATS: CINF BC
"""
return self.cell_at_node[self.core_nodes]
@property
def nodes_at_link(self):
"""Get array of the nodes at each link.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4), 1.)
>>> mg.nodes_at_link # doctest: +NORMALIZE_WHITESPACE
array([[ 0, 1], [ 1, 2], [ 2, 3],
[ 0, 4], [ 1, 5], [ 2, 6], [ 3, 7],
[ 4, 5], [ 5, 6], [ 6, 7],
[ 4, 8], [ 5, 9], [ 6, 10], [ 7, 11],
[ 8, 9], [ 9, 10], [10, 11]])
LLCATS: NINF LINF CONN
"""
return self._nodes_at_link
@property
def node_at_link_head(self):
"""Get array of the node at each link head (*to-node*).
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.node_at_link_head[:5]
array([1, 2, 3, 4, 5])
LLCATS: NINF LINF CONN
"""
return self._nodes_at_link[:, 1]
@property
def node_at_link_tail(self):
"""Get array of the node at each link tail (*from-node*).
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.node_at_link_tail[:5]
array([0, 1, 2, 3, 0])
LLCATS: NINF LINF CONN
"""
return self._nodes_at_link[:, 0]
@property
def face_at_link(self):
"""Get array of faces associated with links.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid, BAD_INDEX_VALUE
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.face_at_link[5:7]
array([0, 1])
>>> np.all(mg.face_at_link[:5]==BAD_INDEX_VALUE)
True
LLCATS: FINF LINF CONN
"""
try:
return self._face_at_link
except AttributeError:
return self._create_face_at_link()
@property
def link_at_face(self):
"""Get array of links associated with faces.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.link_at_face[0:3]
array([5, 6, 7])
LLCATS: LINF FINF CONN
"""
try:
return self._link_at_face
except AttributeError:
return self._create_link_at_face()
@property
def number_of_nodes(self):
"""Total number of nodes.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_nodes
20
LLCATS: NINF
"""
return len(self._cell_at_node)
@property
def number_of_corners(self):
"""Total number of corners.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_corners
12
LLCATS: CNINF
"""
return self.number_of_patches
@property
def number_of_cells(self):
"""Total number of cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_cells
6
LLCATS: CINF
"""
return len(self._node_at_cell)
@property
def number_of_links(self):
"""Total number of links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.number_of_links
17
LLCATS: LINF
"""
return len(self.nodes_at_link)
@property
def number_of_faces(self):
"""Total number of faces.
Returns
-------
int
Total number of faces in the grid.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.number_of_faces
7
LLCATS: FINF
"""
return len(self.link_at_face)
@property
def number_of_active_faces(self):
"""Total number of active faces.
Returns
-------
int
Total number of active faces in the grid.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4))
>>> grid.number_of_active_faces
7
The number of active faces is updated when a node status changes.
>>> from landlab import CLOSED_BOUNDARY
>>> grid.status_at_node[6] = CLOSED_BOUNDARY
>>> grid.number_of_active_faces
3
LLCATS: FINF BC
"""
return self.active_faces.size
@property
def number_of_core_nodes(self):
"""Number of core nodes.
The number of core nodes on the grid (i.e., excluding all boundary
nodes).
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_core_nodes
6
>>> grid.status_at_node[7] = CLOSED_BOUNDARY
>>> grid.number_of_core_nodes
5
LLCATS: NINF BC
"""
return self.core_nodes.size
@property
def number_of_core_cells(self):
"""Number of core cells.
A core cell excludes all boundary cells.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> grid = RasterModelGrid((4, 5))
>>> grid.number_of_core_cells
6
>>> grid.status_at_node[7] = CLOSED_BOUNDARY
>>> grid.number_of_core_cells
5
LLCATS: CINF BC
"""
return self.core_cells.size
@property
def number_of_active_links(self):
"""Number of active links.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.number_of_active_links
17
>>> for edge in (mg.nodes_at_left_edge, mg.nodes_at_right_edge,
... mg.nodes_at_bottom_edge):
... mg.status_at_node[edge] = CLOSED_BOUNDARY
>>> mg.number_of_active_links
10
LLCATS: LINF BC
"""
return self.active_links.size
@property
def number_of_fixed_links(self):
"""Number of fixed links.
Examples
--------
>>> from landlab import RasterModelGrid, FIXED_GRADIENT_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.number_of_fixed_links
0
>>> mg.status_at_node[mg.nodes_at_top_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.number_of_fixed_links
3
LLCATS: LINF BC
"""
return self.fixed_links.size
def number_of_elements(self, name):
"""Number of instances of an element.
Get the number of instances of a grid element in a grid.
Parameters
----------
name : {'node', 'cell', 'link', 'face', 'core_node', 'core_cell',
'active_link', 'active_face'}
Name of the grid element.
Returns
-------
int
Number of elements in the grid.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.number_of_elements('node')
20
>>> mg.number_of_elements('core_cell')
6
>>> mg.number_of_elements('link')
31
>>> mg.number_of_elements('active_link')
17
>>> mg.status_at_node[8] = CLOSED_BOUNDARY
>>> mg.number_of_elements('link')
31
>>> mg.number_of_elements('active_link')
13
LLCATS: GINF
"""
try:
return getattr(self, _ARRAY_LENGTH_ATTRIBUTES[name])
except KeyError:
raise TypeError(
'{name}: element name not understood'.format(name=name))
@property
@make_return_array_immutable
def node_x(self):
"""Get array of the x-coordinates of nodes.
See also
--------
x_of_node
Equivalent method.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.node_x.reshape((4, 5))
array([[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.]])
LLCATS: NINF MEAS
"""
return self._xy_of_node[:, 0]
@property
@make_return_array_immutable
def node_y(self):
"""Get array of the y-coordinates of nodes.
See also
--------
y_of_node
Equivalent method.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.node_y.reshape((4, 5))
array([[ 0., 0., 0., 0., 0.],
[ 2., 2., 2., 2., 2.],
[ 4., 4., 4., 4., 4.],
[ 6., 6., 6., 6., 6.]])
LLCATS: NINF MEAS
"""
return self._xy_of_node[:, 1]
@property
@make_return_array_immutable
def xy_of_node(self):
"""Get array of the x- and y-coordinates of nodes.
See also
--------
x_of_node, y_of_node
Equivalent methods for just x and y coordinates.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((3, 4), (2., 3.))
>>> grid.xy_of_node # doctest: +NORMALIZE_WHITESPACE
array([[ 0., 0.], [ 3., 0.], [ 6., 0.], [ 9., 0.],
[ 0., 2.], [ 3., 2.], [ 6., 2.], [ 9., 2.],
[ 0., 4.], [ 3., 4.], [ 6., 4.], [ 9., 4.]])
>>> np.all(grid.xy_of_node[:, 0] == grid.x_of_node)
True
>>> np.all(grid.xy_of_node[:, 1] == grid.y_of_node)
True
LLCATS: NINF MEAS
"""
return self._xy_of_node
@property
@make_return_array_immutable
def x_of_node(self):
"""Get array of the x-coordinates of nodes.
See also
--------
node_x
Equivalent method.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.x_of_node.reshape((4, 5))
array([[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.],
[ 0., 3., 6., 9., 12.]])
LLCATS: NINF MEAS
"""
return self._xy_of_node[:, 0]
@property
@make_return_array_immutable
def y_of_node(self):
"""Get array of the y-coordinates of nodes.
See also
--------
node_y
Equivalent method.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.y_of_node.reshape((4, 5))
array([[ 0., 0., 0., 0., 0.],
[ 2., 2., 2., 2., 2.],
[ 4., 4., 4., 4., 4.],
[ 6., 6., 6., 6., 6.]])
LLCATS: NINF MEAS
"""
return self._xy_of_node[:, 1]
@property
@make_return_array_immutable
def x_of_cell(self):
"""Get array of the x-coordinates of nodes at cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.x_of_cell.reshape((2, 3))
array([[ 3., 6., 9.],
[ 3., 6., 9.]])
LLCATS: CINF MEAS
"""
return self.x_of_node[self.node_at_cell]
@property
@make_return_array_immutable
def y_of_cell(self):
"""Get array of the y-coordinates of nodes at cells.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.y_of_cell.reshape((2, 3))
array([[ 2., 2., 2.],
[ 4., 4., 4.]])
LLCATS: CINF MEAS
"""
return self.y_of_node[self.node_at_cell]
@property
@cache_result_in_object()
@make_return_array_immutable
def x_of_link(self):
"""Get array of the x-coordinates of link midpoints.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.x_of_link # doctest: +NORMALIZE_WHITESPACE
array([ 1.5, 4.5, 7.5, 10.5, 0. , 3. , 6. , 9. , 12. ,
1.5, 4.5, 7.5, 10.5, 0. , 3. , 6. , 9. , 12. ,
1.5, 4.5, 7.5, 10.5, 0. , 3. , 6. , 9. , 12. ,
1.5, 4.5, 7.5, 10.5])
LLCATS: LINF MEAS
"""
return np.mean(self.x_of_node[self.nodes_at_link], axis=1)
@property
@cache_result_in_object()
@make_return_array_immutable
def y_of_link(self):
"""Get array of the y-coordinates of link midpoints.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.y_of_link # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0., 0., 1., 1., 1., 1., 1.,
2., 2., 2., 2., 3., 3., 3., 3., 3.,
4., 4., 4., 4., 5., 5., 5., 5., 5.,
6., 6., 6., 6.])
LLCATS: LINF MEAS
"""
return np.mean(self.y_of_node[self.nodes_at_link], axis=1)
@property
@cache_result_in_object()
@make_return_array_immutable
def x_of_face(self):
"""Get array of the x-coordinates of face midpoints.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.x_of_face # doctest: +NORMALIZE_WHITESPACE
array([ 3. , 6. , 9. , 1.5, 4.5, 7.5, 10.5,
3. , 6. , 9. , 1.5, 4.5, 7.5, 10.5,
3. , 6. , 9. ])
LLCATS: FINF MEAS
"""
return self.x_of_link[self.link_at_face]
@property
@cache_result_in_object()
@make_return_array_immutable
def y_of_face(self):
"""Get array of the y-coordinates of face midpoints.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.y_of_face # doctest: +NORMALIZE_WHITESPACE
array([ 1., 1., 1., 2., 2., 2., 2., 3., 3., 3.,
4., 4., 4., 4., 5., 5., 5.])
LLCATS: FINF MEAS
"""
return self.y_of_link[self.link_at_face]
@make_return_array_immutable
def node_axis_coordinates(self, axis=0):
"""Get the coordinates of nodes along a particular axis.
Return node coordinates from a given *axis* (defaulting to 0). Axis
numbering is the same as that for numpy arrays. That is, the zeroth
axis is along the rows, and the first along the columns.
Parameters
----------
axis : int, optional
Coordinate axis.
Returns
-------
ndarray
Coordinates of nodes for a given axis.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.node_axis_coordinates(0) # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0., 0., 0.,
1., 1., 1., 1., 1.,
2., 2., 2., 2., 2.,
3., 3., 3., 3., 3.])
>>> grid.node_axis_coordinates(1) # doctest: +NORMALIZE_WHITESPACE
array([ 0., 1., 2., 3., 4.,
0., 1., 2., 3., 4.,
0., 1., 2., 3., 4.,
0., 1., 2., 3., 4.])
LLCATS: GINF NINF MEAS
"""
AXES = ('node_y', 'node_x')
try:
return getattr(self, AXES[axis])
except IndexError:
raise ValueError("'axis' entry is out of bounds")
@property
def axis_units(self):
"""Get units for each axis.
Returns
-------
tuple of str
The units (as a string) for each of a grid's coordinates.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((4, 5), (2., 3.))
>>> mg.axis_units
('-', '-')
>>> mg.axis_units = ('km', 'km')
>>> mg.axis_units
('km', 'km')
LLCATS: GINF
"""
return self._axis_units
@axis_units.setter
def axis_units(self, new_units):
"""Set the units for each coordinate axis."""
if len(new_units) != self.ndim:
raise ValueError('length of units does not match grid dimension')
self._axis_units = tuple(new_units)
@property
def axis_name(self):
"""Get the name of each coordinate axis.
Returns
-------
tuple of str
The names of each axis.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.axis_name
('y', 'x')
>>> grid.axis_name = ('lon', 'lat')
>>> grid.axis_name
('lon', 'lat')
LLCATS: GINF
"""
return self._axis_name
@axis_name.setter
def axis_name(self, new_names):
"""Set the names of a grid's coordinate axes.
Raises
------
ValueError
If the number of dimension do not match.
Examples
--------
>>> from landlab import RasterModelGrid
>>> grid = RasterModelGrid((4, 5))
>>> grid.axis_name = ('lon', 'lat')
>>> grid.axis_name
('lon', 'lat')
"""
if len(new_names) != self.ndim:
raise ValueError('length of names does not match grid dimension')
self._axis_name = tuple(new_names)
@property
@make_return_array_immutable
@cache_result_in_object()
def status_at_link(self):
"""Get array of the status of all links.
Examples
--------
>>> from landlab import RasterModelGrid
>>> from landlab import CLOSED_BOUNDARY, FIXED_GRADIENT_BOUNDARY
>>> mg = RasterModelGrid((4, 5), 1.)
>>> mg.status_at_node[mg.nodes_at_left_edge] = CLOSED_BOUNDARY
>>> mg.status_at_node[mg.nodes_at_right_edge] = FIXED_GRADIENT_BOUNDARY
>>> mg.status_at_link # doctest: +NORMALIZE_WHITESPACE
array([4, 4, 4, 4, 4, 0, 0, 0, 4, 4, 0, 0, 2, 4, 0, 0, 0, 4, 4, 0, 0,
2, 4, 0, 0, 0, 4, 4, 4, 4, 4], dtype=uint8)
LLCATS: BC LINF
"""
return set_status_at_link(self.status_at_node[self.nodes_at_link])
@property
@return_readonly_id_array
def link_at_face(self):
"""Get links associated with faces.
Returns an array of the link IDs for the links that intersect
faces.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4))
>>> mg.link_at_face
array([ 4, 5, 7, 8, 9, 11, 12])
LLCATS: LINF FINF MEAS
"""
try:
return self._link_at_face
except AttributeError:
return self._create_link_at_face()
def _create_number_of_links_at_node(self):
"""Find and record how many links are attached to each node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4))
>>> mg.number_of_links_at_node
array([2, 3, 3, 2, 3, 4, 4, 3, 2, 3, 3, 2])
"""
self._number_of_links_at_node = np.zeros(self.number_of_nodes,
dtype=np.int)
node_at_link_tail = self.node_at_link_tail
node_at_link_head = self.node_at_link_head
for ln in range(self.number_of_links):
self._number_of_links_at_node[node_at_link_tail[ln]] += 1
self._number_of_links_at_node[node_at_link_head[ln]] += 1
@property
def number_of_links_at_node(self):
"""Number of links connected to each node.
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 4))
>>> mg.number_of_links_at_node
array([2, 3, 3, 2, 3, 4, 4, 3, 2, 3, 3, 2])
LLCATS: LINF NINF CONN
"""
try:
return self._number_of_links_at_node
except AttributeError:
self._create_number_of_links_at_node()
return self._number_of_links_at_node
def _create_links_and_link_dirs_at_node(self):
"""Make arrays with links and link directions at each node.
Examples
--------
>>> from landlab import HexModelGrid
>>> hg = HexModelGrid(3, 3)
>>> hg.links_at_node
array([[ 0, 3, 2, -1, -1, -1],
[ 1, 5, 4, 0, -1, -1],
[ 7, 6, 1, -1, -1, -1],
[ 8, 11, 2, -1, -1, -1],
[ 9, 13, 12, 8, 3, 4],
[10, 15, 14, 9, 5, 6],
[16, 10, 7, -1, -1, -1],
[17, 11, 12, -1, -1, -1],
[18, 17, 13, 14, -1, -1],
[18, 15, 16, -1, -1, -1]])
>>> hg.link_dirs_at_node
array([[-1, -1, -1, 0, 0, 0],
[-1, -1, -1, 1, 0, 0],
[-1, -1, 1, 0, 0, 0],
[-1, -1, 1, 0, 0, 0],
[-1, -1, -1, 1, 1, 1],
[-1, -1, -1, 1, 1, 1],
[-1, 1, 1, 0, 0, 0],
[-1, 1, 1, 0, 0, 0],
[-1, 1, 1, 1, 0, 0],
[ 1, 1, 1, 0, 0, 0]], dtype=int8)
"""
# Find maximum number of links per node
nlpn = self.number_of_links_at_node
# ^this fn should become member and property
max_num_links = np.amax(nlpn)
nlpn[:] = 0 # we'll zero it out, then rebuild it
# Create arrays for link-at-node information
self._links_at_node = - np.ones((self.number_of_nodes, max_num_links),
dtype=int)
self._link_dirs_at_node = np.zeros((self.number_of_nodes,
max_num_links), dtype=np.int8)
# Sweep over all links
node_at_link_tail = self.node_at_link_tail
node_at_link_head = self.node_at_link_head
for lk in range(self.number_of_links):
# Find the IDs of the tail and head nodes
t = node_at_link_tail[lk]
h = node_at_link_head[lk]
# Add this link to the list for this node, set the direction
# (outgoing, indicated by -1), and increment the number found so
# far
self._links_at_node[t][nlpn[t]] = lk
self._links_at_node[h][nlpn[h]] = lk
self._link_dirs_at_node[t][nlpn[t]] = -1
self._link_dirs_at_node[h][nlpn[h]] = 1
nlpn[t] += 1
nlpn[h] += 1
# Sort the links at each node by angle, counter-clockwise from +x
self._sort_links_at_node_by_angle()
@property
@make_return_array_immutable
def angle_of_link(self):
"""Find and return the angle of a link about the node at the link tail.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.angle_of_link / np.pi * 3. # 60 degree segments
array([ 0., 2., 1., 2., 1., 0., 0., 1., 2., 1., 2., 0.])
LLCATS: LINF MEAS
"""
try:
if not self._angle_of_link_created:
self._create_angle_of_link()
except AttributeError:
self._create_angle_of_link()
return self._angle_of_link_bothends[-1]
@property
@make_return_array_immutable
def angle_of_link_about_head(self):
"""Find and return the angle of a link about the node at the link head.
Because links have direction, their angle can be specified as an angle
about either the node at the link head, or the node at the link tail.
The default behaviour of `angle_of_link` is to return the angle about
the link tail, but this method gives the angle about the link head.
Examples
--------
>>> from landlab import HexModelGrid
>>> mg = HexModelGrid(3, 2)
>>> mg.angle_of_link_about_head[:3] / np.pi * 3. # 60 deg segments
array([ 3., 5., 4.])
LLCATS: LINF MEAS
"""
try:
if not self._angle_of_link_created:
self._create_angle_of_link()
except AttributeError:
self._create_angle_of_link()
return self._angle_of_link_bothends[1]
def _create_angle_of_link(self):
"""
Build a dict with keys (-1, 1) that contains the angles of the links
about both the link heads (1) and link tails (-1).
Notes
-----
dx and dy are the x and y differences between the link endpoints.
Multiplying this by dirs orients these offsets correctly (i.e.,
the correct node is the origin). The call to arctan2 calculates
the angle in radians. Angles in the lower two quadrants will be
negative and clockwise from the positive x axis. We want them
counter-clockwise, which is what the last couple of lines before
the return statement do.
LLCATS: LINF MEAS
"""
self._angle_of_link_bothends = {}
for dirs in (-1, 1):
dx = -dirs * (self.node_x[self.node_at_link_head] -
self.node_x[self.node_at_link_tail])
dy = -dirs * (self.node_y[self.node_at_link_head] -
self.node_y[self.node_at_link_tail])
ang = np.arctan2(dy, dx)
(lower_two_quads, ) = np.where(ang < 0.0)
ang[lower_two_quads] = (2 * np.pi) + ang[lower_two_quads]
(no_link, ) = np.where(dirs == 0)
ang[no_link] = 2*np.pi
self._angle_of_link_bothends[dirs] = ang.copy()
self._angle_of_link_created = True
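# Worked example of the quadrant correction above (illustrative values only):
# np.arctan2(-1., 1.) returns -pi/4; adding 2*pi gives 7*pi/4, the same
# direction measured counter-clockwise from the positive x axis.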
def _sort_links_at_node_by_angle(self):
"""Sort the links_at_node and link_dirs_at_node arrays by angle.
"""
ang = self.angle_of_link[self.links_at_node]
linkhead_at_node = self.link_dirs_at_node == 1
ang[linkhead_at_node] = self.angle_of_link_about_head[
self.links_at_node[linkhead_at_node]]
ang[self.link_dirs_at_node == 0] = 100.
argsorted = np.argsort(ang, axis=1)
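# convert the per-row sort order into indices into the flattened
# (nodes x max_links) arrays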
indices = np.indices(ang.shape)[0] * ang.shape[1] + argsorted
self._links_at_node.flat = self._links_at_node.flat[indices.flatten()]
self._link_dirs_at_node.flat = self._link_dirs_at_node.flat[
indices.flatten()]
def resolve_values_on_links(self, link_values, out=None):
"""Resolve the xy-components of links.
Resolves values defined on links into the x and y directions.
Returns values_along_x, values_along_y
LLCATS: LINF
"""
return gfuncs.resolve_values_on_links(self, link_values, out=out)
@deprecated(use='no replacement', version=1.0)
def resolve_values_on_active_links(self, link_values, out=None):
"""Resolve the xy-components of active links.
Resolves values defined on active links into the x and y
directions.
Returns values_along_x, values_along_y
LLCATS: LINF
"""
return gfuncs.resolve_values_on_active_links(self, link_values,
out=out)
def link_at_node_is_upwind(self, values, out=None):
"""
Return a boolean array the same shape as :func:`links_at_node` which flags
links that are upwind of the node as True.
link_at_node_is_upwind iterates across the grid and identifies the link
values at each link connected to a node. It then uses the
link_dirs_at_node data structure to identify links bringing flux into
the node, and returns a boolean array the same shape as
links_at_node flagging these links. For example, for a raster, the returned
array will have shape (nnodes, 4).
Parameters
----------
values : str or array
Name of variable field defined at links, or array of values at
links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Must be correct shape and boolean dtype.
Returns
-------
ndarray
Boolean of which links are upwind at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.at_link['grad'] = np.array([-1., -2., -1.,
... -2., -3., -4., -5.,
... -1., -2., -1.,
... -1., -2., -3., -4.,
... -1., -2., -1.])
>>> rmg.link_at_node_is_upwind('grad')
array([[False, False, False, False],
[False, False, True, False],
[False, False, True, False],
[False, False, True, False],
[False, False, False, True],
[False, False, True, True],
[False, False, True, True],
[False, False, True, True],
[False, False, False, True],
[False, False, True, True],
[False, False, True, True],
[False, False, True, True]], dtype=bool)
LLCATS: LINF NINF CONN
"""
if out is None:
out = np.empty_like(self.links_at_node, dtype=bool)
else:
assert out.shape == self.links_at_node.shape
assert out.dtype == bool
if type(values) is str:
vals = self.at_link[values]
else:
assert len(values) == self.number_of_links
vals = values
values_at_links = vals[self.links_at_node] * self.link_dirs_at_node
# this procedure makes incoming links NEGATIVE
np.less(values_at_links, 0., out=out)
return out
def link_at_node_is_downwind(self, values, out=None):
"""
Return a boolean array the same shape as :func:`links_at_node` which flags
links that are downwind of the node as True.
link_at_node_is_downwind iterates across the grid and identifies the
link values at each link connected to a node. It then uses the
link_dirs_at_node data structure to identify links carrying flux out of
the node, and returns a boolean array the same shape as
links_at_node flagging these links. For example, for a raster, the returned
array will have shape (nnodes, 4).
Parameters
----------
values : str or array
Name of variable field defined at links, or array of values at
links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Must be correct shape and boolean dtype.
Returns
-------
ndarray
Boolean of which links are downwind at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.at_link['grad'] = np.array([-1., -2., -1.,
... -2., -3., -4., -5.,
... -1., -2., -1.,
... -1., -2., -3., -4.,
... -1., -2., -1.])
>>> rmg.link_at_node_is_downwind('grad')
array([[ True, True, False, False],
[ True, True, False, False],
[ True, True, False, False],
[False, True, False, False],
[ True, True, False, False],
[ True, True, False, False],
[ True, True, False, False],
[False, True, False, False],
[ True, False, False, False],
[ True, False, False, False],
[ True, False, False, False],
[False, False, False, False]], dtype=bool)
LLCATS: LINF NINF CONN
"""
if out is None:
out = np.empty_like(self.links_at_node, dtype=bool)
else:
assert out.shape == self.links_at_node.shape
assert out.dtype == bool
if type(values) is str:
vals = self.at_link[values]
else:
assert len(values) == self.number_of_links
vals = values
values_at_links = vals[self.links_at_node] * self.link_dirs_at_node
# this procedure makes incoming links NEGATIVE
np.greater(values_at_links, 0., out=out)
return out
def upwind_links_at_node(self, values, bad_index=-1):
"""
Return an (nnodes, X) shaped array of the IDs of the links that are upwind
of each node, according to *values* (field or array).
X is the maximum number of upwind links at any node. Nodes with fewer upwind
links than this have additional slots filled with *bad_index*. Links
are ordered anticlockwise from east.
Parameters
----------
values : str or array
Name of variable field defined at links, or array of values at
links.
bad_index : int
Index to place in array indicating no link.
Returns
-------
ndarray
Array of upwind link IDs
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> rmg.at_link['grad'] = np.array([-1., -2., -1.,
... -2., -3., -4., -5.,
... -1., -2., -1.,
... -1., -2., -3., -4.,
... -1., -2., -1.])
>>> rmg.upwind_links_at_node('grad', bad_index=-1)
array([[-1, -1],
[ 0, -1],
[ 1, -1],
[ 2, -1],
[ 3, -1],
[ 7, 4],
[ 8, 5],
[ 9, 6],
[10, -1],
[14, 11],
[15, 12],
[16, 13]])
LLCATS: LINF NINF CONN
"""
if type(values) is str:
vals = self.at_link[values]
else:
assert len(values) == self.number_of_links
vals = values
values_at_links = vals[self.links_at_node] * self.link_dirs_at_node
# this procedure makes incoming links NEGATIVE
unordered_IDs = np.where(values_at_links < 0., self.links_at_node,
bad_index)
bad_IDs = unordered_IDs == bad_index
nnodes = self.number_of_nodes
flat_sorter = (
|
np.argsort(bad_IDs, axis=1)
|
numpy.argsort
|
import multiprocessing as mp
import os
import warnings
from random import shuffle
from rdkit.Chem import MACCSkeys
import numpy as np
import pandas as pd
from rdkit.Chem import MolFromSmiles,AllChem
from rdkit.Chem.AllChem import GetMorganFingerprintAsBitVect
from rdkit import DataStructs
from sklearn.ensemble import RandomForestClassifier
import joblib
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score, roc_auc_score, cohen_kappa_score
from sklearn.metrics import confusion_matrix, silhouette_score, average_precision_score
import datetime
import argparse
import pandas as pd
import itertools
import numpy as np
import pickle
warnings.filterwarnings("ignore")
basePath=os.getcwd()
dictionary0 = {
'n_estimators':[10,50,100,300,700,1000],
'criterion':['gini','entropy'],
'max_features':['sqrt','log2']
}
hyperParams0 = pd.DataFrame(list(itertools.product(*dictionary0.values())), columns=dictionary0.keys())
hyperParams0.index=np.arange(len(hyperParams0.index.values))
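# itertools.product expands the dictionary into the full hyper-parameter grid:
# 6 n_estimators x 2 criteria x 2 max_features = 24 candidate settings,
# which matches the default -pStart 0 / -pEnd 24 range parsed below.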
parser = argparse.ArgumentParser()
parser.add_argument("-cl_file", help="cl per target", type=str, default=basePath+'/test_data/cl/')
parser.add_argument("-pertarget_file", help="smi per target", type=str, default=basePath+'/test_data/pertargetdata/')
parser.add_argument("-datasetNames", help="Dataset Name",type=str, default="ecfp6fcfp6MACCS")
parser.add_argument("-saveBasePath", help="saveBasePath", type=str, default=basePath+'/res_test_data/')
parser.add_argument("-ofolds", help="Outer Folds", nargs='+', type=int, default=[0, 1, 2])
parser.add_argument("-ifolds", help="Inner Folds", nargs='+', type=int, default=[0, 1, 2])
parser.add_argument("-pStart", help="Parameter Start Index", type=int, default=0)
parser.add_argument("-pEnd", help="Parameter End Index", type=int, default=24)
args = parser.parse_args()
cl_file = args.cl_file
pertarget_file = args.pertarget_file
datasetNames = args.datasetNames
saveBasePath = args.saveBasePath
compOuterFolds = args.ofolds
compInnerFolds = args.ifolds
paramStart = args.pStart
paramEnd = args.pEnd
compParams = list(range(paramStart, paramEnd))
print()
def bestSettingsSimple(perfFiles, nrParams):
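# Collect the pickled per-parameter AUC results for each (outer fold, inner fold)
# pair and return the index of the hyper-parameter setting with the highest AUC
# averaged over folds (a summary of the logic below, inferred from the code).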
aucFold=[]
for outind in range(0,3):
for foldInd in range(0, 2):
aucParam=[]
for paramNr in range(0, nrParams):
#try:
saveFile=open(perfFiles[outind][foldInd][paramNr], "rb")
aucRun=pickle.load(saveFile)
saveFile.close()
#except:
# pass
if(len(aucRun)>0):
aucParam.append(aucRun[-1])
aucParam=np.array(aucParam)
if(len(aucParam)>0):
aucFold.append(aucParam)
aucFold=np.array(aucFold)
aucMean=np.nanmean(aucFold, axis=0)
paramInd=np.nanmean(aucMean, axis=1).argmax()
return (paramInd, aucMean, aucFold)
def ClusterCV(csv_file):
tar_id = csv_file.split('.')[0]
file_name =pertarget_file + csv_file
clusterSampleFilename = os.path.join(cl_file, 'cl' + tar_id + ".info")
chembl_data = file_reader(file_name)
target_name = chembl_data.iloc[0,0]
labels = chembl_data.active_label
features = batchECFP(chembl_data.canonical_smiles)
clusterTab = pd.read_csv(clusterSampleFilename, header=None, index_col=False, sep=",")
df = clusterTab.values
folds = df[:, 0]
return folds, features, labels, target_name
def batchECFP(smiles, radius=3, nBits=2048):
smiles = np.array(smiles)
n = len(smiles)
fingerprints_0 = np.zeros((n, nBits), dtype=int)
fingerprints_1 =
|
np.zeros((n, nBits), dtype=int)
|
numpy.zeros
|
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.path as mpath
from matplotlib.patheffects import Stroke
import shapely.geometry as sgeom
import matplotlib.patches as mpatches
import os
import pandas as pd
import numpy as np
import gdal, osr
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from scipy.spatial import ConvexHull
from pyddem.vector_tools import SRTMGL1_naming_to_latlon, latlon_to_SRTMGL1_naming
import pyddem.vector_tools as ot
from pybob.image_tools import create_mask_from_shapefile
from pybob.GeoImg import GeoImg
from cartopy.io.shapereader import Reader
from cartopy.feature import ShapelyFeature
from glob import glob
mpl.use('Agg')
plt.rcParams.update({'font.size': 5})
plt.rcParams.update({'lines.linewidth':0.35})
plt.rcParams.update({'axes.linewidth':0.35})
plt.rcParams.update({'lines.markersize':2.5})
plt.rcParams.update({'axes.labelpad':1.5})
# shp_buff = '/home/atom/data/inventory_products/RGI/00_rgi60/rgi60_buff_diss.shp'
shp_buff = '/data/icesat/travail_en_cours/romain/figures/rgi60_buff_diss.shp'
# in_csv='/home/atom/ongoing/work_worldwide/vol/final/dh_world_tiles_1deg.csv'
in_csv = '/data/icesat/travail_en_cours/romain/results/vol_final/dh_world_tiles_1deg.csv'
fig_width_inch = 3.1
fig = plt.figure(figsize=(2*fig_width_inch, 2.35 * fig_width_inch / 1.9716))
print('Plotting: '+in_csv)
df=pd.read_csv(in_csv)
df=df[df.category=='all']
# out_png = '/home/atom/ongoing/work_worldwide/figures/revised/ED_Figure_6.png'
# out_png = '/data/icesat/travail_en_cours/romain/figures/ED_Figure_6.png'
df_tmp = df[df.period=='2000-01-01_2005-01-01']
tiles = [latlon_to_SRTMGL1_naming(df_tmp.tile_latmin.values[i],df_tmp.tile_lonmin.values[i]) for i in range(len(df_tmp))]
areas = df_tmp.area.tolist()
# areas = [area/1000000 for area in areas]
#FOR EXAMPLE: CHANGING HERE TO VALID OBS + COLORBAR
dhs_1 = df[df.period=='2000-01-01_2005-01-01'].valid_obs.tolist()
dhs_2 = df[df.period=='2005-01-01_2010-01-01'].valid_obs.tolist()
dhs_3 = df[df.period=='2010-01-01_2015-01-01'].valid_obs.tolist()
dhs_4 = df[df.period=='2015-01-01_2020-01-01'].valid_obs.tolist()
errs_1 = df[df.period=='2000-01-01_2005-01-01'].err_dhdt.tolist()
errs_2 = df[df.period=='2005-01-01_2010-01-01'].err_dhdt.tolist()
errs_3 = df[df.period=='2010-01-01_2015-01-01'].err_dhdt.tolist()
errs_4 = df[df.period=='2015-01-01_2020-01-01'].err_dhdt.tolist()
areas = [area/1000000 for _, area in sorted(zip(tiles,areas))]
dhs_1 = [dh for _, dh in sorted(zip(tiles,dhs_1))]
dhs_2 = [dh for _, dh in sorted(zip(tiles,dhs_2))]
dhs_3 = [dh for _, dh in sorted(zip(tiles,dhs_3))]
dhs_4 = [dh for _, dh in sorted(zip(tiles,dhs_4))]
errs_1 = [err for _, err in sorted(zip(tiles,errs_1))]
errs_2 = [err for _, err in sorted(zip(tiles,errs_2))]
errs_3 = [err for _, err in sorted(zip(tiles,errs_3))]
errs_4 = [err for _, err in sorted(zip(tiles,errs_4))]
tiles = sorted(tiles)
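# the zip/sort pattern above re-orders every list by tile name so that areas,
# valid_obs counts and errors stay aligned tile by tile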
group_by_spec = False
def latlon_to_2x2_tile_naming(lat, lon):
lon_sw = np.floor(lon / 2) * 2
lat_sw = np.floor(lat/2) * 2
if lat_sw >= 0:
str_lat = 'N'
else:
str_lat = 'S'
if lon_sw >= 0:
str_lon = 'E'
else:
str_lon = 'W'
tile_name_tdx = str_lat + str(int(abs(lat_sw))).zfill(2) + str_lon + str(int(abs(lon_sw))).zfill(3)
return tile_name_tdx
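# For example (illustrative check): latlon_to_2x2_tile_naming(46.8, 10.3) returns 'N46E010' and
# latlon_to_2x2_tile_naming(-1.5, -70.2) returns 'S02W072', i.e. the south-west corner of the
# 2x2 degree tile containing the point.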
def latlon_to_spec_tile_naming(lat,lon):
if np.abs(lat)>=74:
lon_sw = np.floor((lon-0.5)/2)  # api: numpy.floor
|
import numpy as _numpy
import cupy as _cupy
from cupy_backends.cuda.libs import cublas as _cublas
from cupy_backends.cuda.libs import cusolver as _cusolver
from cupy.cuda import device as _device
def gesv(a, b):
"""Solve a linear matrix equation using cusolverDn<t>getr[fs]().
Computes the solution to a system of linear equation ``ax = b``.
Args:
a (cupy.ndarray): The matrix with dimension ``(M, M)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(M)`` or ``(M, K)``.
Note: ``a`` and ``b`` will be overwritten.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim not in (1, 2):
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != a.shape[1]:
raise ValueError('a must be a square matrix.')
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
if a.dtype != b.dtype:
raise TypeError('dtype mismatch (a: {}, b: {})'.
format(a.dtype, b.dtype))
dtype = a.dtype
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise TypeError('unsupported dtype (actual:{})'.format(a.dtype))
helper = getattr(_cusolver, t + 'getrf_bufferSize')
getrf = getattr(_cusolver, t + 'getrf')
getrs = getattr(_cusolver, t + 'getrs')
n = b.shape[0]
nrhs = b.shape[1] if b.ndim == 2 else 1
if a._f_contiguous:
trans = _cublas.CUBLAS_OP_N
elif a._c_contiguous:
trans = _cublas.CUBLAS_OP_T
else:
raise ValueError('a must be F-contiguous or C-contiguous.')
if not b._f_contiguous:
raise ValueError('b must be F-contiguous.')
handle = _device.get_cusolver_handle()
dipiv = _cupy.empty(n, dtype=_numpy.int32)
dinfo = _cupy.empty(1, dtype=_numpy.int32)
lwork = helper(handle, n, n, a.data.ptr, n)
dwork = _cupy.empty(lwork, dtype=a.dtype)
# LU factorization (A = L * U)
getrf(handle, n, n, a.data.ptr, n, dwork.data.ptr, dipiv.data.ptr,
dinfo.data.ptr)
_cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
getrf, dinfo)
# Solves Ax = b
getrs(handle, trans, n, nrhs, a.data.ptr, n,
dipiv.data.ptr, b.data.ptr, n, dinfo.data.ptr)
_cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
getrs, dinfo)
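# Illustrative usage sketch (not part of the original module): gesv() solves a dense square
# system in place. Inputs must share a supported dtype and b must be Fortran-contiguous;
# both arrays are overwritten, so fresh copies are used here.
def _example_gesv():
    a = _cupy.random.rand(4, 4).copy(order='F')
    b = _cupy.random.rand(4).copy(order='F')
    gesv(a, b)  # the solution x of a @ x = b is written into b
    return b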
def gels(a, b):
"""Solves over/well/under-determined linear systems.
Computes the least-squares solution to the equation ``ax = b`` by QR factorization
using cusolverDn<t>geqrf().
Args:
a (cupy.ndarray): The matrix with dimension ``(M, N)``.
b (cupy.ndarray): The matrix with dimension ``(M)`` or ``(M, K)``.
Returns:
cupy.ndarray:
The matrix with dimension ``(N)`` or ``(N, K)``.
"""
if a.ndim != 2:
raise ValueError('a.ndim must be 2 (actual: {})'.format(a.ndim))
if b.ndim == 1:
nrhs = 1
elif b.ndim == 2:
nrhs = b.shape[1]
else:
raise ValueError('b.ndim must be 1 or 2 (actual: {})'.format(b.ndim))
if a.shape[0] != b.shape[0]:
raise ValueError('shape mismatch (a: {}, b: {}).'.
format(a.shape, b.shape))
if a.dtype != b.dtype:
raise ValueError('dtype mismatch (a: {}, b: {}).'.
format(a.dtype, b.dtype))
dtype = a.dtype
if dtype == 'f':
t = 's'
elif dtype == 'd':
t = 'd'
elif dtype == 'F':
t = 'c'
elif dtype == 'D':
t = 'z'
else:
raise ValueError('unsupported dtype (actual: {})'.format(dtype))
geqrf_helper = getattr(_cusolver, t + 'geqrf_bufferSize')
geqrf = getattr(_cusolver, t + 'geqrf')
trsm = getattr(_cublas, t + 'trsm')
if t in 'sd':
ormqr_helper = getattr(_cusolver, t + 'ormqr_bufferSize')
ormqr = getattr(_cusolver, t + 'ormqr')
else:
ormqr_helper = getattr(_cusolver, t + 'unmqr_bufferSize')
ormqr = getattr(_cusolver, t + 'unmqr')
no_trans = _cublas.CUBLAS_OP_N
if dtype.char in 'fd':
trans = _cublas.CUBLAS_OP_T
else:
trans = _cublas.CUBLAS_OP_C
m, n = a.shape
mn_min = min(m, n)
dev_info = _cupy.empty(1, dtype=_numpy.int32)
tau = _cupy.empty(mn_min, dtype=dtype)
cusolver_handle = _device.get_cusolver_handle()
cublas_handle = _device.get_cublas_handle()
one = _numpy.array(1.0, dtype=dtype)
if m >= n: # over/well-determined systems
a = a.copy(order='F')
b = b.copy(order='F')
# geqrf (QR decomposition, A = Q * R)
ws_size = geqrf_helper(cusolver_handle, m, n, a.data.ptr, m)
workspace = _cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, m, n, a.data.ptr, m, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
_cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# ormqr (Computes Q^T * B)
ws_size = ormqr_helper(
cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs, mn_min,
a.data.ptr, m, tau.data.ptr, b.data.ptr, m)
workspace = _cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, trans, m, nrhs,
mn_min, a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, ws_size, dev_info.data.ptr)
_cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
# trsm (Solves R * X = (Q^T * B))
trsm(cublas_handle, _cublas.CUBLAS_SIDE_LEFT,
_cublas.CUBLAS_FILL_MODE_UPPER, no_trans,
_cublas.CUBLAS_DIAG_NON_UNIT, mn_min, nrhs,
one.ctypes.data, a.data.ptr, m, b.data.ptr, m)
return b[:n]
else: # under-determined systems
a = a.conj().T.copy(order='F')
bb = b
out_shape = (n,) if b.ndim == 1 else (n, nrhs)
b = _cupy.zeros(out_shape, dtype=dtype, order='F')
b[:m] = bb
# geqrf (QR decomposition, A^T = Q * R)
ws_size = geqrf_helper(cusolver_handle, n, m, a.data.ptr, n)
workspace = _cupy.empty(ws_size, dtype=dtype)
geqrf(cusolver_handle, n, m, a.data.ptr, n, tau.data.ptr,
workspace.data.ptr, ws_size, dev_info.data.ptr)
_cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
geqrf, dev_info)
# trsm (Solves R^T * Z = B)
trsm(cublas_handle, _cublas.CUBLAS_SIDE_LEFT,
_cublas.CUBLAS_FILL_MODE_UPPER, trans,
_cublas.CUBLAS_DIAG_NON_UNIT, m, nrhs,
one.ctypes.data, a.data.ptr, n, b.data.ptr, n)
# ormqr (Computes Q * Z)
ws_size = ormqr_helper(
cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n)
workspace = _cupy.empty(ws_size, dtype=dtype)
ormqr(cusolver_handle, _cublas.CUBLAS_SIDE_LEFT, no_trans, n, nrhs,
mn_min, a.data.ptr, n, tau.data.ptr, b.data.ptr, n,
workspace.data.ptr, ws_size, dev_info.data.ptr)
_cupy.linalg._util._check_cusolver_dev_info_if_synchronization_allowed(
ormqr, dev_info)
return b
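# Illustrative usage sketch (not part of the original module): least-squares solution of an
# overdetermined 6x2 system with gels(); the result has length n = a.shape[1].
def _example_gels():
    a = _cupy.random.rand(6, 2)
    b = _cupy.random.rand(6)
    x = gels(a, b)  # x minimizes ||a @ x - b||_2
    return x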
def _batched_posv(a, b):
if not _cupy.cusolver.check_availability('potrsBatched'):
raise RuntimeError('potrsBatched is not available')
dtype = _numpy.promote_types(a.dtype, b.dtype)
dtype = _numpy.promote_types(dtype, 'f')
if dtype == 'f':
potrfBatched = _cusolver.spotrfBatched
potrsBatched = _cusolver.spotrsBatched
elif dtype == 'd':
potrfBatched = _cusolver.dpotrfBatched
potrsBatched = _cusolver.dpotrsBatched
elif dtype == 'F':
potrfBatched = _cusolver.cpotrfBatched
potrsBatched = _cusolver.cpotrsBatched
elif dtype == 'D':
potrfBatched = _cusolver.zpotrfBatched
potrsBatched = _cusolver.zpotrsBatched
else:
msg = ('dtype must be float32, float64, complex64 or complex128'
' (actual: {})'.format(a.dtype))
raise ValueError(msg)
a = a.astype(dtype, order='C', copy=True)
ap = _cupy._core._mat_ptrs(a)
lda, n = a.shape[-2:]
batch_size = int(_numpy.prod(a.shape[:-2]))  # api: numpy.prod
|
#
# Created by: <NAME>, September 2002
#
import sys
import subprocess
import time
from functools import reduce
from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
assert_allclose, assert_almost_equal,
assert_array_equal)
import pytest
from pytest import raises as assert_raises
import numpy as np
from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
triu_indices)
from numpy.random import rand, randint, seed
from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
solve, ldl, norm, block_diag, qr, eigh)
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
import scipy.sparse as sps
try:
from scipy.linalg import _clapack as clapack
except ImportError:
clapack = None
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.blas import get_blas_funcs
REAL_DTYPES = [np.float32, np.float64]
COMPLEX_DTYPES = [np.complex64, np.complex128]
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
def generate_random_dtype_array(shape, dtype):
# generates a random matrix of desired data type of shape
if dtype in COMPLEX_DTYPES:
return (np.random.rand(*shape)
+ np.random.rand(*shape)*1.0j).astype(dtype)
return np.random.rand(*shape).astype(dtype)
def test_lapack_documented():
"""Test that all entries are in the doc."""
if lapack.__doc__ is None: # just in case there is a python -OO
pytest.skip('lapack.__doc__ is None')
names = set(lapack.__doc__.split())
ignore_list = set([
'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
'flapack', 'print_function', 'HAS_ILP64',
])
missing = list()
for name in dir(lapack):
if (not name.startswith('_') and name not in ignore_list and
name not in names):
missing.append(name)
assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
class TestFlapackSimple(object):
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
class TestLapack(object):
def test_flapack(self):
if hasattr(flapack, 'empty_module'):
# flapack module is empty
pass
def test_clapack(self):
if hasattr(clapack, 'empty_module'):
# clapack module is empty
pass
class TestLeastSquaresSolvers(object):
def test_gels(self):
seed(1234)
# Test fat/tall matrix argument handling - gh-issue #8329
for ind, dtype in enumerate(DTYPES):
m = 10
n = 20
nrhs = 1
a1 = rand(m, n).astype(dtype)
b1 = rand(n).astype(dtype)
gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
# Request of sizes
lwork = _compute_lwork(glslw, m, n, nrhs)
_, _, info = gls(a1, b1, lwork=lwork)
assert_(info >= 0)
_, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
assert_(info >= 0)
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gels, gels_lwork, geqrf = get_lapack_funcs(
('gels', 'gels_lwork', 'geqrf'), (a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
lwork = _compute_lwork(gels_lwork, m, n, nrhs)
lqr, x, info = gels(a1, b1, lwork=lwork)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
lqr_truth, _, _, _ = geqrf(a1)
assert_array_equal(lqr, lqr_truth)
def test_gelsd(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
-1, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
rwork_size = int(rwork)
iwork_size = iwork
x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
-1, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
assert_allclose(s,
np.array([13.035514762572043, 4.337666985231382],
dtype=dtype), rtol=25*np.finfo(dtype).eps)
def test_gelss(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([12.596017180511966,
0.583396253199685], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelss_lwork(m, n, nrhs, -1)
lwork = int(np.real(work))
v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
assert_allclose(s, np.array([13.035514762572043,
4.337666985231382], dtype=dtype),
rtol=25*np.finfo(dtype).eps)
def test_gelsy(self):
for dtype in REAL_DTYPES:
a1 = np.array([[1.0, 2.0],
[4.0, 5.0],
[7.0, 8.0]], dtype=dtype)
b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1], np.array([-14.333333333333323,
14.999999999999991],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
for dtype in COMPLEX_DTYPES:
a1 = np.array([[1.0+4.0j, 2.0],
[4.0+0.5j, 5.0-3.0j],
[7.0-2.0j, 8.0+0.7j]], dtype=dtype)
b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
(a1, b1))
m, n = a1.shape
if len(b1.shape) == 2:
nrhs = b1.shape[1]
else:
nrhs = 1
# Request of sizes
work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
lwork = int(np.real(work))
jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
lwork, False, False)
assert_allclose(x[:-1],
np.array([1.161753632288328-1.901075709391912j,
1.735882340522193+1.521240901196909j],
dtype=dtype),
rtol=25*np.finfo(dtype).eps)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
def test_geqrf_lwork(dtype, shape):
geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
m, n = shape
lwork, info = geqrf_lwork(m=m, n=n)
assert_equal(info, 0)
class TestRegression(object):
def test_ticket_1645(self):
# Check that RQ routines have correct lwork
for dtype in DTYPES:
a = np.zeros((300, 2), dtype=dtype)
gerqf, = get_lapack_funcs(['gerqf'], [a])
assert_raises(Exception, gerqf, a, lwork=2)
rq, tau, work, info = gerqf(a)
if dtype in REAL_DTYPES:
orgrq, = get_lapack_funcs(['orgrq'], [a])
assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
orgrq(rq[-2:], tau, lwork=2)
elif dtype in COMPLEX_DTYPES:
ungrq, = get_lapack_funcs(['ungrq'], [a])
assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
ungrq(rq[-2:], tau, lwork=2)
class TestDpotr(object):
def test_gh_2691(self):
# 'lower' argument of dportf/dpotri
for lower in [True, False]:
for clean in [True, False]:
np.random.seed(42)
x = np.random.normal(size=(3, 3))
a = x.dot(x.T)
dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
c, info = dpotrf(a, lower, clean=clean)
dpt = dpotri(c, lower)[0]
if lower:
assert_allclose(np.tril(dpt), np.tril(inv(a)))
else:
assert_allclose(np.triu(dpt), np.triu(inv(a)))
class TestDlasd4(object):
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))),
m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \
the singular value %i" % i)
roots = np.array(roots)[::-1]
assert_(not np.any(np.isnan(roots)), "There are NaN roots")
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
class TestTbtrs(object):
@pytest.mark.parametrize('dtype', DTYPES)
def test_nag_example_f07vef_f07vsf(self, dtype):
"""Test real (f07vef) and complex (f07vsf) examples from NAG
Examples available from:
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
* https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
"""
if dtype in REAL_DTYPES:
ab = np.array([[-4.16, 4.78, 6.32, 0.16],
[-2.25, 5.86, -4.82, 0]],
dtype=dtype)
b = np.array([[-16.64, -4.16],
[-13.78, -16.59],
[13.10, -4.94],
[-14.14, -9.96]],
dtype=dtype)
x_out = np.array([[4, 1],
[-1, -3],
[3, 2],
[2, -2]],
dtype=dtype)
elif dtype in COMPLEX_DTYPES:
ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
[-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
[1.62+3.68j, -2.77-1.93j, 0, 0]],
dtype=dtype)
b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
[-15.57 - 23.41j, -57.97 + 8.14j],
[-7.63 + 22.78j, 19.09 - 29.51j],
[-14.74 - 2.40j, 19.17 + 21.33j]],
dtype=dtype)
x_out = np.array([[2j, 1 + 5j],
[1 - 3j, -7 - 2j],
[-4.001887 - 4.988417j, 3.026830 + 4.003182j],
[1.996158 - 1.045105j, -6.103357 - 8.986653j]],
dtype=dtype)
else:
raise ValueError(f"Datatype {dtype} not understood.")
tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
x, info = tbtrs(ab=ab, b=b, uplo='L')
assert_equal(info, 0)
assert_allclose(x, x_out, rtol=0, atol=1e-5)
@pytest.mark.parametrize('dtype,trans',
[(dtype, trans)
for dtype in DTYPES for trans in ['N', 'T', 'C']
if not (trans == 'C' and dtype in REAL_DTYPES)])
@pytest.mark.parametrize('uplo', ['U', 'L'])
@pytest.mark.parametrize('diag', ['N', 'U'])
def test_random_matrices(self, dtype, trans, uplo, diag):
seed(1724)
# n, nrhs, kd are used to specify A and b.
# A is of shape n x n with kd super/sub-diagonals
# b is of shape n x nrhs matrix
n, nrhs, kd = 4, 3, 2
tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
is_upper = (uplo == 'U')
ku = kd * is_upper
kl = kd - ku
# Construct the diagonal and kd super/sub diagonals of A with
# the corresponding offsets.
band_offsets = range(ku, -kl - 1, -1)
band_widths = [n - abs(x) for x in band_offsets]
bands = [generate_random_dtype_array((width,), dtype)
for width in band_widths]
if diag == 'U': # A must be unit triangular
bands[ku] = np.ones(n, dtype=dtype)
# Construct the diagonal banded matrix A from the bands and offsets.
a = sps.diags(bands, band_offsets, format='dia')
# Convert A into banded storage form
ab = np.zeros((kd + 1, n), dtype)
for row, k in enumerate(band_offsets):
ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
# The RHS values.
b = generate_random_dtype_array((n, nrhs), dtype)
x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
assert_equal(info, 0)
if trans == 'N':
assert_allclose(a @ x, b, rtol=5e-5)
elif trans == 'T':
assert_allclose(a.T @ x, b, rtol=5e-5)
elif trans == 'C':
assert_allclose(a.H @ x, b, rtol=5e-5)
else:
raise ValueError('Invalid trans argument')
@pytest.mark.parametrize('uplo,trans,diag',
[['U', 'N', 'Invalid'],
['U', 'Invalid', 'N'],
['Invalid', 'N', 'N']])
def test_invalid_argument_raises_exception(self, uplo, trans, diag):
"""Test if invalid values of uplo, trans and diag raise exceptions"""
# Argument checks occur independently of used datatype.
# This means we need not parameterize all available datatypes.
tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
ab = rand(4, 2)
b = rand(2, 4)
assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
def test_zero_element_in_diagonal(self):
"""Test if a matrix with a zero diagonal element is singular
If the i-th diagonal element of A is zero, ?tbtrs should return `i` in `info`
indicating the provided matrix is singular.
Note that ?tbtrs requires the matrix A to be stored in banded form.
In this form the diagonal corresponds to the last row."""
ab = np.ones((3, 4), dtype=float)
b = np.ones(4, dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
ab[-1, 3] = 0
_, info = tbtrs(ab=ab, b=b, uplo='U')
assert_equal(info, 4)
@pytest.mark.parametrize('ldab,n,ldb,nrhs', [
(5, 5, 0, 5),
(5, 5, 3, 5)
])
def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
"""Test ?tbtrs fails correctly if shapes are invalid."""
ab = np.ones((ldab, n), dtype=float)
b = np.ones((ldb, nrhs), dtype=float)
tbtrs = get_lapack_funcs('tbtrs', dtype=float)
assert_raises(Exception, tbtrs, ab, b)
def test_lartg():
for dtype in 'fdFD':
lartg = get_lapack_funcs('lartg', dtype=dtype)
f = np.array(3, dtype)
g = np.array(4, dtype)
if np.iscomplexobj(g):
g *= 1j
cs, sn, r = lartg(f, g)
assert_allclose(cs, 3.0/5.0)
assert_allclose(r, 5.0)
if np.iscomplexobj(g):
assert_allclose(sn, -4.0j/5.0)
assert_(type(r) == complex)
assert_(type(cs) == float)
else:
assert_allclose(sn, 4.0/5.0)
def test_rot():
# srot, drot from blas and crot and zrot from lapack.
for dtype in 'fdFD':
c = 0.6
s = 0.8
u = np.full(4, 3, dtype)
v = np.full(4, 4, dtype)
atol = 10**-(np.finfo(dtype).precision-1)
if dtype in 'fd':
rot = get_blas_funcs('rot', dtype=dtype)
f = 4
else:
rot = get_lapack_funcs('rot', dtype=dtype)
s *= -1j
v *= 1j
f = 4j
assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
[0, 0, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
[0, 0, f, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, offy=2),
[[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
[[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
[[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
[[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
[[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
assert_(a is u)
assert_(b is v)
assert_allclose(a, [5, 5, 5, 5], atol=atol)
assert_allclose(b, [0, 0, 0, 0], atol=atol)
def test_larfg_larf():
np.random.seed(1234)
a0 = np.random.random((4, 4))
a0 = a0.T.dot(a0)
a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
a0j = a0j.T.conj().dot(a0j)
# our test here will be to do one step of reducing a Hermitian matrix to
# tridiagonal form using Householder transforms.
for dtype in 'fdFD':
larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
if dtype in 'FD':
a = a0j.copy()
else:
a = a0.copy()
# generate a householder transform to clear a[2:,0]
alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
# create expected output
expected = np.zeros_like(a[:, 0])
expected[0] = a[0, 0]
expected[1] = alpha
# assemble householder vector
v = np.zeros_like(a[1:, 0])
v[0] = 1.0
v[1:] = x
# apply transform from the left
a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
# apply transform from the right
a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
assert_allclose(a[:, 0], expected, atol=1e-5)
assert_allclose(a[0, :], expected, atol=1e-5)
@pytest.mark.xslow
def test_sgesdd_lwork_bug_workaround():
# Test that SGESDD lwork is sufficiently large for LAPACK.
#
# This checks that the workaround for an apparent LAPACK bug
# actually works. cf. gh-5401
#
# xslow: requires 1GB+ of memory
p = subprocess.Popen([sys.executable, '-c',
'import numpy as np; '
'from scipy.linalg import svd; '
'a = np.zeros([9537, 9537], dtype=np.float32); '
'svd(a)'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
# Check if an error occurred within 5 sec; the computation can
# take substantially longer, and we will not wait for it to finish
for j in range(50):
time.sleep(0.1)
if p.poll() is not None:
returncode = p.returncode
break
else:
# Didn't exit in time -- probably entered computation. The
# error is raised before entering computation, so things are
# probably OK.
returncode = 0
p.terminate()
assert_equal(returncode, 0,
"Code apparently failed: " + p.stdout.read().decode())
class TestSytrd(object):
@pytest.mark.parametrize('dtype', REAL_DTYPES)
def test_sytrd_with_zero_dim_array(self, dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=dtype)
sytrd = get_lapack_funcs('sytrd', (A,))
assert_raises(ValueError, sytrd, A)
@pytest.mark.parametrize('dtype', REAL_DTYPES)
@pytest.mark.parametrize('n', (1, 3))
def test_sytrd(self, dtype, n):
A = np.zeros((n, n), dtype=dtype)
sytrd, sytrd_lwork = \
get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = \
np.arange(1, n*(n+1)//2+1, dtype=dtype)
# query lwork
lwork, info = sytrd_lwork(n)
assert_equal(info, 0)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
assert_allclose(d, np.diag(A))
assert_allclose(e, 0.0)
assert_allclose(tau, 0.0)
# and now for the proper test (lower=0 is the default)
data, d, e, tau, info = sytrd(A, lwork=lwork)
assert_equal(info, 0)
# assert Q^T*A*Q = tridiag(e, d, e)
# build tridiagonal matrix
T = np.zeros_like(A, dtype=dtype)
k = np.arange(A.shape[0])
T[k, k] = d
k2 = np.arange(A.shape[0]-1)
T[k2+1, k2] = e
T[k2, k2+1] = e
# build Q
Q = np.eye(n, n, dtype=dtype)
for i in range(n-1):
v = np.zeros(n, dtype=dtype)
v[:i] = data[:i, i+1]
v[i] = 1.0
H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
Q = np.dot(H, Q)
# Make matrix fully symmetric
i_lower = np.tril_indices(n, -1)
A[i_lower] = A.T[i_lower]
QTAQ = np.dot(Q.T, np.dot(A, Q))
# disable rtol here since some values in QTAQ and T are very close
# to 0.
assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
class TestHetrd(object):
@pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
def test_hetrd_with_zero_dim_array(self, complex_dtype):
# Assert that a 0x0 matrix raises an error
A = np.zeros((0, 0), dtype=complex_dtype)
hetrd = get_lapack_funcs('hetrd', (A,))
assert_raises(ValueError, hetrd, A)
@pytest.mark.parametrize('real_dtype,complex_dtype',
zip(REAL_DTYPES, COMPLEX_DTYPES))
@pytest.mark.parametrize('n', (1, 3))
def test_hetrd(self, n, real_dtype, complex_dtype):
A = np.zeros((n, n), dtype=complex_dtype)
hetrd, hetrd_lwork = \
get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
# some upper triangular array
A[np.triu_indices_from(A)] = (
np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
)
np.fill_diagonal(A, np.real(np.diag(A)))
# test query lwork
for x in [0, 1]:
_, info = hetrd_lwork(n, lower=x)
assert_equal(info, 0)
# lwork returns complex which segfaults hetrd call (gh-10388)
# use the safe and recommended option
lwork = _compute_lwork(hetrd_lwork, n)
# check lower=1 behavior (shouldn't do much since the matrix is
# upper triangular)
data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
assert_equal(info, 0)
assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
assert_allclose(d, np.real(np.diag(A)))  # api: numpy.diag
|
import numpy as np
import os
from PySide import QtGui, QtCore
import sharppy.sharptab as tab
import sharppy.databases.inset_data as inset_data
from sharppy.sharptab.constants import *
import platform
## routine written by <NAME> and <NAME>
## <EMAIL> and <EMAIL>
__all__ = ['backgroundSTP', 'plotSTP']
class backgroundSTP(QtGui.QFrame):
'''
Draw the background frame and lines for the STP plot frame
'''
def __init__(self):
super(backgroundSTP, self).__init__()
self.initUI()
def initUI(self):
## window configuration settings,
## such as padding, width, height, and
## min/max plot axes
self.setStyleSheet("QFrame {"
" background-color: rgb(0, 0, 0);"
" border-width: 1px;"
" border-style: solid;"
" border-color: #3399CC;}")
self.lpad = 0.; self.rpad = 0.
self.tpad = 15.; self.bpad = 15.
self.wid = self.size().width() - self.rpad
self.hgt = self.size().height() - self.bpad
self.tlx = self.rpad; self.tly = self.tpad
self.brx = self.wid; self.bry = self.hgt
self.stpmax = 11.; self.stpmin = 0.
fsize1 = np.floor(.08 * self.hgt)  # api: numpy.floor
|
import matplotlib
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pickle
from pmdarima import auto_arima
from scipy.signal import filtfilt
from scipy.stats import gaussian_kde
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import confusion_matrix
import sys
from tbats import TBATS
def feature_distributions(data, features, value_col, ncols = 3,
highlighting = None, edge_values = None,
xmin = None, xmax = None, n_bins = 100, n_grid = 500,
min_group_size = 100, max_group_spike = 1.2, log_y = True,
w_scale = 5, h_scale = 5, pad = 0.4, w_pad = 2.0, h_pad = 2.0,
anchor_legend = None):
"""
Plot continuous feature distributions, split by a secondary categorical column.
If a group of the secondary category has enough samples, its KDE is plotted; otherwise, a histogram is plotted.
Options to add categorical highlights binning the continuous value (not recommended if plotting histograms) and/or marked edge labels.
Arguments:
data (pandas dataframe): pandas dataframe
features (list of strings): list of columns in data to plot (each as a separate axis), split based on values in the column
value_col (string): column name for continuous variable against which to plot
xmin (float): minimum value to plot
xmax (float): maximum value to plot
n_bins (int): number of bins for histogram
n_grid (int): resolution of KDE
highlighting (list of tuples: (float, float, color, string)): lower bound, upper bound, color, and label for highlighted ranges of continuous variable
edge_values (list of tuples: (float, color, string)): x-location, color, and label for a vertical line marking a specific value of the continuous variable
ncols (int): number of columns for figure
min_group_size (int): number of samples for a given group in order to plot as KDE
max_group_spike (float): if KDE is unreasonably spiked (i.e. no distribution), plot histogram instead
log_y (bool): set y-scale to be logarithmic
w_scale (float): aspect ratio of axes
h_scale (float): aspect ratio of axes
pad (float): padding of subplots
w_pad (float): padding of subplots
h_pad(float): padding of subplots
anchor_legend (tuple of floats): x,y coordinates to pin legend to axis
Returns:
fig, ax: figure and axis handles
"""
nrows = int(np.ceil(len(features)/float(ncols)))
fig, ax = plt.subplots(nrows = nrows, ncols = ncols,
sharey = False, sharex = False,
figsize = (w_scale*ncols,h_scale*nrows))
ax= ax.reshape(-1)
fig.tight_layout(pad=pad, w_pad=w_pad, h_pad=h_pad)
for iax, f in enumerate(features):
feature_data = data[[f, value_col]].dropna(subset = [value_col])
if xmin is None:
xmin = feature_data[value_col].min()
if xmax is None:
xmax = feature_data[value_col].max()
kde_x = np.linspace(xmin,xmax,n_grid)
bin_width = (xmax-xmin)/n_bins
bins_x = np.arange(xmin-bin_width,xmax+bin_width, bin_width)+bin_width/2.
grouped_feature = feature_data.groupby(f)
for n, g in grouped_feature:
g = g[value_col]
if len(g.unique())>1:
kde_y = gaussian_kde(g)(kde_x)
if (np.max(kde_y) < max_group_spike)&(g.size > min_group_size):
ax[iax].plot(kde_x, kde_y, label = '%s: %d' %(n, g.size))
else:
g.plot.hist(ax = ax[iax], bins = bins_x, align = 'mid', alpha = 0.25,
density=True, label = '%s: %d' %(n, g.size))
else:
g.plot.hist(ax = ax[iax], bins = bins_x, align = 'mid', alpha = 0.25,
density=True, label = '%s: %d' %(n, g.size))
ax[iax].set_xlabel(value_col)
if log_y:
ax[iax].set_yscale('log')
ax[iax].set_ylabel('log PDF')
else:
ax[iax].set_ylabel('PDF')
if highlighting is not None:
for lb, rb, c, label in highlighting:
ax[iax].axvspan(lb, rb, alpha=0.25, color=c, label = label)
if edge_values is not None:
for v, c, label in edge_values:
ax[iax].axvline(x=v, color=c, linewidth = 3.,
linestyle = '--', label = label)
ax[iax].legend(title = f)
if anchor_legend is not None:
ax[iax].legend(bbox_to_anchor= anchor_legend)
return fig, ax
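# Illustrative usage sketch (not from the original module); the dataframe and the 'status',
# 'region' and 'age' column names are hypothetical placeholders.
def _example_feature_distributions(df):
    return feature_distributions(df, features=['status', 'region'], value_col='age',
                                 ncols=2, n_bins=50, log_y=False,
                                 highlighting=[(0., 18., 'C0', 'minor'), (65., 100., 'C1', 'senior')],
                                 anchor_legend=(1.3, 1.0))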
def jointplot(x, y, c = 'k', cmap = 'gray_r',
xmin = None, xmax = None, xdelta = None,
ymin = None, ymax = None, ydelta = None,
logscale = False, gridsize = 50, bins = None, alpha = 0.2,
joint_xlabel = None, joint_ylabel = None,
marginal_xlabel = None, marginal_ylabel = None,
fig_axes = None, joint_type = 'hex', scatter_label = '',
highlighting = None, edge_values = None, anchor_legend = None):
"""
Joint plot continuous feature distributions.
Option to plot as hexbinned or scatter joint distribution
Option to add categorical highlights binning the continuous value (not recommended if plotting histograms) and/or marked edge labels.
Arguments:
x (pandas series, floats): x-axis continuous values
y (pandas series, floats): y-axis continuous values
c (color identifier): color for the scatter and marginal histograms
cmap (colormap identifier): colormap for the hexbin joint plot
alpha (float): transparency of plots
xmin (float): minimum value to plot on x axis
xmax (float): maximum value to plot on x axis
xdelta (float): padding around xmin/xmax values for easier viewing
ymin (float): minimum value to plot on y axis
ymax (float): maximum value to plot on y axis
ydelta (float): padding around xmin/xmax values for easier viewing
bins (int): number of bins for histogram
gridsize (int): resolution of hexbin
highlighting (list of tuples: (float, float, color, string)): lower bound, upper bound, color, and label for highlighted ranges of continuous variable
edge_values (list of tuples: (float, color, string)): x-location, color, and label for a vertical line marking a specific value of the continuous variable
logscale (bool): set scales to be logarithmic
anchor_legend (tuple of floats): x,y coordinates to pin legend to axis
joint_xlabel (string): label for x axis of joint plot
joint_ylabel (string): label for x axis of joint plot
marginal_xlabel (string): label for y axis of vertical marginal plot
marginal_ylabel (string): label for y axis of horizontal marginal plot
fig_axes (tuple): tuple of returned values from this function; for plotting twice in same figure
joint_type (string): 'hex' or 'scatter'
scatter_label (string): legend value for scatter plot (if plotting twice in same figure)
Returns:
fig, ax_joint, ax_marg_x, ax_marg_y : handles to figure and each of the three subplot axes
"""
if fig_axes == None:
fig = plt.figure()
gs = GridSpec(4,4)
ax_joint = fig.add_subplot(gs[1:4,0:3])
ax_marg_x = fig.add_subplot(gs[0,0:3])
ax_marg_y = fig.add_subplot(gs[1:4,3])
else:
fig,ax_joint,ax_marg_x,ax_marg_y = fig_axes
if joint_type == 'hex':
ax_joint.hexbin(x,y, cmap = cmap, bins= 'log', gridsize = gridsize )
elif joint_type == 'scatter':
ax_joint.scatter(x,y, color = c, alpha= alpha, label = scatter_label)
if xmin is None:
xmin = min(x)
if xmax is None:
xmax = max(x)
if ymin is None:
ymin = min(y)
if ymax is None:
ymax = max(y)
if bins:
ax_marg_x.hist(x, density = False, color = c, alpha = alpha, bins = bins[0],
align = 'mid')
ax_marg_y.hist(y, density = False, color = c, alpha = alpha, bins = bins[1],
align = 'mid', orientation="horizontal")
else:
ax_marg_x.hist(x, density = False, color = c, alpha = alpha, range = (xmin, xmax),
align = 'mid')
ax_marg_y.hist(y, density = False, color = c, alpha = alpha, range = (ymin, ymax),
align = 'mid', orientation="horizontal")
if logscale:
ax_joint.set_xscale('log')
ax_joint.set_yscale('log')
ax_marg_x.set_xscale('log')
ax_marg_x.set_yscale('log')
ax_marg_y.set_xscale('log')
ax_marg_y.set_yscale('log')
else:
if xdelta is None:
xdelta = (xmax - xmin)/100.
if ydelta is None:
ydelta = (ymax - ymin)/100.
ax_joint.axis([xmin-xdelta, xmax+xdelta, ymin-ydelta, ymax+ydelta])
ax_marg_x.set_xlim([xmin-xdelta, xmax+xdelta])
ax_marg_y.set_ylim([ymin-ydelta, ymax+ydelta])
# Turn off tick labels on marginals
plt.setp(ax_marg_x.get_xticklabels(), visible=False)
plt.setp(ax_marg_y.get_yticklabels(), visible=False)
# Set labels on joint
if joint_xlabel is None:
try:
joint_xlabel = x.name
except:
joint_xlabel = ''
if joint_ylabel is None:
try:
joint_ylabel = y.name
except:
joint_ylabel = ''
ax_joint.set_xlabel(joint_xlabel)
ax_joint.set_ylabel(joint_ylabel)
# Set labels on marginals
if marginal_xlabel is None:
marginal_xlabel = 'Count'
if marginal_ylabel is None:
marginal_ylabel = 'Count'
ax_marg_y.set_xlabel(marginal_xlabel)
ax_marg_x.set_ylabel(marginal_ylabel )
if highlighting is not None:
for lb, rb, c, label in highlighting:
ax_joint.axvspan(lb, rb, alpha=0.25, color=c, label = label)
if edge_values is not None:
for v, c, label in edge_values:
ax_joint.axvline(x=v, color=c, linewidth = 3.,
linestyle = '--', label = label)
if anchor_legend is not None:
ax_joint.legend(bbox_to_anchor= anchor_legend)
return fig, ax_joint, ax_marg_x, ax_marg_y
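# Illustrative usage sketch (not from the original module): a hexbin joint distribution with a
# scatter overlay drawn into the same axes via the returned handles; x, y, x2, y2 are
# hypothetical pandas Series.
def _example_jointplot(x, y, x2, y2):
    handles = jointplot(x, y, joint_type='hex', gridsize=40)
    return jointplot(x2, y2, c='r', alpha=0.5, joint_type='scatter',
                     scatter_label='subset', fig_axes=handles)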
# Adapted from https://matplotlib.org/gallery/images_contours_and_fields/image_annotated_heatmap.html
def heatmap(data, row_labels, col_labels,
row_title = '', col_title = '', ax=None,
cbar_kw={}, cbarlabel="", vmin=None, vmax=None,
x_tick_rotation = 0, **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
vmin : lower bound for colormap
vmax : upper bound for colormap
x_tick_rotation : angle for x axis labels
Returns:
im, cbar : handles to image and colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
if not vmin:
vmin = np.amin(data)
if not vmax:
vmax = np.amax(data)
# Plot the heatmap
im = ax.imshow(data, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
# ... and label them with the respective list entries.
ax.set_xticklabels(col_labels)
ax.set_yticklabels(row_labels)
# Keep the horizontal axes labeling at the bottom.
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=x_tick_rotation, ha="right",
rotation_mode="anchor")
# Turn spines off and create white grid.
for _, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)  # api: numpy.arange
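# Illustrative usage sketch (not from the original snippet): plot a small random matrix with
# hypothetical row/column labels; extra keyword arguments are passed through to imshow.
def _example_heatmap():
    scores = np.random.rand(3, 4)
    return heatmap(scores, ['r1', 'r2', 'r3'], ['c1', 'c2', 'c3', 'c4'],
                   cbarlabel='score', cmap='viridis', x_tick_rotation=45)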
|
import cv2
import numpy as np
import sys
def convert_color_space_BGR_to_RGB(img_BGR):
#print(img_BGR)
B = img_BGR[:,:,0]/255
G = img_BGR[:,:,1]/255
R = img_BGR[:,:,2]/255
img_RGB = np.stack([R, G, B], axis=2)
#print(img_RGB)
return img_RGB
def convert_color_space_RGB_to_BGR(img_RGB):
R = img_RGB[:,:,0]*255
G = img_RGB[:,:,1]*255
B = img_RGB[:,:,2]*255
img_BGR = np.stack([B, G, R], axis=2)
return img_BGR
def convert_color_space_RGB_to_Lab(img_RGB):
'''
convert image color space RGB to Lab
'''
#print(img_RGB)
R = img_RGB[:,:,0]
G = img_RGB[:,:,1]
B = img_RGB[:,:,2]
L = 0.3811*R + 0.5783*G + 0.0402*B
M = 0.1967*R + 0.7244*G + 0.0782*B
S = 0.0241*R + 0.1288*G + 0.8444*B
L = np.log10(L)
M = np.log10(M)
S = np.log10(S)
new_l = 1.0 / np.sqrt(3)*L + 1.0 / np.sqrt(3)*M + 1.0 / np.sqrt(3)*S
new_alpha = 1.0 / np.sqrt(6)*L + 1.0 / np.sqrt(6)*M - 2 / np.sqrt(6)*S
new_beta = 1.0 / np.sqrt(2)*L - 1.0 / np.sqrt(2)*M + 0 *S
img_Lab = np.stack([new_l, new_alpha, new_beta], axis=2)
#print(img_Lab)
return img_Lab
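# Illustrative sketch (not from the original script): converting an image loaded with OpenCV
# into the decorrelated l-alpha-beta space defined above; 'path' is a hypothetical file name.
# Pixels equal to zero would give -inf after np.log10, so real inputs are usually offset slightly.
def _example_bgr_to_lab(path):
    img_BGR = cv2.imread(path).astype(np.float64)
    img_RGB = convert_color_space_BGR_to_RGB(img_BGR)
    return convert_color_space_RGB_to_Lab(img_RGB)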
def convert_color_space_Lab_to_BGR(img_Lab):
'''
convert image color space Lab to RGB
'''
l_result = img_Lab[:,:,0]
alpha_result = img_Lab[:,:,1]
beta_result = img_Lab[:,:,2]
L = np.sqrt(3.0) / 3.0 * l_result + np.sqrt(6) / 6.0 * alpha_result + np.sqrt(2) / 2.0 * beta_result
M = np.sqrt(3.0) / 3.0 * l_result + np.sqrt(6) / 6.0 * alpha_result - np.sqrt(2) / 2.0 * beta_result  # api: numpy.sqrt
|
#TfIdfVectorizer (tf_batch_onlybigrams_skip0)
import onnx
from onnx import helper
from onnx import numpy_helper
from onnx import AttributeProto, TensorProto, GraphProto
import numpy as np
from Compare_output import compare
# Create the inputs (ValueInfoProto)
x = helper.make_tensor_value_info('x', TensorProto.INT32, [2, 6])
# Create one output (ValueInfoProto)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 7])
ngram_counts = np.array([0, 4])  # api: numpy.array
|
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import sqrt, pi, cos, sin, log, exp, sinh, mod
from numpy.linalg import norm
from phidl.device_layout import Device
from phidl.device_layout import _parse_layer
from phidl.geometry import turn
import gdspy
def _arc(radius = 10, width = 0.5, theta = 45, start_angle = 0, angle_resolution = 2.5, layer = 0):
""" Creates an arc of arclength ``theta`` starting at angle ``start_angle`` """
inner_radius = radius-width/2
outer_radius = radius+width/2
angle1 = (start_angle)*pi/180
angle2 = (start_angle + theta)*pi/180
t = np.linspace(angle1, angle2, int(np.ceil(abs(theta)/angle_resolution)))
inner_points_x = (inner_radius*cos(t)).tolist()
inner_points_y = (inner_radius*sin(t)).tolist()
outer_points_x = (outer_radius*cos(t)).tolist()
outer_points_y = (outer_radius*sin(t)).tolist()
xpts = inner_points_x + outer_points_x[::-1]
ypts = inner_points_y + outer_points_y[::-1]
D = Device('arc')
D.add_polygon(points = (xpts,ypts), layer = layer)
D.add_port(name = 1, midpoint = (radius*cos(angle1), radius*sin(angle1)), width = width, orientation = start_angle - 90 + 180*(theta<0))
D.add_port(name = 2, midpoint = (radius*cos(angle2), radius*sin(angle2)), width = width, orientation = start_angle + theta + 90 - 180*(theta<0))
D.info['length'] = (abs(theta)*pi/180)*radius
return D
def _gradual_bend(
radius = 20,
width = 1.0,
angular_coverage=15,
num_steps=10,
angle_resolution=0.1,
start_angle=0,
direction='ccw',
layer=0,
):
"""
creates a 90-degree bent waveguide
the bending radius is gradually increased until it reaches the minimum
value of the radius at the "angular coverage" angle.
it essentially creates a smooth transition to a bent waveguide mode.
user can control number of steps provided.
direction determined by start angle and cw or ccw switch
############
with the default 10 "num_steps" and 15 degree coverage, effective radius is about 1.5*radius.
"""
angular_coverage=np.deg2rad(angular_coverage)
D = Device()
#determines the increment in radius through its inverse from 0 to 1/r
inc_rad =(radius**-1)/(num_steps)
angle_step = angular_coverage/num_steps
#construct a series of sub-arcs with equal angles but gradually decreasing bend radius
arcs = []
for x in range(num_steps):
A = _arc(radius=1/((x+1)*inc_rad),width=width,theta=np.rad2deg(angle_step),start_angle=x*np.rad2deg(angle_step),angle_resolution=angle_resolution,layer=layer)
a = D.add_ref(A)
arcs.append(a)
if x>0:
a.connect(port=1,destination=prevPort)
prevPort=a.ports[2]
D.add_port(name=1,port=arcs[0].ports[1])
#now connect a regular bend for the normal curved portion
B = _arc(radius=radius,width=width,theta=45-np.rad2deg(angular_coverage),start_angle=angular_coverage,angle_resolution=angle_resolution,layer=layer)
b = D.add_ref(B)
b.connect(port=1,destination=prevPort)
prevPort=b.ports[2]
D.add_port(name=2,port=prevPort)
#now create the overall structure
Total = Device()
#clone the half-curve into two objects and connect for a 90 deg bend.
D1 = Total.add_ref(D)
D2 = Total.add_ref(D)
D2.mirror(p1=[0,0],p2=[1,1])
D2.connect(port=2,destination=D1.ports[2])
Total.xmin=0
Total.ymin=0
#orient to default settings...
Total.mirror(p1=[0,0],p2=[1,1])
Total.mirror(p1=[0,0],p2=[1,0])
#orient to user-provided settings
if direction == 'cw':
Total.mirror(p1=[0,0],p2=[1,0])
Total.rotate(angle=start_angle,center=Total.center)
Total.center=[0,0]
Total.add_port(name=1,port=D1.ports[1])
Total.add_port(name=2,port=D2.ports[1])
return Total
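# Illustrative usage sketch (not from the original module): place a clockwise gradual bend
# inside a parent Device so its two ports can be routed to other components.
def _example_gradual_bend():
    D = Device('demo')
    bend = D.add_ref(_gradual_bend(radius=20, width=1.0, direction='cw'))
    return D, bend.ports[1], bend.ports[2]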
def route_basic(port1, port2, path_type = 'sine', width_type = 'straight', width1 = None, width2 = None, num_path_pts = 99, layer = 0):
# Assuming they're both Ports for now
point_a = np.array(port1.midpoint)
if width1 is None: width1 = port1.width
point_b = np.array(port2.midpoint)
if width2 is None: width2 = port2.width
if round(abs(mod(port1.orientation - port2.orientation,360)),3) != 180:
raise ValueError('[DEVICE] route() error: Ports do not face each other (orientations must be 180 apart)')
orientation = port1.orientation
separation = point_b - point_a # Vector drawn from A to B
distance = norm(separation) # Magnitude of vector from A to B
rotation = np.arctan2(separation[1],separation[0])*180/pi # Rotation of vector from A to B
angle = rotation - orientation # If looking out along the normal of ``a``, the angle you would have to look to see ``b``
forward_distance = distance*cos(angle*pi/180)
lateral_distance = distance*sin(angle*pi/180)  # api: numpy.sin
|
import argparse
import numpy as np
from PIL import Image
import homomorphic
def main():
parser = argparse.ArgumentParser(description="Homomorphic filtering demo.")
parser.add_argument(
'--alpha',
metavar='float',
type=float,
help="Variable alpha. Default is 0.75",
default=0.75)
parser.add_argument(
'--beta',
metavar='float',
type=float,
help="Variable beta. Default is 1.25",
default=1.25)
parser.add_argument(
'--filter',
metavar='str',
type=str,
help="Filter to use. Either 'butterworth' or 'gaussian'. Default is 'butterworth'",
default='butterworth')
parser.add_argument(
'--cutoff-freq',
metavar='float',
type=float,
help="Cutoff frequency. Default is 30",
default=30)
parser.add_argument(
'--order',
metavar='float',
type=float,
help="Filter order, only used butterworth filter. Default is 2",
default=2)
parser.add_argument('imgpath', metavar='imgpath', type=str, help="Input image file")
args = parser.parse_args()
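# Example invocation (illustrative; assumes this demo is saved as homomorphic_demo.py):
#   python homomorphic_demo.py --filter gaussian --cutoff-freq 40 --alpha 0.5 --beta 1.5 input.png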
img = Image.open(args.imgpath)
if img.mode == 'RGB':
img = img.convert('L')
img = np.asarray(img, dtype=np.uint8)  # api: numpy.asarray
|
""" Convolutional Neural Network.
Build and train a convolutional neural network with TensorFlow.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: <NAME>
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import gzip
import imageio
import os
import matplotlib.pyplot as plt
import scipy.misc
import time
IMAGE_SIZE = 128
NUM_CHANNELS = 1
# Training Parameters
lr_vgg = 0.00001
lr_vgg_fc = 0.0001
lr_2_mid = 0.00005
lr_2_end = 0.00001
lr_3_mid = 0.00005
lr_3_end = 0.00001 #0.0002
# tf Graph input
trainSetName = tf.placeholder(tf.string, shape=[None])
slfTestSetName = tf.placeholder(tf.string, shape=[None])
evalSetName = tf.placeholder(tf.string, shape=[None])
Drop_rate = tf.placeholder(tf.float32) # dropout (keep probability)
Is_training = tf.placeholder(tf.bool)
def _parser(proto):
# Convert tfrecord to tensors
_features = tf.parse_single_example(
proto,
features = {
'image': tf.FixedLenFeature((),tf.string),
'mask': tf.FixedLenFeature((),tf.string),
'gt': tf.FixedLenFeature((),tf.string)
})
_image = tf.decode_raw(_features['image'], tf.float32)
_image = tf.reshape(_image, [IMAGE_SIZE,IMAGE_SIZE,3])
_mask = tf.decode_raw(_features['mask'], tf.float32)
_mask = tf.reshape(_mask, [IMAGE_SIZE,IMAGE_SIZE])
_gt = tf.decode_raw(_features['gt'], tf.float32)
_gt = tf.reshape(_gt, [IMAGE_SIZE,IMAGE_SIZE,3])
return _image, _mask, _gt
def _parser_eval(proto):
# Convert tfrecord to tensors
_features = tf.parse_single_example(
proto,
features = {
'image': tf.FixedLenFeature((),tf.string),
'mask': tf.FixedLenFeature((),tf.string)
})
_image = tf.decode_raw(_features['image'], tf.float32)
_image = tf.reshape(_image, [IMAGE_SIZE,IMAGE_SIZE,3])
_mask = tf.decode_raw(_features['mask'], tf.float32)
_mask = tf.reshape(_mask, [IMAGE_SIZE,IMAGE_SIZE])
return _image, _mask
def caldotprod(prediction, mask, groundtruth):
# normalize prediction
_prediction = ((prediction / 255.0) - 0.5) * 2.
_pred_norm = tf.sqrt(tf.reduce_sum(tf.multiply(_prediction, _prediction), axis=3, keep_dims=True))
_pred_normalized = tf.div(_prediction, tf.tile(_pred_norm, [1,1,1,3]))
# normalize groundtruth
_groundtruth = ((groundtruth / 255.0) - 0.5) * 2.
_gt_norm = tf.sqrt(tf.reduce_sum(tf.multiply(_groundtruth, _groundtruth), axis=3, keep_dims=True))
_gt_normalized = tf.div(_groundtruth, tf.tile(_gt_norm, [1,1,1,3]))
# calculate dot product = cos(theta)
_dotprod = tf.reduce_sum(tf.multiply(_pred_normalized, _gt_normalized), axis=3)
# mask object
_mask = tf.not_equal(mask, tf.zeros_like(mask))
_dotprod = tf.boolean_mask(_dotprod, _mask)
# fix nan by setting to -1
_dotprod = tf.where(tf.is_nan(_dotprod), tf.zeros_like(_dotprod)-1, _dotprod)
# clip to -1,+1
_dotprod = tf.clip_by_value(_dotprod, -1., 1.)
return _dotprod, prediction, mask, groundtruth
# prediction 0-255, groundtruth 0-255
def calcloss(_prediction, _mask, _groundtruth):
_dotprod, _, _, _ = caldotprod(_prediction, _mask, _groundtruth)
# calculate angles
_ang = tf.acos(_dotprod)
loss = -tf.reduce_mean(_dotprod)
# loss = tf.reduce_mean(_ang)
return loss
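# calcloss is the negative mean cosine similarity between predicted and ground-truth normals
# over the masked pixels: it ranges from -1 (perfect agreement) to +1 (opposite normals).
# The commented-out alternative would minimize the mean angular error instead.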
# vgg/conv1_1_W -- vgg origin
# vgg/conv1_1/kernel:0 -- var
def getVggStoredName(var):
# get name stored in vgg origin
if 'kernel' in var.op.name:
return var.op.name.replace('/kernel','_W')
elif 'bias' in var.op.name:
return var.op.name.replace('/bias','_b')
else:
print("Error: No kernel or bias")
def scalePrediction(_pred_single):
if np.max(_pred_single) > 255.0:
_pred_single = _pred_single - 255.0/2.0
_pred_single = _pred_single / np.max(_pred_single) * (255.0/2.0)
_pred_single = _pred_single + 255.0/2.0
min_pos = np.unravel_index(np.argmin(_pred_single),_pred_single.shape)
_pred_single[min_pos[0], min_pos[1], min_pos[2]] = 0;
_pred_single = np.clip(_pred_single, 0., 255.)
else:
max_pos = np.unravel_index(np.argmax(_pred_single),_pred_single.shape)
_pred_single[max_pos[0], max_pos[1], max_pos[2]] = 255.0;
_pred_single = np.clip(_pred_single, 0., 255.)
return _pred_single
# Create the neural network
def conv_net(img, mask, dropout, is_training, reuse):
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
img = tf.reshape(img, shape=[-1, IMAGE_SIZE, IMAGE_SIZE, 3], name='inputTensor')
mask = tf.reshape(mask, shape=[-1, IMAGE_SIZE, IMAGE_SIZE, 1], name='inputMask')
with tf.variable_scope('vgg', reuse = reuse):
# Convolution Layer with 64 filters and a kernel size of 3
conv1_1 = tf.layers.conv2d(inputs = img,
filters=64, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv1_1')
conv1_2 = tf.layers.conv2d(inputs = conv1_1,
filters=64, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv1_2')
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
pool1 = tf.layers.max_pooling2d(conv1_2, 2, 2)
# Convolution Layer with 32 filters and a kernel size of 3
conv2_1 = tf.layers.conv2d(inputs = pool1,
filters=128, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv2_1')
conv2_2 = tf.layers.conv2d(inputs = conv2_1,
filters=128, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv2_2')
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
pool2 = tf.layers.max_pooling2d(conv2_2, 2, 2)
# Convolution Layer with 256 filters and a kernel size of 3
conv3_1 = tf.layers.conv2d(inputs = pool2,
filters=256, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv3_1')
conv3_2 = tf.layers.conv2d(inputs = conv3_1,
filters=256, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv3_2')
conv3_3 = tf.layers.conv2d(inputs = conv3_2,
filters=256, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv3_3')
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
pool3 = tf.layers.max_pooling2d(conv3_3, 2, 2)
# Convolution Layer with 512 filters and a kernel size of 3
conv4_1 = tf.layers.conv2d(inputs = pool3,
filters=512, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv4_1')
conv4_2 = tf.layers.conv2d(inputs = conv4_1,
filters=512, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv4_2')
conv4_3 = tf.layers.conv2d(inputs = conv4_2,
filters=512, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv4_3')
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
pool4 = tf.layers.max_pooling2d(conv4_3, 2, 2)
# Convolution Layer with 512 filters and a kernel size of 3
conv5_1 = tf.layers.conv2d(inputs = pool4,
filters=512, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv5_1')
conv5_2 = tf.layers.conv2d(inputs = conv5_1,
filters=512, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv5_2')
conv5_3 = tf.layers.conv2d(inputs = conv5_2,
filters=512, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv5_3')
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
pool5 = tf.layers.max_pooling2d(conv5_3, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(pool5)
# Fully connected layer (in tf contrib folder for now)
fc1 = tf.layers.dense(fc1, 4096, name='fc1')
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training) # drop = 0.5
fc1 = tf.nn.relu(fc1)
# Fully connected layer (in tf contrib folder for now)
fc2 = tf.layers.dense(fc1, 8*8*64, name='fc2')
fc2 = tf.reshape(fc2, [-1, 8, 8, 64])
fc2 = tf.nn.relu(fc2)
# Upsample
vgg_out = tf.image.resize_nearest_neighbor(fc2, size=(32, 32))
with tf.variable_scope('scale2', reuse = reuse):
conv2_img = tf.layers.conv2d(inputs = img,
filters=96, kernel_size=5, activation=tf.nn.relu, padding='same', name='conv2_img')
# Max Pooling (down-sampling) with strides of 4 and kernel size of 4
pool2_img = tf.layers.max_pooling2d(conv2_img, 4, 4)
conv2_mask = tf.layers.conv2d(inputs = mask,
filters=48, kernel_size=5, activation=tf.nn.relu, padding='same', name='conv2_mask')
# Max Pooling (down-sampling) with strides of 4 and kernel size of 4
pool2_mask = tf.layers.max_pooling2d(conv2_mask, 4, 4)
# Stack pool2_img, pool2_mask and vgg_out along the channel axis (implemented by zero-padding each to the full channel width and summing)
padding_pool2_img = tf.constant([[0, 0,], [0, 0], [0, 0], [0, 48+64]])
pool2_img = tf.pad(pool2_img, padding_pool2_img, "CONSTANT")
padding_pool2_mask = tf.constant([[0, 0,], [0, 0], [0, 0], [96, 64]])
pool2_mask = tf.pad(pool2_mask, padding_pool2_mask, "CONSTANT")
padding_vgg_out = tf.constant([[0, 0,], [0, 0], [0, 0], [96+48, 0]])
vgg_out = tf.pad(vgg_out, padding_vgg_out, "CONSTANT")
stack2 = pool2_img + pool2_mask + vgg_out
# Convolution Layer with 96+48+64 filters and a kernel size of 5
conv1 = tf.layers.conv2d(inputs = stack2,
filters=96+48+64, kernel_size=5, activation=tf.nn.relu, padding='same', name='conv1')
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(inputs = conv1,
filters=64, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv2')
# Convolution Layer with 64 filters and a kernel size of 3
conv3 = tf.layers.conv2d(inputs = conv2,
filters=64, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv3')
# Convolution Layer with 64 filters and a kernel size of 3
conv4 = tf.layers.conv2d(inputs = conv3,
filters=64, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv4')
# Convolution Layer with 3 filters and a kernel size of 3
conv5 = tf.layers.conv2d(inputs = conv4,
filters=3, kernel_size=3, activation=tf.nn.relu, padding='same', name='conv5')
# Upsample
scale2_out = tf.image.resize_nearest_neighbor(conv5, size=(128, 128))
with tf.variable_scope('scale3', reuse = reuse):
conv3_img = tf.layers.conv2d(inputs = img,
filters=96, kernel_size=9, activation=tf.nn.relu,
kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=0.0005/lr_3_end),
padding='same', name='conv3_img')
conv3_mask = tf.layers.conv2d(inputs = mask,
filters=48, kernel_size=9, activation=tf.nn.relu,
kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=0.0005/lr_3_end),
padding='same', name='conv3_mask')
padding_conv3_img = tf.constant([[0, 0,], [0, 0], [0, 0], [0, 48+3]])
conv3_img = tf.pad(conv3_img, padding_conv3_img, "CONSTANT")
padding_conv3_mask = tf.constant([[0, 0,], [0, 0], [0, 0], [96, 3]])
conv3_mask = tf.pad(conv3_mask, padding_conv3_mask, "CONSTANT")
padding_scale2_out = tf.constant([[0, 0,], [0, 0], [0, 0], [96+48, 0]])
scale2_out_padded = tf.pad(scale2_out, padding_scale2_out, "CONSTANT")
stack3 = conv3_img + conv3_mask + scale2_out_padded
# Convolution Layer with 96+48+3 filters and a kernel size of 9
conv1 = tf.layers.conv2d(inputs = stack3,
filters=96+48+3, kernel_size=9, activation=tf.nn.relu,
kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=0.0005/lr_3_end),
padding='same', name='conv1')
# Convolution Layer with 64 filters and a kernel size of 5
conv2 = tf.layers.conv2d(inputs = conv1,
filters=64, kernel_size=5, activation=tf.nn.relu,
kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=0.0005/lr_3_mid),
padding='same', name='conv2')
# Convolution Layer with 64 filters and a kernel size of 5
conv3 = tf.layers.conv2d(inputs = conv2,
filters=64, kernel_size=5, activation=tf.nn.relu,
kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=0.0005/lr_3_mid),
padding='same', name='conv3')
# Convolution Layer with 3 filters and a kernel size of 5
conv4 = tf.layers.conv2d(inputs = conv3,
filters=3, kernel_size=5, activation=tf.nn.relu,
kernel_regularizer = tf.contrib.layers.l2_regularizer(scale=0.0005/lr_3_end),
padding='same', name='conv4')
out = conv4
return out
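# Architecture note (comment added for clarity, not original documentation): the model is a
# three-branch, coarse-to-fine network. A VGG-16-style branch ('vgg') produces a coarse 32x32
# feature map from the whole image; 'scale2' fuses that map with freshly convolved image/mask
# features (via channel padding and summation) and upsamples to 128x128; 'scale3' refines the
# result once more at full resolution and returns a 3-channel 128x128 prediction.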
# #
# ############################# DATASET ############################# #
# #
num_train_set = 18000
num_self_test_set = 20000-num_train_set
test_batch_size = 20
num_eval_set = 2000
batch_size = 18
trainingSetFiles = ['./trainTFRecords/' + str(I) +'.tfrecords' for I in range(0,num_train_set)]
slfTestSetFiles = ['./trainTFRecords/' + str(I) +'.tfrecords' for I in range(num_train_set,20000)]
evalSetFiles = ['./testTFRecords/' + str(I) +'.tfrecords' for I in range(0,2000)]
# construct dataset
# training set
trainSet = tf.data.TFRecordDataset(trainSetName).map(_parser).shuffle(buffer_size=3600).repeat().batch(batch_size)
it_train = trainSet.make_initializable_iterator()
# self-testing set
selftestSet = tf.data.TFRecordDataset(slfTestSetName).map(_parser).repeat().batch(test_batch_size)
it_selftest = selftestSet.make_initializable_iterator()
# test set (to upload)
evalSet = tf.data.TFRecordDataset(evalSetName).map(_parser_eval).batch(test_batch_size)
it_eval = evalSet.make_initializable_iterator()
# #
# ############################## BATCH ############################## #
# #
# get training/testing data
# N*128*128*3-(0,1), N*128*128-(0/1), N*128*128*3-(-1,1)
imageIn, maskIn, gtIn = it_train.get_next()
imageIn_slftest, maskIn_slftest, gtIn_slftest = it_selftest.get_next()
imageIn_eval, maskIn_eval = it_eval.get_next()
# #
# ########################### PRED & LOSS ########################### #
# #
# Construct model
prediction = conv_net(imageIn, maskIn, Drop_rate, is_training=True, reuse=False)
loss_op = calcloss(prediction, maskIn, gtIn)
# Construct test graph
prediction_slftest = conv_net(imageIn_slftest, maskIn_slftest, dropout=0.0, is_training=False, reuse=True)
loss_test_dotprod_op = caldotprod(prediction_slftest, maskIn_slftest, gtIn_slftest)
# Construct eval graph
prediction_eval = conv_net(imageIn_eval, maskIn_eval, dropout=0.0, is_training=False, reuse=True)
# #
# ############################# VAR MAN ############################# #
# #
# Manage vars
# Vgg var
vggConvVar_dict = {getVggStoredName(val):val for val in tf.global_variables() if 'vgg/conv' in val.op.name}
vggConvVar_list = list(vggConvVar_dict.values())
# scale2Var
vggFC_scale2Var_list = [var for var in tf.global_variables() if var not in vggConvVar_list and 'scale3' not in var.op.name]
vggFCVar_list = [var for var in tf.global_variables() if 'vgg/fc' in var.op.name]
scale2EndVar_list = [var for var in tf.global_variables() if 'scale2/conv1' in var.op.name or \
'scale2/conv5' in var.op.name or \
'2_img' in var.op.name or \
'2_mask' in var.op.name]
scale2MidVar_list = [var for var in tf.global_variables() if 'scale2' in var.op.name and var not in scale2EndVar_list]
# scale3Var
scale3Var_list = [var for var in tf.global_variables() if 'scale3' in var.op.name]
scale3EndVar_list = [var for var in scale3Var_list if 'conv1' in var.op.name or \
'conv4' in var.op.name or \
'img' in var.op.name or \
'mask' in var.op.name]
scale3MidVar_list = [var for var in scale3Var_list if var not in scale3EndVar_list]
# Savers
vggConvRestorer = tf.train.Saver(vggConvVar_dict)
scale2Restorer = tf.train.Saver(vggFC_scale2Var_list)
scale3Restorer = tf.train.Saver(scale3Var_list)
# for key, val in vggConvVar_dict.items():
# print(key, val)
# print("\n\n")
# for t in vggFC_scale2Var_list:
# print(t.op.name, t.shape)
# print("\n\n")
# for t in vggFCVar_list:
# print(t.op.name, t.shape)
# print("\n\n")
# for t in scale2MidVar_list:
# print(t.op.name, t.shape)
# print("\n\n")
# for t in scale2EndVar_list:
# print(t.op.name, t.shape)
# print("\n\n")
# for t in scale3Var_list:
# print(t.op.name, t.shape)
# print("\n\n")
# for t in scale3MidVar_list:
# print(t.op.name, t.shape)
# print("\n\n")
# for t in scale3EndVar_list:
# print(t.op.name, t.shape)
# print("\n\n")
# exit(0)
# #
# ########################### TRAIN & LOSS ########################### #
# #
num_epochs = 3
display_step = 10
test_step = num_train_set // batch_size # test once per epoch
# Generate train op
# vgg
opt_vgg = tf.train.AdamOptimizer(learning_rate=lr_vgg)
# part 2
opt_vgg_fc = tf.train.AdamOptimizer(learning_rate=lr_vgg_fc)
opt_2mid = tf.train.AdamOptimizer(learning_rate=lr_2_mid)
opt_2end = tf.train.AdamOptimizer(learning_rate=lr_2_end)
# part 3
opt_3mid = tf.train.AdamOptimizer(learning_rate=lr_3_mid)
opt_3end = tf.train.AdamOptimizer(learning_rate=lr_3_end)
grads = tf.gradients(loss_op,
vggConvVar_list +
vggFCVar_list +
scale2MidVar_list +
scale2EndVar_list +
scale3MidVar_list +
scale3EndVar_list
)
# part 2
# grads_vggFC = grads[0:4]
# grads_s2mid = grads[4:10]
# grads_s2end = grads[10:18]
# vgg+2
# grads_vggConv = grads[0:26]
# grads_vggFC = grads[26:30]
# grads_s2mid = grads[30:36]
# grads_s2end = grads[36:44]
# part 3
# grads_s3mid = grads[0:4]
# grads_s3end = grads[4:12]
# all three parts (vgg + scale2 + scale3)
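# Slice boundaries below (explanatory note, assuming each layer contributes a kernel and a
# bias variable): 13 VGG conv layers -> 26 vars, 2 VGG dense layers -> 4 vars, scale2 mid
# (conv2/3/4) -> 6 vars, scale2 end (conv1, conv5, conv2_img, conv2_mask) -> 8 vars, scale3
# mid (conv2, conv3) -> 4 vars, scale3 end (conv1, conv4, conv3_img, conv3_mask) -> 8 vars.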
grads_vggConv = grads[0:26]
grads_vggFC = grads[26:30]
grads_s2mid = grads[30:36]
grads_s2end = grads[36:44]
grads_s3mid = grads[44:48]
grads_s3end = grads[48:56]
train_vggConv = opt_vgg.apply_gradients(zip(grads_vggConv, vggConvVar_list))
train_vggFC = opt_vgg_fc.apply_gradients(zip(grads_vggFC, vggFCVar_list))
train_2mid = opt_2mid.apply_gradients(zip(grads_s2mid, scale2MidVar_list))
train_2end = opt_2end.apply_gradients(zip(grads_s2end, scale2EndVar_list))
train_3mid = opt_3mid.apply_gradients(zip(grads_s3mid, scale3MidVar_list))
train_3end = opt_3end.apply_gradients(zip(grads_s3end, scale3EndVar_list))
train_op = tf.group(
train_vggConv,
train_vggFC,
train_2mid,
train_2end,
train_3mid,
train_3end
)
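# Note (added for clarity): each variable group has its own Adam optimizer so the pretrained
# VGG convolutions, the VGG fully connected layers and the scale-2/scale-3 branches can be
# fine-tuned with different learning rates (lr_vgg, lr_vgg_fc, lr_2_mid/end, lr_3_mid/end);
# grouping the apply_gradients ops lets a single sess.run() step update all of them.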
# Initialize the variables (i.e. assign their default value)
init_val = tf.global_variables_initializer()
# Start training
with tf.Session() as sess:
writer = tf.summary.FileWriter("./graph", sess.graph)
# Run the initializer
sess.run(tf.group(init_val, it_train.initializer, it_selftest.initializer, it_eval.initializer), \
feed_dict={
trainSetName: trainingSetFiles,
slfTestSetName: slfTestSetFiles,
evalSetName: evalSetFiles
}
)
vggConvRestorer.restore(sess, "./savedModel/vgg_new.ckpt")
scale2Restorer.restore(sess, "./savedModel/scale2_difflr.ckpt")
scale3Restorer.restore(sess, "./savedModel/scale3.ckpt")
# Run
step = 1
epoch = 1
tic = time.time()
loss_cum = 0
loss_selftest_prev = 1
while True:
# for i in range(0,10):
try:
# Run optimization op (backprop)
loss, _ = sess.run([loss_op, train_op], feed_dict={Drop_rate: 0.5})
loss_cum = loss_cum + loss
# Display training loss
if step % display_step == 0 or step == 1:
print("Epoch " + str(epoch) + ", " + \
"Step " + str(step) + ", Mini batch Loss= " + "{:.4f}".format(loss))
# Display self-test loss
if step % test_step == 0:
dotprod_selftest_cum = []
for test_batch_i in range(0, int(num_self_test_set/test_batch_size)):
dotprod_part, pred_selftest, mask_selftest, gt_selftest = sess.run(loss_test_dotprod_op)
dotprod_selftest = np.array(dotprod_part)
dotprod_selftest_cum = np.concatenate((dotprod_selftest_cum, dotprod_selftest), axis=0)
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensornetwork as tn
import pytest
import numpy as np
from tensornetwork.block_sparse import (U1Charge, BlockSparseTensor, Index,
BaseCharge)
from tensornetwork.block_sparse.charge import charge_equal
from tensornetwork.block_sparse.utils import _find_diagonal_sparse_blocks
def get_random(shape, num_charges, dtype=np.float64):
R = len(shape)
charges = [
BaseCharge(
np.random.randint(-5, 5, (shape[n], num_charges)),
charge_types=[U1Charge] * num_charges) for n in range(R)
]
flows = list(np.full(R, fill_value=False, dtype=bool))
indices = [Index(charges[n], flows[n]) for n in range(R)]
return BlockSparseTensor.random(indices=indices, dtype=dtype)
def get_square_matrix(shape, num_charges, dtype=np.float64):
charge = BaseCharge(
np.random.randint(-5, 5, (shape, num_charges)),
charge_types=[U1Charge] * num_charges)
flows = [True, False]
indices = [Index(charge, flows[n]) for n in range(2)]
return BlockSparseTensor.random(indices=indices, dtype=dtype)
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node_full_svd_names(num_charges):
np.random.seed(10)
a = tn.Node(
get_random((10, 10), num_charges=num_charges), backend='symmetric')
e1 = a[0]
e2 = a[1]
left, s, right, _, = tn.split_node_full_svd(
a, [e1], [e2],
left_name='left',
middle_name='center',
right_name='right',
left_edge_name='left_edge',
right_edge_name='right_edge')
assert left.name == 'left'
assert s.name == 'center'
assert right.name == 'right'
assert left.edges[-1].name == 'left_edge'
assert s[0].name == 'left_edge'
assert s[1].name == 'right_edge'
assert right.edges[0].name == 'right_edge'
@pytest.mark.parametrize("num_charges", [1, 2])
def test_split_node_rq_names(num_charges):
np.random.seed(10)
a = tn.Node(
get_random((5, 5, 5, 5, 5), num_charges=num_charges), backend='symmetric')
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_rq(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
@pytest.mark.parametrize("num_charges", [1, 2])
def test_split_node_qr_names(num_charges):
np.random.seed(10)
a = tn.Node(
get_random((5, 5, 5, 5, 5), num_charges=num_charges), backend='symmetric')
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_qr(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
@pytest.mark.parametrize("num_charges", [1, 2])
def test_split_node_names(num_charges):
np.random.seed(10)
a = tn.Node(
get_random((5, 5, 5, 5, 5), num_charges=num_charges), backend='symmetric')
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right, _ = tn.split_node(
a,
left_edges,
right_edges,
left_name='left',
right_name='right',
edge_name='edge')
assert left.name == 'left'
assert right.name == 'right'
assert left.edges[-1].name == 'edge'
assert right.edges[0].name == 'edge'
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node_rq_unitarity(dtype, num_charges):
np.random.seed(10)
a = tn.Node(
get_square_matrix(50, num_charges, dtype=dtype), backend='symmetric')
r, q = tn.split_node_rq(a, [a[0]], [a[1]])
r[1] | q[0]
qbar = tn.conj(q)
q[1] ^ qbar[1]
u1 = q @ qbar
qbar[0] ^ q[0]
u2 = qbar @ q
blocks, _, shapes = _find_diagonal_sparse_blocks(u1.tensor.flat_charges,
u1.tensor.flat_flows,
len(u1.tensor._order[0]))
for n, block in enumerate(blocks):
np.testing.assert_almost_equal(
np.reshape(u1.tensor.data[block], shapes[:, n]),
np.eye(N=shapes[0, n], M=shapes[1, n]))
blocks, _, shapes = _find_diagonal_sparse_blocks(u2.tensor.flat_charges,
u2.tensor.flat_flows,
len(u2.tensor._order[0]))
for n, block in enumerate(blocks):
np.testing.assert_almost_equal(
np.reshape(u2.tensor.data[block], shapes[:, n]),
np.eye(N=shapes[0, n], M=shapes[1, n]))
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node_rq(dtype, num_charges):
np.random.seed(10)
a = tn.Node(
get_random((6, 7, 8, 9, 10), num_charges, dtype=dtype),
backend='symmetric')
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_rq(a, left_edges, right_edges)
tn.check_correct([left, right])
result = tn.contract(left[3])
np.testing.assert_allclose(result.tensor.data, a.tensor.data)
assert np.all([
charge_equal(result.tensor._charges[n], a.tensor._charges[n])
for n in range(len(a.tensor._charges))
])
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node_qr_unitarity(dtype, num_charges):
np.random.seed(10)
a = tn.Node(
get_square_matrix(50, num_charges, dtype=dtype), backend='symmetric')
q, r = tn.split_node_qr(a, [a[0]], [a[1]])
r[0] | q[1]
qbar = tn.conj(q)
q[1] ^ qbar[1]
u1 = q @ qbar
qbar[0] ^ q[0]
u2 = qbar @ q
blocks, _, shapes = _find_diagonal_sparse_blocks(u1.tensor.flat_charges,
u1.tensor.flat_flows,
len(u1.tensor._order[0]))
for n, block in enumerate(blocks):
np.testing.assert_almost_equal(
np.reshape(u1.tensor.data[block], shapes[:, n]),
np.eye(N=shapes[0, n], M=shapes[1, n]))
blocks, _, shapes = _find_diagonal_sparse_blocks(u2.tensor.flat_charges,
u2.tensor.flat_flows,
len(u2.tensor._order[0]))
for n, block in enumerate(blocks):
np.testing.assert_almost_equal(
np.reshape(u2.tensor.data[block], shapes[:, n]),
np.eye(N=shapes[0, n], M=shapes[1, n]))
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_split_node_qr(dtype, num_charges):
np.random.seed(10)
a = tn.Node(
get_random((6, 7, 8, 9, 10), num_charges=num_charges, dtype=dtype),
backend='symmetric')
left_edges = []
for i in range(3):
left_edges.append(a[i])
right_edges = []
for i in range(3, 5):
right_edges.append(a[i])
left, right = tn.split_node_qr(a, left_edges, right_edges)
tn.check_correct([left, right])
result = tn.contract(left[3])
np.testing.assert_allclose(result.tensor.data, a.tensor.data)
assert np.all([
charge_equal(result.tensor._charges[n], a.tensor._charges[n])
for n in range(len(a.tensor._charges))
])
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_conj(dtype, num_charges):
np.random.seed(10)
a = tn.Node(
get_random((6, 7, 8, 9, 10), num_charges=num_charges, dtype=dtype),
backend='symmetric')
abar = tn.conj(a)
np.testing.assert_allclose(abar.tensor.data, a.backend.conj(a.tensor.data))
assert np.all([
charge_equal(abar.tensor._charges[n], a.tensor._charges[n])
for n in range(len(a.tensor._charges))
])
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_transpose(dtype, num_charges):
np.random.seed(10)
a = tn.Node(
get_random((6, 7, 8, 9, 10), num_charges=num_charges, dtype=dtype),
backend='symmetric')
order = [a[n] for n in reversed(range(5))]
transpa = tn.transpose(a, [4, 3, 2, 1, 0])
a.reorder_edges(order)
np.testing.assert_allclose(a.tensor.data, transpa.tensor.data)
def test_switch_backend():
np.random.seed(10)
a = tn.Node(np.random.rand(3, 3, 3), name="A", backend="numpy")
b = tn.Node(np.random.rand(3, 3, 3), name="B", backend="numpy")
c = tn.Node(np.random.rand(3, 3, 3), name="C", backend="numpy")
nodes = [a, b, c]
with pytest.raises(ValueError):
tn.switch_backend(nodes, 'symmetric')
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("num_charges", [1, 2, 3])
def test_contract_trace_edges(dtype, num_charges):
np.random.seed(10)
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.models.cie_ucs` module.
"""
import numpy as np
import unittest
from itertools import permutations
from colour.models import (XYZ_to_UCS, UCS_to_XYZ, UCS_to_uv, uv_to_UCS,
UCS_uv_to_xy, xy_to_UCS_uv)
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
'TestXYZ_to_UCS', 'TestUCS_to_XYZ', 'TestUCS_to_uv', 'Testuv_to_UCS',
'TestUCS_uv_to_xy', 'TestXy_to_UCS_uv'
]
class TestXYZ_to_UCS(unittest.TestCase):
"""
Defines :func:`colour.models.cie_ucs.XYZ_to_UCS` definition unit tests
methods.
"""
def test_XYZ_to_UCS(self):
"""
Tests :func:`colour.models.cie_ucs.XYZ_to_UCS` definition.
"""
np.testing.assert_almost_equal(
XYZ_to_UCS(np.array([0.20654008, 0.12197225, 0.05136952])),
np.array([0.13769339, 0.12197225, 0.10537310]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_UCS(np.array([0.14222010, 0.23042768, 0.10495772])),
np.array([0.09481340, 0.23042768, 0.32701033]),
decimal=7)
np.testing.assert_almost_equal(
XYZ_to_UCS(np.array([0.07818780, 0.06157201, 0.28099326])),
np.array([0.05212520, 0.06157201, 0.19376075]),
decimal=7)
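# Context note (added, not part of the original tests): the reference triples above follow the
# CIE 1960 UCS transform U = 2 / 3 * X, V = Y, W = (-X + 3 * Y + Z) / 2, e.g.
# 2 / 3 * 0.20654008 ~= 0.13769339.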
def test_n_dimensional_XYZ_to_UCS(self):
"""
Tests :func:`colour.models.cie_ucs.XYZ_to_UCS` definition n-dimensional
support.
"""
XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
UCS = XYZ_to_UCS(XYZ)
UCS = np.tile(UCS, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(XYZ_to_UCS(XYZ), UCS, decimal=7)
UCS = np.reshape(UCS, (2, 3, 3))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(XYZ_to_UCS(XYZ), UCS, decimal=7)
def test_domain_range_scale_XYZ_to_UCS(self):
"""
Tests :func:`colour.models.cie_ucs.XYZ_to_UCS` definition domain and
range scale support.
"""
XYZ = np.array([0.0704953400, 0.1008000000, 0.0955831300])
UCS = XYZ_to_UCS(XYZ)
d_r = (('reference', 1), (1, 1), (100, 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
XYZ_to_UCS(XYZ * factor), UCS * factor, decimal=7)
@ignore_numpy_errors
def test_nan_XYZ_to_UCS(self):
"""
Tests :func:`colour.models.cie_ucs.XYZ_to_UCS` definition nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = set(permutations(cases * 3, r=3))
for case in cases:
XYZ = np.array(case)
XYZ_to_UCS(XYZ)
class TestUCS_to_XYZ(unittest.TestCase):
"""
Defines :func:`colour.models.cie_ucs.UCS_to_XYZ` definition unit tests
methods.
"""
def test_UCS_to_XYZ(self):
"""
Tests :func:`colour.models.cie_ucs.UCS_to_XYZ` definition.
"""
np.testing.assert_almost_equal(
UCS_to_XYZ(np.array([0.13769339, 0.12197225, 0.10537310])),
np.array([0.20654008, 0.12197225, 0.05136952]),
decimal=7)
np.testing.assert_almost_equal(
UCS_to_XYZ(np.array([0.09481340, 0.23042768, 0.32701033])),
np.array([0.14222010, 0.23042768, 0.10495772]),
decimal=7)
np.testing.assert_almost_equal(
UCS_to_XYZ(np.array([0.05212520, 0.06157201, 0.19376075])),
np.array([0.07818780, 0.06157201, 0.28099326]),
decimal=7)
def test_n_dimensional_UCS_to_XYZ(self):
"""
Tests :func:`colour.models.cie_ucs.UCS_to_XYZ` definition n-dimensional
support.
"""
UCS = np.array([0.13769339, 0.12197225, 0.10537310])
XYZ = UCS_to_XYZ(UCS)
UCS = np.tile(UCS, (6, 1))
XYZ = np.tile(XYZ, (6, 1))
np.testing.assert_almost_equal(UCS_to_XYZ(UCS), XYZ, decimal=7)
UCS = np.reshape(UCS, (2, 3, 3))
XYZ = np.reshape(XYZ, (2, 3, 3))
np.testing.assert_almost_equal(UCS_to_XYZ(UCS), XYZ, decimal=7)
import numpy as np
import tensorflow as tf
from pysc2.lib.actions import FunctionCall, FUNCTIONS
from rl.agents.runner import BaseRunner
from rl.common.pre_processing import Preprocessor
from rl.common.pre_processing import is_spatial_action, stack_ndarray_dicts
from rl.common.util import mask_unused_argument_samples, flatten_first_dims, flatten_first_dims_dict
class PPORunner():
def __init__(self, agent, envs, summary_writer, args):
"""
Args:
agent: A2CAgent instance.
envs: SubprocVecEnv instance.
summary_writer: summary writer to log episode scores.
args: {
train: whether to train the agent.
n_steps: number of agent steps for collecting rollouts.
discount: future reward discount.
}
"""
self.agent = agent
self.envs = envs
self.summary_writer = summary_writer
self.train = args.train
self.n_steps = args.steps_per_batch
self.discount = args.discount
self.noptepochs = 4 # TODO: get from args.
self.nminibatches = 4
self.preproc = Preprocessor()
self.last_obs = self.preproc.preprocess_obs(self.envs.reset())
self.old_log_probs = np.zeros((self.n_steps*self.envs.n_envs), dtype=np.float32)
print('\n### PPO Runner #######')
print(f'# agent = {self.agent}')
print(f'# train = {self.train}')
print(f'# n_steps = {self.n_steps}')
print(f'# discount = {self.discount}')
print(f'# noptepochs = {self.noptepochs}')
print(f'# nminibatches = {self.nminibatches}')
print('######################\n')
# TODO: we probably need to save this state during checkpointing
self.states = agent.initial_state
self.episode_counter = 1
self.max_score = 0.0
self.cumulative_score = 0.0
def run_batch(self, train_summary=False):
"""Collect trajectories for a single batch and train (if self.train).
Args:
train_summary: return a Summary of the training step (losses, etc.).
Returns:
result: None (if not self.train) or the return value of agent.train.
"""
nbatch = self.envs.n_envs*self.n_steps
assert nbatch % self.nminibatches == 0
nbatch_train = nbatch // self.nminibatches
last_obs = self.last_obs
shapes = (self.n_steps, self.envs.n_envs)
values = np.zeros(shapes, dtype=np.float32)
rewards = np.zeros(shapes, dtype=np.float32)
dones = np.zeros(shapes, dtype=np.float32)
all_actions, all_obs = [], []
mb_states = self.states # save the initial states at the beginning of each mb for later training.
for n in range(self.n_steps):
actions, values[n,:], states = self.agent.step(last_obs, self.states) # TODO: it would be better if we could get the log probs here instead of having to call get_log_probs later.
actions = mask_unused_argument_samples(actions)
all_obs.append(last_obs)
all_actions.append(actions)
pysc2_actions = actions_to_pysc2(actions, size=last_obs['screen'].shape[1:3])
obs_raw = self.envs.step(pysc2_actions)
last_obs = self.preproc.preprocess_obs(obs_raw)
rewards[n,:], dones[n,:] = zip(*[(t.reward,t.last()) for t in obs_raw])
self.states = states
for t in obs_raw:
if t.last():
self.cumulative_score += self._summarize_episode(t)
next_values = self.agent.get_value(last_obs, states)
returns, advs = compute_returns_and_advs(rewards, dones, values, next_values, self.discount)
actions = stack_and_flatten_actions(all_actions)
obs = flatten_first_dims_dict(stack_ndarray_dicts(all_obs))
returns = flatten_first_dims(returns)
advs = flatten_first_dims(advs)
values = flatten_first_dims(values)
self.last_obs = last_obs
if self.train:
mbloss = []
old_log_probs = self.agent.get_log_probs(obs, self.states, actions)
if self.states is None:
# print('train')
inds = np.arange(nbatch)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 12:13:33 2018
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
#Python dependencies
from __future__ import division
import pandas as pd
import numpy as np
from scipy.constants import codata
from pylab import *
from scipy.optimize import curve_fit
import mpmath as mp
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
#from scipy.optimize import leastsq
pd.options.mode.chained_assignment = None
#Plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import seaborn as sns
import matplotlib.ticker as mtick
mpl.rc('mathtext', fontset='stixsans', default='regular')
mpl.rcParams.update({'axes.labelsize':22})
mpl.rc('xtick', labelsize=16)
mpl.rc('ytick', labelsize=16)
mpl.rc('legend',fontsize=14)
from scipy.constants import codata
F = codata.physical_constants['Faraday constant'][0]
Rg = codata.physical_constants['molar gas constant'][0]
### Importing PyEIS add-ons
from .PyEIS_Data_extraction import *
from .PyEIS_Lin_KK import *
from .PyEIS_Advanced_tools import *
### Frequency generator
##
#
def freq_gen(f_start, f_stop, pts_decade=7):
'''
Frequency generator with log-spaced frequencies
Inputs
----------
f_start = frequency start [Hz]
f_stop = frequency stop [Hz]
pts_decade = Points/decade, default 7 [-]
Output
----------
[0] = frequency range [Hz]
[1] = Angular frequency range [1/s]
'''
f_decades = np.log10(f_start) - np.log10(f_stop)
f_range = np.logspace(np.log10(f_start), np.log10(f_stop), num=np.around(pts_decade*f_decades).astype(int), endpoint=True)
w_range = 2 * np.pi * f_range
return f_range, w_range
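# Minimal usage sketch (added, not part of the original API; values are hypothetical): sweep
# from 100 kHz down to 0.1 Hz and feed the angular frequencies into one of the simulation
# functions defined below, e.g.
#   f_demo, w_demo = freq_gen(f_start=1e5, f_stop=0.1, pts_decade=7)
#   Z_demo = cir_RsRQ(w_demo, Rs=20.0, R=200.0, Q=1e-5, n=0.9)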
### Simulation Element Functions
##
#
def elem_L(w, L):
'''
Simulation Function: -L-
Returns the impedance of an inductor
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Inductance [ohm * s]
'''
return 1j*w*L
def elem_C(w,C):
'''
Simulation Function: -C-
Inputs
----------
w = Angular frequency [1/s]
C = Capacitance [F]
'''
return 1/(C*(w*1j))
def elem_Q(w,Q,n):
'''
Simulation Function: -Q-
Inputs
----------
w = Angular frequency [1/s]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
'''
return 1/(Q*(w*1j)**n)
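# Sanity-check sketch (added; hypothetical values): with n = 1 the CPE reduces to an ideal
# capacitor, i.e. elem_Q(w, Q, n=1) equals elem_C(w, C=Q) for any angular frequency, e.g.
#   w_demo = np.array([1.0, 10.0, 100.0])
#   np.allclose(elem_Q(w_demo, Q=1e-5, n=1), elem_C(w_demo, C=1e-5))  # -> True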
### Simulation Circuit Functions
##
#
def cir_RsC(w, Rs, C):
'''
Simulation Function: -Rs-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
C = Capacitance [F]
'''
return Rs + 1/(C*(w*1j))
def cir_RsQ(w, Rs, Q, n):
'''
Simulation Function: -Rs-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
'''
return Rs + 1/(Q*(w*1j)**n)
def cir_RQ(w, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -RQ-
Return the impedance of an RQ element (R in parallel with Q). See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return (R/(1+R*Q*(w*1j)**n))
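# Note on the 'none' handling above (added for clarity): R, Q, n and fs are linked through the
# summit frequency of the (RQ) element, R * Q * (2*pi*fs)**n = 1, so any one of R, Q or n may
# be passed as 'none' and recovered from the remaining two together with fs.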
def cir_RsRQ(w, Rs='none', R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RC(w, C='none', R='none', fs='none'):
'''
Simulation Function: -RC-
Returns the impedance of an RC circuit, using the RQ definitions with n=1. See cir_RQ() for details
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
R = Resistance [Ohm]
C = Capacitance [F]
fs = Summit frequency of RC circuit [Hz]
'''
return cir_RQ(w, R=R, Q=C, n=1, fs=fs)
def cir_RsRQRQ(w, Rs, R='none', Q='none', n='none', fs='none', R2='none', Q2='none', n2='none', fs2='none'):
'''
Simulation Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [Ohm]
R = Resistance [Ohm]
Q = Constant phase element [s^n/ohm]
n = Constant phase element exponent [-]
fs = Summit frequency of RQ circuit [Hz]
R2 = Resistance [Ohm]
Q2 = Constant phase element [s^n/ohm]
n2 = Constant phase element exponent [-]
fs2 = Summit frequency of RQ circuit [Hz]
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if R2 == 'none':
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif Q2 == 'none':
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif n2 == 'none':
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_RsRQQ(w, Rs, Q, n, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = Summit frequency of RQ circuit [Hz]
Q = Constant phase element of series Q [s^n/ohm]
n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_Q(w,Q,n)
def cir_RsRQC(w, Rs, C, R1='none', Q1='none', n1='none', fs1='none'):
'''
Simulation Function: -Rs-RQ-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
Q1 = Constant phase element in (RQ) circuit [s^n/ohm]
n1 = Constant phase element exponent in (RQ) circuit [-]
fs1 = summit frequency of RQ circuit [Hz]
C = Constant phase element of series Q [s^n/ohm]
'''
return Rs + cir_RQ(w, R=R1, Q=Q1, n=n1, fs=fs1) + elem_C(w, C=C)
def cir_RsRCC(w, Rs, R1, C1, C):
'''
Simulation Function: -Rs-RC-C-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
C = Capacitance of series C [s^n/ohm]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ(w, Rs, R1, C1, Q, n):
'''
Simulation Function: -Rs-RC-Q-
Inputs
----------
w = Angular frequency [1/s]
Rs = Series Resistance [ohm]
R1 = Resistance in (RQ) circuit [ohm]
C1 = Constant phase element in (RQ) circuit [s^n/ohm]
Q = Constant phase element of series Q [s^n/ohm]
n = Constant phase element exponent of series Q [-]
'''
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
def Randles_coeff(w, n_electron, A, E='none', E0='none', D_red='none', D_ox='none', C_red='none', C_ox='none', Rg=Rg, F=F, T=298.15):
'''
Returns the Randles coefficient sigma [ohm/s^1/2].
Two cases: a) both Ox and Red are present in solution, in which case C_red and D_red are defined; b) initially only the Ox species
is present in solution with bulk concentration C*_ox, in which case the surface concentrations may be calculated as a function
of the electrode potential following the Nernst equation. In case b) pass C_red and D_red as 'none'.
Ref.:
- <NAME>., ISBN: 978-1-4614-8932-0, "Electrochemical Impedance Spectroscopy and its Applications"
- <NAME>., ISBN: 0-471-04372-9, <NAME>. R. (2001) "Electrochemical methods: Fundamentals and applications". New York: Wiley.
<NAME> (<EMAIL> // <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
D_ox = Diffusion coefficient of oxidized species [cm2/s]
D_red = Diffusion coefficient of reduced species [cm2/s]
C_ox = Bulk concentration of oxidized species [mol/cm3]
C_red = Bulk concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
F = Faraday constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = formal potential [V]
if reduced specie is absent == 'none'
Returns
----------
Randles coefficient [ohm/s^1/2]
'''
if C_red != 'none' and D_red != 'none':
sigma = ((Rg*T) / ((n_electron**2) * A * (F**2) * (2**(1/2)))) * ((1/(D_ox**(1/2) * C_ox)) + (1/(D_red**(1/2) * C_red)))
elif C_red == 'none' and D_red == 'none' and E!='none' and E0!= 'none':
f = F/(Rg*T)
x = (n_electron*f*(E-E0))/2
func_cosh2 = (np.cosh(2*x)+1)/2
sigma = ((4*Rg*T) / ((n_electron**2) * A * (F**2) * C_ox * ((2*D_ox)**(1/2)) )) * func_cosh2
else:
print('define E and E0')
Z_Aw = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Z_Aw
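# Usage sketch (added; hypothetical values, case a) with both species in solution): the
# returned Warburg impedance has equal real and negative imaginary parts, sigma/sqrt(w).
#   w_demo = 2 * np.pi * np.logspace(4, -1, 36)
#   Z_demo = Randles_coeff(w_demo, n_electron=1, A=1.0,
#                          D_red=1e-6, D_ox=1e-6, C_red=1e-6, C_ox=1e-6)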
def cir_Randles(w, n_electron, D_red, D_ox, C_red, C_ox, Rs, Rct, n, E, A, Q='none', fs='none', E0=0, F=F, Rg=Rg, T=298.15):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit with the full complexity of the Warburg constant
NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
Inputs
----------
n_electron = number of e- [-]
A = geometrical surface area [cm2]
D_ox = Diffusion coefficient of oxidized species [cm2/s]
D_red = Diffusion coefficient of reduced species [cm2/s]
C_ox = Concentration of oxidized species [mol/cm3]
C_red = Concentration of reduced species [mol/cm3]
T = Temperature [K]
Rg = Gas constant [J/molK]
F = Faraday constant [C/mol]
E = Potential [V]
if reduced specie is absent == 'none'
E0 = Formal potential [V]
if reduced specie is absent == 'none'
Rs = Series resistance [ohm]
Rct = charge-transfer resistance [ohm]
Q = Constant phase element used to model the double-layer capacitance [F]
n = exponent of the CPE [-]
Returns
----------
The real and imaginary impedance of a Randles circuit [ohm]
'''
Z_Rct = Rct
Z_Q = elem_Q(w,Q,n)
Z_w = Randles_coeff(w, n_electron=n_electron, E=E, E0=E0, D_red=D_red, D_ox=D_ox, C_red=C_red, C_ox=C_ox, A=A, T=T, Rg=Rg, F=F)
return Rs + 1/(1/Z_Q + 1/(Z_Rct+Z_w))
def cir_Randles_simplified(w, Rs, R, n, sigma, Q='none', fs='none'):
'''
Simulation Function: Randles -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit with a simplified Warburg element (sigma passed directly)
NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> / <EMAIL>)
'''
if R == 'none':
R = (1/(Q*(2*np.pi*fs)**n))
elif Q == 'none':
Q = (1/(R*(2*np.pi*fs)**n))
elif n == 'none':
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
# Polymer electrolytes
def cir_C_RC_C(w, Ce, Cb='none', Rb='none', fsb='none'):
'''
Simulation Function: -C-(RC)-C-
This circuit is often used for modeling blocking electrodes with a polymeric electrolyte, which exhibits an immobile ionic species in bulk that gives a capacitance contribution
to the otherwise resistive electrolyte
Ref:
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London, Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Ce = Interfacial capacitance [F]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = summit frequency of bulk (RC) circuit [Hz]
'''
Z_C = elem_C(w,C=Ce)
Z_RC = cir_RC(w, C=Cb, R=Rb, fs=fsb)
return Z_C + Z_RC
def cir_Q_RQ_Q(w, Qe, ne, Qb='none', Rb='none', fsb='none', nb='none'):
'''
Simulation Function: -Q-(RQ)-Q-
Modified cir_C_RC_C() circuit that can be used if the electrodes and the bulk do not behave like ideal capacitors
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
Qe = Interfacial capacitance modeled with a CPE [F]
ne = Interfacial constant phase element exponent [-]
Rb = Bulk/series resistance [Ohm]
Qb = Bulk capacitance modeled with a CPE [s^n/ohm]
nb = Bulk constant phase element exponent [-]
fsb = summit frequency of bulk (RQ) circuit [Hz]
'''
Z_Q = elem_Q(w,Q=Qe,n=ne)
Z_RQ = cir_RQ(w, Q=Qb, R=Rb, fs=fsb, n=nb)
return Z_Q + Z_RQ
def tanh(x):
'''
As numpy gives overflow errors when the argument of tanh becomes very large (above ~10^250), this function is used instead of np.tanh
'''
return (1-np.exp(-2*x))/(1+np.exp(-2*x))
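# Quick check sketch (added; hypothetical values): for moderate arguments this helper matches
# np.tanh, while for very large positive x the exp(-2*x) terms underflow to 0 and it simply
# returns 1.0 instead of propagating an overflow from an exp-based expression.
#   np.isclose(tanh(2.0), np.tanh(2.0))  # -> True
#   tanh(1e3)                            # -> 1.0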
def cir_RCRCZD(w, L, D_s, u1, u2, Cb='none', Rb='none', fsb='none', Ce='none', Re='none', fse='none'):
'''
Simulation Function: -RC_b-RC_e-Z_D
This circuit has been used to study non-blocking electrodes with an ionically conducting electrolyte that has both a mobile and an immobile ionic species in bulk, mixed with an
ionically conducting salt. This behavior yields an impedance response that consists of the interfacial impedances -(RC_e)-, the ionically conducting polymer bulk -(RC_b)-,
and the diffusional impedance of the dissolved salt.
Refs.:
- <NAME>. and <NAME>., Electrochimica Acta, 27, 1671-1675, 1982, "Conductivity, Charge Transfer and Transport number - An AC-Investigation
of the Polymer Electrolyte LiSCN-Poly(ethyleneoxide)"
- <NAME>., and <NAME>. "Polymer Electrolyte Reviews - 1" Elsevier Applied Science Publishers LTD, London
Bruce, P. "Electrical Measurements on Polymer Electrolytes"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
----------
w = Angular frequency [1/s]
L = Thickness of electrode [cm]
D_s = Diffusion coefficient of dissolved salt [cm2/s]
u1 = Mobility of the ion reacting at the electrode interface
u2 = Mobility of other ion
Re = Interfacial resistance [Ohm]
Ce = Interfacial capacitance [F]
fse = Summit frequency of the interfacial (RC) circuit [Hz]
Rb = Bulk/series resistance [Ohm]
Cb = Bulk capacitance [F]
fsb = Summit frequency of the bulk (RC) circuit [Hz]
'''
Z_RCb = cir_RC(w, C=Cb, R=Rb, fs=fsb)
Z_RCe = cir_RC(w, C=Ce, R=Re, fs=fse)
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(x=alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ(w, Rs, L, Ri, Q='none', n='none'):
'''
Simulation Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = exponent for the interfacial capacitance [-]
'''
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_TLsQ
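# Expression sketch (added for clarity): with the per-length interfacial impedance
# Phi = 1/(Q*(j*w)**n) and ionic resistance Ri, the construction above evaluates
#   Z_TLsQ = sqrt(Ri * Phi) * coth(L * sqrt(Ri / Phi))
# element-wise, using mpmath's coth to stay stable for very large or very small arguments.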
def cir_RsRQTLsQ(w, Rs, R1, fs1, n1, L, Ri, Q, n, Q1='none'):
'''
Simulation Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance(Q)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
Q = Interfacial capacitance of non-faradaic interface [F/cm]
n = Exponent for the interfacial capacitance [-]
Output
-----------
Impedance of Rs-(RQ)1-TLsQ
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs(w, Rs, L, Ri, R='none', Q='none', n='none', fs='none'):
'''
Simulation Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
R = Interfacial Charge transfer resistance [ohm*cm]
fs = Summit frequency of interfacial RQ circuit [Hz]
n = Exponent for interfacial RQ circuit [-]
Q = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-TLs(RQ)
'''
Phi = cir_RQ(w, R, Q, n, fs)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs(w, Rs, L, Ri, R1, n1, fs1, R2, n2, fs2, Q1='none', Q2='none'):
'''
Simulation Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
The simplified transmission line assumes that Ri is much greater than Rel (electrode resistance).
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> / <EMAIL>)
Inputs
-----------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = Exponent for RQ circuit [-]
Q1 = Constant phase element of RQ circuit [s^n/(ohm * cm)]
L = Length/Thickness of porous electrode [cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
R2 = Interfacial Charge transfer resistance [ohm*cm]
fs2 = Summit frequency of interfacial RQ circuit [Hz]
n2 = Exponent for interfacial RQ circuit [-]
Q2 = Constant phase element of interfacial capacitance [s^n/Ohm]
Output
-----------
Impedance of Rs-(RQ)1-TLs(RQ)2
'''
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
Phi = cir_RQ(w=w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
### Support function
def sinh(x):
'''
As numpy gives overflow errors when sinh becomes very large (above ~10^250), this function is used instead of np/mp.sinh()
'''
return (1 - np.exp(-2*x))/(2*np.exp(-x))
def coth(x):
'''
As numpy gives overflow errors when coth becomes very large (above ~10^250), this function is used instead of np/mp.coth()
'''
return (1 + np.exp(-2*x))/(1 - np.exp(-2*x))
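# Quick check sketch (added; hypothetical values): both helpers agree with their exp-based
# definitions for moderate (possibly complex) arguments, e.g.
#   x_demo = 1.5 + 0.5j
#   np.isclose(sinh(x_demo), np.sinh(x_demo))      # -> True
#   np.isclose(coth(x_demo), 1.0/np.tanh(x_demo))  # -> True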
###
def cir_RsTLQ(w, L, Rs, Q, n, Rel, Ri):
'''
Simulation Function: -R-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- Bisquert J. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
n = exponent for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ(w, L, Rs, Q, n, Rel, Ri, R1, n1, fs1, Q1='none'):
'''
Simulation Function: -R-RQ-TLQ- (interfacial non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
n = exponent for interfacial RQ element [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL(w, L, Rs, R, fs, n, Rel, Ri, Q='none'):
'''
Simulation Function: -R-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- <NAME>. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = Interfacial charge transfer resistance [ohm * cm]
fs = Summit frequency for the interfacial RQ element [Hz]
n = Exponent for interfacial RQ element [-]
Q = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = Electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = Thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R, Q=Q, n=n, fs=fs)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL(w, L, Rs, R1, fs1, n1, R2, fs2, n2, Rel, Ri, Q1='none', Q2='none'):
'''
Simulation Function: -R-RQ-TL- (interfacial reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
Ref.:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = Charge transfer resistance of RQ circuit [ohm]
fs1 = Summit frequency for RQ circuit [Hz]
n1 = exponent for RQ circuit [-]
Q1 = constant phase element of RQ circuit [s^n/(ohm * cm)]
R2 = interfacial charge transfer resistance [ohm * cm]
fs2 = Summit frequency for the interfacial RQ element [Hz]
n2 = exponent for interfacial RQ element [-]
Q2 = Constant phase element for the interfacial capacitance [s^n/ohm]
Rel = electronic resistance of electrode [ohm/cm]
Ri = Ionic resistance inside of flooded pores [ohm/cm]
L = thickness of porous electrode [cm]
Output
--------------
Impedance of Rs-TL
'''
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
Z_RQ1 = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = cir_RQ(w, R=R2, Q=Q2, n=n2, fs=fs2)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
# Transmission lines with solid-state transport
def cir_RsTL_1Dsolid(w, L, D, radius, Rs, R, Q, n, R_w, n_w, Rel, Ri):
'''
Simulation Function: -R-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anamalous transport effects in the impedance of porous film electrodes"
- <NAME>. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R = particle charge transfer resistance [ohm*cm^2]
Q = Constant phase element in the modified Randles element of a particle [s^n/ohm]
n = exponent for internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
--------------
Impedance of Rs-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w,Q=Q,n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
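# Numerical-stability sketch (illustration only, not part of the original library): the element-wise
# mp.coth() loop above, and the commented-out mpmath blocks elsewhere, exist because a coth built
# directly from np.cosh/np.sinh overflows once the argument exceeds roughly 710, whereas mpmath
# evaluates it to the correct limit of ~1. This relies on the module-level mpmath import (mp).
def _demo_coth_overflow(x_large=800.0):
    with np.errstate(over='ignore', invalid='ignore'):
        naive_coth = np.cosh(x_large)/np.sinh(x_large) #inf/inf -> nan for large arguments
    stable_coth = float(mp.coth(x_large)) #arbitrary precision: stays finite, ~1.0
    return naive_coth, stable_coth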
def cir_RsRQTL_1Dsolid(w, L, D, radius, Rs, R1, fs1, n1, R2, Q2, n2, R_w, n_w, Rel, Ri, Q1='none'):
'''
Simulation Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel
Warburg element is specific for 1D solid-state diffusion
Refs:
- <NAME>., and <NAME>., Advances in Electrochemistry and Electrochemical Engineering, p. 329, Wiley-Interscience, New York (1973)
- Bisquert J. Electrochemistry Communications 1, 1999, 429-435, "Anomalous transport effects in the impedance of porous film electrodes"
- <NAME>. J. Phys. Chem. B., 2000, 104, 2287-2298, "Doubling exponent models for the analysis of porous film electrodes by impedance.
Relaxation of TiO2 nanoporous in aqueous solution"
- Illig, J., Physically based Impedance Modelling of Lithium-ion Cells, KIT Scientific Publishing (2014)
- Scipioni, et al., ECS Transactions, 69 (18) 71-80 (2015)
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------------
Rs = Series resistance [ohm]
R1 = charge transfer resistance of the interfacial RQ element [ohm*cm^2]
fs1 = max frequency peak of the interfacial RQ element[Hz]
n1 = exponent for interfacial RQ element
R2 = particle charge transfer resistance [ohm*cm^2]
Q2 = Constant phase element in the modified Randles element of a particle [s^n/ohm]
n2 = exponent for internal RQ element in the modified Randles element of a particle [-]
Rel = electronic resistance of electrode [ohm/cm]
Ri = ionic resistance of solution in flooded pores of electrode [ohm/cm]
R_w = polarization resistance of finite diffusion Warburg element [ohm]
n_w = exponent for Warburg element [-]
L = thickness of porous electrode [cm]
D = solid-state diffusion coefficient [cm^2/s]
radius = average particle radius [cm]
Output
------------------
Impedance of R-RQ-TL(Q(RW))
'''
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Z_RQ = cir_RQ(w=w, R=R1, Q=Q1, n=n1, fs=fs1)
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ + Z_TL
### Fitting Circuit Functions
##
#
def elem_C_fit(params, w):
'''
Fit Function: -C-
'''
C = params['C']
return 1/(C*(w*1j))
def elem_Q_fit(params, w):
'''
Fit Function: -Q-
Constant Phase Element for Fitting
'''
Q = params['Q']
n = params['n']
return 1/(Q*(w*1j)**n)
def cir_RsC_fit(params, w):
'''
Fit Function: -Rs-C-
'''
Rs = params['Rs']
C = params['C']
return Rs + 1/(C*(w*1j))
def cir_RsQ_fit(params, w):
'''
Fit Function: -Rs-Q-
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
return Rs + 1/(Q*(w*1j)**n)
def cir_RC_fit(params, w):
'''
Fit Function: -RC-
Returns the impedance of an RC circuit, using the RQ definitions where n=1
'''
n = 1 #an RC element is an RQ element with a fixed exponent of n = 1
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
    Q = params['C']
    fs = params['fs']
    R = (1/(Q*(2*np.pi*fs)**n))
elif str(params.keys())[10:].find("C") == -1: #elif C == 'none':
    R = params['R']
    fs = params['fs']
    Q = (1/(R*(2*np.pi*fs)**n))
else: #both R and C are given
    R = params['R']
    Q = params['C']
return cir_RQ(w, R=R, Q=Q, n=n, fs='none')
def cir_RQ_fit(params, w):
'''
Fit Function: -RQ-
Return the impedance of an RQ circuit:
Z(w) = R / (1 + R*Q*(j*w)^n)
See Explanation of equations under cir_RQ()
params.keys()[10:] extracts the names of the user-defined parameters; each name is searched for, and if it is not found (find(X) == -1) that parameter is treated as 'none' and is derived from the remaining parameters
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
return R/(1+R*Q*(w*1j)**n)
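# Illustrative sketch (not part of the original library) of the parameter-lookup mechanism described
# in the docstring above: cir_RQ_fit() inspects which names exist in the lmfit Parameters object and
# derives the missing one. Here 'Q' is deliberately omitted, so it is reconstructed from R, n and fs.
# The numeric values are arbitrary placeholders.
def _demo_cir_RQ_fit():
    from lmfit import Parameters #lmfit is the fitting backend referenced in leastsq_errorfunc()
    p = Parameters()
    p.add('R', value=100.0) #ohm
    p.add('n', value=0.9) #CPE exponent
    p.add('fs', value=1.0e3) #summit frequency [Hz]; note that 'Q' is intentionally not added
    w_demo = 2*np.pi*np.logspace(5, -1, 30)
    return cir_RQ_fit(p, w_demo)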
def cir_RsRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-
Return the impedance of an Rs-RQ circuit. See details for RQ under cir_RQ_fit()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n))
def cir_RsRQRQ_fit(params, w):
'''
Fit Function: -Rs-RQ-RQ-
Return the impedance of an Rs-RQ-RQ circuit. See details under cir_RsRQRQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("'R2'") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'Q2'") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("'n2'") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("'fs2'") == -1: #elif fs == 'none':
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
Rs = params['Rs']
return Rs + (R/(1+R*Q*(w*1j)**n)) + (R2/(1+R2*Q2*(w*1j)**n2))
def cir_Randles_simplified_Fit(params, w):
'''
Fit Function: Randles simplified -Rs-(Q-(RW)-)-
Return the impedance of a Randles circuit. See more under cir_Randles_simplified()
NOTE: This Randles circuit is only meant for semi-infinite linear diffusion
<NAME> (<EMAIL> || <EMAIL>)
'''
if str(params.keys())[10:].find("'R'") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'Q'") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("'n'") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("'fs'") == -1: #elif fs == 'none':
R = params['R']
Q = params['Q']
n = params['n']
Rs = params['Rs']
sigma = params['sigma']
Z_Q = 1/(Q*(w*1j)**n)
Z_R = R
Z_w = sigma*(w**(-0.5))-1j*sigma*(w**(-0.5))
return Rs + 1/(1/Z_Q + 1/(Z_R+Z_w))
def cir_RsRQQ_fit(params, w):
'''
Fit Function: -Rs-RQ-Q-
See cir_RsRQQ() for details
'''
Rs = params['Rs']
Q = params['Q']
n = params['n']
Z_Q = 1/(Q*(w*1j)**n)
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_Q
def cir_RsRQC_fit(params, w):
'''
Fit Function: -Rs-RQ-C-
See cir_RsRQC() for details
'''
Rs = params['Rs']
C = params['C']
Z_C = 1/(C*(w*1j))
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
return Rs + Z_RQ + Z_C
def cir_RsRCC_fit(params, w):
'''
Fit Function: -Rs-RC-C-
See cir_RsRCC() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
C = params['C']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_C(w, C=C)
def cir_RsRCQ_fit(params, w):
'''
Fit Function: -Rs-RC-Q-
See cir_RsRCQ() for details
'''
Rs = params['Rs']
R1 = params['R1']
C1 = params['C1']
Q = params['Q']
n = params['n']
return Rs + cir_RC(w, C=C1, R=R1, fs='none') + elem_Q(w,Q,n)
# Polymer electrolytes
def cir_C_RC_C_fit(params, w):
'''
Fit Function: -C-(RC)-C-
See cir_C_RC_C() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Ce = params['Ce']
Z_C = 1/(Ce*(w*1j))
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RC = (Rb/(1+Rb*Cb*(w*1j)))
return Z_C + Z_RC
def cir_Q_RQ_Q_Fit(params, w):
'''
Fit Function: -Q-(RQ)-Q-
See cir_Q_RQ_Q() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
Qe = params['Qe']
ne = params['ne']
Z_Q = 1/(Qe*(w*1j)**ne)
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Qb = params['Qb']
nb = params['nb']
fsb = params['fsb']
Rb = (1/(Qb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("Qb") == -1: #elif Q == 'none':
Rb = params['Rb']
nb = params['nb']
fsb = params['fsb']
Qb = (1/(Rb*(2*np.pi*fsb)**nb))
if str(params.keys())[10:].find("nb") == -1: #elif n == 'none':
Rb = params['Rb']
Qb = params['Qb']
fsb = params['fsb']
nb = np.log(Qb*Rb)/np.log(1/(2*np.pi*fsb))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
nb = params['nb']
Qb = params['Qb']
Z_RQ = Rb/(1+Rb*Qb*(w*1j)**nb)
return Z_Q + Z_RQ
def cir_RCRCZD_fit(params, w):
'''
Fit Function: -RC_b-RC_e-Z_D
See cir_RCRCZD() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
# Interfacial impedance
if str(params.keys())[10:].find("Re") == -1: #if R == 'none':
Ce = params['Ce']
fse = params['fse']
Re = (1/(Ce*(2*np.pi*fse)))
if str(params.keys())[10:].find("Ce") == -1: #elif Q == 'none':
Re = params['Re']
fse = params['fse']
Ce = (1/(Re*(2*np.pi*fse)))
if str(params.keys())[10:].find("fse") == -1: #elif fs == 'none':
Re = params['Re']
Ce = params['Ce']
Z_RCe = (Re/(1+Re*Ce*(w*1j)))
# Bulk impedance
if str(params.keys())[10:].find("Rb") == -1: #if R == 'none':
Cb = params['Cb']
fsb = params['fsb']
Rb = (1/(Cb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("Cb") == -1: #elif Q == 'none':
Rb = params['Rb']
fsb = params['fsb']
Cb = (1/(Rb*(2*np.pi*fsb)))
if str(params.keys())[10:].find("fsb") == -1: #elif fs == 'none':
Rb = params['Rb']
Cb = params['Cb']
Z_RCb = (Rb/(1+Rb*Cb*(w*1j)))
# Mass transport impedance
L = params['L']
D_s = params['D_s']
u1 = params['u1']
u2 = params['u2']
alpha = ((w*1j*L**2)/D_s)**(1/2)
Z_D = Rb * (u2/u1) * (tanh(alpha)/alpha)
return Z_RCb + Z_RCe + Z_D
# Transmission lines
def cir_RsTLsQ_fit(params, w):
'''
Fit Function: -Rs-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsTLsQ()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
Phi = 1/(Q*(w*1j)**n)
X1 = Ri # ohm/cm
Lam = (Phi/X1)**(1/2) #np.sqrt(Phi/X1)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
#
# Z_TLsQ = Lam * X1 * coth_mp
Z_TLsQ = Lam * X1 * coth(x)
return Rs + Z_TLsQ
def cir_RsRQTLsQ_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLsQ-
TLs = Simplified Transmission Line, with a non-faradaic interfacial impedance (Q)
See more under cir_RsRQTLsQ
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Q = params['Q']
n = params['n']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
Phi = 1/(Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLsQ = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLsQ
def cir_RsTLs_Fit(params, w):
'''
Fit Function: -Rs-TLs-
TLs = Simplified Transmission Line, with a faradaic interfacial impedance (RQ)
See more under cir_RsTLs()
<NAME> (<EMAIL> / <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = R/(1+R*Q*(w*1j)**n)
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_TLs
def cir_RsRQTLs_Fit(params, w):
'''
Fit Function: -Rs-RQ-TLs-
TLs = Simplified Transmission Line with a faradaic interfacial impedance (RQ)
See more under cir_RsRQTLs()
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ = (R1/(1+R1*Q1*(w*1j)**n1))
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
if str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
if str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
Lam = (Phi/X1)**(1/2)
x = L/Lam
x_mp = mp.matrix(x) #x in mp.math format
coth_mp = []
for i in range(len(Lam)):
coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
Z_TLs = Lam * X1 * coth_mp
return Rs + Z_RQ + Z_TLs
def cir_RsTLQ_fit(params, w):
'''
Fit Function: -R-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTLQ_fit(params, w):
'''
Fit Function: -R-RQ-TLQ- (interface non-reacting, i.e. blocking electrode)
Transmission line w/ full complexity, which both includes Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
Q = params['Q']
n = params['n']
#The impedance of the series resistance
Z_Rs = Rs
#The (RQ) circuit in series with the transmission line
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
if str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
if str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
# The Interfacial impedance is given by an -(RQ)- circuit
Phi = elem_Q(w, Q=Q, n=n)
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_Fit(params, w):
'''
Fit Function: -R-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which both includes Ri and Rel
See cir_RsTL() for details
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R") == -1: #if R == 'none':
Q = params['Q']
n = params['n']
fs = params['fs']
R = (1/(Q*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("Q") == -1: #elif Q == 'none':
R = params['R']
n = params['n']
fs = params['fs']
Q = (1/(R*(2*np.pi*fs)**n))
if str(params.keys())[10:].find("n") == -1: #elif n == 'none':
R = params['R']
Q = params['Q']
fs = params['fs']
n = np.log(Q*R)/np.log(1/(2*np.pi*fs))
if str(params.keys())[10:].find("fs") == -1: #elif fs == 'none':
R = params['R']
n = params['n']
Q = params['Q']
Phi = (R/(1+R*Q*(w*1j)**n))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float(mp.sinh(x_mp[i]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_fit(params, w):
'''
Fit Function: -R-RQ-TL- (interface reacting, i.e. non-blocking)
Transmission line w/ full complexity, which includes both Ri and Rel
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
Rel = params['Rel']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#
# # The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R2") == -1: #if R == 'none':
Q2 = params['Q2']
n2 = params['n2']
fs2 = params['fs2']
R2 = (1/(Q2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("Q2") == -1: #elif Q == 'none':
R2 = params['R2']
n2 = params['n2']
fs2 = params['fs2']
Q2 = (1/(R2*(2*np.pi*fs2)**n2))
elif str(params.keys())[10:].find("n2") == -1: #elif n == 'none':
R2 = params['R2']
Q2 = params['Q2']
fs2 = params['fs2']
n2 = np.log(Q2*R2)/np.log(1/(2*np.pi*fs2))
elif str(params.keys())[10:].find("fs2") == -1: #elif fs == 'none':
R2 = params['R2']
n2 = params['n2']
Q2 = params['Q2']
Phi = (R2/(1+R2*Q2*(w*1j)**n2))
X1 = Ri
X2 = Rel
Lam = (Phi/(X1+X2))**(1/2)
x = L/Lam
# x_mp = mp.matrix(x) #x in mp.math format
# coth_mp = []
# sinh_mp = []
# for i in range(len(Lam)):
# coth_mp.append(float(mp.coth(x_mp[i]).real)+float((mp.coth(x_mp[i]).imag))*1j) #Handles coth with x having very large or very small numbers
# sinh_mp.append(float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real) + float(((1-mp.exp(-2*x_mp[i]))/(2*mp.exp(-x_mp[i]))).real)*1j)
# sinh_mp.append(float(mp.sinh(x_mp[i]).real)+float((mp.sinh(x_mp[i]).imag))*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/np.array(sinh_mp))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*Lam)/sinh(x))) + Lam * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
def cir_RsTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-TL(Q(RW))-
Transmission line w/ full complexity
See cir_RsTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R = params['R']
Q = params['Q']
n = params['n']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R
Z_Q = elem_Q(w=w, Q=Q, n=n)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_TL
def cir_RsRQTL_1Dsolid_fit(params, w):
'''
Fit Function: -R-RQ-TL(Q(RW))-
Transmission line w/ full complexity, which both includes Ri and Rel. The Warburg element is specific for 1D solid-state diffusion
See cir_RsRQTL_1Dsolid() for details
<NAME> (<EMAIL>)
<NAME> (<EMAIL> || <EMAIL>)
'''
Rs = params['Rs']
L = params['L']
Ri = params['Ri']
radius = params['radius']
D = params['D']
R2 = params['R2']
Q2 = params['Q2']
n2 = params['n2']
R_w = params['R_w']
n_w = params['n_w']
Rel = params['Rel']
Ri = params['Ri']
#The impedance of the series resistance
Z_Rs = Rs
# The Interfacial impedance is given by an -(RQ)- circuit
if str(params.keys())[10:].find("R1") == -1: #if R == 'none':
Q1 = params['Q1']
n1 = params['n1']
fs1 = params['fs1']
R1 = (1/(Q1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("Q1") == -1: #elif Q == 'none':
R1 = params['R1']
n1 = params['n1']
fs1 = params['fs1']
Q1 = (1/(R1*(2*np.pi*fs1)**n1))
elif str(params.keys())[10:].find("n1") == -1: #elif n == 'none':
R1 = params['R1']
Q1 = params['Q1']
fs1 = params['fs1']
n1 = np.log(Q1*R1)/np.log(1/(2*np.pi*fs1))
elif str(params.keys())[10:].find("fs1") == -1: #elif fs == 'none':
R1 = params['R1']
n1 = params['n1']
Q1 = params['Q1']
Z_RQ1 = (R1/(1+R1*Q1*(w*1j)**n1))
#The impedance of a 1D Warburg Element
time_const = (radius**2)/D
x = (time_const*w*1j)**n_w
x_mp = mp.matrix(x)
warburg_coth_mp = []
for i in range(len(w)):
warburg_coth_mp.append(float(mp.coth(x_mp[i]).real)+float(mp.coth(x_mp[i]).imag)*1j)
Z_w = R_w * np.array(warburg_coth_mp)/x
# The Interfacial impedance is given by a Randles Equivalent circuit with the finite space warburg element in series with R2
Z_Rct = R2
Z_Q = elem_Q(w,Q=Q2,n=n2)
Z_Randles = 1/(1/Z_Q + 1/(Z_Rct+Z_w)) #Ohm
# The Impedance of the Transmission Line
lamb = (Z_Randles/(Rel+Ri))**(1/2)
x = L/lamb
# lamb_mp = mp.matrix(x)
# sinh_mp = []
# coth_mp = []
# for j in range(len(lamb_mp)):
# sinh_mp.append(float(mp.sinh(lamb_mp[j]).real)+float((mp.sinh(lamb_mp[j]).imag))*1j)
# coth_mp.append(float(mp.coth(lamb_mp[j]).real)+float(mp.coth(lamb_mp[j]).imag)*1j)
#
# Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/np.array(sinh_mp))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * np.array(coth_mp)
Z_TL = ((Rel*Ri)/(Rel+Ri)) * (L+((2*lamb)/sinh(x))) + lamb * ((Rel**2 + Ri**2)/(Rel+Ri)) * coth(x)
return Z_Rs + Z_RQ1 + Z_TL
### Least-Squares error function
def leastsq_errorfunc(params, w, re, im, circuit, weight_func):
'''
Sum of squares error function for the complex non-linear least-squares (CNLS) fitting procedure. The fitting routine (lmfit) iterates over this function
until the total sum of errors is minimized.
During the minimization the fit is weighted, and currently three different weighting options are available:
- modulus
- unity
- proportional
Modulus is generally recommended, as random errors and a bias can exist in the experimental data.
<NAME> (<EMAIL> || <EMAIL>)
Inputs
------------
- params: parameters needed for CNLS
- re: real impedance
- im: Imaginary impedance
- circuit:
The available circuits are listed below; this parameter must be passed as a string.
- C
- Q
- R-C
- R-Q
- RC
- RQ
- R-RQ
- R-RQ-RQ
- R-RQ-Q
- R-(Q(RW))
- R-(Q(RM))
- R-RC-C
- R-RC-Q
- R-RQ-Q
- R-RQ-C
- RC-RC-ZD
- R-TLsQ
- R-RQ-TLsQ
- R-TLs
- R-RQ-TLs
- R-TLQ
- R-RQ-TLQ
- R-TL
- R-RQ-TL
- R-TL1Dsolid (reactive interface with 1D solid-state diffusion)
- R-RQ-TL1Dsolid
- weight_func
Weight function
- modulus
- unity
- proportional
'''
if circuit == 'C':
re_fit = elem_C_fit(params, w).real
im_fit = -elem_C_fit(params, w).imag
elif circuit == 'Q':
re_fit = elem_Q_fit(params, w).real
im_fit = -elem_Q_fit(params, w).imag
elif circuit == 'R-C':
re_fit = cir_RsC_fit(params, w).real
im_fit = -cir_RsC_fit(params, w).imag
elif circuit == 'R-Q':
re_fit = cir_RsQ_fit(params, w).real
im_fit = -cir_RsQ_fit(params, w).imag
elif circuit == 'RC':
re_fit = cir_RC_fit(params, w).real
im_fit = -cir_RC_fit(params, w).imag
elif circuit == 'RQ':
re_fit = cir_RQ_fit(params, w).real
im_fit = -cir_RQ_fit(params, w).imag
elif circuit == 'R-RQ':
re_fit = cir_RsRQ_fit(params, w).real
im_fit = -cir_RsRQ_fit(params, w).imag
elif circuit == 'R-RQ-RQ':
re_fit = cir_RsRQRQ_fit(params, w).real
im_fit = -cir_RsRQRQ_fit(params, w).imag
elif circuit == 'R-RC-C':
re_fit = cir_RsRCC_fit(params, w).real
im_fit = -cir_RsRCC_fit(params, w).imag
elif circuit == 'R-RC-Q':
re_fit = cir_RsRCQ_fit(params, w).real
im_fit = -cir_RsRCQ_fit(params, w).imag
elif circuit == 'R-RQ-Q':
re_fit = cir_RsRQQ_fit(params, w).real
im_fit = -cir_RsRQQ_fit(params, w).imag
elif circuit == 'R-RQ-C':
re_fit = cir_RsRQC_fit(params, w).real
im_fit = -cir_RsRQC_fit(params, w).imag
elif circuit == 'R-(Q(RW))':
re_fit = cir_Randles_simplified_Fit(params, w).real
im_fit = -cir_Randles_simplified_Fit(params, w).imag
elif circuit == 'R-(Q(RM))':
re_fit = cir_Randles_uelectrode_fit(params, w).real
im_fit = -cir_Randles_uelectrode_fit(params, w).imag
elif circuit == 'C-RC-C':
re_fit = cir_C_RC_C_fit(params, w).real
im_fit = -cir_C_RC_C_fit(params, w).imag
elif circuit == 'Q-RQ-Q':
re_fit = cir_Q_RQ_Q_Fit(params, w).real
im_fit = -cir_Q_RQ_Q_Fit(params, w).imag
elif circuit == 'RC-RC-ZD':
re_fit = cir_RCRCZD_fit(params, w).real
im_fit = -cir_RCRCZD_fit(params, w).imag
elif circuit == 'R-TLsQ':
re_fit = cir_RsTLsQ_fit(params, w).real
im_fit = -cir_RsTLsQ_fit(params, w).imag
elif circuit == 'R-RQ-TLsQ':
re_fit = cir_RsRQTLsQ_Fit(params, w).real
im_fit = -cir_RsRQTLsQ_Fit(params, w).imag
elif circuit == 'R-TLs':
re_fit = cir_RsTLs_Fit(params, w).real
im_fit = -cir_RsTLs_Fit(params, w).imag
elif circuit == 'R-RQ-TLs':
re_fit = cir_RsRQTLs_Fit(params, w).real
im_fit = -cir_RsRQTLs_Fit(params, w).imag
elif circuit == 'R-TLQ':
re_fit = cir_RsTLQ_fit(params, w).real
im_fit = -cir_RsTLQ_fit(params, w).imag
elif circuit == 'R-RQ-TLQ':
re_fit = cir_RsRQTLQ_fit(params, w).real
im_fit = -cir_RsRQTLQ_fit(params, w).imag
elif circuit == 'R-TL':
re_fit = cir_RsTL_Fit(params, w).real
im_fit = -cir_RsTL_Fit(params, w).imag
elif circuit == 'R-RQ-TL':
re_fit = cir_RsRQTL_fit(params, w).real
im_fit = -cir_RsRQTL_fit(params, w).imag
elif circuit == 'R-TL1Dsolid':
re_fit = cir_RsTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsTL_1Dsolid_fit(params, w).imag
elif circuit == 'R-RQ-TL1Dsolid':
re_fit = cir_RsRQTL_1Dsolid_fit(params, w).real
im_fit = -cir_RsRQTL_1Dsolid_fit(params, w).imag
else:
print('Circuit is not defined in leastsq_errorfunc()')
error = [(re-re_fit)**2, (im-im_fit)**2] #sum of squares
#Different Weighing options, see Lasia
if weight_func == 'modulus':
weight = [1/((re_fit**2 + im_fit**2)**(1/2)), 1/((re_fit**2 + im_fit**2)**(1/2))]
elif weight_func == 'proportional':
weight = [1/(re_fit**2), 1/(im_fit**2)]
elif weight_func == 'unity':
unity_1s = []
for k in range(len(re)):
unity_1s.append(1) #makes an array of 1's, so that the weighting equals 1 * the sum of squares
weight = [unity_1s, unity_1s]
else:
print('weight not defined in leastsq_errorfunc()')
S = np.array(weight) * error #weighted sum of squares
return S
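# Illustrative wiring sketch (not part of the original library): leastsq_errorfunc() is intended to be
# handed to lmfit.minimize() as the objective, with the circuit string and weighting scheme passed
# through args. The frequency/impedance arrays and start values below are hypothetical placeholders;
# in practice EIS_fit() in the EIS_exp class below sets this up from the imported data.
def _demo_fit_R_RQ(w, re, im):
    from lmfit import minimize, Parameters #lmfit provides the CNLS minimizer used throughout
    p = Parameters()
    p.add('Rs', value=10.0, min=0)
    p.add('R', value=100.0, min=0)
    p.add('n', value=0.9, min=0, max=1)
    p.add('fs', value=1.0e3, min=0) #'Q' is left out, so cir_RsRQ_fit() derives it from R, n and fs
    return minimize(leastsq_errorfunc, p, method='leastsq',
                    args=(w, re, im, 'R-RQ', 'modulus'))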
### Fitting Class
class EIS_exp:
'''
This class is used to plot and/or analyze experimental impedance data. The class has three major functions:
- EIS_plot()
- Lin_KK()
- EIS_fit()
- EIS_plot() is used to plot experimental data with or without fit
- Lin_KK() performs a linear Kramers-Kronig analysis of the experimental data set.
- EIS_fit() performs complex non-linear least-squares fitting of the experimental data to an equivalent circuit
<NAME> (<EMAIL> || <EMAIL>)
Inputs
-----------
- path: path of datafile(s) as a string
- data: datafile(s) including extension, e.g. ['EIS_data1.mpt', 'EIS_data2.mpt']
- cycle: Specific cycle numbers can be extracted with this parameter. The default is 'off', which includes all cycles.
To extract specific cycles, insert the cycle numbers in brackets, e.g. cycle=[1,4,6] if cycles 1, 4, and 6 are wanted.
- mask: ['high frequency', 'low frequency']; if only a high- or low-frequency cut-off is desired, use 'none' for the other, e.g. mask=[10**4,'none']
'''
def __init__(self, path, data, cycle='off', mask=['none','none']):
self.df_raw0 = []
self.cycleno = []
for j in range(len(data)):
if data[j].find(".mpt") != -1: #file is a .mpt file
self.df_raw0.append(extract_mpt(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".DTA") != -1: #file is a .dta file
self.df_raw0.append(extract_dta(path=path, EIS_name=data[j])) #reads all datafiles
elif data[j].find(".z") != -1: #file is a .z file
self.df_raw0.append(extract_solar(path=path, EIS_name=data[j])) #reads all datafiles
else:
print('Data file(s) could not be identified')
self.cycleno.append(self.df_raw0[j].cycle_number)
if j > 0 and np.min(self.cycleno[j]) <= np.max(self.cycleno[j-1]): #corrects cycle_number except for the first data file
    self.df_raw0[j].update({'cycle_number': self.cycleno[j]+np.max(self.cycleno[j-1])}) #corrects cycle number
# else:
# print('__init__ Error (#1)')
#currently need to append a cycle_number column to gamry files
# adds individual dataframes into one
self.df_raw = pd.concat(self.df_raw0, axis=0) #pd.concat() takes the whole list of dataframes, so any number of data files can be combined
self.df_raw = self.df_raw.assign(w = 2*np.pi*self.df_raw.f) #creates a new column with the angular frequency
#Masking data to each cycle
self.df_pre = []
self.df_limited = []
self.df_limited2 = []
self.df = []
if mask == ['none','none'] and cycle == 'off':
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_raw[self.df_raw.cycle_number == self.df_raw.cycle_number.unique()[i]])
elif mask == ['none','none'] and cycle != 'off':
for i in range(len(cycle)):
self.df.append(self.df_raw[self.df_raw.cycle_number == cycle[i]]) #extracting dataframe for each cycle
elif mask[0] != 'none' and mask[1] == 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f > mask[0])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_pre.cycle_number.unique())): #Appending data based on cycle number
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] != 'none' and mask[1] == 'none' and cycle != 'off': # or [i for i, e in enumerate(mask) if e == 'none'] == [0]
self.df_limited = self.df_raw.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle == 'off':
self.df_pre = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_pre.dropna(how='all', inplace=True)
for i in range(len(self.df_raw.cycle_number.unique())): #includes all data
self.df.append(self.df_pre[self.df_pre.cycle_number == self.df_pre.cycle_number.unique()[i]])
elif mask[0] == 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle != 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(cycle)):
self.df.append(self.df_limited[self.df_limited2.cycle_number == cycle[i]])
elif mask[0] != 'none' and mask[1] != 'none' and cycle == 'off':
self.df_limited = self.df_raw.mask(self.df_raw.f < mask[1])
self.df_limited2 = self.df_limited.mask(self.df_raw.f > mask[0])
for i in range(len(self.df_raw.cycle_number.unique())):
self.df.append(self.df_limited[self.df_limited2.cycle_number == self.df_raw.cycle_number.unique()[i]])
else:
print('__init__ error (#2)')
def Lin_KK(self, num_RC='auto', legend='on', plot='residuals', bode='off', nyq_xlim='none', nyq_ylim='none', weight_func='Boukamp', savefig='none'):
'''
Plots the Linear Kramers-Kronig (KK) Validity Test
The script is based on Boukamp and Schönleber et al.'s papers for fitting the resistances of multiple -(RC)- circuits
to the data. A data quality analysis can hereby be made on the basis of the relative residuals
Ref.:
- Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
- Boukamp, B.A. J. Electrochem. Soc., 142, 6, 1885-1894
The function performs the KK analysis and, by default, plots the relative residuals in each subplot.
Note that weight_func should be equal to 'Boukamp'.
<NAME> (<EMAIL> || <EMAIL>)
Optional Inputs
-----------------
- num_RC:
- 'auto' applies an automatic algorithm developed by Schönleber, M. et al. Electrochimica Acta 131 (2014) 20-27
that ensures no under- or over-fitting occurs
- can be hardwired by inserting any number (RC-elements/decade)
- plot:
- 'residuals' = plots the relative residuals in subplots corresponding to the cycle numbers picked
- 'w_data' = plots the relative residuals with the experimental data, in Nyquist and bode plot if desired, see 'bode =' in description
- nyq_xlim/nyq_ylim: Change the x/y-axis limits of the Nyquist plot; if not equal to 'none', state the [min,max] values
- legend:
- 'on' = displays cycle number
- 'potential' = displays average potential which the spectra was measured at
- 'off' = off
bode = Plots Bode Plot - options:
'on' = re, im vs. log(freq)
'log' = log(re, im) vs. log(freq)
're' = re vs. log(freq)
'log_re' = log(re) vs. log(freq)
'im' = im vs. log(freq)
'log_im' = log(im) vs. log(freq)
'''
if num_RC == 'auto':
print('cycle || No. RC-elements || u')
self.decade = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
self.number_RC = []
self.number_RC_sort = []
self.KK_u = []
self.KK_Rgreater = []
self.KK_Rminor = []
M = 2
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC.append(M)
self.number_RC_sort.append(M) #needed for self.KK_R
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i]))) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC_sort.insert(0,0) #needed for self.KK_R
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC_sort)[i]):int(np.cumsum(self.number_RC_sort)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rgreater.append(np.where(np.array(self.KK_R)[i] >= 0, np.array(self.KK_R)[i], 0) )
self.KK_Rminor.append(np.where(np.array(self.KK_R)[i] < 0, np.array(self.KK_R)[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i]))))
for i in range(len(self.df)):
while self.KK_u[i] <= 0.75 or self.KK_u[i] >= 0.88:
self.number_RC_sort0 = []
self.KK_R_lim = []
self.number_RC[i] = self.number_RC[i] + 1
self.number_RC_sort0.append(self.number_RC)
self.number_RC_sort = np.insert(self.number_RC_sort0, 0,0)
self.Rparam[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[0] #Creates initial guesses for R's
self.t_const[i] = KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC[i])) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit[i] = minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC[i], weight_func, self.t_const[i]) ) #maxfev=99
self.R_names[i] = KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC[i]))[1] #creates R names
self.KK_R0 = np.delete(np.array(self.KK_R0), np.s_[0:len(self.KK_R0)])
self.KK_R0 = []
for q in range(len(self.df)):
for j in range(len(self.R_names[q])):
self.KK_R0.append(self.Lin_KK_Fit[q].params.get(self.R_names[q][j]).value)
self.KK_R_lim = np.cumsum(self.number_RC_sort) #used for KK_R[i]
self.KK_R[i] = self.KK_R0[self.KK_R_lim[i]:self.KK_R_lim[i+1]] #assigns resistances from each spectra to their respective df
self.KK_Rgreater[i] = np.where(np.array(self.KK_R[i]) >= 0, np.array(self.KK_R[i]), 0)
self.KK_Rminor[i] = np.where(np.array(self.KK_R[i]) < 0, np.array(self.KK_R[i]), 0)
self.KK_u[i] = 1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))
else:
print('['+str(i+1)+']'+' '+str(self.number_RC[i]),' '+str(np.round(self.KK_u[i],2)))
elif num_RC != 'auto': #hardwired number of RC-elements/decade
print('cycle || u')
self.decade = []
self.number_RC0 = []
self.number_RC = []
self.Rparam = []
self.t_const = []
self.Lin_KK_Fit = []
self.R_names = []
self.KK_R0 = []
self.KK_R = []
for i in range(len(self.df)):
self.decade.append(np.log10(np.max(self.df[i].f))-np.log10(np.min(self.df[i].f))) #determine the number of RC circuits based on the number of decades measured and num_RC
self.number_RC0.append(np.round(num_RC * self.decade[i]))
self.number_RC.append(np.round(num_RC * self.decade[i])) #Creates the number of -(RC)- circuits
self.Rparam.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[0]) #Creates initial guesses for R's
self.t_const.append(KK_timeconst(w=self.df[i].w, num_RC=int(self.number_RC0[i]))) #Creates time constants values for self.number_RC -(RC)- circuits
self.Lin_KK_Fit.append(minimize(KK_errorfunc, self.Rparam[i], method='leastsq', args=(self.df[i].w.values, self.df[i].re.values, self.df[i].im.values, self.number_RC0[i], weight_func, self.t_const[i]) )) #maxfev=99
self.R_names.append(KK_Rnam_val(re=self.df[i].re, re_start=self.df[i].re.idxmin(), num_RC=int(self.number_RC0[i]))[1]) #creates R names
for j in range(len(self.R_names[i])):
self.KK_R0.append(self.Lin_KK_Fit[i].params.get(self.R_names[i][j]).value)
self.number_RC0.insert(0,0)
# print(report_fit(self.Lin_KK_Fit[i])) # prints fitting report
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
self.KK_Rgreater = []
self.KK_Rminor = []
self.KK_u = []
for i in range(len(self.df)):
self.KK_R.append(self.KK_R0[int(np.cumsum(self.number_RC0)[i]):int(np.cumsum(self.number_RC0)[i+1])]) #assigns resistances from each spectra to their respective df
self.KK_Rx = np.array(self.KK_R)
self.KK_Rgreater.append(np.where(self.KK_Rx[i] >= 0, self.KK_Rx[i], 0) )
self.KK_Rminor.append(np.where(self.KK_Rx[i] < 0, self.KK_Rx[i], 0) )
self.KK_u.append(1-(np.abs(np.sum(self.KK_Rminor[i]))/np.abs(np.sum(self.KK_Rgreater[i])))) #currently gives incorrect values
print('['+str(i+1)+']'+' '+str(np.round(self.KK_u[i],2)))
else:
print('num_RC incorrectly defined')
self.KK_circuit_fit = []
self.KK_rr_re = []
self.KK_rr_im = []
for i in range(len(self.df)):
if int(self.number_RC[i]) == 2:
self.KK_circuit_fit.append(KK_RC2(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 3:
self.KK_circuit_fit.append(KK_RC3(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 4:
self.KK_circuit_fit.append(KK_RC4(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 5:
self.KK_circuit_fit.append(KK_RC5(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 6:
self.KK_circuit_fit.append(KK_RC6(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 7:
self.KK_circuit_fit.append(KK_RC7(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 8:
self.KK_circuit_fit.append(KK_RC8(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 9:
self.KK_circuit_fit.append(KK_RC9(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 10:
self.KK_circuit_fit.append(KK_RC10(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 11:
self.KK_circuit_fit.append(KK_RC11(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 12:
self.KK_circuit_fit.append(KK_RC12(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 13:
self.KK_circuit_fit.append(KK_RC13(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 14:
self.KK_circuit_fit.append(KK_RC14(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 15:
self.KK_circuit_fit.append(KK_RC15(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 16:
self.KK_circuit_fit.append(KK_RC16(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 17:
self.KK_circuit_fit.append(KK_RC17(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 18:
self.KK_circuit_fit.append(KK_RC18(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 19:
self.KK_circuit_fit.append(KK_RC19(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
elif int(self.number_RC[i]) == 20:
self.KK_circuit_fit.append(KK_RC20(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
                elif 21 <= int(self.number_RC[i]) <= 80:
                    # Dispatch to the matching KK_RC<n> simulation function by name instead of
                    # spelling out one elif branch per RC count; this assumes the KK_RC21..KK_RC80
                    # helpers are module-level functions, exactly as for the explicit branches above.
                    KK_RC_n = globals()['KK_RC' + str(int(self.number_RC[i]))]
                    self.KK_circuit_fit.append(KK_RC_n(w=self.df[i].w, Rs=self.Lin_KK_Fit[i].params.get('Rs').value, R_values=self.KK_R[i], t_values=self.t_const[i]))
else:
print('RC simulation circuit not defined')
print(' Number of RC = ', self.number_RC)
self.KK_rr_re.append(residual_real(re=self.df[i].re, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the real part
self.KK_rr_im.append(residual_imag(im=self.df[i].im, fit_re=self.KK_circuit_fit[i].to_numpy().real, fit_im=-self.KK_circuit_fit[i].to_numpy().imag)) #relative residuals for the imag part
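        # Note: residual_real/residual_imag (defined elsewhere in this module) are assumed to
        # return the Lin-KK relative residuals, i.e. the deviation of the measured real and
        # imaginary parts from the KK-consistent RC fit, normalized by the fitted impedance;
        # they are multiplied by 100 below and plotted as percentages.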
### Plotting Linear_kk results
##
#
### Label functions
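        # legend='on' labels each dataset with its cycle number (#1, #2, ...), while
        # legend='potential' labels it with the rounded average potential of that cycle (E_avg).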
self.label_re_1 = []
self.label_im_1 = []
self.label_cycleno = []
if legend == 'on':
for i in range(len(self.df)):
self.label_re_1.append("Z' (#"+str(i+1)+")")
self.label_im_1.append("Z'' (#"+str(i+1)+")")
self.label_cycleno.append('#'+str(i+1))
elif legend == 'potential':
for i in range(len(self.df)):
self.label_re_1.append("Z' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_im_1.append("Z'' ("+str(np.round(np.average(self.df[i].E_avg), 2))+' V)')
self.label_cycleno.append(str(np.round(np.average(self.df[i].E_avg), 2))+' V')
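        # Two plotting modes follow: plot='w_data' combines the Nyquist plot, an optional
        # Bode-type panel (chosen via the bode keyword) and the Lin-KK relative residuals in one
        # figure, while plot='residuals' draws only the residual panels, one subplot per cycle.
        # Passing e.g. savefig='KK_residuals.png' (any value other than 'none') writes the
        # resulting figure to disk via fig.savefig().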
if plot == 'w_data':
fig = figure(figsize=(6, 8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.5, bottom=0.1, top=0.95)
ax = fig.add_subplot(311, aspect='equal')
ax1 = fig.add_subplot(312)
ax2 = fig.add_subplot(313)
colors = sns.color_palette("colorblind", n_colors=len(self.df))
colors_real = sns.color_palette("Blues", n_colors=len(self.df)+2)
colors_imag = sns.color_palette("Oranges", n_colors=len(self.df)+2)
### Nyquist Plot
for i in range(len(self.df)):
ax.plot(self.df[i].re, self.df[i].im, marker='o', ms=4, lw=2, color=colors[i], ls='-', alpha=.7, label=self.label_cycleno[i])
### Bode Plot
if bode == 'on':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z', -Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 're':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].re, color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("Z' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_re':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), self.df[i].im, color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("-Z'' [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log_im':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_cycleno[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(-Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
elif bode == 'log':
for i in range(len(self.df)):
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].re), color=colors_real[i+1], marker='D', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_re_1[i])
ax1.plot(np.log10(self.df[i].f), np.log10(self.df[i].im), color=colors_imag[i+1], marker='s', ms=3, lw=2.25, ls='-', alpha=.7, label=self.label_im_1[i])
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("log(Z', -Z'') [$\Omega$]")
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
### Kramers-Kronig Relative Residuals
for i in range(len(self.df)):
ax2.plot(np.log10(self.df[i].f), self.KK_rr_re[i]*100, color=colors_real[i+1], marker='D', ls='--', ms=6, alpha=.7, label=self.label_re_1[i])
ax2.plot(np.log10(self.df[i].f), self.KK_rr_im[i]*100, color=colors_imag[i+1], marker='s', ls='--', ms=6, alpha=.7, label=self.label_im_1[i])
ax2.set_xlabel("log(f) [Hz]")
ax2.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
            ### Setting ylims and writing the 'Lin-KK' label on the RR subplot
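            # The limits below are scaled to 1.5x the extreme relative residuals so that the
            # 'Lin-KK' annotation (placed at 0.9x the maximum) stays inside the axes.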
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if np.min(self.KK_rr_im_min) > np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_re_min)*100*1.5, np.max(np.abs(self.KK_rr_re_min))*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_re_max)*100*.9], color='k', fontweight='bold')
elif np.min(self.KK_rr_im_min) < np.min(self.KK_rr_re_min):
ax2.set_ylim(np.min(self.KK_rr_im_min)*100*1.5, np.max(self.KK_rr_im_max)*100*1.5)
ax2.annotate('Lin-KK', xy=[np.min(np.log10(self.df[0].f)), np.max(self.KK_rr_im_max)*100*.9], color='k', fontweight='bold')
### Figure specifics
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.set_xlabel("Z' [$\Omega$]")
ax.set_ylabel("-Z'' [$\Omega$]")
if nyq_xlim != 'none':
ax.set_xlim(nyq_xlim[0], nyq_xlim[1])
if nyq_ylim != 'none':
ax.set_ylim(nyq_ylim[0], nyq_ylim[1])
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### Illustrating residuals only
elif plot == 'residuals':
colors = sns.color_palette("colorblind", n_colors=9)
colors_real = sns.color_palette("Blues", n_colors=9)
colors_imag = sns.color_palette("Oranges", n_colors=9)
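            # Each branch below builds the same residual layout for a given number of cycles:
            # ΔZ'/Δ-Z'' vs. log(f) per cycle, a dashed zero line, y-limits scaled to 1.5x the
            # worst residual of that cycle, and a 'Lin-KK, #n' (or potential) annotation.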
### 1 Cycle
if len(self.df) == 1:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax = fig.add_subplot(231)
ax.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax.set_xlabel("log(f) [Hz]")
ax.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]")
if legend == 'on' or legend == 'potential':
ax.legend(loc='best', fontsize=10, frameon=False)
ax.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting ylims and writing the 'Lin-KK' label on the RR subplot
self.KK_rr_im_min = np.min(self.KK_rr_im)
self.KK_rr_im_max = np.max(self.KK_rr_im)
self.KK_rr_re_min = np.min(self.KK_rr_re)
self.KK_rr_re_max = np.max(self.KK_rr_re)
if self.KK_rr_re_max > self.KK_rr_im_max:
self.KK_ymax = self.KK_rr_re_max
else:
self.KK_ymax = self.KK_rr_im_max
if self.KK_rr_re_min < self.KK_rr_im_min:
self.KK_ymin = self.KK_rr_re_min
else:
self.KK_ymin = self.KK_rr_im_min
if np.abs(self.KK_ymin) > self.KK_ymax:
ax.set_ylim(self.KK_ymin*100*1.5, np.abs(self.KK_ymin)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin)*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin) < self.KK_ymax:
ax.set_ylim(np.negative(self.KK_ymax)*100*1.5, np.abs(self.KK_ymax)*100*1.5)
if legend == 'on':
ax.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
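            # The branches below repeat the single-cycle pattern for multi-cycle data, changing
            # only the subplot grid and the per-cycle indices used for data, limits and labels.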
### 2 Cycles
elif len(self.df) == 2:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
#cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting ylims and labeling the RR subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 3 Cycles
elif len(self.df) == 3:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_xlabel("log(f) [Hz]")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting ylims and labeling the RR subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.3], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.3], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.3], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 4 Cycles
elif len(self.df) == 4:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax2.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
ax3.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting ylims and labeling the RR subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 5 Cycles
elif len(self.df) == 5:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=18)
ax4.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting ylims and labeling the RR subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 6 Cycles
elif len(self.df) == 6:
fig = figure(figsize=(12, 3.8), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(231)
ax2 = fig.add_subplot(232)
ax3 = fig.add_subplot(233)
ax4 = fig.add_subplot(234)
ax5 = fig.add_subplot(235)
ax6 = fig.add_subplot(236)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_xlabel("log(f) [Hz]")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting ylims and labeling the RR subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 7 Cycles
elif len(self.df) == 7:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax3.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax5.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 7
ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax7.set_xlabel("log(f) [Hz]")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=15)
if legend == 'on' or legend == 'potential':
ax7.legend(loc='best', fontsize=10, frameon=False)
ax7.axhline(0, ls='--', c='k', alpha=.5)
                ### Setting ylims and labeling the RR subplots with 'Lin-KK'
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
                elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
                    ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:
ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:
ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], color='k', fontweight='bold')
#Save Figure
if savefig != 'none':
fig.savefig(savefig)
### 8 Cycles
elif len(self.df) == 8:
fig = figure(figsize=(12, 5), dpi=120, facecolor='w', edgecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, hspace=0.25, wspace=0.25, bottom=0.1, top=0.95)
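# Eight relative-residual panels laid out on a 3x3 grid (position 339 is left unused)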
ax1 = fig.add_subplot(331)
ax2 = fig.add_subplot(332)
ax3 = fig.add_subplot(333)
ax4 = fig.add_subplot(334)
ax5 = fig.add_subplot(335)
ax6 = fig.add_subplot(336)
ax7 = fig.add_subplot(337)
ax8 = fig.add_subplot(338)
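# Each panel shows the relative Lin-KK residuals of Z' and -Z'' in percent versus log10(f);
# the dashed horizontal line at zero marks perfect agreement with the Kramers-Kronig fit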
#cycle 1
ax1.plot(np.log10(self.df[0].f), self.KK_rr_re[0]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax1.plot(np.log10(self.df[0].f), self.KK_rr_im[0]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax1.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax1.legend(loc='best', fontsize=10, frameon=False)
ax1.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 2
ax2.plot(np.log10(self.df[1].f), self.KK_rr_re[1]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax2.plot(np.log10(self.df[1].f), self.KK_rr_im[1]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax2.legend(loc='best', fontsize=10, frameon=False)
ax2.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 3
ax3.plot(np.log10(self.df[2].f), self.KK_rr_re[2]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax3.plot(np.log10(self.df[2].f), self.KK_rr_im[2]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax3.legend(loc='best', fontsize=10, frameon=False)
ax3.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 4
ax4.plot(np.log10(self.df[3].f), self.KK_rr_re[3]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax4.plot(np.log10(self.df[3].f), self.KK_rr_im[3]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax4.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax4.legend(loc='best', fontsize=10, frameon=False)
ax4.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 5
ax5.plot(np.log10(self.df[4].f), self.KK_rr_re[4]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax5.plot(np.log10(self.df[4].f), self.KK_rr_im[4]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
if legend == 'on' or legend == 'potential':
ax5.legend(loc='best', fontsize=10, frameon=False)
ax5.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 6
ax6.plot(np.log10(self.df[5].f), self.KK_rr_re[5]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax6.plot(np.log10(self.df[5].f), self.KK_rr_im[5]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax6.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax6.legend(loc='best', fontsize=10, frameon=False)
ax6.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 7
ax7.plot(np.log10(self.df[6].f), self.KK_rr_re[6]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax7.plot(np.log10(self.df[6].f), self.KK_rr_im[6]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax7.set_xlabel("log(f) [Hz]")
ax7.set_ylabel("$\Delta$Z', $\Delta$-Z'' [%]", fontsize=14)
if legend == 'on' or legend == 'potential':
ax7.legend(loc='best', fontsize=10, frameon=False)
ax7.axhline(0, ls='--', c='k', alpha=.5)
# Cycle 8
ax8.plot(np.log10(self.df[7].f), self.KK_rr_re[7]*100, color=colors_real[3], marker='D', ls='--', ms=6, alpha=.7, label="$\Delta$Z'")
ax8.plot(np.log10(self.df[7].f), self.KK_rr_im[7]*100, color=colors_imag[3], marker='s', ls='--', ms=6, alpha=.7, label="$\Delta$-Z''")
ax8.set_xlabel("log(f) [Hz]")
if legend == 'on' or legend == 'potential':
ax8.legend(loc='best', fontsize=10, frameon=False)
ax8.axhline(0, ls='--', c='k', alpha=.5)
### Setting ylims and labeling plot with 'KK-Test' in RR subplot
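# For each cycle the extreme real and imaginary residuals are collected; whichever has the
# larger magnitude defines a symmetric y-range (scaled to percent and padded by a factor of 1.5),
# and the 'Lin-KK' label is placed near the top of the panel at 1.2x that extreme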
self.KK_rr_im_min = []
self.KK_rr_im_max = []
self.KK_rr_re_min = []
self.KK_rr_re_max = []
self.KK_ymin = []
self.KK_ymax = []
for i in range(len(self.df)):
self.KK_rr_im_min.append(np.min(self.KK_rr_im[i]))
self.KK_rr_im_max.append(np.max(self.KK_rr_im[i]))
self.KK_rr_re_min.append(np.min(self.KK_rr_re[i]))
self.KK_rr_re_max.append(np.max(self.KK_rr_re[i]))
if self.KK_rr_re_max[i] > self.KK_rr_im_max[i]:
self.KK_ymax.append(self.KK_rr_re_max[i])
else:
self.KK_ymax.append(self.KK_rr_im_max[i])
if self.KK_rr_re_min[i] < self.KK_rr_im_min[i]:
self.KK_ymin.append(self.KK_rr_re_min[i])
else:
self.KK_ymin.append(self.KK_rr_im_min[i])
if np.abs(self.KK_ymin[0]) > self.KK_ymax[0]:
ax1.set_ylim(self.KK_ymin[0]*100*1.5, np.abs(self.KK_ymin[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymin[0])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[0]) < self.KK_ymax[0]:
ax1.set_ylim(np.negative(self.KK_ymax[0])*100*1.5, np.abs(self.KK_ymax[0])*100*1.5)
if legend == 'on':
ax1.annotate('Lin-KK, #1', xy=[np.min(np.log10(self.df[0].f)), np.abs(self.KK_ymax[0])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax1.annotate('Lin-KK, ('+str(np.round(np.average(self.df[0].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[0].f)), self.KK_ymax[0]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[1]) > self.KK_ymax[1]:
ax2.set_ylim(self.KK_ymin[1]*100*1.5, np.abs(self.KK_ymin[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.3], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), np.max(np.abs(self.KK_ymin[1]))*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[1]) < self.KK_ymax[1]:
ax2.set_ylim(np.negative(self.KK_ymax[1])*100*1.5, np.abs(self.KK_ymax[1])*100*1.5)
if legend == 'on':
ax2.annotate('Lin-KK, #2', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax2.annotate('Lin-KK ('+str(np.round(np.average(self.df[1].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[1].f)), self.KK_ymax[1]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[2]) > self.KK_ymax[2]:
ax3.set_ylim(self.KK_ymin[2]*100*1.5, np.abs(self.KK_ymin[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymin[2])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[2]) < self.KK_ymax[2]:
ax3.set_ylim(np.negative(self.KK_ymax[2])*100*1.5, np.abs(self.KK_ymax[2])*100*1.5)
if legend == 'on':
ax3.annotate('Lin-KK, #3', xy=[np.min(np.log10(self.df[2].f)), np.abs(self.KK_ymax[2])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax3.annotate('Lin-KK, ('+str(np.round(np.average(self.df[2].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[2].f)), self.KK_ymax[2]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[3]) > self.KK_ymax[3]:
ax4.set_ylim(self.KK_ymin[3]*100*1.5, np.abs(self.KK_ymin[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymin[3])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[3]) < self.KK_ymax[3]:
ax4.set_ylim(np.negative(self.KK_ymax[3])*100*1.5, np.abs(self.KK_ymax[3])*100*1.5)
if legend == 'on':
ax4.annotate('Lin-KK, #4', xy=[np.min(np.log10(self.df[3].f)), np.abs(self.KK_ymax[3])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax4.annotate('Lin-KK, ('+str(np.round(np.average(self.df[3].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[3].f)), self.KK_ymax[3]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[4]) > self.KK_ymax[4]:
ax5.set_ylim(self.KK_ymin[4]*100*1.5, np.abs(self.KK_ymin[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymin[4])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[4]) < self.KK_ymax[4]:
ax5.set_ylim(np.negative(self.KK_ymax[4])*100*1.5, np.abs(self.KK_ymax[4])*100*1.5)
if legend == 'on':
ax5.annotate('Lin-KK, #5', xy=[np.min(np.log10(self.df[4].f)), np.abs(self.KK_ymax[4])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax5.annotate('Lin-KK, ('+str(np.round(np.average(self.df[4].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[4].f)), self.KK_ymax[4]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[5]) > self.KK_ymax[5]:
ax6.set_ylim(self.KK_ymin[5]*100*1.5, np.abs(self.KK_ymin[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymin[5])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[5]) < self.KK_ymax[5]:
ax6.set_ylim(np.negative(self.KK_ymax[5])*100*1.5, np.abs(self.KK_ymax[5])*100*1.5)
if legend == 'on':
ax6.annotate('Lin-KK, #6', xy=[np.min(np.log10(self.df[5].f)), np.abs(self.KK_ymax[5])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax6.annotate('Lin-KK, ('+str(np.round(np.average(self.df[5].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[5].f)), self.KK_ymax[5]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[6]) > self.KK_ymax[6]:
ax7.set_ylim(self.KK_ymin[6]*100*1.5, np.abs(self.KK_ymin[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymin[6])*100*1.2], color='k', fontweight='bold')
elif np.abs(self.KK_ymin[6]) < self.KK_ymax[6]:
ax7.set_ylim(np.negative(self.KK_ymax[6])*100*1.5, np.abs(self.KK_ymax[6])*100*1.5)
if legend == 'on':
ax7.annotate('Lin-KK, #7', xy=[np.min(np.log10(self.df[6].f)), np.abs(self.KK_ymax[6])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax7.annotate('Lin-KK, ('+str(np.round(np.average(self.df[6].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[6].f)), self.KK_ymax[6]*100*1.2], color='k', fontweight='bold')
if np.abs(self.KK_ymin[7]) > self.KK_ymax[7]:
ax8.set_ylim(self.KK_ymin[7]*100*1.5, np.abs(self.KK_ymin[7])*100*1.5)
if legend == 'on':
ax8.annotate('Lin-KK, #8', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')
elif legend == 'potential':
ax8.annotate('Lin-KK ('+str(np.round(np.average(self.df[7].E_avg),2))+' V)', xy=[np.min(np.log10(self.df[7].f)), np.abs(self.KK_ymin[7])*100*1.2], color='k', fontweight='bold')